code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
import cv2
import math
import itertools
# Standard JPEG luminance quantization table (ITU-T T.81, Annex K.1).
# Larger entries quantize the higher-frequency DCT coefficients more
# coarsely, which is where most of the lossy compression comes from.
jpeg_quantiz_matrix = np.array([[16, 11, 10, 16, 24, 40, 51, 61],
[12, 12, 14, 19, 26, 58, 60, 55],
[14, 13, 16, 24, 40, 57, 69, 56],
[14, 17, 22, 29, 51, 87, 80, 62],
[18, 22, 37, 56, 68, 109, 103, 77],
[24, 35, 55, 64, 81, 104, 113, 92],
[49, 64, 78, 87, 103, 121, 120, 101],
[72, 92, 95, 98, 112, 100, 103, 99]])
def pieces(l, n):
    """Yield successive chunks of *l*, each at most *n* items long."""
    total = len(l)
    start = 0
    while start < total:
        yield l[start:start + n]
        start += n
def zig_zag(array, n=None):
    """Keep the first *n* entries of *array* visited in JPEG zig-zag order.

    The walk starts at the top-left corner; the kept entries are copied
    into an otherwise-zero array of the same shape.  Used to retain only
    the low-frequency DCT coefficients of a block.
    """
    rows, cols = np.array(array).shape
    res = np.zeros_like(array)
    # Enumerate coordinates anti-diagonal by anti-diagonal: on even
    # diagonals the walk moves up-right (row index decreasing), on odd
    # ones down-left (row index increasing) -- the classic JPEG ordering.
    coords = []
    for diag in range(rows + cols - 1):
        lo = max(0, diag - cols + 1)
        hi = min(diag, rows - 1)
        js = range(hi, lo - 1, -1) if diag % 2 == 0 else range(lo, hi + 1)
        coords.extend((j, diag - j) for j in js)
    for k in range(n):
        j, i = coords[k]
        res[j][i] = array[j][i]
    return res
def compress(image, num_coeffs=None, scale_factor=1):
    """JPEG-style compression of a single grayscale image.

    The image is zero-padded to a multiple of 8, cut into 8x8 tiles, and
    each tile is DCT-transformed.  Either the first ``num_coeffs`` zig-zag
    coefficients are kept per tile, or (when ``num_coeffs`` is None) the
    coefficients are quantized with the JPEG matrix scaled by
    ``scale_factor``.  The tiles are then inverse-transformed, stitched
    back together, clamped to [0, 255] and returned as uint8 cropped to
    the original size.
    """
    image = np.float32(image)
    orig_h, orig_w = image.shape

    # Pad the canvas so both dimensions are multiples of the 8x8 tile size.
    padded_h = np.int32(math.ceil(orig_h / 8)) * 8
    padded_w = np.int32(math.ceil(orig_w / 8)) * 8
    canvas = np.zeros((padded_h, padded_w))
    canvas[0:orig_h, 0:orig_w] = image
    image = np.float32(canvas)
    height, width = image.shape

    # Forward DCT on every 8x8 tile (row-major tile order).
    dct_blocks = []
    for top in range(0, height, 8):
        for left in range(0, width, 8):
            dct_blocks.append(cv2.dct(image[top:top + 8, left:left + 8]))

    if num_coeffs is not None:
        # Keep only the first num_coeffs zig-zag DCT coefficients per tile.
        kept_blocks = [zig_zag(block, num_coeffs) for block in dct_blocks]
    else:
        # Quantize then de-quantize with the scaled JPEG quantization matrix.
        quant = jpeg_quantiz_matrix * scale_factor
        kept_blocks = [np.round(block / quant) * quant for block in dct_blocks]

    decoded_blocks = [cv2.idct(block) for block in kept_blocks]

    # Re-assemble the tiles row by row into a flat pixel stream.
    pixels = []
    for row_of_blocks in pieces(decoded_blocks, width // 8):
        for r in range(8):
            for block in row_of_blocks:
                pixels.extend(block[r])
    result = np.array(pixels).reshape(height, width)

    # Clamp to the valid pixel range, convert to 8-bit, drop the padding.
    result[result < 0] = 0
    result[result > 255] = 255
    return np.uint8(result)[0:orig_h, 0:orig_w]
def main():
    """Read cv.mp4, compress every frame, and write the result to output.mp4.

    Also dumps the first frame before/after compression as JPEG files so
    the effect of the compression can be inspected visually.
    """
    input_video = cv2.VideoCapture('cv.mp4')
    if not input_video.isOpened():
        print("Error in opening video")
        return  # nothing to do without an input stream
    count = 0
    compressed_frames = []
    while input_video.isOpened():
        ret, frame = input_video.read()
        if not ret:
            break
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        compressed_frames.append(compress(frame, 4))
        if count == 0:
            cv2.imwrite('firstframe_in.jpg', frame)
            cv2.imwrite('firstframe_out.jpg', compress(frame, 4))
        print('Compressed Frame: ', count)
        count += 1
    input_video.release()
    if not compressed_frames:
        print("No frames were read from the input video")
        return
    h, w = compressed_frames[0].shape
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # BUG FIX: VideoWriter expects the frame size as (width, height), not
    # (height, width), and isColor=False is required to write the
    # single-channel grayscale frames; otherwise every frame is silently
    # dropped and the output file stays empty.
    output_video = cv2.VideoWriter('output.mp4', fourcc, 15, (w, h), isColor=False)
    for frame in compressed_frames:
        output_video.write(frame)
    output_video.release()
    cv2.destroyAllWindows()
main() | [
"numpy.uint8",
"cv2.imwrite",
"cv2.dct",
"math.ceil",
"cv2.VideoWriter",
"numpy.array",
"numpy.zeros",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.VideoWriter_fourcc",
"cv2.idct",
"cv2.cvtColor",
"numpy.zeros_like",
"numpy.float32",
"numpy.round"
] | [((88, 394), 'numpy.array', 'np.array', (['[[16, 11, 10, 16, 24, 40, 51, 61], [12, 12, 14, 19, 26, 58, 60, 55], [14, \n 13, 16, 24, 40, 57, 69, 56], [14, 17, 22, 29, 51, 87, 80, 62], [18, 22,\n 37, 56, 68, 109, 103, 77], [24, 35, 55, 64, 81, 104, 113, 92], [49, 64,\n 78, 87, 103, 121, 120, 101], [72, 92, 95, 98, 112, 100, 103, 99]]'], {}), '([[16, 11, 10, 16, 24, 40, 51, 61], [12, 12, 14, 19, 26, 58, 60, 55\n ], [14, 13, 16, 24, 40, 57, 69, 56], [14, 17, 22, 29, 51, 87, 80, 62],\n [18, 22, 37, 56, 68, 109, 103, 77], [24, 35, 55, 64, 81, 104, 113, 92],\n [49, 64, 78, 87, 103, 121, 120, 101], [72, 92, 95, 98, 112, 100, 103, 99]])\n', (96, 394), True, 'import numpy as np\n'), ((772, 792), 'numpy.zeros_like', 'np.zeros_like', (['array'], {}), '(array)\n', (785, 792), True, 'import numpy as np\n'), ((1850, 1867), 'numpy.float32', 'np.float32', (['image'], {}), '(image)\n', (1860, 1867), True, 'import numpy as np\n'), ((2009, 2038), 'numpy.zeros', 'np.zeros', (['(n_height, n_width)'], {}), '((n_height, n_width))\n', (2017, 2038), True, 'import numpy as np\n'), ((2088, 2110), 'numpy.float32', 'np.float32', (['new_canvas'], {}), '(new_canvas)\n', (2098, 2110), True, 'import numpy as np\n'), ((3491, 3511), 'numpy.uint8', 'np.uint8', (['comp_image'], {}), '(comp_image)\n', (3499, 3511), True, 'import numpy as np\n'), ((3583, 3609), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""cv.mp4"""'], {}), "('cv.mp4')\n", (3599, 3609), False, 'import cv2\n'), ((4356, 4387), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (4378, 4387), False, 'import cv2\n'), ((4408, 4457), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""output.mp4"""', 'fourcc', '(15)', '(h, w)'], {}), "('output.mp4', fourcc, 15, (h, w))\n", (4423, 4457), False, 'import cv2\n'), ((4598, 4621), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4619, 4621), False, 'import cv2\n'), ((739, 754), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (747, 754), 
True, 'import numpy as np\n'), ((2321, 2341), 'cv2.dct', 'cv2.dct', (['image_block'], {}), '(image_block)\n', (2328, 2341), False, 'import cv2\n'), ((2968, 2989), 'cv2.idct', 'cv2.idct', (['coeff_block'], {}), '(coeff_block)\n', (2976, 2989), False, 'import cv2\n'), ((1921, 1937), 'math.ceil', 'math.ceil', (['(h / 8)'], {}), '(h / 8)\n', (1930, 1937), False, 'import math\n'), ((1967, 1983), 'math.ceil', 'math.ceil', (['(w / 8)'], {}), '(w / 8)\n', (1976, 1983), False, 'import math\n'), ((2719, 2777), 'numpy.round', 'np.round', (['(dct_block / (jpeg_quantiz_matrix * scale_factor))'], {}), '(dct_block / (jpeg_quantiz_matrix * scale_factor))\n', (2727, 2777), True, 'import numpy as np\n'), ((3301, 3321), 'numpy.array', 'np.array', (['comp_image'], {}), '(comp_image)\n', (3309, 3321), True, 'import numpy as np\n'), ((3860, 3899), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (3872, 3899), False, 'import cv2\n'), ((4014, 4053), 'cv2.imwrite', 'cv2.imwrite', (['"""firstframe_in.jpg"""', 'frame'], {}), "('firstframe_in.jpg', frame)\n", (4025, 4053), False, 'import cv2\n')] |
'''
Test file for the dce_models sub-module
'''
import pytest
import os
import sys
import numpy as np
from tempfile import TemporaryDirectory
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'src')))
#-------------------------------------------------------------------------------
# Tests for dibem module
from QbiPy.dce_models import dibem, two_cxm_model, active_uptake_model
from QbiPy.dce_models import dce_aif
#-------------------------------------------------------------------------------
def test_params_AUEM():
    """AUEM parameters must survive a round-trip through the DIBEM form.

    Checked with both settings of the conversion's final flag.
    """
    params = (1.0, 0.2, 0.25, 0.05)  # F_p, v_ecs, k_i, k_ef
    for flag in (False, True):
        dibem_params = active_uptake_model.params_to_DIBEM(*params, flag)
        recovered = active_uptake_model.params_from_DIBEM(*dibem_params, flag)
        for expected, actual in zip(params, recovered):
            assert expected == pytest.approx(actual)
def test_params_2CXM():
    """2CXM parameters must survive a round-trip through the DIBEM form.

    Checked with both settings of the conversion's final flag.
    """
    params = (1.0, 0.2, 0.2, 0.1)  # F_p, PS, v_e, v_p
    for flag in (False, True):
        dibem_params = two_cxm_model.params_to_DIBEM(*params, flag)
        print('params_2CXM_to_DIBEM: ', dibem_params)
        recovered = two_cxm_model.params_from_DIBEM(*dibem_params, flag)
        for expected, actual in zip(params, recovered):
            assert expected == pytest.approx(actual)
def test_concentration_from_model_scalar():
    """Scalar model parameters yield one finite concentration per time point."""
    times = np.linspace(0, 6, 50)
    aif = dce_aif.Aif(times=times)
    C_t = dibem.concentration_from_model(
        aif,
        0.2, 0.2, 0.5, 4.0,  # F_pos, F_neg, K_pos, K_neg
        0.5, 0.1, 0.05       # f_a, tau_a, tau_v
    )
    assert C_t.size == times.size
    assert np.all(np.isfinite(C_t))
def test_concentration_from_array():
    """Array-valued (per-voxel) model parameters broadcast over two voxels."""
    times = np.linspace(0, 6, 50)
    aif = dce_aif.Aif(times=times)
    C_t = dibem.concentration_from_model(
        aif,
        [0.2, 0.25], [0.2, 0.25], [0.5, 0.55], [4.0, 4.5],  # F/K per voxel
        0.5, 0.1, 0.05                                       # f_a, tau_a, tau_v
    )
    assert C_t.size == 2 * times.size
assert np.all(np.isfinite(C_t)) | [
"QbiPy.dce_models.dibem.concentration_from_model",
"pytest.approx",
"QbiPy.dce_models.two_cxm_model.params_to_DIBEM",
"QbiPy.dce_models.two_cxm_model.params_from_DIBEM",
"QbiPy.dce_models.active_uptake_model.params_to_DIBEM",
"QbiPy.dce_models.active_uptake_model.params_from_DIBEM",
"numpy.linspace",
... | [((697, 762), 'QbiPy.dce_models.active_uptake_model.params_to_DIBEM', 'active_uptake_model.params_to_DIBEM', (['F_p', 'v_ecs', 'k_i', 'k_ef', '(False)'], {}), '(F_p, v_ecs, k_i, k_ef, False)\n', (732, 762), False, 'from QbiPy.dce_models import dibem, two_cxm_model, active_uptake_model\n'), ((795, 854), 'QbiPy.dce_models.active_uptake_model.params_from_DIBEM', 'active_uptake_model.params_from_DIBEM', (['*dibem_params', '(False)'], {}), '(*dibem_params, False)\n', (832, 854), False, 'from QbiPy.dce_models import dibem, two_cxm_model, active_uptake_model\n'), ((875, 939), 'QbiPy.dce_models.active_uptake_model.params_to_DIBEM', 'active_uptake_model.params_to_DIBEM', (['F_p', 'v_ecs', 'k_i', 'k_ef', '(True)'], {}), '(F_p, v_ecs, k_i, k_ef, True)\n', (910, 939), False, 'from QbiPy.dce_models import dibem, two_cxm_model, active_uptake_model\n'), ((972, 1030), 'QbiPy.dce_models.active_uptake_model.params_from_DIBEM', 'active_uptake_model.params_from_DIBEM', (['*dibem_params', '(True)'], {}), '(*dibem_params, True)\n', (1009, 1030), False, 'from QbiPy.dce_models import dibem, two_cxm_model, active_uptake_model\n'), ((1450, 1505), 'QbiPy.dce_models.two_cxm_model.params_to_DIBEM', 'two_cxm_model.params_to_DIBEM', (['F_p', 'PS', 'v_e', 'v_p', '(False)'], {}), '(F_p, PS, v_e, v_p, False)\n', (1479, 1505), False, 'from QbiPy.dce_models import dibem, two_cxm_model, active_uptake_model\n'), ((1584, 1637), 'QbiPy.dce_models.two_cxm_model.params_from_DIBEM', 'two_cxm_model.params_from_DIBEM', (['*dibem_params', '(False)'], {}), '(*dibem_params, False)\n', (1615, 1637), False, 'from QbiPy.dce_models import dibem, two_cxm_model, active_uptake_model\n'), ((1658, 1712), 'QbiPy.dce_models.two_cxm_model.params_to_DIBEM', 'two_cxm_model.params_to_DIBEM', (['F_p', 'PS', 'v_e', 'v_p', '(True)'], {}), '(F_p, PS, v_e, v_p, True)\n', (1687, 1712), False, 'from QbiPy.dce_models import dibem, two_cxm_model, active_uptake_model\n'), ((1791, 1843), 
'QbiPy.dce_models.two_cxm_model.params_from_DIBEM', 'two_cxm_model.params_from_DIBEM', (['*dibem_params', '(True)'], {}), '(*dibem_params, True)\n', (1822, 1843), False, 'from QbiPy.dce_models import dibem, two_cxm_model, active_uptake_model\n'), ((2202, 2223), 'numpy.linspace', 'np.linspace', (['(0)', '(6)', '(50)'], {}), '(0, 6, 50)\n', (2213, 2223), True, 'import numpy as np\n'), ((2234, 2258), 'QbiPy.dce_models.dce_aif.Aif', 'dce_aif.Aif', ([], {'times': 'times'}), '(times=times)\n', (2245, 2258), False, 'from QbiPy.dce_models import dce_aif\n'), ((2387, 2473), 'QbiPy.dce_models.dibem.concentration_from_model', 'dibem.concentration_from_model', (['aif', 'F_pos', 'F_neg', 'K_pos', 'K_neg', 'f_a', 'tau_a', 'tau_v'], {}), '(aif, F_pos, F_neg, K_pos, K_neg, f_a, tau_a,\n tau_v)\n', (2417, 2473), False, 'from QbiPy.dce_models import dibem, two_cxm_model, active_uptake_model\n'), ((2622, 2643), 'numpy.linspace', 'np.linspace', (['(0)', '(6)', '(50)'], {}), '(0, 6, 50)\n', (2633, 2643), True, 'import numpy as np\n'), ((2654, 2678), 'QbiPy.dce_models.dce_aif.Aif', 'dce_aif.Aif', ([], {'times': 'times'}), '(times=times)\n', (2665, 2678), False, 'from QbiPy.dce_models import dce_aif\n'), ((2838, 2924), 'QbiPy.dce_models.dibem.concentration_from_model', 'dibem.concentration_from_model', (['aif', 'F_pos', 'F_neg', 'K_pos', 'K_neg', 'f_a', 'tau_a', 'tau_v'], {}), '(aif, F_pos, F_neg, K_pos, K_neg, f_a, tau_a,\n tau_v)\n', (2868, 2924), False, 'from QbiPy.dce_models import dibem, two_cxm_model, active_uptake_model\n'), ((1050, 1069), 'pytest.approx', 'pytest.approx', (['F_p2'], {}), '(F_p2)\n', (1063, 1069), False, 'import pytest\n'), ((1090, 1111), 'pytest.approx', 'pytest.approx', (['v_ecs2'], {}), '(v_ecs2)\n', (1103, 1111), False, 'import pytest\n'), ((1130, 1149), 'pytest.approx', 'pytest.approx', (['k_i2'], {}), '(k_i2)\n', (1143, 1149), False, 'import pytest\n'), ((1169, 1189), 'pytest.approx', 'pytest.approx', (['k_ef2'], {}), '(k_ef2)\n', (1182, 1189), False, 
'import pytest\n'), ((1208, 1227), 'pytest.approx', 'pytest.approx', (['F_p3'], {}), '(F_p3)\n', (1221, 1227), False, 'import pytest\n'), ((1248, 1269), 'pytest.approx', 'pytest.approx', (['v_ecs3'], {}), '(v_ecs3)\n', (1261, 1269), False, 'import pytest\n'), ((1288, 1307), 'pytest.approx', 'pytest.approx', (['k_i3'], {}), '(k_i3)\n', (1301, 1307), False, 'import pytest\n'), ((1327, 1347), 'pytest.approx', 'pytest.approx', (['k_ef3'], {}), '(k_ef3)\n', (1340, 1347), False, 'import pytest\n'), ((1863, 1882), 'pytest.approx', 'pytest.approx', (['F_p2'], {}), '(F_p2)\n', (1876, 1882), False, 'import pytest\n'), ((1900, 1918), 'pytest.approx', 'pytest.approx', (['PS2'], {}), '(PS2)\n', (1913, 1918), False, 'import pytest\n'), ((1937, 1956), 'pytest.approx', 'pytest.approx', (['v_e2'], {}), '(v_e2)\n', (1950, 1956), False, 'import pytest\n'), ((1975, 1994), 'pytest.approx', 'pytest.approx', (['v_p2'], {}), '(v_p2)\n', (1988, 1994), False, 'import pytest\n'), ((2013, 2032), 'pytest.approx', 'pytest.approx', (['F_p3'], {}), '(F_p3)\n', (2026, 2032), False, 'import pytest\n'), ((2050, 2068), 'pytest.approx', 'pytest.approx', (['PS3'], {}), '(PS3)\n', (2063, 2068), False, 'import pytest\n'), ((2087, 2106), 'pytest.approx', 'pytest.approx', (['v_e3'], {}), '(v_e3)\n', (2100, 2106), False, 'import pytest\n'), ((2125, 2144), 'pytest.approx', 'pytest.approx', (['v_p3'], {}), '(v_p3)\n', (2138, 2144), False, 'import pytest\n'), ((2554, 2570), 'numpy.isfinite', 'np.isfinite', (['C_t'], {}), '(C_t)\n', (2565, 2570), True, 'import numpy as np\n'), ((3007, 3023), 'numpy.isfinite', 'np.isfinite', (['C_t'], {}), '(C_t)\n', (3018, 3023), True, 'import numpy as np\n'), ((195, 220), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (210, 220), False, 'import os\n')] |
"""
Currently I only have support for Cora dataset - feel free to add your own graph data.
You can find the details on how Cora was constructed here: http://eliassi.org/papers/ai-mag-tr08.pdf
TL;DR:
The feature vectors are 1433 features long. The authors found the most frequent words across every paper
in the graph (they've removed the low frequency words + some additional processing) and made a vocab from those.
Now feature "i" in the feature vector tells us whether the paper contains i-th word from the vocab (1-yes, 0-no).
e.g. : feature vector 100...00 means that this node/paper contains only the 0th word of the vocab.
Note on Cora processing:
GAT and many other papers (GCN, etc.) used the processed version of Cora that can be found here:
https://github.com/kimiyoung/planetoid
I started from that same data, and after pre-processing it the same way as GAT and GCN,
I've saved it into only 3 files so there is no need to copy-paste the same pre-processing code around anymore.
Node features are saved in CSR sparse format, labels go from 0-6 (not one-hot) and finally the topology of the
graph remained the same I just renamed it to adjacency_list.dict.
Note on sparse matrices:
If you're like me you didn't have to deal with sparse matrices until you started playing with GNNs.
You'll usually see the following formats in GNN implementations: LIL, COO, CSR and CSC.
Occasionally, you'll also see DOK and in special settings DIA and BSR as well (so 7 in total).
It's not nuclear physics (it's harder :P) check out these 2 links and you're good to go:
* https://docs.scipy.org/doc/scipy/reference/sparse.html
* https://en.wikipedia.org/wiki/Sparse_matrix
TL;DR:
LIL, COO and DOK are used for efficient modification of your sparse structure/graph topology (add/remove edges)
CSC and CSR are used for efficient arithmetic operations (addition, multiplication, etc.)
DIA and BSR are used when you're dealing with special types of sparse matrices - diagonal and block matrices.
"""
import pickle
import numpy as np
import networkx as nx
import scipy.sparse as sp
import torch
from utils.constants import *
from utils.visualizations import plot_in_out_degree_distributions, visualize_graph
def load_graph_data(training_config, device):
    """Load a graph dataset (currently only Cora) onto the given device.

    Returns (node_features, node_labels, topology, train_indices,
    val_indices, test_indices).  ``topology`` is an edge index of shape
    (2, E) for IMP3 layers, or a masked adjacency matrix (0 on edges,
    -inf elsewhere, used to mask attention softmax scores) for IMP1/IMP2.
    """
    dataset_name = training_config['dataset_name'].lower()
    layer_type = training_config['layer_type']
    should_visualize = training_config['should_visualize']
    if dataset_name == DatasetType.CORA.name.lower():
        # shape = (N, FIN), where N is the number of nodes and FIN is the number of input features
        node_features_csr = pickle_read(os.path.join(CORA_PATH, 'node_features.csr'))
        # shape = (N, 1)
        node_labels_npy = pickle_read(os.path.join(CORA_PATH, 'node_labels.npy'))
        # shape = (N, number of neighboring nodes) <- this is a dictionary not a matrix!
        adjacency_list_dict = pickle_read(os.path.join(CORA_PATH, 'adjacency_list.dict'))
        # Normalize the features
        node_features_csr = normalize_features_sparse(node_features_csr)
        num_of_nodes = len(node_labels_npy)
        if layer_type == LayerType.IMP3:
            # Build edge index explicitly, shape (2, E): tuples S->T, e.g.
            # 0->3 means that node with id 0 points to a node with id 3.
            topology = build_edge_index(adjacency_list_dict, num_of_nodes, add_self_edges=True)
        elif layer_type == LayerType.IMP2 or layer_type == LayerType.IMP1:
            # adjacency matrix shape = (N, N)
            # BUG FIX: np.float was deprecated and removed in NumPy >= 1.24
            # (AttributeError); the builtin float gives the same float64 dtype.
            topology = nx.adjacency_matrix(nx.from_dict_of_lists(adjacency_list_dict)).todense().astype(float)
            topology += np.identity(topology.shape[0])  # add self connections
            topology[topology > 0] = 1  # multiple edges not allowed
            topology[topology == 0] = -np.inf  # mask instead of adjacency matrix (masks softmax)
            topology[topology == 1] = 0
        else:
            raise Exception(f'Layer type {layer_type} not yet supported.')
        # Note: topology is just a fancy way of naming the graph structure data
        # (be it in the edge index format or adjacency matrix)
        if should_visualize:  # network analysis and graph drawing
            plot_in_out_degree_distributions(topology, num_of_nodes, dataset_name)
            visualize_graph(topology, node_labels_npy, dataset_name)
        # Convert to dense PyTorch tensors.  Needs to be long int type in
        # implementation 3 because functions like index_select expect it.
        topology = torch.tensor(topology, dtype=torch.long if layer_type == LayerType.IMP3 else torch.float, device=device)
        node_labels = torch.tensor(node_labels_npy, dtype=torch.long, device=device)  # Cross entropy expects a long int
        node_features = torch.tensor(node_features_csr.todense(), device=device)
        # Indices that help us extract nodes that belong to the train/val and test splits
        train_indices = torch.arange(CORA_TRAIN_RANGE[0], CORA_TRAIN_RANGE[1], dtype=torch.long, device=device)
        val_indices = torch.arange(CORA_VAL_RANGE[0], CORA_VAL_RANGE[1], dtype=torch.long, device=device)
        test_indices = torch.arange(CORA_TEST_RANGE[0], CORA_TEST_RANGE[1], dtype=torch.long, device=device)
        return node_features, node_labels, topology, train_indices, val_indices, test_indices
    else:
        raise Exception(f'{dataset_name} not yet supported.')
# All Cora data is stored as pickle
def pickle_read(path):
    """Deserialize and return the pickled object stored at *path*."""
    with open(path, 'rb') as file:
        return pickle.load(file)
def pickle_save(path, data):
    """Serialize *data* to *path* using the highest available pickle protocol."""
    with open(path, 'wb') as out_file:
        pickle.dump(data, out_file, protocol=pickle.HIGHEST_PROTOCOL)
def normalize_features_sparse(node_features_sparse):
    """Row-normalize a sparse feature matrix so each row sums to 1.

    All-zero rows are left untouched: their inverse sum would be inf, so
    it is replaced by 1, a neutral element for multiplication.  We
    multiply by the inverse sums instead of dividing because modern
    hardware is optimized for fast matrix multiplication.
    """
    assert sp.issparse(node_features_sparse), f'Expected a sparse matrix, got {node_features_sparse}.'
    # shape (N, FIN) -> (N, 1): per-node feature sums, then their inverses
    # squeezed down to shape (N,).
    row_sums = np.array(node_features_sparse.sum(-1))
    inv_row_sums = np.power(row_sums, -1).squeeze()
    # 1/0 -> inf; map those back to 1 so empty feature rows stay all-zero.
    inv_row_sums[np.isinf(inv_row_sums)] = 1.
    # diag(1/sum) @ features normalizes every row at once.
    return sp.diags(inv_row_sums).dot(node_features_sparse)
# Not used -> check out playground.py where it is used in profiling functions
def normalize_features_dense(node_features_dense):
    """Row-normalize a dense np.matrix of features; all-zero rows stay as-is."""
    assert isinstance(node_features_dense, np.matrix), f'Expected np matrix got {type(node_features_dense)}.'
    # Clip the row sums to a minimum of 1 so all-zero rows avoid division
    # by zero (dividing by 1 leaves such rows unchanged).
    row_sums = np.clip(node_features_dense.sum(1), a_min=1, a_max=None)
    return node_features_dense / row_sums
def build_edge_index(adjacency_list_dict, num_of_nodes, add_self_edges=True):
    """Build a (2, E) edge index array from an adjacency list.

    Row 0 holds source node ids and row 1 the matching target ids, i.e.
    column (s, t) encodes the directed edge s -> t.  Duplicate edges are
    coalesced (added only once); when ``add_self_edges`` is True a
    self-edge i -> i is appended for every node (Cora has no explicit
    self-edges, so no extra dedup is needed there).
    """
    source_nodes_ids, target_nodes_ids = [], []
    seen_edges = set()
    for src_node, neighboring_nodes in adjacency_list_dict.items():
        for trg_node in neighboring_nodes:
            # Coalescing: add each directed edge to the index only once.
            if (src_node, trg_node) not in seen_edges:
                source_nodes_ids.append(src_node)
                target_nodes_ids.append(trg_node)
                seen_edges.add((src_node, trg_node))
    if add_self_edges:
        source_nodes_ids.extend(np.arange(num_of_nodes))
        target_nodes_ids.extend(np.arange(num_of_nodes))
    # BUG FIX (future-proofing): np.row_stack is a deprecated alias
    # (deprecated in NumPy 2.0); np.vstack produces the identical (2, E) array.
    return np.vstack((source_nodes_ids, target_nodes_ids))
# Not used - this is yet another way to construct the edge index by leveraging the existing package (networkx)
# (it's just slower than my simple implementation build_edge_index())
def build_edge_index_nx(adjacency_list_dict):
    """Alternative edge-index construction that leans on networkx.

    Kept for reference only -- it is slower than build_edge_index().
    Goes adjacency list -> nx graph -> sparse adjacency -> COO, whose
    (row, col) pairs are exactly the (source, target) ids.
    """
    sparse_adj = nx.adjacency_matrix(nx.from_dict_of_lists(adjacency_list_dict))
    coo_adj = sparse_adj.tocoo()  # COOrdinate sparse format exposes row/col arrays
    return np.row_stack((coo_adj.row, coo_adj.col))
| [
"networkx.from_dict_of_lists",
"numpy.identity",
"pickle.dump",
"numpy.power",
"networkx.adjacency_matrix",
"numpy.arange",
"pickle.load",
"scipy.sparse.issparse",
"utils.visualizations.plot_in_out_degree_distributions",
"torch.tensor",
"numpy.row_stack",
"utils.visualizations.visualize_graph"... | [((6174, 6207), 'scipy.sparse.issparse', 'sp.issparse', (['node_features_sparse'], {}), '(node_features_sparse)\n', (6185, 6207), True, 'import scipy.sparse as sp\n'), ((7169, 7200), 'scipy.sparse.diags', 'sp.diags', (['node_features_inv_sum'], {}), '(node_features_inv_sum)\n', (7177, 7200), True, 'import scipy.sparse as sp\n'), ((8806, 8856), 'numpy.row_stack', 'np.row_stack', (['(source_nodes_ids, target_nodes_ids)'], {}), '((source_nodes_ids, target_nodes_ids))\n', (8818, 8856), True, 'import numpy as np\n'), ((9124, 9166), 'networkx.from_dict_of_lists', 'nx.from_dict_of_lists', (['adjacency_list_dict'], {}), '(adjacency_list_dict)\n', (9145, 9166), True, 'import networkx as nx\n'), ((9177, 9206), 'networkx.adjacency_matrix', 'nx.adjacency_matrix', (['nx_graph'], {}), '(nx_graph)\n', (9196, 9206), True, 'import networkx as nx\n'), ((9286, 9318), 'numpy.row_stack', 'np.row_stack', (['(adj.row, adj.col)'], {}), '((adj.row, adj.col))\n', (9298, 9318), True, 'import numpy as np\n'), ((4938, 5046), 'torch.tensor', 'torch.tensor', (['topology'], {'dtype': '(torch.long if layer_type == LayerType.IMP3 else torch.float)', 'device': 'device'}), '(topology, dtype=torch.long if layer_type == LayerType.IMP3 else\n torch.float, device=device)\n', (4950, 5046), False, 'import torch\n'), ((5065, 5127), 'torch.tensor', 'torch.tensor', (['node_labels_npy'], {'dtype': 'torch.long', 'device': 'device'}), '(node_labels_npy, dtype=torch.long, device=device)\n', (5077, 5127), False, 'import torch\n'), ((5360, 5451), 'torch.arange', 'torch.arange', (['CORA_TRAIN_RANGE[0]', 'CORA_TRAIN_RANGE[1]'], {'dtype': 'torch.long', 'device': 'device'}), '(CORA_TRAIN_RANGE[0], CORA_TRAIN_RANGE[1], dtype=torch.long,\n device=device)\n', (5372, 5451), False, 'import torch\n'), ((5470, 5558), 'torch.arange', 'torch.arange', (['CORA_VAL_RANGE[0]', 'CORA_VAL_RANGE[1]'], {'dtype': 'torch.long', 'device': 'device'}), '(CORA_VAL_RANGE[0], CORA_VAL_RANGE[1], 
dtype=torch.long, device\n =device)\n', (5482, 5558), False, 'import torch\n'), ((5577, 5666), 'torch.arange', 'torch.arange', (['CORA_TEST_RANGE[0]', 'CORA_TEST_RANGE[1]'], {'dtype': 'torch.long', 'device': 'device'}), '(CORA_TEST_RANGE[0], CORA_TEST_RANGE[1], dtype=torch.long,\n device=device)\n', (5589, 5666), False, 'import torch\n'), ((5941, 5958), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (5952, 5958), False, 'import pickle\n'), ((6050, 6107), 'pickle.dump', 'pickle.dump', (['data', 'file'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(data, file, protocol=pickle.HIGHEST_PROTOCOL)\n', (6061, 6107), False, 'import pickle\n'), ((6999, 7030), 'numpy.isinf', 'np.isinf', (['node_features_inv_sum'], {}), '(node_features_inv_sum)\n', (7007, 7030), True, 'import numpy as np\n'), ((4614, 4684), 'utils.visualizations.plot_in_out_degree_distributions', 'plot_in_out_degree_distributions', (['topology', 'num_of_nodes', 'dataset_name'], {}), '(topology, num_of_nodes, dataset_name)\n', (4646, 4684), False, 'from utils.visualizations import plot_in_out_degree_distributions, visualize_graph\n'), ((4697, 4753), 'utils.visualizations.visualize_graph', 'visualize_graph', (['topology', 'node_labels_npy', 'dataset_name'], {}), '(topology, node_labels_npy, dataset_name)\n', (4712, 4753), False, 'from utils.visualizations import plot_in_out_degree_distributions, visualize_graph\n'), ((6811, 6842), 'numpy.power', 'np.power', (['node_features_sum', '(-1)'], {}), '(node_features_sum, -1)\n', (6819, 6842), True, 'import numpy as np\n'), ((8640, 8663), 'numpy.arange', 'np.arange', (['num_of_nodes'], {}), '(num_of_nodes)\n', (8649, 8663), True, 'import numpy as np\n'), ((8697, 8720), 'numpy.arange', 'np.arange', (['num_of_nodes'], {}), '(num_of_nodes)\n', (8706, 8720), True, 'import numpy as np\n'), ((4022, 4052), 'numpy.identity', 'np.identity', (['topology.shape[0]'], {}), '(topology.shape[0])\n', (4033, 4052), True, 'import numpy as np\n'), ((3927, 3969), 
'networkx.from_dict_of_lists', 'nx.from_dict_of_lists', (['adjacency_list_dict'], {}), '(adjacency_list_dict)\n', (3948, 3969), True, 'import networkx as nx\n')] |
import unittest
import re
import pytest
import numpy as np
from scipy.optimize import check_grad
from six.moves import xrange
from sklearn.metrics import pairwise_distances
from sklearn.datasets import load_iris, make_classification, make_regression
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.validation import check_X_y
from metric_learn import (
LMNN, NCA, LFDA, Covariance, MLKR, MMC,
LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised)
# Import this specially for testing.
from metric_learn.constraints import wrap_pairs
from metric_learn.lmnn import python_LMNN
def class_separation(X, labels):
    """Mean ratio of intra-class to inter-class pairwise distances.

    Lower values mean the classes are better separated in the space X.
    """
    unique_labels, label_inds = np.unique(labels, return_inverse=True)
    ratio = 0
    for li in xrange(len(unique_labels)):
        in_class = label_inds == li
        within = pairwise_distances(X[in_class]).mean()
        between = pairwise_distances(X[in_class], X[~in_class]).mean()
        ratio += within / between
    return ratio / len(unique_labels)
class MetricTestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Load the iris dataset once per test class and fix the RNG seed."""
        iris_data = load_iris()
        cls.iris_points = iris_data['data']
        cls.iris_labels = iris_data['target']
        np.random.seed(1234)
class TestCovariance(MetricTestCase):
    def test_iris(self):
        # The Covariance metric is deterministic, so the class separation of
        # the transformed iris data can be pinned to an exact value.
        model = Covariance()
        model.fit(self.iris_points)
        csep = class_separation(model.transform(self.iris_points), self.iris_labels)
        self.assertAlmostEqual(csep, 0.72981476)
class TestLSML(MetricTestCase):
    def test_iris(self):
        model = LSML_Supervised(num_constraints=200)
        model.fit(self.iris_points, self.iris_labels)
        csep = class_separation(model.transform(self.iris_points), self.iris_labels)
        self.assertLess(csep, 0.8)  # it's pretty terrible

    def test_deprecation_num_labeled(self):
        # Setting num_labeled at initialization must emit a deprecation
        # warning with exactly this message.  TODO: remove in v.0.6
        X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
        y = np.array([1, 0, 1, 0])
        lsml_supervised = LSML_Supervised(num_labeled=np.inf)
        msg = ('"num_labeled" parameter is not used.'
               ' It has been deprecated in version 0.5.0 and will be'
               'removed in 0.6.0')
        assert_warns_message(DeprecationWarning, msg, lsml_supervised.fit, X, y)
class TestITML(MetricTestCase):
    def test_iris(self):
        model = ITML_Supervised(num_constraints=200)
        model.fit(self.iris_points, self.iris_labels)
        csep = class_separation(model.transform(self.iris_points), self.iris_labels)
        self.assertLess(csep, 0.2)

    def test_deprecation_num_labeled(self):
        # Setting num_labeled at initialization must emit a deprecation
        # warning with exactly this message.  TODO: remove in v.0.6
        X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
        y = np.array([1, 0, 1, 0])
        itml_supervised = ITML_Supervised(num_labeled=np.inf)
        msg = ('"num_labeled" parameter is not used.'
               ' It has been deprecated in version 0.5.0 and will be'
               'removed in 0.6.0')
        assert_warns_message(DeprecationWarning, msg, itml_supervised.fit, X, y)

    def test_deprecation_bounds(self):
        # Setting bounds at initialization must emit a deprecation warning
        # with exactly this message.  TODO: remove in v.0.6
        X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
        y = np.array([1, 0, 1, 0])
        itml_supervised = ITML_Supervised(bounds=None)
        msg = ('"bounds" parameter from initialization is not used.'
               ' It has been deprecated in version 0.5.0 and will be'
               'removed in 0.6.0. Use the "bounds" parameter of this '
               'fit method instead.')
        assert_warns_message(DeprecationWarning, msg, itml_supervised.fit, X, y)
class TestLMNN(MetricTestCase):
    def test_iris(self):
        # Exercise every available LMNN implementation on the same data
        # (the set() collapses them when only the Python one exists).
        for LMNN_cls in set((LMNN, python_LMNN)):
            lmnn = LMNN_cls(k=5, learn_rate=1e-6, verbose=False)
            lmnn.fit(self.iris_points, self.iris_labels)
            csep = class_separation(lmnn.transform(self.iris_points), self.iris_labels)
            self.assertLess(csep, 0.25)
def test_convergence_simple_example(capsys):
    """LMNN should converge on a simple classification problem.

    Regression test for
    https://github.com/metric-learn/metric-learn/issues/88
    """
    features, labels = make_classification(random_state=0)
    learner = python_LMNN(verbose=True)
    learner.fit(features, labels)
    captured, _ = capsys.readouterr()
    assert "LMNN converged with objective" in captured
def test_no_twice_same_objective(capsys):
    """The LMNN objective value should never repeat across iterations.

    Regression test for
    https://github.com/metric-learn/metric-learn/issues/88
    """
    X, y = make_classification(random_state=0)
    lmnn = python_LMNN(verbose=True)
    lmnn.fit(X, y)
    out, _ = capsys.readouterr()
    lines = re.split("\n+", out)
    # We extract the objective printed on each line: the regexp matches a
    # float that follows an integer (the iteration number), and which is
    # followed by a (signed) float (delta obj). It matches for instance:
    # 3 **1113.7665747189938** -3.182774197440267 46431.0200999999999998e-06
    # Fix: the pattern must be a raw string ('\d' is an invalid escape
    # sequence in a plain literal -> DeprecationWarning, a SyntaxError in
    # future Python), and the decimal points must be escaped so '.' does
    # not match arbitrary characters.
    objectives = [re.search(r"\d* (?:(\d*\.\d*))[ | -]\d*\.\d*", s)
                  for s in lines]
    objectives = [match.group(1) for match in objectives if match is not None]
    # we remove the last element because it can be equal to the penultimate
    # if the last gradient update is null
    assert len(objectives[:-1]) == len(set(objectives[:-1]))
class TestSDML(MetricTestCase):
    def test_iris(self):
        """SDML learned from iris constraints should separate the classes.

        Note: this is a flaky test, which fails for certain seeds.
        TODO: un-flake it!
        """
        seed = np.random.RandomState(5555)
        learner = SDML_Supervised(num_constraints=1500)
        learner.fit(self.iris_points, self.iris_labels, random_state=seed)
        separation = class_separation(learner.transform(self.iris_points),
                                      self.iris_labels)
        self.assertLess(separation, 0.25)

    def test_deprecation_num_labeled(self):
        """Setting `num_labeled` at initialization must warn (deprecated)."""
        # TODO: remove in v.0.6
        features = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
        labels = np.array([1, 0, 1, 0])
        sdml_supervised = SDML_Supervised(num_labeled=np.inf)
        msg = ('"num_labeled" parameter is not used.'
               ' It has been deprecated in version 0.5.0 and will be'
               'removed in 0.6.0')
        assert_warns_message(DeprecationWarning, msg, sdml_supervised.fit,
                             features, labels)
class TestNCA(MetricTestCase):
    def test_iris(self):
        """NCA should separate iris classes, with and without dim. reduction."""
        n = self.iris_points.shape[0]

        # Without dimension reduction
        nca = NCA(max_iter=(100000//n))
        nca.fit(self.iris_points, self.iris_labels)
        csep = class_separation(nca.transform(self.iris_points), self.iris_labels)
        self.assertLess(csep, 0.15)

        # With dimension reduction
        nca = NCA(max_iter=(100000//n), num_dims=2)
        nca.fit(self.iris_points, self.iris_labels)
        csep = class_separation(nca.transform(self.iris_points), self.iris_labels)
        self.assertLess(csep, 0.20)

    def test_finite_differences(self):
        """Test gradient of loss function

        Assert that the gradient is almost equal to its finite differences
        approximation.
        """
        # Initialize the transformation `M`, as well as `X` and `y` and `NCA`
        X, y = make_classification()
        # M has a random number of rows between 1 and n_features.
        M = np.random.randn(np.random.randint(1, X.shape[1] + 1), X.shape[1])
        mask = y[:, np.newaxis] == y[np.newaxis, :]
        nca = NCA()
        nca.n_iter_ = 0

        def fun(M):
            # Scalar loss value at M.
            return nca._loss_grad_lbfgs(M, X, mask)[0]

        def grad(M):
            # Analytical gradient at M, flattened for check_grad.
            return nca._loss_grad_lbfgs(M, X, mask)[1].ravel()

        # compute relative error
        rel_diff = check_grad(fun, grad, M.ravel()) / np.linalg.norm(grad(M))
        np.testing.assert_almost_equal(rel_diff, 0., decimal=6)

    def test_simple_example(self):
        """Test on a simple example.

        Puts four points in the input space where the opposite labels points are
        next to each other. After transform the same labels points should be next
        to each other.
        """
        X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
        y = np.array([1, 0, 1, 0])
        nca = NCA(num_dims=2,)
        nca.fit(X, y)
        Xansformed = nca.transform(X)
        # After the learned transform, each point's nearest neighbour
        # (argsort column 1) must be the other point with the same label.
        np.testing.assert_equal(pairwise_distances(Xansformed).argsort()[:, 1],
                                np.array([2, 3, 0, 1]))

    def test_singleton_class(self):
        """Fitting must work (or be a no-op) with singleton classes."""
        X = self.iris_points
        y = self.iris_labels

        # one singleton class: test fitting works
        singleton_class = 1
        ind_singleton, = np.where(y == singleton_class)
        y[ind_singleton] = 2
        y[ind_singleton[0]] = singleton_class

        nca = NCA(max_iter=30)
        nca.fit(X, y)

        # One non-singleton class: test fitting works
        ind_1, = np.where(y == 1)
        ind_2, = np.where(y == 2)
        y[ind_1] = 0
        y[ind_1[0]] = 1
        y[ind_2] = 0
        y[ind_2[0]] = 2

        nca = NCA(max_iter=30)
        nca.fit(X, y)

        # Only singleton classes: test fitting does nothing (the gradient
        # must be null in this case, so the final matrix must stay like
        # the initialization)
        ind_0, = np.where(y == 0)
        ind_1, = np.where(y == 1)
        ind_2, = np.where(y == 2)
        X = X[[ind_0[0], ind_1[0], ind_2[0]]]
        y = y[[ind_0[0], ind_1[0], ind_2[0]]]

        # A is the expected initialization: a diagonal scaling by the
        # inverse feature ranges (clipped at machine epsilon).
        EPS = np.finfo(float).eps
        A = np.zeros((X.shape[1], X.shape[1]))
        np.fill_diagonal(A,
                         1. / (np.maximum(X.max(axis=0) - X.min(axis=0), EPS)))
        nca = NCA(max_iter=30, num_dims=X.shape[1])
        nca.fit(X, y)
        assert_array_equal(nca.transformer_, A)

    def test_one_class(self):
        # if there is only one class the gradient is null, so the final matrix
        # must stay like the initialization
        X = self.iris_points[self.iris_labels == 0]
        y = self.iris_labels[self.iris_labels == 0]
        EPS = np.finfo(float).eps
        A = np.zeros((X.shape[1], X.shape[1]))
        np.fill_diagonal(A,
                         1. / (np.maximum(X.max(axis=0) - X.min(axis=0), EPS)))
        nca = NCA(max_iter=30, num_dims=X.shape[1])
        nca.fit(X, y)
        assert_array_equal(nca.transformer_, A)
class TestLFDA(MetricTestCase):
    def test_iris(self):
        """LFDA should separate iris classes and learn matrices of the
        expected shapes."""
        learner = LFDA(k=2, num_dims=2)
        learner.fit(self.iris_points, self.iris_labels)
        separation = class_separation(learner.transform(self.iris_points),
                                      self.iris_labels)
        self.assertLess(separation, 0.15)

        # Sanity checks for learned matrices.
        self.assertEqual((4, 4), learner.get_mahalanobis_matrix().shape)
        self.assertEqual((2, 4), learner.transformer_.shape)
class TestRCA(MetricTestCase):
    def test_iris(self):
        """RCA should separate iris classes."""
        learner = RCA_Supervised(num_dims=2, num_chunks=30, chunk_size=2)
        learner.fit(self.iris_points, self.iris_labels)
        separation = class_separation(learner.transform(self.iris_points),
                                      self.iris_labels)
        self.assertLess(separation, 0.25)

    def test_feature_null_variance(self):
        """RCA must handle a zero-variance feature through its PCA step."""
        padded = np.hstack((self.iris_points,
                           np.eye(len(self.iris_points), M=1)))

        # Apply PCA with the number of components
        learner = RCA_Supervised(num_dims=2, pca_comps=3, num_chunks=30,
                                chunk_size=2)
        learner.fit(padded, self.iris_labels)
        separation = class_separation(learner.transform(padded),
                                      self.iris_labels)
        self.assertLess(separation, 0.30)

        # Apply PCA with the minimum variance ratio
        learner = RCA_Supervised(num_dims=2, pca_comps=0.95, num_chunks=30,
                                chunk_size=2)
        learner.fit(padded, self.iris_labels)
        separation = class_separation(learner.transform(padded),
                                      self.iris_labels)
        self.assertLess(separation, 0.30)
class TestMLKR(MetricTestCase):
    def test_iris(self):
        """MLKR should separate iris classes."""
        learner = MLKR()
        learner.fit(self.iris_points, self.iris_labels)
        separation = class_separation(learner.transform(self.iris_points),
                                      self.iris_labels)
        self.assertLess(separation, 0.25)

    def test_finite_differences(self):
        """Test gradient of loss function

        Assert that the gradient is almost equal to its finite differences
        approximation.
        """
        # Initialize the transformation `M`, as well as `X`, and `y` and `MLKR`
        X, y = check_X_y(*make_regression(n_features=4, random_state=1,
                                          n_samples=20))
        M = np.random.randn(2, X.shape[1])
        mlkr = MLKR()
        mlkr.n_iter_ = 0

        # Scalar loss and flattened analytical gradient at A.
        loss = lambda A: mlkr._loss(A, X, y)[0]
        gradient = lambda A: mlkr._loss(A, X, y)[1].ravel()

        # compute relative error
        rel_diff = (check_grad(loss, gradient, M.ravel())
                    / np.linalg.norm(gradient(M)))
        np.testing.assert_almost_equal(rel_diff, 0.)
class TestMMC(MetricTestCase):
    def test_iris(self):
        """MMC (full, diagonal, and both supervised variants) on iris."""
        # Generate full set of constraints for comparison with reference implementation
        n = self.iris_points.shape[0]
        mask = (self.iris_labels[None] == self.iris_labels[:,None])
        # (a, b): indices of all similar pairs; (c, d): all dissimilar pairs
        # (upper triangle only, so each pair appears once).
        a, b = np.nonzero(np.triu(mask, k=1))
        c, d = np.nonzero(np.triu(~mask, k=1))

        # Full metric
        mmc = MMC(convergence_threshold=0.01)
        mmc.fit(*wrap_pairs(self.iris_points, [a,b,c,d]))
        # Hard-coded reference values from the reference MMC implementation.
        expected = [[+0.000514, +0.000868, -0.001195, -0.001703],
                    [+0.000868, +0.001468, -0.002021, -0.002879],
                    [-0.001195, -0.002021, +0.002782, +0.003964],
                    [-0.001703, -0.002879, +0.003964, +0.005648]]
        assert_array_almost_equal(expected, mmc.get_mahalanobis_matrix(),
                                  decimal=6)

        # Diagonal metric
        mmc = MMC(diagonal=True)
        mmc.fit(*wrap_pairs(self.iris_points, [a,b,c,d]))
        expected = [0, 0, 1.210220, 1.228596]
        assert_array_almost_equal(np.diag(expected), mmc.get_mahalanobis_matrix(),
                                  decimal=6)

        # Supervised Full
        mmc = MMC_Supervised()
        mmc.fit(self.iris_points, self.iris_labels)
        csep = class_separation(mmc.transform(self.iris_points), self.iris_labels)
        self.assertLess(csep, 0.15)

        # Supervised Diagonal
        mmc = MMC_Supervised(diagonal=True)
        mmc.fit(self.iris_points, self.iris_labels)
        csep = class_separation(mmc.transform(self.iris_points), self.iris_labels)
        self.assertLess(csep, 0.2)

    def test_deprecation_num_labeled(self):
        """Setting `num_labeled` at initialization must warn (deprecated)."""
        # test that a deprecation message is thrown if num_labeled is set at
        # initialization
        # TODO: remove in v.0.6
        X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
        y = np.array([1, 0, 1, 0])
        mmc_supervised = MMC_Supervised(num_labeled=np.inf)
        msg = ('"num_labeled" parameter is not used.'
               ' It has been deprecated in version 0.5.0 and will be'
               'removed in 0.6.0')
        assert_warns_message(DeprecationWarning, msg, mmc_supervised.fit, X, y)
@pytest.mark.parametrize(('algo_class', 'dataset'),
                         [(NCA, make_classification()),
                          (MLKR, make_regression())])
def test_verbose(algo_class, dataset, capsys):
    """With verbose=True, fit must print a well-formed progress table."""
    X, y = dataset
    model = algo_class(verbose=True)
    model.fit(X, y)
    out, _ = capsys.readouterr()

    # check output
    lines = re.split('\n+', out)
    header = '{:>10} {:>20} {:>10}'.format('Iteration', 'Objective Value',
                                          'Time(s)')
    assert lines[0] == '[{}]'.format(algo_class.__name__)
    assert lines[1] == '[{}] {}'.format(algo_class.__name__, header)
    assert lines[2] == '[{}] {}'.format(algo_class.__name__, '-' * len(header))
    for line in lines[3:-2]:
        # The following regex will match for instance:
        # '[NCA]  0  6.988936e+01  0.01'
        # Fix: use raw strings -- '\[', '\d', '\ ', '\.' are invalid escape
        # sequences in plain literals (DeprecationWarning, a SyntaxError in
        # future Python versions).
        assert re.match(r"\[" + algo_class.__name__ + r"\]\ *\d+\ *\d\.\d{6}e[+|-]"
                        r"\d+\ *\d+\.\d{2}", line)
    assert re.match(r"\[" + algo_class.__name__ + r"\] Training took\ *"
                    r"\d+\.\d{2}s\.", lines[-2])
    assert lines[-1] == ''
@pytest.mark.parametrize(('algo_class', 'dataset'),
                         [(NCA, make_classification()),
                          (MLKR, make_regression(n_features=10))])
def test_no_verbose(dataset, algo_class, capsys):
    """With the default verbose=False, fit must print nothing at all."""
    features, targets = dataset
    estimator = algo_class()
    estimator.fit(features, targets)
    printed, _ = capsys.readouterr()
    # check output
    assert printed == ''
@pytest.mark.parametrize(('algo_class', 'dataset'),
                         [(NCA, make_classification()),
                          (MLKR, make_regression(n_features=10))])
def test_convergence_warning(dataset, algo_class):
    """A ConvergenceWarning must be raised when max_iter is too small."""
    features, targets = dataset
    estimator = algo_class(max_iter=2, verbose=True)
    name = estimator.__class__.__name__
    expected_msg = '[{}] {} did not converge'.format(name, name)
    assert_warns_message(ConvergenceWarning, expected_msg,
                         estimator.fit, features, targets)
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| [
"numpy.array",
"metric_learn.MMC",
"unittest.main",
"metric_learn.MMC_Supervised",
"numpy.random.RandomState",
"re.search",
"re.split",
"metric_learn.MLKR",
"metric_learn.NCA",
"sklearn.datasets.make_regression",
"numpy.where",
"numpy.testing.assert_almost_equal",
"numpy.random.seed",
"met... | [((825, 863), 'numpy.unique', 'np.unique', (['labels'], {'return_inverse': '(True)'}), '(labels, return_inverse=True)\n', (834, 863), True, 'import numpy as np\n'), ((4410, 4445), 'sklearn.datasets.make_classification', 'make_classification', ([], {'random_state': '(0)'}), '(random_state=0)\n', (4429, 4445), False, 'from sklearn.datasets import load_iris, make_classification, make_regression\n'), ((4455, 4480), 'metric_learn.lmnn.python_LMNN', 'python_LMNN', ([], {'verbose': '(True)'}), '(verbose=True)\n', (4466, 4480), False, 'from metric_learn.lmnn import python_LMNN\n'), ((4761, 4796), 'sklearn.datasets.make_classification', 'make_classification', ([], {'random_state': '(0)'}), '(random_state=0)\n', (4780, 4796), False, 'from sklearn.datasets import load_iris, make_classification, make_regression\n'), ((4806, 4831), 'metric_learn.lmnn.python_LMNN', 'python_LMNN', ([], {'verbose': '(True)'}), '(verbose=True)\n', (4817, 4831), False, 'from metric_learn.lmnn import python_LMNN\n'), ((4890, 4910), 're.split', 're.split', (['"""\n+"""', 'out'], {}), "('\\n+', out)\n", (4898, 4910), False, 'import re\n'), ((14827, 14847), 're.split', 're.split', (['"""\n+"""', 'out'], {}), "('\\n+', out)\n", (14835, 14847), False, 'import re\n'), ((15442, 15537), 're.match', 're.match', (["('\\\\[' + algo_class.__name__ + '\\\\] Training took\\\\ *\\\\d+\\\\.\\\\d{2}s\\\\.')", 'lines[-2]'], {}), "('\\\\[' + algo_class.__name__ +\n '\\\\] Training took\\\\ *\\\\d+\\\\.\\\\d{2}s\\\\.', lines[-2])\n", (15450, 15537), False, 'import re\n'), ((16518, 16533), 'unittest.main', 'unittest.main', ([], {}), '()\n', (16531, 16533), False, 'import unittest\n'), ((1215, 1226), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (1224, 1226), False, 'from sklearn.datasets import load_iris, make_classification, make_regression\n'), ((1315, 1335), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (1329, 1335), True, 'import numpy as np\n'), ((1409, 1421), 
'metric_learn.Covariance', 'Covariance', ([], {}), '()\n', (1419, 1421), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((1672, 1708), 'metric_learn.LSML_Supervised', 'LSML_Supervised', ([], {'num_constraints': '(200)'}), '(num_constraints=200)\n', (1687, 1708), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((2067, 2109), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [2, 0], [2, 1]]'], {}), '([[0, 0], [0, 1], [2, 0], [2, 1]])\n', (2075, 2109), True, 'import numpy as np\n'), ((2118, 2140), 'numpy.array', 'np.array', (['[1, 0, 1, 0]'], {}), '([1, 0, 1, 0])\n', (2126, 2140), True, 'import numpy as np\n'), ((2163, 2198), 'metric_learn.LSML_Supervised', 'LSML_Supervised', ([], {'num_labeled': 'np.inf'}), '(num_labeled=np.inf)\n', (2178, 2198), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((2350, 2422), 'sklearn.utils.testing.assert_warns_message', 'assert_warns_message', (['DeprecationWarning', 'msg', 'lsml_supervised.fit', 'X', 'y'], {}), '(DeprecationWarning, msg, lsml_supervised.fit, X, y)\n', (2370, 2422), False, 'from sklearn.utils.testing import assert_warns_message\n'), ((2491, 2527), 'metric_learn.ITML_Supervised', 'ITML_Supervised', ([], {'num_constraints': '(200)'}), '(num_constraints=200)\n', (2506, 2527), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((2862, 2904), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [2, 0], [2, 1]]'], {}), '([[0, 0], [0, 1], [2, 0], [2, 1]])\n', (2870, 2904), True, 'import numpy as np\n'), ((2913, 2935), 'numpy.array', 'np.array', (['[1, 0, 1, 0]'], {}), '([1, 0, 1, 0])\n', (2921, 
2935), True, 'import numpy as np\n'), ((2958, 2993), 'metric_learn.ITML_Supervised', 'ITML_Supervised', ([], {'num_labeled': 'np.inf'}), '(num_labeled=np.inf)\n', (2973, 2993), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((3145, 3217), 'sklearn.utils.testing.assert_warns_message', 'assert_warns_message', (['DeprecationWarning', 'msg', 'itml_supervised.fit', 'X', 'y'], {}), '(DeprecationWarning, msg, itml_supervised.fit, X, y)\n', (3165, 3217), False, 'from sklearn.utils.testing import assert_warns_message\n'), ((3381, 3423), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [2, 0], [2, 1]]'], {}), '([[0, 0], [0, 1], [2, 0], [2, 1]])\n', (3389, 3423), True, 'import numpy as np\n'), ((3432, 3454), 'numpy.array', 'np.array', (['[1, 0, 1, 0]'], {}), '([1, 0, 1, 0])\n', (3440, 3454), True, 'import numpy as np\n'), ((3477, 3505), 'metric_learn.ITML_Supervised', 'ITML_Supervised', ([], {'bounds': 'None'}), '(bounds=None)\n', (3492, 3505), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((3742, 3814), 'sklearn.utils.testing.assert_warns_message', 'assert_warns_message', (['DeprecationWarning', 'msg', 'itml_supervised.fit', 'X', 'y'], {}), '(DeprecationWarning, msg, itml_supervised.fit, X, y)\n', (3762, 3814), False, 'from sklearn.utils.testing import assert_warns_message\n'), ((5212, 5263), 're.search', 're.search', (['"""\\\\d* (?:(\\\\d*.\\\\d*))[ | -]\\\\d*.\\\\d*"""', 's'], {}), "('\\\\d* (?:(\\\\d*.\\\\d*))[ | -]\\\\d*.\\\\d*', s)\n", (5221, 5263), False, 'import re\n'), ((5697, 5724), 'numpy.random.RandomState', 'np.random.RandomState', (['(5555)'], {}), '(5555)\n', (5718, 5724), True, 'import numpy as np\n'), ((5737, 5774), 'metric_learn.SDML_Supervised', 'SDML_Supervised', ([], {'num_constraints': '(1500)'}), '(num_constraints=1500)\n', (5752, 
5774), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((6126, 6168), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [2, 0], [2, 1]]'], {}), '([[0, 0], [0, 1], [2, 0], [2, 1]])\n', (6134, 6168), True, 'import numpy as np\n'), ((6177, 6199), 'numpy.array', 'np.array', (['[1, 0, 1, 0]'], {}), '([1, 0, 1, 0])\n', (6185, 6199), True, 'import numpy as np\n'), ((6222, 6257), 'metric_learn.SDML_Supervised', 'SDML_Supervised', ([], {'num_labeled': 'np.inf'}), '(num_labeled=np.inf)\n', (6237, 6257), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((6409, 6481), 'sklearn.utils.testing.assert_warns_message', 'assert_warns_message', (['DeprecationWarning', 'msg', 'sdml_supervised.fit', 'X', 'y'], {}), '(DeprecationWarning, msg, sdml_supervised.fit, X, y)\n', (6429, 6481), False, 'from sklearn.utils.testing import assert_warns_message\n'), ((6617, 6642), 'metric_learn.NCA', 'NCA', ([], {'max_iter': '(100000 // n)'}), '(max_iter=100000 // n)\n', (6620, 6642), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((6844, 6881), 'metric_learn.NCA', 'NCA', ([], {'max_iter': '(100000 // n)', 'num_dims': '(2)'}), '(max_iter=100000 // n, num_dims=2)\n', (6847, 6881), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((7301, 7322), 'sklearn.datasets.make_classification', 'make_classification', ([], {}), '()\n', (7320, 7322), False, 'from sklearn.datasets import load_iris, make_classification, make_regression\n'), ((7455, 7460), 'metric_learn.NCA', 'NCA', ([], {}), '()\n', (7458, 7460), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, 
MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((7730, 7786), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['rel_diff', '(0.0)'], {'decimal': '(6)'}), '(rel_diff, 0.0, decimal=6)\n', (7760, 7786), True, 'import numpy as np\n'), ((8045, 8087), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [2, 0], [2, 1]]'], {}), '([[0, 0], [0, 1], [2, 0], [2, 1]])\n', (8053, 8087), True, 'import numpy as np\n'), ((8096, 8118), 'numpy.array', 'np.array', (['[1, 0, 1, 0]'], {}), '([1, 0, 1, 0])\n', (8104, 8118), True, 'import numpy as np\n'), ((8129, 8144), 'metric_learn.NCA', 'NCA', ([], {'num_dims': '(2)'}), '(num_dims=2)\n', (8132, 8144), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((8513, 8543), 'numpy.where', 'np.where', (['(y == singleton_class)'], {}), '(y == singleton_class)\n', (8521, 8543), True, 'import numpy as np\n'), ((8628, 8644), 'metric_learn.NCA', 'NCA', ([], {'max_iter': '(30)'}), '(max_iter=30)\n', (8631, 8644), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((8733, 8749), 'numpy.where', 'np.where', (['(y == 1)'], {}), '(y == 1)\n', (8741, 8749), True, 'import numpy as np\n'), ((8765, 8781), 'numpy.where', 'np.where', (['(y == 2)'], {}), '(y == 2)\n', (8773, 8781), True, 'import numpy as np\n'), ((8877, 8893), 'metric_learn.NCA', 'NCA', ([], {'max_iter': '(30)'}), '(max_iter=30)\n', (8880, 8893), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((9100, 9116), 'numpy.where', 'np.where', (['(y == 0)'], {}), '(y == 0)\n', (9108, 9116), True, 'import numpy as np\n'), ((9132, 9148), 'numpy.where', 'np.where', (['(y == 1)'], {}), '(y == 1)\n', (9140, 9148), True, 
'import numpy as np\n'), ((9164, 9180), 'numpy.where', 'np.where', (['(y == 2)'], {}), '(y == 2)\n', (9172, 9180), True, 'import numpy as np\n'), ((9312, 9346), 'numpy.zeros', 'np.zeros', (['(X.shape[1], X.shape[1])'], {}), '((X.shape[1], X.shape[1]))\n', (9320, 9346), True, 'import numpy as np\n'), ((9463, 9500), 'metric_learn.NCA', 'NCA', ([], {'max_iter': '(30)', 'num_dims': 'X.shape[1]'}), '(max_iter=30, num_dims=X.shape[1])\n', (9466, 9500), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((9527, 9566), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['nca.transformer_', 'A'], {}), '(nca.transformer_, A)\n', (9545, 9566), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((9858, 9892), 'numpy.zeros', 'np.zeros', (['(X.shape[1], X.shape[1])'], {}), '((X.shape[1], X.shape[1]))\n', (9866, 9892), True, 'import numpy as np\n'), ((10009, 10046), 'metric_learn.NCA', 'NCA', ([], {'max_iter': '(30)', 'num_dims': 'X.shape[1]'}), '(max_iter=30, num_dims=X.shape[1])\n', (10012, 10046), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((10073, 10112), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['nca.transformer_', 'A'], {}), '(nca.transformer_, A)\n', (10091, 10112), False, 'from numpy.testing import assert_array_almost_equal, assert_array_equal\n'), ((10181, 10202), 'metric_learn.LFDA', 'LFDA', ([], {'k': '(2)', 'num_dims': '(2)'}), '(k=2, num_dims=2)\n', (10185, 10202), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((10593, 10648), 'metric_learn.RCA_Supervised', 'RCA_Supervised', ([], {'num_dims': '(2)', 'num_chunks': '(30)', 'chunk_size': '(2)'}), '(num_dims=2, num_chunks=30, 
chunk_size=2)\n', (10607, 10648), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((10980, 11048), 'metric_learn.RCA_Supervised', 'RCA_Supervised', ([], {'num_dims': '(2)', 'pca_comps': '(3)', 'num_chunks': '(30)', 'chunk_size': '(2)'}), '(num_dims=2, pca_comps=3, num_chunks=30, chunk_size=2)\n', (10994, 11048), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((11237, 11308), 'metric_learn.RCA_Supervised', 'RCA_Supervised', ([], {'num_dims': '(2)', 'pca_comps': '(0.95)', 'num_chunks': '(30)', 'chunk_size': '(2)'}), '(num_dims=2, pca_comps=0.95, num_chunks=30, chunk_size=2)\n', (11251, 11308), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((11531, 11537), 'metric_learn.MLKR', 'MLKR', ([], {}), '()\n', (11535, 11537), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((11961, 12020), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_features': '(4)', 'random_state': '(1)', 'n_samples': '(20)'}), '(n_features=4, random_state=1, n_samples=20)\n', (11976, 12020), False, 'from sklearn.datasets import load_iris, make_classification, make_regression\n'), ((12032, 12047), 'sklearn.utils.validation.check_X_y', 'check_X_y', (['X', 'y'], {}), '(X, y)\n', (12041, 12047), False, 'from sklearn.utils.validation import check_X_y\n'), ((12056, 12086), 'numpy.random.randn', 'np.random.randn', (['(2)', 'X.shape[1]'], {}), '(2, X.shape[1])\n', (12071, 12086), True, 'import numpy as np\n'), ((12098, 12104), 'metric_learn.MLKR', 'MLKR', ([], {}), '()\n', (12102, 12104), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, 
MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((12358, 12403), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['rel_diff', '(0.0)'], {}), '(rel_diff, 0.0)\n', (12388, 12403), True, 'import numpy as np\n'), ((12756, 12787), 'metric_learn.MMC', 'MMC', ([], {'convergence_threshold': '(0.01)'}), '(convergence_threshold=0.01)\n', (12759, 12787), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((13234, 13252), 'metric_learn.MMC', 'MMC', ([], {'diagonal': '(True)'}), '(diagonal=True)\n', (13237, 13252), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((13502, 13518), 'metric_learn.MMC_Supervised', 'MMC_Supervised', ([], {}), '()\n', (13516, 13518), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((13719, 13748), 'metric_learn.MMC_Supervised', 'MMC_Supervised', ([], {'diagonal': '(True)'}), '(diagonal=True)\n', (13733, 13748), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised\n'), ((14080, 14122), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [2, 0], [2, 1]]'], {}), '([[0, 0], [0, 1], [2, 0], [2, 1]])\n', (14088, 14122), True, 'import numpy as np\n'), ((14131, 14153), 'numpy.array', 'np.array', (['[1, 0, 1, 0]'], {}), '([1, 0, 1, 0])\n', (14139, 14153), True, 'import numpy as np\n'), ((14175, 14209), 'metric_learn.MMC_Supervised', 'MMC_Supervised', ([], {'num_labeled': 'np.inf'}), '(num_labeled=np.inf)\n', (14189, 14209), False, 'from metric_learn import LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, 
MMC_Supervised\n'), ((14361, 14432), 'sklearn.utils.testing.assert_warns_message', 'assert_warns_message', (['DeprecationWarning', 'msg', 'mmc_supervised.fit', 'X', 'y'], {}), '(DeprecationWarning, msg, mmc_supervised.fit, X, y)\n', (14381, 14432), False, 'from sklearn.utils.testing import assert_warns_message\n'), ((15320, 15426), 're.match', 're.match', (["('\\\\[' + algo_class.__name__ +\n '\\\\]\\\\ *\\\\d+\\\\ *\\\\d\\\\.\\\\d{6}e[+|-]\\\\d+\\\\ *\\\\d+\\\\.\\\\d{2}')", 'line'], {}), "('\\\\[' + algo_class.__name__ +\n '\\\\]\\\\ *\\\\d+\\\\ *\\\\d\\\\.\\\\d{6}e[+|-]\\\\d+\\\\ *\\\\d+\\\\.\\\\d{2}', line)\n", (15328, 15426), False, 'import re\n'), ((7347, 7383), 'numpy.random.randint', 'np.random.randint', (['(1)', '(X.shape[1] + 1)'], {}), '(1, X.shape[1] + 1)\n', (7364, 7383), True, 'import numpy as np\n'), ((8302, 8324), 'numpy.array', 'np.array', (['[2, 3, 0, 1]'], {}), '([2, 3, 0, 1])\n', (8310, 8324), True, 'import numpy as np\n'), ((9282, 9297), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (9290, 9297), True, 'import numpy as np\n'), ((9828, 9843), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (9836, 9843), True, 'import numpy as np\n'), ((12664, 12682), 'numpy.triu', 'np.triu', (['mask'], {'k': '(1)'}), '(mask, k=1)\n', (12671, 12682), True, 'import numpy as np\n'), ((12706, 12725), 'numpy.triu', 'np.triu', (['(~mask)'], {'k': '(1)'}), '(~mask, k=1)\n', (12713, 12725), True, 'import numpy as np\n'), ((13379, 13396), 'numpy.diag', 'np.diag', (['expected'], {}), '(expected)\n', (13386, 13396), True, 'import numpy as np\n'), ((14519, 14540), 'sklearn.datasets.make_classification', 'make_classification', ([], {}), '()\n', (14538, 14540), False, 'from sklearn.datasets import load_iris, make_classification, make_regression\n'), ((14576, 14593), 'sklearn.datasets.make_regression', 'make_regression', ([], {}), '()\n', (14591, 14593), False, 'from sklearn.datasets import load_iris, make_classification, make_regression\n'), ((15659, 
15680), 'sklearn.datasets.make_classification', 'make_classification', ([], {}), '()\n', (15678, 15680), False, 'from sklearn.datasets import load_iris, make_classification, make_regression\n'), ((15716, 15746), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_features': '(10)'}), '(n_features=10)\n', (15731, 15746), False, 'from sklearn.datasets import load_iris, make_classification, make_regression\n'), ((16070, 16091), 'sklearn.datasets.make_classification', 'make_classification', ([], {}), '()\n', (16089, 16091), False, 'from sklearn.datasets import load_iris, make_classification, make_regression\n'), ((16127, 16157), 'sklearn.datasets.make_regression', 'make_regression', ([], {'n_features': '(10)'}), '(n_features=10)\n', (16142, 16157), False, 'from sklearn.datasets import load_iris, make_classification, make_regression\n'), ((12801, 12843), 'metric_learn.constraints.wrap_pairs', 'wrap_pairs', (['self.iris_points', '[a, b, c, d]'], {}), '(self.iris_points, [a, b, c, d])\n', (12811, 12843), False, 'from metric_learn.constraints import wrap_pairs\n'), ((13266, 13308), 'metric_learn.constraints.wrap_pairs', 'wrap_pairs', (['self.iris_points', '[a, b, c, d]'], {}), '(self.iris_points, [a, b, c, d])\n', (13276, 13308), False, 'from metric_learn.constraints import wrap_pairs\n'), ((984, 1006), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['Xc'], {}), '(Xc)\n', (1002, 1006), False, 'from sklearn.metrics import pairwise_distances\n'), ((1016, 1043), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['Xc', 'Xnc'], {}), '(Xc, Xnc)\n', (1034, 1043), False, 'from sklearn.metrics import pairwise_distances\n'), ((8226, 8256), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['Xansformed'], {}), '(Xansformed)\n', (8244, 8256), False, 'from sklearn.metrics import pairwise_distances\n')] |
import numpy as np
from core.polymer_chain import Polymer
from core.polymer_chain import RandomChargePolymer
from pymatgen import Molecule
from utils import dihedral_tools
import unittest
__author__ = "<NAME>"
class TestPolymer(unittest.TestCase):
    """Unit tests for ``core.polymer_chain.Polymer``.

    Covers chain construction, random dihedral drawing, chain rotation
    (cross-checked against dihedrals measured on a pymatgen ``Molecule``),
    tangent and P2 autocorrelations, the nematic order parameter, and
    ensemble sampling statistics.
    """
    @classmethod
    def setUpClass(cls):
        # setup for polymer class
        cls.monomer_num = 25
        cls.monomer_len = 1.5
        cls.link_len = 0.5
        cls.link_angle = 15.0
        cls.sample_num = 1000
        # rows are (cumulative probability threshold, dihedral angle in degrees)
        cls.prob_angle = np.array([[0.0, -180.0], [0.2, -90.0], [0.3, -45.0], [0.4, 0.0],
                                   [0.5, 45.0], [0.6, 90.0], [0.8, 180.0]])
        # create polymer
        cls.polymer = Polymer(cls.monomer_num, cls.monomer_len, cls.link_len, cls.link_angle,
                              cls.prob_angle, cls.sample_num)
        # manually calculated bead or atom positions
        # the first commented out positions are for when l1 is first instead of l2
        """cls.linear_pos_values = np.array([[0.0, 0.0, 0.0], [1.5, 0.0, 0.0],
                                          [1.9829629131445341, -0.12940952255126037, 0.0],
                                          [3.4829629131445339, -0.12940952255126037, 0.0],
                                          [3.9659258262890682, 0.0, 0.0],
                                          [5.4659258262890678, 0.0, 0.0],
                                          [5.9488887394336016, -0.12940952255126037, 0.0],
                                          [7.4488887394336016, -0.12940952255126037, 0.0]])"""
        cls.linear_chain_actual = np.array([[0.0, 0.0, 0.0], [1.5, 0.0, 0.0],
                                            [1.9829629131445341, 0.12940952255126037, 0.0],
                                            [3.4829629131445339, 0.12940952255126037, 0.0],
                                            [3.9659258262890682, 0.0, 0.0],
                                            [5.4659258262890678, 0.0, 0.0],
                                            [5.9488887394336016, 0.12940952255126037, 0.0],
                                            [7.4488887394336016, 0.12940952255126037, 0.0]])
    def test_build_chain(self):
        """First 8 beads of the freshly built chain match hand-computed positions."""
        np.testing.assert_almost_equal(self.linear_chain_actual, self.polymer.chain[:8])
    def test_random_angle(self):
        """rotate_chain draws one dihedral per bond, each from the allowed set."""
        angle_num = self.monomer_num - 1
        self.polymer.rotate_chain()
        np.testing.assert_equal(angle_num, len(self.polymer.dihedral_set))
        for angle in self.polymer.dihedral_set:
            self.assertIn(angle, self.prob_angle[:, 1])
    def test_rotate_chain(self):
        """Dihedrals measured on the rotated chain equal the drawn dihedral set."""
        self.polymer.rotate_chain()
        # this makes a fake molecule and checks all the dihedral angles
        fake_atoms = []
        fake_atom_coords = []
        for coord in self.polymer.relax_chain:
            fake_atoms.append('C')
            fake_atom_coords.append(coord)
        fake_mol = Molecule(species=fake_atoms, coords=fake_atom_coords)
        # find all the dihedral angles
        dihedral_list_actual = []
        for site, val in enumerate(fake_mol, 1):
            if site <= len(fake_mol) - 3 and site % 2 != 0:
                da = round(dihedral_tools.get_dihedral(fake_mol, [site, site + 1, site + 2, site + 3]))
                # this if statement ensures 180 == -180 and 0 == -0
                if da == -180.0 or da == -0.0:
                    da = abs(da)
                dihedral_list_actual.append(da)
        self.assertEqual(len(dihedral_list_actual), len(self.polymer.dihedral_set))
        rotate_chain_dihedral_set = []
        # again this loop ensures 180 == -180 and 0 == -0
        for angle in self.polymer.dihedral_set:
            if angle == -180.0 or angle == -0.0:
                rotate_chain_dihedral_set.append(abs(angle))
            else:
                rotate_chain_dihedral_set.append(angle)
        np.testing.assert_almost_equal(dihedral_list_actual, rotate_chain_dihedral_set)
    def test_tangent_auto_corr(self):
        """Tangent autocorrelation is exactly 1 for the perfectly straight chain."""
        # check case where all tangent vectors are aligned
        self.polymer.tangent_auto_corr(self.polymer.chain)
        for stat in self.polymer.tangent_corr:
            np.testing.assert_allclose(stat.mean, 1.0)
    def test_unit_normal_vectors(self):
        """Unit normals are normalized and match an independent cross-product calc."""
        self.polymer._unit_normal_vectors(self.polymer.chain)
        np.testing.assert_array_equal(len(self.polymer.unit_normal), self.monomer_num)
        totally_planar_normal = np.array([0.0, 0.0, 1.0])
        # straight chain lies in a plane, so every normal is +/- z
        for u_vec in self.polymer.unit_normal:
            np.testing.assert_almost_equal(u_vec ** 2, totally_planar_normal ** 2)
        self.polymer._unit_normal_vectors(self.polymer.relax_chain)
        for u_vec in self.polymer.unit_normal:
            np.testing.assert_almost_equal(np.linalg.norm(u_vec), 1.0)
        # recompute the normals by hand from consecutive bond vectors
        calc_u_vectors = np.zeros((self.monomer_num, 3))
        index = 0
        for i, pt in enumerate(self.polymer.relax_chain):
            if i == 0:
                vec1 = self.polymer.relax_chain[i + 1] - pt
                vec2 = self.polymer.relax_chain[i + 2] - pt
                calc_u_vectors[i] = np.cross(vec1, vec2)
                calc_u_vectors[i] /= np.linalg.norm(calc_u_vectors[i])
                index += 1
            if i % 2 != 0 and i < (len(self.polymer.relax_chain) - 2):
                vec1 = self.polymer.relax_chain[i + 1] - pt
                vec2 = self.polymer.relax_chain[i + 2] - pt
                calc_u_vectors[index] = np.cross(vec1, vec2)
                calc_u_vectors[index] /= np.linalg.norm(calc_u_vectors[index])
                index += 1
        # compare squares so an overall sign flip of a normal is ignored
        np.testing.assert_almost_equal(self.polymer.unit_normal ** 2, calc_u_vectors ** 2)
    def test_p2_order_param(self):
        """S order parameter: 1 for perfectly aligned vectors, ~0 for isotropic."""
        # two cases: all aligned, and isotropic
        # case 1 all aligned
        z_unit = np.array([0., 0., 1.] * 1000)
        z_unit.shape = (1000, 3)
        self.polymer.p2_order_param(unit_vectors=z_unit)
        np.testing.assert_almost_equal(np.trace(self.polymer.director_matrix), 0.0)
        np.testing.assert_almost_equal(self.polymer.s_order_param.mean, 1.0)
        # case 2 isotropic
        # generate uniform vectors on a unit sphere (Marsaglia rejection sampling)
        index = 0
        n = 50000
        iso_unit = np.zeros((n, 3))
        while index <= (n - 1):
            chi_1 = np.random.uniform(0.0, 1.0, 1)
            chi_2 = np.random.uniform(0.0, 1.0, 1)
            xhi_1 = 1 - (2 * chi_1)
            xhi_2 = 1 - (2 * chi_2)
            xhi_sq = xhi_1 ** 2 + xhi_2 ** 2
            if xhi_sq < 1:
                iso_unit[index] = [2 * xhi_1 * ((1 - xhi_sq) ** (1. / 2.)),
                                   2 * xhi_2 * ((1 - xhi_sq) ** (1. / 2.)),
                                   1 - 2 * xhi_sq]
                index += 1
        self.polymer.p2_order_param(unit_vectors=iso_unit)
        np.testing.assert_almost_equal(np.trace(self.polymer.director_matrix), 0.0)
        # only ~0 statistically, hence the loose decimal=1 tolerance
        np.testing.assert_almost_equal(self.polymer.s_order_param.mean, 0.0, decimal=1)
    def test_p2_auto_corr(self):
        """P2 pair correlation statistics agree with a direct ensemble average."""
        samples = 200
        p2_polymer = Polymer(self.monomer_num, self.monomer_len, self.link_len, self.link_angle, self.prob_angle)
        p2_polymer.p2_auto_corr(p2_polymer.chain)
        # check correlation is 1 when all aligned
        for stat in p2_polymer.s_x_corr:
            np.testing.assert_allclose(1.0, stat.mean)
        # check the correlation over a bunch of samples
        pair_interacts = int((self.monomer_num * (self.monomer_num + 1)) / 2)
        # adds 1 to all lists for case where everything is aligned
        ensemble_list = [[1.0] for i in range(self.monomer_num)]
        # loops over the number of samples
        for sample in range(1, samples):
            p2_polymer.rotate_chain()
            p2_polymer.p2_auto_corr(p2_polymer.relax_chain)
            polymer_list = []
            for i in range(self.monomer_num):
                pair_list = []
                for j in range(i, self.monomer_num, 1):
                    # second Legendre polynomial P2(cos theta) of the normal pair
                    pair_list.append(((3. / 2.) * (np.dot(p2_polymer.unit_normal[i],
                                                           p2_polymer.unit_normal[j]) ** 2)) - (1. / 2.))
                polymer_list.append(pair_list)
            for l in polymer_list:
                for i, val in enumerate(l):
                    ensemble_list[i].append(val)
        actual_means = [np.mean(i) for i in ensemble_list]
        # check the right number of pair interactions were sampled
        # checks all the self interactions
        np.testing.assert_equal(int((samples * self.monomer_num)), int(p2_polymer.s_x_corr[0].k))
        # checks the longest interaction only 1 per polymer chain sample
        np.testing.assert_equal(int(samples), int(p2_polymer.s_x_corr[-1].k))
        for i, stat in enumerate(p2_polymer.s_x_corr):
            # print(actual_means[i], stat.mean)
            np.testing.assert_allclose(actual_means[i], stat.mean, atol=0.01, rtol=0.0)
    def test_sample_chain(self):
        """sample_chains() reproduces stats from manual rotate_chain sampling."""
        # sample by looping over rotate_chains
        # start a new chain
        sample_polymer = Polymer(self.monomer_num, self.monomer_len, self.link_len, self.link_angle,
                                 self.prob_angle, sample_num=self.sample_num)
        end_to_end = []
        for i in range(self.sample_num):
            sample_polymer.rotate_chain()
            end_to_end.append(sample_polymer.end_to_end[-1])
        mean_ete = np.mean(end_to_end)
        std_ete = np.std(end_to_end)
        # sample using polymer class
        sample_polymer.sample_chains()
        # print(mean_ete, sample_polymer.ete_stats.mean[-1])
        # print(std_ete, sample_polymer.ete_stats.stdev[-1])
        # both are random draws, so compare with generous absolute tolerance
        np.testing.assert_allclose(mean_ete, sample_polymer.ete_stats.mean[-1], atol=0.85, rtol=0.0)
        np.testing.assert_allclose(std_ete, sample_polymer.ete_stats.stdev[-1], atol=0.85, rtol=0.0)
class TestRandomChargedPolymer(unittest.TestCase):
    """Unit tests for ``core.polymer_chain.RandomChargePolymer``.

    Verifies the charged ("excited") dihedral drawing, the shuffled charged
    chain geometry against dihedrals measured on a pymatgen ``Molecule``,
    and the bead positions / bond lengths / bond angles of the charged chain.
    """
    @classmethod
    def setUpClass(cls):
        cls.monomer_num = 51
        cls.monomer_len = 1.5
        cls.link_len = 0.5
        cls.link_angle = 15.0
        cls.sample_num = 500
        # rows are (cumulative probability threshold, dihedral angle in degrees)
        cls.prob_angle = np.array([[0.0, -180.0], [0.2, -90.0], [0.3, -45.0], [0.4, 0.0],
                                  [0.5, 45.0], [0.6, 90.0], [0.8, 180.0]])
        # geometry parameters for the charged (excited) segments
        cls.c_monomer_len = 1.6
        cls.c_link_len = 0.6
        cls.c_link_angle = 14.0
        cls.c_prob_angle = np.array([[0.0, 175.0], [0.5, 5.0]])
        cls.c_polymer = RandomChargePolymer(cls.monomer_num, cls.monomer_len, cls.link_len, cls.link_angle,
                                            cls.prob_angle, cls.c_monomer_len, cls.c_link_len, cls.c_link_angle,
                                            cls.c_prob_angle, cls.sample_num)
    def test_c_random_angle(self):
        """Requested number of charged dihedrals are drawn from the charged set."""
        self.c_polymer.shuffle_charged_chain(10)
        c_angle_num = 10
        self.assertEqual(c_angle_num, len(self.c_polymer.c_dihedral_set))
        for angle in self.c_polymer.c_dihedral_set:
            self.assertIn(angle, self.c_prob_angle[:, 1])
    def test_shuffle_charged_chain(self):
        """Dihedrals measured on the shuffled chain equal the shuffled dihedral set."""
        self.c_polymer.shuffle_charged_chain(10)
        # check position lists are same length
        self.assertEqual(len(self.c_polymer.relax_chain), len(self.c_polymer.charged_chain))
        # loop through the chain and check dihedral angles
        fake_atoms = []
        fake_atom_coords = []
        for coord in self.c_polymer.charged_chain:
            fake_atoms.append('C')
            fake_atom_coords.append(coord)
        fake_mol = Molecule(species=fake_atoms, coords=fake_atom_coords)
        # find all the dihedral angles
        dihedral_list_actual = []
        for site, val in enumerate(fake_mol, 1):
            if site <= len(fake_mol) - 3 and site % 2 != 0:
                da = round(dihedral_tools.get_dihedral(fake_mol, [site, site + 1, site + 2, site + 3]))
                # this if statement ensures 180 == -180 and 0 == -0
                if da == -180.0 or da == -0.0:
                    da = abs(da)
                dihedral_list_actual.append(da)
        self.assertEqual(len(dihedral_list_actual), len(self.c_polymer.shuffle_dihedral_set))
        shuffle_dihedral_set = []
        # again this loop ensures 180 == -180 and 0 == -0
        for angle in self.c_polymer.shuffle_dihedral_set:
            if angle == -180.0 or angle == -0.0:
                shuffle_dihedral_set.append(abs(angle))
            else:
                shuffle_dihedral_set.append(angle)
        np.testing.assert_almost_equal(dihedral_list_actual, shuffle_dihedral_set)
    def test_c_build_chain(self):
        """Charged chain matches the neutral chain at 0 excitations and has correct geometry."""
        self.c_polymer.shuffle_charged_chain(0)
        # check the length is right
        self.assertEqual(self.monomer_num * 2, len(self.c_polymer.c_chain))
        # check that build_chain and c_build_chain are the same when there are 0 excited dihedrals
        np.testing.assert_almost_equal(self.c_polymer.chain, self.c_polymer.c_chain)
        self.c_polymer.shuffle_charged_chain(10)
        # check the location and lengths associated with c_indexes
        for i in self.c_polymer.c_indexes:
            pos_0 = self.c_polymer.c_chain[i * 2]
            pos_1 = self.c_polymer.c_chain[(i * 2) + 1]
            pos_2 = self.c_polymer.c_chain[(i * 2) + 2]
            c_mono = pos_1 - pos_0
            # checking the monomer length
            np.testing.assert_allclose(self.c_polymer.c_monomer_len, c_mono[0], atol=0.001, rtol=0.0)
            # checking link length
            link_len = np.linalg.norm(pos_2 - pos_1)
            np.testing.assert_allclose(self.c_polymer.c_link_len, link_len, atol=0.001, rtol=0.0)
            # check link angle
            vec_1 = pos_0 - pos_1
            vec_2 = pos_2 - pos_1
            cosine_angle = np.dot(vec_1, vec_2) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))
            # supplement of the interior angle between the two bond vectors
            angle = np.pi - np.arccos(cosine_angle)
            np.testing.assert_allclose(self.c_polymer.c_link_angle, angle, atol=0.001, rtol=0.0)
# Run the full unittest suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.mean",
"numpy.trace",
"numpy.arccos",
"core.polymer_chain.RandomChargePolymer",
"core.polymer_chain.Polymer",
"numpy.cross",
"pymatgen.Molecule",
"numpy.testing.assert_allclose",
"numpy.linalg.norm",
"numpy.array",
"numpy.testing.assert_almost_equal",
"numpy.zeros",
"numpy.random.unif... | [((13949, 13964), 'unittest.main', 'unittest.main', ([], {}), '()\n', (13962, 13964), False, 'import unittest\n'), ((498, 608), 'numpy.array', 'np.array', (['[[0.0, -180.0], [0.2, -90.0], [0.3, -45.0], [0.4, 0.0], [0.5, 45.0], [0.6, \n 90.0], [0.8, 180.0]]'], {}), '([[0.0, -180.0], [0.2, -90.0], [0.3, -45.0], [0.4, 0.0], [0.5, 45.0\n ], [0.6, 90.0], [0.8, 180.0]])\n', (506, 608), True, 'import numpy as np\n'), ((686, 794), 'core.polymer_chain.Polymer', 'Polymer', (['cls.monomer_num', 'cls.monomer_len', 'cls.link_len', 'cls.link_angle', 'cls.prob_angle', 'cls.sample_num'], {}), '(cls.monomer_num, cls.monomer_len, cls.link_len, cls.link_angle, cls\n .prob_angle, cls.sample_num)\n', (693, 794), False, 'from core.polymer_chain import Polymer\n'), ((1579, 1893), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [1.5, 0.0, 0.0], [1.982962913144534, 0.12940952255126037,\n 0.0], [3.482962913144534, 0.12940952255126037, 0.0], [3.965925826289068,\n 0.0, 0.0], [5.465925826289068, 0.0, 0.0], [5.948888739433602, \n 0.12940952255126037, 0.0], [7.448888739433602, 0.12940952255126037, 0.0]]'], {}), '([[0.0, 0.0, 0.0], [1.5, 0.0, 0.0], [1.982962913144534, \n 0.12940952255126037, 0.0], [3.482962913144534, 0.12940952255126037, 0.0\n ], [3.965925826289068, 0.0, 0.0], [5.465925826289068, 0.0, 0.0], [\n 5.948888739433602, 0.12940952255126037, 0.0], [7.448888739433602, \n 0.12940952255126037, 0.0]])\n', (1587, 1893), True, 'import numpy as np\n'), ((2185, 2270), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['self.linear_chain_actual', 'self.polymer.chain[:8]'], {}), '(self.linear_chain_actual, self.polymer.chain[:8]\n )\n', (2215, 2270), True, 'import numpy as np\n'), ((2896, 2949), 'pymatgen.Molecule', 'Molecule', ([], {'species': 'fake_atoms', 'coords': 'fake_atom_coords'}), '(species=fake_atoms, coords=fake_atom_coords)\n', (2904, 2949), False, 'from pymatgen import Molecule\n'), ((3853, 3932), 'numpy.testing.assert_almost_equal', 
'np.testing.assert_almost_equal', (['dihedral_list_actual', 'rotate_chain_dihedral_set'], {}), '(dihedral_list_actual, rotate_chain_dihedral_set)\n', (3883, 3932), True, 'import numpy as np\n'), ((4414, 4439), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (4422, 4439), True, 'import numpy as np\n'), ((4781, 4812), 'numpy.zeros', 'np.zeros', (['(self.monomer_num, 3)'], {}), '((self.monomer_num, 3))\n', (4789, 4812), True, 'import numpy as np\n'), ((5553, 5640), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['(self.polymer.unit_normal ** 2)', '(calc_u_vectors ** 2)'], {}), '(self.polymer.unit_normal ** 2, \n calc_u_vectors ** 2)\n', (5583, 5640), True, 'import numpy as np\n'), ((5764, 5796), 'numpy.array', 'np.array', (['([0.0, 0.0, 1.0] * 1000)'], {}), '([0.0, 0.0, 1.0] * 1000)\n', (5772, 5796), True, 'import numpy as np\n'), ((5976, 6044), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['self.polymer.s_order_param.mean', '(1.0)'], {}), '(self.polymer.s_order_param.mean, 1.0)\n', (6006, 6044), True, 'import numpy as np\n'), ((6179, 6195), 'numpy.zeros', 'np.zeros', (['(n, 3)'], {}), '((n, 3))\n', (6187, 6195), True, 'import numpy as np\n'), ((6857, 6936), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['self.polymer.s_order_param.mean', '(0.0)'], {'decimal': '(1)'}), '(self.polymer.s_order_param.mean, 0.0, decimal=1)\n', (6887, 6936), True, 'import numpy as np\n'), ((7014, 7110), 'core.polymer_chain.Polymer', 'Polymer', (['self.monomer_num', 'self.monomer_len', 'self.link_len', 'self.link_angle', 'self.prob_angle'], {}), '(self.monomer_num, self.monomer_len, self.link_len, self.link_angle,\n self.prob_angle)\n', (7021, 7110), False, 'from core.polymer_chain import Polymer\n'), ((9021, 9145), 'core.polymer_chain.Polymer', 'Polymer', (['self.monomer_num', 'self.monomer_len', 'self.link_len', 'self.link_angle', 'self.prob_angle'], {'sample_num': 
'self.sample_num'}), '(self.monomer_num, self.monomer_len, self.link_len, self.link_angle,\n self.prob_angle, sample_num=self.sample_num)\n', (9028, 9145), False, 'from core.polymer_chain import Polymer\n'), ((9362, 9381), 'numpy.mean', 'np.mean', (['end_to_end'], {}), '(end_to_end)\n', (9369, 9381), True, 'import numpy as np\n'), ((9400, 9418), 'numpy.std', 'np.std', (['end_to_end'], {}), '(end_to_end)\n', (9406, 9418), True, 'import numpy as np\n'), ((9625, 9721), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['mean_ete', 'sample_polymer.ete_stats.mean[-1]'], {'atol': '(0.85)', 'rtol': '(0.0)'}), '(mean_ete, sample_polymer.ete_stats.mean[-1],\n atol=0.85, rtol=0.0)\n', (9651, 9721), True, 'import numpy as np\n'), ((9726, 9822), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['std_ete', 'sample_polymer.ete_stats.stdev[-1]'], {'atol': '(0.85)', 'rtol': '(0.0)'}), '(std_ete, sample_polymer.ete_stats.stdev[-1],\n atol=0.85, rtol=0.0)\n', (9752, 9822), True, 'import numpy as np\n'), ((10084, 10194), 'numpy.array', 'np.array', (['[[0.0, -180.0], [0.2, -90.0], [0.3, -45.0], [0.4, 0.0], [0.5, 45.0], [0.6, \n 90.0], [0.8, 180.0]]'], {}), '([[0.0, -180.0], [0.2, -90.0], [0.3, -45.0], [0.4, 0.0], [0.5, 45.0\n ], [0.6, 90.0], [0.8, 180.0]])\n', (10092, 10194), True, 'import numpy as np\n'), ((10345, 10381), 'numpy.array', 'np.array', (['[[0.0, 175.0], [0.5, 5.0]]'], {}), '([[0.0, 175.0], [0.5, 5.0]])\n', (10353, 10381), True, 'import numpy as np\n'), ((10406, 10602), 'core.polymer_chain.RandomChargePolymer', 'RandomChargePolymer', (['cls.monomer_num', 'cls.monomer_len', 'cls.link_len', 'cls.link_angle', 'cls.prob_angle', 'cls.c_monomer_len', 'cls.c_link_len', 'cls.c_link_angle', 'cls.c_prob_angle', 'cls.sample_num'], {}), '(cls.monomer_num, cls.monomer_len, cls.link_len, cls.\n link_angle, cls.prob_angle, cls.c_monomer_len, cls.c_link_len, cls.\n c_link_angle, cls.c_prob_angle, cls.sample_num)\n', (10425, 10602), False, 'from 
core.polymer_chain import RandomChargePolymer\n'), ((11468, 11521), 'pymatgen.Molecule', 'Molecule', ([], {'species': 'fake_atoms', 'coords': 'fake_atom_coords'}), '(species=fake_atoms, coords=fake_atom_coords)\n', (11476, 11521), False, 'from pymatgen import Molecule\n'), ((12430, 12504), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['dihedral_list_actual', 'shuffle_dihedral_set'], {}), '(dihedral_list_actual, shuffle_dihedral_set)\n', (12460, 12504), True, 'import numpy as np\n'), ((12807, 12883), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['self.c_polymer.chain', 'self.c_polymer.c_chain'], {}), '(self.c_polymer.chain, self.c_polymer.c_chain)\n', (12837, 12883), True, 'import numpy as np\n'), ((4149, 4191), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['stat.mean', '(1.0)'], {}), '(stat.mean, 1.0)\n', (4175, 4191), True, 'import numpy as np\n'), ((4499, 4569), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['(u_vec ** 2)', '(totally_planar_normal ** 2)'], {}), '(u_vec ** 2, totally_planar_normal ** 2)\n', (4529, 4569), True, 'import numpy as np\n'), ((5923, 5961), 'numpy.trace', 'np.trace', (['self.polymer.director_matrix'], {}), '(self.polymer.director_matrix)\n', (5931, 5961), True, 'import numpy as np\n'), ((6248, 6278), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', '(1)'], {}), '(0.0, 1.0, 1)\n', (6265, 6278), True, 'import numpy as np\n'), ((6299, 6329), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', '(1)'], {}), '(0.0, 1.0, 1)\n', (6316, 6329), True, 'import numpy as np\n'), ((6804, 6842), 'numpy.trace', 'np.trace', (['self.polymer.director_matrix'], {}), '(self.polymer.director_matrix)\n', (6812, 6842), True, 'import numpy as np\n'), ((7260, 7302), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(1.0)', 'stat.mean'], {}), '(1.0, stat.mean)\n', (7286, 7302), True, 'import numpy as np\n'), ((8301, 8311), 
'numpy.mean', 'np.mean', (['i'], {}), '(i)\n', (8308, 8311), True, 'import numpy as np\n'), ((8811, 8886), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['actual_means[i]', 'stat.mean'], {'atol': '(0.01)', 'rtol': '(0.0)'}), '(actual_means[i], stat.mean, atol=0.01, rtol=0.0)\n', (8837, 8886), True, 'import numpy as np\n'), ((13294, 13388), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['self.c_polymer.c_monomer_len', 'c_mono[0]'], {'atol': '(0.001)', 'rtol': '(0.0)'}), '(self.c_polymer.c_monomer_len, c_mono[0], atol=\n 0.001, rtol=0.0)\n', (13320, 13388), True, 'import numpy as np\n'), ((13442, 13471), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos_2 - pos_1)'], {}), '(pos_2 - pos_1)\n', (13456, 13471), True, 'import numpy as np\n'), ((13484, 13573), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['self.c_polymer.c_link_len', 'link_len'], {'atol': '(0.001)', 'rtol': '(0.0)'}), '(self.c_polymer.c_link_len, link_len, atol=0.001,\n rtol=0.0)\n', (13510, 13573), True, 'import numpy as np\n'), ((13831, 13919), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['self.c_polymer.c_link_angle', 'angle'], {'atol': '(0.001)', 'rtol': '(0.0)'}), '(self.c_polymer.c_link_angle, angle, atol=0.001,\n rtol=0.0)\n', (13857, 13919), True, 'import numpy as np\n'), ((4728, 4749), 'numpy.linalg.norm', 'np.linalg.norm', (['u_vec'], {}), '(u_vec)\n', (4742, 4749), True, 'import numpy as np\n'), ((5068, 5088), 'numpy.cross', 'np.cross', (['vec1', 'vec2'], {}), '(vec1, vec2)\n', (5076, 5088), True, 'import numpy as np\n'), ((5126, 5159), 'numpy.linalg.norm', 'np.linalg.norm', (['calc_u_vectors[i]'], {}), '(calc_u_vectors[i])\n', (5140, 5159), True, 'import numpy as np\n'), ((5418, 5438), 'numpy.cross', 'np.cross', (['vec1', 'vec2'], {}), '(vec1, vec2)\n', (5426, 5438), True, 'import numpy as np\n'), ((5480, 5517), 'numpy.linalg.norm', 'np.linalg.norm', (['calc_u_vectors[index]'], {}), '(calc_u_vectors[index])\n', (5494, 
5517), True, 'import numpy as np\n'), ((13696, 13716), 'numpy.dot', 'np.dot', (['vec_1', 'vec_2'], {}), '(vec_1, vec_2)\n', (13702, 13716), True, 'import numpy as np\n'), ((13795, 13818), 'numpy.arccos', 'np.arccos', (['cosine_angle'], {}), '(cosine_angle)\n', (13804, 13818), True, 'import numpy as np\n'), ((3159, 3234), 'utils.dihedral_tools.get_dihedral', 'dihedral_tools.get_dihedral', (['fake_mol', '[site, site + 1, site + 2, site + 3]'], {}), '(fake_mol, [site, site + 1, site + 2, site + 3])\n', (3186, 3234), False, 'from utils import dihedral_tools\n'), ((11731, 11806), 'utils.dihedral_tools.get_dihedral', 'dihedral_tools.get_dihedral', (['fake_mol', '[site, site + 1, site + 2, site + 3]'], {}), '(fake_mol, [site, site + 1, site + 2, site + 3])\n', (11758, 11806), False, 'from utils import dihedral_tools\n'), ((13720, 13741), 'numpy.linalg.norm', 'np.linalg.norm', (['vec_1'], {}), '(vec_1)\n', (13734, 13741), True, 'import numpy as np\n'), ((13744, 13765), 'numpy.linalg.norm', 'np.linalg.norm', (['vec_2'], {}), '(vec_2)\n', (13758, 13765), True, 'import numpy as np\n'), ((7963, 8023), 'numpy.dot', 'np.dot', (['p2_polymer.unit_normal[i]', 'p2_polymer.unit_normal[j]'], {}), '(p2_polymer.unit_normal[i], p2_polymer.unit_normal[j])\n', (7969, 8023), True, 'import numpy as np\n')] |
import numpy as np
# ResNet-18, 5 classes, 100 linear probing epochs, classical, 8 width, variable epoch size
def results():
    """Return linear-probing accuracies ordered by epoch size.

    Returns:
        tuple: ``(epoch_size, sorted_accs)`` where ``epoch_size`` is an
        ascending (5,) int array and ``sorted_accs`` is the matching
        (5, 6, 2) array of ``(accuracy, epoch_index)`` pairs per run.
    """
    # raw runs, one row per epoch-size setting (unsorted)
    raw_accs = np.array([
        [(37.94, 9), (40.4, 19), (45.72, 49), (48.2, 99), (53.68, 199), (56.1, 299)],
        [(43.32, 9), (47.64, 19), (48.98, 49), (51.96, 99), (53.82, 199), (54.48, 299)],
        [(43.04, 9), (48.34, 19), (51.9, 49), (56.1, 99), (60.3, 199), (62.62, 299)],
        [(44.86, 9), (47.86, 19), (53.4, 49), (56.58, 99), (57.3, 199), (59.7, 299)],
        [(52.56, 9), (55.16, 19), (60.88, 49), (62.08, 99), (63.56, 199), (64.02, 299)]])
    sizes = np.array([2500, 5000, 17500, 10000, 25000])
    # permutation that puts the epoch sizes in ascending order,
    # applied to both outputs so rows stay paired with their size
    perm = np.argsort(sizes)
    return sizes[perm], raw_accs[perm]
def results_low_tepochs():
    """Return low-training-epoch probing accuracies ordered by epoch size.

    Returns:
        tuple: ``(epoch_size, sorted_accs)`` where ``epoch_size`` is an
        ascending (5,) int array and ``sorted_accs`` is the matching
        (5, 10, 2) array of ``(accuracy, epoch_index)`` pairs per run.
    """
    # raw runs, one row per epoch-size setting (unsorted)
    raw_accs = np.array([
        [(39.74, 2), (45.32, 5), (44.8, 8), (44.18, 11), (45.76, 14), (45.98, 17), (48.24, 20), (47.16, 23), (48.9, 26),
         (50.68, 29)],
        [(35.26, 2), (36.7, 5), (36.92, 8), (38.68, 11), (38.32, 14), (39.0, 17), (38.54, 20), (38.62, 23), (39.24, 26),
         (40.9, 29)],
        [(36.34, 2), (38.56, 5), (42.76, 8), (45.9, 11), (45.84, 14), (46.7, 17), (46.68, 20), (46.34, 23), (45.92, 26),
         (45.58, 29)],
        [(41.82, 2), (44.2, 5), (45.08, 8), (47.5, 11), (47.52, 14), (48.3, 17), (48.8, 20), (47.76, 23), (48.8, 26),
         (48.84, 29)],
        [(48.94, 2), (51.56, 5), (52.02, 8), (51.78, 11), (53.5, 14), (53.0, 17), (53.94, 20), (55.38, 23), (55.88, 26),
         (57.44, 29)]])
    sizes = np.array([10000, 2500, 5000, 17500, 25000])
    # permutation that puts the epoch sizes in ascending order,
    # applied to both outputs so rows stay paired with their size
    perm = np.argsort(sizes)
    return sizes[perm], raw_accs[perm]
| [
"numpy.array"
] | [((138, 566), 'numpy.array', 'np.array', (['[[(37.94, 9), (40.4, 19), (45.72, 49), (48.2, 99), (53.68, 199), (56.1, 299\n )], [(43.32, 9), (47.64, 19), (48.98, 49), (51.96, 99), (53.82, 199), (\n 54.48, 299)], [(43.04, 9), (48.34, 19), (51.9, 49), (56.1, 99), (60.3, \n 199), (62.62, 299)], [(44.86, 9), (47.86, 19), (53.4, 49), (56.58, 99),\n (57.3, 199), (59.7, 299)], [(52.56, 9), (55.16, 19), (60.88, 49), (\n 62.08, 99), (63.56, 199), (64.02, 299)]]'], {}), '([[(37.94, 9), (40.4, 19), (45.72, 49), (48.2, 99), (53.68, 199), (\n 56.1, 299)], [(43.32, 9), (47.64, 19), (48.98, 49), (51.96, 99), (53.82,\n 199), (54.48, 299)], [(43.04, 9), (48.34, 19), (51.9, 49), (56.1, 99),\n (60.3, 199), (62.62, 299)], [(44.86, 9), (47.86, 19), (53.4, 49), (\n 56.58, 99), (57.3, 199), (59.7, 299)], [(52.56, 9), (55.16, 19), (60.88,\n 49), (62.08, 99), (63.56, 199), (64.02, 299)]])\n', (146, 566), True, 'import numpy as np\n'), ((604, 647), 'numpy.array', 'np.array', (['[2500, 5000, 17500, 10000, 25000]'], {}), '([2500, 5000, 17500, 10000, 25000])\n', (612, 647), True, 'import numpy as np\n'), ((810, 1488), 'numpy.array', 'np.array', (['[[(39.74, 2), (45.32, 5), (44.8, 8), (44.18, 11), (45.76, 14), (45.98, 17),\n (48.24, 20), (47.16, 23), (48.9, 26), (50.68, 29)], [(35.26, 2), (36.7,\n 5), (36.92, 8), (38.68, 11), (38.32, 14), (39.0, 17), (38.54, 20), (\n 38.62, 23), (39.24, 26), (40.9, 29)], [(36.34, 2), (38.56, 5), (42.76, \n 8), (45.9, 11), (45.84, 14), (46.7, 17), (46.68, 20), (46.34, 23), (\n 45.92, 26), (45.58, 29)], [(41.82, 2), (44.2, 5), (45.08, 8), (47.5, 11\n ), (47.52, 14), (48.3, 17), (48.8, 20), (47.76, 23), (48.8, 26), (48.84,\n 29)], [(48.94, 2), (51.56, 5), (52.02, 8), (51.78, 11), (53.5, 14), (\n 53.0, 17), (53.94, 20), (55.38, 23), (55.88, 26), (57.44, 29)]]'], {}), '([[(39.74, 2), (45.32, 5), (44.8, 8), (44.18, 11), (45.76, 14), (\n 45.98, 17), (48.24, 20), (47.16, 23), (48.9, 26), (50.68, 29)], [(35.26,\n 2), (36.7, 5), (36.92, 8), (38.68, 11), (38.32, 14), 
(39.0, 17), (38.54,\n 20), (38.62, 23), (39.24, 26), (40.9, 29)], [(36.34, 2), (38.56, 5), (\n 42.76, 8), (45.9, 11), (45.84, 14), (46.7, 17), (46.68, 20), (46.34, 23\n ), (45.92, 26), (45.58, 29)], [(41.82, 2), (44.2, 5), (45.08, 8), (47.5,\n 11), (47.52, 14), (48.3, 17), (48.8, 20), (47.76, 23), (48.8, 26), (\n 48.84, 29)], [(48.94, 2), (51.56, 5), (52.02, 8), (51.78, 11), (53.5, \n 14), (53.0, 17), (53.94, 20), (55.38, 23), (55.88, 26), (57.44, 29)]])\n', (818, 1488), True, 'import numpy as np\n'), ((1556, 1599), 'numpy.array', 'np.array', (['[10000, 2500, 5000, 17500, 25000]'], {}), '([10000, 2500, 5000, 17500, 25000])\n', (1564, 1599), True, 'import numpy as np\n')] |
"""a rewrite of cnn.py
this version is mostly inspired by
NIPS2017 (mask cnn).
see https://github.com/leelabcnbc/thesis-proposal-yimeng/blob/master/thesis_proposal/population_neuron_fitting/maskcnn/cnn.py
"""
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.nn import init as nn_init
import math
from copy import deepcopy
import numpy as np
from collections import OrderedDict
from .configs.cnn_arch import sanity_check_arch_config
from .configs.cnn_init import sanity_check_init_config
from .configs.cnn_opt import sanity_check_opt_config, sanity_check_one_optimizer_opt_config
from torch.nn.functional import mse_loss
class HalfSquare(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return F.relu(input) ** 2
class Square(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return input ** 2
class Abs(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
return torch.abs(input)
class FactoredLinear2D(nn.Module):
"""
skeleton copied from implementation of nn.Linear from PyTorch 0.3.1
# just copied from my maskcnn implementation.
"""
def __init__(self, in_channels, map_size, out_features, bias=True,
weight_feature_constraint=None, weight_spatial_constraint=None):
super().__init__()
assert isinstance(in_channels, int) and in_channels > 0
self.in_channels = in_channels
self.map_size = _check_input_size(map_size)
assert isinstance(out_features, int) and out_features > 0
self.out_features = out_features
assert weight_feature_constraint in {None, 'abs'}
self.weight_feature_constraint = weight_feature_constraint
assert weight_spatial_constraint in {None, 'abs'}
self.weight_spatial_constraint = weight_spatial_constraint
self.weight_spatial: nn.Parameter = nn.Parameter(
torch.Tensor(self.out_features, self.map_size[0], self.map_size[1]))
self.weight_feature: nn.Parameter = nn.Parameter(torch.Tensor(self.out_features, self.in_channels))
if bias:
self.bias: nn.Parameter = nn.Parameter(torch.Tensor(self.out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
# print('changed impl')
def reset_parameters(self):
# this is simply adapted from nn.Linear. should always be initialized by hand.
stdv = 1. / math.sqrt(self.in_channels * self.map_size[0] * self.map_size[1])
self.weight_spatial.data.uniform_(-stdv, stdv)
self.weight_feature.data.fill_(1.0)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
# I assume that input has shape (N, in_channels, map_size[0], map_size[1]
# first get the weights.
weight_spatial_view = self.weight_spatial
weight_feature_view = self.weight_feature
if self.weight_feature_constraint is not None:
if self.weight_feature_constraint == 'abs':
# weight_spatial_view = torch.abs(weight_spatial_view)
weight_feature_view = torch.abs(weight_feature_view)
else:
raise RuntimeError
if self.weight_spatial_constraint is not None:
if self.weight_spatial_constraint == 'abs':
weight_spatial_view = torch.abs(weight_spatial_view)
else:
raise RuntimeError
weight_spatial_view = weight_spatial_view.view(self.out_features, 1, self.map_size[0], self.map_size[1])
weight_feature_view = weight_feature_view.view(self.out_features, self.in_channels, 1, 1)
# then broadcast to get new weight.
if self.in_channels != 1:
weight = weight_spatial_view * weight_feature_view
else:
# feature weighting not needed
# this is for both quicker learning, as well as being compatible with `CNN.py` in the original repo.
weight = weight_spatial_view.expand(self.out_features, self.in_channels, self.map_size[0], self.map_size[1])
weight = weight.view(self.out_features, self.in_channels * self.map_size[0] * self.map_size[1])
return F.linear(input.view(input.size(0), -1), weight, self.bias)
def _check_input_size(input_size):
if isinstance(input_size, int):
input_size = (input_size, input_size)
input_size = (input_size, input_size) if isinstance(input_size, int) else input_size
assert isinstance(input_size, tuple) and len(input_size) == 2
assert isinstance(input_size[0], int) and input_size[0] > 0
assert isinstance(input_size[1], int) and input_size[1] > 0
return input_size
def _new_map_size(map_size, kernel_size, padding, stride):
map_size_new = ((map_size[0] - kernel_size + 2 * padding) // stride + 1,
(map_size[1] - kernel_size + 2 * padding) // stride + 1)
assert (map_size_new[0] - 1) * stride + kernel_size == map_size[0] + 2 * padding
assert (map_size_new[1] - 1) * stride + kernel_size == map_size[1] + 2 * padding
# print(map_size_new)
return map_size_new
def inv_softplus(x):
assert np.all(x > 0)
# copied from original code.
# I think numerically it's not very stable.
return np.log(np.exp(x) - 1)
class CNN(nn.Module):
"""
a class that can handle all CNN variants in the paper.
"""
def __init__(self,
arch_config,
init_config,
input_size=20,
n=1,
bn_eps=0.001,
mean_response=None,
seed=None, scale_hack=None,
):
super().__init__()
# ====== parameter check start ======
# if init_config is None:
# # match what's in original code.
# init_config = {
# 'conv_std': 0.01,
# 'fc_std': 0.01,
# }
if seed is not None:
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
sanity_check_arch_config(arch_config)
sanity_check_init_config(init_config)
self.input_size = _check_input_size(input_size)
self.act_fn = arch_config['act_fn']
# ====== parameter check end ======
# ====== define conv layers ======
if len(arch_config['conv']) > 0:
self.conv, map_size = self._generate_conv(arch_config['conv'], bn_eps,
arch_config['conv_last_no_act'])
else:
# for GLM stuff.
self.conv, map_size = None, self.input_size
# ====== define fc layer ======
self.reshape_conv = not arch_config['fc']['factored']
self.fc = self._generate_fc(map_size, arch_config['conv'][-1]['out_channel'] if self.conv is not None else 1,
arch_config['fc'], n)
# ====== define last act fn ======
if not arch_config['linear_output']:
self.final_act = self._gen_nonlinearity()
else:
self.final_act = None
self.scale_hack = scale_hack
self.init_weights(init_config)
if mean_response is not None:
self.init_bias(mean_response)
# helper for computing loss.
if self.conv is not None:
self.conv_module_list = [x for x in self.conv.children() if isinstance(x, nn.Conv2d)]
else:
self.conv_module_list = []
def _gen_nonlinearity(self):
assert self.act_fn is not None, 'you should not come here if there is no nonlinearity'
if self.act_fn == 'softplus':
return nn.Softplus()
elif self.act_fn == 'relu':
return nn.ReLU()
elif self.act_fn == 'sq':
return Square()
elif self.act_fn == 'halfsq':
return HalfSquare()
elif self.act_fn == 'abs':
return Abs()
else:
# to implement other nonlinearities
# such as HalfSquaring, etc.
# check http://pytorch.org/docs/master/_modules/torch/nn/modules/activation.html#Sigmoid
# to define a new module.
# it's not difficult.
raise NotImplementedError
    def _generate_conv(self, conv_config, bn_eps, last_no_act):
        """Assemble the convolutional feature stack described by ``conv_config``.

        Each entry of ``conv_config`` configures one layer as
        conv -> [batchnorm] -> [activation] -> [pooling]; optional pieces are
        controlled by the entry's 'bn' and 'pool' fields and by ``last_no_act``.

        :param conv_config: list of per-layer dicts (kernel_size, stride,
            padding, dilation, out_channel, bn, bn_affine, pool).
        :param bn_eps: epsilon forwarded to every BatchNorm2d.
        :param last_no_act: if True, skip the activation after the last conv.
        :return: (nn.Sequential of named layers, final spatial map size).
            NOTE: ``_new_map_size`` is defined elsewhere in this file;
            presumably it applies the standard conv output-size formula.
        """
        map_size = self.input_size
        conv_all = []  # accumulates (name, module) pairs for the OrderedDict
        for idx, conv_this_layer in enumerate(conv_config):
            kernel_size = conv_this_layer['kernel_size']
            # first layer consumes the single-channel input image
            in_channels = 1 if idx == 0 else conv_config[idx - 1]['out_channel']
            stride = conv_this_layer['stride']
            padding = conv_this_layer['padding']
            dilation = conv_this_layer['dilation']
            # effective receptive extent of a dilated kernel:
            # k_eff = k + (dilation - 1) * (k - 1)
            map_size = _new_map_size(map_size, kernel_size + (dilation - 1) * (kernel_size - 1),
                                     padding, stride)
            # bias is disabled when BN follows (BN's beta makes it redundant)
            conv_all.append(
                (f'conv{idx}', nn.Conv2d(in_channels=in_channels,
                                          out_channels=conv_this_layer['out_channel'],
                                          kernel_size=kernel_size,
                                          stride=stride,
                                          padding=padding,
                                          bias=not conv_this_layer['bn'],
                                          dilation=dilation))
            )
            if conv_this_layer['bn']:
                conv_all.append(
                    # notice that, to match behavior of original code,
                    # for the optimizer, I need to set learning rate for gamma to be 0.
                    # or .weight here.
                    (f'bn{idx}', nn.BatchNorm2d(num_features=conv_this_layer['out_channel'],
                                                eps=bn_eps, momentum=0.1, affine=conv_this_layer['bn_affine']))
                )
            # add an activation unless this is the last layer and the config
            # asked for a linear final conv
            if self.act_fn is not None and ((not last_no_act) or idx != len(conv_config) - 1):
                conv_all.append(
                    (f'act{idx}',
                     # this is essentially what `elu` (which is NOT the ELU in standard usage)
                     # means in the original code.
                     self._gen_nonlinearity()
                     )
                )
            # finally, add pooling.
            pool_config = conv_this_layer['pool']
            if pool_config is not None:
                if pool_config['pool_type'] == 'max':
                    conv_all.append(
                        (f'pool{idx}', nn.MaxPool2d(kernel_size=pool_config['kernel_size'],
                                                  stride=pool_config['stride'],
                                                  padding=pool_config['padding']))
                    )
                elif pool_config['pool_type'] == 'avg':
                    conv_all.append(
                        (f'pool{idx}', nn.AvgPool2d(kernel_size=pool_config['kernel_size'],
                                                  stride=pool_config['stride'],
                                                  padding=pool_config['padding']))
                    )
                else:
                    raise NotImplementedError
                # pooling shrinks the map just like a conv with the same k/p/s
                map_size = _new_map_size(map_size, pool_config['kernel_size'], pool_config['padding'],
                                         pool_config['stride'])
        return nn.Sequential(OrderedDict(conv_all)), map_size
def _generate_fc(self, map_size, out_channel, fc_config, n):
module_list = []
if fc_config['factored']:
assert fc_config['mlp'] is None
module_list.append(('fc', FactoredLinear2D(out_channel,
map_size, n, bias=True,
weight_spatial_constraint=fc_config['factored_constraint'],
weight_feature_constraint=fc_config['factored_constraint'])))
else:
if fc_config['mlp'] is None:
module_list.append(('fc', nn.Linear(map_size[0] * map_size[1] * out_channel, n)))
else:
module_list.append(('mlp', nn.Linear(map_size[0] * map_size[1] * out_channel, fc_config['mlp'])))
# should be there.
module_list.append(('mlp_act', self._gen_nonlinearity()))
module_list.append(('fc', nn.Linear(fc_config['mlp'], n)))
if fc_config['dropout'] is not None:
module_list.append(('dropout', nn.Dropout(p=fc_config['dropout'])))
return nn.Sequential(OrderedDict(module_list))
def init_bias(self, mean_response):
# always assume that previous layer has 0 output.
if self.final_act is None:
b = mean_response
else:
# raise RuntimeError('should not be here for a regular CNN, which has linear output')
# well this controls last layer
if self.act_fn == 'softplus':
b = inv_softplus(mean_response)
elif self.act_fn == 'relu':
b = mean_response
else:
raise NotImplementedError
assert b.shape == self.fc.fc.bias.size()
assert np.all(np.isfinite(b))
self.fc.fc.bias.data[...] = torch.Tensor(b)
def init_weights(self, init_config):
name_mapping_random = {
'conv.conv0.weight': 'conv_init',
'conv.conv1.weight': 'conv_init',
'conv.conv2.weight': 'conv_init',
'conv.conv0.bias': 0,
'conv.conv1.bias': 0,
'conv.conv2.bias': 0,
'conv.bn0.bias': 0,
'conv.bn1.bias': 0,
'conv.bn2.bias': 0,
'conv.bn0.weight': 1,
'conv.bn1.weight': 1,
'conv.bn2.weight': 1,
# for factored case
'fc.fc.weight_feature': 'fc_init',
'fc.fc.weight_spatial': 'fc_init',
# for unfactored
'fc.fc.weight': 'fc_init',
# for both
# also, this param will be intialized by mean params.
'fc.fc.bias': 0,
# for MLP
# use conv init as it acts like conv. also, it uses ReLU.
'fc.mlp.weight': 'conv_init',
'fc.mlp.bias': 0,
}
# use init_config
for param_name, param_value in self.named_parameters():
# print(param_name, type(param_value), param_value.size())
data_to_fill = name_mapping_random[param_name]
if not isinstance(data_to_fill, str):
param_value.data[...] = data_to_fill
else:
# simple. with preconditioning of mean parameters,
# it should not be that bad.
fill_value = init_config[data_to_fill]
if isinstance(fill_value, float):
param_value.data.normal_(0, fill_value)
else:
assert fill_value == 'kaiming_fan_out'
nn_init.kaiming_normal(param_value.data, mode='fan_out')
if self.scale_hack is not None:
print('hack scale')
param_value.data.mul_(self.scale_hack)
pass
def forward(self, input):
if self.conv is not None:
x = self.conv(input)
else:
x = input
if self.reshape_conv:
x = x.view(x.size(0), -1)
x = self.fc(x)
if self.final_act is not None:
x = self.final_act(x)
return x
def get_optimizer(model: CNN, optimizer_config: dict):
    """Create the optimizer described by ``optimizer_config`` over all of
    the model's parameters (everything is always learned).

    Supported 'optimizer_type' values: 'sgd' (with momentum) and 'adam'.
    """
    assert sanity_check_one_optimizer_opt_config(optimizer_config)
    opt_type = optimizer_config['optimizer_type']
    if opt_type == 'sgd':
        return optim.SGD(model.parameters(), lr=optimizer_config['lr'],
                         momentum=optimizer_config['momentum'])
    if opt_type == 'adam':
        return optim.Adam(model.parameters(), lr=optimizer_config['lr'])
    raise NotImplementedError
def get_conv_loss(opt_conv_config, conv_module_list):
    """Sum the L1/L2 penalties over the conv layers' weights (and biases,
    when present), pairing each module with its config entry."""
    penalties = []
    for module, cfg in zip(conv_module_list, opt_conv_config):
        weight: nn.Parameter = module.weight
        if cfg['l2'] != 0:
            penalties.append(cfg['l2'] * 0.5 * torch.sum(weight ** 2))
        if cfg['l1'] != 0:
            penalties.append(cfg['l1'] * torch.sum(torch.abs(weight)))
        if module.bias is None:
            continue
        if cfg['l2_bias'] != 0:
            penalties.append(cfg['l2_bias'] * 0.5 * torch.sum(module.bias ** 2))
        if cfg['l1_bias'] != 0:
            penalties.append(cfg['l1_bias'] * torch.sum(torch.abs(module.bias)))
    return sum(penalties)
def get_fc_loss(opt_fc_config, fc_module):
    """L1/L2 weight penalties for the readout module, plus optional bias
    penalties.

    Supports a plain ``nn.Linear`` (one weight matrix) and a
    ``FactoredLinear2D`` (separate feature and spatial weight tensors).
    """
    # collect whichever weight tensors this module type contributes
    if isinstance(fc_module, nn.Linear):
        weights = [fc_module.weight]
    elif isinstance(fc_module, FactoredLinear2D):
        weights = [fc_module.weight_feature, fc_module.weight_spatial]
    else:
        raise NotImplementedError
    penalties = []
    if opt_fc_config['l2'] != 0:
        penalties.extend(opt_fc_config['l2'] * 0.5 * torch.sum(w ** 2) for w in weights)
    if opt_fc_config['l1'] != 0:
        penalties.extend(opt_fc_config['l1'] * torch.sum(torch.abs(w)) for w in weights)
    if fc_module.bias is not None:
        if opt_fc_config['l2_bias'] != 0:
            penalties.append(opt_fc_config['l2_bias'] * 0.5 * torch.sum(fc_module.bias ** 2))
        if opt_fc_config['l1_bias'] != 0:
            penalties.append(opt_fc_config['l1_bias'] * torch.sum(torch.abs(fc_module.bias)))
    return sum(penalties)
def get_output_loss(yhat, y, loss_type):
    """Data term of the training loss: 'mse' or a stabilized 'poisson'
    negative log likelihood (constant terms dropped)."""
    if loss_type == 'mse':
        return mse_loss(yhat, y)
    if loss_type == 'poisson':
        # the 1e-5 keeps log() finite near zero predictions, matching the
        # NIPS2017 (mask CNN) code
        return torch.mean(yhat - y * torch.log(yhat + 1e-5))
    raise NotImplementedError
def get_loss(opt_config: dict, model: CNN = None, strict=True):
    """Build the training loss closure described by ``opt_config``.

    Returns a function ``loss(yhat, y, model_this)`` that adds the output
    (data) loss to the L1/L2 penalties of the conv (or MLP) layers and of
    the readout. When the model's fc stack contains an 'mlp' submodule, the
    single entry of ``opt_config['conv']`` doubles as that MLP's penalty
    config (such models have no conv layers).

    :param opt_config: validated by ``sanity_check_opt_config`` (defined
        elsewhere in this file).
    :param model: used only for consistency checks between the config and
        the architecture; required when ``strict`` is True.
    :param strict: if True, ``model`` must be provided.
    """
    assert sanity_check_opt_config(opt_config)
    # deep-copied so the returned closure is immune to later mutation of the
    # caller's dict
    opt_config = deepcopy(opt_config)
    # we don't need model. but that can be of help.
    has_mlp = False
    if strict:
        assert model is not None
    if model is not None:
        has_mlp = hasattr(model.fc, 'mlp')
        if has_mlp:
            # MLP models carry no conv layers; the 'conv' config slot is
            # reused for the MLP penalty instead
            assert len(model.conv_module_list) == 0
        else:
            assert len(model.conv_module_list) == len(opt_config['conv'])
    if has_mlp:
        assert len(opt_config['conv']) == 1
    def loss_func_inner(yhat, y, model_this: CNN):
        # conv penalties + readout penalties + data term
        conv_loss = get_conv_loss(opt_config['conv'], model_this.conv_module_list)
        fc_loss = get_fc_loss(opt_config['fc'], model_this.fc.fc)
        output_loss = get_output_loss(yhat, y, opt_config['loss'])
        # print(conv_loss, type(conv_loss))
        # print(fc_loss, type(fc_loss))
        # print(output_loss, type(output_loss))
        return conv_loss + fc_loss + output_loss
    def loss_func_inner_mlp(yhat, y, model_this: CNN):
        # the MLP layer is penalized like a conv layer, via get_fc_loss
        mlp_loss = get_fc_loss(opt_config['conv'][0], model_this.fc.mlp)
        fc_loss = get_fc_loss(opt_config['fc'], model_this.fc.fc)
        output_loss = get_output_loss(yhat, y, opt_config['loss'])
        # print(conv_loss, type(conv_loss))
        # print(fc_loss, type(fc_loss))
        # print(output_loss, type(output_loss))
        return mlp_loss + fc_loss + output_loss
    if has_mlp:
        return loss_func_inner_mlp
    else:
        return loss_func_inner
| [
"torch.nn.ReLU",
"torch.nn.Dropout",
"math.sqrt",
"numpy.isfinite",
"torch.sum",
"copy.deepcopy",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"numpy.exp",
"collections.OrderedDict",
"torch.abs",
"torch.nn.functional.mse_loss",
"torch.Tensor",
"torch.nn.functional.relu",
"torch.nn.init.... | [((5324, 5337), 'numpy.all', 'np.all', (['(x > 0)'], {}), '(x > 0)\n', (5330, 5337), True, 'import numpy as np\n'), ((18915, 18935), 'copy.deepcopy', 'deepcopy', (['opt_config'], {}), '(opt_config)\n', (18923, 18935), False, 'from copy import deepcopy\n'), ((1065, 1081), 'torch.abs', 'torch.abs', (['input'], {}), '(input)\n', (1074, 1081), False, 'import torch\n'), ((13491, 13506), 'torch.Tensor', 'torch.Tensor', (['b'], {}), '(b)\n', (13503, 13506), False, 'import torch\n'), ((18542, 18559), 'torch.nn.functional.mse_loss', 'mse_loss', (['yhat', 'y'], {}), '(yhat, y)\n', (18550, 18559), False, 'from torch.nn.functional import mse_loss\n'), ((790, 803), 'torch.nn.functional.relu', 'F.relu', (['input'], {}), '(input)\n', (796, 803), True, 'from torch.nn import functional as F\n'), ((2024, 2091), 'torch.Tensor', 'torch.Tensor', (['self.out_features', 'self.map_size[0]', 'self.map_size[1]'], {}), '(self.out_features, self.map_size[0], self.map_size[1])\n', (2036, 2091), False, 'import torch\n'), ((2150, 2199), 'torch.Tensor', 'torch.Tensor', (['self.out_features', 'self.in_channels'], {}), '(self.out_features, self.in_channels)\n', (2162, 2199), False, 'import torch\n'), ((2570, 2635), 'math.sqrt', 'math.sqrt', (['(self.in_channels * self.map_size[0] * self.map_size[1])'], {}), '(self.in_channels * self.map_size[0] * self.map_size[1])\n', (2579, 2635), False, 'import math\n'), ((5437, 5446), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (5443, 5446), True, 'import numpy as np\n'), ((6135, 6158), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (6152, 6158), False, 'import torch\n'), ((6171, 6203), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (6197, 6203), False, 'import torch\n'), ((7845, 7858), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (7856, 7858), False, 'from torch import nn, optim\n'), ((12798, 12822), 'collections.OrderedDict', 'OrderedDict', (['module_list'], {}), 
'(module_list)\n', (12809, 12822), False, 'from collections import OrderedDict\n'), ((13439, 13453), 'numpy.isfinite', 'np.isfinite', (['b'], {}), '(b)\n', (13450, 13453), True, 'import numpy as np\n'), ((2269, 2300), 'torch.Tensor', 'torch.Tensor', (['self.out_features'], {}), '(self.out_features)\n', (2281, 2300), False, 'import torch\n'), ((3286, 3316), 'torch.abs', 'torch.abs', (['weight_feature_view'], {}), '(weight_feature_view)\n', (3295, 3316), False, 'import torch\n'), ((3520, 3550), 'torch.abs', 'torch.abs', (['weight_spatial_view'], {}), '(weight_spatial_view)\n', (3529, 3550), False, 'import torch\n'), ((7914, 7923), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7921, 7923), False, 'from torch import nn, optim\n'), ((11585, 11606), 'collections.OrderedDict', 'OrderedDict', (['conv_all'], {}), '(conv_all)\n', (11596, 11606), False, 'from collections import OrderedDict\n'), ((9111, 9307), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': "conv_this_layer['out_channel']", 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'bias': "(not conv_this_layer['bn'])", 'dilation': 'dilation'}), "(in_channels=in_channels, out_channels=conv_this_layer[\n 'out_channel'], kernel_size=kernel_size, stride=stride, padding=padding,\n bias=not conv_this_layer['bn'], dilation=dilation)\n", (9120, 9307), False, 'from torch import nn, optim\n'), ((15226, 15282), 'torch.nn.init.kaiming_normal', 'nn_init.kaiming_normal', (['param_value.data'], {'mode': '"""fan_out"""'}), "(param_value.data, mode='fan_out')\n", (15248, 15282), True, 'from torch.nn import init as nn_init\n'), ((16570, 16592), 'torch.sum', 'torch.sum', (['(w_this ** 2)'], {}), '(w_this ** 2)\n', (16579, 16592), False, 'import torch\n'), ((17279, 17301), 'torch.sum', 'torch.sum', (['(w_this ** 2)'], {}), '(w_this ** 2)\n', (17288, 17301), False, 'import torch\n'), ((18243, 18273), 'torch.sum', 'torch.sum', (['(fc_module.bias ** 2)'], {}), '(fc_module.bias ** 
2)\n', (18252, 18273), False, 'import torch\n'), ((9862, 9988), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', ([], {'num_features': "conv_this_layer['out_channel']", 'eps': 'bn_eps', 'momentum': '(0.1)', 'affine': "conv_this_layer['bn_affine']"}), "(num_features=conv_this_layer['out_channel'], eps=bn_eps,\n momentum=0.1, affine=conv_this_layer['bn_affine'])\n", (9876, 9988), False, 'from torch import nn, optim\n'), ((12263, 12316), 'torch.nn.Linear', 'nn.Linear', (['(map_size[0] * map_size[1] * out_channel)', 'n'], {}), '(map_size[0] * map_size[1] * out_channel, n)\n', (12272, 12316), False, 'from torch import nn, optim\n'), ((12380, 12448), 'torch.nn.Linear', 'nn.Linear', (['(map_size[0] * map_size[1] * out_channel)', "fc_config['mlp']"], {}), "(map_size[0] * map_size[1] * out_channel, fc_config['mlp'])\n", (12389, 12448), False, 'from torch import nn, optim\n'), ((12602, 12632), 'torch.nn.Linear', 'nn.Linear', (["fc_config['mlp']", 'n'], {}), "(fc_config['mlp'], n)\n", (12611, 12632), False, 'from torch import nn, optim\n'), ((12731, 12765), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': "fc_config['dropout']"}), "(p=fc_config['dropout'])\n", (12741, 12765), False, 'from torch import nn, optim\n'), ((16667, 16684), 'torch.abs', 'torch.abs', (['w_this'], {}), '(w_this)\n', (16676, 16684), False, 'import torch\n'), ((16805, 16827), 'torch.sum', 'torch.sum', (['(m.bias ** 2)'], {}), '(m.bias ** 2)\n', (16814, 16827), False, 'import torch\n'), ((17436, 17453), 'torch.abs', 'torch.abs', (['w_this'], {}), '(w_this)\n', (17445, 17453), False, 'import torch\n'), ((17751, 17775), 'torch.sum', 'torch.sum', (['(w_this_1 ** 2)'], {}), '(w_this_1 ** 2)\n', (17760, 17775), False, 'import torch\n'), ((17833, 17857), 'torch.sum', 'torch.sum', (['(w_this_2 ** 2)'], {}), '(w_this_2 ** 2)\n', (17842, 17857), False, 'import torch\n'), ((18382, 18407), 'torch.abs', 'torch.abs', (['fc_module.bias'], {}), '(fc_module.bias)\n', (18391, 18407), False, 'import torch\n'), ((18717, 18740), 
'torch.log', 'torch.log', (['(yhat + 1e-05)'], {}), '(yhat + 1e-05)\n', (18726, 18740), False, 'import torch\n'), ((10704, 10823), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': "pool_config['kernel_size']", 'stride': "pool_config['stride']", 'padding': "pool_config['padding']"}), "(kernel_size=pool_config['kernel_size'], stride=pool_config[\n 'stride'], padding=pool_config['padding'])\n", (10716, 10823), False, 'from torch import nn, optim\n'), ((16920, 16937), 'torch.abs', 'torch.abs', (['m.bias'], {}), '(m.bias)\n', (16929, 16937), False, 'import torch\n'), ((17956, 17975), 'torch.abs', 'torch.abs', (['w_this_1'], {}), '(w_this_1)\n', (17965, 17975), False, 'import torch\n'), ((18038, 18057), 'torch.abs', 'torch.abs', (['w_this_2'], {}), '(w_this_2)\n', (18047, 18057), False, 'import torch\n'), ((11078, 11197), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': "pool_config['kernel_size']", 'stride': "pool_config['stride']", 'padding': "pool_config['padding']"}), "(kernel_size=pool_config['kernel_size'], stride=pool_config[\n 'stride'], padding=pool_config['padding'])\n", (11090, 11197), False, 'from torch import nn, optim\n')] |
import cv2
import glob, os
import numpy as np
import re
import fnmatch
import pickle
import random
from shutil import copy, copyfile
import json
def saveAnnotation(jointCamPath, positions):
    """Write 2D joint annotations to ``jointCamPath``.

    One line per joint, formatted as "<label> <x> <y>", in the fixed label
    order below (finger knuckles F4/F3/F1/F2, thumb, then the palm).

    :param jointCamPath: output text file path.
    :param positions: sequence of 21 indexable (x, y) positions, one per
        label, in the order of ``labels``.
    """
    # fixed label order expected by downstream readers of these files
    labels = [
        "F4_KNU1_A", "F4_KNU1_B", "F4_KNU2_A", "F4_KNU3_A",
        "F3_KNU1_A", "F3_KNU1_B", "F3_KNU2_A", "F3_KNU3_A",
        "F1_KNU1_A", "F1_KNU1_B", "F1_KNU2_A", "F1_KNU3_A",
        "F2_KNU1_A", "F2_KNU1_B", "F2_KNU2_A", "F2_KNU3_A",
        "TH_KNU1_A", "TH_KNU1_B", "TH_KNU2_A", "TH_KNU3_A",
        "PALM_POSITION",
    ]
    # context manager guarantees the handle is closed even if a write fails
    # (the original left the file open on error)
    with open(jointCamPath, 'w') as fOut:
        for label, pos in zip(labels, positions):
            # pos may be any indexable pair; only x and y are written
            fOut.write(label + " " + str(pos[0]) + " " + str(pos[1]) + "\n")
def natural_sort(l):
    """Sort strings so that embedded numbers compare numerically
    ('frame2' < 'frame10'); non-numeric parts compare case-insensitively."""
    def piece_key(s):
        # split keeps the digit runs, so each string becomes an alternating
        # list of lowercase text chunks and ints
        return [int(chunk) if chunk.isdigit() else chunk.lower()
                for chunk in re.split('([0-9]+)', s)]
    return sorted(l, key=piece_key)
def recursive_glob(rootdir='.', pattern='*'):
    """Recursively collect file paths under ``rootdir`` whose basename
    matches the shell-style ``pattern`` (fnmatch semantics).

    :param rootdir: directory to walk (default: current directory).
    :param pattern: fnmatch pattern applied to file names only.
    :return: list of matching paths, in os.walk order.
    """
    # comprehension replaces the original append loop (same traversal order)
    return [os.path.join(root, filename)
            for root, _dirnames, filenames in os.walk(rootdir)
            for filename in fnmatch.filter(filenames, pattern)]
def readAnnotation3D(file):
    """Parse a joint-annotation text file into an (N, 3) float array.

    Each line reads "<label> <x> <y> <z>"; the label is dropped and the three
    coordinates are kept in file order.

    :param file: path to the annotation file.
    :return: numpy array of shape (N, 3), dtype float.
    """
    annotations = []
    # with-block ensures the handle is closed (the original leaked it)
    with open(file, "r") as f:
        for line in f:
            tokens = line.split()
            annotations.append((float(tokens[1]), float(tokens[2]), float(tokens[3])))
    return np.array(annotations, dtype=float)
def getCameraMatrix():
    """Return the 3x3 intrinsic camera matrix used for projection
    (focal lengths Fx/Fy on the diagonal, principal point Cx/Cy)."""
    fx, fy = 614.878, 615.479
    cx, cy = 313.219, 231.288
    return np.array([[fx, 0, cx],
                     [0, fy, cy],
                     [0, 0, 1]])
def getDistCoeffs():
    """Return the lens distortion coefficients, in the order that
    ``cv2.projectPoints`` expects them."""
    coeffs = [0.092701, -0.175877, -0.0035687, -0.00302299, 0]
    return np.array(coeffs)
def viz(inpath, outpath):
    """
    Convert this dataset to the standard ezxr format output: for every color
    frame, project the 3D joint annotations into the image plane with the
    per-webcam calibration, and emit a .jpg copy plus a .json keypoint file.
    Args:
        :param inpath: path to this dataset
        :param outpath: output path of the formatted files
    Returns:
        :return: None
    """
    pathToDataset = inpath
    cameraMatrix = getCameraMatrix()
    distCoeffs = getDistCoeffs()
    outputdir = outpath
    # iterate sequences
    for i in os.listdir(pathToDataset):
        # read the color frames
        path = pathToDataset + i + "/"
        colorFrames = recursive_glob(path, "*_webcam_[0-9]*")
        colorFrames = natural_sort(colorFrames)
        print("There are", len(colorFrames), "color frames on the sequence data_" + str(i))
        # read the calibrations for each camera
        print("Loading calibration for ../calibrations/" + i)
        # f = open("../calibrations/data_" + str(i) + "/webcam_1/rvec.pkl", "r")
        # NOTE(review): pickle.load on a text-mode handle ("r") fails under
        # Python 3 (pickle requires binary "rb"), and none of these handles
        # are closed -- confirm the intended Python version.
        c_0_0 = pickle.load(open("../calibrations/" + i + "/webcam_1/rvec.pkl", "r"))
        c_0_1 = pickle.load(open("../calibrations/" + i + "/webcam_1/tvec.pkl", "r"))
        c_1_0 = pickle.load(open("../calibrations/" + i + "/webcam_2/rvec.pkl", "r"))
        c_1_1 = pickle.load(open("../calibrations/" + i + "/webcam_2/tvec.pkl", "r"))
        c_2_0 = pickle.load(open("../calibrations/" + i + "/webcam_3/rvec.pkl", "r"))
        c_2_1 = pickle.load(open("../calibrations/" + i + "/webcam_3/tvec.pkl", "r"))
        c_3_0 = pickle.load(open("../calibrations/" + i + "/webcam_4/rvec.pkl", "r"))
        c_3_1 = pickle.load(open("../calibrations/" + i + "/webcam_4/tvec.pkl", "r"))
        for j in range(len(colorFrames)):
            # frame paths look like <a>/<b>/<c>/<frame>_webcam_<id>.<ext>;
            # the joints file shares the <frame> prefix
            toks1 = colorFrames[j].split("/")
            toks2 = toks1[3].split("_")
            jointPath = toks1[0] + "/" + toks1[1] + "/" + toks1[2] + "/" + toks2[0] + "_joints.txt"
            points3d = readAnnotation3D(jointPath)[0:21]  # the last point is the normal
            # project 3d LM points to the image plane
            webcam_id = int(toks2[2].split(".")[0]) - 1
            # print("Calibration for webcam id:",webcam_id)
            # pick the rotation/translation calibrated for this webcam
            if webcam_id == 0:
                rvec = c_0_0
                tvec = c_0_1
            elif webcam_id == 1:
                rvec = c_1_0
                tvec = c_1_1
            elif webcam_id == 2:
                rvec = c_2_0
                tvec = c_2_1
            elif webcam_id == 3:
                rvec = c_3_0
                tvec = c_3_1
            pts2d, _ = cv2.projectPoints(points3d, rvec, tvec, cameraMatrix, distCoeffs)
            # skeleton connectivity; NOTE(review): defined but unused here
            edges = [[20, 1], [1, 0], [0, 2], [2, 3], [20, 5], [5, 4], [4, 6], [6, 7], [20, 9], [9, 8], [8, 10],
                     [10, 11], [20, 13],
                     [13, 12], [12, 14], [14, 15], [20, 17], [17, 16], [16, 18], [18, 19]]
            # axis-aligned bounding box of the projected keypoints
            max_x = 0
            min_x = 99999
            max_y = 0
            min_y = 99999
            for k in range(len(pts2d)):
                p = pts2d[k][0]
                if p[0] > max_x:
                    max_x = p[0]
                if p[0] < min_x:
                    min_x = p[0]
                if p[1] > max_y:
                    max_y = p[1]
                if p[1] < min_y:
                    min_y = p[1]
            hand_bbox = [min_x, max_x, min_y, max_y]
            # regroup the flat keypoint arrays per finger; 'd' is depth,
            # filled with -1 since it is unknown here. f52/f53 appear to be
            # placeholders for a finger this dataset does not annotate.
            f52 = {'x': -1, 'y': -1, 'd': -1}
            f12 = [{'x': pts2d[1, 0, 0], 'y': pts2d[1, 0, 1], 'd': -1},
                   {'x': pts2d[0, 0, 0], 'y': pts2d[0, 0, 1], 'd': -1},
                   {'x': pts2d[2, 0, 0], 'y': pts2d[2, 0, 1], 'd': -1},
                   {'x': pts2d[3, 0, 0], 'y': pts2d[3, 0, 1], 'd': -1}]
            f22 = [{'x': pts2d[5, 0, 0], 'y': pts2d[5, 0, 1], 'd': -1},
                   {'x': pts2d[4, 0, 0], 'y': pts2d[4, 0, 1], 'd': -1},
                   {'x': pts2d[6, 0, 0], 'y': pts2d[6, 0, 1], 'd': -1},
                   {'x': pts2d[7, 0, 0], 'y': pts2d[7, 0, 1], 'd': -1}]
            f42 = [{'x': pts2d[9, 0, 0], 'y': pts2d[9, 0, 1], 'd': -1},
                   {'x': pts2d[8, 0, 0], 'y': pts2d[8, 0, 1], 'd': -1},
                   {'x': pts2d[10, 0, 0], 'y': pts2d[10, 0, 1], 'd': -1},
                   {'x': pts2d[11, 0, 0], 'y': pts2d[11, 0, 1], 'd': -1}]
            f32 = [{'x': pts2d[13, 0, 0], 'y': pts2d[13, 0, 1], 'd': -1},
                   {'x': pts2d[12, 0, 0], 'y': pts2d[12, 0, 1], 'd': -1},
                   {'x': pts2d[14, 0, 0], 'y': pts2d[14, 0, 1], 'd': -1},
                   {'x': pts2d[15, 0, 0], 'y': pts2d[15, 0, 1], 'd': -1}]
            f02 = [{'x': pts2d[17, 0, 0], 'y': pts2d[17, 0, 1], 'd': -1},
                   {'x': pts2d[16, 0, 0], 'y': pts2d[16, 0, 1], 'd': -1},
                   {'x': pts2d[18, 0, 0], 'y': pts2d[18, 0, 1], 'd': -1},
                   {'x': pts2d[19, 0, 0], 'y': pts2d[19, 0, 1], 'd': -1}]
            # same grouping for the 3D annotations
            f53 = {'x': -1, 'y': -1, 'd': -1}
            f13 = [{'x': points3d[1, 0], 'y': points3d[1, 1], 'z': points3d[1, 2]},
                   {'x': points3d[0, 0], 'y': points3d[0, 1], 'z': points3d[0, 2]},
                   {'x': points3d[2, 0], 'y': points3d[2, 1], 'z': points3d[2, 2]},
                   {'x': points3d[3, 0], 'y': points3d[3, 1], 'z': points3d[3, 2]}]
            f23 = [{'x': points3d[5, 0], 'y': points3d[5, 1], 'z': points3d[5, 2]},
                   {'x': points3d[4, 0], 'y': points3d[4, 1], 'z': points3d[4, 2]},
                   {'x': points3d[6, 0], 'y': points3d[6, 1], 'z': points3d[6, 2]},
                   {'x': points3d[7, 0], 'y': points3d[7, 1], 'z': points3d[7, 2]}]
            f43 = [{'x': points3d[9, 0], 'y': points3d[9, 1], 'z': points3d[9, 2]},
                   {'x': points3d[8, 0], 'y': points3d[8, 1], 'z': points3d[8, 2]},
                   {'x': points3d[10, 0], 'y': points3d[10, 1], 'z': points3d[10, 2]},
                   {'x': points3d[11, 0], 'y': points3d[11, 1], 'z': points3d[11, 2]}]
            f33 = [{'x': points3d[13, 0], 'y': points3d[13, 1], 'z': points3d[13, 2]},
                   {'x': points3d[12, 0], 'y': points3d[12, 1], 'z': points3d[12, 2]},
                   {'x': points3d[14, 0], 'y': points3d[14, 1], 'z': points3d[14, 2]},
                   {'x': points3d[15, 0], 'y': points3d[15, 1], 'z': points3d[15, 2]}]
            f03 = [{'x': points3d[17, 0], 'y': points3d[17, 1], 'z': points3d[17, 2]},
                   {'x': points3d[16, 0], 'y': points3d[16, 1], 'z': points3d[16, 2]},
                   {'x': points3d[18, 0], 'y': points3d[18, 1], 'z': points3d[18, 2]},
                   {'x': points3d[19, 0], 'y': points3d[19, 1], 'z': points3d[19, 2]}]
            # assemble the output record; point 20 is the palm center
            dict_kp = {'palm_center': points3d[20, :].tolist(), 'is_left': -1, 'hand_bbox': hand_bbox,
                       'f52': [f52 for _ in range(4)], 'f53': [f53 for _ in range(4)],
                       'f03': f03, 'f13': f13, 'f23': f23, 'f33': f33, 'f43': f43,
                       'f02': f02, 'f12': f12, 'f22': f22, 'f32': f32, 'f42': f42}
            # NOTE(review): this rebinding shadows the 'outpath' parameter,
            # and the [19:-4] slice assumes a fixed-length path prefix --
            # confirm against the actual dataset layout.
            outpath = outputdir + colorFrames[j][19:-4]
            outpath_img = outpath + '.jpg'
            outpath_json = outpath + '.json'
            copyfile(colorFrames[j], outpath_img)
            with open(outpath_json, 'w') as outfile:
                json.dump(dict_kp, outfile)
def main():
    """Script entry point: convert the annotated-frames dataset into the
    ezxr-format output directory."""
    dataset_path = "../annotated_frames/"
    output_path = "../out_2"
    viz(dataset_path, output_path)
if __name__ == "__main__":
    main()
| [
"re.split",
"os.listdir",
"cv2.projectPoints",
"json.dump",
"os.path.join",
"numpy.array",
"shutil.copyfile",
"fnmatch.filter",
"os.walk"
] | [((2438, 2454), 'os.walk', 'os.walk', (['rootdir'], {}), '(rootdir)\n', (2445, 2454), False, 'import glob, os\n'), ((2768, 2793), 'numpy.array', 'np.array', (['an'], {'dtype': 'float'}), '(an, dtype=float)\n', (2776, 2793), True, 'import numpy as np\n'), ((2906, 2953), 'numpy.array', 'np.array', (['[[Fx, 0, Cx], [0, Fy, Cy], [0, 0, 1]]'], {}), '([[Fx, 0, Cx], [0, Fy, Cy], [0, 0, 1]])\n', (2914, 2953), True, 'import numpy as np\n'), ((3070, 3129), 'numpy.array', 'np.array', (['[0.092701, -0.175877, -0.0035687, -0.00302299, 0]'], {}), '([0.092701, -0.175877, -0.0035687, -0.00302299, 0])\n', (3078, 3129), True, 'import numpy as np\n'), ((3566, 3591), 'os.listdir', 'os.listdir', (['pathToDataset'], {}), '(pathToDataset)\n', (3576, 3591), False, 'import glob, os\n'), ((2480, 2514), 'fnmatch.filter', 'fnmatch.filter', (['filenames', 'pattern'], {}), '(filenames, pattern)\n', (2494, 2514), False, 'import fnmatch\n'), ((5620, 5685), 'cv2.projectPoints', 'cv2.projectPoints', (['points3d', 'rvec', 'tvec', 'cameraMatrix', 'distCoeffs'], {}), '(points3d, rvec, tvec, cameraMatrix, distCoeffs)\n', (5637, 5685), False, 'import cv2\n'), ((10245, 10282), 'shutil.copyfile', 'copyfile', (['colorFrames[j]', 'outpath_img'], {}), '(colorFrames[j], outpath_img)\n', (10253, 10282), False, 'from shutil import copy, copyfile\n'), ((2270, 2295), 're.split', 're.split', (['"""([0-9]+)"""', 'key'], {}), "('([0-9]+)', key)\n", (2278, 2295), False, 'import re\n'), ((2543, 2571), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (2555, 2571), False, 'import glob, os\n'), ((10352, 10379), 'json.dump', 'json.dump', (['dict_kp', 'outfile'], {}), '(dict_kp, outfile)\n', (10361, 10379), False, 'import json\n')] |
# import only necessary functions from modules to reduce load
from fdtd_venv import fdtd_mod as fdtd
from numpy import arange, array, where
from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, suptitle, show, ylim, figure
from scipy.optimize import curve_fit
from os import path
from sys import argv
from time import time
def fit_func(x, a, b, c):
    """Quadratic model ``a*x**2 + b*x + c`` used as the curve_fit target
    for the phase profiles. Works elementwise on arrays."""
    quadratic_term = a * x ** 2
    linear_term = b * x
    return quadratic_term + linear_term + c
# ====== experiment configuration ======
start_time = time()
animate = False  # per-step visualization vs. one full run
run_time = 400  # number of simulation time steps
saveStuff = False  # persist frames/data under ./fdtd_output
results = True  # run the phase-profile analysis after the simulation
transmit_detectors = 32
# grid
# 200 cells wide, 15.5 um tall, 77.5 nm spacing (= 1550 nm / 20 per cell)
grid = fdtd.Grid(shape=(200, 15.5e-6, 1), grid_spacing=77.5e-9)
if saveStuff:
    grid.save_simulation(argv[1] if len(argv) > 1 else None)
# objects
# source
#grid[15, 99, 0] = fdtd.PointSource(period = 1550e-9 / (3e8), name="source1")
# point source with the optical period of 1550 nm light (wavelength / c)
grid[15, 100, 0] = fdtd.PointSource(period = 1550e-9 / (3e8), name="source2")
# detectors
#grid[80:200, 80:120, 0] = fdtd.BlockDetector(name="BlockDetector")
#grid[80:200, 100, 0] = fdtd.LineDetector(name="LineDetectorVert")
# one incident-side line detector just past the source ...
grid[19, 75:125, 0] = fdtd.LineDetector(name="LineDetectorHorIncident")
# ... and a stack of transmit-side detectors every 5 cells from x=30
for i in range(transmit_detectors):
    grid[30+5*i, 75:125, 0] = fdtd.LineDetector(name="LineDetectorHorEmergent_"+str(30+5*i))
# x boundaries (10-cell absorbing PML on every edge)
grid[0:10, :, :] = fdtd.PML(name="pml_xlow")
grid[-10:, :, :] = fdtd.PML(name="pml_xhigh")
# y boundaries
grid[:, 0:10, :] = fdtd.PML(name="pml_ylow")
grid[:, -10:, :] = fdtd.PML(name="pml_yhigh")
# Saving grid geometry (dimensions expressed in wavelength units)
if saveStuff:
    with open(path.join("./fdtd_output", grid.folder, "grid.txt"), "w") as f:
        f.write(str(grid))
        wavelength = 3e8/grid.source.frequency
        wavelengthUnits = wavelength/grid.grid_spacing
        GD = array([grid.x, grid.y, grid.z])
        gridRange = [arange(x/grid.grid_spacing) for x in GD]
        objectRange = array([[gridRange[0][x.x], gridRange[1][x.y], gridRange[2][x.z]] for x in grid.objects]).T
        f.write("\n\nGrid details (in wavelength scale):")
        f.write("\n\tGrid dimensions: ")
        f.write(str(GD/wavelength))
        f.write("\n\tSource dimensions: ")
        f.write(str(array([grid.source.x[-1] - grid.source.x[0] + 1, grid.source.y[-1] - grid.source.y[0] + 1, grid.source.z[-1] - grid.source.z[0] + 1])/wavelengthUnits))
        f.write("\n\tObject dimensions: ")
        f.write(str([(max(map(max, x)) - min(map(min, x)) + 1)/wavelengthUnits for x in objectRange]))
# ====== run the simulation ======
if animate:
    # step one time unit at a time so every frame can be visualized
    if run_time > 0:
        for i in range(run_time):
            grid.run(total_time=1)
            if saveStuff:
                grid.visualize(z=0, animate=True, index=i, save=True, folder=grid.folder)
            else:
                grid.visualize(z=0, animate=True)
    if saveStuff:
        # stitch the saved frames into a video and keep the raw data
        grid.generate_video(delete_frames=True)
        grid.save_data()
else:
    # single full-length run; visualize only the final state
    if run_time > 0:
        grid.run(total_time=run_time)
    if saveStuff:
        grid.visualize(z=0, show=True, index=0, save=True, folder=grid.folder)
        grid.save_data()
    else:
        grid.visualize(z=0, show=True)
# ====== phase-profile analysis and plots ======
if results:
    phase_profile_incident = []
    phase_profile_emergent = [[] for j in range(transmit_detectors)]  # number of transmit-side detectors
    # keep only the last full source period of E-field samples per detector
    det_vals_incident = array(grid.detectors[0].detector_values()['E'][-grid.sources[0].period:])
    det_vals_emergent = [array(grid.detectors[i].detector_values()['E'][-grid.sources[0].period:]) for i in range(1, len(phase_profile_emergent)+1)]
    for i in range(len(grid.detectors[0].x)):
        # where(...)[-1][-1]: time index of the last Ez maximum within the
        # period; converted below to a phase as a fraction of the period
        period_phase = grid.sources[0].period - where(det_vals_incident[:, i, 2] == max(det_vals_incident[:, i, 2]))[-1][-1]
        phase_profile_incident.append(((grid.sources[0].period/4 + period_phase)/grid.sources[0].period)%1)
        for j in range(len(phase_profile_emergent)):
            period_phase = grid.sources[0].period - where(det_vals_emergent[j][:, i, 2] == max(det_vals_emergent[j][:, i, 2]))[-1][-1]
            phase_profile_emergent[j].append(((grid.sources[0].period/4 + period_phase)/grid.sources[0].period)%1)
    # 50 detector cells (75:125) recentered on the source axis
    xdata = array([x for x in range(-25, 25)])
    #phase_difference = (-array(phase_profile_emergent) + array(phase_profile_incident) + 2*pi)%(2*pi)
    #popt, _ = curve_fit(fit_func, xdata, phase_difference)
    figure(num="phase_profile")
    plot(xdata/grid.sources[0].period, phase_profile_incident, label="Incident phase")
    for j in range(len(phase_profile_emergent)):
        plot(xdata/grid.sources[0].period, phase_profile_emergent[j], label="Emergent phase"+str(j))
    #plot(xdata/grid.sources[0].period, phase_difference, label="Phase difference")
    #plot(xdata/grid.sources[0].period, fit_func(xdata, *popt), label="Curve-fit for Phase difference")
    # overlay a quadratic fit on each emergent profile
    for j in range(len(phase_profile_emergent)):
        popt, _ = curve_fit(fit_func, xdata, phase_profile_emergent[j])
        plot(xdata/grid.sources[0].period, fit_func(xdata, *popt), label="Curve-fit for Phase in detector"+str(j))
        #print(popt)
    xlabel("x/lambda")
    ylabel("phase/2pi")
    legend()
    #title("Curve-fit Phase difference: %5.6f*x**2 + %5.6f*x + %5.6f" % tuple(popt))
    show()
    # grid of curve-fit-only panels, four detectors per subplot
    figure(num="phase_profile_curve_fit")
    for j in range(len(phase_profile_emergent)):
        if j%4 == 0:
            # NOTE(review): j/4+1 is a float; recent matplotlib versions
            # require an integer subplot index -- confirm the target version.
            subplot(2, 4, j/4+1)
            title("Detectors %1i to %2i" % tuple([j, j+3]))
            xlabel("x/lambda")
            ylabel("phase/2pi")
            ylim(0, 1)
        popt, _ = curve_fit(fit_func, xdata, phase_profile_emergent[j])
        plot(xdata/grid.sources[0].period, fit_func(xdata, *popt), label="Curve-fit for Phase in detector"+str(j))
        print(popt)
    #legend()
    suptitle("Curve-fiting (Consecutive order of detectors in each plot, blue: 1, orange: 2, green: 3, red: 4)")
    show()
    # 'bent' = phase difference between the middle and the end of a detector
    figure(num="phase_bent")
    plot([x for x in range(30, 30+5*len(phase_profile_emergent), 5)], [detector[len(detector)//2] - detector[0] for detector in phase_profile_emergent], label="Phase profile 'bent'")
    plot([x for x in range(30, 30+5*len(phase_profile_emergent), 5)], [0 for x in range(len(phase_profile_emergent))], label="Zero line")
    xlabel("detector position")
    title("Measure of 'bent' of different phase profiles as difference of phases at mid and at end of each detector")
    legend()
    show()
end_time = time()
print("Runtime:", end_time-start_time)
| [
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.arange",
"fdtd_venv.fdtd_mod.PointSource",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"fdtd_venv.fdtd_mod.Grid",
"matplotlib.pyplot.ylim",
"fdtd_venv.fdtd_mod.LineDetector",
"matplotlib.pyplot.title",
"time.time",
"matplotlib.pyplot.... | [((413, 419), 'time.time', 'time', ([], {}), '()\n', (417, 419), False, 'from time import time\n'), ((524, 582), 'fdtd_venv.fdtd_mod.Grid', 'fdtd.Grid', ([], {'shape': '(200, 1.55e-05, 1)', 'grid_spacing': '(7.75e-08)'}), '(shape=(200, 1.55e-05, 1), grid_spacing=7.75e-08)\n', (533, 582), True, 'from fdtd_venv import fdtd_mod as fdtd\n'), ((771, 834), 'fdtd_venv.fdtd_mod.PointSource', 'fdtd.PointSource', ([], {'period': '(1.55e-06 / 300000000.0)', 'name': '"""source2"""'}), "(period=1.55e-06 / 300000000.0, name='source2')\n", (787, 834), True, 'from fdtd_venv import fdtd_mod as fdtd\n'), ((1000, 1049), 'fdtd_venv.fdtd_mod.LineDetector', 'fdtd.LineDetector', ([], {'name': '"""LineDetectorHorIncident"""'}), "(name='LineDetectorHorIncident')\n", (1017, 1049), True, 'from fdtd_venv import fdtd_mod as fdtd\n'), ((1211, 1236), 'fdtd_venv.fdtd_mod.PML', 'fdtd.PML', ([], {'name': '"""pml_xlow"""'}), "(name='pml_xlow')\n", (1219, 1236), True, 'from fdtd_venv import fdtd_mod as fdtd\n'), ((1256, 1282), 'fdtd_venv.fdtd_mod.PML', 'fdtd.PML', ([], {'name': '"""pml_xhigh"""'}), "(name='pml_xhigh')\n", (1264, 1282), True, 'from fdtd_venv import fdtd_mod as fdtd\n'), ((1318, 1343), 'fdtd_venv.fdtd_mod.PML', 'fdtd.PML', ([], {'name': '"""pml_ylow"""'}), "(name='pml_ylow')\n", (1326, 1343), True, 'from fdtd_venv import fdtd_mod as fdtd\n'), ((1363, 1389), 'fdtd_venv.fdtd_mod.PML', 'fdtd.PML', ([], {'name': '"""pml_yhigh"""'}), "(name='pml_yhigh')\n", (1371, 1389), True, 'from fdtd_venv import fdtd_mod as fdtd\n'), ((5789, 5795), 'time.time', 'time', ([], {}), '()\n', (5793, 5795), False, 'from time import time\n'), ((3907, 3934), 'matplotlib.pyplot.figure', 'figure', ([], {'num': '"""phase_profile"""'}), "(num='phase_profile')\n", (3913, 3934), False, 'from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, suptitle, show, ylim, figure\n'), ((3936, 4025), 'matplotlib.pyplot.plot', 'plot', (['(xdata / grid.sources[0].period)', 
'phase_profile_incident'], {'label': '"""Incident phase"""'}), "(xdata / grid.sources[0].period, phase_profile_incident, label=\n 'Incident phase')\n", (3940, 4025), False, 'from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, suptitle, show, ylim, figure\n'), ((4579, 4597), 'matplotlib.pyplot.xlabel', 'xlabel', (['"""x/lambda"""'], {}), "('x/lambda')\n", (4585, 4597), False, 'from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, suptitle, show, ylim, figure\n'), ((4599, 4618), 'matplotlib.pyplot.ylabel', 'ylabel', (['"""phase/2pi"""'], {}), "('phase/2pi')\n", (4605, 4618), False, 'from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, suptitle, show, ylim, figure\n'), ((4620, 4628), 'matplotlib.pyplot.legend', 'legend', ([], {}), '()\n', (4626, 4628), False, 'from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, suptitle, show, ylim, figure\n'), ((4712, 4718), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (4716, 4718), False, 'from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, suptitle, show, ylim, figure\n'), ((4721, 4758), 'matplotlib.pyplot.figure', 'figure', ([], {'num': '"""phase_profile_curve_fit"""'}), "(num='phase_profile_curve_fit')\n", (4727, 4758), False, 'from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, suptitle, show, ylim, figure\n'), ((5155, 5273), 'matplotlib.pyplot.suptitle', 'suptitle', (['"""Curve-fiting (Consecutive order of detectors in each plot, blue: 1, orange: 2, green: 3, red: 4)"""'], {}), "(\n 'Curve-fiting (Consecutive order of detectors in each plot, blue: 1, orange: 2, green: 3, red: 4)'\n )\n", (5163, 5273), False, 'from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, suptitle, show, ylim, figure\n'), ((5265, 5271), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (5269, 5271), False, 'from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, 
suptitle, show, ylim, figure\n'), ((5275, 5299), 'matplotlib.pyplot.figure', 'figure', ([], {'num': '"""phase_bent"""'}), "(num='phase_bent')\n", (5281, 5299), False, 'from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, suptitle, show, ylim, figure\n'), ((5616, 5643), 'matplotlib.pyplot.xlabel', 'xlabel', (['"""detector position"""'], {}), "('detector position')\n", (5622, 5643), False, 'from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, suptitle, show, ylim, figure\n'), ((5645, 5768), 'matplotlib.pyplot.title', 'title', (['"""Measure of \'bent\' of different phase profiles as difference of phases at mid and at end of each detector"""'], {}), '(\n "Measure of \'bent\' of different phase profiles as difference of phases at mid and at end of each detector"\n )\n', (5650, 5768), False, 'from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, suptitle, show, ylim, figure\n'), ((5760, 5768), 'matplotlib.pyplot.legend', 'legend', ([], {}), '()\n', (5766, 5768), False, 'from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, suptitle, show, ylim, figure\n'), ((5770, 5776), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (5774, 5776), False, 'from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, suptitle, show, ylim, figure\n'), ((1621, 1652), 'numpy.array', 'array', (['[grid.x, grid.y, grid.z]'], {}), '([grid.x, grid.y, grid.z])\n', (1626, 1652), False, 'from numpy import arange, array, where\n'), ((4400, 4453), 'scipy.optimize.curve_fit', 'curve_fit', (['fit_func', 'xdata', 'phase_profile_emergent[j]'], {}), '(fit_func, xdata, phase_profile_emergent[j])\n', (4409, 4453), False, 'from scipy.optimize import curve_fit\n'), ((4966, 5019), 'scipy.optimize.curve_fit', 'curve_fit', (['fit_func', 'xdata', 'phase_profile_emergent[j]'], {}), '(fit_func, xdata, phase_profile_emergent[j])\n', (4975, 5019), False, 'from scipy.optimize import curve_fit\n'), ((1439, 1490), 
'os.path.join', 'path.join', (['"""./fdtd_output"""', 'grid.folder', '"""grid.txt"""'], {}), "('./fdtd_output', grid.folder, 'grid.txt')\n", (1448, 1490), False, 'from os import path\n'), ((1668, 1697), 'numpy.arange', 'arange', (['(x / grid.grid_spacing)'], {}), '(x / grid.grid_spacing)\n', (1674, 1697), False, 'from numpy import arange, array, where\n'), ((1725, 1817), 'numpy.array', 'array', (['[[gridRange[0][x.x], gridRange[1][x.y], gridRange[2][x.z]] for x in grid.\n objects]'], {}), '([[gridRange[0][x.x], gridRange[1][x.y], gridRange[2][x.z]] for x in\n grid.objects])\n', (1730, 1817), False, 'from numpy import arange, array, where\n'), ((4823, 4847), 'matplotlib.pyplot.subplot', 'subplot', (['(2)', '(4)', '(j / 4 + 1)'], {}), '(2, 4, j / 4 + 1)\n', (4830, 4847), False, 'from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, suptitle, show, ylim, figure\n'), ((4898, 4916), 'matplotlib.pyplot.xlabel', 'xlabel', (['"""x/lambda"""'], {}), "('x/lambda')\n", (4904, 4916), False, 'from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, suptitle, show, ylim, figure\n'), ((4920, 4939), 'matplotlib.pyplot.ylabel', 'ylabel', (['"""phase/2pi"""'], {}), "('phase/2pi')\n", (4926, 4939), False, 'from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, suptitle, show, ylim, figure\n'), ((4943, 4953), 'matplotlib.pyplot.ylim', 'ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (4947, 4953), False, 'from matplotlib.pyplot import subplot, plot, xlabel, ylabel, legend, title, suptitle, show, ylim, figure\n'), ((1985, 2123), 'numpy.array', 'array', (['[grid.source.x[-1] - grid.source.x[0] + 1, grid.source.y[-1] - grid.source.\n y[0] + 1, grid.source.z[-1] - grid.source.z[0] + 1]'], {}), '([grid.source.x[-1] - grid.source.x[0] + 1, grid.source.y[-1] - grid.\n source.y[0] + 1, grid.source.z[-1] - grid.source.z[0] + 1])\n', (1990, 2123), False, 'from numpy import arange, array, where\n')] |
import unittest
import numpy as np
import torch
from sklearn.metrics import accuracy_score
from pytorch_adapt.validators import AccuracyValidator, ScoreHistory
class TestAccuracyValidator(unittest.TestCase):
def test_accuracy_validator(self):
dataset_size = 1000
ignore_epoch = 0
for start in [-1, 0, 1]:
for step in [1, 2]:
validator = AccuracyValidator()
validator = ScoreHistory(validator, ignore_epoch=ignore_epoch)
correct_scores = []
for i, epoch in enumerate(range(start, 5, step)):
labels = torch.randint(0, 10, (dataset_size,))
logits = torch.randn(dataset_size, 10)
preds = torch.softmax(logits, dim=1)
kwargs = {"src_val": {"labels": labels, "preds": preds}}
score = validator.score(epoch=epoch, **kwargs)
correct_score = accuracy_score(
labels.numpy(), np.argmax(logits.numpy(), axis=1)
)
if epoch != ignore_epoch:
correct_scores.append(correct_score)
self.assertTrue(
validator.score_history_ignore_epoch[validator.best_idx]
== validator.best_score
)
self.assertTrue(
validator.epochs_ignore_epoch[validator.best_idx]
== validator.best_epoch
)
self.assertTrue(
np.isclose(validator.best_score, max(correct_scores))
)
self.assertTrue(
np.isclose(validator.best_idx, np.argmax(correct_scores))
)
elif i == 0 and epoch == ignore_epoch:
self.assertTrue(validator.best_epoch is None)
self.assertTrue(validator.best_score is None)
self.assertTrue(np.isclose(score, correct_score))
self.assertTrue(validator.latest_score == score)
| [
"pytorch_adapt.validators.AccuracyValidator",
"numpy.isclose",
"numpy.argmax",
"torch.softmax",
"torch.randint",
"pytorch_adapt.validators.ScoreHistory",
"torch.randn"
] | [((398, 417), 'pytorch_adapt.validators.AccuracyValidator', 'AccuracyValidator', ([], {}), '()\n', (415, 417), False, 'from pytorch_adapt.validators import AccuracyValidator, ScoreHistory\n'), ((446, 496), 'pytorch_adapt.validators.ScoreHistory', 'ScoreHistory', (['validator'], {'ignore_epoch': 'ignore_epoch'}), '(validator, ignore_epoch=ignore_epoch)\n', (458, 496), False, 'from pytorch_adapt.validators import AccuracyValidator, ScoreHistory\n'), ((628, 665), 'torch.randint', 'torch.randint', (['(0)', '(10)', '(dataset_size,)'], {}), '(0, 10, (dataset_size,))\n', (641, 665), False, 'import torch\n'), ((695, 724), 'torch.randn', 'torch.randn', (['dataset_size', '(10)'], {}), '(dataset_size, 10)\n', (706, 724), False, 'import torch\n'), ((753, 781), 'torch.softmax', 'torch.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (766, 781), False, 'import torch\n'), ((2121, 2153), 'numpy.isclose', 'np.isclose', (['score', 'correct_score'], {}), '(score, correct_score)\n', (2131, 2153), True, 'import numpy as np\n'), ((1832, 1857), 'numpy.argmax', 'np.argmax', (['correct_scores'], {}), '(correct_scores)\n', (1841, 1857), True, 'import numpy as np\n')] |
import sklearn.datasets as skl_ds
import pandas as pd
import sklearn.model_selection as skl_ms
import sklearn.feature_selection as skl_fs
import sklearn.linear_model as skl_lm
import numpy as np
# Loading the dataset
boston = skl_ds.load_boston()
X = pd.DataFrame(boston.data, columns=boston.feature_names) # Feature Matrix
Y = pd.DataFrame(boston.target, columns=["MEDV"]) # Target Variable
# Spliting the dataset into training subset (70%) and testinf subset (%30)
X_train, X_test, Y_train, Y_test = skl_ms.train_test_split(
X, Y, test_size=0.3, random_state=0)
# initiating the score list
score_list = []
selected_feature_masks = []
# iterating over different numbers of features to be selected
for n in range(1, len(X.columns)):
# constructing the regression model
model = skl_lm.LinearRegression()
# initiate the RFE model
rfe_selector = skl_fs.RFE(model, n_features_to_select=n)
# finding the most relevant features based on recursively fitting the "model" object passed in the previous step; and removing the non-selected features from X_train
X_train_rfe = rfe_selector.fit_transform(X_train, Y_train)
# removing the non-selected features from X_test
X_test_rfe = rfe_selector.transform(X_test)
# fitting the regression model only with the selected features
model.fit(X_train_rfe, Y_train)
# scoring the model with the test data
score = model.score(X_test_rfe, Y_test)
# storing the score value
score_list.append(score)
# storing the feature mask
selected_feature_masks.append(rfe_selector.support_)
# retrieving the name of features
features = np.array(X.columns)
score_list = np.array(score_list)
# finding the index of the maximum score -> finding the feature mask used by RFE for the maximum score -> masking the features list with the corresponding mask
SelFeat = features[selected_feature_masks[score_list.argmax()]]
# print the list of selected features
print(SelFeat)
| [
"sklearn.model_selection.train_test_split",
"sklearn.datasets.load_boston",
"numpy.array",
"sklearn.feature_selection.RFE",
"pandas.DataFrame",
"sklearn.linear_model.LinearRegression"
] | [((228, 248), 'sklearn.datasets.load_boston', 'skl_ds.load_boston', ([], {}), '()\n', (246, 248), True, 'import sklearn.datasets as skl_ds\n'), ((253, 308), 'pandas.DataFrame', 'pd.DataFrame', (['boston.data'], {'columns': 'boston.feature_names'}), '(boston.data, columns=boston.feature_names)\n', (265, 308), True, 'import pandas as pd\n'), ((331, 376), 'pandas.DataFrame', 'pd.DataFrame', (['boston.target'], {'columns': "['MEDV']"}), "(boston.target, columns=['MEDV'])\n", (343, 376), True, 'import pandas as pd\n'), ((507, 567), 'sklearn.model_selection.train_test_split', 'skl_ms.train_test_split', (['X', 'Y'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(X, Y, test_size=0.3, random_state=0)\n', (530, 567), True, 'import sklearn.model_selection as skl_ms\n'), ((1630, 1649), 'numpy.array', 'np.array', (['X.columns'], {}), '(X.columns)\n', (1638, 1649), True, 'import numpy as np\n'), ((1663, 1683), 'numpy.array', 'np.array', (['score_list'], {}), '(score_list)\n', (1671, 1683), True, 'import numpy as np\n'), ((796, 821), 'sklearn.linear_model.LinearRegression', 'skl_lm.LinearRegression', ([], {}), '()\n', (819, 821), True, 'import sklearn.linear_model as skl_lm\n'), ((870, 911), 'sklearn.feature_selection.RFE', 'skl_fs.RFE', (['model'], {'n_features_to_select': 'n'}), '(model, n_features_to_select=n)\n', (880, 911), True, 'import sklearn.feature_selection as skl_fs\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 15 09:48:24 2020
@author: tcandela
"""
# =============================================================================
# IMPORTS
# =============================================================================
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import random as rd
from shapely.geometry import LineString
from scipy import interpolate, stats
#Personal Librairies
sys.path.append('/homelocal-px/px-179/tcandela/STAMM/LIB/') #localisation of STAMM librairies
import netCDF_lib as ncl
import plot_lib as pl
import turtle_lib as tul
# =============================================================================
# PATHS & FILES
# =============================================================================
beach = 'NC_GouaroBay'
year = 2004
indir = '/data/rd_exchange2/tcandela/STAMM/run/PACIFIC/' + beach + '/'
filename_type = '_3y_leatherback_passives_'
filename = beach + filename_type + str(year)
outdir = indir
# =============================================================================
# USER PARAMETERS
# =============================================================================
#MAP PLOT PARAMETERS
#xmin = -100
#xmax = 40
#ymin = -10
#ymax = 60
#MAP PLOT PARAMETERS
xmin = 80
xmax = 220
ymin = -60
ymax = 10
#
lat_space = 10
lon_space = 20
#LETHARGY
coef_SMR = 5
lethargy = 30
To = 24
endday = 183
month = 6
# =============================================================================
# =============================================================================
# =============================================================================
# /!\/!\/!\/!\/!\/!\ CODE /!\/!\/!\/!\/!\/!\
# =============================================================================
# =============================================================================
# =============================================================================
# =============================================================================
# PRESENTATION
# =============================================================================
print('======================================================================')
print('=====================COMPUTATION OF TRAJECTORIES======================')
print('======================================================================')
# =============================================================================
# LOADING DATA
# =============================================================================
print('\nLoading data...')
data = ncl.read_nc(indir + filename +'.nc', ['traj_lat','traj_lon','date','traj_temp','init_t','traj_time'])
traj_lat = data['traj_lat']
traj_lon = data['traj_lon']
traj_temp = data['traj_temp']
init_t = data['init_t']
date = data['date']
traj_time = data['traj_time']
nb_turtles = traj_lat.shape[1]
nb_days = traj_lat.shape[0]
print('\nLoaded file : ' + indir + filename +'.nc')
#%% =============================================================================
# SEPARATION OF LIVING AND DEAD TURTLES
# =============================================================================
print('\nSeparation of living and dead turtles...')
days = traj_temp.shape[0]
age_year = np.arange(days)/365.
index_dead = []
index_alive = []
date_death = tul.find_date_death(nb_turtles, traj_temp, To, coef_SMR, lethargy, init_t, nb_days)
turtle = 0
for turtle in np.arange(nb_turtles):
if date_death[turtle] == 1.0000e+34:
index_alive.append(turtle)
else:
index_dead.append(turtle)
print('\n' + str(len(index_alive)) + ' alive turtles and ' + str(len(index_dead)) + ' dead turtles has been spotted !')
# =============================================================================
# PEAK TRAJECTORIES
# =============================================================================
print('\nComputation of trajectories for alive turtles...')
ifile = indir + filename + '.nc'
ofile = indir + filename + '_alive_dispersion_' + str(month) + 'm.png'
list_turtle = []
for turtle in np.arange(nb_turtles):
# if turtle in index_alive:
list_turtle.append(turtle)
lon = np.zeros((nb_days, len(list_turtle)))
lat = np.zeros((nb_days, len(list_turtle)))
time = np.zeros((nb_days, len(list_turtle)))
i = 0
for turtle in np.arange(nb_turtles):
if turtle in list_turtle:
for day in np.arange(nb_days):
lon[day, i] = traj_lon[day, turtle]
lat[day, i] = traj_lat[day, turtle]
time[day, i] = traj_time[day, turtle]
i += 1
# Plot figure ------
c = 0.89
f = plt.figure(figsize = (12*c/2.54,8*c/2.54))
gs = gridspec.GridSpec(2,1,height_ratios=[11,1],left=0.08, right=0.98, bottom=0.07, top=0.95)
ax = plt.subplot(gs[0])
im,time = pl.display_trajectories_particular(lon[:endday,:], lat[:endday,:], time[:endday,:], xmin, f,ax)
pl.show_start_point(ax, lat, lon)
pl.plot_map(ax,ymin,ymax,xmin,xmax,lon_space=lon_space,lat_space=lat_space)
ax.spines['right'].set_linewidth(0.5)
ax.spines['left'].set_linewidth(0.5)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['top'].set_linewidth(0.5)
if np.max(time)>0*365:
ax_cb = plt.subplot(gs[1])
label = u"Age (Days)"
pl.display_colorbar(f,im, ax_cb, label)
else:
ax_cb = plt.subplot(gs[2])
label = u"Age (Days)"
pl.display_colorbar(f,im, ax_cb, label)
plt.savefig(ofile,bbox_inches='tight',dpi=800)
#plt.show()
print('\nPlot Saved at : ' + ofile)
| [
"plot_lib.show_start_point",
"netCDF_lib.read_nc",
"plot_lib.plot_map",
"matplotlib.pyplot.savefig",
"plot_lib.display_colorbar",
"numpy.max",
"matplotlib.pyplot.subplot",
"turtle_lib.find_date_death",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"plot_lib.display_trajectories_par... | [((582, 641), 'sys.path.append', 'sys.path.append', (['"""/homelocal-px/px-179/tcandela/STAMM/LIB/"""'], {}), "('/homelocal-px/px-179/tcandela/STAMM/LIB/')\n", (597, 641), False, 'import sys\n'), ((2723, 2834), 'netCDF_lib.read_nc', 'ncl.read_nc', (["(indir + filename + '.nc')", "['traj_lat', 'traj_lon', 'date', 'traj_temp', 'init_t', 'traj_time']"], {}), "(indir + filename + '.nc', ['traj_lat', 'traj_lon', 'date',\n 'traj_temp', 'init_t', 'traj_time'])\n", (2734, 2834), True, 'import netCDF_lib as ncl\n'), ((3459, 3546), 'turtle_lib.find_date_death', 'tul.find_date_death', (['nb_turtles', 'traj_temp', 'To', 'coef_SMR', 'lethargy', 'init_t', 'nb_days'], {}), '(nb_turtles, traj_temp, To, coef_SMR, lethargy, init_t,\n nb_days)\n', (3478, 3546), True, 'import turtle_lib as tul\n'), ((3569, 3590), 'numpy.arange', 'np.arange', (['nb_turtles'], {}), '(nb_turtles)\n', (3578, 3590), True, 'import numpy as np\n'), ((4214, 4235), 'numpy.arange', 'np.arange', (['nb_turtles'], {}), '(nb_turtles)\n', (4223, 4235), True, 'import numpy as np\n'), ((4454, 4475), 'numpy.arange', 'np.arange', (['nb_turtles'], {}), '(nb_turtles)\n', (4463, 4475), True, 'import numpy as np\n'), ((4766, 4815), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12 * c / 2.54, 8 * c / 2.54)'}), '(figsize=(12 * c / 2.54, 8 * c / 2.54))\n', (4776, 4815), True, 'import matplotlib.pyplot as plt\n'), ((4814, 4910), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(1)'], {'height_ratios': '[11, 1]', 'left': '(0.08)', 'right': '(0.98)', 'bottom': '(0.07)', 'top': '(0.95)'}), '(2, 1, height_ratios=[11, 1], left=0.08, right=0.98,\n bottom=0.07, top=0.95)\n', (4831, 4910), False, 'from matplotlib import gridspec\n'), ((4908, 4926), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0]'], {}), '(gs[0])\n', (4919, 4926), True, 'import matplotlib.pyplot as plt\n'), ((4937, 5041), 'plot_lib.display_trajectories_particular', 
'pl.display_trajectories_particular', (['lon[:endday, :]', 'lat[:endday, :]', 'time[:endday, :]', 'xmin', 'f', 'ax'], {}), '(lon[:endday, :], lat[:endday, :], time[:\n endday, :], xmin, f, ax)\n', (4971, 5041), True, 'import plot_lib as pl\n'), ((5033, 5066), 'plot_lib.show_start_point', 'pl.show_start_point', (['ax', 'lat', 'lon'], {}), '(ax, lat, lon)\n', (5052, 5066), True, 'import plot_lib as pl\n'), ((5067, 5153), 'plot_lib.plot_map', 'pl.plot_map', (['ax', 'ymin', 'ymax', 'xmin', 'xmax'], {'lon_space': 'lon_space', 'lat_space': 'lat_space'}), '(ax, ymin, ymax, xmin, xmax, lon_space=lon_space, lat_space=\n lat_space)\n', (5078, 5153), True, 'import plot_lib as pl\n'), ((5546, 5594), 'matplotlib.pyplot.savefig', 'plt.savefig', (['ofile'], {'bbox_inches': '"""tight"""', 'dpi': '(800)'}), "(ofile, bbox_inches='tight', dpi=800)\n", (5557, 5594), True, 'import matplotlib.pyplot as plt\n'), ((3391, 3406), 'numpy.arange', 'np.arange', (['days'], {}), '(days)\n', (3400, 3406), True, 'import numpy as np\n'), ((5302, 5314), 'numpy.max', 'np.max', (['time'], {}), '(time)\n', (5308, 5314), True, 'import numpy as np\n'), ((5335, 5353), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1]'], {}), '(gs[1])\n', (5346, 5353), True, 'import matplotlib.pyplot as plt\n'), ((5384, 5424), 'plot_lib.display_colorbar', 'pl.display_colorbar', (['f', 'im', 'ax_cb', 'label'], {}), '(f, im, ax_cb, label)\n', (5403, 5424), True, 'import plot_lib as pl\n'), ((5443, 5461), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[2]'], {}), '(gs[2])\n', (5454, 5461), True, 'import matplotlib.pyplot as plt\n'), ((5492, 5532), 'plot_lib.display_colorbar', 'pl.display_colorbar', (['f', 'im', 'ax_cb', 'label'], {}), '(f, im, ax_cb, label)\n', (5511, 5532), True, 'import plot_lib as pl\n'), ((4538, 4556), 'numpy.arange', 'np.arange', (['nb_days'], {}), '(nb_days)\n', (4547, 4556), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import matplotlib.image as img
import matplotlib.pyplot as plt
sess = tf.Session()
diag = tf.diag([1,1,1,1])
truncated = tf.truncated_normal([2,3])
fill = tf.fill([2,3],5.0)
uniform = tf.random_uniform([3,2])
convert_tensor = tf.convert_to_tensor(np.array([[1.,2.,3.],[-3.,-7.,-1.],[0.,5.,-2.]]))
truncatedTwo = tf.truncated_normal([3,4],mean=0.0,stddev=1.0)
input_data = tf.constant([[1.,2.,3.],[1.,5.,3.],[1.,2.,7.],[6.,2.,3.],[8.,2.,3.]])
shuffle = tf.random_shuffle(input_data)
crop = tf.random_crop(input_data,[1,1])
# 指定尺寸,图片随机裁剪
image = img.imread('./resources/test.jpg')
plt.imshow(image)
plt.show()
reshaped_image = tf.cast(image,tf.float32)
size = tf.cast(tf.shape(reshaped_image),tf.int32)
height = sess.run(size[0]//2)
width = sess.run(size[1]//2)
distorted_image = tf.random_crop(reshaped_image,[height,width,3])
plt.imshow(sess.run(tf.cast(distorted_image,tf.uint8)))
plt.show()
for i in range(9):
a = tf.random_crop(reshaped_image,[height,width,3])
plt.imshow(sess.run(tf.cast(a, tf.uint8)))
plt.show() | [
"matplotlib.pyplot.imshow",
"tensorflow.shape",
"tensorflow.fill",
"tensorflow.diag",
"tensorflow.Session",
"matplotlib.image.imread",
"tensorflow.random_shuffle",
"tensorflow.random_uniform",
"tensorflow.random_crop",
"numpy.array",
"tensorflow.constant",
"tensorflow.cast",
"tensorflow.trun... | [((165, 177), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (175, 177), True, 'import tensorflow as tf\n'), ((186, 207), 'tensorflow.diag', 'tf.diag', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (193, 207), True, 'import tensorflow as tf\n'), ((217, 244), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[2, 3]'], {}), '([2, 3])\n', (236, 244), True, 'import tensorflow as tf\n'), ((251, 271), 'tensorflow.fill', 'tf.fill', (['[2, 3]', '(5.0)'], {}), '([2, 3], 5.0)\n', (258, 271), True, 'import tensorflow as tf\n'), ((280, 305), 'tensorflow.random_uniform', 'tf.random_uniform', (['[3, 2]'], {}), '([3, 2])\n', (297, 305), True, 'import tensorflow as tf\n'), ((408, 457), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[3, 4]'], {'mean': '(0.0)', 'stddev': '(1.0)'}), '([3, 4], mean=0.0, stddev=1.0)\n', (427, 457), True, 'import tensorflow as tf\n'), ((468, 571), 'tensorflow.constant', 'tf.constant', (['[[1.0, 2.0, 3.0], [1.0, 5.0, 3.0], [1.0, 2.0, 7.0], [6.0, 2.0, 3.0], [8.0, \n 2.0, 3.0]]'], {}), '([[1.0, 2.0, 3.0], [1.0, 5.0, 3.0], [1.0, 2.0, 7.0], [6.0, 2.0, \n 3.0], [8.0, 2.0, 3.0]])\n', (479, 571), True, 'import tensorflow as tf\n'), ((548, 577), 'tensorflow.random_shuffle', 'tf.random_shuffle', (['input_data'], {}), '(input_data)\n', (565, 577), True, 'import tensorflow as tf\n'), ((585, 619), 'tensorflow.random_crop', 'tf.random_crop', (['input_data', '[1, 1]'], {}), '(input_data, [1, 1])\n', (599, 619), True, 'import tensorflow as tf\n'), ((641, 675), 'matplotlib.image.imread', 'img.imread', (['"""./resources/test.jpg"""'], {}), "('./resources/test.jpg')\n", (651, 675), True, 'import matplotlib.image as img\n'), ((676, 693), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (686, 693), True, 'import matplotlib.pyplot as plt\n'), ((694, 704), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (702, 704), True, 'import matplotlib.pyplot as plt\n'), ((722, 748), 'tensorflow.cast', 'tf.cast', 
(['image', 'tf.float32'], {}), '(image, tf.float32)\n', (729, 748), True, 'import tensorflow as tf\n'), ((875, 925), 'tensorflow.random_crop', 'tf.random_crop', (['reshaped_image', '[height, width, 3]'], {}), '(reshaped_image, [height, width, 3])\n', (889, 925), True, 'import tensorflow as tf\n'), ((979, 989), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (987, 989), True, 'import matplotlib.pyplot as plt\n'), ((343, 408), 'numpy.array', 'np.array', (['[[1.0, 2.0, 3.0], [-3.0, -7.0, -1.0], [0.0, 5.0, -2.0]]'], {}), '([[1.0, 2.0, 3.0], [-3.0, -7.0, -1.0], [0.0, 5.0, -2.0]])\n', (351, 408), True, 'import numpy as np\n'), ((763, 787), 'tensorflow.shape', 'tf.shape', (['reshaped_image'], {}), '(reshaped_image)\n', (771, 787), True, 'import tensorflow as tf\n'), ((1018, 1068), 'tensorflow.random_crop', 'tf.random_crop', (['reshaped_image', '[height, width, 3]'], {}), '(reshaped_image, [height, width, 3])\n', (1032, 1068), True, 'import tensorflow as tf\n'), ((1117, 1127), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1125, 1127), True, 'import matplotlib.pyplot as plt\n'), ((943, 977), 'tensorflow.cast', 'tf.cast', (['distorted_image', 'tf.uint8'], {}), '(distorted_image, tf.uint8)\n', (950, 977), True, 'import tensorflow as tf\n'), ((1090, 1110), 'tensorflow.cast', 'tf.cast', (['a', 'tf.uint8'], {}), '(a, tf.uint8)\n', (1097, 1110), True, 'import tensorflow as tf\n')] |
import numpy as np
dt = 0.05
number_inputs = 3
number_neurons = 6
V = np.random.rand(number_neurons, number_inputs)*4-2
W = np.random.rand(number_neurons, number_neurons)*4-2
net_type = 'CTRNN3'
u = np.random.rand(number_inputs)*4-2
N = 100
x = np.random.rand(number_neurons)*4-2
for t in range(N):
print("t=" + str(t) + ": x=" + str(x))
if net_type == 'Elman1':
x = np.arctan(np.matmul(W, x) + np.matmul(V, u))
elif net_type == 'Elman2':
x = np.clip(np.matmul(W, x) + np.matmul(V, u), -1, 1)
elif net_type == 'CTRNN1':
x = x + dt*(np.matmul(W, np.arctan(x)) + np.matmul(V, u))
x = np.clip(x, -1, 1)
elif net_type == 'CTRNN2':
x = x + dt*(np.matmul(W, x) + np.matmul(V, u))
x = np.clip(x, -1, 1)
elif net_type == 'CTRNN3':
x = x + dt * (np.matmul(W, x) + np.matmul(V, u))
x = np.arctan(x)
print("Finished")
| [
"numpy.clip",
"numpy.matmul",
"numpy.random.rand",
"numpy.arctan"
] | [((73, 118), 'numpy.random.rand', 'np.random.rand', (['number_neurons', 'number_inputs'], {}), '(number_neurons, number_inputs)\n', (87, 118), True, 'import numpy as np\n'), ((127, 173), 'numpy.random.rand', 'np.random.rand', (['number_neurons', 'number_neurons'], {}), '(number_neurons, number_neurons)\n', (141, 173), True, 'import numpy as np\n'), ((204, 233), 'numpy.random.rand', 'np.random.rand', (['number_inputs'], {}), '(number_inputs)\n', (218, 233), True, 'import numpy as np\n'), ((252, 282), 'numpy.random.rand', 'np.random.rand', (['number_neurons'], {}), '(number_neurons)\n', (266, 282), True, 'import numpy as np\n'), ((403, 418), 'numpy.matmul', 'np.matmul', (['W', 'x'], {}), '(W, x)\n', (412, 418), True, 'import numpy as np\n'), ((421, 436), 'numpy.matmul', 'np.matmul', (['V', 'u'], {}), '(V, u)\n', (430, 436), True, 'import numpy as np\n'), ((642, 659), 'numpy.clip', 'np.clip', (['x', '(-1)', '(1)'], {}), '(x, -1, 1)\n', (649, 659), True, 'import numpy as np\n'), ((490, 505), 'numpy.matmul', 'np.matmul', (['W', 'x'], {}), '(W, x)\n', (499, 505), True, 'import numpy as np\n'), ((508, 523), 'numpy.matmul', 'np.matmul', (['V', 'u'], {}), '(V, u)\n', (517, 523), True, 'import numpy as np\n'), ((759, 776), 'numpy.clip', 'np.clip', (['x', '(-1)', '(1)'], {}), '(x, -1, 1)\n', (766, 776), True, 'import numpy as np\n'), ((878, 890), 'numpy.arctan', 'np.arctan', (['x'], {}), '(x)\n', (887, 890), True, 'import numpy as np\n'), ((613, 628), 'numpy.matmul', 'np.matmul', (['V', 'u'], {}), '(V, u)\n', (622, 628), True, 'import numpy as np\n'), ((597, 609), 'numpy.arctan', 'np.arctan', (['x'], {}), '(x)\n', (606, 609), True, 'import numpy as np\n'), ((712, 727), 'numpy.matmul', 'np.matmul', (['W', 'x'], {}), '(W, x)\n', (721, 727), True, 'import numpy as np\n'), ((730, 745), 'numpy.matmul', 'np.matmul', (['V', 'u'], {}), '(V, u)\n', (739, 745), True, 'import numpy as np\n'), ((831, 846), 'numpy.matmul', 'np.matmul', (['W', 'x'], {}), '(W, x)\n', (840, 846), True, 
'import numpy as np\n'), ((849, 864), 'numpy.matmul', 'np.matmul', (['V', 'u'], {}), '(V, u)\n', (858, 864), True, 'import numpy as np\n')] |
from PIL import Image
from rembg.bg import remove
import numpy as np
import io
from django.db.models.functions import Radians, Cos, Sin, ASin, Sqrt
from date_site import settings
def add_watermark(image,):
background = np.fromfile(settings.WATERMARK)
result = remove(background)
base_image = Image.open(image)
watermark = Image.open(io.BytesIO(result)).convert("RGBA")
watermark.thumbnail((90, 90))
width, height = base_image.size
transparent = Image.new('RGBA', (width, height), (0, 0, 0, 0))
transparent.paste(base_image, (0, 0))
transparent.paste(watermark, (0, 0), mask=watermark)
transparent.save(image, format='png')
def distance_1(La1, La2, Lo1, Lo2):
Lo1 = Radians(Lo1)
Lo2 = Radians(Lo2)
La1 = Radians(La1)
La2 = Radians(La2)
return 2 * ASin(Sqrt(Sin((La2 - La1) / 2) ** 2 + Cos(La1) * Cos(La2) * Sin((Lo2 - Lo1) / 2) ** 2)) * 6371
| [
"numpy.fromfile",
"PIL.Image.open",
"PIL.Image.new",
"django.db.models.functions.Radians",
"io.BytesIO",
"django.db.models.functions.Cos",
"rembg.bg.remove",
"django.db.models.functions.Sin"
] | [((226, 257), 'numpy.fromfile', 'np.fromfile', (['settings.WATERMARK'], {}), '(settings.WATERMARK)\n', (237, 257), True, 'import numpy as np\n'), ((271, 289), 'rembg.bg.remove', 'remove', (['background'], {}), '(background)\n', (277, 289), False, 'from rembg.bg import remove\n'), ((307, 324), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (317, 324), False, 'from PIL import Image\n'), ((476, 524), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(width, height)', '(0, 0, 0, 0)'], {}), "('RGBA', (width, height), (0, 0, 0, 0))\n", (485, 524), False, 'from PIL import Image\n'), ((714, 726), 'django.db.models.functions.Radians', 'Radians', (['Lo1'], {}), '(Lo1)\n', (721, 726), False, 'from django.db.models.functions import Radians, Cos, Sin, ASin, Sqrt\n'), ((737, 749), 'django.db.models.functions.Radians', 'Radians', (['Lo2'], {}), '(Lo2)\n', (744, 749), False, 'from django.db.models.functions import Radians, Cos, Sin, ASin, Sqrt\n'), ((760, 772), 'django.db.models.functions.Radians', 'Radians', (['La1'], {}), '(La1)\n', (767, 772), False, 'from django.db.models.functions import Radians, Cos, Sin, ASin, Sqrt\n'), ((783, 795), 'django.db.models.functions.Radians', 'Radians', (['La2'], {}), '(La2)\n', (790, 795), False, 'from django.db.models.functions import Radians, Cos, Sin, ASin, Sqrt\n'), ((352, 370), 'io.BytesIO', 'io.BytesIO', (['result'], {}), '(result)\n', (362, 370), False, 'import io\n'), ((821, 841), 'django.db.models.functions.Sin', 'Sin', (['((La2 - La1) / 2)'], {}), '((La2 - La1) / 2)\n', (824, 841), False, 'from django.db.models.functions import Radians, Cos, Sin, ASin, Sqrt\n'), ((849, 857), 'django.db.models.functions.Cos', 'Cos', (['La1'], {}), '(La1)\n', (852, 857), False, 'from django.db.models.functions import Radians, Cos, Sin, ASin, Sqrt\n'), ((860, 868), 'django.db.models.functions.Cos', 'Cos', (['La2'], {}), '(La2)\n', (863, 868), False, 'from django.db.models.functions import Radians, Cos, Sin, ASin, Sqrt\n'), ((871, 891), 
'django.db.models.functions.Sin', 'Sin', (['((Lo2 - Lo1) / 2)'], {}), '((Lo2 - Lo1) / 2)\n', (874, 891), False, 'from django.db.models.functions import Radians, Cos, Sin, ASin, Sqrt\n')] |
import numpy as np
import soundfile as sf
import argparse
import os
import keras
import sklearn
import librosa
from keras import backend as K
eps = np.finfo(np.float).eps
def class_mae(y_true, y_pred):
return K.mean(
K.abs(
K.argmax(y_pred, axis=-1) - K.argmax(y_true, axis=-1)
),
axis=-1
)
def count(audio, model, scaler):
# compute STFT
X = np.abs(librosa.stft(audio, n_fft=400, hop_length=160)).T
# apply global (featurewise) standardization to mean1, var0
X = scaler.transform(X)
# cut to input shape length (500 frames x 201 STFT bins)
X = X[:500, :]
# apply l2 normalization
Theta = np.linalg.norm(X, axis=1) + eps
X /= np.mean(Theta)
# add sample dimension
X = X[np.newaxis, ...]
if len(model.input_shape) == 4:
X = X[:, np.newaxis, ...]
ys = model.predict(X, verbose=0)
return np.argmax(ys, axis=1)[0]
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Load keras model and predict speaker count'
)
parser.add_argument(
'audio',
help='audio file (samplerate 16 kHz) of 5 seconds duration'
)
parser.add_argument(
'--model', default='CRNN',
help='model name'
)
args = parser.parse_args()
# load model
model = keras.models.load_model(
os.path.join('models', args.model + '.h5'),
custom_objects={
'class_mae': class_mae,
'exp': K.exp
}
)
# print model configuration
model.summary()
# save as svg file
# load standardisation parameters
scaler = sklearn.preprocessing.StandardScaler()
with np.load(os.path.join("models", 'scaler.npz')) as data:
scaler.mean_ = data['arr_0']
scaler.scale_ = data['arr_1']
# compute audio
audio, rate = sf.read(args.audio, always_2d=True)
# downmix to mono
audio = np.mean(audio, axis=1)
estimate = count(audio, model, scaler)
print("Speaker Count Estimate: ", estimate)
| [
"numpy.mean",
"argparse.ArgumentParser",
"os.path.join",
"numpy.argmax",
"sklearn.preprocessing.StandardScaler",
"numpy.linalg.norm",
"numpy.finfo",
"keras.backend.argmax",
"soundfile.read",
"librosa.stft"
] | [((150, 168), 'numpy.finfo', 'np.finfo', (['np.float'], {}), '(np.float)\n', (158, 168), True, 'import numpy as np\n'), ((715, 729), 'numpy.mean', 'np.mean', (['Theta'], {}), '(Theta)\n', (722, 729), True, 'import numpy as np\n'), ((972, 1058), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Load keras model and predict speaker count"""'}), "(description=\n 'Load keras model and predict speaker count')\n", (995, 1058), False, 'import argparse\n'), ((1646, 1684), 'sklearn.preprocessing.StandardScaler', 'sklearn.preprocessing.StandardScaler', ([], {}), '()\n', (1682, 1684), False, 'import sklearn\n'), ((1863, 1898), 'soundfile.read', 'sf.read', (['args.audio'], {'always_2d': '(True)'}), '(args.audio, always_2d=True)\n', (1870, 1898), True, 'import soundfile as sf\n'), ((1934, 1956), 'numpy.mean', 'np.mean', (['audio'], {'axis': '(1)'}), '(audio, axis=1)\n', (1941, 1956), True, 'import numpy as np\n'), ((674, 699), 'numpy.linalg.norm', 'np.linalg.norm', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (688, 699), True, 'import numpy as np\n'), ((905, 926), 'numpy.argmax', 'np.argmax', (['ys'], {'axis': '(1)'}), '(ys, axis=1)\n', (914, 926), True, 'import numpy as np\n'), ((1373, 1415), 'os.path.join', 'os.path.join', (['"""models"""', "(args.model + '.h5')"], {}), "('models', args.model + '.h5')\n", (1385, 1415), False, 'import os\n'), ((408, 454), 'librosa.stft', 'librosa.stft', (['audio'], {'n_fft': '(400)', 'hop_length': '(160)'}), '(audio, n_fft=400, hop_length=160)\n', (420, 454), False, 'import librosa\n'), ((1702, 1738), 'os.path.join', 'os.path.join', (['"""models"""', '"""scaler.npz"""'], {}), "('models', 'scaler.npz')\n", (1714, 1738), False, 'import os\n'), ((252, 277), 'keras.backend.argmax', 'K.argmax', (['y_pred'], {'axis': '(-1)'}), '(y_pred, axis=-1)\n', (260, 277), True, 'from keras import backend as K\n'), ((280, 305), 'keras.backend.argmax', 'K.argmax', (['y_true'], {'axis': '(-1)'}), '(y_true, axis=-1)\n', (288, 305), 
True, 'from keras import backend as K\n')] |
import os
import sys
import time
from pickle import Pickler, Unpickler
from random import shuffle
import numpy as np
from Arena import Arena
from MCTS import MCTS
from connect4.Connect4BoardEvaluate import getBoardScoreTheoretical
from connect4.Connect4Game import Connect4Game
from connect4.Connect4Heuristics import heuristic2_prob
from connect4.Connect4Openings import *
from connect4.Connect4Players import EngineConnect4Player
from pytorch_classification.utils import Bar, AverageMeter
class Coach():
"""
This class executes the self-play + learning. It uses the functions defined
in Game and NeuralNet. args are specified in main.py.
"""
def __init__(self, game, nnet, args):
self.game = game
self.nnet = nnet
self.pnet = self.nnet.__class__(self.game) # the competitor network
self.args = args
self.window_size = args.numItersForTrainExamplesHistoryStart
self.mcts = MCTS(self.game, self.nnet, self.args)
self.trainExamplesHistory = [] # history of examples from args.numItersForTrainExamplesHistory latest iterations
self.skipFirstSelfPlay = False # can be overriden in loadTrainExamples()
def executeEpisode(self):
"""
This function executes one episode of self-play, starting with player 1.
As the game is played, each turn is added as a training example to
trainExamples. The game is played till the game ends. After the game
ends, the outcome of the game is used to assign values to each example
in trainExamples.
It uses a temp=1 if episodeStep < tempThreshold, and thereafter
uses temp=0.
Returns:
trainExamples: a list of examples of the form (canonicalBoard,pi,v)
pi is the MCTS informed policy vector, v is +1 if
the player eventually won the game, else -1.
"""
trainExamples = []
board = self.game.getInitBoard()
self.curPlayer = 1
episodeStep = 0
moveHistory = []
if "openings_prob" in self.args:
use_opening = random.random() < self.args.openings_prob
else:
use_opening = False
if use_opening:
opening = opening_tree()
while True:
episodeStep += 1
canonicalBoard = self.game.getCanonicalForm(board, self.curPlayer)
temp = int(episodeStep < self.args.tempThreshold)
pi = self.mcts.getActionProb(canonicalBoard, temp=temp)
valids = self.game.getValidMoves(canonicalBoard, self.curPlayer)
pi = pi * valids
pi = pi / sum(pi)
if not use_opening or episodeStep >= len(opening):
if self.args.heuristic_type == 'combined':
fraction = self.args.heuristic_probability
h_prob = self.args.heuristic_function(canonicalBoard)
new_pi = (np.array(pi) * (1 - fraction) + h_prob * fraction)
if self.args.change_probabilities:
pi = new_pi
action = np.random.choice(len(new_pi), p=new_pi)
elif self.args.heuristic_type == 'normal' or self.args.heuristic_type == 'cooling':
if self.args.heuristic_type == 'cooling':
prob = self.args.heuristic_probability - (
episodeStep - 1) * self.args.heuristic_probability / 42
else:
prob = self.args.heuristic_probability
if np.random.ranf(1)[0] > prob:
action = np.random.choice(len(pi), p=pi)
else:
new_pi = self.args.heuristic_function(canonicalBoard)
if self.args.change_probabilities:
pi = new_pi
action = np.random.choice(len(new_pi), p=new_pi)
elif self.args.heuristic_type == 'cooling_iter':
fraction = max(0, ((50 - (self.args.curIter - 1)) / 50))
h_prob = heuristic2_prob(canonicalBoard)
new_pi = (np.array(pi) * (1 - fraction) + h_prob * fraction)
if self.args.change_probabilities:
pi = new_pi
action = np.random.choice(len(new_pi), p=new_pi)
elif self.args.heuristic_type == 'custom':
prob = self.args.probability_function(episodeStep)
if np.random.ranf(1)[0] > prob:
action = np.random.choice(len(pi), p=pi)
else:
action = self.args.heuristic_function(canonicalBoard)
elif self.args.heuristic_type == 'perfect':
action = EngineConnect4Player(Connect4Game()).play(canonicalBoard)
elif self.args.heuristic_type == 'default':
action = np.random.choice(len(pi), p=pi)
else:
raise NameError("Wrong heuristic type '" + self.args.heuristic_type + "'")
else:
action = opening[episodeStep - 1]
# pi = np.array(7)
# pi[action] = 1
sym = self.game.getSymmetries(canonicalBoard, pi)
for b, p in sym:
if np.all(b == canonicalBoard):
trainExamples.append([b, self.curPlayer, p, list(moveHistory), None])
else:
trainExamples.append([b, self.curPlayer, p, [6 - x for x in moveHistory], None])
board, self.curPlayer = self.game.getNextState(board, self.curPlayer, action)
moveHistory.append(action)
r = self.game.getGameEnded(board, self.curPlayer)
if r != 0:
if self.args.supervised:
result = []
for x in trainExamples:
r = getBoardScoreTheoretical(x[3])
result.append((x[0], x[2], r))
print(x[0], "Moves", "".join([str(i + 1) for i in x[3]]), "Theoretical value", r)
return result
else:
if self.args.value_game_length:
r = 1.198 - 99 / 3500 * episodeStep
res = [[x[0], x[2], r * ((-1) ** (x[1] != self.curPlayer))] for x in trainExamples]
if use_opening:
score = 1
for i in range(len(opening)):
res[i][2] = score
score *= -1
return res
def learn(self, verbose=False):
"""
Performs numIters iterations with numEps episodes of self-play in each
iteration. After every iteration, it retrains neural network with
examples in trainExamples (which has a maximium length of maxlenofQueue).
It then pits the new neural network against the old one and accepts it
only if it wins >= updateThreshold fraction of games.
"""
start_idx = 1
if self.args.load_model:
start_idx += self.args.checkpoint_index
for i in range(start_idx, self.args.numIters + 1):
self.args.curIter = i
# bookkeeping
print('------ITER ' + str(i) + '------')
# examples of the iteration
if not self.skipFirstSelfPlay or i > 1:
iterationTrainExamples = []
eps_time = AverageMeter()
if verbose:
bar = Bar('Self Play', max=self.args.numEps)
end = time.time()
for eps in range(self.args.numEps):
self.mcts = MCTS(self.game, self.nnet, self.args) # reset search tree
iterationTrainExamples.extend(self.executeEpisode())
# bookkeeping + plot progress
eps_time.update(time.time() - end)
end = time.time()
if verbose:
bar.suffix = '({eps}/{maxeps}) Eps Time: {et:.3f}s | Total: {total:} | ETA: {eta:}'.format(
eps=eps + 1, maxeps=self.args.numEps, et=eps_time.avg,
total=bar.elapsed_td, eta=bar.eta_td)
bar.next()
if verbose:
bar.finish()
# save the iteration examples to the history
_, ind = np.unique(np.array(list(map(str, iterationTrainExamples))),
return_index=True) # removing of duplicates
self.trainExamplesHistory.append(np.array(iterationTrainExamples)[ind])
if self.window_size < self.args.numItersForTrainExamplesHistoryMax and \
self.args.numItersForTrainExamplesHistoryStart < len(self.trainExamplesHistory):
self.window_size += 0.5
if len(self.trainExamplesHistory) > self.window_size:
print("len(trainExamplesHistory) =", len(self.trainExamplesHistory),
" => remove the oldest trainExamples")
self.trainExamplesHistory.pop(0)
# backup history to a file
# NB! the examples were collected using the model from the previous iteration, so (i-1)
self.saveTrainExamples(i - 1)
# shuffle examples before training
trainExamples = []
for e in self.trainExamplesHistory:
trainExamples.extend(e)
shuffle(trainExamples)
# training new network, keeping a copy of the old one
self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')
self.pnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')
pmcts = MCTS(self.game, self.pnet, self.args)
self.nnet.train(trainExamples)
nmcts = MCTS(self.game, self.nnet, self.args)
print('PITTING AGAINST PREVIOUS VERSION')
arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),
lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)
pwins, nwins, draws = arena.playGames(self.args.arenaCompare)
print('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))
if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args.updateThreshold:
print('REJECTING NEW MODEL')
self.nnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')
else:
print('ACCEPTING NEW MODEL')
self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))
self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='best.pth.tar')
if self.args.heuristic_probability_cooling and self.args.heuristic_probability - self.args.heuristic_probability_cooling_step >= 0:
self.args.heuristic_probability -= self.args.heuristic_probability_cooling_step
def getCheckpointFile(self, iteration):
return 'checkpoint_' + str(iteration) + '.pth.tar'
def saveTrainExamples(self, iteration):
folder = self.args.checkpoint
if not os.path.exists(folder):
os.makedirs(folder)
filename = os.path.join(folder, self.getCheckpointFile(iteration) + ".examples")
with open(filename, "wb+") as f:
Pickler(f).dump(self.trainExamplesHistory)
f.closed
def loadTrainExamples(self):
modelFile = os.path.join(self.args.load_folder_file[0], self.args.load_folder_file[1])
examplesFile = modelFile + ".examples"
if not os.path.isfile(examplesFile):
print(examplesFile)
r = input("File with trainExamples not found. Continue? [y|n]")
if r != "y":
sys.exit()
else:
print("File with trainExamples found. Read it.")
with open(examplesFile, "rb") as f:
self.trainExamplesHistory = Unpickler(f).load()
f.closed
# examples based on the model were already collected (loaded)
self.skipFirstSelfPlay = True
| [
"os.path.exists",
"connect4.Connect4Game.Connect4Game",
"MCTS.MCTS",
"connect4.Connect4Heuristics.heuristic2_prob",
"random.shuffle",
"os.makedirs",
"pytorch_classification.utils.AverageMeter",
"os.path.join",
"pickle.Pickler",
"connect4.Connect4BoardEvaluate.getBoardScoreTheoretical",
"os.path.... | [((948, 985), 'MCTS.MCTS', 'MCTS', (['self.game', 'self.nnet', 'self.args'], {}), '(self.game, self.nnet, self.args)\n', (952, 985), False, 'from MCTS import MCTS\n'), ((11743, 11817), 'os.path.join', 'os.path.join', (['self.args.load_folder_file[0]', 'self.args.load_folder_file[1]'], {}), '(self.args.load_folder_file[0], self.args.load_folder_file[1])\n', (11755, 11817), False, 'import os\n'), ((9666, 9688), 'random.shuffle', 'shuffle', (['trainExamples'], {}), '(trainExamples)\n', (9673, 9688), False, 'from random import shuffle\n'), ((9960, 9997), 'MCTS.MCTS', 'MCTS', (['self.game', 'self.pnet', 'self.args'], {}), '(self.game, self.pnet, self.args)\n', (9964, 9997), False, 'from MCTS import MCTS\n'), ((10062, 10099), 'MCTS.MCTS', 'MCTS', (['self.game', 'self.nnet', 'self.args'], {}), '(self.game, self.nnet, self.args)\n', (10066, 10099), False, 'from MCTS import MCTS\n'), ((11431, 11453), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (11445, 11453), False, 'import os\n'), ((11467, 11486), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (11478, 11486), False, 'import os\n'), ((11881, 11909), 'os.path.isfile', 'os.path.isfile', (['examplesFile'], {}), '(examplesFile)\n', (11895, 11909), False, 'import os\n'), ((5411, 5438), 'numpy.all', 'np.all', (['(b == canonicalBoard)'], {}), '(b == canonicalBoard)\n', (5417, 5438), True, 'import numpy as np\n'), ((7605, 7619), 'pytorch_classification.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (7617, 7619), False, 'from pytorch_classification.utils import Bar, AverageMeter\n'), ((7735, 7746), 'time.time', 'time.time', ([], {}), '()\n', (7744, 7746), False, 'import time\n'), ((12060, 12070), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12068, 12070), False, 'import sys\n'), ((7674, 7712), 'pytorch_classification.utils.Bar', 'Bar', (['"""Self Play"""'], {'max': 'self.args.numEps'}), "('Self Play', max=self.args.numEps)\n", (7677, 7712), False, 'from 
pytorch_classification.utils import Bar, AverageMeter\n'), ((7832, 7869), 'MCTS.MCTS', 'MCTS', (['self.game', 'self.nnet', 'self.args'], {}), '(self.game, self.nnet, self.args)\n', (7836, 7869), False, 'from MCTS import MCTS\n'), ((8096, 8107), 'time.time', 'time.time', ([], {}), '()\n', (8105, 8107), False, 'import time\n'), ((11629, 11639), 'pickle.Pickler', 'Pickler', (['f'], {}), '(f)\n', (11636, 11639), False, 'from pickle import Pickler, Unpickler\n'), ((6014, 6044), 'connect4.Connect4BoardEvaluate.getBoardScoreTheoretical', 'getBoardScoreTheoretical', (['x[3]'], {}), '(x[3])\n', (6038, 6044), False, 'from connect4.Connect4BoardEvaluate import getBoardScoreTheoretical\n'), ((8778, 8810), 'numpy.array', 'np.array', (['iterationTrainExamples'], {}), '(iterationTrainExamples)\n', (8786, 8810), True, 'import numpy as np\n'), ((12238, 12250), 'pickle.Unpickler', 'Unpickler', (['f'], {}), '(f)\n', (12247, 12250), False, 'from pickle import Pickler, Unpickler\n'), ((2971, 2983), 'numpy.array', 'np.array', (['pi'], {}), '(pi)\n', (2979, 2983), True, 'import numpy as np\n'), ((4153, 4184), 'connect4.Connect4Heuristics.heuristic2_prob', 'heuristic2_prob', (['canonicalBoard'], {}), '(canonicalBoard)\n', (4168, 4184), False, 'from connect4.Connect4Heuristics import heuristic2_prob\n'), ((8051, 8062), 'time.time', 'time.time', ([], {}), '()\n', (8060, 8062), False, 'import time\n'), ((3612, 3629), 'numpy.random.ranf', 'np.random.ranf', (['(1)'], {}), '(1)\n', (3626, 3629), True, 'import numpy as np\n'), ((4215, 4227), 'numpy.array', 'np.array', (['pi'], {}), '(pi)\n', (4223, 4227), True, 'import numpy as np\n'), ((4580, 4597), 'numpy.random.ranf', 'np.random.ranf', (['(1)'], {}), '(1)\n', (4594, 4597), True, 'import numpy as np\n'), ((4888, 4902), 'connect4.Connect4Game.Connect4Game', 'Connect4Game', ([], {}), '()\n', (4900, 4902), False, 'from connect4.Connect4Game import Connect4Game\n')] |
import numpy as np
positions = np.loadtxt("input.txt", dtype=int, delimiter=",")
part1_fuel = np.abs(
np.tile(positions, (positions.size, 1))
- np.arange(1, positions.size + 1).reshape(-1, 1)
)
part2_fuel = part1_fuel * (part1_fuel + 1) // 2
print("Part 1:", part1_fuel.sum(axis=1).min())
print("Part 2:", part2_fuel.sum(axis=1).min())
| [
"numpy.tile",
"numpy.loadtxt",
"numpy.arange"
] | [((32, 81), 'numpy.loadtxt', 'np.loadtxt', (['"""input.txt"""'], {'dtype': 'int', 'delimiter': '""","""'}), "('input.txt', dtype=int, delimiter=',')\n", (42, 81), True, 'import numpy as np\n'), ((108, 147), 'numpy.tile', 'np.tile', (['positions', '(positions.size, 1)'], {}), '(positions, (positions.size, 1))\n', (115, 147), True, 'import numpy as np\n'), ((154, 186), 'numpy.arange', 'np.arange', (['(1)', '(positions.size + 1)'], {}), '(1, positions.size + 1)\n', (163, 186), True, 'import numpy as np\n')] |
import numpy
import pandas
from utils import pos_range
class CrossWordPuzzle():
def __init__(self, word_df, layout_df):
assert len(word_df) == len(layout_df)
self.word_list = word_df["word"]
self.puzzle_df = pandas.concat([word_df, layout_df], axis=1)
self.puzzle_df["len"] = [*map(len, self.word_list)]
self.puzzle_dict = self._init_dict_().copy()
self.num_rows, self.num_cols = self._init_size_()
def _init_dict_(self):
output_puzzle = {}
word_pos = self.puzzle_df[["row", "col", "len", "direction"]].apply(
lambda x: pos_range(*x), axis=1)
for pos, word in zip(word_pos, self.word_list):
assert len(pos) == len(word)
for (row, col), letter in zip(pos, word):
if (row, col) not in output_puzzle:
output_puzzle[(row, col)] = letter
else:
assert output_puzzle[(row, col)] == letter
return output_puzzle
def _init_size_(self):
word_len = numpy.array(self.puzzle_df["len"])
word_dir = numpy.array(self.puzzle_df["direction"])
row_end = numpy.array(self.puzzle_df["row"]) + (word_dir == 'v') * word_len
col_end = numpy.array(self.puzzle_df["col"]) + (word_dir == 'h') * word_len
return max(row_end), max(col_end)
def eval_cross_ratio(self):
total_letters = sum(map(len, self.word_list))
total_cells = len(self.puzzle_dict)
# higher ratio => better puzzle design
return 1 - total_cells / total_letters
def eval_shape_ratio(self):
# shape ratio closer to 1 => better puzzle design
return self.num_rows / self.num_cols
def eval_density(self):
# density closer to 100% => better puzzle design
return len(self.puzzle_dict) / (self.num_rows * self.num_cols)
def evaluate(self):
puzzle_quality = {
"words_cross": self.eval_cross_ratio(),
"board_shape": self.eval_shape_ratio(),
"board_density": self.eval_density()
}
return puzzle_quality
| [
"numpy.array",
"utils.pos_range",
"pandas.concat"
] | [((216, 259), 'pandas.concat', 'pandas.concat', (['[word_df, layout_df]'], {'axis': '(1)'}), '([word_df, layout_df], axis=1)\n', (229, 259), False, 'import pandas\n'), ((899, 933), 'numpy.array', 'numpy.array', (["self.puzzle_df['len']"], {}), "(self.puzzle_df['len'])\n", (910, 933), False, 'import numpy\n'), ((947, 987), 'numpy.array', 'numpy.array', (["self.puzzle_df['direction']"], {}), "(self.puzzle_df['direction'])\n", (958, 987), False, 'import numpy\n'), ((1003, 1037), 'numpy.array', 'numpy.array', (["self.puzzle_df['row']"], {}), "(self.puzzle_df['row'])\n", (1014, 1037), False, 'import numpy\n'), ((1081, 1115), 'numpy.array', 'numpy.array', (["self.puzzle_df['col']"], {}), "(self.puzzle_df['col'])\n", (1092, 1115), False, 'import numpy\n'), ((543, 556), 'utils.pos_range', 'pos_range', (['*x'], {}), '(*x)\n', (552, 556), False, 'from utils import pos_range\n')] |
import numpy as np
import re
import decimal
rule_names=[]
rules=[]
### Parsing
r1=re.compile("(.+): (\d+)-(\d+) or (\d+)-(\d+)")
with open('resources/day_16_tickets-data.txt','r') as f:
while(True):
m=r1.match(f.readline().strip())
if not m:
break
rule_names.append(m.groups()[0])
rules.append(list(map(int, m.groups()[1:])))
rules=np.array(rules, dtype=int)
while (True):
d=f.readline().strip()
if "your ticket" in d:
mt = np.array(list(map(int, f.readline().split(","))), dtype=int)
elif "nearby tickets" in d:
tickets=np.genfromtxt(f.read().split('\n'), delimiter=',', dtype='int')
break
#find invalid:
def apply_func(ticket, field, rule):
r=rules[rule]
v=tickets[ticket, field]
return ((r[0]<=v & v<= r[1])|(r[2]<=v & v<= r[3]))
vmatrix=np.fromfunction(np.vectorize(apply_func), (tickets.shape[0], tickets.shape[1], rules.shape[0]), dtype=int)
print("Bad ticket sum",np.sum(tickets[np.where(vmatrix.sum(axis=2)==0)]))
good_tickets=np.delete(tickets, np.where(vmatrix.sum(axis=2)==0)[0], 0)
good_matrix=np.delete(vmatrix, np.where(vmatrix.sum(axis=2)==0)[0], 0)
valid=good_matrix.sum(axis=0)==good_matrix.shape[0]
product=1
#compute the magic square
mask=np.ones(valid.shape, dtype='bool')
for i in range(valid.shape[0]):
cur=valid & mask
axissum=cur.sum(axis=1)
field=np.where(axissum==1)[0][0]
rule=np.where(cur[field, :]==True)[0][0]
if rule_names[rule].startswith("departure"):
#print(field, rule, mt[field])
product *= int(mt[field])
mask[:,rule]=False
print ("Product", product)
| [
"numpy.ones",
"re.compile",
"numpy.where",
"numpy.array",
"numpy.vectorize"
] | [((85, 135), 're.compile', 're.compile', (['"""(.+): (\\\\d+)-(\\\\d+) or (\\\\d+)-(\\\\d+)"""'], {}), "('(.+): (\\\\d+)-(\\\\d+) or (\\\\d+)-(\\\\d+)')\n", (95, 135), False, 'import re\n'), ((1300, 1334), 'numpy.ones', 'np.ones', (['valid.shape'], {'dtype': '"""bool"""'}), "(valid.shape, dtype='bool')\n", (1307, 1334), True, 'import numpy as np\n'), ((388, 414), 'numpy.array', 'np.array', (['rules'], {'dtype': 'int'}), '(rules, dtype=int)\n', (396, 414), True, 'import numpy as np\n'), ((893, 917), 'numpy.vectorize', 'np.vectorize', (['apply_func'], {}), '(apply_func)\n', (905, 917), True, 'import numpy as np\n'), ((1426, 1448), 'numpy.where', 'np.where', (['(axissum == 1)'], {}), '(axissum == 1)\n', (1434, 1448), True, 'import numpy as np\n'), ((1462, 1493), 'numpy.where', 'np.where', (['(cur[field, :] == True)'], {}), '(cur[field, :] == True)\n', (1470, 1493), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn as nn
from .utils import register_model, get_model
from . import cos_norm_classifier
@register_model('MannNet')
class MannNet(nn.Module):
"""Defines a Dynamic Meta-Embedding Network."""
def __init__(self, num_cls=10, model='LeNet', src_weights_init=None,
weights_init=None, use_domain_factor_selector=False, centroids_path=None, feat_dim=512):
super(MannNet, self).__init__()
self.name = 'MannNet'
self.base_model = model
self.num_cls = num_cls
self.feat_dim = feat_dim
self.use_domain_factor_selector = use_domain_factor_selector
self.cls_criterion = nn.CrossEntropyLoss()
self.gan_criterion = nn.CrossEntropyLoss()
self.centroids = torch.from_numpy(np.load(centroids_path)).float().cuda()
assert self.centroids is not None
self.centroids.requires_grad = False
self.setup_net()
if weights_init is not None:
self.load(weights_init)
elif src_weights_init is not None:
self.load_src_net(src_weights_init)
else:
raise Exception('MannNet must be initialized with weights.')
def forward(self, x_s, x_t):
"""Pass source and target images through their respective networks."""
score_s, x_s = self.src_net(x_s, with_ft=True)
score_t, x_t = self.tgt_net(x_t, with_ft=True)
if self.discrim_feat:
d_s = self.discriminator(x_s.clone())
d_t = self.discriminator(x_t.clone())
else:
d_s = self.discriminator(score_s.clone())
d_t = self.discriminator(score_t.clone())
return score_s, score_t, d_s, d_t
def setup_net(self):
"""Setup source, target and discriminator networks."""
self.src_net = get_model(self.base_model, num_cls=self.num_cls, feat_dim=self.feat_dim)
self.tgt_net = get_model(self.base_model, num_cls=self.num_cls, feat_dim=self.feat_dim)
input_dim = self.num_cls
self.discriminator = nn.Sequential(
nn.Linear(input_dim, 500),
nn.ReLU(),
nn.Linear(500, 500),
nn.ReLU(),
nn.Linear(500, 2),
)
self.fc_selector = nn.Linear(self.feat_dim, self.feat_dim)
if self.use_domain_factor_selector:
self.domain_factor_selector = nn.Linear(self.feat_dim, self.feat_dim)
self.classifier = cos_norm_classifier.create_model(self.feat_dim, self.num_cls)
self.image_size = self.src_net.image_size
self.num_channels = self.src_net.num_channels
def load(self, init_path):
"""Loads full src and tgt models."""
net_init_dict = torch.load(init_path)
self.load_state_dict(net_init_dict)
def load_src_net(self, init_path):
"""Initialize source and target with source weights."""
self.src_net.load(init_path)
self.tgt_net.load(init_path)
net_init_dict = torch.load(init_path)
classifier_weights = net_init_dict['classifier.weight']
self.classifier.weight.data = classifier_weights.data.clone()
def save(self, out_path):
torch.save(self.state_dict(), out_path)
def save_tgt_net(self, out_path):
torch.save(self.tgt_net.state_dict(), out_path)
| [
"torch.nn.ReLU",
"torch.nn.CrossEntropyLoss",
"torch.load",
"torch.nn.Linear",
"numpy.load"
] | [((687, 708), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (706, 708), True, 'import torch.nn as nn\n'), ((738, 759), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (757, 759), True, 'import torch.nn as nn\n'), ((2281, 2320), 'torch.nn.Linear', 'nn.Linear', (['self.feat_dim', 'self.feat_dim'], {}), '(self.feat_dim, self.feat_dim)\n', (2290, 2320), True, 'import torch.nn as nn\n'), ((2743, 2764), 'torch.load', 'torch.load', (['init_path'], {}), '(init_path)\n', (2753, 2764), False, 'import torch\n'), ((3012, 3033), 'torch.load', 'torch.load', (['init_path'], {}), '(init_path)\n', (3022, 3033), False, 'import torch\n'), ((2102, 2127), 'torch.nn.Linear', 'nn.Linear', (['input_dim', '(500)'], {}), '(input_dim, 500)\n', (2111, 2127), True, 'import torch.nn as nn\n'), ((2141, 2150), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2148, 2150), True, 'import torch.nn as nn\n'), ((2164, 2183), 'torch.nn.Linear', 'nn.Linear', (['(500)', '(500)'], {}), '(500, 500)\n', (2173, 2183), True, 'import torch.nn as nn\n'), ((2197, 2206), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2204, 2206), True, 'import torch.nn as nn\n'), ((2220, 2237), 'torch.nn.Linear', 'nn.Linear', (['(500)', '(2)'], {}), '(500, 2)\n', (2229, 2237), True, 'import torch.nn as nn\n'), ((2408, 2447), 'torch.nn.Linear', 'nn.Linear', (['self.feat_dim', 'self.feat_dim'], {}), '(self.feat_dim, self.feat_dim)\n', (2417, 2447), True, 'import torch.nn as nn\n'), ((803, 826), 'numpy.load', 'np.load', (['centroids_path'], {}), '(centroids_path)\n', (810, 826), True, 'import numpy as np\n')] |
import time
from collections import deque
import torch
import numpy as np
from ... import mohex, hex
from . import json, analysis
from .. import common
from rebar import arrdict
from pavlov import stats, runs, logs
from logging import getLogger
import activelo
import pandas as pd
from functools import wraps
from contextlib import contextmanager
from multiprocessing import set_start_method, Process
log = getLogger(__name__)
BOARDSIZES = [3, 5, 7, 9]
RUN_NAMES = [f'mohex-{s}' for s in BOARDSIZES]
def elos(run_name, names=None, queue=[]):
n = (json.symmetric_games(run_name)
.reindex(index=names, columns=names)
.fillna(0))
w = (json.symmetric_wins(run_name)
.reindex(index=names, columns=names)
.fillna(0))
for (i, j) in queue:
ni, nj = names[i], names[j]
w.loc[ni, nj] += (w.loc[ni, nj] + 1)/(n.loc[ni, nj] + 2)
n.loc[ni, nj] += 1
return activelo.solve(n.values, w.values)
def offdiag_refill(run, names, queue, count=1):
    """Refill `queue` in place with matchup index pairs, sampling only
    adjacent (one-off-diagonal) pairings and preferring the least-played
    ones.

    Args:
        run: run name whose recorded games seed the play counts.
        names: ordered agent names indexing the games matrix.
        queue: list of (i, j) index pairs; pending pairs count as played,
            and new pairs (plus their mirrors) are appended until
            ``len(queue) >= count``.
        count: target queue length.
    """
    n = (json.symmetric_games(run)
            .reindex(index=names, columns=names)
            .fillna(0))
    # Pairs already queued count as played so they aren't over-sampled.
    for (i, j) in queue:
        ni, nj = names[i], names[j]
        n.loc[ni, nj] += 1
    rs, cs = np.indices(n.shape)
    # Only pairings exactly one step off the diagonal are eligible.
    mask = ((rs == cs + 1) | (rs == cs - 1))
    excess = (n.values - n.values[mask].min())
    excess[~mask] = np.inf
    # Softmax over the negated excess counts; exp(-inf) == 0 kills the
    # ineligible cells. Compute the exponential once instead of twice.
    weights = np.exp(-excess)
    probs = weights/weights.sum()
    while len(queue) < count:
        idx = np.random.choice(np.arange(n.size), p=probs.flatten())
        pair = (idx // n.shape[0], idx % n.shape[0])
        queue.append(pair)
        queue.append(pair[::-1])
def uniform_refill(run, names, queue, count=1, target=128):
    """Refill `queue` in place, drawing matchup index pairs in proportion
    to how far each pairing still is from `target` recorded games.

    Mirrored pairs are appended together; the diagonal (self-play) is
    never drawn. Returns early when every pairing has reached `target`.
    """
    played = (json.symmetric_games(run)
                .reindex(index=names, columns=names)
                .fillna(0))
    # Treat already-queued pairs as played.
    for first, second in queue:
        played.loc[names[first], names[second]] += 1
    shortfall = (target - played.values).clip(0, None)
    shortfall[np.diag_indices_from(shortfall)] = 0
    if not (shortfall != 0).any():
        return
    probs = shortfall/shortfall.sum()
    width = played.shape[0]
    while len(queue) < count:
        flat = np.random.choice(np.arange(played.size), p=probs.flatten())
        pair = (flat // width, flat % width)
        queue.append(pair)
        queue.append(pair[::-1])
def reference_ladder(boardsize):
    """Run this to generate the `mohex-{boardsize}.json` files"""
    #TODO: This is all a mess. Why'd I design it this way?
    from IPython import display
    run_name = f'mohex-{boardsize}'
    agent = mohex.MoHexAgent()
    worlds = hex.Hex.initial(n_envs=8, boardsize=boardsize)
    # Ladder rungs: MoHex instances with randomness 0.00 .. 1.00 in steps of 0.1.
    universe = torch.linspace(0, 1, 11)
    names = sorted([f'mohex-{r:.2f}' for r in universe])
    # Fill the queue with pairings, then peel off one pairing per env slot.
    queue = []
    uniform_refill(run_name, names, queue, worlds.n_envs)
    active = torch.tensor(queue[:worlds.n_envs])
    queue = queue[worlds.n_envs:]
    # Per-env move counter, reset whenever that env's game finishes.
    moves = torch.zeros((worlds.n_envs,))
    while (queue or active.size(0)):
        display.clear_output(wait=True)
        print(f'{boardsize}: {len(queue)} in queue, {len(active)} in active')
        # Pick, per env, the randomness of whichever player is to move.
        idxs = active.gather(1, worlds.seats[:, None].long().cpu())[:, 0]
        agent.random = universe[idxs]
        decisions = agent(worlds)
        worlds, transitions = worlds.step(decisions.actions)
        moves += 1
        rewards = transitions.rewards.cpu()
        wins = (rewards == 1).int()
        terminal = transitions.terminal.cpu()
        # For every env whose game just ended: record the result, then
        # replace its pairing with the next one from the queue.
        for idx in terminal.nonzero(as_tuple=False).squeeze(-1):
            result = arrdict.arrdict(
                names=(f'mohex-{universe[active[idx][0]]:.2f}', f'mohex-{universe[active[idx][1]]:.2f}'),
                wins=tuple(map(int, wins[idx])),
                moves=int(moves[idx]),
                boardsize=worlds.boardsize)
            json.save(run_name, result)
            moves[idx] = 0
            uniform_refill(run_name, names, queue)
            if not queue:
                return
            active[idx] = torch.tensor(queue[0])
            queue = queue[1:]
def append(df, name):
    """Return `df` extended with a new row *and* column labelled `name`,
    filling the missing entries with zero (symmetric count tables)."""
    extended = [*df.index, name]
    return df.reindex(index=extended, columns=extended).fillna(0)
class RollingArena:
    # Evaluates an agent against the MoHex reference ladder, keeping a
    # rolling window of recent results and an incremental activelo fit.
    def __init__(self, worlds, max_history):
        """`worlds`: vectorized Hex envs; `max_history`: number of past
        evaluation rounds retained in the rolling window."""
        self.worlds = worlds
        self.mohex = mohex.MoHexAgent()
        # Bounded history of evaluation results; old rounds fall off.
        self.history = deque(maxlen=worlds.n_seats*max_history//self.worlds.n_envs)
        # Cached activelo solution, warm-started between calls to play().
        self.soln = None
    def play(self, agent):
        """Play `agent` against the ladder opponent activelo considers most
        informative; log/record the updated Elo and return a summary."""
        size = self.worlds.boardsize
        # Reference ladder counts, extended with a row/column for 'agent'.
        games = json.symmetric_games(f'mohex-{size}').pipe(append, 'agent')
        wins = json.symmetric_wins(f'mohex-{size}').pipe(append, 'agent')
        # Fold the rolling window of recent results into the tables.
        for result in self.history:
            games.loc[result.names[0], result.names[1]] += result.games
            games.loc[result.names[1], result.names[0]] += result.games
            wins.loc[result.names[0], result.names[1]] += result.wins[0]
            wins.loc[result.names[1], result.names[0]] += result.wins[1]
        self.soln = activelo.solve(games, wins, soln=self.soln)
        # Elo difference of 'agent' relative to the deterministic rung.
        μ, σ = analysis.difference(self.soln, 'mohex-0.00', 'agent')
        log.info(f'Agent elo is {μ:.2f}±{σ:.2f} based on {int(games.loc["agent"].sum())} games')
        stats.mean_std('elo-mohex', μ, σ)
        # Pick the rung whose matchup would most improve the fit.
        imp = activelo.improvement(self.soln)
        imp = pd.DataFrame(imp, games.index, games.index)
        challenger = imp['agent'].idxmax()
        # Rung names encode their randomness: 'mohex-0.30' -> 0.30.
        randomness = float(challenger.split('-')[1])
        self.mohex.random = randomness
        results = common.evaluate(self.worlds, {'agent': agent, challenger: self.mohex})
        log.info(f'Agent played {challenger}, {int(results[0].wins[0] + results[1].wins[1])}-{int(results[0].wins[1] + results[1].wins[0])}')
        self.history.extend(results)
        return arrdict.arrdict(games=games.loc['agent'].sum(), mean=μ, std=σ)
def run_sync(run):
    """Arena main loop: periodically reload the latest agent snapshot for
    `run` and pit it against the MoHex reference ladder, forever."""
    log.info('Arena launched')
    run = runs.resolve(run)
    log.info(f'Running arena for "{run}"')
    with logs.to_run(run), stats.to_run(run):
        worlds = common.worlds(run, 4)
        arena = RollingArena(worlds, 128)
        trials = 0
        agent = None
        loaded_at = 0
        stepped_at = 0
        while True:
            # Refresh the agent snapshot at most every 15 seconds.
            if time.time() - loaded_at > 15:
                loaded_at = time.time()
                agent = common.agent(run)
            # Once an agent exists, run at most one trial per second.
            if agent and (time.time() - stepped_at > 1):
                stepped_at = time.time()
                log.info('Running trial')
                arena.play(agent)
                trials += 1
@wraps(run_sync)
@contextmanager
def run(*args, **kwargs):
    """Launch `run_sync` in a spawned child process; on context exit, wait
    up to ~5 seconds for it to die on its own before terminating it."""
    set_start_method('spawn', True)
    proc = Process(target=run_sync, args=args, kwargs=kwargs, name='mohex-arena')
    try:
        proc.start()
        yield proc
    finally:
        # Poll up to 50 times, 0.1s apart, for a natural shutdown.
        for _ in range(50):
            if proc.is_alive():
                time.sleep(.1)
            else:
                log.info('Arena monitor dead')
                break
        else:
            log.info('Abruptly terminating arena monitor; it should have shut down naturally!')
            proc.terminate()
| [
"logging.getLogger",
"pavlov.runs.resolve",
"pavlov.logs.to_run",
"multiprocessing.Process",
"pavlov.stats.to_run",
"time.sleep",
"multiprocessing.set_start_method",
"numpy.arange",
"numpy.diag_indices_from",
"activelo.improvement",
"collections.deque",
"functools.wraps",
"numpy.exp",
"pan... | [((408, 427), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (417, 427), False, 'from logging import getLogger\n'), ((6455, 6470), 'functools.wraps', 'wraps', (['run_sync'], {}), '(run_sync)\n', (6460, 6470), False, 'from functools import wraps\n'), ((940, 974), 'activelo.solve', 'activelo.solve', (['n.values', 'w.values'], {}), '(n.values, w.values)\n', (954, 974), False, 'import activelo\n'), ((1235, 1254), 'numpy.indices', 'np.indices', (['n.shape'], {}), '(n.shape)\n', (1245, 1254), True, 'import numpy as np\n'), ((2617, 2641), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', '(11)'], {}), '(0, 1, 11)\n', (2631, 2641), False, 'import torch\n'), ((2787, 2822), 'torch.tensor', 'torch.tensor', (['queue[:worlds.n_envs]'], {}), '(queue[:worlds.n_envs])\n', (2799, 2822), False, 'import torch\n'), ((2870, 2899), 'torch.zeros', 'torch.zeros', (['(worlds.n_envs,)'], {}), '((worlds.n_envs,))\n', (2881, 2899), False, 'import torch\n'), ((5829, 5846), 'pavlov.runs.resolve', 'runs.resolve', (['run'], {}), '(run)\n', (5841, 5846), False, 'from pavlov import stats, runs, logs\n'), ((6517, 6548), 'multiprocessing.set_start_method', 'set_start_method', (['"""spawn"""', '(True)'], {}), "('spawn', True)\n", (6533, 6548), False, 'from multiprocessing import set_start_method, Process\n'), ((6557, 6627), 'multiprocessing.Process', 'Process', ([], {'target': 'run_sync', 'args': 'args', 'kwargs': 'kwargs', 'name': '"""mohex-arena"""'}), "(target=run_sync, args=args, kwargs=kwargs, name='mohex-arena')\n", (6564, 6627), False, 'from multiprocessing import set_start_method, Process\n'), ((1386, 1401), 'numpy.exp', 'np.exp', (['(-excess)'], {}), '(-excess)\n', (1392, 1401), True, 'import numpy as np\n'), ((1956, 1985), 'numpy.diag_indices_from', 'np.diag_indices_from', (['deficit'], {}), '(deficit)\n', (1976, 1985), True, 'import numpy as np\n'), ((2945, 2976), 'IPython.display.clear_output', 'display.clear_output', ([], {'wait': '(True)'}), 
'(wait=True)\n', (2965, 2976), False, 'from IPython import display\n'), ((4279, 4343), 'collections.deque', 'deque', ([], {'maxlen': '(worlds.n_seats * max_history // self.worlds.n_envs)'}), '(maxlen=worlds.n_seats * max_history // self.worlds.n_envs)\n', (4284, 4343), False, 'from collections import deque\n'), ((4928, 4971), 'activelo.solve', 'activelo.solve', (['games', 'wins'], {'soln': 'self.soln'}), '(games, wins, soln=self.soln)\n', (4942, 4971), False, 'import activelo\n'), ((5146, 5179), 'pavlov.stats.mean_std', 'stats.mean_std', (['"""elo-mohex"""', 'μ', 'σ'], {}), "('elo-mohex', μ, σ)\n", (5160, 5179), False, 'from pavlov import stats, runs, logs\n'), ((5195, 5226), 'activelo.improvement', 'activelo.improvement', (['self.soln'], {}), '(self.soln)\n', (5215, 5226), False, 'import activelo\n'), ((5241, 5284), 'pandas.DataFrame', 'pd.DataFrame', (['imp', 'games.index', 'games.index'], {}), '(imp, games.index, games.index)\n', (5253, 5284), True, 'import pandas as pd\n'), ((5900, 5916), 'pavlov.logs.to_run', 'logs.to_run', (['run'], {}), '(run)\n', (5911, 5916), False, 'from pavlov import stats, runs, logs\n'), ((5918, 5935), 'pavlov.stats.to_run', 'stats.to_run', (['run'], {}), '(run)\n', (5930, 5935), False, 'from pavlov import stats, runs, logs\n'), ((1485, 1502), 'numpy.arange', 'np.arange', (['n.size'], {}), '(n.size)\n', (1494, 1502), True, 'import numpy as np\n'), ((2130, 2147), 'numpy.arange', 'np.arange', (['n.size'], {}), '(n.size)\n', (2139, 2147), True, 'import numpy as np\n'), ((3948, 3970), 'torch.tensor', 'torch.tensor', (['queue[0]'], {}), '(queue[0])\n', (3960, 3970), False, 'import torch\n'), ((6826, 6841), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (6836, 6841), False, 'import time\n'), ((1402, 1417), 'numpy.exp', 'np.exp', (['(-excess)'], {}), '(-excess)\n', (1408, 1417), True, 'import numpy as np\n'), ((6191, 6202), 'time.time', 'time.time', ([], {}), '()\n', (6200, 6202), False, 'import time\n'), ((6342, 6353), 'time.time', 
'time.time', ([], {}), '()\n', (6351, 6353), False, 'import time\n'), ((6133, 6144), 'time.time', 'time.time', ([], {}), '()\n', (6142, 6144), False, 'import time\n'), ((6284, 6295), 'time.time', 'time.time', ([], {}), '()\n', (6293, 6295), False, 'import time\n')] |
# Automatic Domain Randomization, see https://arxiv.org/abs/1910.07113 for details
# Implemented by <NAME> and <NAME>
import numpy as np
from gym.spaces import Box
from collections import deque
from TeachMyAgent.teachers.algos.AbstractTeacher import AbstractTeacher
class ADR(AbstractTeacher):
    def __init__(self, mins, maxs, seed, env_reward_lb, env_reward_ub, step_size, max_reward_thr, min_reward_thr,
                 initial_dist=None, boundary_sampling_p=0.5, queue_len=10, scale_reward=False):
        '''
        Automatic Domain Randomization (https://arxiv.org/abs/1910.07113).

        Maintains a box-shaped task distribution that starts at a single
        point and is inflated/deflated per dimension depending on the
        agent's performance at the box boundaries.

        Args:
            mins: Lower bounds of the full task space, one per dimension
            maxs: Upper bounds of the full task space, one per dimension
            seed: Random seed
            env_reward_lb: Lower bound of the environment's episodic reward
            env_reward_ub: Upper bound of the environment's episodic reward
            step_size: Size of the growth (or decrease) of a bound at update
            max_reward_thr: Upper reward threshold used to inflate distribution
            min_reward_thr: Lower reward threshold used to deflate distribution
            initial_dist: The mean of this initial distribution is used as the initial task used by ADR
            boundary_sampling_p: Probability to sample a dimension at a bound
            queue_len: Size of the queue associated to each bound. Once reached, ADR increases or decreases the bound.
            scale_reward: If True, rescale the reward thresholds from
                [env_reward_lb, env_reward_ub] to [0, 1]
        '''
        AbstractTeacher.__init__(self, mins, maxs, env_reward_lb, env_reward_ub, seed)
        self.nb_dims = len(self.mins)
        # Boundary sampling probability p_r
        self.bound_sampling_p = boundary_sampling_p
        # ADR step size
        self.step_size = step_size
        # Max reward threshold, sampling distribution inflates if mean reward above this
        self.max_reward_threshold = max_reward_thr
        if scale_reward:
            self.max_reward_threshold = np.interp(self.max_reward_threshold,
                                                  (self.env_reward_lb, self.env_reward_ub),
                                                  (0, 1))
        # Min reward threshold, sampling distribution deflates if mean reward below this
        self.min_reward_threshold = min_reward_thr
        if scale_reward:
            self.min_reward_threshold = np.interp(self.min_reward_threshold,
                                                  (self.env_reward_lb, self.env_reward_ub),
                                                  (0, 1))
        # max queue length
        self.window_len = queue_len
        # Set initial task space to predefined calibrated task
        initial_mean, initial_variance = self.get_or_create_dist(initial_dist, mins, maxs, subspace=True)
        # Single task version (as the original paper): start from a point
        # distribution at the initial mean (min bound == max bound).
        self.cur_mins = initial_mean
        self.cur_maxs = initial_mean
        self.cur_mins = np.array(self.cur_mins, dtype=np.float32)  # current min bounds
        self.cur_maxs = np.array(self.cur_maxs, dtype=np.float32)  # current max bounds
        self.task_space = Box(self.cur_mins, self.cur_maxs, dtype=np.float32)
        self.task_space.seed(self.seed)
        # Init queues, one per task space dimension: rewards observed when a
        # task was sampled exactly at that dimension's min/max boundary.
        self.min_queues = [deque(maxlen=self.window_len) for _ in range(self.nb_dims)]
        self.max_queues = [deque(maxlen=self.window_len) for _ in range(self.nb_dims)]
        # Boring book-keeping
        self.episode_nb = 0
        self.bk = {'task_space': [(self.cur_mins.copy(),self.cur_maxs.copy())],
                   'episodes': []}

    def episodic_update(self, task, reward, is_success):
        """Record `reward` for `task`; if the task lay on a boundary and the
        corresponding queue is full, widen or shrink that boundary."""
        self.episode_nb += 1
        # check for updates
        for i, (min_q, max_q, cur_min, cur_max) in enumerate(zip(self.min_queues, self.max_queues, self.cur_mins, self.cur_maxs)):
            if task[i] == cur_min: # if the proposed task has the i^th dimension set to min boundary
                min_q.append(reward)
                if len(min_q) == self.window_len:
                    if np.mean(min_q) >= self.max_reward_threshold: # decrease min boundary (inflate sampling space)
                        self.cur_mins[i] = max(self.cur_mins[i] - self.step_size, self.mins[i])
                    elif np.mean(min_q) <= self.min_reward_threshold: # increase min boundary (deflate sampling space)
                        self.cur_mins[i] = min(self.cur_mins[i] + self.step_size, self.cur_maxs[i])
                    self.min_queues[i] = deque(maxlen=self.window_len) # reset queue
            if task[i] == cur_max: # if the proposed task has the i^th dimension set to max boundary
                max_q.append(reward)
                if len(max_q) == self.window_len: # queue is full, time to update
                    if np.mean(max_q) >= self.max_reward_threshold: # increase max boundary
                        self.cur_maxs[i] = min(self.cur_maxs[i] + self.step_size, self.maxs[i])
                    elif np.mean(max_q) <= self.min_reward_threshold: # decrease max boundary
                        self.cur_maxs[i] = max(self.cur_maxs[i] - self.step_size, self.cur_mins[i])
                    self.max_queues[i] = deque(maxlen=self.window_len) # reset queue
        # Rebuild the sampling Box (and book-keep) only if a bound moved.
        prev_cur_mins, prev_cur_maxs = self.bk['task_space'][-1]
        if (prev_cur_mins != self.cur_mins).any() or (prev_cur_maxs != self.cur_maxs).any(): # were boundaries changed ?
            self.task_space = Box(self.cur_mins, self.cur_maxs, dtype=np.float32)
            self.task_space.seed(self.seed)
            # book-keeping only if boundaries were updates
            self.bk['task_space'].append((self.cur_mins.copy(), self.cur_maxs.copy()))
            self.bk['episodes'].append(self.episode_nb)

    def sample_task(self):
        """Sample a task; with probability `bound_sampling_p`, pin one random
        dimension to its current min or max boundary (skipping boundaries
        already capped at the full task-space limits)."""
        new_task = self.non_exploratory_task_sampling()["task"]
        if self.random_state.random() < self.bound_sampling_p: # set random dimension to min or max bound
            idx = self.random_state.randint(0, self.nb_dims)
            is_min_max_capped = np.array([self.cur_mins[idx] == self.mins[idx], self.cur_maxs[idx] == self.maxs[idx]])
            if not is_min_max_capped.all(): # both min and max bounds can increase, choose extremum randomly
                if self.random_state.random() < 0.5: # skip min bound if already
                    new_task[idx] = self.cur_mins[idx]
                else:
                    new_task[idx] = self.cur_maxs[idx]
            elif not is_min_max_capped[0]:
                new_task[idx] = self.cur_mins[idx]
            elif not is_min_max_capped[1]:
                new_task[idx] = self.cur_maxs[idx]
        return new_task

    def non_exploratory_task_sampling(self):
        """Sample uniformly from the current Box, without boundary pinning."""
        return {"task": self.task_space.sample(),
                "infos": {
                    "bk_index": len(self.bk[list(self.bk.keys())[0]]) - 1,
                    "task_infos": None}
                }
} | [
"numpy.mean",
"collections.deque",
"gym.spaces.Box",
"numpy.array",
"numpy.interp",
"TeachMyAgent.teachers.algos.AbstractTeacher.AbstractTeacher.__init__"
] | [((1199, 1277), 'TeachMyAgent.teachers.algos.AbstractTeacher.AbstractTeacher.__init__', 'AbstractTeacher.__init__', (['self', 'mins', 'maxs', 'env_reward_lb', 'env_reward_ub', 'seed'], {}), '(self, mins, maxs, env_reward_lb, env_reward_ub, seed)\n', (1223, 1277), False, 'from TeachMyAgent.teachers.algos.AbstractTeacher import AbstractTeacher\n'), ((2647, 2688), 'numpy.array', 'np.array', (['self.cur_mins'], {'dtype': 'np.float32'}), '(self.cur_mins, dtype=np.float32)\n', (2655, 2688), True, 'import numpy as np\n'), ((2735, 2776), 'numpy.array', 'np.array', (['self.cur_maxs'], {'dtype': 'np.float32'}), '(self.cur_maxs, dtype=np.float32)\n', (2743, 2776), True, 'import numpy as np\n'), ((2825, 2876), 'gym.spaces.Box', 'Box', (['self.cur_mins', 'self.cur_maxs'], {'dtype': 'np.float32'}), '(self.cur_mins, self.cur_maxs, dtype=np.float32)\n', (2828, 2876), False, 'from gym.spaces import Box\n'), ((1679, 1770), 'numpy.interp', 'np.interp', (['self.max_reward_threshold', '(self.env_reward_lb, self.env_reward_ub)', '(0, 1)'], {}), '(self.max_reward_threshold, (self.env_reward_lb, self.\n env_reward_ub), (0, 1))\n', (1688, 1770), True, 'import numpy as np\n'), ((2072, 2163), 'numpy.interp', 'np.interp', (['self.min_reward_threshold', '(self.env_reward_lb, self.env_reward_ub)', '(0, 1)'], {}), '(self.min_reward_threshold, (self.env_reward_lb, self.\n env_reward_ub), (0, 1))\n', (2081, 2163), True, 'import numpy as np\n'), ((2997, 3026), 'collections.deque', 'deque', ([], {'maxlen': 'self.window_len'}), '(maxlen=self.window_len)\n', (3002, 3026), False, 'from collections import deque\n'), ((3084, 3113), 'collections.deque', 'deque', ([], {'maxlen': 'self.window_len'}), '(maxlen=self.window_len)\n', (3089, 3113), False, 'from collections import deque\n'), ((5184, 5235), 'gym.spaces.Box', 'Box', (['self.cur_mins', 'self.cur_maxs'], {'dtype': 'np.float32'}), '(self.cur_mins, self.cur_maxs, dtype=np.float32)\n', (5187, 5235), False, 'from gym.spaces import Box\n'), ((5774, 
5865), 'numpy.array', 'np.array', (['[self.cur_mins[idx] == self.mins[idx], self.cur_maxs[idx] == self.maxs[idx]]'], {}), '([self.cur_mins[idx] == self.mins[idx], self.cur_maxs[idx] == self.\n maxs[idx]])\n', (5782, 5865), True, 'import numpy as np\n'), ((4229, 4258), 'collections.deque', 'deque', ([], {'maxlen': 'self.window_len'}), '(maxlen=self.window_len)\n', (4234, 4258), False, 'from collections import deque\n'), ((4921, 4950), 'collections.deque', 'deque', ([], {'maxlen': 'self.window_len'}), '(maxlen=self.window_len)\n', (4926, 4950), False, 'from collections import deque\n'), ((3777, 3791), 'numpy.mean', 'np.mean', (['min_q'], {}), '(min_q)\n', (3784, 3791), True, 'import numpy as np\n'), ((4519, 4533), 'numpy.mean', 'np.mean', (['max_q'], {}), '(max_q)\n', (4526, 4533), True, 'import numpy as np\n'), ((3993, 4007), 'numpy.mean', 'np.mean', (['min_q'], {}), '(min_q)\n', (4000, 4007), True, 'import numpy as np\n'), ((4710, 4724), 'numpy.mean', 'np.mean', (['max_q'], {}), '(max_q)\n', (4717, 4724), True, 'import numpy as np\n')] |
import logging
from os.path import dirname, join, realpath
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import astroplan as ap
from scipy.constants import c as c_light_ms
from tqdm import tqdm
from skimage import io
from skimage import transform as tf
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
from scipy.stats import ttest_ind, norm, t
from astropy import units as u
from astropy.constants import c
from astropy.utils.iers import IERS_Auto
from cats.simulator.detector import Crires
from cats.extractor.runner import CatsRunner
from exoorbit.orbit import Orbit
# TODO List:
# - automatically mask points before fitting with SME
# - if star and planet steps aren't run manually, we use the initial values
# instead we should load the data if possible
# - Tests for all the steps
# - Refactoring of the steps, a lot of the code is strewm all over the place
# - Determine Uncertainties for each point
def shear(x, shear=1, inplace=False):
    """Apply an affine shear transform to image array `x` via skimage warp.

    NOTE(review): the `inplace` flag is accepted but currently unused —
    the warped image is always returned as a new array.
    """
    transform = tf.AffineTransform(shear=shear)
    return tf.warp(x, inverse_map=transform)
def gauss(x, height, mu, sig, floor):
    """Gaussian of amplitude `height`, mean `mu`, and stddev `sig`,
    sitting on a constant offset `floor`."""
    z = (x - mu) / sig
    return floor + height * np.exp(-0.5 * z ** 2)
def gaussfit(x, y, p0=None):
    """
    Fit a simple gaussian to data

    gauss(x, a, mu, sigma, floor) = a * exp(-z**2/2) + floor
    with z = (x - mu) / sigma

    Parameters
    ----------
    x : array(float)
        x values
    y : array(float)
        y values
    p0 : list, optional
        initial guess (a, mu, sigma, floor); estimated from the data
        when omitted

    Returns
    -------
    gauss(x), parameters
        fitted values for x, fit parameters (a, mu, sigma, floor)
    """
    if p0 is None:
        # Default guess: amplitude = data range, centred at 0, unit width,
        # floor at the data minimum.
        low = np.min(y)
        p0 = [np.max(y) - low, 0, 1, low]
    popt, _ = curve_fit(gauss, x, y, p0=p0)
    return gauss(x, *popt), popt
def welch_t(a, b, ua=None, ub=None):
    """Welch's t statistic between samples `a` and `b`.

    `ua`/`ub` are the standard errors of the two sample means; when
    omitted they are estimated as std/sqrt(n) from the samples.
    t = (mean(a) - mean(b)) / sqrt(ua**2 + ub**2)
    """
    if ua is None:
        ua = a.std() / np.sqrt(a.size)
    if ub is None:
        ub = b.std() / np.sqrt(b.size)
    mean_difference = a.mean() - b.mean()
    return mean_difference / np.sqrt(ua**2 + ub**2)
# Update IERS tables if necessary
IERS_Auto()
# Detector
setting = "K/2/4"
detectors = [1, 2, 3]
orders = [7, 6, 5, 4, 3, 2]
detector = Crires(setting, detectors, orders=orders)
# Linelist
linelist = join(dirname(__file__), "crires_k_2_4.lin")
# Star info
star = "WASP-107"
planet = "b"
# Initialize the CATS runner; all intermediate/final products go under
# the per-dataset directory layout below.
dataset = "WASP-107b_SNR200"
base_dir = realpath(join(dirname(__file__), f"../datasets/{dataset}"))
raw_dir = join(base_dir, "Spectrum_00")
medium_dir = join(base_dir, "medium")
done_dir = join(base_dir, "done")
runner = CatsRunner(
    detector,
    star,
    planet,
    linelist,
    base_dir=base_dir,
    raw_dir=raw_dir,
    medium_dir=medium_dir,
    done_dir=done_dir,
)
# Configure the RV grid of the cross correlation: +-rv_range km/s in
# steps of rv_step km/s (presumably km/s — TODO confirm units).
rv_step = 0.25
rv_range = 200
runner.configuration["cross_correlation"]["rv_range"] = rv_range
runner.configuration["cross_correlation"]["rv_points"] = int((2 * rv_range + 1) / rv_step)
runner.configuration["cross_correlation_reference"]["rv_range"] = rv_range
runner.configuration["cross_correlation_reference"]["rv_points"] = int((2 * rv_range + 1) / rv_step)
# Override data with known information
star = runner.star
planet = runner.planet
orbit = Orbit(star, planet)
planet.radius = 1 * u.Rjup
planet.mass = 1 * u.Mjup
# Required SNR to detect the atmospheric annulus against the stellar disk.
atmosphere_height = planet.atm_scale_height(star.teff)
snr = star.radius ** 2 / (2 * planet.radius * atmosphere_height)
snr = snr.decompose()
# Longest exposure before the planet RV smears over a resolution element.
velocity_semi_amplitude = orbit.radial_velocity_semiamplitude_planet()
t_exp = c / (2 * np.pi * velocity_semi_amplitude) * planet.period / detector.resolution
t_exp = t_exp.decompose()
print("SNR required: ", snr)
print("Maximum exposure time: ", t_exp)
print(f"Planet Velocity Kp {velocity_semi_amplitude.to('km/s')}")
# Run the Runner
# data = runner.run(["solve_problem"])
# d = data["solve_problem"]
# for k, v in d.items():
#     plt.plot(v.wavelength, v.flux, label=f"{k}")
# plt.legend()
# plt.show()
# data = runner.run_module("cross_correlation_reference", load=False)
data = runner.run_module("cross_correlation", load=True)
spectra = runner.data["spectra"]
# Barycentric correction, evaluated at the mid-time of the series
# (computed but not applied below — NOTE(review): verify intended).
observer = ap.Observer.at_site("paranal")
obstime = spectra.datetime[len(spectra)//2]
sky_location = star.coordinates
sky_location.obstime = obstime
sky_location.location = observer.location
correction = sky_location.radial_velocity_correction()
# runner.steps["cross_correlation"].plot(data, sysrem_iterations=5, sysrem_iterations_afterwards=6)
# for i in range(3, 10):
#     plt.plot(np.sum(data[f"{i}"][10:27], axis=0) / 100, label=f"{i}")
#     for j in range(10):
#         plt.plot(np.sum(data[f"{i}.{j}"][10:27], axis=0) / 100, label=f"{i}.{j}")
# Keep the CCF map for 7 SYSREM iterations: rows = exposures, columns = RV.
data = data["7"]
config = runner.configuration["cross_correlation"]
rv_range = config["rv_range"]
rv_points = config["rv_points"]
rv_step = (2 * rv_range + 1) / rv_points
rv = np.linspace(-rv_range, rv_range, rv_points)
# Quick-look plot of the CCF map with RV tick labels.
plt.imshow(data, aspect="auto", origin="lower")
plt.xlabel("rv [km/s]")
xticks = plt.xticks()[0][1:-1]
xticks_labels = np.interp(xticks, np.arange(len(rv)), rv)
xticks_labels = [f"{x:.3g}" for x in xticks_labels]
plt.xticks(xticks, labels=xticks_labels)
plt.show()
# Orbital phase of each exposure, relative to mid-transit.
datetime = spectra.datetime
phi = (datetime - planet.time_of_transit) / planet.period
phi = phi.to_value(1)
# We only care about the fraction
phi = phi % 1
c_light = c_light_ms * 1e-3
interpolator = interp1d(rv, data, kind="linear", bounds_error=False)
# Kp-vsys map: for each (vsys, Kp) pair, shift every exposure's CCF to the
# planet rest frame (vp = vsys + Kp sin(2*pi*phi)) and co-add along time.
vsys_min, vsys_max = 0, 25
kp_min, kp_max = 0, 300
vsys = np.linspace(vsys_min, vsys_max, int((vsys_max-vsys_min+1)//rv_step))
kp = np.linspace(kp_min, kp_max, int((kp_max-kp_min+1)//rv_step))
combined = np.zeros((len(kp), len(vsys)))
for i, vs in enumerate(tqdm(vsys)):
    for j, k in enumerate(tqdm(kp, leave=False)):
        vp = vs + k * np.sin(2 * np.pi * phi)
        # shifted = [np.interp(vp[i], rv, data[i], left=np.nan, right=np.nan) for i in range(len(vp))]
        shifted = np.diag(interpolator(vp))
        combined[j, i] = np.nansum(shifted)
# Normalize to the number of input spectra
combined /= data.shape[0]
combined /= combined.std()
# Normalize to median 0
median = np.nanmedian(combined)
combined -= median
# Iteratively locate the detection peak in the Kp-vsys map: alternate
# between fitting the vsys profile (averaged over a Kp window) and the
# Kp profile (averaged over a vsys window), refining the windows each pass.
kp_peak = combined.shape[0] // 2
kp_width = kp_peak
for i in range(3):
    # Determine the peak position in vsys and kp
    kp_width_int = int(np.ceil(kp_width))
    mean_vsys = np.nanmean(combined[kp_peak-kp_width_int+1:kp_peak+kp_width_int+1, :], axis=0)
    vsys_peak = np.argmax(mean_vsys)
    # And then fit gaussians to determine the width
    curve, vsys_popt = gaussfit(
        vsys,
        mean_vsys,
        p0=[mean_vsys[vsys_peak] - np.min(mean_vsys), vsys[vsys_peak], 1, np.min(mean_vsys)],
    )
    vsys_width = vsys_popt[2] / rv_step
    # Do the same for the planet velocity
    vsys_width_int = int(np.ceil(vsys_width)) // 4
    peak = combined[:, vsys_peak - vsys_width_int : vsys_peak + vsys_width_int + 1]
    mean_kp = np.nanmean(peak, axis=1)
    kp_peak = np.argmax(mean_kp)
    curve, kp_popt = gaussfit(
        kp,
        mean_kp,
        p0=[mean_kp[kp_peak] - np.min(mean_kp), kp[kp_peak], 1, np.min(mean_kp)],
    )
    kp_width = kp_popt[2] / rv_step
# Plot the results
ax = plt.subplot(121)
plt.imshow(combined, aspect="auto", origin="lower")
ax.add_patch(plt.Rectangle((vsys_peak-vsys_width, kp_peak-kp_width), 2 * vsys_width, 2 * kp_width, fill=False, color="red"))
plt.xlabel("vsys [km/s]")
xticks = plt.xticks()[0][1:-1]
xticks_labels = np.interp(xticks, np.arange(len(vsys)), vsys)
xticks_labels = [f"{x:.3g}" for x in xticks_labels]
plt.xticks(xticks, labels=xticks_labels)
plt.ylabel("Kp [km/s]")
yticks = plt.yticks()[0][1:-1]
yticks_labels = np.interp(yticks, np.arange(len(kp)), kp)
yticks_labels = [f"{y:.3g}" for y in yticks_labels]
plt.yticks(yticks, labels=yticks_labels)
plt.subplot(222)
plt.plot(vsys, gauss(vsys, *vsys_popt), "r--")
plt.plot(vsys, mean_vsys)
plt.vlines(vsys[vsys_peak], np.min(mean_vsys), mean_vsys[vsys_peak], "k", "--")
plt.xlabel("vsys [km/s]")
plt.subplot(224)
plt.plot(kp, mean_kp)
plt.vlines(kp[kp_peak], np.min(mean_kp), mean_kp[kp_peak], "k", "--")
plt.plot(kp, gauss(kp, *kp_popt), "r--")
plt.xlabel("Kp [km/s]")
plt.suptitle(dataset)
plt.show()
# Have to check that this makes sense
vsys_width = int(np.ceil(vsys_width))
kp_width = int(np.ceil(kp_width))
# In-trail vs out-of-trail significance: compare the pixel distribution
# inside the fitted peak box against everything outside it.
mask = np.full(combined.shape, False)
mask[kp_peak-kp_width:kp_peak+kp_width, vsys_peak - vsys_width : vsys_peak + vsys_width] = True
in_trail = combined[mask].ravel()
out_trail = combined[~mask].ravel()
hrange = (np.min(combined), np.max(combined))
bins = 100
in_values, hbins = np.histogram(in_trail, bins=bins, range=hrange, density=True)
out_values, _ = np.histogram(out_trail, bins=bins, range=hrange, density=True)
# Trimmed Welch t-test between the two pixel populations.
tresult = ttest_ind(in_trail, out_trail, equal_var=False, trim=0.25)
# TODO: What is the degrees of freedom
# If we use the number of points like in the scipy function we get very large sigma values
# so is it vsys and kp and err?
df = 3
pvalue = t.sf(np.abs(tresult.statistic), df)
sigma = norm.isf(pvalue)
# sigma = norm.isf(tresult.pvalue)
# Alternative sigma value, based on my understanding of sigmas and Gaussian distributions
# sigma = np.abs((in_trail.mean() - out_trail.mean()) / (in_trail.std() + out_trail.std()))
plt.hist(in_trail.ravel(), bins=hbins, density=True, histtype="step", label="in transit")
plt.hist(out_trail.ravel(), bins=hbins, density=True, histtype="step", label="out of transit")
plt.legend()
plt.title(f"{dataset}\nsigma: {sigma}")
plt.show()
# plt.imshow(data["5.6"], aspect="auto", origin="lower")
# plt.show()
# plt.plot(np.sum(shear(data[f"5.6"], -0.8), axis=0) / 100, label=f"5.6")
# plt.xlabel("v [km/s]")
# xticks = plt.xticks()[0][1:-1]
# xticks_labels = xticks - 100
# plt.xticks(xticks, labels=xticks_labels)
# plt.ylabel("ccf [SNR]")
# plt.legend()
# plt.show()
pass
| [
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"exoorbit.orbit.Orbit",
"scipy.interpolate.interp1d",
"numpy.nanmean",
"cats.extractor.runner.CatsRunner",
"scipy.stats.ttest_ind",
"numpy.sin",
"matplotlib.pyplot.imshow",
"numpy.histogram",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"ski... | [((2110, 2121), 'astropy.utils.iers.IERS_Auto', 'IERS_Auto', ([], {}), '()\n', (2119, 2121), False, 'from astropy.utils.iers import IERS_Auto\n'), ((2213, 2254), 'cats.simulator.detector.Crires', 'Crires', (['setting', 'detectors'], {'orders': 'orders'}), '(setting, detectors, orders=orders)\n', (2219, 2254), False, 'from cats.simulator.detector import Crires\n'), ((2506, 2535), 'os.path.join', 'join', (['base_dir', '"""Spectrum_00"""'], {}), "(base_dir, 'Spectrum_00')\n", (2510, 2535), False, 'from os.path import dirname, join, realpath\n'), ((2549, 2573), 'os.path.join', 'join', (['base_dir', '"""medium"""'], {}), "(base_dir, 'medium')\n", (2553, 2573), False, 'from os.path import dirname, join, realpath\n'), ((2585, 2607), 'os.path.join', 'join', (['base_dir', '"""done"""'], {}), "(base_dir, 'done')\n", (2589, 2607), False, 'from os.path import dirname, join, realpath\n'), ((2617, 2744), 'cats.extractor.runner.CatsRunner', 'CatsRunner', (['detector', 'star', 'planet', 'linelist'], {'base_dir': 'base_dir', 'raw_dir': 'raw_dir', 'medium_dir': 'medium_dir', 'done_dir': 'done_dir'}), '(detector, star, planet, linelist, base_dir=base_dir, raw_dir=\n raw_dir, medium_dir=medium_dir, done_dir=done_dir)\n', (2627, 2744), False, 'from cats.extractor.runner import CatsRunner\n'), ((3228, 3247), 'exoorbit.orbit.Orbit', 'Orbit', (['star', 'planet'], {}), '(star, planet)\n', (3233, 3247), False, 'from exoorbit.orbit import Orbit\n'), ((4153, 4183), 'astroplan.Observer.at_site', 'ap.Observer.at_site', (['"""paranal"""'], {}), "('paranal')\n", (4172, 4183), True, 'import astroplan as ap\n'), ((4874, 4917), 'numpy.linspace', 'np.linspace', (['(-rv_range)', 'rv_range', 'rv_points'], {}), '(-rv_range, rv_range, rv_points)\n', (4885, 4917), True, 'import numpy as np\n'), ((4919, 4966), 'matplotlib.pyplot.imshow', 'plt.imshow', (['data'], {'aspect': '"""auto"""', 'origin': '"""lower"""'}), "(data, aspect='auto', origin='lower')\n", (4929, 4966), True, 'import 
matplotlib.pyplot as plt\n'), ((4967, 4990), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""rv [km/s]"""'], {}), "('rv [km/s]')\n", (4977, 4990), True, 'import matplotlib.pyplot as plt\n'), ((5132, 5172), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xticks'], {'labels': 'xticks_labels'}), '(xticks, labels=xticks_labels)\n', (5142, 5172), True, 'import matplotlib.pyplot as plt\n'), ((5173, 5183), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5181, 5183), True, 'import matplotlib.pyplot as plt\n'), ((5385, 5438), 'scipy.interpolate.interp1d', 'interp1d', (['rv', 'data'], {'kind': '"""linear"""', 'bounds_error': '(False)'}), "(rv, data, kind='linear', bounds_error=False)\n", (5393, 5438), False, 'from scipy.interpolate import interp1d\n'), ((6130, 6152), 'numpy.nanmedian', 'np.nanmedian', (['combined'], {}), '(combined)\n', (6142, 6152), True, 'import numpy as np\n'), ((7200, 7216), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (7211, 7216), True, 'import matplotlib.pyplot as plt\n'), ((7217, 7268), 'matplotlib.pyplot.imshow', 'plt.imshow', (['combined'], {'aspect': '"""auto"""', 'origin': '"""lower"""'}), "(combined, aspect='auto', origin='lower')\n", (7227, 7268), True, 'import matplotlib.pyplot as plt\n'), ((7395, 7420), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""vsys [km/s]"""'], {}), "('vsys [km/s]')\n", (7405, 7420), True, 'import matplotlib.pyplot as plt\n'), ((7566, 7606), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xticks'], {'labels': 'xticks_labels'}), '(xticks, labels=xticks_labels)\n', (7576, 7606), True, 'import matplotlib.pyplot as plt\n'), ((7608, 7631), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Kp [km/s]"""'], {}), "('Kp [km/s]')\n", (7618, 7631), True, 'import matplotlib.pyplot as plt\n'), ((7773, 7813), 'matplotlib.pyplot.yticks', 'plt.yticks', (['yticks'], {'labels': 'yticks_labels'}), '(yticks, labels=yticks_labels)\n', (7783, 7813), True, 'import matplotlib.pyplot as plt\n'), ((7815, 7831), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (7826, 7831), True, 'import matplotlib.pyplot as plt\n'), ((7879, 7904), 'matplotlib.pyplot.plot', 'plt.plot', (['vsys', 'mean_vsys'], {}), '(vsys, mean_vsys)\n', (7887, 7904), True, 'import matplotlib.pyplot as plt\n'), ((7985, 8010), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""vsys [km/s]"""'], {}), "('vsys [km/s]')\n", (7995, 8010), True, 'import matplotlib.pyplot as plt\n'), ((8012, 8028), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (8023, 8028), True, 'import matplotlib.pyplot as plt\n'), ((8029, 8050), 'matplotlib.pyplot.plot', 'plt.plot', (['kp', 'mean_kp'], {}), '(kp, mean_kp)\n', (8037, 8050), True, 'import matplotlib.pyplot as plt\n'), ((8162, 8185), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Kp [km/s]"""'], {}), "('Kp [km/s]')\n", (8172, 8185), True, 'import matplotlib.pyplot as plt\n'), ((8187, 8208), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['dataset'], {}), '(dataset)\n', (8199, 8208), True, 'import matplotlib.pyplot as plt\n'), ((8209, 8219), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8217, 8219), True, 'import matplotlib.pyplot as plt\n'), ((8341, 8371), 'numpy.full', 'np.full', (['combined.shape', '(False)'], {}), '(combined.shape, False)\n', (8348, 8371), True, 'import numpy as np\n'), ((8616, 8677), 'numpy.histogram', 'np.histogram', (['in_trail'], {'bins': 'bins', 'range': 'hrange', 'density': '(True)'}), '(in_trail, bins=bins, range=hrange, density=True)\n', (8628, 8677), True, 'import numpy as np\n'), ((8694, 8756), 'numpy.histogram', 'np.histogram', (['out_trail'], {'bins': 'bins', 'range': 'hrange', 'density': '(True)'}), '(out_trail, bins=bins, range=hrange, density=True)\n', (8706, 8756), True, 'import numpy as np\n'), ((8768, 8826), 'scipy.stats.ttest_ind', 'ttest_ind', (['in_trail', 'out_trail'], {'equal_var': '(False)', 'trim': '(0.25)'}), '(in_trail, out_trail, equal_var=False, trim=0.25)\n', (8777, 8826), 
False, 'from scipy.stats import ttest_ind, norm, t\n'), ((9049, 9065), 'scipy.stats.norm.isf', 'norm.isf', (['pvalue'], {}), '(pvalue)\n', (9057, 9065), False, 'from scipy.stats import ttest_ind, norm, t\n'), ((9471, 9483), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9481, 9483), True, 'import matplotlib.pyplot as plt\n'), ((9484, 9526), 'matplotlib.pyplot.title', 'plt.title', (['f"""{dataset}\nsigma: {sigma}"""'], {}), '(f"""{dataset}\nsigma: {sigma}""")\n', (9493, 9526), True, 'import matplotlib.pyplot as plt\n'), ((9524, 9534), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9532, 9534), True, 'import matplotlib.pyplot as plt\n'), ((1023, 1054), 'skimage.transform.AffineTransform', 'tf.AffineTransform', ([], {'shear': 'shear'}), '(shear=shear)\n', (1041, 1054), True, 'from skimage import transform as tf\n'), ((1070, 1102), 'skimage.transform.warp', 'tf.warp', (['x'], {'inverse_map': 'afine_tf'}), '(x, inverse_map=afine_tf)\n', (1077, 1102), True, 'from skimage import transform as tf\n'), ((1704, 1733), 'scipy.optimize.curve_fit', 'curve_fit', (['gauss', 'x', 'y'], {'p0': 'p0'}), '(gauss, x, y, p0=p0)\n', (1713, 1733), False, 'from scipy.optimize import curve_fit\n'), ((2283, 2300), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (2290, 2300), False, 'from os.path import dirname, join, realpath\n'), ((5698, 5708), 'tqdm.tqdm', 'tqdm', (['vsys'], {}), '(vsys)\n', (5702, 5708), False, 'from tqdm import tqdm\n'), ((6352, 6442), 'numpy.nanmean', 'np.nanmean', (['combined[kp_peak - kp_width_int + 1:kp_peak + kp_width_int + 1, :]'], {'axis': '(0)'}), '(combined[kp_peak - kp_width_int + 1:kp_peak + kp_width_int + 1,\n :], axis=0)\n', (6362, 6442), True, 'import numpy as np\n'), ((6447, 6467), 'numpy.argmax', 'np.argmax', (['mean_vsys'], {}), '(mean_vsys)\n', (6456, 6467), True, 'import numpy as np\n'), ((6932, 6956), 'numpy.nanmean', 'np.nanmean', (['peak'], {'axis': '(1)'}), '(peak, axis=1)\n', (6942, 6956), True, 'import 
numpy as np\n'), ((6971, 6989), 'numpy.argmax', 'np.argmax', (['mean_kp'], {}), '(mean_kp)\n', (6980, 6989), True, 'import numpy as np\n'), ((7282, 7400), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(vsys_peak - vsys_width, kp_peak - kp_width)', '(2 * vsys_width)', '(2 * kp_width)'], {'fill': '(False)', 'color': '"""red"""'}), "((vsys_peak - vsys_width, kp_peak - kp_width), 2 * vsys_width,\n 2 * kp_width, fill=False, color='red')\n", (7295, 7400), True, 'import matplotlib.pyplot as plt\n'), ((7933, 7950), 'numpy.min', 'np.min', (['mean_vsys'], {}), '(mean_vsys)\n', (7939, 7950), True, 'import numpy as np\n'), ((8075, 8090), 'numpy.min', 'np.min', (['mean_kp'], {}), '(mean_kp)\n', (8081, 8090), True, 'import numpy as np\n'), ((8278, 8297), 'numpy.ceil', 'np.ceil', (['vsys_width'], {}), '(vsys_width)\n', (8285, 8297), True, 'import numpy as np\n'), ((8314, 8331), 'numpy.ceil', 'np.ceil', (['kp_width'], {}), '(kp_width)\n', (8321, 8331), True, 'import numpy as np\n'), ((8550, 8566), 'numpy.min', 'np.min', (['combined'], {}), '(combined)\n', (8556, 8566), True, 'import numpy as np\n'), ((8568, 8584), 'numpy.max', 'np.max', (['combined'], {}), '(combined)\n', (8574, 8584), True, 'import numpy as np\n'), ((9010, 9035), 'numpy.abs', 'np.abs', (['tresult.statistic'], {}), '(tresult.statistic)\n', (9016, 9035), True, 'import numpy as np\n'), ((2038, 2064), 'numpy.sqrt', 'np.sqrt', (['(ua ** 2 + ub ** 2)'], {}), '(ua ** 2 + ub ** 2)\n', (2045, 2064), True, 'import numpy as np\n'), ((2450, 2467), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (2457, 2467), False, 'from os.path import dirname, join, realpath\n'), ((5000, 5012), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (5010, 5012), True, 'import matplotlib.pyplot as plt\n'), ((5737, 5758), 'tqdm.tqdm', 'tqdm', (['kp'], {'leave': '(False)'}), '(kp, leave=False)\n', (5741, 5758), False, 'from tqdm import tqdm\n'), ((5979, 5997), 'numpy.nansum', 'np.nansum', (['shifted'], {}), 
'(shifted)\n', (5988, 5997), True, 'import numpy as np\n'), ((6317, 6334), 'numpy.ceil', 'np.ceil', (['kp_width'], {}), '(kp_width)\n', (6324, 6334), True, 'import numpy as np\n'), ((7430, 7442), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (7440, 7442), True, 'import matplotlib.pyplot as plt\n'), ((7641, 7653), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {}), '()\n', (7651, 7653), True, 'import matplotlib.pyplot as plt\n'), ((1183, 1217), 'numpy.exp', 'np.exp', (['(-((x - mu) / sig) ** 2 / 2)'], {}), '(-((x - mu) / sig) ** 2 / 2)\n', (1189, 1217), True, 'import numpy as np\n'), ((1678, 1687), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (1684, 1687), True, 'import numpy as np\n'), ((1907, 1922), 'numpy.sqrt', 'np.sqrt', (['a.size'], {}), '(a.size)\n', (1914, 1922), True, 'import numpy as np\n'), ((1965, 1980), 'numpy.sqrt', 'np.sqrt', (['b.size'], {}), '(b.size)\n', (1972, 1980), True, 'import numpy as np\n'), ((6808, 6827), 'numpy.ceil', 'np.ceil', (['vsys_width'], {}), '(vsys_width)\n', (6815, 6827), True, 'import numpy as np\n'), ((1649, 1658), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (1655, 1658), True, 'import numpy as np\n'), ((1661, 1670), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (1667, 1670), True, 'import numpy as np\n'), ((5783, 5806), 'numpy.sin', 'np.sin', (['(2 * np.pi * phi)'], {}), '(2 * np.pi * phi)\n', (5789, 5806), True, 'import numpy as np\n'), ((6673, 6690), 'numpy.min', 'np.min', (['mean_vsys'], {}), '(mean_vsys)\n', (6679, 6690), True, 'import numpy as np\n'), ((7115, 7130), 'numpy.min', 'np.min', (['mean_kp'], {}), '(mean_kp)\n', (7121, 7130), True, 'import numpy as np\n'), ((6634, 6651), 'numpy.min', 'np.min', (['mean_vsys'], {}), '(mean_vsys)\n', (6640, 6651), True, 'import numpy as np\n'), ((7082, 7097), 'numpy.min', 'np.min', (['mean_kp'], {}), '(mean_kp)\n', (7088, 7097), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Bug fix: Iterable was removed from the `collections` namespace in Python 3.10;
# it has lived in `collections.abc` since Python 3.3.
from collections import OrderedDict
from collections.abc import Iterable
import numpy as np
import matplotlib
matplotlib.use('Agg')  # headless backend must be selected before importing pyplot
import matplotlib.pyplot as plt
plt.ioff()  # disable interactive mode: figures are only written to files
from RouToolPa.Parsers.Abstract import Record, Collection, Metadata, Header
from RouToolPa.Parsers.VCF import CollectionVCF, MetadataVCF, HeaderVCF
class RecordCCF(Record, Iterable):
    """A CCF record: a cluster of VCF variants located on one chromosome,
    together with per-cluster statistics, flags and an optional subclustering.

    Known flags:
        IP - indel(s) are present in the cluster
        BR - cluster contains record(s) located in bad regions
    """

    @staticmethod
    def _check_chrom(vcf_records_list):
        """Return the chromosome shared by all records of the cluster.

        Raises ValueError if the records come from different chromosomes.
        """
        chrom = vcf_records_list[0].chrom
        for record in vcf_records_list:
            if record.chrom != chrom:
                raise ValueError("Records from different regions in same cluster")
        return chrom

    def __init__(self, id=None, chrom=None, size=None, start=None, end=None, description=None, flags=None,
                 collection_vcf=None, bad_vcf_records=0, from_records=True, subclusters=None, features=None,
                 ):
        """Build a cluster either from its VCF records (from_records=True) or
        from explicitly supplied attributes (from_records=False, used by
        CollectionCCF.read)."""
        self.records = collection_vcf
        self.subclusters = subclusters
        self.mean_dist = None
        if from_records:
            self.chrom = self._check_chrom(collection_vcf)
            self.size = len(collection_vcf)
            self.start = collection_vcf[0].pos
            # Cluster end accounts for the longest allele of the last variant.
            self.end = collection_vcf[-1].pos - 1 + \
                       max(map(lambda x: len(x), collection_vcf[-1].alt_list + [collection_vcf[-1].ref]))
            self.description = description if description else {}
            self.features = features
            if id:
                self.id = id
            else:
                self.id = "CL_%s_%i" % (self.chrom, self.start)
            # Bug fix: flags must be initialized BEFORE the indel scan. The
            # original added "IP" to a not-yet-existing set (AttributeError)
            # and then reassigned self.flags afterwards, losing "IP" anyway.
            self.flags = set(flags) if flags else set()
            for record in self.records:
                if record.check_indel():
                    self.flags.add("IP")
                    break
            self.bad_records = 0
            for record in self.records:
                if "BR" in record.flags:
                    self.bad_records += 1
        else:
            # Bug fix: chrom/start are assigned before the default id is built;
            # the original read self.chrom before setting it.
            self.chrom = chrom
            self.size = size
            self.start = start
            self.end = end
            self.features = features
            self.description = description
            if id:
                self.id = id
            else:
                self.id = "CL_%s_%i" % (self.chrom, self.start)
            self.flags = set(flags) if flags else set()
            self.bad_records = bad_vcf_records
        self.len = self.end - self.start + 1
        # Bug fix: use the counted self.bad_records (the bad_vcf_records parameter
        # is always 0 when building from records), and actually remove the flag -
        # the original "self.flags - set(['BR'])" was a no-op expression.
        if self.bad_records > 0:
            self.flags.add("BR")
        else:
            self.flags.discard("BR")
        distances = self.distances()
        if self.description is None:
            self.description = {}
        if self.size > 1:
            median_distance = np.median(distances)  # computed once, reused below
            self.description["Mean"] = np.mean(distances)
            self.description["Median"] = median_distance
            self.description["Power"] = self.size / median_distance

    def __len__(self):
        return self.size

    def __iter__(self):
        for record in self.records:
            yield record

    def __str__(self):
        attributes_string = "Size=%i;Bad_records=%i" % (self.size, self.bad_records)
        if self.flags:
            attributes_string += ";" + ";".join(self.flags)
        if self.description:
            attributes_string += ";" + ";".join(["%s=%s" % (key, ",".join(map(lambda x: str(x), self.description[key])) if isinstance(self.description[key], list) or isinstance(self.description[key], set) else str(self.description[key]))
                                                 for key in self.description])
        # Bug fix: "is not None" instead of "!= None" - subclusters may be a numpy
        # array, for which "!= None" is an elementwise comparison whose truth
        # value is ambiguous.
        if self.subclusters is not None:
            attributes_string += ";Subclusters=" + ",".join(map(lambda x: str(x), self.subclusters))
        cluster_string = ">%s\t%s\t%i\t%i\t%s" % (self.id, self.chrom, self.start, self.end, attributes_string)
        return cluster_string + "\nVariants\n\t" + "\n\t".join([str(record) for record in self.records])

    def distances(self):
        """Return distances between successive variants of the cluster."""
        positions = np.array([record.pos for record in self])
        return np.ediff1d(positions)

    def reset_flags(self, flags_to_reset='all'):
        """Remove flags from the cluster.

        flags_to_reset: 'all' removes every flag, a set/list removes only the
        listed flags, None/empty leaves the flags untouched.
        """
        if flags_to_reset == 'all':
            self.flags = set([])
        elif flags_to_reset:
            flags_to_remove = set(flags_to_reset)
            self.flags = self.flags - flags_to_remove

    def check_flags(self, flag_list, mismatch_list=[], expression_list=[], remove_mismatch_list=[],
                    flags_to_reset=None, mode="all", min_cluster_size=[]):
        """Set cluster-level flags based on per-record flags.

        mode="one": a cluster inherits every flag present on any record.
        mode="all": a flag is set only if at most mismatch_list[i] records lack
        it; mismatching records may be removed (remove_mismatch_list) when the
        corresponding expression allows it and the cluster is large enough.
        NOTE: expression_list entries are passed to eval() - never feed
        untrusted strings here.
        """
        # TODO: fix changes in loc in description after removal
        # at the moment subclustering resets after check if some record was removed
        self.reset_flags(flags_to_reset)
        if self.flags is None:
            self.flags = set([])
        mismatches = mismatch_list if mismatch_list else [0 for flag in flag_list]
        min_cluster = min_cluster_size if min_cluster_size else [3 for flag in flag_list]
        remove_list = remove_mismatch_list if remove_mismatch_list else [False for flag in flag_list]
        if mode == "one":
            for record in self:
                self.flags |= record.flags
        elif mode == "all":
            record_to_remove_dict = dict([(flag, set([])) for flag in flag_list])
            mismatch_count_dict = dict([(flag, 0) for flag in flag_list])
            index = 0
            for record in self:
                expressions = [eval(expression) for expression in expression_list] \
                    if expression_list else [True for flag in flag_list]
                for flag, expression, min_size in zip(flag_list, expressions, min_cluster):
                    if flag not in record.flags:
                        if mismatch_count_dict[flag] is not None:
                            mismatch_count_dict[flag] += 1
                            if expression and self.size >= min_size:
                                # expression to reconsider cluster with mismatch as good
                                record_to_remove_dict[flag].add(index)
                            else:
                                # None marks a flag that can no longer be set
                                mismatch_count_dict[flag] = None
                index += 1
            for flag, mismatch, min_size in zip(flag_list, mismatches, min_cluster):
                if (mismatch_count_dict[flag] is not None) and mismatch_count_dict[flag] <= mismatch:
                    self.flags.add(flag)
                    if mismatch_count_dict[flag] > 0:
                        self.description["N%sR" % flag] = [mismatch_count_dict[flag]]
                else:
                    record_to_remove_dict[flag].clear()
            records_to_remove = set([])
            for flag, remove in zip(flag_list, remove_list):
                if remove and (mismatch_count_dict[flag] is not None):
                    records_to_remove |= record_to_remove_dict[flag]
            # reverse order so earlier indices stay valid while popping
            records_to_remove = sorted(records_to_remove, reverse=True)
            if records_to_remove:
                for index in records_to_remove:
                    self.records.records.pop(index)
                    self.subclusters = np.delete(self.subclusters, index)
                # Rebuild the cluster statistics from the remaining records.
                self.__init__(collection_vcf=self.records, description=self.description,
                              flags=self.flags, subclusters=self.subclusters,
                              from_records=True)

    def check_location(self, bad_region_collection_gff, expression="bad_region.start <= self.pos <= bad_region.end"):
        """Count records flagged "BR" and set the cluster-level "BR" flag.

        NOTE(review): bad_region_collection_gff and expression are currently
        unused - the per-record "BR" flags are expected to be set already.
        """
        self.bad_records = 0
        for variant in self:
            if "BR" in variant.flags:
                self.bad_records += 1
        if self.bad_records > 0:
            self.flags.add("BR")

    def get_location(self, record_dict, key="Loc", use_synonym=False, synonym_dict=None):
        """Collect feature types overlapping the cluster's variants into
        description[key] and remember the overlapping features.

        Written for the old Biopython variant (with sub_features) rather than
        the new one (with CompoundLocation). The id of a SeqRecord in
        record_dict must match record.pos.
        """
        if not self.description:
            self.description = {}
        if not self.features:
            self.features = []
        if key not in self.description:
            self.description[key] = set([])
        for variant in self:
            if key in variant.description:
                self.description[key] |= set(variant.description[key])
            for feature in record_dict[self.chrom].features:
                # VCF positions are 1-based, feature coordinates 0-based
                if (variant.pos - 1) in feature:
                    self.features.append(feature)
                    self.description[key].add(self.get_synonym(feature.type, use_synonym=use_synonym,
                                                               synonym_dict=synonym_dict))
                    for sub_feature in feature.sub_features:
                        if (variant.pos - 1) in sub_feature:
                            self.description[key].add(self.get_synonym(sub_feature.type, use_synonym=use_synonym,
                                                                       synonym_dict=synonym_dict))

    def subclustering(self,
                      method="inconsistent",
                      threshold=0.8,
                      cluster_distance='average'):
        """Hierarchically split the cluster and store subcluster labels."""
        tmp = self.records.get_clusters(extracting_method=method,
                                        threshold=threshold,
                                        cluster_distance=cluster_distance,
                                        split_by_regions=False,
                                        draw_dendrogramm=False,
                                        return_collection=False,
                                        write_inconsistent=False,
                                        write_correlation=False)
        # Bug fix: dict views are not indexable in Python 3 (tmp.keys()[0]).
        self.subclusters = tmp[next(iter(tmp))]

    def adjust(self, border_limit=None, min_size_to_adjust=2, remove_border_subclusters=False, remove_size_limit=1):
        """Adjust cluster borders using the subclustering; return the list of
        resulting cluster records (possibly just [self]).

        Small border subclusters (size <= remove_size_limit) may be split off
        as separate clusters when remove_border_subclusters is True.
        """
        # skip adjustment for clusters that are too small or not subclustered
        if (self.size < min_size_to_adjust) or (self.subclusters is None):
            return [self]
        limit = border_limit if border_limit else len(self.subclusters)
        for i in range(0, limit):
            if self.subclusters[i] == self.subclusters[0]:
                left_subcluster_end = i
            else:
                break
        # cluster consists of a single subcluster - nothing to adjust
        if left_subcluster_end == len(self.subclusters) - 1:
            return [self]
        for i in range(-1, -limit - 1, -1):
            if self.subclusters[i] == self.subclusters[-1]:
                right_subcluster_start = i
            else:
                break
        if remove_border_subclusters:
            start = left_subcluster_end + 1 if left_subcluster_end < remove_size_limit else 0
            end = right_subcluster_start if right_subcluster_start >= -remove_size_limit else len(self.subclusters)
            new_left_cluster, new_right_cluster = None, None
            if start > 0:
                new_left_cluster = RecordCCF(collection_vcf=CollectionVCF(record_list=self.records.records[:start], from_file=False),
                                            subclusters=self.subclusters[:start], from_records=True)
            if end < len(self.subclusters):
                new_right_cluster = RecordCCF(collection_vcf=CollectionVCF(record_list=self.records.records[end:], from_file=False),
                                             subclusters=self.subclusters[end:], from_records=True)
            new_middle_cluster = RecordCCF(collection_vcf=CollectionVCF(record_list=self.records.records[start:end], from_file=False),
                                           subclusters=self.subclusters[start:end], from_records=True)
            cluster_list = [new_left_cluster] if new_left_cluster else []
            cluster_list += [new_middle_cluster]
            cluster_list += [new_right_cluster] if new_right_cluster else []
            return cluster_list
        # Bug fix: the original implicitly returned None here, which crashed
        # CollectionCCF.adjust ("new_records += None").
        return [self]

    def add_description(self, descr_name, value):
        """Set description[descr_name] = value, creating the dict if needed."""
        if not self.description:
            self.description = {}
        self.description[descr_name] = value

    def check_strandness(self):
        """Record strand homogeneity of the cluster (for deaminases only):
        the fraction of reference "C" bases determines the dominant strand."""
        count_C = 0.
        for record in self:
            if record.ref == "C":
                count_C += 1.
        homogenity = count_C / self.size
        self.add_description("Homogeneity", homogenity if homogenity >= 0.5 else 1.0 - homogenity)
        self.add_description("Strand", "P" if homogenity >= 0.5 else "N")

    def gff_string(self):
        """Return the cluster as a single GFF line (no per-variant lines)."""
        attributes_string = "Size=%i;Bad_records=%i" % (self.size, self.bad_records)
        if self.flags:
            attributes_string += ";" + ";".join(self.flags)
        if self.description:
            attributes_string += ";" + ";".join(["%s=%s" % (key, ",".join(map(lambda x: str(x), self.description[key])) if isinstance(self.description[key], list) or isinstance(self.description[key], set) else str(self.description[key]))
                                                 for key in self.description])
        # Bug fix: "is not None" instead of "!= None" (subclusters may be a numpy array).
        if self.subclusters is not None:
            attributes_string += ";Subclusters=" + ",".join(map(lambda x: str(x), self.subclusters))
        return "%s\t%s\t%s\t%i\t%i\t.\t.\t.\t%s" % (self.chrom, "custom", "cluster", self.start, self.end, attributes_string)
class MetadataCCF(Metadata):
    """Metadata of a CCF file: sample names plus the metadata and header of
    the source VCF file the clusters were built from."""

    def __init__(self, samples, vcf_metadata=None, vcf_header=None, metadata=None):
        """
        samples      -- list of sample names (columns 10+ of the VCF header)
        vcf_metadata -- MetadataVCF of the source VCF, if any
        vcf_header   -- HeaderVCF of the source VCF, if any
        metadata     -- extra key/value metadata pairs (dict)
        """
        self.samples = samples  # list
        # Bug fix: avoid a mutable default argument ({} was shared between instances).
        self.metadata = {} if metadata is None else metadata
        self.vcf_metadata = vcf_metadata
        self.vcf_header = vcf_header

    def __str__(self):
        # NOTE(review): returns None when no section is present - verify callers.
        metadata_string = None
        if self.vcf_metadata:
            metadata_string = "#VCF_METADATA START\n" + str(self.vcf_metadata) + "\n#VCF_METADATA END"
        if self.vcf_header:
            metadata_string = metadata_string + "\n#VCF_HEADER\n" + str(self.vcf_header) \
                if metadata_string else "#VCF_HEADER\n" + str(self.vcf_header)
        if self.metadata:
            metadata_string += "\n##" + "\n##".join(["%s=%s" % (key, self.metadata[key]) for key in self.metadata])
        return metadata_string
class HeaderCCF(list, Header):
    """CCF file header: a list of column names."""

    def __str__(self):
        # Render as the header block of a .ccf file.
        columns = "\t".join(self)
        return "#CCF_HEADER\n#%s" % columns
class CollectionCCF(Collection):
    # Collection of RecordCCF clusters. Supports reading/writing .ccf files,
    # filtering by flags/expression, border adjustment, subclustering and
    # several matplotlib statistics plots (figures are written to files only).

    def read(self, input_file):
        """Parse a .ccf file: optional VCF metadata/header sections, the CCF
        header, then cluster lines (">...") each followed by tab-indented
        VCF variant lines belonging to that cluster."""
        # TODO: write read from ccf file
        with open(input_file, "r") as in_fd:
            stripped_line = in_fd.readline().strip()
            if stripped_line == "#VCF_METADATA START":
                vcf_metadata = MetadataVCF()
                stripped_line = in_fd.readline().strip()
                while (stripped_line != "#VCF_METADATA END"):
                    vcf_metadata.add_metadata(stripped_line)
                    stripped_line = in_fd.readline().strip()
            stripped_line = in_fd.readline().strip()
            if stripped_line == "#VCF_HEADER":
                header_line = in_fd.readline().strip()
                vcf_header = HeaderVCF(header_line[1:].split("\t"))
                # sample names start at column 10 of the VCF header
                self.metadata = MetadataCCF(vcf_header[9:], vcf_metadata=vcf_metadata, vcf_header=vcf_header)
            stripped_line = in_fd.readline().strip()
            if stripped_line == "#CCF_HEADER":
                header_line = in_fd.readline().strip()
                self.header = HeaderCCF(header_line[1:].split("\t"))
            # flag == 1 means a cluster header has been seen and its variant
            # lines are being accumulated in collection_vcf
            flag = 0
            self.records = []
            while True:
                data_line = in_fd.readline()
                if data_line == "" or data_line == "\n":
                    break
                stripped_line = data_line.strip()
                if data_line[0] == "\t":
                    # tab-indented line: a VCF variant of the current cluster
                    collection_vcf.records.append(collection_vcf.add_record(stripped_line, external_metadata=self.metadata.vcf_metadata))
                    flag = 1
                    continue
                if flag != 0:
                    # a new non-variant line closes the previous cluster
                    self.records.append(RecordCCF(id=cluster_id, chrom=chrom, size=size, start=start, end=end,
                                                  description=description, flags=flags,
                                                  collection_vcf=collection_vcf, bad_vcf_records=bad_records,
                                                  from_records=False, subclusters=subclusters))
                if stripped_line[0] == ">":
                    # cluster header: ">id<TAB>chrom<TAB>start<TAB>end<TAB>attr;attr..."
                    flag = 0
                    cluster_id, chrom, start, end, description_and_flags = stripped_line[1:].split("\t")
                    start = int(start)
                    end = int(end)
                    description_and_flags = description_and_flags.split(";")
                    description = OrderedDict({})
                    flags = set([])
                    subclusters = None
                    for descr_entry in description_and_flags:
                        descr_entry_splited = descr_entry.split("=")
                        if len(descr_entry_splited) == 1:
                            # entries without "=" are flags
                            flags.add(descr_entry_splited[0])
                            continue
                        if descr_entry_splited[0] == "Size":
                            size = int(descr_entry_splited[1])
                        elif descr_entry_splited[0] == "Bad_records":
                            bad_records = int(descr_entry_splited[1])
                        elif descr_entry_splited[0] == "Mean" or descr_entry_splited[0] == "Median" or descr_entry_splited[0] == "Power" or descr_entry_splited[0] == "Homogeneity":
                            description[descr_entry_splited[0]] = float(descr_entry_splited[1])
                        elif descr_entry_splited[0] == "Loc":
                            description[descr_entry_splited[0]] = descr_entry_splited[1].split(",")
                        elif descr_entry_splited[0] == "Subclusters":
                            subclusters = [int(x) for x in descr_entry_splited[1].split(",")]
                        else:
                            description[descr_entry_splited[0]] = descr_entry_splited[1].split(",")
                            if len(description[descr_entry_splited[0]]) == 1:
                                description[descr_entry_splited[0]] = description[descr_entry_splited[0]][0]
                    collection_vcf = CollectionVCF(metadata=None, record_list=None, header=None, vcf_file=None, samples=None, from_file=False, external_metadata=None)
                    continue
            # flush the last cluster of the file
            self.records.append(RecordCCF(id=cluster_id, chrom=chrom, size=size, start=start, end=end,
                                          description=description, flags=flags,
                                          collection_vcf=collection_vcf, bad_vcf_records=bad_records,
                                          from_records=False, subclusters=subclusters))

    def filter_by_expression(self, expression):
        """Split the collection into (matching, non-matching) CollectionCCFs
        using an expression evaluated per record."""
        filtered_records, filtered_out_records = self.filter_records_by_expression(expression)
        return CollectionCCF(metadata=self.metadata, record_list=filtered_records,
                             from_file=False,
                             header=self.header), \
               CollectionCCF(metadata=self.metadata, record_list=filtered_out_records,
                             from_file=False,
                             header=self.header)

    def filter_by_size(self, min_size=3):
        """Keep clusters with at least min_size variants; return (kept, removed)."""
        return self.filter_by_expression("record.size >= %i" % min_size)

    def filter_by_flags(self, white_flag_list=[], black_flag_list=[]):
        """Split clusters by flags: with a white list, keep records having a
        white flag and no black flag; otherwise keep records with no black flag.
        Returns (kept, removed)."""
        filtered_records = []
        filtered_out_records = []
        white_list = set(white_flag_list)
        black_list = set(black_flag_list)
        for record in self.records:
            if white_list:
                if (white_list & record.flags) and not (black_list & record.flags):
                    filtered_records.append(record)
                else:
                    filtered_out_records.append(record)
            else:
                if black_list & record.flags:
                    filtered_out_records.append(record)
                else:
                    filtered_records.append(record)
        return CollectionCCF(metadata=self.metadata, record_list=filtered_records,
                             header=self.header), \
               CollectionCCF(metadata=self.metadata, record_list=filtered_out_records,
                             header=self.header)

    def check_record_location(self, bad_region_collection_gff):
        """Run check_location on every cluster (sets per-cluster BR data)."""
        for record in self:
            record.check_location(bad_region_collection_gff)

    def adjust(self, border_limit=None, min_size_to_adjust=2, remove_border_subclusters=False, remove_size_limit=1):
        """Replace each cluster by the result of RecordCCF.adjust (clusters may
        be split at subcluster borders)."""
        new_records = []
        for record in self:
            new_records += record.adjust(border_limit=border_limit, min_size_to_adjust=min_size_to_adjust,
                                         remove_border_subclusters=remove_border_subclusters,
                                         remove_size_limit=remove_size_limit)
        self.records = new_records

    def subclustering(self,
                      method="inconsistent",
                      threshold=0.8,
                      cluster_distance='average'):
        """Subcluster every cluster with at least 3 variants."""
        for record in self:
            if len(record) < 3:
                continue
            record.subclustering(method=method,
                                 threshold=threshold,
                                 cluster_distance=cluster_distance)
    """
    def get_collection_vcf(self, metadata, header):
        vcf_records = []
        for cluster in self:
            vcf_records += cluster.records
        return CollectionVCF(metadata=metadata, header=header, record_list=vcf_records)
    """
    def count(self):
        """Return the list of cluster sizes."""
        sizes = []
        for record in self:
            sizes.append(record.size)
        return sizes

    def statistics(self, filename="cluster_size_distribution.svg", title="Distribution of sizes of clusters",
                   dpi=150, figsize=(10, 10), facecolor="green"):
        """Plot a histogram of cluster sizes and save it to filename."""
        plt.figure(1, dpi=dpi, figsize=figsize)
        plt.subplot(1, 1, 1)
        plt.suptitle(title)
        counts = self.count()
        maximum = max(counts)
        # one bin per possible cluster size
        bins = np.linspace(0, maximum, maximum)
        plt.hist(counts, bins, facecolor=facecolor)
        plt.xticks(np.arange(0, maximum, 1.0))
        plt.savefig(filename)
        plt.close()

    def check_flags(self, flag_list, mismatch_list=[], expression_list=[], remove_mismatch_list=[],
                    flags_to_reset=None, mode="all", min_cluster_size=[]):
        """Run RecordCCF.check_flags on every cluster with the same arguments."""
        for record in self:
            record.check_flags(flag_list, mismatch_list=mismatch_list, expression_list=expression_list,
                               remove_mismatch_list=remove_mismatch_list, flags_to_reset=flags_to_reset, mode=mode,
                               min_cluster_size=min_cluster_size)

    def extract_vcf(self):
        """Collect all variants of all clusters into a single CollectionVCF."""
        vcf = CollectionVCF(metadata=self.metadata.vcf_metadata, record_list=[], header=self.metadata.vcf_header,
                            samples=self.metadata.samples, from_file=False)
        for record in self:
            """
            print(record)
            print(type(record))
            print(record.records)
            print(type(record.records))
            """
            vcf = vcf + record.records
        return vcf

    def check_strandness(self):
        """Run RecordCCF.check_strandness on every cluster."""
        for record in self:
            record.check_strandness()

    def get_data_for_stat(self, additional_data=("Median", "Mean", "Power")):
        """Return an array of [len, size, *additional_data] rows, one per
        cluster with more than one variant."""
        data = []
        for record in self:
            if record.size == 1:
                continue
            data.append([record.len, record.size] + ([record.description[add_data] for add_data in additional_data] if additional_data else []))
        return np.array(data)

    def heatmap_statistics(self, filename="heatmap_statistics.svg", suptitle="Heatmap_statistics",
                           dpi=150, facecolor="green", n_bins_default=20,
                           additional_data=("Median", "Mean", "Power")):
        """Plot pairwise 2D histograms of cluster statistics and save to filename.
        NOTE(review): with additional_data=None the data list has only two
        columns, so data[2] below would raise IndexError - verify callers."""
        labels_dict = {"Median": "Median distance",
                       "Mean": "Mean distance",
                       "Power": "Power(Size/median dist.)",
                       "Homogeneity": "Strand specificity"}
        names = ("Length", "Size")
        if additional_data is not None:
            names += additional_data
        # NOTE(review): get_data_for_stat is recomputed for every column i
        data = [self.get_data_for_stat(additional_data=additional_data)[:, i]
                for i in range(0, 2 if additional_data is None else 2 + len(additional_data))]
        # extra derived columns; not plotted by the loop below (it only spans names)
        data.append(data[1] / data[2])
        data.append((data[1]**2) / data[2])
        size = 6 * len(names)
        plt.figure(1, dpi=dpi, figsize=(size, size))
        plt.suptitle(suptitle)
        if len(data[0]) == 0:
            return 0
        for i in range(0, len(names)):
            for j in range(0, len(names)):
                if i == j:
                    continue
                plt.subplot(len(names), len(names), i * len(names) + j + 1)
                plt.xlabel(labels_dict[names[i]] if names[i] in labels_dict else names[i])
                plt.ylabel(labels_dict[names[j]] if names[j] in labels_dict else names[j])
                # per-quantity bin counts: 10 for Homogeneity, one per unit for
                # Size, 20 per unit for Power, otherwise n_bins_default
                n_x_bins = 10 if names[i] == "Homogeneity" else int(max(data[i])) if names[i] == "Size" else \
                    int(max(data[i]) * 20) if names[i] == "Power" else n_bins_default
                n_y_bins = 10 if names[j] == "Homogeneity" else int(max(data[j])) if names[j] == "Size" else \
                    int(max(data[j]) * 20) if names[j] == "Power" else n_bins_default
                plt.hist2d(data[i], data[j], (n_x_bins, n_y_bins), cmin=1)  # normed=True)
                plt.colorbar()
        plt.savefig(filename)
        plt.close()

    def get_data_for_strand_stat(self):
        """Split per-cluster strandness, size and length data by dominant
        strand ("P" / "N"); requires check_strandness to have run."""
        num_of_P_clusters = 0
        num_of_N_clusters = 0
        strandness_P_clusters = []
        strandness_N_clusters = []
        size_P_clusters = []
        size_N_clusters = []
        length_P_clusters = []
        length_N_clusters = []
        for record in self:
            if "Strand" not in record.description:
                print(record)
            if record.description["Strand"] == "P":
                num_of_P_clusters += 1
                strandness_P_clusters.append(record.description["SHom"])
                size_P_clusters.append(record.size)
                length_P_clusters.append(record.len)
            else:
                num_of_N_clusters += 1
                strandness_N_clusters.append(record.description["SHom"])
                size_N_clusters.append(record.size)
                length_N_clusters.append(record.len)
        return num_of_P_clusters, strandness_P_clusters, size_P_clusters, length_P_clusters, \
               num_of_N_clusters, strandness_N_clusters, size_N_clusters, length_N_clusters,

    def strandness_statistics(self, filename="Strandness_statistics.svg", suptitle="Cluster strandness_statistics",
                              dpi=150, figsize=(20, 20), facecolor="green"):
        """Plot strand distribution and strandness/size/length relations of
        clusters on a 3x4 grid of subplots; save to filename."""
        num_of_P_clusters, strandness_P_clusters, size_P_clusters, length_P_clusters, \
        num_of_N_clusters, strandness_N_clusters, size_N_clusters, length_N_clusters = \
            self.get_data_for_strand_stat()
        plt.figure(1, dpi=dpi, figsize=figsize)
        plt.subplot(3, 4, 1)
        points = np.arange(2)
        bar_width = 0.55
        rects1 = plt.bar(points, [num_of_P_clusters, num_of_N_clusters], bar_width,
                         color='b')
        plt.xlabel('Strandness')
        plt.ylabel('Counts')
        plt.title("Distribution of clusters in strands")
        plt.xticks(points + bar_width, ('P', 'M'))
        for subplot_index, counts, title in zip([5, 9], [strandness_P_clusters, strandness_N_clusters], ["P", "N"]):
            plt.subplot(3, 4, subplot_index)
            if len(counts) == 0:
                continue
            bins = np.linspace(0.5, 1.0, 11)
            plt.hist(counts, bins, facecolor=facecolor)
            plt.xticks(np.arange(0.5, 1.0, 0.1))
            plt.title("Strandness coefficient of clusters in %s strand" % title)
        for subplot_index, coeff, size, title in zip([6, 10], [strandness_P_clusters, strandness_N_clusters],
                                                     [size_P_clusters, size_N_clusters], ["P", "N"]):
            plt.subplot(3, 4, subplot_index)
            if len(coeff) == 0:
                continue
            #plt.plot(size, coeff, "b.")
            #heatmap, xedges, yedges = np.histogram2d(size, coeff, bins=(10, max(size)))
            #extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
            #plt.imshow(heatmap)#, extent=extent)
            plt.hist2d(size, coeff, (max(size), 10))
            #plt.yticks(np.arange(0.5, 1.0, 0.1))
            plt.title("Coefficient and size of clusters in %s strand" % title)
            plt.xlabel("Size of cluster")
            plt.ylabel("Strandness")
        for subplot_index, coeff, length, title in zip([7, 11], [strandness_P_clusters, strandness_N_clusters],
                                                       [length_P_clusters, length_N_clusters], ["P", "N"]):
            plt.subplot(3, 4, subplot_index)
            if len(coeff) == 0:
                continue
            plt.plot(length, coeff, "b.")
            plt.title("Coefficient and length of clusters in %s strand" % title)
            plt.xlabel("Length of cluster")
            plt.ylabel("Strandness")
        for subplot_index, size, length, title in zip([8, 12], [size_P_clusters, size_N_clusters],
                                                      [length_P_clusters, length_N_clusters], ["P", "N"]):
            plt.subplot(3, 4, subplot_index)
            if len(size) == 0:
                continue
            plt.plot(length, size, "b.")
            plt.title("Length and size of clusters in %s strand" % title)
            plt.xlabel("Length of cluster")
            plt.ylabel("Size of cluster")
        plt.suptitle(suptitle)
        plt.savefig(filename)
        plt.close()

    def write_gff(self, outfile):
        """Write one GFF line per cluster to outfile."""
        with open(outfile, "w") as out_fd:
            for record in self:
                out_fd.write(record.gff_string() + "\n")
if __name__ == "__main__":
"""
col = CollectionCCF(from_file=True, input_file="/media/mahajrod/d9e6e5ee-1bf7-4dba-934e-3f898d9611c8/Data/LAN2xx/combined_vcf/PmCDA1_3d/clustering/PmCDA1_3d_adjusted_not_in_br_no_id.ccf")
#for record in col:
print(col.records[0])
#print(col.records)
col.write("test.ccf")
""" | [
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.arange",
"numpy.mean",
"RouToolPa.Parsers.VCF.CollectionVCF",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.delete",
"matplotlib.pyplot.close",
"numpy.linspace",
"collections.OrderedDict",
"matplotlib.... | [((107, 128), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (121, 128), False, 'import matplotlib\n'), ((161, 171), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (169, 171), True, 'import matplotlib.pyplot as plt\n'), ((4213, 4254), 'numpy.array', 'np.array', (['[record.pos for record in self]'], {}), '([record.pos for record in self])\n', (4221, 4254), True, 'import numpy as np\n'), ((4270, 4291), 'numpy.ediff1d', 'np.ediff1d', (['positions'], {}), '(positions)\n', (4280, 4291), True, 'import numpy as np\n'), ((24540, 24579), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'dpi': 'dpi', 'figsize': 'figsize'}), '(1, dpi=dpi, figsize=figsize)\n', (24550, 24579), True, 'import matplotlib.pyplot as plt\n'), ((24588, 24608), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (24599, 24608), True, 'import matplotlib.pyplot as plt\n'), ((24617, 24636), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {}), '(title)\n', (24629, 24636), True, 'import matplotlib.pyplot as plt\n'), ((24712, 24744), 'numpy.linspace', 'np.linspace', (['(0)', 'maximum', 'maximum'], {}), '(0, maximum, maximum)\n', (24723, 24744), True, 'import numpy as np\n'), ((24753, 24796), 'matplotlib.pyplot.hist', 'plt.hist', (['counts', 'bins'], {'facecolor': 'facecolor'}), '(counts, bins, facecolor=facecolor)\n', (24761, 24796), True, 'import matplotlib.pyplot as plt\n'), ((24852, 24873), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (24863, 24873), True, 'import matplotlib.pyplot as plt\n'), ((24882, 24893), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (24891, 24893), True, 'import matplotlib.pyplot as plt\n'), ((25426, 25578), 'RouToolPa.Parsers.VCF.CollectionVCF', 'CollectionVCF', ([], {'metadata': 'self.metadata.vcf_metadata', 'record_list': '[]', 'header': 'self.metadata.vcf_header', 'samples': 'self.metadata.samples', 'from_file': '(False)'}), 
'(metadata=self.metadata.vcf_metadata, record_list=[], header=\n self.metadata.vcf_header, samples=self.metadata.samples, from_file=False)\n', (25439, 25578), False, 'from RouToolPa.Parsers.VCF import CollectionVCF, MetadataVCF, HeaderVCF\n'), ((26294, 26308), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (26302, 26308), True, 'import numpy as np\n'), ((27182, 27226), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'dpi': 'dpi', 'figsize': '(size, size)'}), '(1, dpi=dpi, figsize=(size, size))\n', (27192, 27226), True, 'import matplotlib.pyplot as plt\n'), ((27235, 27257), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['suptitle'], {}), '(suptitle)\n', (27247, 27257), True, 'import matplotlib.pyplot as plt\n'), ((28567, 28588), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (28578, 28588), True, 'import matplotlib.pyplot as plt\n'), ((28597, 28608), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (28606, 28608), True, 'import matplotlib.pyplot as plt\n'), ((30191, 30230), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'dpi': 'dpi', 'figsize': 'figsize'}), '(1, dpi=dpi, figsize=figsize)\n', (30201, 30230), True, 'import matplotlib.pyplot as plt\n'), ((30239, 30259), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', '(1)'], {}), '(3, 4, 1)\n', (30250, 30259), True, 'import matplotlib.pyplot as plt\n'), ((30277, 30289), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (30286, 30289), True, 'import numpy as np\n'), ((30333, 30410), 'matplotlib.pyplot.bar', 'plt.bar', (['points', '[num_of_P_clusters, num_of_N_clusters]', 'bar_width'], {'color': '"""b"""'}), "(points, [num_of_P_clusters, num_of_N_clusters], bar_width, color='b')\n", (30340, 30410), True, 'import matplotlib.pyplot as plt\n'), ((30445, 30469), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Strandness"""'], {}), "('Strandness')\n", (30455, 30469), True, 'import matplotlib.pyplot as plt\n'), ((30478, 30498), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {}), "('Counts')\n", (30488, 30498), True, 'import matplotlib.pyplot as plt\n'), ((30507, 30555), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of clusters in strands"""'], {}), "('Distribution of clusters in strands')\n", (30516, 30555), True, 'import matplotlib.pyplot as plt\n'), ((30564, 30606), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(points + bar_width)', "('P', 'M')"], {}), "(points + bar_width, ('P', 'M'))\n", (30574, 30606), True, 'import matplotlib.pyplot as plt\n'), ((32926, 32948), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['suptitle'], {}), '(suptitle)\n', (32938, 32948), True, 'import matplotlib.pyplot as plt\n'), ((32957, 32978), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (32968, 32978), True, 'import matplotlib.pyplot as plt\n'), ((32987, 32998), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (32996, 32998), True, 'import matplotlib.pyplot as plt\n'), ((2891, 2909), 'numpy.mean', 'np.mean', (['distances'], {}), '(distances)\n', (2898, 2909), True, 'import numpy as np\n'), ((2951, 2971), 'numpy.median', 'np.median', (['distances'], {}), '(distances)\n', (2960, 2971), True, 'import numpy as np\n'), ((24816, 24842), 'numpy.arange', 'np.arange', (['(0)', 'maximum', '(1.0)'], {}), '(0, maximum, 1.0)\n', (24825, 24842), True, 'import numpy as np\n'), ((30737, 30769), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'subplot_index'], {}), '(3, 4, subplot_index)\n', (30748, 30769), True, 'import matplotlib.pyplot as plt\n'), ((30847, 30872), 'numpy.linspace', 'np.linspace', (['(0.5)', '(1.0)', '(11)'], {}), '(0.5, 1.0, 11)\n', (30858, 30872), True, 'import numpy as np\n'), ((30885, 30928), 'matplotlib.pyplot.hist', 'plt.hist', (['counts', 'bins'], {'facecolor': 'facecolor'}), '(counts, bins, facecolor=facecolor)\n', (30893, 30928), True, 'import matplotlib.pyplot as plt\n'), ((30990, 31058), 'matplotlib.pyplot.title', 
'plt.title', (["('Strandness coefficient of clusters in %s strand' % title)"], {}), "('Strandness coefficient of clusters in %s strand' % title)\n", (30999, 31058), True, 'import matplotlib.pyplot as plt\n'), ((31284, 31316), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'subplot_index'], {}), '(3, 4, subplot_index)\n', (31295, 31316), True, 'import matplotlib.pyplot as plt\n'), ((31738, 31804), 'matplotlib.pyplot.title', 'plt.title', (["('Coefficient and size of clusters in %s strand' % title)"], {}), "('Coefficient and size of clusters in %s strand' % title)\n", (31747, 31804), True, 'import matplotlib.pyplot as plt\n'), ((31817, 31846), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Size of cluster"""'], {}), "('Size of cluster')\n", (31827, 31846), True, 'import matplotlib.pyplot as plt\n'), ((31859, 31883), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Strandness"""'], {}), "('Strandness')\n", (31869, 31883), True, 'import matplotlib.pyplot as plt\n'), ((32115, 32147), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'subplot_index'], {}), '(3, 4, subplot_index)\n', (32126, 32147), True, 'import matplotlib.pyplot as plt\n'), ((32217, 32246), 'matplotlib.pyplot.plot', 'plt.plot', (['length', 'coeff', '"""b."""'], {}), "(length, coeff, 'b.')\n", (32225, 32246), True, 'import matplotlib.pyplot as plt\n'), ((32259, 32327), 'matplotlib.pyplot.title', 'plt.title', (["('Coefficient and length of clusters in %s strand' % title)"], {}), "('Coefficient and length of clusters in %s strand' % title)\n", (32268, 32327), True, 'import matplotlib.pyplot as plt\n'), ((32340, 32371), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Length of cluster"""'], {}), "('Length of cluster')\n", (32350, 32371), True, 'import matplotlib.pyplot as plt\n'), ((32384, 32408), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Strandness"""'], {}), "('Strandness')\n", (32394, 32408), True, 'import matplotlib.pyplot as plt\n'), ((32627, 32659), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(4)', 'subplot_index'], {}), '(3, 4, subplot_index)\n', (32638, 32659), True, 'import matplotlib.pyplot as plt\n'), ((32728, 32756), 'matplotlib.pyplot.plot', 'plt.plot', (['length', 'size', '"""b."""'], {}), "(length, size, 'b.')\n", (32736, 32756), True, 'import matplotlib.pyplot as plt\n'), ((32769, 32830), 'matplotlib.pyplot.title', 'plt.title', (["('Length and size of clusters in %s strand' % title)"], {}), "('Length and size of clusters in %s strand' % title)\n", (32778, 32830), True, 'import matplotlib.pyplot as plt\n'), ((32843, 32874), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Length of cluster"""'], {}), "('Length of cluster')\n", (32853, 32874), True, 'import matplotlib.pyplot as plt\n'), ((32887, 32916), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Size of cluster"""'], {}), "('Size of cluster')\n", (32897, 32916), True, 'import matplotlib.pyplot as plt\n'), ((3024, 3044), 'numpy.median', 'np.median', (['distances'], {}), '(distances)\n', (3033, 3044), True, 'import numpy as np\n'), ((16543, 16556), 'RouToolPa.Parsers.VCF.MetadataVCF', 'MetadataVCF', ([], {}), '()\n', (16554, 16556), False, 'from RouToolPa.Parsers.VCF import CollectionVCF, MetadataVCF, HeaderVCF\n'), ((27541, 27615), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['(labels_dict[names[i]] if names[i] in labels_dict else names[i])'], {}), '(labels_dict[names[i]] if names[i] in labels_dict else names[i])\n', (27551, 27615), True, 'import matplotlib.pyplot as plt\n'), ((27632, 27706), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['(labels_dict[names[j]] if names[j] in labels_dict else names[j])'], {}), '(labels_dict[names[j]] if names[j] in labels_dict else names[j])\n', (27642, 27706), True, 'import matplotlib.pyplot as plt\n'), ((28453, 28511), 'matplotlib.pyplot.hist2d', 'plt.hist2d', (['data[i]', 'data[j]', '(n_x_bins, n_y_bins)'], {'cmin': '(1)'}), '(data[i], data[j], (n_x_bins, n_y_bins), cmin=1)\n', (28463, 28511), True, 
'import matplotlib.pyplot as plt\n'), ((28544, 28558), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (28556, 28558), True, 'import matplotlib.pyplot as plt\n'), ((30952, 30976), 'numpy.arange', 'np.arange', (['(0.5)', '(1.0)', '(0.1)'], {}), '(0.5, 1.0, 0.1)\n', (30961, 30976), True, 'import numpy as np\n'), ((13193, 13268), 'RouToolPa.Parsers.VCF.CollectionVCF', 'CollectionVCF', ([], {'record_list': 'self.records.records[start:end]', 'from_file': '(False)'}), '(record_list=self.records.records[start:end], from_file=False)\n', (13206, 13268), False, 'from RouToolPa.Parsers.VCF import CollectionVCF, MetadataVCF, HeaderVCF\n'), ((19067, 19082), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (19078, 19082), False, 'from collections import Iterable, OrderedDict\n'), ((20755, 20888), 'RouToolPa.Parsers.VCF.CollectionVCF', 'CollectionVCF', ([], {'metadata': 'None', 'record_list': 'None', 'header': 'None', 'vcf_file': 'None', 'samples': 'None', 'from_file': '(False)', 'external_metadata': 'None'}), '(metadata=None, record_list=None, header=None, vcf_file=None,\n samples=None, from_file=False, external_metadata=None)\n', (20768, 20888), False, 'from RouToolPa.Parsers.VCF import CollectionVCF, MetadataVCF, HeaderVCF\n'), ((8475, 8509), 'numpy.delete', 'np.delete', (['self.subclusters', 'index'], {}), '(self.subclusters, index)\n', (8484, 8509), True, 'import numpy as np\n'), ((12444, 12516), 'RouToolPa.Parsers.VCF.CollectionVCF', 'CollectionVCF', ([], {'record_list': 'self.records.records[:start]', 'from_file': '(False)'}), '(record_list=self.records.records[:start], from_file=False)\n', (12457, 12516), False, 'from RouToolPa.Parsers.VCF import CollectionVCF, MetadataVCF, HeaderVCF\n'), ((12726, 12796), 'RouToolPa.Parsers.VCF.CollectionVCF', 'CollectionVCF', ([], {'record_list': 'self.records.records[end:]', 'from_file': '(False)'}), '(record_list=self.records.records[end:], from_file=False)\n', (12739, 12796), False, 'from 
RouToolPa.Parsers.VCF import CollectionVCF, MetadataVCF, HeaderVCF\n')] |
import numpy as np
from .util import ensure_rng
def _hashable(x):
""" ensure that an point is hashable by a python dict """
return tuple(map(float, x))
class TargetSpace(object):
    """
    Holds the param-space coordinates (X) and target values (Y)
    Allows for constant-time appends while ensuring no duplicates are added

    Example
    -------
    >>> def target_func(p1, p2):
    >>>     return p1 + p2
    >>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
    >>> space = TargetSpace(target_func, pbounds, random_state=0)
    >>> x = space.random_sample()
    >>> y = space.probe(space.array_to_params(x))
    >>> assert space.max()['target'] == y
    """
    def __init__(self, target_func, pbounds, random_state=None):
        """
        Parameters
        ----------
        target_func : function
            Function to be maximized.
        pbounds : dict
            Dictionary with parameters names as keys and a tuple with minimum
            and maximum values.
        random_state : int, RandomState, or None
            optionally specify a seed for a random number generator
        """
        self.random_state = ensure_rng(random_state)
        # The function to be optimized
        self.target_func = target_func
        # Parameter names, sorted so array columns have a deterministic order
        self._keys = sorted(pbounds)
        # Create an array with parameters bounds.
        # BUGFIX: use the builtin ``float`` dtype -- ``np.float`` was
        # deprecated in NumPy 1.20 and removed in NumPy 1.24.
        self._bounds = np.array(
            [item[1] for item in sorted(pbounds.items(), key=lambda x: x[0])],
            dtype=float
        )
        # preallocated memory for X and Y points
        self._params = np.empty(shape=(0, self.dim))
        self._target = np.empty(shape=(0,))
        # keep track of unique points we have seen so far
        self._cache = {}

    def __contains__(self, x):
        return _hashable(x) in self._cache

    def __len__(self):
        assert len(self._params) == len(self._target)
        return len(self._target)

    @property
    def empty(self):
        return len(self) == 0

    @property
    def params(self):
        return self._params

    @property
    def target(self):
        return self._target

    @property
    def dim(self):
        return len(self._keys)

    @property
    def keys(self):
        return self._keys

    @property
    def bounds(self):
        return self._bounds

    def params_to_array(self, params):
        """Convert a params dict (or list of dicts) into ndarray(s) ordered by ``self.keys``."""
        if isinstance(params, list):
            x = []
            for p in params:
                if set(p) != set(self.keys):
                    # BUGFIX: report the offending dict's keys (was ``sorted(params)``,
                    # which printed the list of dicts instead of the bad keys).
                    raise ValueError(
                        "Parameters' keys ({}) do ".format(sorted(p)) +
                        "not match the expected set of keys ({}).".format(self.keys)
                    )
                x.append(np.asarray([p[key] for key in self.keys]))
        else:
            if set(params) != set(self.keys):
                raise ValueError(
                    "Parameters' keys ({}) do ".format(sorted(params)) +
                    "not match the expected set of keys ({}).".format(self.keys)
                )
            x = np.asarray([params[key] for key in self.keys])
        return x

    def array_to_params(self, x):
        """Convert an array (or list of arrays) back into params dict(s) keyed by ``self.keys``."""
        if isinstance(x, list):
            params = []
            for param in x:
                if len(param) != len(self.keys):
                    raise ValueError(
                        "Size of array ({}) is different than the ".format(len(x)) +
                        "expected number of parameters ({}).".format(len(self.keys))
                    )
                params.append(dict(zip(self.keys, param)))
        else:
            if len(x) != len(self.keys):
                raise ValueError(
                    "Size of array ({}) is different than the ".format(len(x)) +
                    "expected number of parameters ({}).".format(len(self.keys))
                )
            params = dict(zip(self.keys, x))
        return params

    def _as_array(self, x):
        """Coerce ``x`` (array-like or params dict) into a flat float ndarray of length ``self.dim``."""
        try:
            x = np.asarray(x, dtype=float)
        except TypeError:
            # ``x`` is a params dict (or list of dicts); convert via the keys.
            x = self.params_to_array(x)
        x = x.ravel()
        if x.size != self.dim:
            raise ValueError(
                "Size of array ({}) is different than the ".format(len(x)) +
                "expected number of parameters ({}).".format(len(self.keys))
            )
        return x

    def register(self, params, target):
        """
        Append a point and its target value to the known data.

        Parameters
        ----------
        params : ndarray or dict
            a single point, with len(x) == self.dim
        target : float
            target function value

        Raises
        ------
        KeyError:
            if the point is not unique

        Notes
        -----
        runs in amortized constant time

        Example
        -------
        >>> pbounds = {'p1': (0, 1), 'p2': (1, 100)}
        >>> space = TargetSpace(lambda p1, p2: p1 + p2, pbounds)
        >>> len(space)
        0
        >>> x = np.array([0, 0])
        >>> y = 1
        >>> space.register(x, y)
        >>> len(space)
        1
        """
        x = self._as_array(params)
        if x in self:
            raise KeyError('Data point {} is not unique in continuous space'.format(x))
        # Insert data into unique dictionary
        self._cache[_hashable(x.ravel())] = target
        self._params = np.concatenate([self._params, x.reshape(1, -1)])
        self._target = np.concatenate([self._target, [target]])

    def probe(self, params):
        """
        Evaluates a single point x, to obtain the value y and then records them
        as observations.

        Notes
        -----
        If x has been previously seen returns a cached value of y.

        Parameters
        ----------
        params : ndarray or dict
            a single point, with len(x) == self.dim

        Returns
        -------
        y : float
            target function value.
        """
        x = self._as_array(params)
        try:
            target = self._cache[_hashable(x)]
        except KeyError:
            # Unseen point: evaluate the objective and record the observation.
            params = dict(zip(self._keys, x))
            target = self.target_func(**params)
            self.register(x, target)
        return target

    def random_sample(self, constraints=None):
        """
        Creates a random point within the bounds of the space, resampling
        (rejection sampling) until every constraint is satisfied.

        Parameters
        ----------
        constraints : list of dict, optional
            Each dict must provide a callable under key 'fun'; a candidate is
            rejected while any constraint evaluates to a negative value.

        Returns
        ----------
        data: ndarray
            flat [dim] array with entries corresponding to `self._keys`
        """
        # BUGFIX: avoid the mutable default argument ``constraints=[]``.
        if constraints is None:
            constraints = []
        # TODO: support integer, category, and basic scipy.optimize constraints
        data = np.empty((1, self.dim))
        reject = True
        while reject:
            for col, (lower, upper) in enumerate(self._bounds):
                data.T[col] = self.random_state.uniform(lower, upper, size=1)
            reject = False
            for constraint in constraints:
                # if any constraint evaluates negative, resample the whole point
                if constraint['fun'](data.ravel()) < 0:
                    reject = True
                    break
        return data.ravel()

    def max(self):
        """Get maximum target value found and corresponding parameters."""
        try:
            res = {
                'target': self.target.max(),
                'params': dict(
                    zip(self.keys, self.params[self.target.argmax()])
                )
            }
        except ValueError:
            # ``.max()`` on an empty array raises ValueError: no data yet.
            res = {}
        return res

    def res(self):
        """Get all target values found and corresponding parameters."""
        params = [dict(zip(self.keys, p)) for p in self.params]
        return [
            {"target": target, "params": param}
            for target, param in zip(self.target, params)
        ]

    def set_bounds(self, new_bounds):
        """
        A method that allows changing the lower and upper searching bounds

        Parameters
        ----------
        new_bounds : dict
            A dictionary with the parameter name and its new bounds
        """
        for row, key in enumerate(self.keys):
            if key in new_bounds:
                self._bounds[row] = new_bounds[key]
class DiscreteSpace(TargetSpace):
    '''
    Holds the param-space coordinates (X) and target values (Y) in the discretized space.
    This mirrors TargetSpace but supers methods to consider the floor value of discretized bins.
    That is, a prange (-5,5,.5) will register 1.3 as 1.0 in the cache but as 1.3 in the parameters list.
    Allows for constant-time appends while ensuring no duplicates are added
    '''
    def __init__(self, target_func, prange, random_state=None, feature_scaling=False):
        """
        Parameters
        ----------
        target_func : function
            Function to be maximized.
        prange : dict
            Dictionary with parameters names as keys and a tuple with minimum,
            maximum, and step values.
        random_state : int, RandomState, or None
            optionally specify a seed for a random number generator
        feature_scaling : logical
            Tracking whether to normalize all features on a zero to 1 scale for model
            building. The suggestions will be returned unscaled.
        """
        self.random_state = ensure_rng(random_state)
        # The function to be optimized
        self.target_func = target_func
        # Get the name of the parameters
        self._keys = sorted(prange)
        # Get associated pbounds (min, max only) for TargetSpace()
        self._pbounds = {item[0]: (item[1][:2]) for item in sorted(prange.items(), key=lambda x: x[0])}
        # Create an array with parameter step sizes.
        # BUGFIX: use the builtin ``float`` dtype -- ``np.float`` was
        # deprecated in NumPy 1.20 and removed in NumPy 1.24.
        self._steps = np.array(
            [item[1][-1] for item in sorted(prange.items(), key=lambda x: x[0])],
            dtype=float
        )
        # keep track of unique *binned* points we have seen so far
        self._discrete_cache = {}
        super(DiscreteSpace, self).__init__(target_func=target_func,
                                            pbounds=self._pbounds,
                                            random_state=random_state)
        # Feature scaling: map the bounds onto [0, 1] and remember the
        # shift/scale needed to undo the mapping for suggestions.
        self.feature_scaling = feature_scaling
        if feature_scaling:
            self._scales = self._bounds[:, 1] - self._bounds[:, 0]
            self._shifts = np.array(self._bounds[:, 0])
            self._bounds[:, 0] = self.forward_scaling(self._bounds[:, 0])
            self._bounds[:, 1] = self.forward_scaling(self._bounds[:, 1])
            self._steps = self._steps / self._scales
        else:
            # Identity scaling so forward/reverse scaling become no-ops.
            self._scales = np.ones_like(self._bounds[:, 0])
            self._shifts = np.zeros_like(self._bounds[:, 0])

    @property
    def steps(self):
        """Step size per dimension (in scaled units when feature_scaling is on)."""
        return self._steps

    def _bin(self, x):
        """Snap each coordinate of ``x`` down to the lower edge of its discrete bin."""
        # TODO: clean using modulo
        binned = np.empty((self.dim, 1))
        for col, (lower, upper) in enumerate(self._bounds):
            binned[col] = np.floor((x[col] - lower) / self._steps[col]) * self._steps[col] + lower
        return binned.ravel()

    def __contains__(self, x):
        # Membership is judged on the *binned* representation of ``x``.
        return (_hashable(self._bin(x))) in self._discrete_cache

    def forward_scaling(self, x):
        """Map unscaled parameters onto the normalized feature scale."""
        return (x - self._shifts) / self._scales

    def reverse_scaling(self, x):
        """Map feature-scaled parameters back to their original units."""
        return x * self._scales + self._shifts

    def register(self, params, target, verbose=False, scaled=False):
        """
        Append a point and its target value to the known data.

        Parameters
        ----------
        params : ndarray or dict
            a single point, with len(x) == self.dim
        target : float
            target function value
        verbose : logical
            if True, print a notice when the binned point is a duplicate
        scaled : logical
            whether or not the parameters are feature scaled

        Raises
        ------
        KeyError:
            if the point is not unique

        Notes
        -----
        runs in amortized constant time
        """
        x = self._as_array(params)
        if self.feature_scaling and not scaled:
            x = self.forward_scaling(x)
        if x in self and verbose:
            print('Data point {} is not unique. \n(Discrete value {})'.format(x, self._bin(x)))
        # Insert data into unique dictionary
        self._discrete_cache[_hashable(self._bin(x))] = target
        self._cache[_hashable(x.ravel())] = target
        self._params = np.concatenate([self._params, x.reshape(1, -1)])
        self._target = np.concatenate([self._target, [target]])
"numpy.ones_like",
"numpy.asarray",
"numpy.floor",
"numpy.array",
"numpy.empty",
"numpy.concatenate",
"numpy.zeros_like"
] | [((1599, 1628), 'numpy.empty', 'np.empty', ([], {'shape': '(0, self.dim)'}), '(shape=(0, self.dim))\n', (1607, 1628), True, 'import numpy as np\n'), ((1652, 1669), 'numpy.empty', 'np.empty', ([], {'shape': '(0)'}), '(shape=0)\n', (1660, 1669), True, 'import numpy as np\n'), ((5750, 5790), 'numpy.concatenate', 'np.concatenate', (['[self._target, [target]]'], {}), '([self._target, [target]])\n', (5764, 5790), True, 'import numpy as np\n'), ((7171, 7194), 'numpy.empty', 'np.empty', (['(1, self.dim)'], {}), '((1, self.dim))\n', (7179, 7194), True, 'import numpy as np\n'), ((11461, 11484), 'numpy.empty', 'np.empty', (['(self.dim, 1)'], {}), '((self.dim, 1))\n', (11469, 11484), True, 'import numpy as np\n'), ((13027, 13067), 'numpy.concatenate', 'np.concatenate', (['[self._target, [target]]'], {}), '([self._target, [target]])\n', (13041, 13067), True, 'import numpy as np\n'), ((3197, 3243), 'numpy.asarray', 'np.asarray', (['[params[key] for key in self.keys]'], {}), '([params[key] for key in self.keys])\n', (3207, 3243), True, 'import numpy as np\n'), ((4241, 4267), 'numpy.asarray', 'np.asarray', (['x'], {'dtype': 'float'}), '(x, dtype=float)\n', (4251, 4267), True, 'import numpy as np\n'), ((10948, 10976), 'numpy.array', 'np.array', (['self._bounds[:, 0]'], {}), '(self._bounds[:, 0])\n', (10956, 10976), True, 'import numpy as np\n'), ((11213, 11245), 'numpy.ones_like', 'np.ones_like', (['self._bounds[:, 0]'], {}), '(self._bounds[:, 0])\n', (11225, 11245), True, 'import numpy as np\n'), ((11272, 11305), 'numpy.zeros_like', 'np.zeros_like', (['self._bounds[:, 0]'], {}), '(self._bounds[:, 0])\n', (11285, 11305), True, 'import numpy as np\n'), ((2812, 2853), 'numpy.asarray', 'np.asarray', (['[p[key] for key in self.keys]'], {}), '([p[key] for key in self.keys])\n', (2822, 2853), True, 'import numpy as np\n'), ((11570, 11615), 'numpy.floor', 'np.floor', (['((x[col] - lower) / self._steps[col])'], {}), '((x[col] - lower) / self._steps[col])\n', (11578, 11615), True, 
'import numpy as np\n')] |
"""
Samplers for perses automated molecular design.
TODO
----
* Determine where `System` object should be stored: In `SamplerState` or in `Thermodynamic State`, or both, or neither?
* Can we create a generalized, extensible `SamplerState` that also stores chemical/thermodynamic state information?
* Can we create a generalized log biasing weight container class that gracefully handles new chemical states that have yet to be explored?
"""
__author__ = '<NAME>'
################################################################################
# IMPORTS
################################################################################
import mdtraj as md
import numpy as np
import time
from openmmtools.states import SamplerState, ThermodynamicState
from openmmtools import cache, utils
from perses.dispersed.utils import configure_platform
cache.global_context_cache.platform = configure_platform(utils.get_fastest_platform().getName())
from perses.annihilation.ncmc_switching import NCMCEngine
from perses.dispersed import feptasks
from perses.storage import NetCDFStorageView
from perses.utils.openeye import smiles_to_oemol
################################################################################
# LOGGER
################################################################################
import logging
# Set INFO level on the root logger so messages from this package propagate.
_logger = logging.getLogger()
_logger.setLevel(logging.INFO)
# Rebind to the named logger this module actually logs through; the
# root-level INFO setting above remains in effect.
_logger = logging.getLogger("samplers")
################################################################################
# UTILITY FUNCTIONS
################################################################################
def log_sum_exp(a_n):
    """
    Compute log(sum(exp(a_n))) in a numerically stable way.

    Parameters
    ----------
    a_n : dict of objects : floats
        Mapping whose values are the log-quantities to be summed.

    Returns
    -------
    float
        log of the sum of exponentials of the dict's values.
    """
    a_n = np.array(list(a_n.values()))
    max_arg = a_n.max()
    # Shift by the max before exponentiating to avoid overflow, then add it
    # back. BUGFIX: the original omitted the ``max_arg +`` term and therefore
    # returned log(sum(exp(a_n))) - max(a_n) instead of the documented value.
    return max_arg + np.log(np.sum(np.exp(a_n - max_arg)))
################################################################################
# EXPANDED ENSEMBLE SAMPLER
################################################################################
class ExpandedEnsembleSampler(object):
"""
Method of expanded ensembles sampling engine.
The acceptance criteria is given in the reference document. Roughly, the proposal scheme is:
* Draw a proposed chemical state k', and calculate reverse proposal probability
* Conditioned on k' and the current positions x, generate new positions with the GeometryEngine
* With new positions, jump to a hybrid system at lambda=0
* Anneal from lambda=0 to lambda=1, accumulating work
* Jump from the hybrid system at lambda=1 to the k' system, and compute reverse GeometryEngine proposal
* Add weight of chemical states k and k' to acceptance probabilities
Properties
----------
sampler : MCMCSampler
The MCMC sampler used for updating positions.
proposal_engine : ProposalEngine
The ProposalEngine to use for proposing new sampler states and topologies.
system_generator : SystemGenerator
The SystemGenerator to use for creating System objects following proposals.
state : hashable object
The current sampler state. Can be any hashable object.
states : set of hashable object
All known states.
iteration : int
Iterations completed.
naccepted : int
Number of accepted thermodynamic/chemical state changes.
nrejected : int
Number of rejected thermodynamic/chemical state changes.
number_of_state_visits : dict of state_key
Cumulative counts of visited states.
verbose : bool
If True, verbose output is printed.
References
----------
[1] <NAME>, <NAME>, <NAME>, and <NAME>. New approach to Monte Carlo calculation of the free energy: Method of expanded ensembles. JCP 96:1776, 1992
http://dx.doi.org/10.1063/1.462133
Examples
--------
>>> # Create a test system
>>> test = testsystems.AlanineDipeptideVacuum()
>>> # Create a SystemGenerator and rebuild the System.
>>> from perses.rjmc.topology_proposal import SystemGenerator
>>> system_generator = SystemGenerator(['amber99sbildn.xml'], forcefield_kwargs={'implicitSolvent' : None, 'constraints' : None }, nonperiodic_forcefield_kwargs={'nonbondedMethod' : app.NoCutoff})
>>> test.system = system_generator.build_system(test.topology)
>>> # Create a sampler state.
>>> sampler_state = SamplerState(system=test.system, positions=test.positions)
>>> # Create a thermodynamic state.
>>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298.0*unit.kelvin)
>>> # Create an MCMC sampler
>>> mcmc_sampler = MCMCSampler(thermodynamic_state, sampler_state)
>>> # Turn off verbosity
>>> mcmc_sampler.verbose = False
>>> # Create an Expanded Ensemble sampler
>>> from perses.rjmc.topology_proposal import PointMutationEngine
>>> from perses.rjmc.geometry import FFAllAngleGeometryEngine
>>> geometry_engine = FFAllAngleGeometryEngine(metadata={})
>>> allowed_mutations = [[('2','ALA')],[('2','VAL'),('2','LEU')]]
>>> proposal_engine = PointMutationEngine(test.topology, system_generator, max_point_mutants=1, chain_id='1', proposal_metadata=None, allowed_mutations=allowed_mutations)
>>> exen_sampler = ExpandedEnsembleSampler(mcmc_sampler, test.topology, 'ACE-ALA-NME', proposal_engine, geometry_engine)
>>> # Run the sampler
>>> exen_sampler.run()
"""
    def __init__(self, sampler, topology, state_key, proposal_engine, geometry_engine, log_weights=None, options=None, platform=None, envname=None, storage=None, ncmc_write_interval=1):
        r"""
        Create an expanded ensemble sampler.

        p(x,k) \propto \exp[-u_k(x) + g_k]

        where g_k is the log weight.

        Parameters
        ----------
        sampler : MCMCSampler
            MCMCSampler initialized with current SamplerState
        topology : simtk.openmm.app.Topology
            Current topology
        state_key : hashable object
            Current chemical state
        proposal_engine : ProposalEngine
            ProposalEngine to use for proposing new chemical states
        geometry_engine : GeometryEngine
            GeometryEngine to use for dimension matching
        log_weights : dict of object : float
            Log weights to use for expanded ensemble biases.
        options : dict, optional, default=dict()
            Options for initializing switching scheme, such as 'timestep', 'nsteps', 'functions' for NCMC
        platform : simtk.openmm.Platform, optional, default=None
            Platform to use for NCMC switching. If `None`, default (fastest) platform is used.
        envname : str, optional, default=None
            Environment name; accepted for API compatibility (not used in this constructor).
        storage : NetCDFStorageView, optional, default=None
            If specified, use this storage layer.
        ncmc_write_interval : int, default 1
            How frequently to write out NCMC protocol steps.
        """
        # Keep copies of initializing arguments.
        # TODO: Make deep copies?
        self.sampler = sampler
        # Capture pressure/temperature once so derived thermodynamic states match the sampler's.
        self._pressure = sampler.thermodynamic_state.pressure
        self._temperature = sampler.thermodynamic_state.temperature
        # Keep the OpenMM topology and an MDTraj conversion of it.
        self._omm_topology = topology
        self.topology = md.Topology.from_openmm(topology)
        self.state_key = state_key
        self.proposal_engine = proposal_engine
        self.log_weights = log_weights
        if self.log_weights is None: self.log_weights = dict()
        self.storage = None
        if storage is not None:
            # Wrap the storage layer so writes are namespaced by this class's name.
            self.storage = NetCDFStorageView(storage, modname=self.__class__.__name__)
        # Initialize
        self.iteration = 0
        # Fill in any missing options with None so the lookups below are safe.
        option_names = ['timestep', 'nsteps', 'functions', 'nsteps_mcmc', 'splitting']
        if options is None:
            options = dict()
        for option_name in option_names:
            if option_name not in options:
                options[option_name] = None
        if options['splitting']:
            self._ncmc_splitting = options['splitting']
        else:
            # Default NCMC integrator splitting string.
            self._ncmc_splitting = "V R O H R V"
        if options['nsteps']:
            # Nonzero switching steps: build an NCMC engine for alchemical switching.
            self._switching_nsteps = options['nsteps']
            self.ncmc_engine = NCMCEngine(temperature=self.sampler.thermodynamic_state.temperature,
                                        timestep=options['timestep'], nsteps=options['nsteps'],
                                        functions=options['functions'], integrator_splitting=self._ncmc_splitting,
                                        platform=platform, storage=self.storage,
                                        write_ncmc_interval=ncmc_write_interval)
        else:
            # No NCMC switching requested; note that no ncmc_engine is created in this case.
            self._switching_nsteps = 0
        if options['nsteps_mcmc']:
            self._n_iterations_per_update = options['nsteps_mcmc']
        else:
            self._n_iterations_per_update = 100
        self.geometry_engine = geometry_engine
        # Acceptance statistics and bookkeeping.
        self.naccepted = 0
        self.nrejected = 0
        self.number_of_state_visits = dict()
        self.verbose = False
        self.pdbfile = None  # if not None, write PDB file
        self.geometry_pdbfile = None  # if not None, write PDB file of geometry proposals
        self.accept_everything = False  # if True, will accept anything that doesn't lead to NaNs
        self.logPs = list()
        # Relax the initial configuration with a short minimization (up to 40 iterations).
        self.sampler.minimize(max_iterations=40)
    @property
    def state_keys(self):
        """Chemical state keys for which log weights are currently stored."""
        return self.log_weights.keys()
def get_log_weight(self, state_key):
"""
Get the log weight of the specified state.
Parameters
----------
state_key : hashable object
The state key (e.g. chemical state key) to look up.
Returns
-------
log_weight : float
The log weight of the provided state key.
Notes
-----
This adds the key to the self.log_weights dict.
"""
if state_key not in self.log_weights:
self.log_weights[state_key] = 0.0
return self.log_weights[state_key]
    def _system_to_thermodynamic_state(self, system):
        """
        Given an OpenMM system object, create a corresponding ThermodynamicState that has the same
        temperature and pressure as the current thermodynamic state.

        Parameters
        ----------
        system : openmm.System
            The OpenMM system for which to create the thermodynamic state

        Returns
        -------
        new_thermodynamic_state : openmmtools.states.ThermodynamicState
            The thermodynamic state object representing the given system
        """
        # Reuse the temperature/pressure captured from the MCMC sampler at construction time.
        return ThermodynamicState(system, temperature=self._temperature, pressure=self._pressure)
    def _geometry_forward(self, topology_proposal, old_sampler_state):
        """
        Run geometry engine to propose new positions and compute logP

        Parameters
        ----------
        topology_proposal : TopologyProposal
            Contains old/new Topology and System objects and atom mappings.
        old_sampler_state : openmmtools.states.SamplerState
            Configurational properties of the old system atoms.

        Returns
        -------
        new_sampler_state : openmmtools.states.SamplerState
            Configurational properties of new atoms proposed by geometry engine calculation.
        geometry_logp_propose : float
            The log probability of the forward-only proposal
        """
        if self.verbose: print("Geometry engine proposal...")
        # Generate coordinates for new atoms and compute probability ratio of old and new probabilities.
        initial_time = time.time()
        new_positions, geometry_logp_propose = self.geometry_engine.propose(topology_proposal, old_sampler_state.positions, self.sampler.thermodynamic_state.beta)
        if self.verbose: print('proposal took %.3f s' % (time.time() - initial_time))
        if self.geometry_pdbfile is not None:
            print("Writing proposed geometry...")
            # Local import: PDBFile is only needed when a geometry PDB file was requested.
            from simtk.openmm.app import PDBFile
            PDBFile.writeFile(topology_proposal.new_topology, new_positions, file=self.geometry_pdbfile)
            self.geometry_pdbfile.flush()
        # Carry the old box vectors over: the geometry proposal only generates positions.
        new_sampler_state = SamplerState(new_positions, box_vectors=old_sampler_state.box_vectors)
        return new_sampler_state, geometry_logp_propose
def _geometry_reverse(self, topology_proposal, new_sampler_state, old_sampler_state):
"""
Run geometry engine reverse calculation to determine logP
of proposing the old positions based on the new positions
Parameters
----------
topology_proposal : TopologyProposal
Contains old/new Topology and System objects and atom mappings.
new_sampler_state : openmmtools.states.SamplerState
Configurational properties of the new atoms.
old_sampler_state : openmmtools.states.SamplerState
Configurational properties of the old atoms.
Returns
-------
geometry_logp_reverse : float
The log probability of the proposal for the given transformation
"""
if self.verbose: print("Geometry engine logP_reverse calculation...")
initial_time = time.time()
geometry_logp_reverse = self.geometry_engine.logp_reverse(topology_proposal, new_sampler_state.positions, old_sampler_state.positions, self.sampler.thermodynamic_state.beta)
if self.verbose: print('calculation took %.3f s' % (time.time() - initial_time))
return geometry_logp_reverse
def _ncmc_hybrid(self, topology_proposal, old_sampler_state, new_sampler_state):
"""
Run a hybrid NCMC protocol from lambda = 0 to lambda = 1
Parameters
----------
topology_proposal : TopologyProposal
Contains old/new Topology and System objects and atom mappings.
old_sampler_State : openmmtools.states.SamplerState
SamplerState of old system at the beginning of NCMCSwitching
new_sampler_state : openmmtools.states.SamplerState
SamplerState of new system at the beginning of NCMCSwitching
Returns
-------
old_final_sampler_state : openmmtools.states.SamplerState
SamplerState of old system at the end of switching
new_final_sampler_state : openmmtools.states.SamplerState
SamplerState of new system at the end of switching
logP_work : float
The NCMC work contribution to the log acceptance probability (Eq. 44)
logP_energy : float
The contribution of switching to and from the hybrid system to the acceptance probability (Eq. 45)
"""
if self.verbose: print("Performing NCMC switching")
initial_time = time.time()
[ncmc_old_sampler_state, ncmc_new_sampler_state, logP_work, logP_initial_hybrid, logP_final_hybrid] = self.ncmc_engine.integrate(topology_proposal, old_sampler_state, new_sampler_state, iteration=self.iteration)
if self.verbose: print('NCMC took %.3f s' % (time.time() - initial_time))
# Check that positions are not NaN
if new_sampler_state.has_nan():
raise Exception("Positions are NaN after NCMC insert with %d steps" % self._switching_nsteps)
return ncmc_old_sampler_state, ncmc_new_sampler_state, logP_work, logP_initial_hybrid, logP_final_hybrid
def _geometry_ncmc_geometry(self, topology_proposal, sampler_state, old_log_weight, new_log_weight):
    """
    Use a hybrid NCMC protocol to switch from the old system to new system.

    Will calculate new positions for the new system first, then give both
    sets of positions to the hybrid NCMC integrator, and finally use the
    final positions of the old and new systems to calculate the reverse
    geometry probability.

    Parameters
    ----------
    topology_proposal : TopologyProposal
        Contains old/new Topology and System objects and atom mappings.
    sampler_state : openmmtools.states.SamplerState
        Configurational properties of old atoms at the beginning of the NCMC switching.
    old_log_weight : float
        Chemical state weight from SAMSSampler.
    new_log_weight : float
        Chemical state weight from SAMSSampler.

    Returns
    -------
    logP_accept : float
        Log of acceptance probability of entire Expanded Ensemble switch (Eq. 25 or 46).
    ncmc_new_sampler_state : openmmtools.states.SamplerState
        Configurational properties of new atoms at the end of the NCMC switching.
    """
    if self.verbose: print("Updating chemical state with geometry-ncmc-geometry scheme...")
    logP_chemical_proposal = topology_proposal.logp_proposal
    old_thermodynamic_state = self.sampler.thermodynamic_state
    new_thermodynamic_state = self._system_to_thermodynamic_state(topology_proposal.new_system)
    # Reduced potential of the initial (non-alchemical) state, for Eq. 45.
    initial_reduced_potential = feptasks.compute_reduced_potential(old_thermodynamic_state, sampler_state)
    logP_initial_nonalchemical = - initial_reduced_potential
    new_geometry_sampler_state, logP_geometry_forward = self._geometry_forward(topology_proposal, sampler_state)
    # If we aren't doing any switching, then skip running the NCMC engine at all.
    if self._switching_nsteps == 0:
        ncmc_old_sampler_state = sampler_state
        ncmc_new_sampler_state = new_geometry_sampler_state
        logP_work = 0.0
        logP_initial_hybrid = 0.0
        logP_final_hybrid = 0.0
    else:
        ncmc_old_sampler_state, ncmc_new_sampler_state, logP_work, logP_initial_hybrid, logP_final_hybrid = self._ncmc_hybrid(topology_proposal, sampler_state, new_geometry_sampler_state)
    if logP_work > -np.inf and logP_initial_hybrid > -np.inf and logP_final_hybrid > -np.inf:
        logP_geometry_reverse = self._geometry_reverse(topology_proposal, ncmc_new_sampler_state, ncmc_old_sampler_state)
        logP_to_hybrid = logP_initial_hybrid - logP_initial_nonalchemical
        final_reduced_potential = feptasks.compute_reduced_potential(new_thermodynamic_state, ncmc_new_sampler_state)
        logP_final_nonalchemical = -final_reduced_potential
        logP_from_hybrid = logP_final_nonalchemical - logP_final_hybrid
        logP_sams_weight = new_log_weight - old_log_weight
        # Compute total log acceptance probability according to Eq. 46
        logP_accept = logP_to_hybrid - logP_geometry_forward + logP_work + logP_from_hybrid + logP_geometry_reverse + logP_sams_weight
    else:
        # The NCMC work diverged; zero out the switching contributions so the
        # proposal is (almost surely) rejected.
        # (Removed the unused local `logP_final` that was assigned here.)
        logP_geometry_reverse = 0.0
        logP_to_hybrid = 0.0
        logP_from_hybrid = 0.0
        logP_sams_weight = new_log_weight - old_log_weight
        logP_accept = logP_to_hybrid - logP_geometry_forward + logP_work + logP_from_hybrid + logP_geometry_reverse + logP_sams_weight
    #TODO: mark failed proposals as unproposable
    if self.verbose:
        print("logP_accept = %+10.4e [logP_to_hybrid = %+10.4e, logP_chemical_proposal = %10.4e, logP_reverse = %+10.4e, -logP_forward = %+10.4e, logP_work = %+10.4e, logP_from_hybrid = %+10.4e, logP_sams_weight = %+10.4e]"
            % (logP_accept, logP_to_hybrid, logP_chemical_proposal, logP_geometry_reverse, -logP_geometry_forward, logP_work, logP_from_hybrid, logP_sams_weight))
    # Write to storage.
    if self.storage:
        self.storage.write_quantity('logP_accept', logP_accept, iteration=self.iteration)
        # Write components to storage
        self.storage.write_quantity('logP_ncmc_work', logP_work, iteration=self.iteration)
        self.storage.write_quantity('logP_from_hybrid', logP_from_hybrid, iteration=self.iteration)
        self.storage.write_quantity('logP_to_hybrid', logP_to_hybrid, iteration=self.iteration)
        self.storage.write_quantity('logP_chemical_proposal', logP_chemical_proposal, iteration=self.iteration)
        self.storage.write_quantity('logP_reverse', logP_geometry_reverse, iteration=self.iteration)
        self.storage.write_quantity('logP_forward', logP_geometry_forward, iteration=self.iteration)
        self.storage.write_quantity('logP_sams_weight', logP_sams_weight, iteration=self.iteration)
        # Write some aggregate statistics to storage to make contributions to acceptance probability easier to analyze
        self.storage.write_quantity('logP_groups_chemical', logP_chemical_proposal, iteration=self.iteration)
        self.storage.write_quantity('logP_groups_geometry', logP_geometry_reverse - logP_geometry_forward, iteration=self.iteration)
    return logP_accept, ncmc_new_sampler_state
def update_positions(self, n_iterations=1):
    """
    Sample new positions.

    Delegates to the underlying MCMC sampler to update the configuration.

    Parameters
    ----------
    n_iterations : int, optional, default=1
        Number of MCMC iterations to run.
    """
    self.sampler.run(n_iterations=n_iterations)
def update_state(self):
    """
    Sample the thermodynamic state.

    Proposes a new chemical state with the proposal engine, runs the
    geometry/NCMC/geometry move to obtain an acceptance probability, and
    then accepts or rejects the proposal, updating the underlying sampler's
    system, topology, and positions on acceptance.
    """
    initial_time = time.time()
    # Propose new chemical state.
    if self.verbose: print("Proposing new topology...")
    [system, positions] = [self.sampler.thermodynamic_state.get_system(remove_thermostat=True), self.sampler.sampler_state.positions]
    #omm_topology = topology.to_openmm() #convert to OpenMM topology for proposal engine
    self._omm_topology.setPeriodicBoxVectors(self.sampler.sampler_state.box_vectors) #set the box vectors because in OpenMM topology has these...
    topology_proposal = self.proposal_engine.propose(system, self._omm_topology)
    if self.verbose: print("Proposed transformation: %s => %s" % (topology_proposal.old_chemical_state_key, topology_proposal.new_chemical_state_key))
    # Determine state keys
    old_state_key = self.state_key
    new_state_key = topology_proposal.new_chemical_state_key
    # Determine log weight
    old_log_weight = self.get_log_weight(old_state_key)
    new_log_weight = self.get_log_weight(new_state_key)
    # Run the geometry-NCMC-geometry move and compute the acceptance probability.
    logp_accept, ncmc_new_sampler_state = self._geometry_ncmc_geometry(topology_proposal, self.sampler.sampler_state, old_log_weight, new_log_weight)
    # Accept or reject.
    # NOTE: a NaN acceptance probability is treated as a rejection, not an error.
    if np.isnan(logp_accept):
        accept = False
        print('logp_accept = NaN')
    else:
        # Standard Metropolis criterion: accept if logP >= 0 or with probability exp(logP).
        accept = ((logp_accept>=0.0) or (np.random.uniform() < np.exp(logp_accept)))
        if self.accept_everything:
            print('accept_everything option is turned on; accepting')
            accept = True
    if accept:
        # Install the new system, topology, and switched positions into the sampler.
        self.sampler.thermodynamic_state.set_system(topology_proposal.new_system, fix_state=True)
        self.sampler.sampler_state.system = topology_proposal.new_system
        self.topology = md.Topology.from_openmm(topology_proposal.new_topology)
        self.sampler.sampler_state = ncmc_new_sampler_state
        self.sampler.topology = self.topology
        self.state_key = topology_proposal.new_chemical_state_key
        self.naccepted += 1
        if self.verbose: print("    accepted")
    else:
        self.nrejected += 1
        if self.verbose: print("    rejected")
    if self.storage:
        # Record the frame and per-iteration bookkeeping quantities.
        self.storage.write_configuration('positions', self.sampler.sampler_state.positions, self.topology, iteration=self.iteration)
        self.storage.write_object('state_key', self.state_key, iteration=self.iteration)
        self.storage.write_object('proposed_state_key', topology_proposal.new_chemical_state_key, iteration=self.iteration)
        self.storage.write_quantity('naccepted', self.naccepted, iteration=self.iteration)
        self.storage.write_quantity('nrejected', self.nrejected, iteration=self.iteration)
        self.storage.write_quantity('logp_accept', logp_accept, iteration=self.iteration)
        self.storage.write_quantity('logp_topology_proposal', topology_proposal.logp_proposal, iteration=self.iteration)
    # Update statistics.
    self.update_statistics()
def update(self):
    """
    Update the sampler with one step of sampling.

    One step consists of a position update, a chemical-state update, and
    optional PDB-frame and storage output.
    """
    if self.verbose:
        print("-" * 80)
        print("Expanded Ensemble sampler iteration %8d" % self.iteration)
    self.update_positions(n_iterations=self._n_iterations_per_update)
    self.update_state()
    self.iteration += 1
    if self.verbose:
        print("-" * 80)
    if self.pdbfile is not None:
        # Append the current configuration as a new model in the open PDB file.
        print("Writing frame...")
        from simtk.openmm.app import PDBFile
        PDBFile.writeModel(self.topology.to_openmm(), self.sampler.sampler_state.positions, self.pdbfile, self.iteration)
        self.pdbfile.flush()
    if self.storage:
        self.storage.sync()
def run(self, niterations=1):
    """Run the sampler for the specified number of iterations.

    Parameters
    ----------
    niterations : int, optional, default=1
        Number of iterations to run the sampler for.
    """
    for _ in range(niterations):
        self.update()
def update_statistics(self):
    """Record a visit to the currently sampled chemical state.

    Increments the visit counter for ``self.state_key`` in
    ``self.number_of_state_visits``, creating the entry on first visit.
    """
    visits = self.number_of_state_visits
    current = self.state_key
    visits[current] = visits.get(current, 0) + 1
################################################################################
# SAMS SAMPLER
################################################################################
class SAMSSampler(object):
    """
    Self-adjusted mixture sampling engine.
    Properties
    ----------
    state_keys : set of objects
        The names of states sampled by the sampler.
    logZ : dict() of keys : float
        logZ[key] is the log partition function (up to an additive constant) estimate for chemical state `key`
    update_method : str
        Update method. One of ['one-stage', 'two-stage']
    iteration : int
        Iterations completed.
    verbose : bool
        If True, verbose debug output is printed.
    References
    ----------
    [1] Tan, Z. (2015) Optimally adjusted mixture sampling and locally weighted histogram analysis, Journal of Computational and Graphical Statistics, to appear. (Supplement)
    http://www.stat.rutgers.edu/home/ztan/Publication/SAMS_redo4.pdf
    Examples
    --------
    >>> # Create a test system
    >>> test = testsystems.AlanineDipeptideVacuum()
    >>> # Create a SystemGenerator and rebuild the System.
    >>> from perses.rjmc.topology_proposal import SystemGenerator
    >>> system_generator = SystemGenerator(['amber99sbildn.xml'], forcefield_kwargs={'implicitSolvent' : None, 'constraints' : None }, nonperiodic_forcefield_kwargs={'nonbondedMethod' : app.NoCutoff})
    >>> test.system = system_generator.build_system(test.topology)
    >>> # Create a sampler state.
    >>> sampler_state = SamplerState(system=test.system, positions=test.positions)
    >>> # Create a thermodynamic state.
    >>> thermodynamic_state = ThermodynamicState(system=test.system, temperature=298.0*unit.kelvin)
    >>> # Create an MCMC sampler
    >>> mcmc_sampler = MCMCSampler(thermodynamic_state, sampler_state)
    >>> # Turn off verbosity
    >>> mcmc_sampler.verbose = False
    >>> from perses.rjmc.geometry import FFAllAngleGeometryEngine
    >>> geometry_engine = FFAllAngleGeometryEngine(metadata={})
    >>> # Create an Expanded Ensemble sampler
    >>> from perses.rjmc.topology_proposal import PointMutationEngine
    >>> allowed_mutations = [[('2','ALA')],[('2','VAL'),('2','LEU')]]
    >>> proposal_engine = PointMutationEngine(test.topology, system_generator, max_point_mutants=1, chain_id='1', proposal_metadata=None, allowed_mutations=allowed_mutations)
    >>> exen_sampler = ExpandedEnsembleSampler(mcmc_sampler, test.topology, 'ACE-ALA-NME', proposal_engine, geometry_engine)
    >>> # Create a SAMS sampler
    >>> sams_sampler = SAMSSampler(exen_sampler)
    >>> # Run the sampler
    >>> sams_sampler.run() # doctest: +ELLIPSIS
    ...
    """
    def __init__(self, sampler, logZ=None, log_target_probabilities=None, update_method='two-stage', storage=None, second_stage_start=1000):
        """
        Create a SAMS Sampler.
        Parameters
        ----------
        sampler : ExpandedEnsembleSampler
            The expanded ensemble sampler used to sample both configurations and discrete thermodynamic states.
        logZ : dict of key : float, optional, default=None
            If specified, the log partition functions for each state will be initialized to the specified dictionary.
        log_target_probabilities : dict of key : float, optional, default=None
            If specified, unnormalized target probabilities; default is all 0.
        update_method : str, optional, default='two-stage'
            SAMS update algorithm
        storage : NetCDFStorageView, optional, default=None
        second_stage_start : int, optional, default=1000
            At what iteration number to switch to the optimal gain decay
        """
        from scipy.special import logsumexp
        # Keep copies of initializing arguments.
        # TODO: Make deep copies?
        self.sampler = sampler
        self.chemical_states = None
        self._reference_state = None
        try:
            self.chemical_states = self.sampler.proposal_engine.chemical_state_list
        except NotImplementedError:
            _logger.warning("The proposal engine has not properly implemented the chemical state property; SAMS will add states on the fly.")
        if self.chemical_states:
            # Select a reference state that will always be subtracted (ensure that dict ordering does not change)
            self._reference_state = self.chemical_states[0]
            # Initialize the logZ dictionary with scores based on the number of atoms
            # This is not the negative because the weights are set to the negative of the initial weights
            self.logZ = {chemical_state: self._num_dof_compensation(chemical_state) for chemical_state in self.chemical_states}
            # Initialize log target probabilities.
            # NOTE(review): the original comment claimed log(1/n_states), but the
            # code computes +log(n_states); behavior is preserved here -- confirm
            # the intended sign.
            self.log_target_probabilities = {chemical_state : np.log(len(self.chemical_states)) for chemical_state in self.chemical_states}
            # If initial weights are specified, override any weight with what is provided
            # However, if the chemical state is not in the reachable chemical state list, throw an exception
            if logZ is not None:
                # BUGFIX: iterate (key, value) pairs with .items(); iterating the
                # dict directly yields only keys and breaks the tuple unpacking.
                for (chemical_state, logZ_value) in logZ.items():
                    if chemical_state not in self.chemical_states:
                        raise ValueError("Provided a logZ initial value for an un-proposable chemical state")
                    self.logZ[chemical_state] = logZ_value
            if log_target_probabilities is not None:
                # BUGFIX: same .items() fix as above.
                for (chemical_state, log_target_probability) in log_target_probabilities.items():
                    if chemical_state not in self.chemical_states:
                        raise ValueError("Provided a log target probability for an un-proposable chemical state.")
                    self.log_target_probabilities[chemical_state] = log_target_probability
            # Normalize target probabilities.
            # This is likely not necessary, but it is copying the algorithm in Ref 1.
            log_sum_target_probabilities = logsumexp((list(self.log_target_probabilities.values())))
            # BUGFIX: iterate .items() so the (key, value) unpacking works.
            self.log_target_probabilities = {chemical_state : log_target_probability - log_sum_target_probabilities for chemical_state, log_target_probability in self.log_target_probabilities.items()}
        else:
            self.logZ = dict()
            self.log_target_probabilities = dict()
        self.update_method = update_method
        self.storage = None
        if storage is not None:
            self.storage = NetCDFStorageView(storage, modname=self.__class__.__name__)
        # Initialize.
        self.iteration = 0
        self.verbose = False
        # Sampler weights are the negative of the current logZ estimates.
        self.sampler.log_weights = {state_key: - self.logZ[state_key] for state_key in self.logZ.keys()}
        self.second_stage_start = 0
        if second_stage_start is not None:
            self.second_stage_start = second_stage_start
    @property
    def state_keys(self):
        return self.logZ.keys()
    def _num_dof_compensation(self, smiles):
        """
        Compute an approximate compensating factor for a chemical state based on the number of degrees of freedom that it has.
        The formula is:
        (num_heavy*heavy_factor) + (num_hydrogen*light_factor) where
        heavy_factor = 4.5 and
        light_factor = 3.8
        Parameters
        ----------
        smiles : str
            The SMILES string of the molecule
        Returns
        -------
        correction_factor : float
        """
        mol = smiles_to_oemol(smiles)
        num_heavy = 0
        num_light = 0
        heavy_factor = 4.5
        light_factor = 3.8
        for atom in mol.GetAtoms():
            if atom.GetAtomicNum() == 1:
                num_light += 1
            else:
                num_heavy += 1
        correction_factor = num_heavy*heavy_factor + num_light*light_factor
        return correction_factor
    def update_sampler(self):
        """
        Update the underlying expanded ensembles sampler.
        """
        self.sampler.update()
    def update_logZ_estimates(self):
        """
        Update the logZ estimates according to self.update_method.
        """
        state_key = self.sampler.state_key
        # Add state key to dictionaries if we haven't visited this state before.
        if state_key not in self.logZ:
            _logger.warning("A new state key is being added to the logZ; note that this makes the resultant algorithm different from SAMS")
            self.logZ[state_key] = 0.0
        if state_key not in self.log_target_probabilities:
            _logger.warning("A new state key is being added to the target probabilities; note that this makes the resultant algorithm different from SAMS")
            self.log_target_probabilities[state_key] = 0.0
        # Update estimates of logZ.
        if self.update_method == 'one-stage':
            # Based on Eq. 9 of Ref. [1]
            gamma = 1.0 / float(self.iteration+1)
        elif self.update_method == 'two-stage':
            # Keep gamma large until second stage is activated.
            if self.iteration < self.second_stage_start:
                # First stage.
                gamma = 1.0
                # TODO: Determine when to switch to second stage
            else:
                # Second stage.
                gamma = 1.0 / float(self.iteration - self.second_stage_start + 1)
        else:
            raise Exception("SAMS update method '%s' unknown." % self.update_method)
        # Apply the (t-1/2) update from equation 9 in Ref. [1].
        self.logZ[state_key] += gamma / np.exp(self.log_target_probabilities[state_key])
        if self._reference_state:
            # Second half of the (t-1/2) update: subtract the reference state from everything else.
            # We can only do this for cases where all states have been enumerated.
            self.logZ = {state_key : logZ_estimate - self.logZ[self._reference_state] for state_key, logZ_estimate in self.logZ.items()}
        # Update log weights for sampler.
        self.sampler.log_weights = { state_key : - self.logZ[state_key] for state_key in self.logZ.keys()}
        if self.storage:
            self.storage.write_object('logZ', self.logZ, iteration=self.iteration)
            self.storage.write_object('log_weights', self.sampler.log_weights, iteration=self.iteration)
    def update(self):
        """
        Update the sampler with one step of sampling.
        """
        if self.verbose:
            print("=" * 80)
            print("SAMS sampler iteration %5d" % self.iteration)
        self.update_sampler()
        self.update_logZ_estimates()
        if self.storage: self.storage.sync()
        self.iteration += 1
        if self.verbose:
            print("=" * 80)
    def run(self, niterations=1):
        """
        Run the sampler for the specified number of iterations
        Parameters
        ----------
        niterations : int, optional, default=1
            Number of iterations to run the sampler for.
        """
        for iteration in range(niterations):
            self.update()
################################################################################
# MULTITARGET OPTIMIZATION SAMPLER
################################################################################
class MultiTargetDesign(object):
    """
    Multi-objective design using self-adjusted mixture sampling with additional recursion steps
    that update target weights on the fly.
    Parameters
    ----------
    samplers : list of SAMSSampler
        The SAMS samplers whose relative partition functions go into the design objective computation.
    sampler_exponents : dict of SAMSSampler : float
        samplers.keys() are the samplers, and samplers[key] is the exponent applied to that sampler's logZ.
    log_target_probabilities : dict of hashable object : float
        log_target_probabilities[key] is the computed log objective function (target probability) for chemical state `key`
    verbose : bool
        If True, verbose output is printed.
    """
    def __init__(self, target_samplers, storage=None, verbose=False):
        r"""
        Initialize a multi-objective design sampler with the specified target sampler powers.
        Parameters
        ----------
        target_samplers : dict
            target_samplers[sampler] is the exponent associated with SAMS sampler `sampler` in the multi-objective design.
        storage : NetCDFStorage, optional, default=None
            If specified, will use the storage layer to write trajectory data.
        verbose : bool, optional, default=False
            If true, will print verbose output
        The target sampler weights for N samplers with specified exponents \alpha_n are given by
        \pi_{nk} \propto \prod_{n=1}^N Z_{nk}^{alpha_n}
        where \pi_{nk} is the target weight for sampler n state k,
        and Z_{nk} is the relative partition function of sampler n among states k.
        Examples
        --------
        Set up a mutation sampler to maximize implicit solvent hydration free energy.
        >>> from perses.tests.testsystems import AlanineDipeptideTestSystem
        >>> testsystem = AlanineDipeptideTestSystem()
        >>> # Set up target samplers.
        >>> target_samplers = { testsystem.sams_samplers['implicit'] : 1.0, testsystem.sams_samplers['vacuum'] : -1.0 }
        >>> # Set up the design sampler.
        >>> designer = MultiTargetDesign(target_samplers)
        """
        # Store target samplers.
        self.sampler_exponents = target_samplers
        self.samplers = list(target_samplers.keys())
        self.storage = None
        if storage is not None:
            self.storage = NetCDFStorageView(storage, modname=self.__class__.__name__)
        # Initialize storage for target probabilities.
        self.log_target_probabilities = dict()
        self.verbose = verbose
        self.iteration = 0
    @property
    def state_keys(self):
        # All state keys for which a target probability has been computed.
        return self.log_target_probabilities.keys()
    def update_samplers(self):
        """
        Update all samplers.
        """
        for sampler in self.samplers:
            sampler.update()
    def update_target_probabilities(self):
        """
        Update all target probabilities.
        Recomputes the unnormalized log target probability of every state as the
        exponent-weighted sum of the samplers' logZ estimates, then normalizes.
        """
        # Gather list of all keys.
        state_keys = set()
        for sampler in self.samplers:
            for key in sampler.state_keys:
                state_keys.add(key)
        # Compute unnormalized log target probabilities.
        log_target_probabilities = { key : 0.0 for key in state_keys }
        for (sampler, log_weight) in self.sampler_exponents.items():
            for key in sampler.state_keys:
                log_target_probabilities[key] += log_weight * sampler.logZ[key]
        # Normalize
        # NOTE(review): `log_sum_exp` is defined elsewhere in this module and is
        # handed the whole dict here -- presumably it reduces over the dict's
        # values; confirm it does not expect a plain sequence.
        log_sum = log_sum_exp(log_target_probabilities)
        for key in log_target_probabilities:
            log_target_probabilities[key] -= log_sum
        # Store.
        self.log_target_probabilities = log_target_probabilities
        if self.verbose:
            print("log_target_probabilities = %s" % str(self.log_target_probabilities))
        if self.storage:
            self.storage.write_object('log_target_probabilities', self.log_target_probabilities, iteration=self.iteration)
    def update(self):
        """
        Run one iteration of the sampler.
        """
        if self.verbose:
            print("*" * 80)
            print("MultiTargetDesign sampler iteration %8d" % self.iteration)
        self.update_samplers()
        self.update_target_probabilities()
        self.iteration += 1
        if self.storage: self.storage.sync()
        if self.verbose:
            print("*" * 80)
    def run(self, niterations=1):
        """
        Run the multi-target design sampler for the specified number of iterations.
        Parameters
        ----------
        niterations : int
            The number of iterations to run the sampler for.
        """
        # Update all samplers.
        for iteration in range(niterations):
            self.update()
################################################################################
# CONSTANT PH SAMPLER
################################################################################
class ProtonationStateSampler(object):
    """Protonation state sampler with fixed target probabilities for ligand in solvent.

    The target probability of each state is fixed to the negative of the supplied
    per-state log free-energy penalty; on every iteration the complex sampler's
    log weights are synchronized with the solvent sampler's log weights.

    Attributes
    ----------
    samplers : list
        The [complex, solvent] samplers driven by this object.
    log_target_probabilities : dict of hashable object : float
        log_target_probabilities[key] is the fixed log target probability for state `key`.
    verbose : bool
        If True, verbose output is printed.
    """
    def __init__(self, complex_sampler, solvent_sampler, log_state_penalties, storage=None, verbose=False):
        """Initialize a protonation state sampler with fixed targets for ligand in solvent.

        Parameters
        ----------
        complex_sampler : ExpandedEnsembleSampler
            Ligand in complex sampler.
        solvent_sampler : SAMSSampler
            Ligand in solution sampler.
        log_state_penalties : dict
            log_state_penalties[smiles] is the log state free energy (in kT) for ligand state 'smiles'.
        storage : NetCDFStorage, optional, default=None
            If specified, will use the storage layer to write trajectory data.
        verbose : bool, optional, default=False
            If true, will print verbose output.
        """
        self.log_state_penalties = log_state_penalties
        self.complex_sampler = complex_sampler
        self.solvent_sampler = solvent_sampler
        self.samplers = [complex_sampler, solvent_sampler]
        # Fixed targets: the negated free-energy penalty of each state.
        self.log_target_probabilities = {state: -penalty
                                         for state, penalty in log_state_penalties.items()}
        self.storage = (NetCDFStorageView(storage, modname=self.__class__.__name__)
                        if storage is not None else None)
        self.verbose = verbose
        self.iteration = 0
    @property
    def state_keys(self):
        """Keys of all states with a defined target probability."""
        return self.log_target_probabilities.keys()
    def update_samplers(self):
        """Advance every underlying sampler by one update."""
        for child in self.samplers:
            child.update()
    def update_target_probabilities(self):
        """Copy the solvent sampler's log weights onto the complex sampler."""
        solvent = self.solvent_sampler
        for state in solvent.state_keys:
            self.complex_sampler.log_weights[state] = solvent.sampler.log_weights[state]
        if self.verbose:
            print("log_weights = %s" % str(solvent.sampler.log_weights))
    def update(self):
        """Run one full iteration: sampler updates, then weight synchronization."""
        if self.verbose:
            print("*" * 80)
            print("ProtonationStateSampler iteration %8d" % self.iteration)
        self.update_samplers()
        self.update_target_probabilities()
        if self.storage:
            self.storage.sync()
        self.iteration += 1
        if self.verbose:
            print("*" * 80)
    def run(self, niterations=1):
        """Run the protonation state sampler for the specified number of iterations.

        Parameters
        ----------
        niterations : int, optional, default=1
            The number of iterations to run the sampler for.
        """
        for _ in range(niterations):
            self.update()
| [
"logging.getLogger",
"simtk.openmm.app.PDBFile.writeFile",
"openmmtools.utils.get_fastest_platform",
"perses.utils.openeye.smiles_to_oemol",
"perses.annihilation.ncmc_switching.NCMCEngine",
"openmmtools.states.SamplerState",
"numpy.exp",
"openmmtools.states.ThermodynamicState",
"perses.dispersed.fep... | [((1333, 1352), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1350, 1352), False, 'import logging\n'), ((1394, 1423), 'logging.getLogger', 'logging.getLogger', (['"""samplers"""'], {}), "('samplers')\n", (1411, 1423), False, 'import logging\n'), ((7187, 7220), 'mdtraj.Topology.from_openmm', 'md.Topology.from_openmm', (['topology'], {}), '(topology)\n', (7210, 7220), True, 'import mdtraj as md\n'), ((10565, 10652), 'openmmtools.states.ThermodynamicState', 'ThermodynamicState', (['system'], {'temperature': 'self._temperature', 'pressure': 'self._pressure'}), '(system, temperature=self._temperature, pressure=self.\n _pressure)\n', (10583, 10652), False, 'from openmmtools.states import SamplerState, ThermodynamicState\n'), ((11573, 11584), 'time.time', 'time.time', ([], {}), '()\n', (11582, 11584), False, 'import time\n'), ((12156, 12226), 'openmmtools.states.SamplerState', 'SamplerState', (['new_positions'], {'box_vectors': 'old_sampler_state.box_vectors'}), '(new_positions, box_vectors=old_sampler_state.box_vectors)\n', (12168, 12226), False, 'from openmmtools.states import SamplerState, ThermodynamicState\n'), ((13174, 13185), 'time.time', 'time.time', ([], {}), '()\n', (13183, 13185), False, 'import time\n'), ((14716, 14727), 'time.time', 'time.time', ([], {}), '()\n', (14725, 14727), False, 'import time\n'), ((16945, 17019), 'perses.dispersed.feptasks.compute_reduced_potential', 'feptasks.compute_reduced_potential', (['old_thermodynamic_state', 'sampler_state'], {}), '(old_thermodynamic_state, sampler_state)\n', (16979, 17019), False, 'from perses.dispersed import feptasks\n'), ((21043, 21054), 'time.time', 'time.time', ([], {}), '()\n', (21052, 21054), False, 'import time\n'), ((22259, 22280), 'numpy.isnan', 'np.isnan', (['logp_accept'], {}), '(logp_accept)\n', (22267, 22280), True, 'import numpy as np\n'), ((33005, 33028), 'perses.utils.openeye.smiles_to_oemol', 'smiles_to_oemol', (['smiles'], {}), '(smiles)\n', (33020, 
33028), False, 'from perses.utils.openeye import smiles_to_oemol\n'), ((903, 931), 'openmmtools.utils.get_fastest_platform', 'utils.get_fastest_platform', ([], {}), '()\n', (929, 931), False, 'from openmmtools import cache, utils\n'), ((7493, 7552), 'perses.storage.NetCDFStorageView', 'NetCDFStorageView', (['storage'], {'modname': 'self.__class__.__name__'}), '(storage, modname=self.__class__.__name__)\n', (7510, 7552), False, 'from perses.storage import NetCDFStorageView\n'), ((8145, 8444), 'perses.annihilation.ncmc_switching.NCMCEngine', 'NCMCEngine', ([], {'temperature': 'self.sampler.thermodynamic_state.temperature', 'timestep': "options['timestep']", 'nsteps': "options['nsteps']", 'functions': "options['functions']", 'integrator_splitting': 'self._ncmc_splitting', 'platform': 'platform', 'storage': 'self.storage', 'write_ncmc_interval': 'ncmc_write_interval'}), "(temperature=self.sampler.thermodynamic_state.temperature,\n timestep=options['timestep'], nsteps=options['nsteps'], functions=\n options['functions'], integrator_splitting=self._ncmc_splitting,\n platform=platform, storage=self.storage, write_ncmc_interval=\n ncmc_write_interval)\n", (8155, 8444), False, 'from perses.annihilation.ncmc_switching import NCMCEngine\n'), ((11992, 12089), 'simtk.openmm.app.PDBFile.writeFile', 'PDBFile.writeFile', (['topology_proposal.new_topology', 'new_positions'], {'file': 'self.geometry_pdbfile'}), '(topology_proposal.new_topology, new_positions, file=self.\n geometry_pdbfile)\n', (12009, 12089), False, 'from simtk.openmm.app import PDBFile\n'), ((18094, 18181), 'perses.dispersed.feptasks.compute_reduced_potential', 'feptasks.compute_reduced_potential', (['new_thermodynamic_state', 'ncmc_new_sampler_state'], {}), '(new_thermodynamic_state,\n ncmc_new_sampler_state)\n', (18128, 18181), False, 'from perses.dispersed import feptasks\n'), ((22821, 22876), 'mdtraj.Topology.from_openmm', 'md.Topology.from_openmm', (['topology_proposal.new_topology'], {}), 
'(topology_proposal.new_topology)\n', (22844, 22876), True, 'import mdtraj as md\n'), ((32019, 32078), 'perses.storage.NetCDFStorageView', 'NetCDFStorageView', (['storage'], {'modname': 'self.__class__.__name__'}), '(storage, modname=self.__class__.__name__)\n', (32036, 32078), False, 'from perses.storage import NetCDFStorageView\n'), ((35082, 35130), 'numpy.exp', 'np.exp', (['self.log_target_probabilities[state_key]'], {}), '(self.log_target_probabilities[state_key])\n', (35088, 35130), True, 'import numpy as np\n'), ((39170, 39229), 'perses.storage.NetCDFStorageView', 'NetCDFStorageView', (['storage'], {'modname': 'self.__class__.__name__'}), '(storage, modname=self.__class__.__name__)\n', (39187, 39229), False, 'from perses.storage import NetCDFStorageView\n'), ((43557, 43616), 'perses.storage.NetCDFStorageView', 'NetCDFStorageView', (['storage'], {'modname': 'self.__class__.__name__'}), '(storage, modname=self.__class__.__name__)\n', (43574, 43616), False, 'from perses.storage import NetCDFStorageView\n'), ((22407, 22426), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (22424, 22426), True, 'import numpy as np\n'), ((22429, 22448), 'numpy.exp', 'np.exp', (['logp_accept'], {}), '(logp_accept)\n', (22435, 22448), True, 'import numpy as np\n'), ((11805, 11816), 'time.time', 'time.time', ([], {}), '()\n', (11814, 11816), False, 'import time\n'), ((13428, 13439), 'time.time', 'time.time', ([], {}), '()\n', (13437, 13439), False, 'import time\n'), ((15001, 15012), 'time.time', 'time.time', ([], {}), '()\n', (15010, 15012), False, 'import time\n')] |
import unittest
import chainer
import chainer.functions as F
import numpy as np
from chainer import testing
import onnx_chainer
# Each parameter dict drives one generated test case: 'ops' names the
# chainer.functions callable, 'input_shape' the dummy input shape,
# 'input_argname' the keyword under which the input tensor is passed,
# and 'args' any extra keyword arguments for the op.
@testing.parameterize(
    {'ops': 'cast', 'input_shape': (1, 5),
     'input_argname': 'x',
     'args': {'typ': np.float16}},
    {'ops': 'cast', 'input_shape': (1, 5),
     'input_argname': 'x',
     'args': {'typ': np.float64}},
    {'ops': 'depth2space', 'input_shape': (1, 12, 6, 6),
     'input_argname': 'X',
     'args': {'r': 2}},
    {'ops': 'pad', 'input_shape': (1, 5),
     'input_argname': 'x',
     'args': {'pad_width': (0, 2), 'mode': 'constant'}},
    {'ops': 'pad', 'input_shape': (1, 5),
     'input_argname': 'x',
     'args': {'pad_width': (0, 2), 'mode': 'reflect'}},
    {'ops': 'pad', 'input_shape': (1, 5),
     'input_argname': 'x',
     'args': {'pad_width': (0, 2), 'mode': 'edge'}},
    {'ops': 'reshape', 'input_shape': (1, 6),
     'input_argname': 'x',
     'args': {'shape': (1, 2, 1, 3)}},
    {'ops': 'reshape', 'input_shape': (1, 6),
     'input_argname': 'x',
     'args': {'shape': (1, 2, 1, 3)}},
    {'ops': 'space2depth', 'input_shape': (1, 12, 6, 6),
     'input_argname': 'X',
     'args': {'r': 2}},
    {'ops': 'split_axis', 'input_shape': (1, 6),
     'input_argname': 'x',
     'args': {'indices_or_sections': 2,
              'axis': 1, 'force_tuple': True}},
    {'ops': 'split_axis', 'input_shape': (1, 6),
     'input_argname': 'x',
     'args': {'indices_or_sections': 2,
              'axis': 1, 'force_tuple': False}},
    {'ops': 'squeeze', 'input_shape': (1, 3, 1, 2),
     'input_argname': 'x',
     'args': {'axis': None}},
    {'ops': 'squeeze', 'input_shape': (1, 3, 1, 2, 1),
     'input_argname': 'x',
     'args': {'axis': (2, 4)}},
    {'ops': 'tile', 'input_shape': (1, 5),
     'input_argname': 'x',
     'args': {'reps': (1, 2)}},
    {'ops': 'transpose', 'input_shape': (1, 5),
     'input_argname': 'x',
     'args': {'axes': None}},
)
class TestArrayOperators(unittest.TestCase):
    """Check that single array-manipulation chainer ops export to ONNX."""

    def setUp(self):
        # Minimal chain applying exactly one parameterized op to its input.
        class Model(chainer.Chain):
            def __init__(self, ops, args, input_argname):
                super(Model, self).__init__()
                self.ops = getattr(F, ops)
                self.args = args
                self.input_argname = input_argname
            def __call__(self, x):
                # Inject the input under the op-specific argument name.
                self.args[self.input_argname] = x
                return self.ops(**self.args)
        self.model = Model(self.ops, self.args, self.input_argname)
        self.x = np.zeros(self.input_shape, dtype=np.float32)
    def test_export_test(self):
        # Export with the graph in inference mode.
        chainer.config.train = False
        onnx_chainer.export(self.model, self.x)
    def test_export_train(self):
        # Export with the graph in training mode.
        chainer.config.train = True
        onnx_chainer.export(self.model, self.x)
class TestConcat(unittest.TestCase):
    """Check that a model whose forward pass concatenates tensors exports to ONNX."""

    def setUp(self):
        class _ConcatModel(chainer.Chain):
            def __init__(self):
                super(_ConcatModel, self).__init__()

            def __call__(self, x):
                # Concatenate the input with a ones tensor of the same shape/dtype.
                ones = chainer.Variable(np.ones(x.shape).astype(x.dtype))
                return F.concat((x, ones))

        self.model = _ConcatModel()
        self.x = np.zeros((1, 5), dtype=np.float32)

    def test_export_test(self):
        # Export with the graph in inference mode.
        chainer.config.train = False
        onnx_chainer.export(self.model, self.x)

    def test_export_train(self):
        # Export with the graph in training mode.
        chainer.config.train = True
        onnx_chainer.export(self.model, self.x)
| [
"numpy.ones",
"chainer.testing.parameterize",
"chainer.functions.concat",
"numpy.zeros",
"onnx_chainer.export"
] | [((133, 1787), 'chainer.testing.parameterize', 'testing.parameterize', (["{'ops': 'cast', 'input_shape': (1, 5), 'input_argname': 'x', 'args': {'typ':\n np.float16}}", "{'ops': 'cast', 'input_shape': (1, 5), 'input_argname': 'x', 'args': {'typ':\n np.float64}}", "{'ops': 'depth2space', 'input_shape': (1, 12, 6, 6), 'input_argname': 'X',\n 'args': {'r': 2}}", "{'ops': 'pad', 'input_shape': (1, 5), 'input_argname': 'x', 'args': {\n 'pad_width': (0, 2), 'mode': 'constant'}}", "{'ops': 'pad', 'input_shape': (1, 5), 'input_argname': 'x', 'args': {\n 'pad_width': (0, 2), 'mode': 'reflect'}}", "{'ops': 'pad', 'input_shape': (1, 5), 'input_argname': 'x', 'args': {\n 'pad_width': (0, 2), 'mode': 'edge'}}", "{'ops': 'reshape', 'input_shape': (1, 6), 'input_argname': 'x', 'args': {\n 'shape': (1, 2, 1, 3)}}", "{'ops': 'reshape', 'input_shape': (1, 6), 'input_argname': 'x', 'args': {\n 'shape': (1, 2, 1, 3)}}", "{'ops': 'space2depth', 'input_shape': (1, 12, 6, 6), 'input_argname': 'X',\n 'args': {'r': 2}}", "{'ops': 'split_axis', 'input_shape': (1, 6), 'input_argname': 'x', 'args':\n {'indices_or_sections': 2, 'axis': 1, 'force_tuple': True}}", "{'ops': 'split_axis', 'input_shape': (1, 6), 'input_argname': 'x', 'args':\n {'indices_or_sections': 2, 'axis': 1, 'force_tuple': False}}", "{'ops': 'squeeze', 'input_shape': (1, 3, 1, 2), 'input_argname': 'x',\n 'args': {'axis': None}}", "{'ops': 'squeeze', 'input_shape': (1, 3, 1, 2, 1), 'input_argname': 'x',\n 'args': {'axis': (2, 4)}}", "{'ops': 'tile', 'input_shape': (1, 5), 'input_argname': 'x', 'args': {\n 'reps': (1, 2)}}", "{'ops': 'transpose', 'input_shape': (1, 5), 'input_argname': 'x', 'args': {\n 'axes': None}}"], {}), "({'ops': 'cast', 'input_shape': (1, 5), 'input_argname':\n 'x', 'args': {'typ': np.float16}}, {'ops': 'cast', 'input_shape': (1, 5\n ), 'input_argname': 'x', 'args': {'typ': np.float64}}, {'ops':\n 'depth2space', 'input_shape': (1, 12, 6, 6), 'input_argname': 'X',\n 'args': {'r': 2}}, {'ops': 'pad', 
'input_shape': (1, 5),\n 'input_argname': 'x', 'args': {'pad_width': (0, 2), 'mode': 'constant'}\n }, {'ops': 'pad', 'input_shape': (1, 5), 'input_argname': 'x', 'args':\n {'pad_width': (0, 2), 'mode': 'reflect'}}, {'ops': 'pad', 'input_shape':\n (1, 5), 'input_argname': 'x', 'args': {'pad_width': (0, 2), 'mode':\n 'edge'}}, {'ops': 'reshape', 'input_shape': (1, 6), 'input_argname':\n 'x', 'args': {'shape': (1, 2, 1, 3)}}, {'ops': 'reshape', 'input_shape':\n (1, 6), 'input_argname': 'x', 'args': {'shape': (1, 2, 1, 3)}}, {'ops':\n 'space2depth', 'input_shape': (1, 12, 6, 6), 'input_argname': 'X',\n 'args': {'r': 2}}, {'ops': 'split_axis', 'input_shape': (1, 6),\n 'input_argname': 'x', 'args': {'indices_or_sections': 2, 'axis': 1,\n 'force_tuple': True}}, {'ops': 'split_axis', 'input_shape': (1, 6),\n 'input_argname': 'x', 'args': {'indices_or_sections': 2, 'axis': 1,\n 'force_tuple': False}}, {'ops': 'squeeze', 'input_shape': (1, 3, 1, 2),\n 'input_argname': 'x', 'args': {'axis': None}}, {'ops': 'squeeze',\n 'input_shape': (1, 3, 1, 2, 1), 'input_argname': 'x', 'args': {'axis':\n (2, 4)}}, {'ops': 'tile', 'input_shape': (1, 5), 'input_argname': 'x',\n 'args': {'reps': (1, 2)}}, {'ops': 'transpose', 'input_shape': (1, 5),\n 'input_argname': 'x', 'args': {'axes': None}})\n", (153, 1787), False, 'from chainer import testing\n'), ((2500, 2544), 'numpy.zeros', 'np.zeros', (['self.input_shape'], {'dtype': 'np.float32'}), '(self.input_shape, dtype=np.float32)\n', (2508, 2544), True, 'import numpy as np\n'), ((2623, 2662), 'onnx_chainer.export', 'onnx_chainer.export', (['self.model', 'self.x'], {}), '(self.model, self.x)\n', (2642, 2662), False, 'import onnx_chainer\n'), ((2741, 2780), 'onnx_chainer.export', 'onnx_chainer.export', (['self.model', 'self.x'], {}), '(self.model, self.x)\n', (2760, 2780), False, 'import onnx_chainer\n'), ((3151, 3185), 'numpy.zeros', 'np.zeros', (['(1, 5)'], {'dtype': 'np.float32'}), '((1, 5), dtype=np.float32)\n', (3159, 3185), True, 'import 
numpy as np\n'), ((3264, 3303), 'onnx_chainer.export', 'onnx_chainer.export', (['self.model', 'self.x'], {}), '(self.model, self.x)\n', (3283, 3303), False, 'import onnx_chainer\n'), ((3382, 3421), 'onnx_chainer.export', 'onnx_chainer.export', (['self.model', 'self.x'], {}), '(self.model, self.x)\n', (3401, 3421), False, 'import onnx_chainer\n'), ((3087, 3103), 'chainer.functions.concat', 'F.concat', (['(x, y)'], {}), '((x, y))\n', (3095, 3103), True, 'import chainer.functions as F\n'), ((3030, 3046), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (3037, 3046), True, 'import numpy as np\n')] |
import xml.etree.ElementTree as ET
import sys
import numpy as np
import scipy.sparse.csgraph
from argparse import ArgumentParser
from collections import defaultdict
import wknml
def flatten(l):
    """Concatenate the sub-sequences of *l* into one flat list."""
    flat = []
    for sub in l:
        flat.extend(sub)
    return flat
def find(pred, l):
    """Return the first element of *l* for which *pred* is truthy.

    Raises StopIteration when no element matches (same as the original
    generator-based implementation).
    """
    return next(filter(pred, l))
# CLI: split each tree of an NML skeleton so that every connected component
# becomes its own tree, grouped under the original tree's name.
parser = ArgumentParser(description="Splits trees in order to fix unlinked nodes.")
parser.add_argument("source", help="Source NML file")
parser.add_argument("target", help="Target NML file")
args = parser.parse_args()
with open(args.source, "rb") as f:
    nml = wknml.parse_nml(f)
# Collect all nodes/edges across every tree; the matrix below is indexed by
# raw node id, so ids are assumed non-negative.
all_nodes = flatten([t.nodes for t in nml.trees])
all_edges = flatten([t.edges for t in nml.trees])
all_node_ids = [n.id for n in all_nodes]
max_node_id = max(all_node_ids) + 1
print(
    "trees={} nodes={} edges={} max_node={}".format(
        len(nml.trees), len(all_nodes), len(all_edges), max_node_id
    )
)
# Sparse adjacency matrix over node ids (lil_matrix: cheap incremental fill).
mat = scipy.sparse.lil_matrix((max_node_id, max_node_id))
for edge in all_edges:
    mat[edge.source, edge.target] = 1
# mat_sparse = scipy.sparse.csgraph.csgraph_from_dense(mat)
n_components, labels = scipy.sparse.csgraph.connected_components(
    csgraph=mat, directed=False
)
# Maps old tree id -> list of new tree ids carved out of it.
old_new_mapping = defaultdict(list)
new_trees = []
for i in range(n_components):
    node_ids, = np.where(labels == i)
    node_ids = node_ids.tolist()
    # Singleton components whose id never occurs in the file are artifacts of
    # indexing the matrix by raw node id — skip them.
    if len(node_ids) == 1 and node_ids[0] not in all_node_ids:
        continue
    # The new tree inherits color/name/group from the tree the component came from.
    old_tree = find(lambda t: any(n.id in node_ids for n in t.nodes), nml.trees)
    new_tree = wknml.Tree(
        id=i,
        color=old_tree.color,
        name=old_tree.name,
        groupId=old_tree.groupId,
        nodes=[n for n in all_nodes if n.id in node_ids],
        edges=[e for e in all_edges if e.source in node_ids or e.target in node_ids],
    )
    old_new_mapping[old_tree.id].append(i)
    new_trees.append(new_tree)
# Regroup: all components split from the same original tree share one group.
new_trees_with_groups = []
new_groups = []
for i, (old_id, new_ids) in enumerate(old_new_mapping.items()):
    group_id = i + 1
    old_tree = find(lambda t: t.id == old_id, nml.trees)
    new_groups.append(wknml.Group(id=group_id, name=old_tree.name, children=[]))
    for new_id in new_ids:
        new_tree = find(lambda t: t.id == new_id, new_trees)
        new_trees_with_groups.append(new_tree._replace(groupId=group_id))
nml = nml._replace(trees=new_trees_with_groups, groups=new_groups)
with open(args.target, "wb") as f:
    wknml.write_nml(f, nml)
| [
"wknml.Tree",
"wknml.parse_nml",
"argparse.ArgumentParser",
"numpy.where",
"wknml.Group",
"collections.defaultdict",
"wknml.write_nml"
] | [((307, 381), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Splits trees in order to fix unlinked nodes."""'}), "(description='Splits trees in order to fix unlinked nodes.')\n", (321, 381), False, 'from argparse import ArgumentParser\n'), ((1197, 1214), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1208, 1214), False, 'from collections import defaultdict\n'), ((563, 581), 'wknml.parse_nml', 'wknml.parse_nml', (['f'], {}), '(f)\n', (578, 581), False, 'import wknml\n'), ((1276, 1297), 'numpy.where', 'np.where', (['(labels == i)'], {}), '(labels == i)\n', (1284, 1297), True, 'import numpy as np\n'), ((1508, 1729), 'wknml.Tree', 'wknml.Tree', ([], {'id': 'i', 'color': 'old_tree.color', 'name': 'old_tree.name', 'groupId': 'old_tree.groupId', 'nodes': '[n for n in all_nodes if n.id in node_ids]', 'edges': '[e for e in all_edges if e.source in node_ids or e.target in node_ids]'}), '(id=i, color=old_tree.color, name=old_tree.name, groupId=old_tree\n .groupId, nodes=[n for n in all_nodes if n.id in node_ids], edges=[e for\n e in all_edges if e.source in node_ids or e.target in node_ids])\n', (1518, 1729), False, 'import wknml\n'), ((2387, 2410), 'wknml.write_nml', 'wknml.write_nml', (['f', 'nml'], {}), '(f, nml)\n', (2402, 2410), False, 'import wknml\n'), ((2058, 2115), 'wknml.Group', 'wknml.Group', ([], {'id': 'group_id', 'name': 'old_tree.name', 'children': '[]'}), '(id=group_id, name=old_tree.name, children=[])\n', (2069, 2115), False, 'import wknml\n')] |
import numpy as np
import matplotlib.pyplot as plt
import ibllib.dsp.fourier as ft
def lp(ts, fac, pad=0.2):
    """
    Smooth the data in frequency domain (assumes a uniform sampling rate), using edge padding
    ibllib.dsp.smooth.lp(ts, [.1, .15])
    :param ts: input signal to be smoothed
    :param fac: 2 element vector of the frequency edges relative to Nyquist: [0.15, 0.2] keeps
     everything up to 15% of the full band tapering down to 20%
    :param pad: padding on the edges of the time serie, between 0 and 1 (0.2 means 20% of the size)
    :return: smoothed time series
    """
    # keep at least two periods for the padding
    # use the builtin int(): the np.int alias was deprecated in NumPy 1.20 and
    # removed in 1.24, so np.int(...) raises AttributeError on modern NumPy
    lpad = int(np.ceil(ts.shape[0] * pad))
    ts_ = np.pad(ts, lpad, mode='edge')  # edge padding limits boundary artefacts
    ts_ = ft.lp(ts_, 1, np.array(fac) / 2)  # low-pass between fac[0]/2 and fac[1]/2 of fs=1
    return ts_[lpad:-lpad]  # strip the padding back off
def rolling_window(x, window_len=11, window='blackman'):
    """
    Smooth the data using a window with requested size.
    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal
    (with the window size) in both ends so that transient parts are minimized
    in the begining and end part of the output signal.
    :param x: The input signal
    :type x: list or numpy.array
    :param window_len: The dimension of the smoothing window,
        should be an **odd** integer, defaults to 11
    :type window_len: int, optional
    :param window: The type of window from ['flat', 'hanning', 'hamming',
        'bartlett', 'blackman']
        flat window will produce a moving average smoothing,
        defaults to 'blackman'
    :type window: str, optional
    :raises ValueError: Smooth only accepts 1 dimension arrays.
    :raises ValueError: Input vector needs to be bigger than window size.
    :raises ValueError: Window is not one of 'flat', 'hanning', 'hamming',
        'bartlett', 'blackman'
    :return: Smoothed array
    :rtype: numpy.array
    """
    if isinstance(x, list):
        x = np.array(x)
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        # Window too small to smooth anything: return the input unchanged.
        return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window is not one of 'flat', 'hanning', 'hamming',\
            'bartlett', 'blackman'")
    # Mirror-pad both ends so the convolution has full support everywhere.
    s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
    if window == 'flat':  # moving average
        w = np.ones(window_len, 'd')
    else:
        # Look the window function up on the numpy module; the previous
        # eval('np.' + window + '(window_len)') executed arbitrary strings.
        w = getattr(np, window)(window_len)
    # Normalize the window so the output keeps the input's scale.
    y = np.convolve(w / w.sum(), s, mode='valid')
    # Trim the padded region so the output length matches the input.
    return y[round((window_len / 2 - 1)):round(-(window_len / 2))]
def smooth_demo():
    """Visual demo: plot the available window shapes (top panel) and the
    result of smoothing a noisy sine wave with each of them (bottom panel)."""
    t = np.linspace(-4, 4, 100)
    x = np.sin(t)
    xn = x + np.random.randn(len(t)) * 0.1  # sine plus gaussian noise
    ws = 31
    plt.subplot(211)
    plt.plot(np.ones(ws))
    windows = ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']
    for w in windows[1:]:
        # Look the window function up on the numpy module; the previous
        # eval('plt.plot(np.' + w + '(ws) )') executed arbitrary strings.
        plt.plot(getattr(np, w)(ws))
    plt.axis([0, 30, 0, 1.1])
    plt.legend(windows)
    plt.title("The smoothing windows")
    plt.subplot(212)
    plt.plot(x)
    plt.plot(xn)
    for w in windows:
        plt.plot(rolling_window(xn, 10, w))
    lst = ['original signal', 'signal with noise']
    lst.extend(windows)
    plt.legend(lst)
    plt.title("Smoothing a noisy signal")
    plt.ion()
# Run the demo only when executed as a script (no effect on import).
if __name__ == '__main__':
    smooth_demo()
| [
"numpy.ceil",
"matplotlib.pyplot.title",
"numpy.ones",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.linspace",
"numpy.sin",
"matplotlib.pyplot.axis",
"numpy.pad",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ion"
] | [((702, 731), 'numpy.pad', 'np.pad', (['ts', 'lpad'], {'mode': '"""edge"""'}), "(ts, lpad, mode='edge')\n", (708, 731), True, 'import numpy as np\n'), ((3002, 3025), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(100)'], {}), '(-4, 4, 100)\n', (3013, 3025), True, 'import numpy as np\n'), ((3034, 3043), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (3040, 3043), True, 'import numpy as np\n'), ((3105, 3121), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (3116, 3121), True, 'import matplotlib.pyplot as plt\n'), ((3294, 3319), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 30, 0, 1.1]'], {}), '([0, 30, 0, 1.1])\n', (3302, 3319), True, 'import matplotlib.pyplot as plt\n'), ((3325, 3344), 'matplotlib.pyplot.legend', 'plt.legend', (['windows'], {}), '(windows)\n', (3335, 3344), True, 'import matplotlib.pyplot as plt\n'), ((3349, 3383), 'matplotlib.pyplot.title', 'plt.title', (['"""The smoothing windows"""'], {}), "('The smoothing windows')\n", (3358, 3383), True, 'import matplotlib.pyplot as plt\n'), ((3388, 3404), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (3399, 3404), True, 'import matplotlib.pyplot as plt\n'), ((3409, 3420), 'matplotlib.pyplot.plot', 'plt.plot', (['x'], {}), '(x)\n', (3417, 3420), True, 'import matplotlib.pyplot as plt\n'), ((3425, 3437), 'matplotlib.pyplot.plot', 'plt.plot', (['xn'], {}), '(xn)\n', (3433, 3437), True, 'import matplotlib.pyplot as plt\n'), ((3584, 3599), 'matplotlib.pyplot.legend', 'plt.legend', (['lst'], {}), '(lst)\n', (3594, 3599), True, 'import matplotlib.pyplot as plt\n'), ((3604, 3641), 'matplotlib.pyplot.title', 'plt.title', (['"""Smoothing a noisy signal"""'], {}), "('Smoothing a noisy signal')\n", (3613, 3641), True, 'import matplotlib.pyplot as plt\n'), ((3646, 3655), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (3653, 3655), True, 'import matplotlib.pyplot as plt\n'), ((664, 690), 'numpy.ceil', 'np.ceil', (['(ts.shape[0] * pad)'], {}), '(ts.shape[0] * 
pad)\n', (671, 690), True, 'import numpy as np\n'), ((2200, 2211), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2208, 2211), True, 'import numpy as np\n'), ((2769, 2793), 'numpy.ones', 'np.ones', (['window_len', '"""d"""'], {}), "(window_len, 'd')\n", (2776, 2793), True, 'import numpy as np\n'), ((3135, 3146), 'numpy.ones', 'np.ones', (['ws'], {}), '(ws)\n', (3142, 3146), True, 'import numpy as np\n'), ((756, 769), 'numpy.array', 'np.array', (['fac'], {}), '(fac)\n', (764, 769), True, 'import numpy as np\n')] |
from typing import Dict, Tuple
import networkx as nx
from networkx.classes import graph
import numpy as np
from functools import partial
from bokeh.plotting import from_networkx,figure
from bokeh.models import Circle
from bokeh.models import HoverTool
from bokeh.models import MultiLine
from bokeh.models import NodesAndLinkedEdges
from bokeh.models import Range1d
from bokeh.transform import linear_cmap
from bokeh.palettes import brewer
from bokeh.palettes import Spectral4
from matplotlib.lines import Line2D
import matplotlib.pyplot as plt
import pandas as pd
plt.style.use('fivethirtyeight')


def make_proxy(clr, mappable, **kwargs):
    """Return a legend proxy: a Line2D artist drawn in color *clr*.

    ``mappable`` is accepted for signature compatibility but is not used.
    """
    xdata = [0, 1]
    ydata = [0, 1]
    return Line2D(xdata, ydata, color=clr, **kwargs)
def color_edge(graph:nx.Graph)->Dict[Tuple[int,int],str]:
    """Map each edge (u, v) to "green" for non-negative weight, "red" otherwise."""
    return {
        (u, v): "green" if data['weight'] >= 0 else "red"
        for u, v, data in graph.edges(data=True)
    }
def sizes_edges_f(x, max: float, min: float):
    """Rescale an edge weight *x* into an integer line width.

    NaN weights map to 0; otherwise the weight is linearly rescaled over
    [min, max] to roughly [1, 11] (larger widths for weights nearer *min*).
    NOTE: the ``max``/``min`` parameter names shadow the builtins but are
    kept because callers bind them by keyword (see ``edge_size``).
    """
    if np.isnan(x):
        return 0
    # same arithmetic as before, without the `np.isnan(x) != True` anti-idiom
    return round(10 * (max - x) / (max - min)) + 1
def edge_size(graph:nx.Graph,min:float,max:float)->Dict[Tuple[int,int],str]:
    """Map each edge (u, v) to a line width derived from its weight via
    ``sizes_edges_f`` rescaled over [min, max]."""
    return {
        (u, v): sizes_edges_f(data['weight'], max=max, min=min)
        for u, v, data in graph.edges(data=True)
    }
def plot_matplotlib(graph:nx.Graph,frame:pd.DataFrame,
                    max:float,min:float,
                    kind:str='HDBSCAN',
                    kind_network:str='circular'):
    """Draw the network with matplotlib and return the figure.

    :param graph: weighted graph; edge widths come from |weight|, edge colors
        from the weight sign (green >= 0, red < 0)
    :param frame: DataFrame with columns OTUS, Degree_Centrality and either
        HDBSCAN or Community (used for node coloring)
    :param max/min: weight range (accepted for API symmetry with plot_bokeh;
        edge_attrs_size computed below is currently unused)
    :param kind: 'HDBSCAN' colors by the HDBSCAN column (red for label -1),
        anything else colors by the Community column
    :param kind_network: 'circular' or 'spring' layout
    :raises ValueError: for any other layout name
    """
    if kind_network=='circular':
        pos = nx.circular_layout(graph)
    elif kind_network=='spring':
        pos = nx.spring_layout(graph)
    else:
        raise ValueError('Invalid layout type')
    # Edge widths: absolute weight of every edge, in graph.edges order.
    cache=[]
    for start_node, end_node,valuew in graph.edges(data=True):
        cache.append(np.abs(valuew['weight']))
    def get_colors(kind:str='HDBSCAN'):
        # One RGBA color per node; HDBSCAN noise points (label -1) are pure red.
        cmap = plt.cm.viridis
        if kind=='HDBSCAN':
            COLORS=[(1,0,0,1) if i==-1 else cmap(i*100+20) for i in frame.HDBSCAN.to_list()]
            return COLORS
        else:
            COLORS=[cmap(i*100+50) for i in frame.Community.to_list() ]
            return COLORS
    # NOTE(review): Map_Text/Map_Num and edge_attrs_size are computed but not
    # used in this function (they mirror plot_bokeh).
    Map_Text={str(s):i for i,s in enumerate(frame.OTUS.tolist())}
    Map_Num={i:str(s) for i,s in enumerate(frame.OTUS.tolist())}
    Sizes_Nodes=(frame.Degree_Centrality*1000+100).astype(int)
    COLORS=get_colors(kind=kind)
    edge_attrs_color=color_edge(graph=graph)
    edge_attrs_size=edge_size(graph=graph,min=min,max=max)
    fig = plt.figure(figsize=(16, 16))
    plt.title(f'Network - {kind}', fontsize=20, fontweight='bold')
    h1=nx.draw_networkx_nodes(graph, pos,
                           node_size=Sizes_Nodes.to_list(),
                           node_color=COLORS,
                           alpha=0.5,
                           linewidths=1.0,
                           edgecolors='black')
    h2=nx.draw_networkx_edges(graph, pos,
                           edge_color=list(edge_attrs_color.values()),
                           width=cache)
    # Legend proxies: one green and one red line for the edge-sign colors.
    proxies = [make_proxy(clr, h2,lw=5) for clr in ['green','red']]
    plt.legend(proxies,['Positive','Negative'])
    return fig
def plot_bokeh(graph:nx.Graph,frame:pd.DataFrame,
               nodes:int,
               max:float,min:float,
               kind:str='HDBSCAN',
               kind_network:str='circular'):
    """Build an interactive bokeh figure of the network.

    :param graph: weighted graph; relabeled in-place copy uses OTU names
    :param frame: DataFrame with columns OTUS, Degree_Centrality,
        Betweeness_Centrality, Closeness_Centrality, PageRank and either
        HDBSCAN or Community
    :param nodes: node count; above 500 hover/interactive tools are disabled
    :param max/min: weight range used to scale edge widths
    :param kind: 'HDBSCAN' colors by the HDBSCAN column, else by Community
    :param kind_network: 'circular' or 'spring' layout
    :raises ValueError: for any other layout name
    """
    # Work on a copy so the caller's graph keeps its integer labels.
    graph=graph.copy()
    Map_Text={str(s):i for i,s in enumerate(frame.OTUS.tolist())}
    Map_Num={i:str(s) for i,s in enumerate(frame.OTUS.tolist())}
    Sizes_Nodes=(frame.Degree_Centrality*60+10).astype(int)
    Val_Node_Sizes={s:k for s,k in zip(Map_Text.keys(),Sizes_Nodes)}
    # Relabel integer node ids to OTU name strings for display.
    graph=nx.relabel_nodes(graph,Map_Num)
    edge_attrs_color=color_edge(graph=graph)
    edge_attrs_size=edge_size(graph=graph,min=min,max=max)
    nx.set_edge_attributes(graph, values=edge_attrs_color, name="edge_color")
    nx.set_edge_attributes(graph, values=edge_attrs_size, name="edge_sizes")
    nx.set_node_attributes(graph,name='Sizes',values=Val_Node_Sizes)
    if kind_network=='circular':
        graph_renderer = from_networkx(graph, nx.circular_layout)
    elif kind_network=='spring':
        graph_renderer = from_networkx(graph, nx.spring_layout)
    else:
        raise ValueError('Invalid layout type')
    # Large graphs (> 500 nodes) get a stripped-down toolbar and no tooltips.
    if nodes < 501:
        tools = "pan,wheel_zoom,save,reset,box_zoom"
        tooltips = [("Name", "@index")]
    else:
        tools = "save"
        tooltips =None
    plot = figure(width=900,
                  height=900,
                  x_range=Range1d(-1.1,1.1),
                  y_range=Range1d(-1.1,1.1),
                  tooltips =tooltips,
                  tools=tools, output_backend="svg")
    if nodes > 500:
        plot.toolbar.active_inspect = None
    plot.title.text = f"Network - {kind}"
    plot.title.text_font_size = "30px"
    # Attach per-node metrics so the hover tool can display them.
    graph_renderer.node_renderer.data_source.data['Degree_Centrality']=frame['Degree_Centrality'].tolist()
    graph_renderer.node_renderer.data_source.data['Betweeness_Centrality']=frame['Betweeness_Centrality'].tolist()
    graph_renderer.node_renderer.data_source.data['Closeness_Centrality']=frame['Closeness_Centrality'].tolist()
    graph_renderer.node_renderer.data_source.data['PageRank']=frame['PageRank'].tolist()
    # Colors: node fill mapped linearly over the cluster/community labels.
    if kind=='HDBSCAN':
        graph_renderer.node_renderer.data_source.data['Color_Tax_1']=frame['HDBSCAN'].tolist()
        graph_renderer.node_renderer.glyph = Circle(size='Sizes',fill_color=linear_cmap('Color_Tax_1', brewer['Spectral'][11],
                                                                    frame['HDBSCAN'].min(),
                                                                    frame['HDBSCAN'].max()))
    else:
        graph_renderer.node_renderer.data_source.data['Color_Tax_1']=frame['Community'].tolist()
        graph_renderer.node_renderer.glyph = Circle(size='Sizes',fill_color=linear_cmap('Color_Tax_1', brewer['Spectral'][11],
                                                                    frame['Community'].min(),
                                                                    frame['Community'].max()))
    # Hover/selection glyphs for nodes and edges.
    graph_renderer.node_renderer.hover_glyph = Circle(size='Sizes', fill_color='#0e0f0f')
    graph_renderer.node_renderer.selection_glyph = Circle(size='Sizes', fill_color=Spectral4[2])
    graph_renderer.edge_renderer.glyph = MultiLine(line_color="edge_color", line_alpha=0.1, line_width="edge_sizes")
    graph_renderer.edge_renderer.selection_glyph = MultiLine(line_color=Spectral4[1], line_width="edge_sizes")
    graph_renderer.edge_renderer.hover_glyph = MultiLine(line_color="edge_color", line_width="edge_sizes")
    # Highlight a node together with its incident edges on hover/selection.
    graph_renderer.selection_policy = NodesAndLinkedEdges()
    graph_renderer.inspection_policy = NodesAndLinkedEdges()
    plot.renderers.append(graph_renderer)
    node_hover_tool = HoverTool(tooltips=[("Index", "@index"),
                                        ("Centrality","@Degree_Centrality{(0.000)}"),
                                        ("Betweeness_Centrality","@Betweeness_Centrality{(0.000)}"),
                                        ("Closeness_Centrality","@Closeness_Centrality{(0.000)}"),
                                        ("PageRank","@PageRank{(0.000)}"),
                                        ("Comunidad","@Color_Tax_1")
                                        ])
    plot.add_tools(node_hover_tool)
return plot | [
"bokeh.models.MultiLine",
"networkx.classes.graph.edges",
"bokeh.models.Range1d",
"networkx.set_edge_attributes",
"networkx.classes.graph.copy",
"matplotlib.lines.Line2D",
"networkx.relabel_nodes",
"bokeh.models.Circle",
"bokeh.plotting.from_networkx",
"networkx.spring_layout",
"matplotlib.pyplo... | [((567, 599), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (580, 599), True, 'import matplotlib.pyplot as plt\n'), ((653, 696), 'matplotlib.lines.Line2D', 'Line2D', (['[0, 1]', '[0, 1]'], {'color': 'clr'}), '([0, 1], [0, 1], color=clr, **kwargs)\n', (659, 696), False, 'from matplotlib.lines import Line2D\n'), ((875, 897), 'networkx.classes.graph.edges', 'graph.edges', ([], {'data': '(True)'}), '(data=True)\n', (886, 897), False, 'from networkx.classes import graph\n'), ((1300, 1340), 'functools.partial', 'partial', (['sizes_edges_f'], {'max': 'max', 'min': 'min'}), '(sizes_edges_f, max=max, min=min)\n', (1307, 1340), False, 'from functools import partial\n'), ((1380, 1402), 'networkx.classes.graph.edges', 'graph.edges', ([], {'data': '(True)'}), '(data=True)\n', (1391, 1402), False, 'from networkx.classes import graph\n'), ((1941, 1963), 'networkx.classes.graph.edges', 'graph.edges', ([], {'data': '(True)'}), '(data=True)\n', (1952, 1963), False, 'from networkx.classes import graph\n'), ((2688, 2716), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 16)'}), '(figsize=(16, 16))\n', (2698, 2716), True, 'import matplotlib.pyplot as plt\n'), ((2721, 2783), 'matplotlib.pyplot.title', 'plt.title', (['f"""Network - {kind}"""'], {'fontsize': '(20)', 'fontweight': '"""bold"""'}), "(f'Network - {kind}', fontsize=20, fontweight='bold')\n", (2730, 2783), True, 'import matplotlib.pyplot as plt\n'), ((3202, 3247), 'matplotlib.pyplot.legend', 'plt.legend', (['proxies', "['Positive', 'Negative']"], {}), "(proxies, ['Positive', 'Negative'])\n", (3212, 3247), True, 'import matplotlib.pyplot as plt\n'), ((3430, 3442), 'networkx.classes.graph.copy', 'graph.copy', ([], {}), '()\n', (3440, 3442), False, 'from networkx.classes import graph\n'), ((3713, 3745), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['graph', 'Map_Num'], {}), '(graph, Map_Num)\n', (3729, 3745), True, 'import networkx 
as nx\n'), ((3855, 3928), 'networkx.set_edge_attributes', 'nx.set_edge_attributes', (['graph'], {'values': 'edge_attrs_color', 'name': '"""edge_color"""'}), "(graph, values=edge_attrs_color, name='edge_color')\n", (3877, 3928), True, 'import networkx as nx\n'), ((3933, 4005), 'networkx.set_edge_attributes', 'nx.set_edge_attributes', (['graph'], {'values': 'edge_attrs_size', 'name': '"""edge_sizes"""'}), "(graph, values=edge_attrs_size, name='edge_sizes')\n", (3955, 4005), True, 'import networkx as nx\n'), ((4010, 4076), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['graph'], {'name': '"""Sizes"""', 'values': 'Val_Node_Sizes'}), "(graph, name='Sizes', values=Val_Node_Sizes)\n", (4032, 4076), True, 'import networkx as nx\n'), ((6302, 6344), 'bokeh.models.Circle', 'Circle', ([], {'size': '"""Sizes"""', 'fill_color': '"""#0e0f0f"""'}), "(size='Sizes', fill_color='#0e0f0f')\n", (6308, 6344), False, 'from bokeh.models import Circle\n'), ((6396, 6441), 'bokeh.models.Circle', 'Circle', ([], {'size': '"""Sizes"""', 'fill_color': 'Spectral4[2]'}), "(size='Sizes', fill_color=Spectral4[2])\n", (6402, 6441), False, 'from bokeh.models import Circle\n'), ((6485, 6560), 'bokeh.models.MultiLine', 'MultiLine', ([], {'line_color': '"""edge_color"""', 'line_alpha': '(0.1)', 'line_width': '"""edge_sizes"""'}), "(line_color='edge_color', line_alpha=0.1, line_width='edge_sizes')\n", (6494, 6560), False, 'from bokeh.models import MultiLine\n'), ((6612, 6671), 'bokeh.models.MultiLine', 'MultiLine', ([], {'line_color': 'Spectral4[1]', 'line_width': '"""edge_sizes"""'}), "(line_color=Spectral4[1], line_width='edge_sizes')\n", (6621, 6671), False, 'from bokeh.models import MultiLine\n'), ((6719, 6778), 'bokeh.models.MultiLine', 'MultiLine', ([], {'line_color': '"""edge_color"""', 'line_width': '"""edge_sizes"""'}), "(line_color='edge_color', line_width='edge_sizes')\n", (6728, 6778), False, 'from bokeh.models import MultiLine\n'), ((6819, 6840), 
'bokeh.models.NodesAndLinkedEdges', 'NodesAndLinkedEdges', ([], {}), '()\n', (6838, 6840), False, 'from bokeh.models import NodesAndLinkedEdges\n'), ((6880, 6901), 'bokeh.models.NodesAndLinkedEdges', 'NodesAndLinkedEdges', ([], {}), '()\n', (6899, 6901), False, 'from bokeh.models import NodesAndLinkedEdges\n'), ((6969, 7262), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('Index', '@index'), ('Centrality', '@Degree_Centrality{(0.000)}'), (\n 'Betweeness_Centrality', '@Betweeness_Centrality{(0.000)}'), (\n 'Closeness_Centrality', '@Closeness_Centrality{(0.000)}'), ('PageRank',\n '@PageRank{(0.000)}'), ('Comunidad', '@Color_Tax_1')]"}), "(tooltips=[('Index', '@index'), ('Centrality',\n '@Degree_Centrality{(0.000)}'), ('Betweeness_Centrality',\n '@Betweeness_Centrality{(0.000)}'), ('Closeness_Centrality',\n '@Closeness_Centrality{(0.000)}'), ('PageRank', '@PageRank{(0.000)}'),\n ('Comunidad', '@Color_Tax_1')])\n", (6978, 7262), False, 'from bokeh.models import HoverTool\n'), ((1731, 1756), 'networkx.circular_layout', 'nx.circular_layout', (['graph'], {}), '(graph)\n', (1749, 1756), True, 'import networkx as nx\n'), ((4134, 4174), 'bokeh.plotting.from_networkx', 'from_networkx', (['graph', 'nx.circular_layout'], {}), '(graph, nx.circular_layout)\n', (4147, 4174), False, 'from bokeh.plotting import from_networkx, figure\n'), ((1158, 1169), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (1166, 1169), True, 'import numpy as np\n'), ((1804, 1827), 'networkx.spring_layout', 'nx.spring_layout', (['graph'], {}), '(graph)\n', (1820, 1827), True, 'import networkx as nx\n'), ((1986, 2010), 'numpy.abs', 'np.abs', (["valuew['weight']"], {}), "(valuew['weight'])\n", (1992, 2010), True, 'import numpy as np\n'), ((4234, 4272), 'bokeh.plotting.from_networkx', 'from_networkx', (['graph', 'nx.spring_layout'], {}), '(graph, nx.spring_layout)\n', (4247, 4272), False, 'from bokeh.plotting import from_networkx, figure\n'), ((4588, 4606), 'bokeh.models.Range1d', 'Range1d', 
(['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (4595, 4606), False, 'from bokeh.models import Range1d\n'), ((4634, 4652), 'bokeh.models.Range1d', 'Range1d', (['(-1.1)', '(1.1)'], {}), '(-1.1, 1.1)\n', (4641, 4652), False, 'from bokeh.models import Range1d\n')] |
import numpy as np
from time import sleep
from math import exp
import matplotlib.pyplot as plt
from scipy.integrate import trapz
from os import path
import struct
import spectrabuster.functions as sbf
from importlib import import_module
from datetime import date, datetime
from functools import partial
class Spectrum(object):
    """
    Holds a single measured spectrum (wavelengths + intensities) together
    with the acquisition settings. The class attributes below are default
    values, used whenever their instance counterparts are not specified
    when initializing a Spectrum object.
    """

    # {{{
    to_save = {
        "int_time",
        "correct_nl",
        "correct_dc",
        "UV_index",
        "capture_date",
        "capture_time",
        "temp",
    }  # fields that will be written to the file
    samples = 1  # how many samples to average
    optimize = True  # whether or not to call optimize_int_time when sensor saturation is detected
    UV_index = None  # no UV index computed by default
    # }}}
    def __init__(self, int_time=None, **kwargs):
        # {{{
        """
        Initializes the Spectrum with the values specified by kwargs. Absence
        of a key results in the attribute taking a default value.

        Recognized kwargs: backend, device, from_index, to_index, correct_nl,
        correct_dc, samples, wavelengths, intensities.

        :param int_time: integration time passed to the device (None keeps
            the device's current setting)
        :raises RuntimeError: if wavelengths/intensities are neither passed
            nor measurable by the selected backend
        """
        # First of all, define a backend (fall back to the 'none' backend).
        backend = kwargs["backend"] if "backend" in kwargs else None
        try:
            self.backend = sbf.get_backend(backend)
        except RuntimeError:
            self._warn("No backend specified. Using none by default.")
            self.backend = sbf.get_backend("none")
        # Then get the device (explicit one, or first the backend can find).
        if "device" in kwargs:
            self.device = self.backend.Device(kwargs["device"])
        else:
            self.device = self.backend.first_available_device()
        # Some features of the device, gated by the backend's feature flags.
        if self.backend.features["int_time_limits"]:
            self.int_time_limits = self.device.int_time_limits
        else:
            self.int_time_limits = None
        if self.backend.features["sat_intensity"]:
            self.sat_intensity = self.device.sat_intensity
        else:
            self.sat_intensity = None
        # Bunch of optional parameters
        self.from_index = kwargs["from_index"] if "from_index" in kwargs else None
        self.to_index = kwargs["to_index"] if "to_index" in kwargs else None
        self.correct_nl = kwargs["correct_nl"] if "correct_nl" in kwargs else False
        self.correct_dc = kwargs["correct_dc"] if "correct_dc" in kwargs else False
        self.samples = kwargs["samples"] if "samples" in kwargs else None
        self.int_time = int_time
        # Then the wavelengths and the intensities. It'll get each from the
        # device unless provided at the instantiation.
        if "wavelengths" in kwargs:
            self.wavel = np.array(kwargs["wavelengths"])
        elif self.backend.features["measure"]:
            self.wavel = self.device.wavelengths()
        else:
            raise RuntimeError(
                f"No wavelengths array passed, and the {self.backend} cannot make measurements."
            )
        if "intensities" in kwargs:
            self.inten = np.array(kwargs["intensities"])
        elif self.backend.features["measure"]:
            self.inten = self.measure_inten(int_time=self.int_time)
        else:
            # NB: "masurements" typo is preserved; it is a runtime string.
            raise RuntimeError(
                f"No intensities array passed, and the {self.backend} cannot make masurements."
            )
        # Finally, slice the intensities and wavelengths arrays to the
        # requested (from_index, to_index) window.
        self.wavel, self.inten, self.slice = self.slice_array(
            self.wavel, self.inten, (self.from_index, self.to_index)
        )
    # }}}
def measure_inten(self, int_time=None, samples=None, **kwargs):
# {{{
samples = samples if samples is not None else self.samples
int_time = int_time if int_time is not None else self.int_time
correct_dc = kwargs["correct_dc"] if "correct_dc" in kwargs else self.correct_dc
correct_nl = kwargs["correct_nl"] if "correct_nl" in kwargs else self.correct_nl
if samples is None:
samples = 1
elif type(samples) is not int or samples < 0:
raise ValueError(
f"Invalid value of {self.samples} for the number of samples to average."
)
if int_time is None:
pass
else:
try:
self.device.set_int_time(float(int_time))
except (TypeError, ValueError) as e:
raise ValueError(
f"Invalid type or value of {int_time} for integration time"
)
measure = partial(
self.device.measure, correct_dc=correct_dc, correct_nl=correct_nl
)
inten_avg = np.average([measure() for i in range(samples)], axis=0)
return inten_avg
# }}}
def write_to_file(self, file_path=None, save_fields=True, **kwargs):
# {{{
"""
Stores spectrum in a .dat text file, using a format that is easy to parse
in gnu octave, R or any other programming language, or visualize in gnuplot,
or any spreadsheet program.
"""
overwrite = kwargs["overwrite"] if "overwrite" in kwargs else None
if path.exists(file_path):
if overwrite:
self._warn(f"WARNING: File {file_path} exists. Overwriting it.")
else:
raise RuntimeError(
f"File {file_path} already exists. Pass 'overwrite=True' if you are sure you want to overwrite it."
)
only_wavelengths = (
kwargs["only_wavelengths"] if "only_wavelengths" in kwargs else False
)
only_intensities = (
kwargs["only_intensities"] if "only_intensities" in kwargs else False
)
to_save = self.to_save # fields that will be written to the file
if not file_path or not isinstance(file_path, str):
raise ValueError(
"Please pass a string as the file path wherein to save the spectrum."
)
with open(file_path, "w+") as arq:
gen_comments = (
f"# {name} = {value}\n"
for name, value in vars(self).items()
if name in to_save
)
arq.writelines(gen_comments)
if only_wavelengths:
gen_wavel_inten = (f"{wavel}\n" for wavel in self.wavel)
elif only_intensities:
gen_wavel_inten = (f"{inten}\n" for inten in self.inten)
else:
gen_wavel_inten = (
f"{wavel}\t{inten}\n" for wavel, inten in zip(*self.spectrum)
)
arq.writelines(gen_wavel_inten)
# }}}
def to_spectral_irrad(self, calibration_file=None, int_time=None):
# {{{
"""
Applies the spectral irradiance calibration and returns another
Spectrum object for the irradiance spectrum.
It also has to be a file with the wavelengths and spectral sensitivity,
by the way. And note that this function assumes that the current
Spectrum and the calibration file have the same wavelengths array,
maybe just sliced differently
"""
if not calibration_file:
raise RuntimeError(
"Please pass the path to the calibration file as an argument."
)
if not int_time and not self.int_time:
raise ValueError(
"No integration time argument passed, and this spectrum's int_time field is empty."
)
elif not int_time and self.int_time:
int_time = self.int_time
calib_wavel, calib_inten, _ = self._read_file(calibration_file)
if self.wavel.size > calib_wavel.size:
from_index = self.find_wavel_index(self.wavel, calib_wavel[0])
to_index = self.find_wavel_index(self.wavel, calib_wavel[-1])
wavel_array = self.wavel[from_index : to_index + 1]
inten_array = self.inten[from_index : to_index + 1]
elif calib_wavel.size > self.wavel.size:
from_index = self.find_wavel_index(calib_wavel, self.wavel[0])
to_index = self.find_wavel_index(calib_wavel, self.wavel[-1])
calib_inten = calib_inten[from_index : to_index + 1]
wavel_array = calib_wavel[from_index : to_index + 1]
inten_array = self.inten
else:
inten_array = self.inten
wavel_array = self.wavel
apply_calibration = lambda counts, calib: counts / (int_time * calib * 0.000001)
inten_array = apply_calibration(inten_array, calib_inten)
return Spectrum(
intensities=inten_array,
wavelengths=wavel_array,
int_time=int_time,
from_index=self.from_index,
to_index=self.to_index,
)
self_params = vars(self).copy()
self_params.update({"intensities": inten_array, "wavelengths": wavel_array})
return Spectrum(**self_params)
# }}}
def to_count_rate(self):
# {{{
"""
Divides the spectrum by its integration time and that's it.
"""
if self.int_time:
return self / (self.int_time * 0.000001)
else:
raise ValueError(
"Integration time undefined for calculation of count rate."
)
# }}}
def calc_uv_index(self, from_wavel=286.0):
# {{{
"""
Calculates the UV index based on Mckinley-Diffey's action spectra for erythema.
"""
weighted_irrad = np.array(
[
self.weight_irrad(wavel, irrad, from_wavel)
for wavel, irrad in zip(*self.spectrum)
]
)
self.UV_index = round(0.04 * trapz(weighted_irrad, self.wavel), 2)
return self.UV_index # just for convenience
# }}}
    def optimize_int_time(self, initial=None, limits=(0.8, 1), max_tries=5):
        # {{{
        """
        Attemps to find an integration time that maximizes signal to noise
        ratio while avoiding sensor saturation.
        This could probably be done more elegantly with recursion, but I
        haven't got time to think about that. Also BEWARE that this will
        overwrite the current spectrum.

        Args:
            initial: starting integration time; defaults to self.int_time.
            limits: target band for max_counts/saturation_counts ratio.
            max_tries: maximum number of measure-and-adjust iterations.

        Returns:
            The integration time settled on (also stored in self.int_time).
        """
        if initial is None:
            initial = self.int_time
        # Hardware constraints queried from the device
        min_int_time, max_int_time = self.device.int_time_limits
        max_counts = self.device.sat_intensity
        # Aim for the middle of the acceptable band
        target_counts = abs((limits[1] + limits[0]) / 2) * max_counts
        int_time = initial
        self.int_time = int_time
        print("Optimizing integration time...")
        i = 0
        # NOTE(review): 'inten_max' is only assigned inside the loop, so this
        # condition relies on short-circuiting ('i < max_tries' is True on the
        # first pass). If max_tries <= 0 this raises NameError — confirm.
        while i < max_tries or inten_max == max_counts:
            # Raw single-shot reading; corrections off for speed
            self.inten = self.measure_inten(
                correct_nl=False, correct_dc=False, samples=1
            )
            inten_max = np.amax(self.inten)
            ratio = inten_max / target_counts
            print(f"{i} {self.int_time} {ratio} {inten_max}")
            if limits[0] <= ratio <= limits[1]:
                break
            elif limits[1] <= ratio <= 1:
                # Slightly above the band but not saturated: shrink gently
                int_time *= ratio ** 2
            elif ratio > 1:
                # Saturated (or close): shrink aggressively
                int_time /= ratio ** 2
            else:
                # Under-exposed: scale up proportionally
                int_time /= ratio
            # Halve until the device accepts the value
            while int_time < min_int_time or int_time > max_int_time:
                int_time /= 2
            self.int_time = int_time
            i += 1
        # Final measurement with corrections at the chosen integration time
        self.inten = self.measure_inten()[self.slice]
        return int_time  # just for convenience
        # }}}
    def join(self, other):
        # {{{
        """
        Joins two spectra. It will give preference to itself when resolving
        overlaps. Probably one of the first functions to get a rewrite.

        Args:
            other: another Spectrum to concatenate with this one.

        Returns:
            A new Spectrum with the merged wavelengths/intensities; slicing
            info is reset and the backend is set to 'none'.

        Raises:
            TypeError: if 'other' is not a Spectrum.
        """
        if not isinstance(other, Spectrum):
            raise TypeError("join takes only spectra as arguments")
        self_wavel_max = self.wavel[-1]
        self_wavel_min = self.wavel[0]
        other_wavel_max = other.wavel[-1]
        other_wavel_min = other.wavel[0]
        # other.wavel starts before self.wavel ends
        # (i.e. self's maximum wavelength falls inside other's range)
        if np.isclose(other.wavel, self_wavel_max).any():
            # NOTE: These variables are indexes referring to self.wavelengths and
            # other.wavelengths respectively!
            start_overlap = np.argmax(np.isclose(self.wavel, other_wavel_min))
            end_overlap = np.argmax(np.isclose(other.wavel, self_wavel_max))
            Spectrum._warn(
                f"WARNING: The spectra overlap from {other_wavel_min} to {self_wavel_max}"
            )
            # For some god forsaken reason np.concatenate will only work if you pass
            # a tuple of arrays...
            # self's values win over the whole overlap; other contributes
            # only the part past self's last wavelength
            new_wavels = np.copy(self.wavel)
            new_wavels = np.concatenate(
                (new_wavels, np.copy(other.wavel[end_overlap + 1 :]))
            )
            new_intens = np.copy(self.inten)
            new_intens = np.concatenate(
                (new_intens, np.copy(other.inten[end_overlap + 1 :]))
            )
        # self.wavelengths starts before other.wavelengths ends
        elif np.isclose(self.wavel, other_wavel_max).any():
            # NOTE: These variables are indexes referring to other.wavel and
            # self.wavel respectively!
            start_overlap = np.argmax(np.isclose(other.wavel, self_wavel_min))
            end_overlap = np.argmax(np.isclose(self.wavel, other_wavel_max))
            Spectrum._warn(
                f"WARNING: The spectra overlap from {self_wavel_min} to {other_wavel_max}"
            )
            # You see, the preference is always given to self
            new_wavels = np.copy(other.wavel[:start_overlap])
            new_wavels = np.concatenate((new_wavels, np.copy(self.wavel)))
            new_intens = np.copy(other.inten[:start_overlap])
            new_intens = np.concatenate((new_intens, np.copy(self.inten)))
        # There is no overlap
        else:
            # NOTE(review): if other_wavel_min == self_wavel_min neither
            # branch below runs and new_wavels/new_intens stay unbound,
            # raising NameError at self_params.update — confirm intent.
            if other_wavel_min > self_wavel_min:
                new_wavels = np.concatenate(
                    (np.copy(self.wavel), np.copy(other.wavel))
                )
                new_intens = np.concatenate(
                    (np.copy(self.inten), np.copy(other.inten))
                )
            elif other_wavel_min < self_wavel_min:
                new_wavels = np.concatenate(
                    (np.copy(other.wavel), np.copy(self.wavel))
                )
                new_intens = np.concatenate(
                    (np.copy(other.inten), np.copy(self.inten))
                )
        # The merged spectrum inherits self's fields, minus slicing/backend
        self_params = vars(self).copy()
        self_params.update(
            {
                "intensities": new_intens,
                "wavelengths": new_wavels,
                "from_index": None,
                "to_index": None,
                "backend": "none",
            }
        )
        return Spectrum(**self_params)
        # }}}
# Esse um é para que o animal lembre de consertar quando saporra
# inevitavelmente der erro
@property
def max_counts(self):
return np.amax(self.inten)
@property
def uv(self):
# {{{
# Lmao...
if self.UV_index:
return self.UV_index
else:
return self.calc_uv_index()
# }}}
    @property
    def spectrum(self):
        """Convenience (wavelengths, intensities) tuple."""
        return self.wavel, self.inten
    @property
    def wavelengths(self):
        """Public alias for the internal 'wavel' array."""
        return self.wavel
    @property
    def intensities(self):
        """Public alias for the internal 'inten' array."""
        return self.inten
@classmethod
def from_file(cls, inten_wavel_file=None, **kwargs):
# {{{
"""
Creates a spectrum instance with the wavelengths and/or intensities
read from a text file. Additionally, it looks for key-word arguments at
the first few lines of the file. If the same kwargs are passed to this
function, they take precedence.
When retrieving a Spectrum from a file, it will always be assigned the
backend 'none'.
"""
wavel_file = kwargs["wavel_file"] if "wavel_file" in kwargs else None
inten_file = kwargs["inten_file"] if "inten_file" in kwargs else None
inten_wavel_file = (
kwargs["inten_wavel_file"]
if "inten_wavel_file" in kwargs
else inten_wavel_file
)
inten_array = None
wavel_array = None
new_kwargs = {}
if inten_wavel_file:
wavel_array, inten_array, new_kwargs = cls._read_file(inten_wavel_file)
if wavel_file:
wavel_array, _, new_kwargs = cls._read_file(wavel_file)
if inten_file:
inten_array, _, new_kwargs = cls._read_file(inten_file)
if not inten_file and not inten_wavel_file and not wavel_file:
cls._warn(
"WARNING: Instantiating a spectrum with function from_file, but no file path arguments were passed."
)
new_kwargs["intensities"] = inten_array
new_kwargs["wavelengths"] = wavel_array
# The backend 'none' will always be used when loading a
# Spectrum from a file
new_kwargs["backend"] = "none"
new_kwargs.update(kwargs)
return cls(**new_kwargs)
# }}}
@staticmethod
def _read_file(text_file):
# {{{
"""
Used internally by the class method from_file. Returns as many numpy arrays
as there are columns in the file, and a dictionary with whatever comments
(prefaced by #) it finds.
"""
dict_args = {}
col1 = []
col2 = []
with open(text_file, "r") as arq:
# Generator for the lines in the archive
gen_lines = (line.split() for line in arq)
for line_split in gen_lines:
if (
line_split[0] == "#"
): # comment, presumably containing arguments for __init__
dict_args[line_split[1]] = line_split[3]
elif (
len(line_split) > 1
): # 2 or more columns. Will ignore anything after the second column
col1.append(float(line_split[0]))
col2.append(float(line_split[1]))
elif len(line_split) == 1: # 1 column
col1.append(float(line_split[0]))
if not dict_args and not col1 and not col2:
# Check if they're all empty
raise RuntimeError(
f"No arguments, wavelengths and intensities found in {text_file}. Please check if this is a valid file."
)
return np.array(col1), np.array(col2), dict_args
# }}}
@staticmethod
def weight_irrad(wavel, irrad, from_wavel=286.0):
# {{{
"""
Simple implementation of Mckinley-Diffey's action spectrum.
"""
if from_wavel <= wavel < 298:
return irrad
elif 298 <= wavel < 328:
return exp(0.216 * (298 - wavel)) * irrad
elif 328 <= wavel < 400:
return exp(0.034 * (139 - wavel)) * irrad
else:
return 0
# }}}
@staticmethod
def find_wavel_index(wavel_array, wavel, margin=0.5):
# {{{
"""
Attempts to find 'wavel' in 'wavel_array'. Will try using the closest wavelength
at most 0.5 units from 'wavel'
"""
array_diffs = np.abs(wavel_array - wavel)
closest_index = array_diffs.argmin()
if np.isclose(wavel_array[closest_index], wavel):
return closest_index
elif array_diffs[closest_index] < 0.5:
Spectrum._warn(
f"Exact match for {wavel} not found. Using {wavel_array[closest_index]} instead."
)
return closest_index
else:
raise ValueError(
f"A close enough {wavel} wasn't found. Closest value is {wavel_array[closest_index]}."
)
# }}}
@staticmethod
def slice_array(wavel, inten, indices, **kwargs):
# {{{
"""
Takes in two arrays and returns them sliced according to
indices=(from_index, to_index).
If the indeces are integers, it takes them to be literal indeces for the
array. If they are floats, then it'll assume they are wavelengths whose
literal indeces must be found before slicing.
This behaviour can be overriden by passing literal_indices=True or False
"""
literal = kwargs["literal"] if "literal" in kwargs else None
len_array = len(wavel)
if len(inten) != len_array:
raise ValueError("The arrays must be of equal length.")
new_indices = []
for index in indices:
if index is None:
new_indices.append(index)
elif type(index) is int or literal is True:
if not (0 <= abs(index) <= len_array):
raise IndexError(
f"Invalid index of {index} for array of size {len_array}."
)
else:
new_indices.append(index)
elif type(index) in (float, np.float64) or literal is False:
index_wavel = Spectrum.find_wavel_index(wavel, index)
new_indices.append(index_wavel)
array_slice = slice(new_indices[0], new_indices[1])
return wavel[array_slice], inten[array_slice], array_slice
# }}}
    @staticmethod
    def _warn(string):
        # {{{
        """
        Print-based warning hook used throughout the class.
        Warnings can be disabled by setting the class variable 'opt_warnings' to False
        """
        # NOTE(review): 'opt_warnings' is not consulted here — the message is
        # always printed; confirm whether the flag check was meant to live here.
        print(string)
        # }}}
# Magic methods start here
def __iter__(self):
return zip(*self.spectrum)
def __add__(self, other):
# {{{
"""
Adds the first spectrum's intensities with the second's. It can add spectrums
with numpy arrays and lists as well, as long as they are the same length as the
spectrum's wavelengths array.
This operation will always return another spectrum with the added intensities.
"""
if isinstance(other, Spectrum):
if np.isclose(self.wavel, other.wavel).all():
new_inten = self.inten + other.inten
else:
raise ValueError(
"The divided spectrums must have the same wavelengths array."
)
elif isinstance(other, (np.ndarray, list)):
if len(other) == self.wavel.size or len(other) == 1:
new_inten = self.inten + other
else:
raise (
ValueError(
"The other operand must have the same size as the spectrum's wavelengths array, or size 1."
)
)
elif isinstance(other, (float, int)):
new_inten = self.inten + other
else:
raise (TypeError("Incompatible types for addition."))
self_params = vars(self).copy()
self_params.update(
{
"intensities": new_inten,
"wavelengths": self.wavel,
"from_index": None,
"to_index": None,
"backend": "none",
}
)
return Spectrum(**self_params)
# }}}
def __radd__(self, other):
return self + other
def __sub__(self, other):
# {{{
if isinstance(other, Spectrum):
if np.isclose(self.wavel, other.wavel).all():
return self + np.negative(other.intensities)
else:
raise ValueError(
"The subtracted spectrums must have the same wavelengths array."
)
else:
return self + np.negative(other)
# }}}
def __rsub__(self, other):
raise NotImplementedError
def __mul__(self, other):
# {{{
"""
Multiplies the first spectrum's intensities by the second. It can
multiply spectrums with numpy arrays and lists as well, as long as they
are the same length as the spectrum's wavelengths array.
This operation will always return another spectrum with the multiplied intensities.
"""
if isinstance(other, Spectrum):
if np.isclose(self.wavel, other.wavel).all():
new_inten = self.inten * other.inten
else:
raise ValueError(
"The divided spectrums must have the same wavelengths array."
)
elif isinstance(other, (np.ndarray, list)):
if len(other) == self.wavel.size or len(other) == 1:
new_inten = self.inten * other
else:
raise (
ValueError(
"The other operand must have the same size as the spectrum's wavelengths array, or size 1."
)
)
elif isinstance(other, (float, int)):
new_inten = self.inten * other
else:
raise (TypeError("Incompatible types for multiplication."))
self_params = vars(self).copy()
self_params.update(
{
"intensities": new_inten,
"wavelengths": self.wavel,
"from_index": None,
"to_index": None,
"backend": "none",
}
)
return Spectrum(**self_params)
# }}}
def __rmul__(self, other):
return self * other
def __truediv__(self, other):
# {{{
"""
Divides the first spectrum's intensities by the second. I makes no checks whether
division by zero is being requested, that I leave to numpy.
This operation will always return another spectrum with the divided intensities.
The new spectrum's fields will be inherited from the first operand.
"""
if isinstance(other, Spectrum):
if np.isclose(self.wavel, other.wavel).all():
new_inten = self.inten / other.inten
else:
raise ValueError(
"The divided spectrums must have the same wavelengths array."
)
elif isinstance(other, (np.ndarray, list)):
if len(other) == self.wavel.size or len(other) == 1:
new_inten = self.inten / other
else:
raise (
ValueError(
"The other operand must have the same size as the spectrum's wavelengths array, or size 1."
)
)
elif isinstance(other, (float, int)):
new_inten = self.inten / other
else:
raise (TypeError("Incompatible types for division."))
self_params = vars(self).copy()
self_params.update(
{
"intensities": new_inten,
"wavelengths": self.wavel,
"from_index": None,
"to_index": None,
"backend": "none",
}
)
return Spectrum(**self_params)
# }}}
def __rdiv__(self, other):
raise NotImplementedError
def __getitem__(self, key):
# {{{
"""
Takes the key to be a proper index if it is an integer, and as a wavelength
if it is float. It also accepts numpy slices and regular slices, of course.
"""
if isinstance(key, (int, list, np.ndarray)):
return self.inten[key]
elif isinstance(key, float):
int_index = self.find_wavel_index(self.wavel, key)
return self.inten[int_index]
else:
raise TypeError(
"Invalid type for index. Please enter an integer, list, numpy array or a float."
)
# }}}
def __setitem__(self, key, val):
# {{{
"""
Changes the intensity with index 'key' to 'val'. The new value must be a number,
a tuple, list or numpy array. In the latter 3 cases, numpy will handle the assignment.
"""
if isinstance(key, (list, tuple, np.ndarray)):
# Melhorar isto. Adicionar gerenciamento de exceções
key = [
self.find_wavel_index(self.wavel, x)
if isinstance(x, float)
else x
for x in key
]
elif isinstance(key, float):
key = self.find_wavel_index(self.wavel, key)
elif isinstance(key, int):
if abs(key) > self.wavel.size:
raise IndexError(
f"Invalid index of {val} for wavelengths array of size {self.wavel.size}"
)
else:
raise TypeError(
"Invalid type for index. Please enter an integer, list, numpy array or a float."
)
if isinstance(val, (tuple, list, np.ndarray)):
try:
val = [float(x) for x in val]
except (TypeError, ValueError) as exception:
raise ValueError(f"The {type(val)} {val} must contain only numbers.")
else:
try:
val = float(val)
except:
raise ValueError(
f"Invalid value of {val} for intensity. Please enter something convertible to float."
)
self.inten[key] = val
# }}}
    def __contains__(self, value):
        # Membership testing ('value in spectrum') is deliberately unsupported.
        raise NotImplementedError
def __repr__(self):
return "Spectrum({}, {})".format(self.wavel, self.inten)
    def __len__(self):
        # Length of a spectrum = number of wavelength samples.
        return self.wavel.size
| [
"os.path.exists",
"numpy.abs",
"spectrabuster.functions.get_backend",
"numpy.isclose",
"numpy.copy",
"scipy.integrate.trapz",
"numpy.array",
"numpy.negative",
"functools.partial",
"math.exp",
"numpy.amax"
] | [((4551, 4625), 'functools.partial', 'partial', (['self.device.measure'], {'correct_dc': 'correct_dc', 'correct_nl': 'correct_nl'}), '(self.device.measure, correct_dc=correct_dc, correct_nl=correct_nl)\n', (4558, 4625), False, 'from functools import partial\n'), ((5163, 5185), 'os.path.exists', 'path.exists', (['file_path'], {}), '(file_path)\n', (5174, 5185), False, 'from os import path\n'), ((15071, 15090), 'numpy.amax', 'np.amax', (['self.inten'], {}), '(self.inten)\n', (15078, 15090), True, 'import numpy as np\n'), ((19402, 19429), 'numpy.abs', 'np.abs', (['(wavel_array - wavel)'], {}), '(wavel_array - wavel)\n', (19408, 19429), True, 'import numpy as np\n'), ((19487, 19532), 'numpy.isclose', 'np.isclose', (['wavel_array[closest_index]', 'wavel'], {}), '(wavel_array[closest_index], wavel)\n', (19497, 19532), True, 'import numpy as np\n'), ((1286, 1310), 'spectrabuster.functions.get_backend', 'sbf.get_backend', (['backend'], {}), '(backend)\n', (1301, 1310), True, 'import spectrabuster.functions as sbf\n'), ((2705, 2736), 'numpy.array', 'np.array', (["kwargs['wavelengths']"], {}), "(kwargs['wavelengths'])\n", (2713, 2736), True, 'import numpy as np\n'), ((3054, 3085), 'numpy.array', 'np.array', (["kwargs['intensities']"], {}), "(kwargs['intensities'])\n", (3062, 3085), True, 'import numpy as np\n'), ((10882, 10901), 'numpy.amax', 'np.amax', (['self.inten'], {}), '(self.inten)\n', (10889, 10901), True, 'import numpy as np\n'), ((12721, 12740), 'numpy.copy', 'np.copy', (['self.wavel'], {}), '(self.wavel)\n', (12728, 12740), True, 'import numpy as np\n'), ((12892, 12911), 'numpy.copy', 'np.copy', (['self.inten'], {}), '(self.inten)\n', (12899, 12911), True, 'import numpy as np\n'), ((1438, 1461), 'spectrabuster.functions.get_backend', 'sbf.get_backend', (['"""none"""'], {}), "('none')\n", (1453, 1461), True, 'import spectrabuster.functions as sbf\n'), ((9779, 9812), 'scipy.integrate.trapz', 'trapz', (['weighted_irrad', 'self.wavel'], {}), '(weighted_irrad, 
self.wavel)\n', (9784, 9812), False, 'from scipy.integrate import trapz\n'), ((12109, 12148), 'numpy.isclose', 'np.isclose', (['other.wavel', 'self_wavel_max'], {}), '(other.wavel, self_wavel_max)\n', (12119, 12148), True, 'import numpy as np\n'), ((12323, 12362), 'numpy.isclose', 'np.isclose', (['self.wavel', 'other_wavel_min'], {}), '(self.wavel, other_wavel_min)\n', (12333, 12362), True, 'import numpy as np\n'), ((12400, 12439), 'numpy.isclose', 'np.isclose', (['other.wavel', 'self_wavel_max'], {}), '(other.wavel, self_wavel_max)\n', (12410, 12439), True, 'import numpy as np\n'), ((13657, 13693), 'numpy.copy', 'np.copy', (['other.wavel[:start_overlap]'], {}), '(other.wavel[:start_overlap])\n', (13664, 13693), True, 'import numpy as np\n'), ((13795, 13831), 'numpy.copy', 'np.copy', (['other.inten[:start_overlap]'], {}), '(other.inten[:start_overlap])\n', (13802, 13831), True, 'import numpy as np\n'), ((18620, 18634), 'numpy.array', 'np.array', (['col1'], {}), '(col1)\n', (18628, 18634), True, 'import numpy as np\n'), ((18636, 18650), 'numpy.array', 'np.array', (['col2'], {}), '(col2)\n', (18644, 18650), True, 'import numpy as np\n'), ((23805, 23823), 'numpy.negative', 'np.negative', (['other'], {}), '(other)\n', (23816, 23823), True, 'import numpy as np\n'), ((12811, 12849), 'numpy.copy', 'np.copy', (['other.wavel[end_overlap + 1:]'], {}), '(other.wavel[end_overlap + 1:])\n', (12818, 12849), True, 'import numpy as np\n'), ((12982, 13020), 'numpy.copy', 'np.copy', (['other.inten[end_overlap + 1:]'], {}), '(other.inten[end_overlap + 1:])\n', (12989, 13020), True, 'import numpy as np\n'), ((13115, 13154), 'numpy.isclose', 'np.isclose', (['self.wavel', 'other_wavel_max'], {}), '(self.wavel, other_wavel_max)\n', (13125, 13154), True, 'import numpy as np\n'), ((13317, 13356), 'numpy.isclose', 'np.isclose', (['other.wavel', 'self_wavel_min'], {}), '(other.wavel, self_wavel_min)\n', (13327, 13356), True, 'import numpy as np\n'), ((13394, 13433), 'numpy.isclose', 
'np.isclose', (['self.wavel', 'other_wavel_max'], {}), '(self.wavel, other_wavel_max)\n', (13404, 13433), True, 'import numpy as np\n'), ((18968, 18994), 'math.exp', 'exp', (['(0.216 * (298 - wavel))'], {}), '(0.216 * (298 - wavel))\n', (18971, 18994), False, 'from math import exp\n'), ((22181, 22216), 'numpy.isclose', 'np.isclose', (['self.wavel', 'other.wavel'], {}), '(self.wavel, other.wavel)\n', (22191, 22216), True, 'import numpy as np\n'), ((23506, 23541), 'numpy.isclose', 'np.isclose', (['self.wavel', 'other.wavel'], {}), '(self.wavel, other.wavel)\n', (23516, 23541), True, 'import numpy as np\n'), ((23579, 23609), 'numpy.negative', 'np.negative', (['other.intensities'], {}), '(other.intensities)\n', (23590, 23609), True, 'import numpy as np\n'), ((24339, 24374), 'numpy.isclose', 'np.isclose', (['self.wavel', 'other.wavel'], {}), '(self.wavel, other.wavel)\n', (24349, 24374), True, 'import numpy as np\n'), ((26024, 26059), 'numpy.isclose', 'np.isclose', (['self.wavel', 'other.wavel'], {}), '(self.wavel, other.wavel)\n', (26034, 26059), True, 'import numpy as np\n'), ((13747, 13766), 'numpy.copy', 'np.copy', (['self.wavel'], {}), '(self.wavel)\n', (13754, 13766), True, 'import numpy as np\n'), ((13885, 13904), 'numpy.copy', 'np.copy', (['self.inten'], {}), '(self.inten)\n', (13892, 13904), True, 'import numpy as np\n'), ((19055, 19081), 'math.exp', 'exp', (['(0.034 * (139 - wavel))'], {}), '(0.034 * (139 - wavel))\n', (19058, 19081), False, 'from math import exp\n'), ((14069, 14088), 'numpy.copy', 'np.copy', (['self.wavel'], {}), '(self.wavel)\n', (14076, 14088), True, 'import numpy as np\n'), ((14090, 14110), 'numpy.copy', 'np.copy', (['other.wavel'], {}), '(other.wavel)\n', (14097, 14110), True, 'import numpy as np\n'), ((14197, 14216), 'numpy.copy', 'np.copy', (['self.inten'], {}), '(self.inten)\n', (14204, 14216), True, 'import numpy as np\n'), ((14218, 14238), 'numpy.copy', 'np.copy', (['other.inten'], {}), '(other.inten)\n', (14225, 14238), True, 
'import numpy as np\n'), ((14377, 14397), 'numpy.copy', 'np.copy', (['other.wavel'], {}), '(other.wavel)\n', (14384, 14397), True, 'import numpy as np\n'), ((14399, 14418), 'numpy.copy', 'np.copy', (['self.wavel'], {}), '(self.wavel)\n', (14406, 14418), True, 'import numpy as np\n'), ((14505, 14525), 'numpy.copy', 'np.copy', (['other.inten'], {}), '(other.inten)\n', (14512, 14525), True, 'import numpy as np\n'), ((14527, 14546), 'numpy.copy', 'np.copy', (['self.inten'], {}), '(self.inten)\n', (14534, 14546), True, 'import numpy as np\n')] |
'''Testing for particle_data.py
'''
import copy
from mock import patch, sentinel
import numpy as np
import numpy.testing as npt
import unittest
import galaxy_dive.analyze_data.simulation_data as simulation_data
########################################################################
# Shared constructor arguments for the SnapshotData fixtures built in the
# setUp methods below.
default_kwargs = {
    'data_dir': './tests/data/sdir',  # simulation snapshot directory
    'halo_data_dir': './tests/data/analysis_dir',  # halo-finder output
    'snum': 500,  # snapshot number under test
    'ahf_index': 600,  # presumably the snapshot AHF indexes from — confirm
    'length_scale_used' : 'r_scale',
    'averaging_frac' : 0.5,
}
########################################################################
class TestSnapshotData( unittest.TestCase ):

    def setUp( self ):
        self.g_data = simulation_data.SnapshotData( **default_kwargs )
        self.g_data.data_attrs = {
            'hubble': 0.70199999999999996,
            'redshift': 0.16946,
        }

    ########################################################################

    def test_retrieve_halo_data( self ):
        self.g_data.retrieve_halo_data()
        # Make sure we have the right redshift (this comparison was set up
        # but never asserted in the original test)
        expected = 0.16946
        actual = self.g_data.redshift
        npt.assert_allclose( expected, actual )
        # Make sure we have the position right for mt halo 0, snap 500
        expected = np.array( [ 29414.96458784, 30856.75007114, 32325.90901812] )/(1. + 0.16946 )/0.70199999999999996
        actual = self.g_data.halo_coords
        npt.assert_allclose( expected, actual )
        # The halo velocity comparison was also computed but never asserted
        expected = np.array( [-48.89, 73.77, 97.25] )
        actual = self.g_data.halo_velocity
        npt.assert_allclose( expected, actual )
########################################################################
class TestGetData(unittest.TestCase):

    def setUp(self):
        self.g_data = simulation_data.SnapshotData(**default_kwargs)
        self.g_data.data_attrs = {
            'hubble': 0.70199999999999996,
            'omega_matter': 0.272,
            'omega_lambda': 0.728,
            'redshift': 0.16946,
        }
        # Random arrays stand in for actual simulation output.
        self.g_data.data = {
            'P': np.random.rand(3, 4),
            'V': np.random.rand(3, 4),
            'Den': np.random.rand(4),
            'Z': np.random.uniform(0., 1.),
        }

    ########################################################################

    def test_get_position_data(self):
        original_rx = copy.copy(self.g_data.data['P'][0])
        result = self.g_data.get_position_data('Rx')
        npt.assert_allclose(original_rx - self.g_data.halo_coords[0], result)

    ########################################################################

    def test_get_velocity_data(self):
        # Flag as corrected so we don't try to find the hubble velocity
        self.g_data.hubble_corrected = True
        original_vx = copy.copy(self.g_data.data['V'][0])
        result = self.g_data.get_velocity_data('Vx')
        npt.assert_allclose(original_vx - self.g_data.halo_velocity[0], result)

    ########################################################################

    def test_get_data_fails(self):
        with self.assertRaises(KeyError):
            self.g_data.get_data('NonexistentData')

    ########################################################################

    @patch('galaxy_dive.analyze_data.simulation_data.SnapshotData.handle_data_key_error')
    def test_fails_after_too_many_attempts(self, mock_handle_data_key_error):
        '''By mocking handle_data_key error, we can emulate it trying to do something'''
        with self.assertRaises(KeyError):
            self.g_data.get_data('NonexistentData')

    ########################################################################

    def test_get_processed_data_standard(self):
        '''When nothing changes and processed is the regular.'''
        for key in ('Rx', 'Den', 'Z'):
            npt.assert_allclose(
                self.g_data.get_data(key),
                self.g_data.get_processed_data(key),
            )

    ########################################################################

    def test_get_processed_data_log(self):
        npt.assert_allclose(
            np.log10(self.g_data.get_data('Den')),
            self.g_data.get_processed_data('logDen'),
        )

    ########################################################################

    def test_get_processed_data_fraction(self):
        npt.assert_allclose(
            self.g_data.get_data('Rx') / self.g_data.length_scale,
            self.g_data.get_processed_data('Rxf'),
        )
        npt.assert_allclose(
            self.g_data.get_data('Z') / self.g_data.metallicity_scale,
            self.g_data.get_processed_data('Zf'),
        )
########################################################################
class TestHandleDataKeyError( unittest.TestCase ):
    def setUp( self ):
        self.g_data = simulation_data.SnapshotData( **default_kwargs )
    ########################################################################
    # Mock every calc_* method so we only check dispatch, not the math.
    @patch.multiple( 'galaxy_dive.analyze_data.simulation_data.SnapshotData',
        calc_radial_distance=sentinel.DEFAULT, calc_radial_velocity=sentinel.DEFAULT,
        calc_inds=sentinel.DEFAULT, calc_ang_momentum=sentinel.DEFAULT,
        calc_phi=sentinel.DEFAULT, calc_abs_phi=sentinel.DEFAULT,
        calc_num_den=sentinel.DEFAULT, calc_H_den=sentinel.DEFAULT,
        calc_HI_den=sentinel.DEFAULT )
    def test_handle_data_key_error( self, **mocks ):
        '''Make sure that passing a data_key successfully calls the right function.'''
        # One key per mocked calc_* method, in the same order as the decorator
        keys_to_check = [ 'R', 'Vr', 'ind', 'L', 'Phi', 'AbsPhi', 'NumDen', 'HDen', 'HIDen', ]
        for key in keys_to_check:
            self.g_data.handle_data_key_error( key )
        # Each mocked method should have been dispatched exactly once
        for key in mocks.keys():
            mocks[key].assert_called_once()
    ########################################################################
    def test_fails( self ):
        self.assertRaises( KeyError, self.g_data.handle_data_key_error, 'NonexistentData' )
########################################################################
class TestCenterCoords(unittest.TestCase):

    def setUp(self):
        self.g_data = simulation_data.SnapshotData(**default_kwargs)
        self.g_data.data_attrs = {
            'hubble': 0.70199999999999996,
            'redshift': 0.16946,
        }
        # Random positions stand in for actual particle data.
        self.g_data.data = {
            'P': np.random.rand(3, 4),
        }

    ########################################################################

    def test_center_coords_origin_passed(self):
        # With an explicit origin, positions should be shifted by it.
        self.g_data.center_method = np.array([0.5, 0.5, 0.5])
        shifted = copy.copy(
            self.g_data.data['P'] - self.g_data.center_method[:, np.newaxis]
        )
        self.g_data.center_coords()
        npt.assert_allclose(shifted, self.g_data.data['P'])

    ########################################################################

    def test_center_coords_already_centered(self):
        # Nothing should change when the data is already flagged as centered.
        self.g_data.centered = True
        self.g_data.center_method = np.array([0.5, 0.5, 0.5])
        unchanged = copy.copy(self.g_data.data['P'])
        self.g_data.center_coords()
        npt.assert_allclose(unchanged, self.g_data.data['P'])

    ########################################################################

    def test_center_coords_halo_method(self):
        # The default method recenters on the halo position.
        uncentered = copy.copy(self.g_data.data['P'])
        self.g_data.center_coords()
        npt.assert_allclose(
            uncentered - self.g_data.halo_coords[:, np.newaxis],
            self.g_data.data['P'],
        )
########################################################################
class TestCenterVelCoords( unittest.TestCase ):
def setUp( self ):
self.g_data = simulation_data.SnapshotData( **default_kwargs )
self.g_data.data_attrs = {
'hubble': 0.70199999999999996,
'redshift': 0.16946,
}
# Setup some necessary data
self.g_data.data = {
'V': np.random.rand( 3, 4 ),
}
########################################################################
def test_center_vel_coords_origin_passed( self ):
self.g_data.vel_center_method = np.array([ 0.5, 0.5, 0.5 ])
expected = copy.copy( self.g_data.data['V'] - self.g_data.vel_center_method[:,np.newaxis] )
self.g_data.center_vel_coords()
actual = self.g_data.data['V']
npt.assert_allclose( expected, actual )
########################################################################
def test_center_vel_coords_already_centered( self ):
self.g_data.vel_centered = True
self.g_data.vel_center_method = np.array([ 0.5, 0.5, 0.5 ])
expected = copy.copy( self.g_data.data['V'] )
self.g_data.center_vel_coords()
actual = self.g_data.data['V']
npt.assert_allclose( expected, actual )
########################################################################
def test_center_vel_coords_halo_method( self ):
vel_before = copy.copy( self.g_data.data['V'] )
self.g_data.center_vel_coords()
actual = self.g_data.data['V']
expected = vel_before - self.g_data.halo_velocity[:,np.newaxis]
npt.assert_allclose( expected, actual )
########################################################################
class TestProperties( unittest.TestCase ):
def setUp( self ):
self.g_data = simulation_data.SnapshotData( **default_kwargs )
self.g_data.data_attrs = {
'hubble': 0.70199999999999996,
'omega_matter': 0.272,
'omega_lambda': 0.728,
'redshift': 0.16946,
}
########################################################################
def test_v_com( self ):
'''Get the com velocity'''
# So that we don't deal with cosmological junk when testing.
self.g_data.centered = True
self.g_data.vel_centered = True
self.g_data.hubble_corrected = True
self.g_data.data = {
# Have two particles inside and one outside the region.
'P': np.array( [
[ 1., 0., 0., 0., ],
[ 2., 0., 0., 0., ],
[ 0., 1., 0., 0., ],
] )*self.g_data.length_scale*self.g_data.averaging_frac*0.9,
# Have the particle outside have an insane velocity so we notice if it's affecting things.
'V': np.array( [
[ 10., 0., -5., 0., ],
[ 200., 0., 10., -10., ],
[ 0., 10., 0., 0., ],
] ),
'M': np.array( [ 1., 1., 1., 1. ] ),
'Den': np.random.rand( 4 ),
}
actual = self.g_data.v_com
expected = np.array( [ -5./3., 0., 10./3. ] )
npt.assert_allclose( expected, actual )
########################################################################
def test_hubble_z( self ):
# Hubble parameter in km/s/kpc
expected = 75.71*1e-3
actual = self.g_data.hubble_z
npt.assert_allclose( expected, actual, rtol=1e-4 )
try:
self.g_data.hubble_z = 2.
assert False
# We're actually looking for an error here, to test for read only
except AttributeError:
pass
########################################################################
def test_redshift( self ):
# By hand
expected = 0.16946
actual = self.g_data.redshift
npt.assert_allclose( expected, actual )
########################################################################
def test_redshift_halo_data( self ):
del self.g_data.data_attrs['redshift']
# By hand
expected = 0.16946
actual = self.g_data.redshift
npt.assert_allclose( expected, actual )
########################################################################
def test_r_vir( self ):
# By hand
expected = 239.19530785947771
actual = self.g_data.r_vir
npt.assert_allclose( expected, actual )
########################################################################
def test_r_scale( self ):
# By hand
expected = 25.25698
actual = self.g_data.r_scale
npt.assert_allclose( expected, actual, rtol=1e-3 )
########################################################################
def test_v_c( self ):
# By hand
expected = 134.93489906417346
actual = self.g_data.v_c
npt.assert_allclose( expected, actual )
########################################################################
def test_length_scale( self ):
'''Default options.'''
expected = self.g_data.r_scale
actual = self.g_data.length_scale
npt.assert_allclose( expected, actual )
########################################################################
def test_length_scale_r_vir( self ):
'''Default options.'''
self.g_data.length_scale_used = 'R_vir'
expected = self.g_data.r_vir
actual = self.g_data.length_scale
npt.assert_allclose( expected, actual )
########################################################################
def test_velocity_scale( self ):
'''Default options.'''
expected = self.g_data.v_c
actual = self.g_data.velocity_scale
npt.assert_allclose( expected, actual )
########################################################################
def test_metallicity_scale( self ):
'''Default options.'''
expected = self.g_data.z_sun
actual = self.g_data.metallicity_scale
npt.assert_allclose( expected, actual )
########################################################################
def test_base_data_shape( self ):
self.g_data.data = { 'Den': np.random.rand( 5, 3, 2 ), }
expected = ( 5, 3, 2 )
actual = self.g_data.base_data_shape
npt.assert_allclose( expected, actual )
def try_to_change_shape():
self.g_data.base_data_shape = ( 5, )
self.assertRaises( AssertionError, try_to_change_shape, )
########################################################################
class TestHubbleFlow( unittest.TestCase ):
def setUp( self ):
self.g_data = simulation_data.SnapshotData( **default_kwargs )
self.g_data.data_attrs = {
'hubble': 0.70199999999999996,
'redshift': 0.16946,
'omega_matter': 0.272,
'omega_lambda': 0.728,
}
# Setup some necessary data
self.g_data.data = {
'V': np.random.rand( 3, 4 ),
'P': np.random.rand( 3, 4 ),
}
########################################################################
def test_add_hubble_flow( self ):
self.g_data.center_method = np.random.rand( 3 )
self.g_data.vel_center_method = np.random.rand( 3 )
self.g_data.add_hubble_flow()
assert self.g_data.hubble_corrected
assert self.g_data.centered
assert self.g_data.vel_centered
########################################################################
class TestCalcData( unittest.TestCase ):
def setUp( self ):
self.g_data = simulation_data.SnapshotData( **default_kwargs )
self.g_data.centered = True
self.g_data.r_scale = 1.
self.g_data.data = {
# Have two particles inside and one outside the region.
'P': np.array( [
[ 0., 2., 0.5, 0., ],
[ 0.5, 0., 0., 0., ],
[ 0., 0.5, 0.5, 0., ],
] )
}
########################################################################
def test_calc_radial_distance( self ):
self.g_data.calc_radial_distance()
actual = self.g_data.data['R']
# By hand
expected = np.array( [ 0.5, 2.0615528128088303, 0.70710678118654757, 0., ] )
npt.assert_allclose( expected, actual )
| [
"mock.patch",
"numpy.random.rand",
"numpy.testing.assert_allclose",
"copy.copy",
"numpy.array",
"galaxy_dive.analyze_data.simulation_data.SnapshotData",
"numpy.random.uniform",
"mock.patch.multiple"
] | [((3202, 3296), 'mock.patch', 'patch', (['"""galaxy_dive.analyze_data.simulation_data.SnapshotData.handle_data_key_error"""'], {}), "(\n 'galaxy_dive.analyze_data.simulation_data.SnapshotData.handle_data_key_error'\n )\n", (3207, 3296), False, 'from mock import patch, sentinel\n'), ((5282, 5666), 'mock.patch.multiple', 'patch.multiple', (['"""galaxy_dive.analyze_data.simulation_data.SnapshotData"""'], {'calc_radial_distance': 'sentinel.DEFAULT', 'calc_radial_velocity': 'sentinel.DEFAULT', 'calc_inds': 'sentinel.DEFAULT', 'calc_ang_momentum': 'sentinel.DEFAULT', 'calc_phi': 'sentinel.DEFAULT', 'calc_abs_phi': 'sentinel.DEFAULT', 'calc_num_den': 'sentinel.DEFAULT', 'calc_H_den': 'sentinel.DEFAULT', 'calc_HI_den': 'sentinel.DEFAULT'}), "('galaxy_dive.analyze_data.simulation_data.SnapshotData',\n calc_radial_distance=sentinel.DEFAULT, calc_radial_velocity=sentinel.\n DEFAULT, calc_inds=sentinel.DEFAULT, calc_ang_momentum=sentinel.DEFAULT,\n calc_phi=sentinel.DEFAULT, calc_abs_phi=sentinel.DEFAULT, calc_num_den=\n sentinel.DEFAULT, calc_H_den=sentinel.DEFAULT, calc_HI_den=sentinel.DEFAULT\n )\n", (5296, 5666), False, 'from mock import patch, sentinel\n'), ((667, 713), 'galaxy_dive.analyze_data.simulation_data.SnapshotData', 'simulation_data.SnapshotData', ([], {}), '(**default_kwargs)\n', (695, 713), True, 'import galaxy_dive.analyze_data.simulation_data as simulation_data\n'), ((1351, 1388), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (1370, 1388), True, 'import numpy.testing as npt\n'), ((1454, 1486), 'numpy.array', 'np.array', (['[-48.89, 73.77, 97.25]'], {}), '([-48.89, 73.77, 97.25])\n', (1462, 1486), True, 'import numpy as np\n'), ((1651, 1697), 'galaxy_dive.analyze_data.simulation_data.SnapshotData', 'simulation_data.SnapshotData', ([], {}), '(**default_kwargs)\n', (1679, 1697), True, 'import galaxy_dive.analyze_data.simulation_data as simulation_data\n'), ((2276, 2311), 'copy.copy', 
'copy.copy', (["self.g_data.data['P'][0]"], {}), "(self.g_data.data['P'][0])\n", (2285, 2311), False, 'import copy\n'), ((2438, 2475), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (2457, 2475), True, 'import numpy.testing as npt\n'), ((2718, 2753), 'copy.copy', 'copy.copy', (["self.g_data.data['V'][0]"], {}), "(self.g_data.data['V'][0])\n", (2727, 2753), False, 'import copy\n'), ((2882, 2919), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (2901, 2919), True, 'import numpy.testing as npt\n'), ((3845, 3882), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (3864, 3882), True, 'import numpy.testing as npt\n'), ((4000, 4037), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (4019, 4037), True, 'import numpy.testing as npt\n'), ((4151, 4188), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (4170, 4188), True, 'import numpy.testing as npt\n'), ((4445, 4482), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (4464, 4482), True, 'import numpy.testing as npt\n'), ((4753, 4790), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (4772, 4790), True, 'import numpy.testing as npt\n'), ((4935, 4972), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (4954, 4972), True, 'import numpy.testing as npt\n'), ((5149, 5195), 'galaxy_dive.analyze_data.simulation_data.SnapshotData', 'simulation_data.SnapshotData', ([], {}), '(**default_kwargs)\n', (5177, 5195), True, 'import galaxy_dive.analyze_data.simulation_data as simulation_data\n'), ((6454, 6500), 
'galaxy_dive.analyze_data.simulation_data.SnapshotData', 'simulation_data.SnapshotData', ([], {}), '(**default_kwargs)\n', (6482, 6500), True, 'import galaxy_dive.analyze_data.simulation_data as simulation_data\n'), ((6908, 6933), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (6916, 6933), True, 'import numpy as np\n'), ((6956, 7031), 'copy.copy', 'copy.copy', (["(self.g_data.data['P'] - self.g_data.center_method[:, np.newaxis])"], {}), "(self.g_data.data['P'] - self.g_data.center_method[:, np.newaxis])\n", (6965, 7031), False, 'import copy\n'), ((7118, 7155), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (7137, 7155), True, 'import numpy.testing as npt\n'), ((7364, 7389), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (7372, 7389), True, 'import numpy as np\n'), ((7412, 7444), 'copy.copy', 'copy.copy', (["self.g_data.data['P']"], {}), "(self.g_data.data['P'])\n", (7421, 7444), False, 'import copy\n'), ((7532, 7569), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (7551, 7569), True, 'import numpy.testing as npt\n'), ((7721, 7753), 'copy.copy', 'copy.copy', (["self.g_data.data['P']"], {}), "(self.g_data.data['P'])\n", (7730, 7753), False, 'import copy\n'), ((7912, 7949), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (7931, 7949), True, 'import numpy.testing as npt\n'), ((8122, 8168), 'galaxy_dive.analyze_data.simulation_data.SnapshotData', 'simulation_data.SnapshotData', ([], {}), '(**default_kwargs)\n', (8150, 8168), True, 'import galaxy_dive.analyze_data.simulation_data as simulation_data\n'), ((8584, 8609), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (8592, 8609), True, 'import numpy as np\n'), ((8632, 8711), 'copy.copy', 'copy.copy', (["(self.g_data.data['V'] - 
self.g_data.vel_center_method[:, np.newaxis])"], {}), "(self.g_data.data['V'] - self.g_data.vel_center_method[:, np.newaxis])\n", (8641, 8711), False, 'import copy\n'), ((8802, 8839), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (8821, 8839), True, 'import numpy.testing as npt\n'), ((9060, 9085), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (9068, 9085), True, 'import numpy as np\n'), ((9108, 9140), 'copy.copy', 'copy.copy', (["self.g_data.data['V']"], {}), "(self.g_data.data['V'])\n", (9117, 9140), False, 'import copy\n'), ((9232, 9269), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (9251, 9269), True, 'import numpy.testing as npt\n'), ((9425, 9457), 'copy.copy', 'copy.copy', (["self.g_data.data['V']"], {}), "(self.g_data.data['V'])\n", (9434, 9457), False, 'import copy\n'), ((9622, 9659), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (9641, 9659), True, 'import numpy.testing as npt\n'), ((9827, 9873), 'galaxy_dive.analyze_data.simulation_data.SnapshotData', 'simulation_data.SnapshotData', ([], {}), '(**default_kwargs)\n', (9855, 9873), True, 'import galaxy_dive.analyze_data.simulation_data as simulation_data\n'), ((11143, 11182), 'numpy.array', 'np.array', (['[-5.0 / 3.0, 0.0, 10.0 / 3.0]'], {}), '([-5.0 / 3.0, 0.0, 10.0 / 3.0])\n', (11151, 11182), True, 'import numpy as np\n'), ((11187, 11224), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (11206, 11224), True, 'import numpy.testing as npt\n'), ((11455, 11505), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {'rtol': '(0.0001)'}), '(expected, actual, rtol=0.0001)\n', (11474, 11505), True, 'import numpy.testing as npt\n'), ((11910, 11947), 'numpy.testing.assert_allclose', 'npt.assert_allclose', 
(['expected', 'actual'], {}), '(expected, actual)\n', (11929, 11947), True, 'import numpy.testing as npt\n'), ((12212, 12249), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (12231, 12249), True, 'import numpy.testing as npt\n'), ((12461, 12498), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (12480, 12498), True, 'import numpy.testing as npt\n'), ((12704, 12753), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {'rtol': '(0.001)'}), '(expected, actual, rtol=0.001)\n', (12723, 12753), True, 'import numpy.testing as npt\n'), ((12960, 12997), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (12979, 12997), True, 'import numpy.testing as npt\n'), ((13237, 13274), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (13256, 13274), True, 'import numpy.testing as npt\n'), ((13567, 13604), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (13586, 13604), True, 'import numpy.testing as npt\n'), ((13844, 13881), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (13863, 13881), True, 'import numpy.testing as npt\n'), ((14129, 14166), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (14148, 14166), True, 'import numpy.testing as npt\n'), ((14437, 14474), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (14456, 14474), True, 'import numpy.testing as npt\n'), ((14794, 14840), 'galaxy_dive.analyze_data.simulation_data.SnapshotData', 'simulation_data.SnapshotData', ([], {}), '(**default_kwargs)\n', (14822, 14840), True, 'import galaxy_dive.analyze_data.simulation_data as 
simulation_data\n'), ((15347, 15364), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (15361, 15364), True, 'import numpy as np\n'), ((15407, 15424), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (15421, 15424), True, 'import numpy as np\n'), ((15750, 15796), 'galaxy_dive.analyze_data.simulation_data.SnapshotData', 'simulation_data.SnapshotData', ([], {}), '(**default_kwargs)\n', (15778, 15796), True, 'import galaxy_dive.analyze_data.simulation_data as simulation_data\n'), ((16397, 16457), 'numpy.array', 'np.array', (['[0.5, 2.0615528128088303, 0.7071067811865476, 0.0]'], {}), '([0.5, 2.0615528128088303, 0.7071067811865476, 0.0])\n', (16405, 16457), True, 'import numpy as np\n'), ((16472, 16509), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['expected', 'actual'], {}), '(expected, actual)\n', (16491, 16509), True, 'import numpy.testing as npt\n'), ((1975, 1995), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)'], {}), '(3, 4)\n', (1989, 1995), True, 'import numpy as np\n'), ((2016, 2036), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)'], {}), '(3, 4)\n', (2030, 2036), True, 'import numpy as np\n'), ((2059, 2076), 'numpy.random.rand', 'np.random.rand', (['(4)'], {}), '(4)\n', (2073, 2076), True, 'import numpy as np\n'), ((2097, 2124), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (2114, 2124), True, 'import numpy as np\n'), ((6708, 6728), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)'], {}), '(3, 4)\n', (6722, 6728), True, 'import numpy as np\n'), ((8376, 8396), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)'], {}), '(3, 4)\n', (8390, 8396), True, 'import numpy as np\n'), ((10833, 10922), 'numpy.array', 'np.array', (['[[10.0, 0.0, -5.0, 0.0], [200.0, 0.0, 10.0, -10.0], [0.0, 10.0, 0.0, 0.0]]'], {}), '([[10.0, 0.0, -5.0, 0.0], [200.0, 0.0, 10.0, -10.0], [0.0, 10.0, \n 0.0, 0.0]])\n', (10841, 10922), True, 'import numpy as np\n'), ((11003, 11033), 'numpy.array', 
'np.array', (['[1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0])\n', (11011, 11033), True, 'import numpy as np\n'), ((11055, 11072), 'numpy.random.rand', 'np.random.rand', (['(4)'], {}), '(4)\n', (11069, 11072), True, 'import numpy as np\n'), ((14323, 14346), 'numpy.random.rand', 'np.random.rand', (['(5)', '(3)', '(2)'], {}), '(5, 3, 2)\n', (14337, 14346), True, 'import numpy as np\n'), ((15118, 15138), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)'], {}), '(3, 4)\n', (15132, 15138), True, 'import numpy as np\n'), ((15159, 15179), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)'], {}), '(3, 4)\n', (15173, 15179), True, 'import numpy as np\n'), ((15986, 16062), 'numpy.array', 'np.array', (['[[0.0, 2.0, 0.5, 0.0], [0.5, 0.0, 0.0, 0.0], [0.0, 0.5, 0.5, 0.0]]'], {}), '([[0.0, 2.0, 0.5, 0.0], [0.5, 0.0, 0.0, 0.0], [0.0, 0.5, 0.5, 0.0]])\n', (15994, 16062), True, 'import numpy as np\n'), ((1245, 1303), 'numpy.array', 'np.array', (['[29414.96458784, 30856.75007114, 32325.90901812]'], {}), '([29414.96458784, 30856.75007114, 32325.90901812])\n', (1253, 1303), True, 'import numpy as np\n'), ((10516, 10592), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0], [2.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0], [2.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]])\n', (10524, 10592), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Driver for the Keithley instruments
Manual for the KT2400 found in 'http://research.physics.illinois.edu/bezryadin/
labprotocol/Keithley2400Manual.pdf'
@author: <EMAIL>
"""
import numpy as np
from .generic_instruments import Instrument, INTF_PROLOGIX
def fake_iv_relation(
src_type,
src_val,
v_oc=20.5,
i_sc=3.45,
c1=0.000002694,
c2=0.077976842
):
"""model of solar cell IV curve
source: https://www.sciencedirect.com/science/article/pii/S1658365512600120
src_type should be either 'I' or 'V'
"""
# Make sure the format is a numpy array
src_val = np.append(np.array([]), src_val) * 2.1
# Prepare an answer based on the size of the input
answer = np.zeros(np.size(src_val))
if src_type == 'I':
# Values of the input smaller than the short circuit current
idx_ok = np.where(src_val < i_sc)
answer[idx_ok] = c2 * v_oc \
* np.log(1 + (1 - src_val[idx_ok] / i_sc) / c1)
return answer
elif src_type == 'V':
# Values of the input smaller than the open circuit voltage
idx_ok = np.where(src_val < v_oc)
answer[idx_ok] = \
i_sc \
* (1 - c1 * (np.exp(src_val[idx_ok] / (c2 * v_oc)) - 1))
return answer
INTERFACE = INTF_PROLOGIX
SRC_TYPES = [
'VOLT', # Voltage
'CURR' # Current
]
SRC_MODES = [
'FIX', # Fixed output
'LIST', # Variable outputs
'SWE' # Sweep outputs
]
class KT2400(Instrument):
""""driver of the Keithley 2400 SourceMeter"""
def __init__(self,
instr_port_name='',
mock_mode=False,
instr_user_name='KT 2400',
**kwargs):
# manage the presence of the keyword interface which will determine
# which method of communication protocol this instrument will use
if 'interface' in kwargs.keys():
interface = kwargs.pop('interface')
else:
interface = INTERFACE
instr_mesurands = {
'V': 'V', # Voltage in Volt
'I': 'A' # Current in Ampere
}
if interface == INTF_PROLOGIX:
kwargs['auto'] = 0
super(KT2400, self).__init__(instr_port_name,
instr_id_name='KT2400',
instr_user_name=instr_user_name,
mock_mode=mock_mode,
instr_intf=interface,
instr_mesurands=instr_mesurands,
**kwargs)
self.auto_output_off = False
self.voltage_compliance = 0
self.current_compliance = 0
if instr_port_name:
self.initialize()
def _check_arg(self, arg, arg_list):
"""check if the argument is in a list"""
answer = (arg in arg_list)
if not answer:
print("'%s' is not a valid argument" % arg)
print("Valid arguments are : %s" % str(arg_list))
return answer
def _check_is_src_mode(self, arg):
"""check if the argument is a valid source mode"""
return self._check_arg(arg, arg_list=SRC_MODES)
def _check_is_src_type(self, arg):
"""check if the argument is a valid source type"""
return self._check_arg(arg, arg_list=SRC_TYPES)
def _clear_register(self):
"""refer to p 15-4 of the KT2400 manual"""
if not self.mock_mode:
self.write(':STAT:PRES')
def initialize(self):
"""get the compliance and the auto output parameters"""
if self.instr_connexion is not None:
self.auto_output_off = self.enquire_auto_output_off()
self.voltage_compliance = self.get_voltage_compliance()
self.current_compliance = self.get_current_compliance()
def connect(self, instr_port_name, **kwargs):
super(KT2400, self).connect(instr_port_name, **kwargs)
self.initialize()
def measure(self, instr_param):
if instr_param in self.measure_params:
if instr_param == 'V':
if not self.mock_mode:
# Initiate a voltage measure (turn output ON)
self.write('CONF:VOLT')
answer = self.ask(':READ?')
# Voltage comes in first position by default
answer = float(answer.split(',')[0])
# Check that the value is not larger than the compliance
if answer >= self.voltage_compliance:
print("Measured voltage is at compliance level")
else:
answer = np.random.random()
self.last_measure[instr_param] = answer
elif instr_param == 'I':
if not self.mock_mode:
# Initiate a current measure (turn output ON)
self.write(':CONF:CURR')
answer = self.ask(':READ?')
# Voltage comes in second position by default
answer = float(answer.split(',')[1])
# Check that the value is not larger than the compliance
if answer >= self.current_compliance:
print("Measured current is above compliance level")
else:
answer = np.random.random()
self.last_measure[instr_param] = answer
self.measured_data[instr_param].append(
self.last_measure[instr_param]
)
else:
print(
"you are trying to measure a non existent instr_param : "
+ instr_param
)
print("existing instr_params :", self.measure_params)
answer = None
return answer
def source_and_measure(self, instr_param, src_val):
""""set the given source and measure the corresponding measurand
source voltage => measure current
source current => measure voltage
"""
if not self.mock_mode:
if instr_param == 'V':
self.configure_voltage_source()
self.set_voltage(src_val)
answer = self.measure_current()
self.disable_output()
elif instr_param == 'I':
self.configure_current_source()
self.set_current(src_val)
answer = self.measure_voltage()
self.disable_output()
else:
print("The source type should be either 'I' or 'V'")
answer = np.nan
else:
answer = np.round(
np.squeeze(fake_iv_relation(instr_param, src_val)),
4
)
return answer
def measure_voltage(self):
return self.measure('V')
def get_voltage_compliance(self):
"""voltage compliance in Volt"""
if not self.mock_mode:
return float(self.ask(':SENS:VOLT:PROT:LEV?'))
else:
return 20
def measure_current(self):
return self.measure('I')
def get_current_compliance(self):
"""current compliance in Ampere"""
if not self.mock_mode:
return float(self.ask(':SENS:CURR:PROT:LEV?'))
else:
return 1
def configure_source(self, src_type='CURR', src_mode='FIX'):
"""refer to p 18-73 of the KT2400 manual"""
if not self.mock_mode:
if self._check_is_src_type(src_type):
self.write(':SOUR:FUNC:MODE %s' % src_type)
if self._check_is_src_mode(src_mode):
self.write(':SOUR:%s:MODE %s' % (src_type, src_mode))
def configure_voltage_source(self, src_mode='FIX'):
"""set the source to output voltage"""
if not self.mock_mode:
self.configure_source('VOLT', src_mode)
self.current_compliance = self.get_current_compliance()
def configure_current_source(self, src_mode='FIX'):
"""set the source to output current"""
if not self.mock_mode:
self.configure_source('CURR', src_mode)
self.voltage_compliance = self.get_voltage_compliance()
def set_voltage(self, volt_val):
"""set the voltage for the output, does not turn output on"""
if not self.mock_mode:
self.write(':SOUR:VOLT %f' % volt_val)
def set_current(self, curr_val):
"""set the current (in uA) for the output, does not turn output on"""
if not self.mock_mode:
self.write(':SOUR:CURR %f' % (curr_val * 1e-6))
def enable_output(self):
"""turn the output of the KT2400 on"""
if not self.mock_mode:
if not self.auto_output_off:
print('ENABLING')
self.write(':OUTP ON;')
def disable_output(self):
"""shut the output of the KT2400 off"""
if not self.mock_mode:
if not self.auto_output_off:
self.write(':OUTP OFF;')
def enquire_auto_output_off(self):
"""refer to p. 13 -7 of the KT2400 manual"""
if not self.mock_mode:
return bool(int(self.ask(':SOUR:CLE:AUTO?')))
else:
return False
def enable_auto_output_off(self):
"""refer to p. 13 -7 of the KT2400 manual"""
if not self.mock_mode:
self.write(':SOUR:CLE:AUTO ON')
self.auto_output_off = self.ask(':SOUR:CLE:AUTO?')
else:
self.auto_output_off = True
def disable_auto_output_off(self):
"""refer to p. 13 -7 of the KT2400 manual"""
if not self.mock_mode:
self.write(':SOUR:CLE:AUTO OFF')
self.auto_output_off = self.ask(':SOUR:CLE:AUTO?')
else:
self.auto_output_off = False
def test_manual_source_and_meas():
"""test source-measure scheme using the low level commands
first source current and measure voltage,
then source voltage and measure current
"""
i = KT2400(
'GPIB0::11',
mock_mode=False,
prologix='COM3',
auto=0
)
i.enable_auto_output_off()
i.configure_voltage_source()
i.set_voltage(1.0)
print(i.measure_current())
i.configure_current_source()
i.set_current(0.00001)
print(i.measure_voltage())
def test_auto_source_and_meas():
"""test source-measure scheme using the ready made method
first source current and measure voltage,
then source voltage and measure current
"""
i = KT2400(
'GPIB0::11',
mock_mode=False,
prologix='COM3',
auto=0
)
i.disable_auto_output_off()
print(i.ask('*IDN?'))
print(i.source_and_measure('V', 2))
print(i.source_and_measure('I', 0.00001))
print(i.source_and_measure('I', 0.000002))
print(i.source_and_measure('I', 0.000003))
def test_connect_after_initialization():
i = KT2400(
mock_mode=False,
prologix='COM3',
auto=0
)
i.connect('GPIB0::11')
i.enable_auto_output_off()
print(i.source_and_measure('V', 2))
print(i.source_and_measure('I', 0.00001))
print(i.source_and_measure('I', 0.000002))
print(i.source_and_measure('I', 0.000003))
def test_connect_without_prologix():
i = KT2400(
mock_mode=False
)
i.connect('GPIfc00:e968:6179::de52:7100')
i.enable_auto_output_off()
print(i.ask('*IDN?'))
print(i.source_and_measure('V', 2))
print(i.source_and_measure('I', 0.00001))
print(i.source_and_measure('I', 0.000002))
print(i.source_and_measure('I', 0.000003))
| [
"numpy.where",
"numpy.random.random",
"numpy.size",
"numpy.log",
"numpy.exp",
"numpy.array"
] | [((743, 759), 'numpy.size', 'np.size', (['src_val'], {}), '(src_val)\n', (750, 759), True, 'import numpy as np\n'), ((872, 896), 'numpy.where', 'np.where', (['(src_val < i_sc)'], {}), '(src_val < i_sc)\n', (880, 896), True, 'import numpy as np\n'), ((637, 649), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (645, 649), True, 'import numpy as np\n'), ((948, 993), 'numpy.log', 'np.log', (['(1 + (1 - src_val[idx_ok] / i_sc) / c1)'], {}), '(1 + (1 - src_val[idx_ok] / i_sc) / c1)\n', (954, 993), True, 'import numpy as np\n'), ((1127, 1151), 'numpy.where', 'np.where', (['(src_val < v_oc)'], {}), '(src_val < v_oc)\n', (1135, 1151), True, 'import numpy as np\n'), ((4749, 4767), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4765, 4767), True, 'import numpy as np\n'), ((5446, 5464), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5462, 5464), True, 'import numpy as np\n'), ((1224, 1261), 'numpy.exp', 'np.exp', (['(src_val[idx_ok] / (c2 * v_oc))'], {}), '(src_val[idx_ok] / (c2 * v_oc))\n', (1230, 1261), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
from sklearn.model_selection import StratifiedKFold
class Apply:
    """Dataset-level transforms that re-batch an indexed tf.data.Dataset of
    (idx, y_true[, y_strat]) tuples into an endless stream of batches."""

    class StratifiedMinibatch:
        """Endless stratified mini-batches built from repeated k-fold splits."""

        def __init__(self, batch_size, ds_size):
            self.batch_size, self.ds_size = batch_size, ds_size
            # max number of splits
            self.n_splits = self.ds_size // self.batch_size
            # stratified "mini-batch" via k-fold: each fold's held-out split is one batch
            self.batcher = StratifiedKFold(n_splits=self.n_splits, shuffle=True)

        def __call__(self, ds_input: tf.data.Dataset):
            def generator():
                # expecting ds of (idx, y_true, y_strat)
                idx, y_true, y_strat = list(map(tf.stack, list(map(list, zip(*list(ds_input))))))
                while True:
                    for _, batch_idx in self.batcher.split(y_strat, y_strat):
                        yield tf.gather(idx, batch_idx, axis=0), tf.gather(y_true, batch_idx, axis=0)
            return tf.data.Dataset.from_generator(generator,
                                                  output_types=(ds_input.element_spec[0].dtype, ds_input.element_spec[1].dtype),
                                                  output_shapes=((None, ), (None, ds_input.element_spec[1].shape[0])))

    class StratifiedBootstrap:
        """Endless bootstrap batches drawing a fixed number of samples per class
        (with replacement) according to `batch_class_sizes`."""

        def __init__(self, batch_class_sizes=[]):
            # NOTE: the mutable default is shared across instances but only read, never mutated.
            self.batch_class_sizes = batch_class_sizes
            self.batch_size = sum(self.batch_class_sizes)
            self.rnd = tf.random.Generator.from_non_deterministic_state()

        def __call__(self, ds_input: tf.data.Dataset):
            def generator():
                # expecting ds of (idx, y_true, y_strat)
                idx, y_true, y_strat = list(map(tf.stack, list(map(list, zip(*list(ds_input))))))
                assert (tf.reduce_max(y_strat).numpy() + 1) == len(self.batch_class_sizes)
                # per-class row indices into the dataset
                class_idx = [tf.where(y_strat == i)[:, 0] for i in range(len(self.batch_class_sizes))]
                while True:
                    batch_idx = list()
                    for j, class_size in enumerate(self.batch_class_sizes):
                        # BUGFIX: `maxval` is exclusive for integer dtypes in
                        # tf.random.Generator.uniform, so it must be the full class
                        # size -- the previous `shape[0] - 1` could never sample the
                        # last element of each class.
                        draws = self.rnd.uniform(shape=(class_size, ),
                                                 maxval=tf.cast(class_idx[j].shape[0], tf.int64),
                                                 dtype=tf.int64)
                        batch_idx.append(tf.gather(class_idx[j], draws))
                    batch_idx = tf.concat(batch_idx, axis=0)
                    yield tf.gather(idx, batch_idx, axis=0), tf.gather(y_true, batch_idx, axis=0)
            return tf.data.Dataset.from_generator(generator,
                                                  output_types=(ds_input.element_spec[0].dtype, ds_input.element_spec[1].dtype),
                                                  output_shapes=((self.batch_size, ), (self.batch_size, ds_input.element_spec[1].shape[0])))

    class SubSample:
        """Endless uniformly subsampled batches (without replacement per batch)."""

        def __init__(self, batch_size, ds_size):
            self.batch_size = batch_size
            self.ds_size = ds_size

        def __call__(self, ds_input: tf.data.Dataset):
            def generator():
                # expecting ds of (idx, y_true)
                idx, y_true = list(map(tf.stack, list(map(list, zip(*list(ds_input))))))
                while True:
                    batch_idx = np.random.choice(np.arange(self.ds_size), self.batch_size, replace=False)
                    yield tf.gather(idx, batch_idx, axis=0), tf.gather(y_true, batch_idx, axis=0)
            return tf.data.Dataset.from_generator(generator,
                                                  output_types=(ds_input.element_spec[0].dtype, ds_input.element_spec[1].dtype),
                                                  output_shapes=((None, ), (None, ds_input.element_spec[1].shape[0])))
class Map:
    """Mapping stages that materialize batches of sample data from index tensors."""

    class LoadBatchByIndices:
        """Base mapper: fetch a batch of samples by index via `self.loader`."""

        def loader(self):
            # subclasses must implement the actual data lookup
            raise NotImplementedError

        def __call__(self, sample_idx, ragged_output):
            # `loader` returns the flat values plus whatever extra arguments the
            # ragged constructor needs (e.g. row lengths).
            flat_values, *partition_args = tf.py_function(self.loader, [sample_idx], self.tf_output_types)
            flat_values.set_shape((None,) + self.inner_shape)
            if not ragged_output:
                return flat_values
            return self.ragged_constructor(flat_values, *partition_args)

    class FromNumpy(LoadBatchByIndices):
        """Serve batches from an in-memory list of per-sample numpy arrays."""

        def __init__(self, data, data_type):
            self.data = data
            self.tf_output_types = [data_type, tf.int32]
            self.inner_shape = data[0].shape[1:]
            self.ragged_constructor = tf.RaggedTensor.from_row_lengths

        def loader(self, idx):
            samples = [self.data[i] for i in idx.numpy()]
            row_lengths = np.array([s.shape[0] for s in samples])
            return np.concatenate(samples, axis=0), row_lengths
| [
"tensorflow.py_function",
"tensorflow.data.Dataset.from_generator",
"tensorflow.reduce_max",
"sklearn.model_selection.StratifiedKFold",
"numpy.array",
"tensorflow.concat",
"tensorflow.where",
"tensorflow.gather",
"numpy.concatenate",
"tensorflow.cast",
"numpy.arange",
"tensorflow.random.Genera... | [((426, 479), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'self.n_splits', 'shuffle': '(True)'}), '(n_splits=self.n_splits, shuffle=True)\n', (441, 479), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((948, 1146), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['generator'], {'output_types': '(ds_input.element_spec[0].dtype, ds_input.element_spec[1].dtype)', 'output_shapes': '((None,), (None, ds_input.element_spec[1].shape[0]))'}), '(generator, output_types=(ds_input.\n element_spec[0].dtype, ds_input.element_spec[1].dtype), output_shapes=(\n (None,), (None, ds_input.element_spec[1].shape[0])))\n', (978, 1146), True, 'import tensorflow as tf\n'), ((1456, 1506), 'tensorflow.random.Generator.from_non_deterministic_state', 'tf.random.Generator.from_non_deterministic_state', ([], {}), '()\n', (1504, 1506), True, 'import tensorflow as tf\n'), ((2607, 2827), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['generator'], {'output_types': '(ds_input.element_spec[0].dtype, ds_input.element_spec[1].dtype)', 'output_shapes': '((self.batch_size,), (self.batch_size, ds_input.element_spec[1].shape[0]))'}), '(generator, output_types=(ds_input.\n element_spec[0].dtype, ds_input.element_spec[1].dtype), output_shapes=(\n (self.batch_size,), (self.batch_size, ds_input.element_spec[1].shape[0])))\n', (2637, 2827), True, 'import tensorflow as tf\n'), ((3541, 3739), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['generator'], {'output_types': '(ds_input.element_spec[0].dtype, ds_input.element_spec[1].dtype)', 'output_shapes': '((None,), (None, ds_input.element_spec[1].shape[0]))'}), '(generator, output_types=(ds_input.\n element_spec[0].dtype, ds_input.element_spec[1].dtype), output_shapes=(\n (None,), (None, ds_input.element_spec[1].shape[0])))\n', (3571, 3739), True, 'import tensorflow as tf\n'), ((4156, 4219), 
'tensorflow.py_function', 'tf.py_function', (['self.loader', '[sample_idx]', 'self.tf_output_types'], {}), '(self.loader, [sample_idx], self.tf_output_types)\n', (4170, 4219), True, 'import tensorflow as tf\n'), ((4893, 4922), 'numpy.concatenate', 'np.concatenate', (['batch'], {'axis': '(0)'}), '(batch, axis=0)\n', (4907, 4922), True, 'import numpy as np\n'), ((4924, 4961), 'numpy.array', 'np.array', (['[v.shape[0] for v in batch]'], {}), '([v.shape[0] for v in batch])\n', (4932, 4961), True, 'import numpy as np\n'), ((2459, 2487), 'tensorflow.concat', 'tf.concat', (['batch_idx'], {'axis': '(0)'}), '(batch_idx, axis=0)\n', (2468, 2487), True, 'import tensorflow as tf\n'), ((1867, 1889), 'tensorflow.where', 'tf.where', (['(y_strat == i)'], {}), '(y_strat == i)\n', (1875, 1889), True, 'import tensorflow as tf\n'), ((3366, 3389), 'numpy.arange', 'np.arange', (['self.ds_size'], {}), '(self.ds_size)\n', (3375, 3389), True, 'import numpy as np\n'), ((2515, 2548), 'tensorflow.gather', 'tf.gather', (['idx', 'batch_idx'], {'axis': '(0)'}), '(idx, batch_idx, axis=0)\n', (2524, 2548), True, 'import tensorflow as tf\n'), ((2550, 2586), 'tensorflow.gather', 'tf.gather', (['y_true', 'batch_idx'], {'axis': '(0)'}), '(y_true, batch_idx, axis=0)\n', (2559, 2586), True, 'import tensorflow as tf\n'), ((3449, 3482), 'tensorflow.gather', 'tf.gather', (['idx', 'batch_idx'], {'axis': '(0)'}), '(idx, batch_idx, axis=0)\n', (3458, 3482), True, 'import tensorflow as tf\n'), ((3484, 3520), 'tensorflow.gather', 'tf.gather', (['y_true', 'batch_idx'], {'axis': '(0)'}), '(y_true, batch_idx, axis=0)\n', (3493, 3520), True, 'import tensorflow as tf\n'), ((856, 889), 'tensorflow.gather', 'tf.gather', (['idx', 'batch_idx'], {'axis': '(0)'}), '(idx, batch_idx, axis=0)\n', (865, 889), True, 'import tensorflow as tf\n'), ((891, 927), 'tensorflow.gather', 'tf.gather', (['y_true', 'batch_idx'], {'axis': '(0)'}), '(y_true, batch_idx, axis=0)\n', (900, 927), True, 'import tensorflow as tf\n'), ((1771, 
1793), 'tensorflow.reduce_max', 'tf.reduce_max', (['y_strat'], {}), '(y_strat)\n', (1784, 1793), True, 'import tensorflow as tf\n'), ((2281, 2325), 'tensorflow.cast', 'tf.cast', (['(class_idx[j].shape[0] - 1)', 'tf.int64'], {}), '(class_idx[j].shape[0] - 1, tf.int64)\n', (2288, 2325), True, 'import tensorflow as tf\n')] |
import os
import time
import logger
import random
import tensorflow as tf
import gym
import numpy as np
from collections import deque
from config import args
from utils import set_global_seeds, sf01, explained_variance
from agent import PPO
from env_wrapper import make_env
def main():
    """Entry point: build env + PPO agent, then alternate rollout collection
    and epochs of minibatch gradient updates, logging stats periodically."""
    env = make_env()
    set_global_seeds(env, args.seed)
    agent = PPO(env=env)
    batch_steps = args.n_envs * args.batch_steps  # number of steps per update
    if args.save_interval and logger.get_dir():
        # some saving jobs
        pass
    ep_info_buffer = deque(maxlen=100)  # rolling window of recent episode stats
    t_train_start = time.time()
    n_updates = args.n_steps // batch_steps
    runner = Runner(env, agent)
    for update in range(1, n_updates + 1):
        t_start = time.time()
        # fraction of training remaining; handed to the runner (e.g. for schedules)
        frac = 1.0 - (update - 1.0) / n_updates
        lr_now = args.lr  # maybe dynamic change
        clip_range_now = args.clip_range  # maybe dynamic change
        obs, returns, masks, acts, vals, neglogps, advs, rewards, ep_infos = \
            runner.run(args.batch_steps, frac)
        ep_info_buffer.extend(ep_infos)
        loss_infos = []
        idxs = np.arange(batch_steps)
        for _ in range(args.n_epochs):
            # reshuffle so every epoch sees differently composed minibatches
            np.random.shuffle(idxs)
            for start in range(0, batch_steps, args.minibatch):
                end = start + args.minibatch
                mb_idxs = idxs[start: end]
                minibatch = [arr[mb_idxs] for arr in [obs, returns, masks, acts, vals, neglogps, advs]]
                loss_infos.append(agent.train(lr_now, clip_range_now, *minibatch))
        t_now = time.time()
        time_this_batch = t_now - t_start
        if update % args.log_interval == 0:
            # how well the value function explains the observed returns
            ev = float(explained_variance(vals, returns))
            logger.logkv('updates', str(update) + '/' + str(n_updates))
            logger.logkv('serial_steps', update * args.batch_steps)
            logger.logkv('total_steps', update * batch_steps)
            logger.logkv('time', time_this_batch)
            logger.logkv('fps', int(batch_steps / (t_now - t_start)))
            logger.logkv('total_time', t_now - t_train_start)
            logger.logkv("explained_variance", ev)
            logger.logkv('avg_reward', np.mean([e['r'] for e in ep_info_buffer]))
            logger.logkv('avg_ep_len', np.mean([e['l'] for e in ep_info_buffer]))
            logger.logkv('adv_mean', np.mean(returns - vals))
            logger.logkv('adv_variance', np.std(returns - vals)**2)
            # average each loss component over all minibatch updates this round
            loss_infos = np.mean(loss_infos, axis=0)
            for loss_name, loss_info in zip(agent.loss_names, loss_infos):
                logger.logkv(loss_name, loss_info)
            logger.dumpkvs()
        if args.save_interval and update % args.save_interval == 0 and logger.get_dir():
            pass
    env.close()
class Runner:
    """Collects fixed-length rollouts from the vectorized environment and
    computes GAE(lambda) advantages and returns for PPO updates."""

    def __init__(self, env, agent):
        self.env = env
        self.agent = agent
        # persistent observation buffer, one row per parallel environment
        self.obs = np.zeros((args.n_envs,) + env.observation_space.shape, dtype=np.float32)
        self.obs[:] = env.reset()
        self.dones = [False for _ in range(args.n_envs)]

    def run(self, batch_steps, frac):
        """Roll the current policy out for `batch_steps` steps.

        Returns the flattened (obs, returns, dones, actions, values,
        neglogps, advs, rewards) arrays plus a list of finished-episode
        info dicts collected along the way.
        """
        b_obs, b_rewards, b_actions, b_values, b_dones, b_neglogps = [], [], [], [], [], []
        ep_infos = []
        for s in range(batch_steps):
            actions, values, neglogps = self.agent.step(self.obs, self.dones)
            b_obs.append(self.obs.copy())
            b_actions.append(actions)
            b_values.append(values)
            b_neglogps.append(neglogps)
            b_dones.append(self.dones)
            self.obs[:], rewards, self.dones, infos = self.env.step(actions)
            for info in infos:
                maybeinfo = info.get('episode')
                if maybeinfo:
                    ep_infos.append(maybeinfo)
            b_rewards.append(rewards)
        # batch of steps to batch of rollouts
        b_obs = np.asarray(b_obs, dtype=self.obs.dtype)
        b_rewards = np.asarray(b_rewards, dtype=np.float32)
        b_actions = np.asarray(b_actions)
        b_values = np.asarray(b_values, dtype=np.float32)
        b_neglogps = np.asarray(b_neglogps, dtype=np.float32)
        # BUGFIX: the deprecated `np.bool` alias was removed in NumPy 1.24;
        # the builtin `bool` is the documented replacement.
        b_dones = np.asarray(b_dones, dtype=bool)
        last_values = self.agent.get_value(self.obs, self.dones)
        b_returns = np.zeros_like(b_rewards)
        b_advs = np.zeros_like(b_rewards)
        # GAE(lambda): sweep backwards, masking bootstrapped values at episode ends
        lastgaelam = 0
        for t in reversed(range(batch_steps)):
            if t == batch_steps - 1:
                mask = 1.0 - self.dones
                nextvalues = last_values
            else:
                mask = 1.0 - b_dones[t + 1]
                nextvalues = b_values[t + 1]
            delta = b_rewards[t] + args.gamma * nextvalues * mask - b_values[t]
            b_advs[t] = lastgaelam = delta + args.gamma * args.lam * mask * lastgaelam
        b_returns = b_advs + b_values
        # sf01 flattens (steps, envs, ...) -> (steps * envs, ...)
        return (*map(sf01, (b_obs, b_returns, b_dones, b_actions, b_values, b_neglogps, b_advs, b_rewards)), ep_infos)
if __name__ == '__main__':
    # silence TensorFlow's C++ log spam (3 = errors only)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    logger.configure()
    main()
| [
"numpy.mean",
"collections.deque",
"agent.PPO",
"numpy.arange",
"numpy.asarray",
"logger.configure",
"numpy.zeros_like",
"logger.dumpkvs",
"utils.set_global_seeds",
"numpy.zeros",
"logger.logkv",
"numpy.std",
"utils.explained_variance",
"time.time",
"env_wrapper.make_env",
"logger.get_... | [((299, 309), 'env_wrapper.make_env', 'make_env', ([], {}), '()\n', (307, 309), False, 'from env_wrapper import make_env\n'), ((314, 346), 'utils.set_global_seeds', 'set_global_seeds', (['env', 'args.seed'], {}), '(env, args.seed)\n', (330, 346), False, 'from utils import set_global_seeds, sf01, explained_variance\n'), ((360, 372), 'agent.PPO', 'PPO', ([], {'env': 'env'}), '(env=env)\n', (363, 372), False, 'from agent import PPO\n'), ((564, 581), 'collections.deque', 'deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (569, 581), False, 'from collections import deque\n'), ((602, 613), 'time.time', 'time.time', ([], {}), '()\n', (611, 613), False, 'import time\n'), ((5050, 5068), 'logger.configure', 'logger.configure', ([], {}), '()\n', (5066, 5068), False, 'import logger\n'), ((484, 500), 'logger.get_dir', 'logger.get_dir', ([], {}), '()\n', (498, 500), False, 'import logger\n'), ((752, 763), 'time.time', 'time.time', ([], {}), '()\n', (761, 763), False, 'import time\n'), ((1131, 1153), 'numpy.arange', 'np.arange', (['batch_steps'], {}), '(batch_steps)\n', (1140, 1153), True, 'import numpy as np\n'), ((1585, 1596), 'time.time', 'time.time', ([], {}), '()\n', (1594, 1596), False, 'import time\n'), ((2923, 2995), 'numpy.zeros', 'np.zeros', (['((args.n_envs,) + env.observation_space.shape)'], {'dtype': 'np.float32'}), '((args.n_envs,) + env.observation_space.shape, dtype=np.float32)\n', (2931, 2995), True, 'import numpy as np\n'), ((3884, 3923), 'numpy.asarray', 'np.asarray', (['b_obs'], {'dtype': 'self.obs.dtype'}), '(b_obs, dtype=self.obs.dtype)\n', (3894, 3923), True, 'import numpy as np\n'), ((3944, 3983), 'numpy.asarray', 'np.asarray', (['b_rewards'], {'dtype': 'np.float32'}), '(b_rewards, dtype=np.float32)\n', (3954, 3983), True, 'import numpy as np\n'), ((4004, 4025), 'numpy.asarray', 'np.asarray', (['b_actions'], {}), '(b_actions)\n', (4014, 4025), True, 'import numpy as np\n'), ((4045, 4083), 'numpy.asarray', 'np.asarray', (['b_values'], 
{'dtype': 'np.float32'}), '(b_values, dtype=np.float32)\n', (4055, 4083), True, 'import numpy as np\n'), ((4105, 4145), 'numpy.asarray', 'np.asarray', (['b_neglogps'], {'dtype': 'np.float32'}), '(b_neglogps, dtype=np.float32)\n', (4115, 4145), True, 'import numpy as np\n'), ((4164, 4198), 'numpy.asarray', 'np.asarray', (['b_dones'], {'dtype': 'np.bool'}), '(b_dones, dtype=np.bool)\n', (4174, 4198), True, 'import numpy as np\n'), ((4285, 4309), 'numpy.zeros_like', 'np.zeros_like', (['b_rewards'], {}), '(b_rewards)\n', (4298, 4309), True, 'import numpy as np\n'), ((4327, 4351), 'numpy.zeros_like', 'np.zeros_like', (['b_rewards'], {}), '(b_rewards)\n', (4340, 4351), True, 'import numpy as np\n'), ((1205, 1228), 'numpy.random.shuffle', 'np.random.shuffle', (['idxs'], {}), '(idxs)\n', (1222, 1228), True, 'import numpy as np\n'), ((1825, 1880), 'logger.logkv', 'logger.logkv', (['"""serial_steps"""', '(update * args.batch_steps)'], {}), "('serial_steps', update * args.batch_steps)\n", (1837, 1880), False, 'import logger\n'), ((1893, 1942), 'logger.logkv', 'logger.logkv', (['"""total_steps"""', '(update * batch_steps)'], {}), "('total_steps', update * batch_steps)\n", (1905, 1942), False, 'import logger\n'), ((1955, 1992), 'logger.logkv', 'logger.logkv', (['"""time"""', 'time_this_batch'], {}), "('time', time_this_batch)\n", (1967, 1992), False, 'import logger\n'), ((2075, 2124), 'logger.logkv', 'logger.logkv', (['"""total_time"""', '(t_now - t_train_start)'], {}), "('total_time', t_now - t_train_start)\n", (2087, 2124), False, 'import logger\n'), ((2137, 2175), 'logger.logkv', 'logger.logkv', (['"""explained_variance"""', 'ev'], {}), "('explained_variance', ev)\n", (2149, 2175), False, 'import logger\n'), ((2495, 2522), 'numpy.mean', 'np.mean', (['loss_infos'], {'axis': '(0)'}), '(loss_infos, axis=0)\n', (2502, 2522), True, 'import numpy as np\n'), ((2661, 2677), 'logger.dumpkvs', 'logger.dumpkvs', ([], {}), '()\n', (2675, 2677), False, 'import logger\n'), ((2750, 2766), 
'logger.get_dir', 'logger.get_dir', ([], {}), '()\n', (2764, 2766), False, 'import logger\n'), ((1706, 1739), 'utils.explained_variance', 'explained_variance', (['vals', 'returns'], {}), '(vals, returns)\n', (1724, 1739), False, 'from utils import set_global_seeds, sf01, explained_variance\n'), ((2215, 2256), 'numpy.mean', 'np.mean', (["[e['r'] for e in ep_info_buffer]"], {}), "([e['r'] for e in ep_info_buffer])\n", (2222, 2256), True, 'import numpy as np\n'), ((2297, 2338), 'numpy.mean', 'np.mean', (["[e['l'] for e in ep_info_buffer]"], {}), "([e['l'] for e in ep_info_buffer])\n", (2304, 2338), True, 'import numpy as np\n'), ((2377, 2400), 'numpy.mean', 'np.mean', (['(returns - vals)'], {}), '(returns - vals)\n', (2384, 2400), True, 'import numpy as np\n'), ((2614, 2648), 'logger.logkv', 'logger.logkv', (['loss_name', 'loss_info'], {}), '(loss_name, loss_info)\n', (2626, 2648), False, 'import logger\n'), ((2443, 2465), 'numpy.std', 'np.std', (['(returns - vals)'], {}), '(returns - vals)\n', (2449, 2465), True, 'import numpy as np\n')] |
"""
Implements diversity/similarity calculations for JEWEL
"""
import numpy as np
from scipy.spatial.distance import pdist, squareform
from utils.logger import get_logger
# Global variable for logging
logger = get_logger()
# Note for developers: follow the example of `gaussian_similarity` to implement
# additional similarity functions. The outer wrapper function should take any
# necessary parameters and return a function that computes the pairwise
# similarities using a single `metadata` argument. If custom code is needed to
# parse parameters from the YAML configuration, define a function that takes a
# YAML dictionary containing configuration and returns a dictionary containing
# the loaded parameters, then assign this to the `load_params` attribute of the
# similarity wrapper function. If the parameters can be passed in as-is to the
# similarity wrapper function, then the `load_params` function does not need to
# be defined. Alternatively, the similarity function could be a class which has
# a `load_params` class method defined.
def gaussian_similarity(scale_factor=None):
    """
    Build a Gaussian (RBF-style) pairwise similarity function.

    Parameters
    ----------
    scale_factor: float or None
        multiplier applied to the pairwise distances inside the Gaussian
        exponent; when None, the reciprocal of the median pairwise distance
        is used as a bandwidth heuristic

    Returns
    -------
    similarity_func: callable
        maps a metadata dict with an array-like `dd` entry (one diversity
        descriptor per row) to an n-by-n matrix of pairwise similarities,
        with every value guaranteed to lie in [0, 1]
    """
    def similarity_func(metadata):
        features = metadata['dd']
        # `pdist` computes only the condensed upper triangle of the symmetric
        # distance matrix; `squareform` expands it to the full square form.
        # This needs fewer than half the distance evaluations of
        # `cdist(features, features)`.
        distances = squareform(pdist(features))
        if scale_factor is None:
            gamma = 1.0 / np.median(distances)
        else:
            gamma = scale_factor
        return np.exp(-(gamma * distances) ** 2)
    return similarity_func
def load_similarity_func(config):
    """
    Instantiate a parameterized similarity function from a YAML config dict.

    Parameters
    ----------
    config: dict
        YAML configuration containing a "name" entry naming a similarity
        function in this module's global namespace, and optionally a
        "parameters" entry holding its keyword arguments

    Returns
    -------
    similarity_func: callable
        the parameterized similarity function, or None if loading failed
    """
    name = config.get('name', None)
    if name is None:
        logger.warning('No similarity function name specified')
        return None

    # Resolve the factory by name within this module's global namespace
    factory = globals().get(name, None)
    if factory is None:
        logger.warning(f'Similarity function "{name}" not found')
        return None

    # Let the factory parse its own parameters when it knows how to
    raw_params = config.get('parameters', {})
    if hasattr(factory, 'load_params'):
        params = factory.load_params(raw_params)
    else:
        params = raw_params
    if params is None:
        return None
    return factory(**params)
| [
"numpy.exp",
"scipy.spatial.distance.pdist",
"numpy.median",
"utils.logger.get_logger"
] | [((211, 223), 'utils.logger.get_logger', 'get_logger', ([], {}), '()\n', (221, 223), False, 'from utils.logger import get_logger\n'), ((2428, 2453), 'numpy.exp', 'np.exp', (['(-(gamma * D) ** 2)'], {}), '(-(gamma * D) ** 2)\n', (2434, 2453), True, 'import numpy as np\n'), ((2275, 2284), 'scipy.spatial.distance.pdist', 'pdist', (['dd'], {}), '(dd)\n', (2280, 2284), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((2398, 2410), 'numpy.median', 'np.median', (['D'], {}), '(D)\n', (2407, 2410), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Create dataset for predicting lightning using Dataflow.
Copyright Google Inc.
2018 Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain a copy
of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License.
"""
import argparse
import datetime
import logging
import os
import shutil
import subprocess
import apache_beam as beam
import numpy as np
import tensorflow as tf
class BoxDef(object):
    """Geometry of prediction boxes laid out on a fixed-stride grid."""

    def __init__(self, predsize, stride):
        self.stride = stride
        self.N = predsize  # pylint: disable=invalid-name
        self.half_size = predsize // 2
        self.N15 = predsize + self.half_size  # pylint: disable=invalid-name

    def get_prediction_grid_centers(self, ref):
        """Return an iterable of (cy, cx) box centers that keep an N15-wide
        margin inside the `ref` grid at every edge."""
        rows = np.arange(self.N15, ref.shape[0] - self.N15, self.stride)
        cols = np.arange(self.N15, ref.shape[1] - self.N15, self.stride)
        grid_y, grid_x = np.meshgrid(rows, cols)
        return zip(grid_y.ravel(), grid_x.ravel())

    def rawdata_input_fn(self, ref, ltg, griddef, ltgfcst=None):
        """Yield one example dict per grid box that currently has lightning.

        The label ('has_ltg') is derived from `ltgfcst` when provided,
        otherwise it is None (inference mode).
        """
        half = self.half_size
        big = self.N
        margin = self.N15
        for cy, cx in self.get_prediction_grid_centers(ref):
            # restrict to grids where there is currently lightning in the area
            if np.sum(ltg[cy - margin:cy + margin, cx - margin:cx + margin]) <= 0.5:
                continue
            if ltgfcst is None:
                label = None
            else:
                label = np.sum(ltgfcst[cy - half:cy + half, cx - half:cx + half]) > 0.5
            yield {
                'cy': cy,
                'cx': cx,
                'lon': griddef.lons[cy][cx],
                'lat': griddef.lats[cy][cx],
                'ref_smallbox': ref[cy - half:cy + half, cx - half:cx + half],
                'ref_bigbox': ref[cy - big:cy + big, cx - big:cx + big],
                'ltg_smallbox': ltg[cy - half:cy + half, cx - half:cx + half],
                'ltg_bigbox': ltg[cy - big:cy + big, cx - big:cx + big],
                'has_ltg': label,
            }
| [
"numpy.sum",
"numpy.arange"
] | [((1124, 1181), 'numpy.arange', 'np.arange', (['self.N15', '(ref.shape[0] - self.N15)', 'self.stride'], {}), '(self.N15, ref.shape[0] - self.N15, self.stride)\n', (1133, 1181), True, 'import numpy as np\n'), ((1191, 1248), 'numpy.arange', 'np.arange', (['self.N15', '(ref.shape[1] - self.N15)', 'self.stride'], {}), '(self.N15, ref.shape[1] - self.N15, self.stride)\n', (1200, 1248), True, 'import numpy as np\n'), ((1603, 1672), 'numpy.sum', 'np.sum', (['ltg[cy - self.N15:cy + self.N15, cx - self.N15:cx + self.N15]'], {}), '(ltg[cy - self.N15:cy + self.N15, cx - self.N15:cx + self.N15])\n', (1609, 1672), True, 'import numpy as np\n'), ((1749, 1851), 'numpy.sum', 'np.sum', (['ltgfcst[cy - self.half_size:cy + self.half_size, cx - self.half_size:cx +\n self.half_size]'], {}), '(ltgfcst[cy - self.half_size:cy + self.half_size, cx - self.half_size\n :cx + self.half_size])\n', (1755, 1851), True, 'import numpy as np\n')] |
# Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
from math import ceil
from time import time
from multiprocessing import Pool
import numpy as np
from mxnet import gluon, nd
from data_loading.dali_loader import get_dali_loader
from mlperf_logger import mllog_event, constants
def calculate_work(f):
    """Estimate the sliding-window work for one label volume stored at path `f`.

    Counts the number of stride-64 windows along the three spatial axes
    (channels-first layout assumed: shape[0] is the channel axis), where a
    leftover of at least 32 voxels adds one extra window per axis.
    """
    # mmap reads only the .npy header to get the shape instead of loading
    # the whole (potentially very large) volume into memory
    arr = np.load(f, mmap_mode='r')
    image_shape = list(arr.shape[1:])
    return np.prod([image_shape[i] // 64 - 1 + (1 if image_shape[i] % 64 >= 32 else 0) for i in range(3)])
def make_val_split_even(x_val, y_val, num_shards, shard_id, shard_eval, batch_size, local_shard_size):
    """Distribute validation cases across shards so each shard gets a similar
    amount of sliding-window work.

    Uses a longest-processing-time-first greedy assignment based on the
    per-case work estimates from `calculate_work`, and returns the
    (x_shard, y_shard) lists belonging to `shard_id`.
    """
    # estimate per-case work in parallel; the `with` block closes/joins the
    # pool deterministically instead of leaking worker processes
    with Pool(processes=8) as p:
        work = np.array(p.map(calculate_work, y_val))
    x_res = [[] for _ in range(num_shards)]
    y_res = [[] for _ in range(num_shards)]
    curr_work_per_shard = np.zeros(shape=num_shards)
    if shard_eval:
        # round each case's work up to whole local buckets so sharded
        # evaluation batches stay aligned across the local shard group
        bucket_size = batch_size * local_shard_size
        work = np.array([bucket_size * ceil(w / bucket_size) for w in work])
    x_val, y_val = np.array(x_val), np.array(y_val)
    # greedy LPT: sort cases by descending work, then always hand the next
    # case to the currently least-loaded shard
    sort_idx = np.argsort(work)[::-1]
    work = work[sort_idx]
    x_val, y_val = x_val[sort_idx], y_val[sort_idx]
    for w_idx, w in enumerate(work):
        idx = np.argmin(curr_work_per_shard)
        curr_work_per_shard[idx] += w
        x_res[idx].append(x_val[w_idx])
        y_res[idx].append(y_val[w_idx])
    return x_res[shard_id], y_res[shard_id]
def list_files_with_pattern(path, files_pattern):
    """Return the sorted list of files under `path` matching `files_pattern`."""
    matches = glob.glob(os.path.join(path, files_pattern))
    matches.sort()
    assert len(matches) > 0, f"Found no data at {path}"
    return matches
def load_data(path, files_pattern):
    """Collect (sorted) all files in `path` that match `files_pattern`."""
    pattern = os.path.join(path, files_pattern)
    data = sorted(glob.glob(pattern))
    assert len(data) > 0, f"Found no data at {path}"
    return data
def get_split(data, train_idx, val_idx):
    """Split `data` into (train, val) lists selected by the given index arrays."""
    arr = np.array(data)
    return list(arr[train_idx]), list(arr[val_idx])
def get_data_split(path: str):
    """Split the preprocessed dataset at `path` into train/validation lists.

    Cases listed in `evaluation_cases.txt` (by case id, the second-to-last
    underscore-separated token of the image filename) go to validation;
    everything else goes to training. Sample counts are reported via mllog.
    """
    with open("evaluation_cases.txt", "r") as f:
        val_cases_list = [case.rstrip("\n") for case in f.readlines()]
    imgs = load_data(path, "*_x.npy")
    lbls = load_data(path, "*_y.npy")
    assert len(imgs) == len(lbls), f"Found {len(imgs)} volumes but {len(lbls)} corresponding masks"
    imgs_train, lbls_train, imgs_val, lbls_val = [], [], [], []
    for case_img, case_lbl in zip(imgs, lbls):
        if case_img.split("_")[-2] in val_cases_list:
            bucket_imgs, bucket_lbls = imgs_val, lbls_val
        else:
            bucket_imgs, bucket_lbls = imgs_train, lbls_train
        bucket_imgs.append(case_img)
        bucket_lbls.append(case_lbl)
    mllog_event(key='train_samples', value=len(imgs_train), sync=False)
    mllog_event(key='eval_samples', value=len(imgs_val), sync=False)
    return imgs_train, imgs_val, lbls_train, lbls_val
class SyntheticDataset(gluon.data.Dataset):
    """Random in-memory dataset of 32 cached samples, cycled to length 64.

    Produces channels-last input volumes and either one-hot-style float
    targets or integer class labels (when `scalar=True`).
    """
    def __init__(self, channels_in=1, channels_out=3, shape=(128, 128, 128), ctx=None, scalar=False):
        # channels-last input: spatial shape + (channels_in,)
        x_shape = tuple(shape) + (channels_in,)
        self.x = nd.random.uniform(shape=(32, *x_shape), dtype=np.float32, ctx=ctx)
        if scalar:
            # NOTE(review): mxnet's randint samples from the half-open interval
            # [low, high), so high=channels_out-1 never produces the top class
            # label -- confirm this is intended for synthetic data.
            self.y = nd.random.randint(low=0, high=channels_out-1, shape=(32, *shape), dtype=np.int32, ctx=ctx)
            self.y = nd.expand_dims(self.y, -1)
        else:
            # dense float targets with one value per output channel
            y_shape = tuple(shape) + (channels_out,)
            self.y = nd.random.uniform(shape=(32, *y_shape), dtype=np.float32, ctx=ctx)
    def __len__(self):
        # report 64 items while cycling over the 32 cached samples
        return 64
    def __getitem__(self, idx):
        return self.x[idx % 32], self.y[idx % 32]
def get_data_loaders(flags, data_dir, seed, local_rank, global_rank, train_ranks, eval_ranks,
                     spatial_group_size, shard_eval):
    """Build the DALI train/eval data loaders for this rank.

    Returns (train_dataloader, val_dataloader); either entry is None when
    this rank does not participate in the corresponding phase.
    """
    x_train, x_val, y_train, y_val = get_data_split(data_dir)
    if global_rank in train_ranks:
        # ranks in the same spatial group share one data shard
        shard_id = global_rank // spatial_group_size
        num_shards = len(train_ranks) // spatial_group_size
        dataset_len = len(x_train)
        if flags.stick_to_shard:
            # pre-slice this rank's shard and present it as a single-shard dataset
            shard_len = len(x_train) // num_shards
            x_train = x_train[shard_id * shard_len:(shard_id + 1) * shard_len]
            y_train = y_train[shard_id * shard_len:(shard_id + 1) * shard_len]
            num_shards = 1
            shard_id = 0
            dataset_len = len(x_train)
        train_dataloader = get_dali_loader(flags, x_train, y_train, mode="train", seed=seed, num_shards=num_shards,
                                           device_id=local_rank, shard_id=shard_id, global_rank=global_rank,
                                           dataset_len=dataset_len)
    else:
        train_dataloader = None
    if global_rank in eval_ranks:
        if shard_eval:
            # evaluation sharded over groups of (up to) 8 local ranks each
            shard_id = (global_rank - eval_ranks[0]) // min(len(eval_ranks), 8)
            num_shards = len(eval_ranks) // min(len(eval_ranks), 8)
        else:
            shard_id = (global_rank - eval_ranks[0]) // spatial_group_size
            num_shards = len(eval_ranks) // spatial_group_size
        # balance validation work across the eval shards (greedy LPT split)
        x_val, y_val = make_val_split_even(x_val, y_val, num_shards=num_shards, shard_id=shard_id,
                                            shard_eval=shard_eval, batch_size=flags.val_batch_size,
                                            local_shard_size=min(len(eval_ranks), 8))
        val_dataloader = get_dali_loader(flags, x_val, y_val, mode="validation", seed=seed,
                                        num_shards=1, device_id=local_rank)
    else:
        val_dataloader = None
    return train_dataloader, val_dataloader
def get_dummy_loaders(flags, data_dir, seed, local_rank, global_rank, training_ranks, spatial_group_size):
    """Build a training loader over a synthetic per-rank dummy case.

    Intended for benchmarking/debugging; the validation loader is always None.
    """
    if spatial_group_size > 1:
        # spatial parallelism splits one sample across ranks, so per-rank batch must be 1
        assert flags.batch_size == 1, f"batch_size must be equal to 1, got {flags.batch_size}"
        assert flags.val_batch_size == 1, f"val_batch_size must be equal to 1, got {flags.val_batch_size}"
    train_dataloader = None
    val_dataloader = None
    if global_rank in training_ranks:
        # one unique dummy case per local rank (zero-padded 5-digit id)
        case_id = str(local_rank).zfill(5)
        create_dummy_dataset(data_dir, case_id=case_id)
        x_train = load_data(data_dir, f"*{case_id}_x.npy")
        y_train = load_data(data_dir, f"*{case_id}_y.npy")
        train_dataloader = get_dali_loader(flags, x_train, y_train, mode="train", seed=seed, num_shards=1,
                                           device_id=local_rank, shard_id=0, global_rank=global_rank)
    return train_dataloader, val_dataloader
def create_dummy_dataset(data_dir, case_id):
    """Write one synthetic volume/label pair `dummy_<case_id>_{x,y}.npy` into `data_dir`."""
    os.makedirs(data_dir, exist_ok=True)
    image = np.random.rand(1, 256, 256, 256).astype(np.float32)
    label = np.random.randint(low=0, high=3, size=(1, 256, 256, 256), dtype=np.uint8)
    image_path = os.path.join(data_dir, f"dummy_{case_id}_x.npy")
    label_path = os.path.join(data_dir, f"dummy_{case_id}_y.npy")
    np.save(image_path, image)
    np.save(label_path, label)
| [
"mxnet.nd.random.uniform",
"math.ceil",
"numpy.random.rand",
"os.makedirs",
"data_loading.dali_loader.get_dali_loader",
"os.path.join",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.random.randint",
"multiprocessing.Pool",
"mxnet.nd.expand_dims",
"mxnet.nd.random.randint",
"numpy.a... | [((901, 911), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (908, 911), True, 'import numpy as np\n'), ((1171, 1177), 'time.time', 'time', ([], {}), '()\n', (1175, 1177), False, 'from time import time\n'), ((1186, 1203), 'multiprocessing.Pool', 'Pool', ([], {'processes': '(8)'}), '(processes=8)\n', (1190, 1203), False, 'from multiprocessing import Pool\n'), ((1368, 1394), 'numpy.zeros', 'np.zeros', ([], {'shape': 'num_shards'}), '(shape=num_shards)\n', (1376, 1394), True, 'import numpy as np\n'), ((7015, 7051), 'os.makedirs', 'os.makedirs', (['data_dir'], {'exist_ok': '(True)'}), '(data_dir, exist_ok=True)\n', (7026, 7051), False, 'import os\n'), ((7120, 7193), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(3)', 'size': '(1, 256, 256, 256)', 'dtype': 'np.uint8'}), '(low=0, high=3, size=(1, 256, 256, 256), dtype=np.uint8)\n', (7137, 7193), True, 'import numpy as np\n'), ((1561, 1576), 'numpy.array', 'np.array', (['x_val'], {}), '(x_val)\n', (1569, 1576), True, 'import numpy as np\n'), ((1578, 1593), 'numpy.array', 'np.array', (['y_val'], {}), '(y_val)\n', (1586, 1593), True, 'import numpy as np\n'), ((1610, 1626), 'numpy.argsort', 'np.argsort', (['work'], {}), '(work)\n', (1620, 1626), True, 'import numpy as np\n'), ((1763, 1793), 'numpy.argmin', 'np.argmin', (['curr_work_per_shard'], {}), '(curr_work_per_shard)\n', (1772, 1793), True, 'import numpy as np\n'), ((3574, 3640), 'mxnet.nd.random.uniform', 'nd.random.uniform', ([], {'shape': '(32, *x_shape)', 'dtype': 'np.float32', 'ctx': 'ctx'}), '(shape=(32, *x_shape), dtype=np.float32, ctx=ctx)\n', (3591, 3640), False, 'from mxnet import gluon, nd\n'), ((4855, 5042), 'data_loading.dali_loader.get_dali_loader', 'get_dali_loader', (['flags', 'x_train', 'y_train'], {'mode': '"""train"""', 'seed': 'seed', 'num_shards': 'num_shards', 'device_id': 'local_rank', 'shard_id': 'shard_id', 'global_rank': 'global_rank', 'dataset_len': 'dataset_len'}), "(flags, x_train, y_train, 
mode='train', seed=seed,\n num_shards=num_shards, device_id=local_rank, shard_id=shard_id,\n global_rank=global_rank, dataset_len=dataset_len)\n", (4870, 5042), False, 'from data_loading.dali_loader import get_dali_loader\n'), ((5829, 5935), 'data_loading.dali_loader.get_dali_loader', 'get_dali_loader', (['flags', 'x_val', 'y_val'], {'mode': '"""validation"""', 'seed': 'seed', 'num_shards': '(1)', 'device_id': 'local_rank'}), "(flags, x_val, y_val, mode='validation', seed=seed,\n num_shards=1, device_id=local_rank)\n", (5844, 5935), False, 'from data_loading.dali_loader import get_dali_loader\n'), ((6737, 6879), 'data_loading.dali_loader.get_dali_loader', 'get_dali_loader', (['flags', 'x_train', 'y_train'], {'mode': '"""train"""', 'seed': 'seed', 'num_shards': '(1)', 'device_id': 'local_rank', 'shard_id': '(0)', 'global_rank': 'global_rank'}), "(flags, x_train, y_train, mode='train', seed=seed,\n num_shards=1, device_id=local_rank, shard_id=0, global_rank=global_rank)\n", (6752, 6879), False, 'from data_loading.dali_loader import get_dali_loader\n'), ((7206, 7254), 'os.path.join', 'os.path.join', (['data_dir', 'f"""dummy_{case_id}_x.npy"""'], {}), "(data_dir, f'dummy_{case_id}_x.npy')\n", (7218, 7254), False, 'import os\n'), ((7271, 7319), 'os.path.join', 'os.path.join', (['data_dir', 'f"""dummy_{case_id}_y.npy"""'], {}), "(data_dir, f'dummy_{case_id}_y.npy')\n", (7283, 7319), False, 'import os\n'), ((2037, 2070), 'os.path.join', 'os.path.join', (['path', 'files_pattern'], {}), '(path, files_pattern)\n', (2049, 2070), False, 'import os\n'), ((2208, 2241), 'os.path.join', 'os.path.join', (['path', 'files_pattern'], {}), '(path, files_pattern)\n', (2220, 2241), False, 'import os\n'), ((2373, 2387), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2381, 2387), True, 'import numpy as np\n'), ((2415, 2429), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2423, 2429), True, 'import numpy as np\n'), ((3681, 3778), 'mxnet.nd.random.randint', 
'nd.random.randint', ([], {'low': '(0)', 'high': '(channels_out - 1)', 'shape': '(32, *shape)', 'dtype': 'np.int32', 'ctx': 'ctx'}), '(low=0, high=channels_out - 1, shape=(32, *shape), dtype=\n np.int32, ctx=ctx)\n', (3698, 3778), False, 'from mxnet import gluon, nd\n'), ((3793, 3819), 'mxnet.nd.expand_dims', 'nd.expand_dims', (['self.y', '(-1)'], {}), '(self.y, -1)\n', (3807, 3819), False, 'from mxnet import gluon, nd\n'), ((3908, 3974), 'mxnet.nd.random.uniform', 'nd.random.uniform', ([], {'shape': '(32, *y_shape)', 'dtype': 'np.float32', 'ctx': 'ctx'}), '(shape=(32, *y_shape), dtype=np.float32, ctx=ctx)\n', (3925, 3974), False, 'from mxnet import gluon, nd\n'), ((7060, 7092), 'numpy.random.rand', 'np.random.rand', (['(1)', '(256)', '(256)', '(256)'], {}), '(1, 256, 256, 256)\n', (7074, 7092), True, 'import numpy as np\n'), ((1506, 1527), 'math.ceil', 'ceil', (['(w / bucket_size)'], {}), '(w / bucket_size)\n', (1510, 1527), False, 'from math import ceil\n')] |
# -*- coding: utf-8 -*-
import numpy as np
from scipy import constants
def B21(A21,nu):
    '''Einstein B21 coefficient for stimulated emission.

    Derived from the Einstein A21 coefficient and the transition
    frequency nu via B21 = c**2/(2*h*nu**3) * A21.'''
    prefactor = constants.c**2/(2*constants.h*nu**3)
    return prefactor*A21
def B12(A21,nu,g1,g2):
    '''Einstein B12 coefficient for absorption.

    Computed from the Einstein A21 coefficient, the frequency nu, and the
    statistical weights g2 and g1 of the upper and lower level
    respectively; equals g2/g1 times the stimulated-emission
    coefficient B21 = c**2/(2*h*nu**3)*A21.'''
    stimulated_emission = constants.c**2/(2*constants.h*nu**3)*A21
    return g2/g1*stimulated_emission
def B_nu(nu,T):
    """Planck function (black body) in [W/m2/Hz/sr].

    Parameters
    ----------
    nu : float or numpy.ndarray
        frequency in Hz
    T : float or numpy.ndarray
        temperature in K

    Returns
    -------
    numpy.ndarray
        Value of the Planck function in [W/m2/Hz/sr]
    """
    T = np.array(T)
    exponent = constants.h*nu/(constants.k*T)
    prefactor = 2*constants.h*nu**3/constants.c**2
    return prefactor*(np.exp(exponent)-1)**-1
def generate_CMB_background(z=0):
    '''Build a function evaluating the CMB background at redshift z.

    Parameters
    -----------
    z: float
        redshift

    Returns
    --------
    function
        function giving CMB background in [W/m2/Hz/sr] for an input
        frequency in [Hz]
    '''
    # The CMB temperature scales as (1+z) from its present-day 2.73 K.
    T_CMB = 2.73*(1+z)
    def CMB_background(nu):
        return B_nu(nu=nu,T=T_CMB)
    return CMB_background
def zero_background(nu):
    '''Zero-intensity radiation field.

    Parameters
    ------------
    nu: array_like
        frequency in Hz

    Returns
    ---------------
    numpy.ndarray
        Zero intensity at all requested frequencies'''
    nu = np.asarray(nu)
    return np.zeros_like(nu)
def FWHM2sigma(FWHM):
    """Convert the FWHM of a Gaussian to its standard deviation.

    Parameters
    -----------
    FWHM: float or numpy.ndarray
        FWHM of the Gaussian

    Returns
    ------------
    float or numpy.ndarray
        the standard deviation of the Gaussian"""
    # For a Gaussian, FWHM = 2*sqrt(2*ln 2) * sigma.
    conversion_factor = 2*np.sqrt(2*np.log(2))
    return FWHM/conversion_factor
def relative_difference(a,b):
    """Computes the elementwise relative difference between a and b.

    In general, return |a-b|/a.
    Special cases:
    a=0 and b=0: return 0
    a=0 and b!=0: return 1"""
    diff = np.abs(a-b)
    # Suppress divide-by-zero / invalid warnings: positions with a==0 are
    # overwritten by the special-case values below.
    with np.errstate(invalid='ignore',divide='ignore'):
        fallback = np.where(a==0,1,diff/a)
        rel_diff = np.where((a==0) & (b==0),0,fallback)
    assert not np.any(np.isnan(rel_diff))
    return np.abs(rel_diff)
def Delta_nu(Delta_v,nu0):
'''Computes the frequency interval from a given velocity interval
Delta_v at frequency nu0'''
return nu0 * Delta_v / constants.c | [
"numpy.abs",
"numpy.where",
"numpy.log",
"numpy.exp",
"numpy.array",
"numpy.errstate",
"numpy.isnan",
"numpy.zeros_like"
] | [((909, 920), 'numpy.array', 'np.array', (['T'], {}), '(T)\n', (917, 920), True, 'import numpy as np\n'), ((1755, 1772), 'numpy.zeros_like', 'np.zeros_like', (['nu'], {}), '(nu)\n', (1768, 1772), True, 'import numpy as np\n'), ((2325, 2338), 'numpy.abs', 'np.abs', (['(a - b)'], {}), '(a - b)\n', (2331, 2338), True, 'import numpy as np\n'), ((2521, 2537), 'numpy.abs', 'np.abs', (['rel_diff'], {}), '(rel_diff)\n', (2527, 2537), True, 'import numpy as np\n'), ((2346, 2392), 'numpy.errstate', 'np.errstate', ([], {'invalid': '"""ignore"""', 'divide': '"""ignore"""'}), "(invalid='ignore', divide='ignore')\n", (2357, 2392), True, 'import numpy as np\n'), ((2439, 2472), 'numpy.where', 'np.where', (['(a == 0)', '(1)', '(abs_diff / a)'], {}), '(a == 0, 1, abs_diff / a)\n', (2447, 2472), True, 'import numpy as np\n'), ((2490, 2508), 'numpy.isnan', 'np.isnan', (['rel_diff'], {}), '(rel_diff)\n', (2498, 2508), True, 'import numpy as np\n'), ((981, 1025), 'numpy.exp', 'np.exp', (['(constants.h * nu / (constants.k * T))'], {}), '(constants.h * nu / (constants.k * T))\n', (987, 1025), True, 'import numpy as np\n'), ((2091, 2100), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (2097, 2100), True, 'import numpy as np\n')] |
import logging
import os
import time
import warnings
from collections import OrderedDict
from datetime import datetime
import numpy as np
from pandas import DataFrame
from pandas_gbq.exceptions import AccessDenied
logger = logging.getLogger(__name__)
BIGQUERY_INSTALLED_VERSION = None
SHOW_VERBOSE_DEPRECATION = False
try:
import tqdm # noqa
except ImportError:
tqdm = None
def _check_google_client_version():
    """Validate the installed google-cloud-bigquery version.

    Side effects: sets the module globals BIGQUERY_INSTALLED_VERSION and
    SHOW_VERBOSE_DEPRECATION.

    Raises
    ------
    ImportError
        If pkg_resources (setuptools) is missing, or the installed
        google-cloud-bigquery is older than the minimum supported version.
    """
    global BIGQUERY_INSTALLED_VERSION, SHOW_VERBOSE_DEPRECATION
    try:
        import pkg_resources
    except ImportError:
        raise ImportError("Could not import pkg_resources (setuptools).")
    # https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/bigquery/CHANGELOG.md
    bigquery_minimum_version = pkg_resources.parse_version("0.32.0")
    BIGQUERY_INSTALLED_VERSION = pkg_resources.get_distribution(
        "google-cloud-bigquery"
    ).parsed_version
    if BIGQUERY_INSTALLED_VERSION < bigquery_minimum_version:
        raise ImportError(
            "pandas-gbq requires google-cloud-bigquery >= {0}, "
            "current version {1}".format(
                bigquery_minimum_version, BIGQUERY_INSTALLED_VERSION
            )
        )
    # Add check for Pandas version before showing deprecation warning.
    # https://github.com/pydata/pandas-gbq/issues/157
    # Only warn about ``verbose`` on pandas >= 0.23.0, the first version
    # that itself dropped the ``verbose`` argument.
    pandas_installed_version = pkg_resources.get_distribution(
        "pandas"
    ).parsed_version
    pandas_version_wo_verbosity = pkg_resources.parse_version("0.23.0")
    SHOW_VERBOSE_DEPRECATION = (
        pandas_installed_version >= pandas_version_wo_verbosity
    )
def _test_google_api_imports():
    """Verify that the required Google client libraries are importable.

    Raises
    ------
    ImportError
        Naming the missing dependency when google-auth-oauthlib,
        google-auth, or google-cloud-bigquery cannot be imported.
        _check_google_client_version() may additionally raise for an
        outdated google-cloud-bigquery.
    """
    try:
        from google_auth_oauthlib.flow import InstalledAppFlow  # noqa
    except ImportError as ex:
        raise ImportError(
            "pandas-gbq requires google-auth-oauthlib: {0}".format(ex)
        )
    try:
        import google.auth  # noqa
    except ImportError as ex:
        raise ImportError("pandas-gbq requires google-auth: {0}".format(ex))
    try:
        from google.cloud import bigquery  # noqa
    except ImportError as ex:
        raise ImportError(
            "pandas-gbq requires google-cloud-bigquery: {0}".format(ex)
        )
    # Imports succeeded; now enforce the minimum supported versions.
    _check_google_client_version()
class DatasetCreationError(ValueError):
    """Raised when the create dataset method fails."""
class GenericGBQException(ValueError):
    """Raised when an unrecognized Google API Error occurs."""
class InvalidColumnOrder(ValueError):
    """Raised when the provided column order for the output results
    DataFrame does not match the schema returned by BigQuery."""
class InvalidIndexColumn(ValueError):
    """Raised when the provided index column for the output results
    DataFrame does not match the schema returned by BigQuery."""
class InvalidPageToken(ValueError):
    """Raised when Google BigQuery fails to return, or returns a
    duplicate, page token."""
class InvalidSchema(ValueError):
    """Raised when the provided DataFrame does not match the schema of
    the destination table in BigQuery."""
class NotFoundException(ValueError):
    """Raised when the project_id, table or dataset provided in the
    query could not be found."""
class QueryTimeout(ValueError):
    """Raised when the query request exceeds the timeoutMs value
    specified in the BigQuery configuration."""
class TableCreationError(ValueError):
    """Raised when the create table method fails."""
class Context(object):
    """Session-wide storage for cached credentials and project ID.

    An instance is created when the ``pandas_gbq`` module is imported and
    exposed as :attr:`pandas_gbq.context`; calls to
    :func:`pandas_gbq.read_gbq` and :func:`pandas_gbq.to_gbq` consult it
    so credentials are only fetched once per session.
    """

    def __init__(self):
        # Both caches start empty and are filled lazily.
        self._credentials = None
        self._project = None

    @property
    def credentials(self):
        """google.auth.credentials.Credentials: Credentials to use for
        Google APIs, cached in memory across read_gbq/to_gbq calls.

        To set them manually, assign a
        :class:`google.auth.credentials.Credentials` object, e.g. one
        built with
        ``google.oauth2.service_account.Credentials.from_service_account_file``.
        See the `auth docs
        <http://google-auth.readthedocs.io/en/latest/user-guide.html#obtaining-credentials>`__
        for more information on obtaining credentials.
        """
        return self._credentials

    @credentials.setter
    def credentials(self, value):
        self._credentials = value

    @property
    def project(self):
        """str: Default project to use for calls to Google APIs.

        To override, assign a project ID string:
        ``pandas_gbq.context.project = 'my-project'``.
        """
        return self._project

    @project.setter
    def project(self, value):
        self._project = value
# Create an empty context, used to cache credentials across read_gbq/to_gbq
# calls within a session (see GbqConnector.__init__).
context = Context()
"""A :class:`pandas_gbq.Context` object used to cache credentials.
Credentials automatically are cached in-memory by :func:`pandas_gbq.read_gbq`
and :func:`pandas_gbq.to_gbq`.
"""
class GbqConnector(object):
    """Encapsulates a connection to Google BigQuery.

    Handles credential lookup (with in-memory caching via the module-level
    ``context``), client construction, query execution, schema inspection,
    and data loading.
    """
    def __init__(
        self,
        project_id,
        reauth=False,
        private_key=None,
        auth_local_webserver=False,
        dialect="legacy",
        location=None,
        try_credentials=None,
    ):
        global context
        from google.api_core.exceptions import GoogleAPIError
        from google.api_core.exceptions import ClientError
        from pandas_gbq import auth
        # Google API errors translated into GenericGBQException by
        # process_http_error().
        self.http_error = (ClientError, GoogleAPIError)
        self.project_id = project_id
        self.location = location
        self.reauth = reauth
        self.private_key = private_key
        self.auth_local_webserver = auth_local_webserver
        self.dialect = dialect
        self.credentials_path = _get_credentials_file()
        # Load credentials from cache.
        self.credentials = context.credentials
        default_project = context.project
        # Credentials were explicitly asked for, so don't use the cache.
        if private_key or reauth or not self.credentials:
            self.credentials, default_project = auth.get_credentials(
                private_key=private_key,
                project_id=project_id,
                reauth=reauth,
                auth_local_webserver=auth_local_webserver,
                try_credentials=try_credentials,
            )
        if self.project_id is None:
            self.project_id = default_project
        if self.project_id is None:
            raise ValueError(
                "Could not determine project ID and one was not supplied."
            )
        # Cache the credentials if they haven't been set yet.
        if context.credentials is None:
            context.credentials = self.credentials
        if context.project is None:
            context.project = self.project_id
        self.client = self.get_client()
        # BQ Queries costs $5 per TB. First 1 TB per month is free
        # see here for more: https://cloud.google.com/bigquery/pricing
        self.query_price_for_TB = 5. / 2 ** 40 # USD/TB
    def _start_timer(self):
        # Record the wall-clock start used by get_elapsed_seconds().
        self.start = time.time()
    def get_elapsed_seconds(self):
        # Seconds elapsed since _start_timer(), rounded to 2 decimals.
        return round(time.time() - self.start, 2)
    def log_elapsed_seconds(self, prefix="Elapsed", postfix="s.", overlong=6):
        # Only log when the elapsed time exceeds ``overlong`` seconds.
        sec = self.get_elapsed_seconds()
        if sec > overlong:
            logger.info("{} {} {}".format(prefix, sec, postfix))
    # http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    @staticmethod
    def sizeof_fmt(num, suffix="B"):
        # Format a byte count as a human-readable string (KB, MB, ...).
        fmt = "%3.1f %s%s"
        for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
            if abs(num) < 1024.0:
                return fmt % (num, unit, suffix)
            num /= 1024.0
        return fmt % (num, "Y", suffix)
    def get_client(self):
        # Build a BigQuery client bound to this connector's project and
        # credentials.
        from google.cloud import bigquery
        return bigquery.Client(
            project=self.project_id, credentials=self.credentials
        )
    @staticmethod
    def process_http_error(ex):
        # See `BigQuery Troubleshooting Errors
        # <https://cloud.google.com/bigquery/troubleshooting-errors>`__
        raise GenericGBQException("Reason: {0}".format(ex))
    def run_query(self, query, **kwargs):
        """Execute ``query`` and return ``(schema, rows)``.

        ``schema`` is a dict with a "fields" list of BigQuery field
        descriptors; ``rows`` is the materialized list of result rows.
        A query statement may alternatively be supplied via
        ``kwargs["configuration"]["query"]["query"]`` (but not both).
        """
        from concurrent.futures import TimeoutError
        from google.auth.exceptions import RefreshError
        from google.cloud import bigquery
        job_config = {
            "query": {
                "useLegacySql": self.dialect
                == "legacy"
                # 'allowLargeResults', 'createDisposition',
                # 'preserveNulls', destinationTable, useQueryCache
            }
        }
        config = kwargs.get("configuration")
        if config is not None:
            job_config.update(config)
            if "query" in config and "query" in config["query"]:
                if query is not None:
                    raise ValueError(
                        "Query statement can't be specified "
                        "inside config while it is specified "
                        "as parameter"
                    )
                query = config["query"].pop("query")
        self._start_timer()
        try:
            logger.debug("Requesting query... ")
            query_reply = self.client.query(
                query,
                job_config=bigquery.QueryJobConfig.from_api_repr(job_config),
                location=self.location,
            )
            logger.debug("Query running...")
        except (RefreshError, ValueError):
            # ValueError here comes from the client rejecting the
            # credentials, not from bad arguments.
            if self.private_key:
                raise AccessDenied(
                    "The service account credentials are not valid"
                )
            else:
                raise AccessDenied(
                    "The credentials have been revoked or expired, "
                    "please re-run the application to re-authorize"
                )
        except self.http_error as ex:
            self.process_http_error(ex)
        job_id = query_reply.job_id
        logger.debug("Job ID: %s" % job_id)
        # Poll until the job is DONE, honoring the user-supplied timeoutMs.
        while query_reply.state != "DONE":
            self.log_elapsed_seconds(" Elapsed", "s. Waiting...")
            timeout_ms = job_config["query"].get("timeoutMs")
            if timeout_ms and timeout_ms < self.get_elapsed_seconds() * 1000:
                raise QueryTimeout("Query timeout: {} ms".format(timeout_ms))
            timeout_sec = 1.0
            if timeout_ms:
                # Wait at most 1 second so we can show progress bar
                timeout_sec = min(1.0, timeout_ms / 1000.0)
            try:
                query_reply.result(timeout=timeout_sec)
            except TimeoutError:
                # Use our own timeout logic
                pass
            except self.http_error as ex:
                self.process_http_error(ex)
        if query_reply.cache_hit:
            logger.debug("Query done.\nCache hit.\n")
        else:
            bytes_processed = query_reply.total_bytes_processed or 0
            bytes_billed = query_reply.total_bytes_billed or 0
            logger.debug(
                "Query done.\nProcessed: {} Billed: {}".format(
                    self.sizeof_fmt(bytes_processed),
                    self.sizeof_fmt(bytes_billed),
                )
            )
            logger.debug(
                "Standard price: ${:,.2f} USD\n".format(
                    bytes_billed * self.query_price_for_TB
                )
            )
        try:
            rows_iter = query_reply.result()
        except self.http_error as ex:
            self.process_http_error(ex)
        result_rows = list(rows_iter)
        total_rows = rows_iter.total_rows
        schema = {
            "fields": [field.to_api_repr() for field in rows_iter.schema]
        }
        logger.debug("Got {} rows.\n".format(total_rows))
        return schema, result_rows
    def load_data(
        self,
        dataframe,
        dataset_id,
        table_id,
        chunksize=None,
        schema=None,
        progress_bar=True,
    ):
        """Upload ``dataframe`` to the given table, optionally in chunks,
        logging percentage progress (with tqdm when available)."""
        from pandas_gbq import load
        total_rows = len(dataframe)
        try:
            chunks = load.load_chunks(
                self.client,
                dataframe,
                dataset_id,
                table_id,
                chunksize=chunksize,
                schema=schema,
                location=self.location,
            )
            if progress_bar and tqdm:
                chunks = tqdm.tqdm(chunks)
            for remaining_rows in chunks:
                logger.info(
                    "\rLoad is {0}% Complete".format(
                        ((total_rows - remaining_rows) * 100) / total_rows
                    )
                )
        except self.http_error as ex:
            self.process_http_error(ex)
    def schema(self, dataset_id, table_id):
        """Retrieve the schema of the table

        Obtain from BigQuery the field names and field types
        for the table defined by the parameters

        Parameters
        ----------
        dataset_id : str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table

        Returns
        -------
        list of dicts
            Fields representing the schema
        """
        table_ref = self.client.dataset(dataset_id).table(table_id)
        try:
            table = self.client.get_table(table_ref)
            remote_schema = table.schema
            remote_fields = [
                field_remote.to_api_repr() for field_remote in remote_schema
            ]
            # Normalize case so comparisons with user-supplied schemas work.
            for field in remote_fields:
                field["type"] = field["type"].upper()
                field["mode"] = field["mode"].upper()
            return remote_fields
        except self.http_error as ex:
            self.process_http_error(ex)
    def _clean_schema_fields(self, fields):
        """Return a sanitized version of the schema for comparisons."""
        fields_sorted = sorted(fields, key=lambda field: field["name"])
        # Ignore mode and description when comparing schemas.
        return [
            {"name": field["name"], "type": field["type"]}
            for field in fields_sorted
        ]
    def verify_schema(self, dataset_id, table_id, schema):
        """Indicate whether schemas match exactly

        Compare the BigQuery table identified in the parameters with
        the schema passed in and indicate whether all fields in the former
        are present in the latter. Order is not considered.

        Parameters
        ----------
        dataset_id :str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table
        schema : list(dict)
            Schema for comparison. Each item should have
            a 'name' and a 'type'

        Returns
        -------
        bool
            Whether the schemas match
        """
        fields_remote = self._clean_schema_fields(
            self.schema(dataset_id, table_id)
        )
        fields_local = self._clean_schema_fields(schema["fields"])
        return fields_remote == fields_local
    def schema_is_subset(self, dataset_id, table_id, schema):
        """Indicate whether the schema to be uploaded is a subset

        Compare the BigQuery table identified in the parameters with
        the schema passed in and indicate whether a subset of the fields in
        the former are present in the latter. Order is not considered.

        Parameters
        ----------
        dataset_id : str
            Name of the BigQuery dataset for the table
        table_id : str
            Name of the BigQuery table
        schema : list(dict)
            Schema for comparison. Each item should have
            a 'name' and a 'type'

        Returns
        -------
        bool
            Whether the passed schema is a subset
        """
        fields_remote = self._clean_schema_fields(
            self.schema(dataset_id, table_id)
        )
        fields_local = self._clean_schema_fields(schema["fields"])
        return all(field in fields_remote for field in fields_local)
    def delete_and_recreate_table(self, dataset_id, table_id, table_schema):
        # Used by to_gbq(if_exists="replace"): drop the existing table and
        # recreate it with the provided schema.
        table = _Table(
            self.project_id, dataset_id, private_key=self.private_key
        )
        table.delete(table_id)
        table.create(table_id, table_schema)
def _get_credentials_file():
return os.environ.get("PANDAS_GBQ_CREDENTIALS_FILE")
def _parse_schema(schema_fields):
# see:
# http://pandas.pydata.org/pandas-docs/dev/missing_data.html
# #missing-data-casting-rules-and-indexing
dtype_map = {"FLOAT": np.dtype(float), "TIMESTAMP": "M8[ns]"}
for field in schema_fields:
name = str(field["name"])
if field["mode"].upper() == "REPEATED":
yield name, object
else:
dtype = dtype_map.get(field["type"].upper(), object)
yield name, dtype
def _parse_data(schema, rows):
    """Build a DataFrame from BigQuery result rows, casting each column
    to the dtype implied by the schema."""
    column_dtypes = OrderedDict(_parse_schema(schema["fields"]))
    df = DataFrame(data=(iter(r) for r in rows), columns=column_dtypes.keys())
    for name, dtype in column_dtypes.items():
        df[name] = df[name].astype(dtype)
    return df
def read_gbq(
    query,
    project_id=None,
    index_col=None,
    col_order=None,
    reauth=False,
    private_key=None,
    auth_local_webserver=False,
    dialect=None,
    location=None,
    configuration=None,
    verbose=None,
):
    r"""Load data from Google BigQuery using google-cloud-python

    The main method a user calls to execute a Query in Google BigQuery
    and read results into a pandas DataFrame.

    This method uses the Google Cloud client library to make requests to
    Google BigQuery, documented `here
    <https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.

    See the :ref:`How to authenticate with Google BigQuery <authentication>`
    guide for authentication instructions.

    Parameters
    ----------
    query : str
        SQL-Like Query to return data values.
    project_id : str, optional
        Google BigQuery Account project ID. Optional when available from
        the environment.
    index_col : str, optional
        Name of result column to use for index in results DataFrame.
    col_order : list(str), optional
        List of BigQuery column names in the desired order for results
        DataFrame.
    reauth : boolean, default False
        Force Google BigQuery to re-authenticate the user. This is useful
        if multiple accounts are used.
    private_key : str, optional
        Service account private key in JSON format. Can be file path
        or string contents. This is useful for remote server
        authentication (eg. Jupyter/IPython notebook on remote host).
    auth_local_webserver : boolean, default False
        Use the `local webserver flow`_ instead of the `console flow`_
        when getting user credentials.

        .. _local webserver flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
        .. _console flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console

        .. versionadded:: 0.2.0
    dialect : str, default 'legacy'
        Note: The default value is changing to 'standard' in a future verion.

        SQL syntax dialect to use. Value can be one of:

        ``'legacy'``
            Use BigQuery's legacy SQL dialect. For more information see
            `BigQuery Legacy SQL Reference
            <https://cloud.google.com/bigquery/docs/reference/legacy-sql>`__.
        ``'standard'``
            Use BigQuery's standard SQL, which is
            compliant with the SQL 2011 standard. For more information
            see `BigQuery Standard SQL Reference
            <https://cloud.google.com/bigquery/docs/reference/standard-sql/>`__.
    location : str, optional
        Location where the query job should run. See the `BigQuery locations
        documentation
        <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
        list of available locations. The location must match that of any
        datasets used in the query.

        .. versionadded:: 0.5.0
    configuration : dict, optional
        Query config parameters for job processing.
        For example:

            configuration = {'query': {'useQueryCache': False}}

        For more information see `BigQuery REST API Reference
        <https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__.
    verbose : None, deprecated
        Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
        to adjust verbosity instead
        <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.

    Returns
    -------
    df: DataFrame
        DataFrame representing results of query.
    """
    if dialect is None:
        dialect = "legacy"
        warnings.warn(
            'The default value for dialect is changing to "standard" in a '
            'future version. Pass in dialect="legacy" to disable this '
            "warning.",
            FutureWarning,
            stacklevel=2,
        )
    _test_google_api_imports()
    if verbose is not None and SHOW_VERBOSE_DEPRECATION:
        warnings.warn(
            "verbose is deprecated and will be removed in "
            "a future version. Set logging level in order to vary "
            "verbosity",
            FutureWarning,
            stacklevel=2,
        )
    if dialect not in ("legacy", "standard"):
        raise ValueError("'{0}' is not valid for dialect".format(dialect))
    connector = GbqConnector(
        project_id,
        reauth=reauth,
        private_key=private_key,
        dialect=dialect,
        auth_local_webserver=auth_local_webserver,
        location=location,
    )
    schema, rows = connector.run_query(query, configuration=configuration)
    final_df = _parse_data(schema, rows)
    # Reindex the DataFrame on the provided column
    if index_col is not None:
        if index_col in final_df.columns:
            final_df.set_index(index_col, inplace=True)
        else:
            raise InvalidIndexColumn(
                'Index column "{0}" does not exist in DataFrame.'.format(
                    index_col
                )
            )
    # Change the order of columns in the DataFrame based on provided list
    if col_order is not None:
        if sorted(col_order) == sorted(final_df.columns):
            final_df = final_df[col_order]
        else:
            raise InvalidColumnOrder(
                "Column order does not match this DataFrame."
            )
    # cast BOOLEAN and INTEGER columns from object to bool/int
    # if they dont have any nulls AND field mode is not repeated (i.e., array)
    type_map = {"BOOLEAN": bool, "INTEGER": np.int64}
    for field in schema["fields"]:
        if (
            field["type"].upper() in type_map
            and final_df[field["name"]].notnull().all()
            and field["mode"].lower() != "repeated"
        ):
            final_df[field["name"]] = final_df[field["name"]].astype(
                type_map[field["type"].upper()]
            )
    connector.log_elapsed_seconds(
        "Total time taken",
        datetime.now().strftime("s.\nFinished at %Y-%m-%d %H:%M:%S."),
    )
    return final_df
def to_gbq(
    dataframe,
    destination_table,
    project_id=None,
    chunksize=None,
    reauth=False,
    if_exists="fail",
    private_key=None,
    auth_local_webserver=False,
    table_schema=None,
    location=None,
    progress_bar=True,
    verbose=None,
):
    """Write a DataFrame to a Google BigQuery table.

    The main method a user calls to export pandas DataFrame contents to
    Google BigQuery table.

    This method uses the Google Cloud client library to make requests to
    Google BigQuery, documented `here
    <https://google-cloud-python.readthedocs.io/en/latest/bigquery/usage.html>`__.

    See the :ref:`How to authenticate with Google BigQuery <authentication>`
    guide for authentication instructions.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        DataFrame to be written to a Google BigQuery table.
    destination_table : str
        Name of table to be written, in the form ``dataset.tablename``.
    project_id : str, optional
        Google BigQuery Account project ID. Optional when available from
        the environment.
    chunksize : int, optional
        Number of rows to be inserted in each chunk from the dataframe.
        Set to ``None`` to load the whole dataframe at once.
    reauth : bool, default False
        Force Google BigQuery to re-authenticate the user. This is useful
        if multiple accounts are used.
    if_exists : str, default 'fail'
        Behavior when the destination table exists. Value can be one of:

        ``'fail'``
            If table exists, do nothing.
        ``'replace'``
            If table exists, drop it, recreate it, and insert data.
        ``'append'``
            If table exists, insert data. Create if does not exist.
    private_key : str, optional
        Service account private key in JSON format. Can be file path
        or string contents. This is useful for remote server
        authentication (eg. Jupyter/IPython notebook on remote host).
    auth_local_webserver : bool, default False
        Use the `local webserver flow`_ instead of the `console flow`_
        when getting user credentials.

        .. _local webserver flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
        .. _console flow:
            http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console

        .. versionadded:: 0.2.0
    table_schema : list of dicts, optional
        List of BigQuery table fields to which according DataFrame
        columns conform to, e.g. ``[{'name': 'col1', 'type':
        'STRING'},...]``. If schema is not provided, it will be
        generated according to dtypes of DataFrame columns. See
        BigQuery API documentation on available names of a field.

        .. versionadded:: 0.3.1
    location : str, optional
        Location where the load job should run. See the `BigQuery locations
        documentation
        <https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
        list of available locations. The location must match that of the
        target dataset.

        .. versionadded:: 0.5.0
    progress_bar : bool, default True
        Use the library `tqdm` to show the progress bar for the upload,
        chunk by chunk.

        .. versionadded:: 0.5.0
    verbose : bool, deprecated
        Deprecated in Pandas-GBQ 0.4.0. Use the `logging module
        to adjust verbosity instead
        <https://pandas-gbq.readthedocs.io/en/latest/intro.html#logging>`__.
    """
    _test_google_api_imports()
    if verbose is not None and SHOW_VERBOSE_DEPRECATION:
        warnings.warn(
            "verbose is deprecated and will be removed in "
            "a future version. Set logging level in order to vary "
            "verbosity",
            FutureWarning,
            stacklevel=1,
        )
    if if_exists not in ("fail", "replace", "append"):
        raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
    if "." not in destination_table:
        raise NotFoundException(
            "Invalid Table Name. Should be of the form 'datasetId.tableId' "
        )
    connector = GbqConnector(
        project_id,
        reauth=reauth,
        private_key=private_key,
        auth_local_webserver=auth_local_webserver,
        location=location,
        # Avoid reads when writing tables.
        # https://github.com/pydata/pandas-gbq/issues/202
        try_credentials=lambda project, creds: creds,
    )
    dataset_id, table_id = destination_table.rsplit(".", 1)
    table = _Table(
        project_id, dataset_id, reauth=reauth, private_key=private_key
    )
    # Derive a schema from the DataFrame dtypes when none is supplied.
    if not table_schema:
        table_schema = _generate_bq_schema(dataframe)
    else:
        table_schema = dict(fields=table_schema)
    # If table exists, check if_exists parameter
    if table.exists(table_id):
        if if_exists == "fail":
            raise TableCreationError(
                "Could not create the table because it "
                "already exists. "
                "Change the if_exists parameter to "
                "'append' or 'replace' data."
            )
        elif if_exists == "replace":
            connector.delete_and_recreate_table(
                dataset_id, table_id, table_schema
            )
        elif if_exists == "append":
            if not connector.schema_is_subset(
                dataset_id, table_id, table_schema
            ):
                raise InvalidSchema(
                    "Please verify that the structure and "
                    "data types in the DataFrame match the "
                    "schema of the destination table."
                )
    else:
        table.create(table_id, table_schema)
    connector.load_data(
        dataframe,
        dataset_id,
        table_id,
        chunksize=chunksize,
        schema=table_schema,
        progress_bar=progress_bar,
    )
def generate_bq_schema(df, default_type="STRING"):
    """DEPRECATED: Given a passed df, generate the associated Google BigQuery
    schema.

    Parameters
    ----------
    df : DataFrame
    default_type : string
        The default big query type in case the type of the column
        does not exist in the schema.
    """
    # deprecation TimeSeries, #11121
    deprecation_message = (
        "generate_bq_schema is deprecated and will be removed in "
        "a future version"
    )
    warnings.warn(deprecation_message, FutureWarning, stacklevel=2)
    return _generate_bq_schema(df, default_type=default_type)
def _generate_bq_schema(df, default_type="STRING"):
    """Generate a BigQuery schema dict from the DataFrame's dtypes.

    Thin wrapper around :func:`pandas_gbq.schema.generate_bq_schema`,
    kept private so the deprecated public ``generate_bq_schema`` can
    delegate here.
    """
    from pandas_gbq import schema
    return schema.generate_bq_schema(df, default_type=default_type)
class _Table(GbqConnector):
    """Table-level helper (exists/create/delete) bound to one dataset."""
    def __init__(self, project_id, dataset_id, reauth=False, private_key=None):
        # Stash the dataset ID before GbqConnector.__init__ builds the
        # client.
        self.dataset_id = dataset_id
        super(_Table, self).__init__(project_id, reauth, private_key)
    def exists(self, table_id):
        """ Check if a table exists in Google BigQuery

        Parameters
        ----------
        table : str
            Name of table to be verified

        Returns
        -------
        boolean
            true if table exists, otherwise false
        """
        from google.api_core.exceptions import NotFound
        table_ref = self.client.dataset(self.dataset_id).table(table_id)
        try:
            self.client.get_table(table_ref)
            return True
        except NotFound:
            return False
        except self.http_error as ex:
            self.process_http_error(ex)
    def create(self, table_id, schema):
        """ Create a table in Google BigQuery given a table and schema

        Parameters
        ----------
        table : str
            Name of table to be written
        schema : str
            Use the generate_bq_schema to generate your table schema from a
            dataframe.
        """
        from google.cloud.bigquery import SchemaField
        from google.cloud.bigquery import Table
        if self.exists(table_id):
            raise TableCreationError(
                "Table {0} already " "exists".format(table_id)
            )
        # Create the enclosing dataset first when it does not exist yet.
        if not _Dataset(self.project_id, private_key=self.private_key).exists(
            self.dataset_id
        ):
            _Dataset(self.project_id, private_key=self.private_key).create(
                self.dataset_id
            )
        table_ref = self.client.dataset(self.dataset_id).table(table_id)
        table = Table(table_ref)
        # Manually create the schema objects, adding NULLABLE mode
        # as a workaround for
        # https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4456
        for field in schema["fields"]:
            if "mode" not in field:
                field["mode"] = "NULLABLE"
        table.schema = [
            SchemaField.from_api_repr(field) for field in schema["fields"]
        ]
        try:
            self.client.create_table(table)
        except self.http_error as ex:
            self.process_http_error(ex)
    def delete(self, table_id):
        """ Delete a table in Google BigQuery

        Parameters
        ----------
        table : str
            Name of table to be deleted
        """
        from google.api_core.exceptions import NotFound
        if not self.exists(table_id):
            raise NotFoundException("Table does not exist")
        table_ref = self.client.dataset(self.dataset_id).table(table_id)
        try:
            self.client.delete_table(table_ref)
        except NotFound:
            # Ignore 404 error which may occur if table already deleted
            pass
        except self.http_error as ex:
            self.process_http_error(ex)
class _Dataset(GbqConnector):
    """Thin wrapper around dataset-level Google BigQuery operations."""

    def __init__(self, project_id, reauth=False, private_key=None):
        super(_Dataset, self).__init__(project_id, reauth, private_key)

    def exists(self, dataset_id):
        """ Check if a dataset exists in Google BigQuery

        Parameters
        ----------
        dataset_id : str
            Name of dataset to be verified

        Returns
        -------
        boolean
            true if dataset exists, otherwise false
        """
        from google.api_core.exceptions import NotFound

        try:
            self.client.get_dataset(self.client.dataset(dataset_id))
        except NotFound:
            return False
        except self.http_error as ex:
            self.process_http_error(ex)
        else:
            return True

    def datasets(self):
        """ Return a list of datasets in Google BigQuery

        Returns
        -------
        list
            List of dataset ids under the project
        """
        found = []
        try:
            # Append inside the try so a partial listing is still returned
            # if the HTTP error handler does not raise.
            for dataset in self.client.list_datasets():
                found.append(dataset.dataset_id)
        except self.http_error as ex:
            self.process_http_error(ex)
        return found

    def create(self, dataset_id):
        """ Create a dataset in Google BigQuery

        Parameters
        ----------
        dataset_id : str
            Name of dataset to be written
        """
        from google.cloud.bigquery import Dataset

        if self.exists(dataset_id):
            raise DatasetCreationError(
                "Dataset {0} already exists".format(dataset_id)
            )
        new_dataset = Dataset(self.client.dataset(dataset_id))
        try:
            self.client.create_dataset(new_dataset)
        except self.http_error as ex:
            self.process_http_error(ex)

    def delete(self, dataset_id):
        """ Delete a dataset in Google BigQuery

        Parameters
        ----------
        dataset_id : str
            Name of dataset to be deleted
        """
        from google.api_core.exceptions import NotFound

        if not self.exists(dataset_id):
            raise NotFoundException(
                "Dataset {0} does not exist".format(dataset_id)
            )
        try:
            self.client.delete_dataset(self.client.dataset(dataset_id))
        except NotFound:
            # A 404 means the dataset is already gone; treat as success.
            pass
        except self.http_error as ex:
            self.process_http_error(ex)

    def tables(self, dataset_id):
        """ List tables in the specific dataset in Google BigQuery

        Parameters
        ----------
        dataset_id : str
            Name of dataset to list tables for

        Returns
        -------
        list
            List of table ids under the dataset
        """
        found = []
        try:
            listing = self.client.list_tables(
                self.client.dataset(dataset_id)
            )
            for table in listing:
                found.append(table.table_id)
        except self.http_error as ex:
            self.process_http_error(ex)
        return found
| [
"logging.getLogger",
"google.cloud.bigquery.QueryJobConfig.from_api_repr",
"pandas_gbq.load.load_chunks",
"google.cloud.bigquery.SchemaField.from_api_repr",
"pandas_gbq.exceptions.AccessDenied",
"tqdm.tqdm",
"os.environ.get",
"datetime.datetime.now",
"pkg_resources.parse_version",
"google.cloud.bi... | [((227, 254), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (244, 254), False, 'import logging\n'), ((761, 798), 'pkg_resources.parse_version', 'pkg_resources.parse_version', (['"""0.32.0"""'], {}), "('0.32.0')\n", (788, 798), False, 'import pkg_resources\n'), ((1468, 1505), 'pkg_resources.parse_version', 'pkg_resources.parse_version', (['"""0.23.0"""'], {}), "('0.23.0')\n", (1495, 1505), False, 'import pkg_resources\n'), ((17297, 17342), 'os.environ.get', 'os.environ.get', (['"""PANDAS_GBQ_CREDENTIALS_FILE"""'], {}), "('PANDAS_GBQ_CREDENTIALS_FILE')\n", (17311, 17342), False, 'import os\n'), ((30832, 30959), 'warnings.warn', 'warnings.warn', (['"""generate_bq_schema is deprecated and will be removed in a future version"""', 'FutureWarning'], {'stacklevel': '(2)'}), "(\n 'generate_bq_schema is deprecated and will be removed in a future version',\n FutureWarning, stacklevel=2)\n", (30845, 30959), False, 'import warnings\n'), ((31156, 31212), 'pandas_gbq.schema.generate_bq_schema', 'schema.generate_bq_schema', (['df'], {'default_type': 'default_type'}), '(df, default_type=default_type)\n', (31181, 31212), False, 'from pandas_gbq import schema\n'), ((832, 887), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['"""google-cloud-bigquery"""'], {}), "('google-cloud-bigquery')\n", (862, 887), False, 'import pkg_resources\n'), ((1364, 1404), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['"""pandas"""'], {}), "('pandas')\n", (1394, 1404), False, 'import pkg_resources\n'), ((7907, 7918), 'time.time', 'time.time', ([], {}), '()\n', (7916, 7918), False, 'import time\n'), ((8705, 8775), 'google.cloud.bigquery.Client', 'bigquery.Client', ([], {'project': 'self.project_id', 'credentials': 'self.credentials'}), '(project=self.project_id, credentials=self.credentials)\n', (8720, 8775), False, 'from google.cloud import bigquery\n'), ((17528, 17543), 'numpy.dtype', 'np.dtype', 
(['float'], {}), '(float)\n', (17536, 17543), True, 'import numpy as np\n'), ((21955, 22137), 'warnings.warn', 'warnings.warn', (['"""The default value for dialect is changing to "standard" in a future version. Pass in dialect="legacy" to disable this warning."""', 'FutureWarning'], {'stacklevel': '(2)'}), '(\n \'The default value for dialect is changing to "standard" in a future version. Pass in dialect="legacy" to disable this warning.\'\n , FutureWarning, stacklevel=2)\n', (21968, 22137), False, 'import warnings\n'), ((22303, 22466), 'warnings.warn', 'warnings.warn', (['"""verbose is deprecated and will be removed in a future version. Set logging level in order to vary verbosity"""', 'FutureWarning'], {'stacklevel': '(2)'}), "(\n 'verbose is deprecated and will be removed in a future version. Set logging level in order to vary verbosity'\n , FutureWarning, stacklevel=2)\n", (22316, 22466), False, 'import warnings\n'), ((28169, 28332), 'warnings.warn', 'warnings.warn', (['"""verbose is deprecated and will be removed in a future version. Set logging level in order to vary verbosity"""', 'FutureWarning'], {'stacklevel': '(1)'}), "(\n 'verbose is deprecated and will be removed in a future version. 
Set logging level in order to vary verbosity'\n , FutureWarning, stacklevel=1)\n", (28182, 28332), False, 'import warnings\n'), ((32995, 33011), 'google.cloud.bigquery.Table', 'Table', (['table_ref'], {}), '(table_ref)\n', (33000, 33011), False, 'from google.cloud.bigquery import Table\n'), ((6890, 7059), 'pandas_gbq.auth.get_credentials', 'auth.get_credentials', ([], {'private_key': 'private_key', 'project_id': 'project_id', 'reauth': 'reauth', 'auth_local_webserver': 'auth_local_webserver', 'try_credentials': 'try_credentials'}), '(private_key=private_key, project_id=project_id, reauth\n =reauth, auth_local_webserver=auth_local_webserver, try_credentials=\n try_credentials)\n', (6910, 7059), False, 'from pandas_gbq import auth\n'), ((12994, 13121), 'pandas_gbq.load.load_chunks', 'load.load_chunks', (['self.client', 'dataframe', 'dataset_id', 'table_id'], {'chunksize': 'chunksize', 'schema': 'schema', 'location': 'self.location'}), '(self.client, dataframe, dataset_id, table_id, chunksize=\n chunksize, schema=schema, location=self.location)\n', (13010, 13121), False, 'from pandas_gbq import load\n'), ((33347, 33379), 'google.cloud.bigquery.SchemaField.from_api_repr', 'SchemaField.from_api_repr', (['field'], {}), '(field)\n', (33372, 33379), False, 'from google.cloud.bigquery import SchemaField\n'), ((7976, 7987), 'time.time', 'time.time', ([], {}), '()\n', (7985, 7987), False, 'import time\n'), ((13307, 13324), 'tqdm.tqdm', 'tqdm.tqdm', (['chunks'], {}), '(chunks)\n', (13316, 13324), False, 'import tqdm\n'), ((24304, 24318), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (24316, 24318), False, 'from datetime import datetime\n'), ((10175, 10224), 'google.cloud.bigquery.QueryJobConfig.from_api_repr', 'bigquery.QueryJobConfig.from_api_repr', (['job_config'], {}), '(job_config)\n', (10212, 10224), False, 'from google.cloud import bigquery\n'), ((10423, 10484), 'pandas_gbq.exceptions.AccessDenied', 'AccessDenied', (['"""The service account credentials are 
not valid"""'], {}), "('The service account credentials are not valid')\n", (10435, 10484), False, 'from pandas_gbq.exceptions import AccessDenied\n'), ((10563, 10680), 'pandas_gbq.exceptions.AccessDenied', 'AccessDenied', (['"""The credentials have been revoked or expired, please re-run the application to re-authorize"""'], {}), "(\n 'The credentials have been revoked or expired, please re-run the application to re-authorize'\n )\n", (10575, 10680), False, 'from pandas_gbq.exceptions import AccessDenied\n')] |
import pytest
import numpy as np
def assert_equal(arr, arr2):
    """Assert that two numpy arrays agree element-wise and share a dtype."""
    contents_match = np.array_equal(arr, arr2)
    assert contents_match
    assert arr.dtype == arr2.dtype
def test_bulk_importer_ndarray(repo):
    """Bulk-import 200 ndarray samples via a UDF and verify they round-trip."""
    from hangar.bulk_importer import run_bulk_import
    from hangar.bulk_importer import UDF_Return
    def make_ndarray(column, key, shape, dtype, multiplier):
        """UDF: yield one deterministic (shape) array per kwargs dict."""
        size = np.prod(shape)
        arr = np.arange(size, dtype=dtype).reshape(shape) * multiplier
        yield UDF_Return(column=column, key=key, data=arr)
    co = repo.checkout(write=True)
    co.add_ndarray_column('arr', shape=(5, 5), dtype=np.uint32)
    co.commit('first')
    co.close()
    kwargs = []
    expected_kv = []
    # Build one kwargs dict per sample and record the UDF output locally so
    # the imported data can be compared against it after the import.
    for idx in range(200):
        _kw_dict = {
            'column': 'arr',
            'key': idx,
            'shape': (5, 5),
            'dtype': np.uint32,
            'multiplier': idx
        }
        kwargs.append(_kw_dict)
        for _udf_val in make_ndarray(**_kw_dict):
            expected_kv.append(_udf_val)
    assert len(expected_kv) == 200
    # Run the multiprocess bulk import against the same UDF and kwargs.
    run_bulk_import(
        repo,
        branch_name='master',
        column_names=['arr'],
        udf=make_ndarray,
        udf_kwargs=kwargs,
        ncpus=2)
    co = repo.checkout()
    try:
        arr_col = co['arr']
        assert len(arr_col) == 200
        for _expected_udf_val in expected_kv:
            assert _expected_udf_val.key in arr_col
            assert_equal(arr_col[_expected_udf_val.key], _expected_udf_val.data)
    finally:
        co.close()
def test_bulk_importer_pystr(repo):
    """Bulk-import 200 str samples via a UDF and verify they round-trip."""
    from hangar.bulk_importer import run_bulk_import
    from hangar.bulk_importer import UDF_Return
    def make_pystr(column, key, str_val):
        """UDF: yield the provided string unchanged for (column, key)."""
        yield UDF_Return(column=column, key=key, data=str_val)
    co = repo.checkout(write=True)
    co.add_str_column('str')
    co.commit('first')
    co.close()
    kwargs = []
    expected_kv = []
    # Build one kwargs dict per sample and record the expected values locally.
    for idx in range(200):
        _kw_dict = {
            'column': 'str',
            'key': idx,
            'str_val': f'{str(idx) * 2}',
        }
        kwargs.append(_kw_dict)
        for _udf_val in make_pystr(**_kw_dict):
            expected_kv.append(_udf_val)
    assert len(expected_kv) == 200
    run_bulk_import(
        repo,
        branch_name='master',
        column_names=['str'],
        udf=make_pystr,
        udf_kwargs=kwargs,
        ncpus=2)
    co = repo.checkout()
    try:
        str_col = co['str']
        assert len(str_col) == 200
        for _expected_udf_val in expected_kv:
            assert _expected_udf_val.key in str_col
            assert str_col[_expected_udf_val.key] == _expected_udf_val.data
    finally:
        co.close()
def test_bulk_importer_pybytes(repo):
    """Bulk-import 200 bytes samples via a UDF and verify they round-trip."""
    from hangar.bulk_importer import run_bulk_import
    from hangar.bulk_importer import UDF_Return
    def make_pybytes(column, key, str_val):
        """UDF: yield the UTF-8 encoding of ``str_val`` for (column, key)."""
        raw = str_val.encode()
        yield UDF_Return(column=column, key=key, data=raw)
    co = repo.checkout(write=True)
    co.add_bytes_column('bytes')
    co.commit('first')
    co.close()
    kwargs = []
    expected_kv = []
    # Build one kwargs dict per sample and record the expected values locally.
    for idx in range(200):
        _kw_dict = {
            'column': 'bytes',
            'key': idx,
            'str_val': f'{str(idx) * 2}',
        }
        kwargs.append(_kw_dict)
        for _udf_val in make_pybytes(**_kw_dict):
            expected_kv.append(_udf_val)
    assert len(expected_kv) == 200
    run_bulk_import(
        repo,
        branch_name='master',
        column_names=['bytes'],
        udf=make_pybytes,
        udf_kwargs=kwargs,
        ncpus=2)
    co = repo.checkout()
    try:
        bytes_col = co['bytes']
        assert len(bytes_col) == 200
        for _expected_udf_val in expected_kv:
            assert _expected_udf_val.key in bytes_col
            assert bytes_col[_expected_udf_val.key] == _expected_udf_val.data
    finally:
        co.close()
def test_bulk_importer_two_col_pybytes_pystr(repo):
    """Bulk-import into a str and a bytes column at once and verify both.

    A composed UDF yields two UDF_Return values (one per column) for every
    kwargs dict, so 200 samples produce 400 expected records.
    """
    from hangar.bulk_importer import run_bulk_import
    from hangar.bulk_importer import UDF_Return
    def _make_pystr(column, key, str_val):
        """Yield the string unchanged for (column, key)."""
        yield UDF_Return(column=column, key=key, data=str_val)
    def _make_pybytes(column, key, str_val):
        """Yield the UTF-8 encoding of ``str_val`` for (column, key)."""
        raw = str_val.encode()
        yield UDF_Return(column=column, key=key, data=raw)
    def make_pystr_pybytes(str_col, bytes_col, key, str_val):
        """Composed UDF: one str record and one bytes record per sample."""
        yield from _make_pystr(column=str_col, key=key, str_val=str_val)
        yield from _make_pybytes(column=bytes_col, key=key, str_val=str_val)
    co = repo.checkout(write=True)
    co.add_bytes_column('bytes')
    co.add_str_column('str')
    co.commit('first')
    co.close()
    kwargs = []
    expected_kv = []
    for idx in range(200):
        _kw_dict = {
            'str_col': 'str',
            'bytes_col': 'bytes',
            'key': idx,
            'str_val': f'{str(idx) * 2}',
        }
        kwargs.append(_kw_dict)
        for _udf_val in make_pystr_pybytes(**_kw_dict):
            expected_kv.append(_udf_val)
    assert len(expected_kv) == 400
    run_bulk_import(
        repo,
        branch_name='master',
        column_names=['bytes', 'str'],
        udf=make_pystr_pybytes,
        udf_kwargs=kwargs,
        ncpus=2)
    co = repo.checkout()
    try:
        pybytes_col = co['bytes']
        pystr_col = co['str']
        assert len(pybytes_col) == 200
        assert len(pystr_col) == 200
        for _expected_udf_val in expected_kv:
            assert _expected_udf_val.column in ['str', 'bytes']
            if _expected_udf_val.column == 'str':
                assert _expected_udf_val.key in pystr_col
                assert pystr_col[_expected_udf_val.key] == _expected_udf_val.data
            elif _expected_udf_val.column == 'bytes':
                # Bug fix: membership must be checked in the bytes column
                # (previously checked pystr_col due to a copy-paste error).
                assert _expected_udf_val.key in pybytes_col
                assert pybytes_col[_expected_udf_val.key] == _expected_udf_val.data
            else:
                raise ValueError(_expected_udf_val.column)
    finally:
        co.close()
def test_signature_wrong(repo):
    """A kwargs dict that does not match the UDF signature must raise TypeError."""
    from hangar.bulk_importer import run_bulk_import
    from hangar.bulk_importer import UDF_Return
    def wrong_sig_udf(a, b, c=None):
        """UDF whose parameters do not match the kwargs dicts below."""
        yield UDF_Return(column='str', key=a, data=f'{a} {b} {c}')
    co = repo.checkout(write=True)
    co.add_str_column('str')
    co.commit('first')
    co.close()
    # 'str_val' is not a parameter of wrong_sig_udf, and 'b' is missing.
    kwargs = [
        {
            'a': 'bytes',
            'str_val': f'{str(idx) * 2}',
        }
        for idx in range(200)
    ]
    with pytest.raises(TypeError):
        run_bulk_import(
            repo,
            branch_name='master',
            column_names=['str'],
            udf=wrong_sig_udf,
            udf_kwargs=kwargs,
            ncpus=2)
| [
"numpy.prod",
"hangar.bulk_importer.run_bulk_import",
"hangar.bulk_importer.UDF_Return",
"numpy.array_equal",
"pytest.raises",
"numpy.arange"
] | [((75, 100), 'numpy.array_equal', 'np.array_equal', (['arr', 'arr2'], {}), '(arr, arr2)\n', (89, 100), True, 'import numpy as np\n'), ((1041, 1157), 'hangar.bulk_importer.run_bulk_import', 'run_bulk_import', (['repo'], {'branch_name': '"""master"""', 'column_names': "['arr']", 'udf': 'make_ndarray', 'udf_kwargs': 'kwargs', 'ncpus': '(2)'}), "(repo, branch_name='master', column_names=['arr'], udf=\n make_ndarray, udf_kwargs=kwargs, ncpus=2)\n", (1056, 1157), False, 'from hangar.bulk_importer import run_bulk_import\n'), ((2212, 2326), 'hangar.bulk_importer.run_bulk_import', 'run_bulk_import', (['repo'], {'branch_name': '"""master"""', 'column_names': "['str']", 'udf': 'make_pystr', 'udf_kwargs': 'kwargs', 'ncpus': '(2)'}), "(repo, branch_name='master', column_names=['str'], udf=\n make_pystr, udf_kwargs=kwargs, ncpus=2)\n", (2227, 2326), False, 'from hangar.bulk_importer import run_bulk_import\n'), ((3415, 3533), 'hangar.bulk_importer.run_bulk_import', 'run_bulk_import', (['repo'], {'branch_name': '"""master"""', 'column_names': "['bytes']", 'udf': 'make_pybytes', 'udf_kwargs': 'kwargs', 'ncpus': '(2)'}), "(repo, branch_name='master', column_names=['bytes'], udf=\n make_pybytes, udf_kwargs=kwargs, ncpus=2)\n", (3430, 3533), False, 'from hangar.bulk_importer import run_bulk_import\n'), ((5035, 5165), 'hangar.bulk_importer.run_bulk_import', 'run_bulk_import', (['repo'], {'branch_name': '"""master"""', 'column_names': "['bytes', 'str']", 'udf': 'make_pystr_pybytes', 'udf_kwargs': 'kwargs', 'ncpus': '(2)'}), "(repo, branch_name='master', column_names=['bytes', 'str'],\n udf=make_pystr_pybytes, udf_kwargs=kwargs, ncpus=2)\n", (5050, 5165), False, 'from hangar.bulk_importer import run_bulk_import\n'), ((354, 368), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (361, 368), True, 'import numpy as np\n'), ((6519, 6543), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (6532, 6543), False, 'import pytest\n'), ((6553, 6670), 
'hangar.bulk_importer.run_bulk_import', 'run_bulk_import', (['repo'], {'branch_name': '"""master"""', 'column_names': "['str']", 'udf': 'wrong_sig_udf', 'udf_kwargs': 'kwargs', 'ncpus': '(2)'}), "(repo, branch_name='master', column_names=['str'], udf=\n wrong_sig_udf, udf_kwargs=kwargs, ncpus=2)\n", (6568, 6670), False, 'from hangar.bulk_importer import run_bulk_import\n'), ((454, 498), 'hangar.bulk_importer.UDF_Return', 'UDF_Return', ([], {'column': 'column', 'key': 'key', 'data': 'arr'}), '(column=column, key=key, data=arr)\n', (464, 498), False, 'from hangar.bulk_importer import UDF_Return\n'), ((1707, 1755), 'hangar.bulk_importer.UDF_Return', 'UDF_Return', ([], {'column': 'column', 'key': 'key', 'data': 'str_val'}), '(column=column, key=key, data=str_val)\n', (1717, 1755), False, 'from hangar.bulk_importer import UDF_Return\n'), ((2906, 2950), 'hangar.bulk_importer.UDF_Return', 'UDF_Return', ([], {'column': 'column', 'key': 'key', 'data': 'raw'}), '(column=column, key=key, data=raw)\n', (2916, 2950), False, 'from hangar.bulk_importer import UDF_Return\n'), ((4105, 4153), 'hangar.bulk_importer.UDF_Return', 'UDF_Return', ([], {'column': 'column', 'key': 'key', 'data': 'str_val'}), '(column=column, key=key, data=str_val)\n', (4115, 4153), False, 'from hangar.bulk_importer import UDF_Return\n'), ((4245, 4289), 'hangar.bulk_importer.UDF_Return', 'UDF_Return', ([], {'column': 'column', 'key': 'key', 'data': 'raw'}), '(column=column, key=key, data=raw)\n', (4255, 4289), False, 'from hangar.bulk_importer import UDF_Return\n'), ((6178, 6230), 'hangar.bulk_importer.UDF_Return', 'UDF_Return', ([], {'column': '"""str"""', 'key': 'a', 'data': 'f"""{a} {b} {c}"""'}), "(column='str', key=a, data=f'{a} {b} {c}')\n", (6188, 6230), False, 'from hangar.bulk_importer import UDF_Return\n'), ((383, 411), 'numpy.arange', 'np.arange', (['size'], {'dtype': 'dtype'}), '(size, dtype=dtype)\n', (392, 411), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import inspect
import numpy as np
import pprint
from abc import ABCMeta, abstractmethod
from fvcore.transforms.transform import Transform, TransformList
__all__ = ["TransformGen", "apply_transform_gens"]
def check_dtype(img):
    """Validate that ``img`` is an image array usable by TransformGen.

    Accepts 2D (grayscale) or 3D (multi-channel) numpy arrays whose dtype
    is either uint8 or a floating point type.

    Raises:
        AssertionError: if ``img`` is not an ndarray, has an integer dtype
            other than uint8, or is not 2- or 3-dimensional.
    """
    assert isinstance(img, np.ndarray), "[TransformGen] Needs an numpy array, but got a {}!".format(
        type(img)
    )
    # Bug fix: ``isinstance(img.dtype, np.integer)`` was always False because
    # ``img.dtype`` is a dtype object, not an integer scalar, so the integer
    # guard never fired. ``np.issubdtype`` performs the intended dtype check.
    assert not np.issubdtype(img.dtype, np.integer) or (
        img.dtype == np.uint8
    ), "[TransformGen] Got image of type {}, use uint8 or floating points instead!".format(
        img.dtype
    )
    assert img.ndim in [2, 3], img.ndim
class TransformGen(metaclass=ABCMeta):
    """
    TransformGen takes an image of type uint8 in range [0, 255], or
    floating point in range [0, 1] or [0, 255] as input.
    It creates a :class:`Transform` based on the given image, sometimes with randomness.
    The transform can then be used to transform images
    or other data (boxes, points, annotations, etc.) associated with it.
    The assumption made in this class
    is that the image itself is sufficient to instantiate a transform.
    When this assumption is not true, you need to create the transforms by your own.
    A list of `TransformGen` can be applied with :func:`apply_transform_gens`.
    """
    def _init(self, params=None):
        """Copy constructor arguments onto ``self`` as attributes.

        Intended to be called from a subclass ``__init__`` with ``locals()``;
        ``self`` and underscore-prefixed names are skipped.
        """
        if params:
            for k, v in params.items():
                if k != "self" and not k.startswith("_"):
                    setattr(self, k, v)
    @abstractmethod
    def get_transform(self, img):
        """Return a :class:`Transform` instance derived from ``img``."""
        pass
    def _rand_range(self, low=1.0, high=None, size=None):
        """
        Uniform float random number between low and high.

        With a single argument, samples from [0, low). ``size=None`` yields
        a scalar (an empty size list is passed to ``np.random.uniform``).
        """
        if high is None:
            low, high = 0, low
        if size is None:
            size = []
        return np.random.uniform(low, high, size)
    def __repr__(self):
        """
        Produce something like:
        "MyTransformGen(field1={self.field1}, field2={self.field2})"
        """
        try:
            # Reconstruct the constructor call from instance attributes that
            # mirror the __init__ parameters (as set up by _init).
            sig = inspect.signature(self.__init__)
            classname = type(self).__name__
            argstr = []
            for name, param in sig.parameters.items():
                # *args/**kwargs cannot be recovered from attributes.
                assert (
                    param.kind != param.VAR_POSITIONAL and param.kind != param.VAR_KEYWORD
                ), "The default __repr__ doesn't support *args or **kwargs"
                assert hasattr(self, name), (
                    "Attribute {} not found! "
                    "Default __repr__ only works if attributes match the constructor.".format(name)
                )
                attr = getattr(self, name)
                default = param.default
                # Omit arguments still holding their default value.
                if default is attr:
                    continue
                argstr.append("{}={}".format(name, pprint.pformat(attr)))
            return "{}({})".format(classname, ", ".join(argstr))
        except AssertionError:
            # Fall back to the generic repr when attributes don't match.
            return super().__repr__()
    __str__ = __repr__
def apply_transform_gens(transform_gens, img):
    """
    Apply a list of :class:`TransformGen` or :class:`Transform` on the input image, and
    returns the transformed image and a list of transforms.
    We cannot simply create and return all transforms without
    applying it to the image, because a subsequent transform may
    need the output of the previous one.
    Args:
        transform_gens (list): list of :class:`TransformGen` or :class:`Transform` instance to
            be applied.
        img (ndarray): uint8 or floating point images with 1 or 3 channels.
    Returns:
        ndarray: the transformed image
        TransformList: contain the transforms that's used.
    """
    # Validate every entry up front, before any transform touches the image.
    for gen_or_tfm in transform_gens:
        assert isinstance(gen_or_tfm, (Transform, TransformGen)), gen_or_tfm
    check_dtype(img)
    applied = []
    for gen_or_tfm in transform_gens:
        if isinstance(gen_or_tfm, TransformGen):
            tfm = gen_or_tfm.get_transform(img)
        else:
            tfm = gen_or_tfm
        assert isinstance(
            tfm, Transform
        ), "TransformGen {} must return an instance of Transform! Got {} instead".format(gen_or_tfm, tfm)
        img = tfm.apply_image(img)
        applied.append(tfm)
    return img, TransformList(applied)
| [
"inspect.signature",
"fvcore.transforms.transform.TransformList",
"pprint.pformat",
"numpy.random.uniform"
] | [((1884, 1918), 'numpy.random.uniform', 'np.random.uniform', (['low', 'high', 'size'], {}), '(low, high, size)\n', (1901, 1918), True, 'import numpy as np\n'), ((4193, 4212), 'fvcore.transforms.transform.TransformList', 'TransformList', (['tfms'], {}), '(tfms)\n', (4206, 4212), False, 'from fvcore.transforms.transform import Transform, TransformList\n'), ((2100, 2132), 'inspect.signature', 'inspect.signature', (['self.__init__'], {}), '(self.__init__)\n', (2117, 2132), False, 'import inspect\n'), ((2858, 2878), 'pprint.pformat', 'pprint.pformat', (['attr'], {}), '(attr)\n', (2872, 2878), False, 'import pprint\n')] |
import os
import numpy as np
from random import choices
from radar_scenes.sequence import get_training_sequences, get_validation_sequences, Sequence
from radar_scenes.labels import ClassificationLabel
from radar_scenes.evaluation import per_point_predictions_to_json, PredictionFileSchemas
class SemSegNetwork:
    """
    Dummy class for a semantic segmentation neural network.

    A real network would take a point cloud X with per-point labels y and
    learn to predict a class label for each point p in X. Instance labels
    (track ids) are NOT predicted. This mock fakes the prediction step.
    """
    def __init__(self):
        # Ground-truth labels injected by the caller before predict() is used.
        self._y_true_test = None
    def train(self, X, y):
        """
        Dummy method for training the neural network.
        :param X: training data. Shape (N_points, N_feat)
        :param y: semantic class label for each point. Shape (N_batch, N_points)
        :return: None
        """
        pass
    def predict(self, X):
        """
        Mock prediction: each point's class is sampled from a probability
        vector biased towards its true class from self._y_true_test, so the
        "prediction" is likely (but not always) correct.
        :param X: validation data. Shape (N_points, N_feat)
        :return: list of predicted class labels, one per point
        """
        weights_for_label = {
            ClassificationLabel.CAR: [0.9, 0.01, 0.01, 0.03, 0.04, 0.01],
            ClassificationLabel.PEDESTRIAN: [0.01, 0.90, 0.06, 0.02, 0.00, 0.01],
            ClassificationLabel.PEDESTRIAN_GROUP: [0.015, 0.04, 0.88, 0.045, 0.01, 0.01],
            ClassificationLabel.TWO_WHEELER: [0.06, 0.04, 0.03, 0.84, 0.01, 0.02],
            ClassificationLabel.LARGE_VEHICLE: [0.05, 0.01, 0.02, 0.03, 0.88, 0.01],
        }
        static_weights = [0.02, 0.005, 0.005, 0.005, 0.005, 0.96]
        y_pred = []
        for label in self._y_true_test:
            weights = weights_for_label.get(ClassificationLabel(label), static_weights)
            y_pred.append(choices([0, 1, 2, 3, 4, 5], weights=weights)[0])
        return y_pred
class InstSegNetwork(SemSegNetwork):
    """Dummy instance segmentation network: mocks per-point class AND instance ids."""
    def __init__(self):
        super().__init__()
        # Ground-truth instance uuids injected by the caller before predict().
        self._y_inst_true = None
        # Next integer instance id to hand out.
        self.last_instance_id = 1
        # Maps true string/bytes uuids to the integer ids used as predictions.
        self.translation_dict = {}
    def train(self, X, y, y_inst):
        pass
    def predict(self, X: np.ndarray):
        """
        Mock prediction of a class label and an instance label per detection.
        :param X: Array holding the individual detections
        :return: predicted class labels and predicted instance labels
        """
        y_pred = super().predict(X)
        # The viewer treats instance id -1 as "no instance" — use it as default.
        y_inst_pred = np.full(len(X), -1, dtype=np.int32)
        # Assign an integer id to every true uuid not yet translated.
        for true_uuid in set(self._y_inst_true):
            if true_uuid not in self.translation_dict:
                self.translation_dict[true_uuid] = self.last_instance_id
                self.last_instance_id += 1
        for idx, true_uuid in enumerate(self._y_inst_true):
            has_no_instance = true_uuid == b"" or true_uuid == ""
            if has_no_instance and y_pred[idx] != ClassificationLabel.STATIC.value:
                # Dynamic point without a true instance id: open a new instance.
                y_inst_pred[idx] = self.last_instance_id
                self.last_instance_id += 1
            elif np.random.random() < 0.1:
                # 10% chance: give this point a spurious, unique instance id.
                y_inst_pred[idx] = self.last_instance_id
                self.last_instance_id += 1
            else:
                # All other points of an instance share the translated id.
                y_inst_pred[idx] = self.translation_dict[true_uuid]
        # Points predicted as STATIC never carry an instance id.
        static_idx = np.where(np.array(y_pred) == ClassificationLabel.STATIC.value)[0]
        y_inst_pred[static_idx] = -1
        return y_pred, y_inst_pred
def features_from_radar_data(radar_data):
    """
    Generate a feature vector for each detection in radar_data.
    The spatial coordinates as well as the ego-motion compensated Doppler
    velocity and the RCS value are used.
    :param radar_data: Input data (indexable by field name, e.g. a structured array)
    :return: float64 array of shape (len(radar_data), 4) with one feature row per point
    """
    feature_fields = ("x_cc", "y_cc", "vr_compensated", "rcs")
    X = np.zeros((len(radar_data), len(feature_fields)))
    for col, field in enumerate(feature_fields):
        X[:, col] = radar_data[field]
    return X
def train_data_generator(training_sequences: list, path_to_dataset: str, return_track_ids=False):
    """
    Mock training data generator.

    Each named sequence is loaded and five scenes are drawn at random (with
    replacement); each chosen scene is converted into a feature matrix and a
    label vector. Sequences missing on disk are silently skipped.

    :param training_sequences: list of sequence names
    :param path_to_dataset: path to the dataset on the hard drive
    :param return_track_ids: If true, in addition to the feature vectors and
        class labels, also the track ids are returned.
    :return: feature vectors and true labels (and optionally track ids).
    """
    for sequence_name in training_sequences:
        try:
            sequence = Sequence.from_json(os.path.join(path_to_dataset, "data", sequence_name, "scenes.json"))
        except FileNotFoundError:
            continue
        timestamps = sequence.timestamps  # all time stamps available in the sequence
        chosen_times = np.random.choice(timestamps, 5)  # five random scenes, with replacement
        for t in chosen_times:
            scene = sequence.get_scene(t)
            radar_data = scene.radar_data  # radar data belonging to this scene
            # map raw label ids; unmappable labels become None
            y_true = np.array([ClassificationLabel.label_to_clabel(x) for x in radar_data["label_id"]])
            valid_points = y_true != None  # noqa: E711  element-wise compare on object array
            y_true = y_true[valid_points]
            y_true = [x.value for x in y_true]  # enum members -> integer values
            # Bug fix: restrict track ids to the valid points so they stay
            # aligned with X and y_true (matches validation_data_generator,
            # which already filters).
            track_ids = radar_data["track_id"][valid_points]
            X = features_from_radar_data(radar_data[valid_points])
            if return_track_ids:
                yield X, y_true, track_ids
            else:
                yield X, y_true
def validation_data_generator(validation_sequences: list, path_to_dataset: str, return_track_ids=False):
    """
    Yield validation data, analogous to the mock training data generator, but
    iterating over every scene of each sequence.

    :param validation_sequences: List of sequence names which should be used
        for validation of a classifier
    :param path_to_dataset: path to the data set on the hard drive
    :param return_track_ids: If true, in addition to the feature vectors and
        class labels, also the track ids are returned.
    :return: feature vectors X, true labels, detection uuids, the sequence
        name, and optionally the track ids
    """
    for sequence_name in validation_sequences:
        scenes_file = os.path.join(path_to_dataset, "data", sequence_name, "scenes.json")
        try:
            sequence = Sequence.from_json(scenes_file)
        except FileNotFoundError:
            continue
        for scene in sequence.scenes():
            radar_data = scene.radar_data
            # Map raw label ids; unmappable labels become None and are dropped.
            mapped_labels = np.array(
                [ClassificationLabel.label_to_clabel(raw) for raw in radar_data["label_id"]]
            )
            valid_points = mapped_labels != None  # noqa: E711  element-wise compare on object array
            kept_labels = [label.value for label in mapped_labels[valid_points]]
            X = features_from_radar_data(radar_data[valid_points])
            uuids = radar_data["uuid"][valid_points]
            track_ids = radar_data["track_id"][valid_points]
            if return_track_ids:
                yield X, kept_labels, uuids, sequence_name, track_ids
            else:
                yield X, kept_labels, uuids, sequence_name
def _write_predictions_to_json(predictions, suffix, schema):
    """Write per-sequence predictions as JSON files into the current directory.

    :param predictions: dict mapping sequence names to {uuid: prediction} dicts
    :param suffix: file name suffix appended to the sequence base name,
        e.g. "_predictions.json"
    :param schema: prediction file schema identifier (see PredictionFileSchemas)
    """
    current_dir = os.getcwd()
    for sequence_name in predictions:  # iterate over all unique sequences
        name = os.path.splitext(sequence_name)[0]
        output_name = os.path.join(current_dir, name + suffix)  # create output name for this sequence
        print("Writing predictions for sequence {} to file {}.".format(sequence_name, output_name))
        # write predictions to json file. This file can be loaded with the GUI tool to visualize the predictions
        per_point_predictions_to_json(predictions[sequence_name], output_name,
                                      ClassificationLabel.translation_dict(), schema=schema)


def _mock_semantic_segmentation(training_sequences, validation_sequences, path_to_dataset):
    """Train and evaluate the mock semantic segmentation network and write results."""
    print("Mocking a semantic segmentation network...")
    classifier = SemSegNetwork()
    # training loop for the classifier
    print("Training of mock-classifier...", end=" ", flush=True)
    for X, y_true in train_data_generator(training_sequences, path_to_dataset):
        classifier.train(X, y_true)
    print("Done!")
    # Validation loop
    print("Evaluating trained classifier on validation data...", end=" ", flush=True)
    predictions = {}
    for X, y_true, uuids, sequence_name in validation_data_generator(validation_sequences, path_to_dataset):
        if sequence_name not in predictions:
            predictions[sequence_name] = {}
        classifier._y_true_test = y_true  # this is only used to set the internal data of our fake-classifier
        y_pred = classifier.predict(X)  # predict for each point in X a class label
        for y, uid in zip(y_pred, uuids):  # store predictions in a dictionary along with the uuid of the points
            predictions[sequence_name][uid] = y
    print("Done!")
    _write_predictions_to_json(predictions, "_predictions.json", PredictionFileSchemas.SemSeg)
    print("Done with semantic segmentation!")


def _mock_instance_segmentation(training_sequences, validation_sequences, path_to_dataset):
    """Train and evaluate the mock instance segmentation network and write results."""
    print("Mocking an instance segmentation network...")
    classifier = InstSegNetwork()
    print("Training of mock-classifier...", end=" ", flush=True)
    for X, y_true, y_inst in train_data_generator(training_sequences, path_to_dataset, return_track_ids=True):
        classifier.train(X, y_true, y_inst)
    print("Done!")
    # Validation loop instance segmentation
    print("Evaluating trained instance segmentation network on validation data...", end=" ", flush=True)
    predictions = {}
    for X, y_true, uuids, sequence_name, y_inst in validation_data_generator(validation_sequences, path_to_dataset,
                                                                             return_track_ids=True):
        if sequence_name not in predictions:
            predictions[sequence_name] = {}
        classifier._y_true_test = y_true  # this is only used to set the internal data of our fake-classifier
        classifier._y_inst_true = y_inst
        y_pred_labelid, y_pred_instid = classifier.predict(X)  # predict for each point in X a class label
        for y_lid, y_tid, uid in zip(y_pred_labelid, y_pred_instid, uuids):
            # store predictions in a dictionary along with the uuid of the points
            predictions[sequence_name][uid] = [int(y_lid), int(y_tid)]  # casting to int for JSON serialization
    print("Done!")
    _write_predictions_to_json(predictions, "_inst_seg_predictions.json", PredictionFileSchemas.InstSeg)
    print("Done with instance segmentation!")


def main():
    """Run the mock semantic- and instance-segmentation demos on RadarScenes."""
    # MODIFY THIS LINE AND INSERT PATH WHERE YOU STORED THE RADARSCENES DATASET
    path_to_dataset = "/home/USERNAME/datasets/RadarScenes"
    sequence_file = os.path.join(path_to_dataset, "data", "sequences.json")
    if not os.path.exists(sequence_file):
        print("Please modify this example so that it contains the correct path to the dataset on your machine.")
        return
    # load sequences.json file and obtain list of sequences for training.
    training_sequences = get_training_sequences(sequence_file)
    # load sequences.json file and obtain list of sequences for validation.
    validation_sequences = get_validation_sequences(sequence_file)
    print("Found {} sequences for training and {} sequences for validation.".format(len(training_sequences),
                                                                                   len(validation_sequences)))
    print("-" * 120)
    # For this example, only a subset of the training/validation files is used.
    # In a real application of course all files would be used
    training_sequences = training_sequences[112:115]
    validation_sequences = validation_sequences[23:24]
    _mock_semantic_segmentation(training_sequences, validation_sequences, path_to_dataset)
    print("-" * 120)
    print("\n")
    _mock_instance_segmentation(training_sequences, validation_sequences, path_to_dataset)


if __name__ == '__main__':
    main()
| [
"os.path.exists",
"numpy.random.choice",
"radar_scenes.labels.ClassificationLabel",
"numpy.random.random",
"radar_scenes.labels.ClassificationLabel.translation_dict",
"os.path.join",
"os.path.splitext",
"radar_scenes.sequence.get_validation_sequences",
"os.getcwd",
"numpy.array",
"random.choices... | [((9145, 9200), 'os.path.join', 'os.path.join', (['path_to_dataset', '"""data"""', '"""sequences.json"""'], {}), "(path_to_dataset, 'data', 'sequences.json')\n", (9157, 9200), False, 'import os\n'), ((9472, 9509), 'radar_scenes.sequence.get_training_sequences', 'get_training_sequences', (['sequence_file'], {}), '(sequence_file)\n', (9494, 9509), False, 'from radar_scenes.sequence import get_training_sequences, get_validation_sequences, Sequence\n'), ((9613, 9652), 'radar_scenes.sequence.get_validation_sequences', 'get_validation_sequences', (['sequence_file'], {}), '(sequence_file)\n', (9637, 9652), False, 'from radar_scenes.sequence import get_training_sequences, get_validation_sequences, Sequence\n'), ((11198, 11209), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11207, 11209), False, 'import os\n'), ((13372, 13383), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13381, 13383), False, 'import os\n'), ((6251, 6282), 'numpy.random.choice', 'np.random.choice', (['timestamps', '(5)'], {}), '(timestamps, 5)\n', (6267, 6282), True, 'import numpy as np\n'), ((9213, 9242), 'os.path.exists', 'os.path.exists', (['sequence_file'], {}), '(sequence_file)\n', (9227, 9242), False, 'import os\n'), ((11357, 11410), 'os.path.join', 'os.path.join', (['current_dir', "(name + '_predictions.json')"], {}), "(current_dir, name + '_predictions.json')\n", (11369, 11410), False, 'import os\n'), ((13531, 13593), 'os.path.join', 'os.path.join', (['current_dir', "(name + '_inst_seg_predictions.json')"], {}), "(current_dir, name + '_inst_seg_predictions.json')\n", (13543, 13593), False, 'import os\n'), ((11300, 11331), 'os.path.splitext', 'os.path.splitext', (['sequence_name'], {}), '(sequence_name)\n', (11316, 11331), False, 'import os\n'), ((11743, 11781), 'radar_scenes.labels.ClassificationLabel.translation_dict', 'ClassificationLabel.translation_dict', ([], {}), '()\n', (11779, 11781), False, 'from radar_scenes.labels import ClassificationLabel\n'), ((13474, 13505), 
'os.path.splitext', 'os.path.splitext', (['sequence_name'], {}), '(sequence_name)\n', (13490, 13505), False, 'import os\n'), ((13961, 13999), 'radar_scenes.labels.ClassificationLabel.translation_dict', 'ClassificationLabel.translation_dict', ([], {}), '()\n', (13997, 13999), False, 'from radar_scenes.labels import ClassificationLabel\n'), ((1425, 1447), 'radar_scenes.labels.ClassificationLabel', 'ClassificationLabel', (['y'], {}), '(y)\n', (1444, 1447), False, 'from radar_scenes.labels import ClassificationLabel\n'), ((2234, 2283), 'random.choices', 'choices', (['[0, 1, 2, 3, 4, 5]'], {'weights': 'proba_vector'}), '([0, 1, 2, 3, 4, 5], weights=proba_vector)\n', (2241, 2283), False, 'from random import choices\n'), ((6011, 6078), 'os.path.join', 'os.path.join', (['path_to_dataset', '"""data"""', 'sequence_name', '"""scenes.json"""'], {}), "(path_to_dataset, 'data', sequence_name, 'scenes.json')\n", (6023, 6078), False, 'import os\n'), ((7940, 8007), 'os.path.join', 'os.path.join', (['path_to_dataset', '"""data"""', 'sequence_name', '"""scenes.json"""'], {}), "(path_to_dataset, 'data', sequence_name, 'scenes.json')\n", (7952, 8007), False, 'import os\n'), ((1560, 1582), 'radar_scenes.labels.ClassificationLabel', 'ClassificationLabel', (['y'], {}), '(y)\n', (1579, 1582), False, 'from radar_scenes.labels import ClassificationLabel\n'), ((3970, 3988), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3986, 3988), True, 'import numpy as np\n'), ((4506, 4522), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (4514, 4522), True, 'import numpy as np\n'), ((6613, 6651), 'radar_scenes.labels.ClassificationLabel.label_to_clabel', 'ClassificationLabel.label_to_clabel', (['x'], {}), '(x)\n', (6648, 6651), False, 'from radar_scenes.labels import ClassificationLabel\n'), ((8275, 8313), 'radar_scenes.labels.ClassificationLabel.label_to_clabel', 'ClassificationLabel.label_to_clabel', (['x'], {}), '(x)\n', (8310, 8313), False, 'from radar_scenes.labels import 
ClassificationLabel\n'), ((1703, 1725), 'radar_scenes.labels.ClassificationLabel', 'ClassificationLabel', (['y'], {}), '(y)\n', (1722, 1725), False, 'from radar_scenes.labels import ClassificationLabel\n'), ((1854, 1876), 'radar_scenes.labels.ClassificationLabel', 'ClassificationLabel', (['y'], {}), '(y)\n', (1873, 1876), False, 'from radar_scenes.labels import ClassificationLabel\n'), ((1998, 2020), 'radar_scenes.labels.ClassificationLabel', 'ClassificationLabel', (['y'], {}), '(y)\n', (2017, 2020), False, 'from radar_scenes.labels import ClassificationLabel\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, cross_val_score,\
ShuffleSplit
from sklearn.svm import SVC
from sklearn.metrics import classification_report, confusion_matrix,\
accuracy_score
from sklearn.utils import shuffle
def load_dataset(wine_dataset_csv):
    """Read the wine data set from a CSV file.

    :param wine_dataset_csv: path to the CSV file on disk
    :return: a pandas DataFrame with the file contents (no index column)
    """
    return pd.read_csv(wine_dataset_csv, index_col=False)
def analyse_dataset(processed_data):
    """Print the first few rows of the data set for a quick sanity check.

    :param processed_data: pandas DataFrame to inspect
    :return: None (output is printed only)
    """
    print(processed_data.head())
def svm_algorithm_with_holdout_validation(wine_dataset):
    """Train a linear SVM with an 80/20 holdout split and print metrics.

    The data set is shuffled deterministically, the "Class" column is used as
    the label and the first 486 columns as features. Prints the accuracy (in
    percent), the confusion matrix and a per-class classification report.

    :param wine_dataset: pandas DataFrame containing features and a "Class" column
    """
    # deterministic shuffle so repeated runs produce the same split
    shuffled = wine_dataset.sample(frac=1, random_state=42).reset_index(drop=True)
    y = shuffled["Class"].values
    # alternative feature subsets used in earlier experiments:
    # X = shuffled.iloc[:, : 485].values
    # X = shuffled.iloc[:, : 482].values
    X = shuffled.iloc[:, : 486].values
    X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                        test_size=0.20,
                                                        random_state=1)
    classifier = SVC(kernel="linear")
    classifier.fit(X_train, y_train)
    predicted = classifier.predict(X_test)
    print(accuracy_score(y_test, predicted) * 100)
    print(confusion_matrix(y_test, predicted))
    print(classification_report(y_test, predicted))
def svm_algorithm_with_k_fold_validation(wine_dataset):
    """Evaluate a linear SVM with 10-fold cross validation and print the scores.

    The data set is shuffled deterministically, the "Class" column is used as
    the label and the first 482 columns as features. Prints the per-fold
    scores followed by their mean in percent.

    :param wine_dataset: pandas DataFrame containing features and a "Class" column
    """
    # deterministic shuffle so repeated runs produce the same folds
    shuffled = wine_dataset.sample(frac=1, random_state=42).reset_index(drop=True)
    # Extract features and label
    y = shuffled["Class"].values
    # alternative feature subsets used in earlier experiments:
    # X = shuffled.iloc[:, : 486].values
    # X = shuffled.iloc[:, : 485].values
    X = shuffled.iloc[:, : 482].values
    # Create classifier and train with 10 fold cross validation
    classifier = SVC(kernel="linear")
    scores = cross_val_score(classifier, X, y, cv=10)
    print(scores)
    print()
    print("Cross validation scores mean: {}%".format(np.mean(scores) * 100))
def main():
    """Entry point: load the tweaked wine data set and run the k-fold SVM."""
    # alternative data set files used in earlier experiments:
    # wine_dataset_file = "drink_and_hold_dataset.csv"
    # tweaked_wine_dataset_file = \
    #     "drink_and_hold_dataset_with_finish_attribute_deleted.csv"
    tweaked_wine_dataset_file =\
        "drink_and_hold_dataset_with_4_attributes_above_35_percent_deleted.csv"
    dataframe = load_dataset(tweaked_wine_dataset_file)
    # other experiments that can be re-enabled:
    # analyse_dataset(dataframe)
    # svm_algorithm_with_holdout_validation(dataframe)
    svm_algorithm_with_k_fold_validation(dataframe)


if __name__ == "__main__":
    main()
| [
"numpy.mean",
"sklearn.svm.SVC",
"sklearn.metrics.confusion_matrix",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.classification_report",
"sklearn.metrics.accuracy_score",
"sklearn.model_selection.cross_val_score"
] | [((426, 472), 'pandas.read_csv', 'pd.read_csv', (['wine_dataset_csv'], {'index_col': '(False)'}), '(wine_dataset_csv, index_col=False)\n', (437, 472), True, 'import pandas as pd\n'), ((1015, 1078), 'sklearn.model_selection.train_test_split', 'train_test_split', (['dataset', 'label'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(dataset, label, test_size=0.2, random_state=1)\n', (1031, 1078), False, 'from sklearn.model_selection import train_test_split, cross_val_score, ShuffleSplit\n'), ((1214, 1234), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (1217, 1234), False, 'from sklearn.svm import SVC\n'), ((1995, 2015), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (1998, 2015), False, 'from sklearn.svm import SVC\n'), ((2095, 2149), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['svc_classifier', 'dataset', 'label'], {'cv': '(10)'}), '(svc_classifier, dataset, label, cv=10)\n', (2110, 2149), False, 'from sklearn.model_selection import train_test_split, cross_val_score, ShuffleSplit\n'), ((1337, 1367), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1351, 1367), False, 'from sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n'), ((1405, 1437), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1421, 1437), False, 'from sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n'), ((1449, 1486), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1470, 1486), False, 'from sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n'), ((2306, 2338), 'numpy.mean', 'np.mean', (['cross_validation_scores'], {}), '(cross_validation_scores)\n', (2313, 2338), True, 'import numpy as np\n')] |
####################################################
#
# @ Authors : <NAME>
# <NAME>
#
# @ Hint: you have to install all requirements
# from requirements.txt
#
####################################################
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
# load the image
# load the image
rose_img = cv.imread("rose.jpeg")
# Extract the (r --> height), (c --> width) and (w --> channels)
r, c, w = rose_img.shape
# Maximum possible number of intensity values used in the quantization of the original image
B = 256
# the desired maximum possible number of intensity values used in the quantization of the new image
# (example values tried: 4, 8, 14)
q = int(input("Enter the number of quantization values: "))
# l divides the B interval [0-255] into multiple ranges, each range has length l
l = B/q
# Quantization function: map every input intensity i to the midpoint of its range
E = np.zeros((256, 1))
for i in range(256):
    E[i, 0] = (i // l) * l + l/2
# plot the quantization function
plt.plot(np.arange(0, 256, 1), E)
plt.show()
# Apply the quantization function to every pixel of every channel at once via
# NumPy fancy indexing -- equivalent to the per-pixel triple loop, but vectorized.
# Cast to uint8 (truncation, same as assigning into a uint8 array) for better visualisation.
y = E[rose_img, 0].astype("uint8")
# show the new image
cv.imshow('y Image', y)
cv.waitKey()
cv.destroyAllWindows()
"matplotlib.pyplot.show",
"cv2.imshow",
"numpy.zeros",
"cv2.destroyAllWindows",
"cv2.waitKey",
"numpy.arange",
"cv2.imread"
] | [((343, 365), 'cv2.imread', 'cv.imread', (['"""rose.jpeg"""'], {}), "('rose.jpeg')\n", (352, 365), True, 'import cv2 as cv\n'), ((857, 875), 'numpy.zeros', 'np.zeros', (['(256, 1)'], {}), '((256, 1))\n', (865, 875), True, 'import numpy as np\n'), ((1031, 1041), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1039, 1041), True, 'import matplotlib.pyplot as plt\n'), ((1114, 1153), 'numpy.zeros', 'np.zeros', (['rose_img.shape'], {'dtype': '"""uint8"""'}), "(rose_img.shape, dtype='uint8')\n", (1122, 1153), True, 'import numpy as np\n'), ((1397, 1420), 'cv2.imshow', 'cv.imshow', (['"""y Image"""', 'y'], {}), "('y Image', y)\n", (1406, 1420), True, 'import cv2 as cv\n'), ((1422, 1434), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (1432, 1434), True, 'import cv2 as cv\n'), ((1435, 1457), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (1455, 1457), True, 'import cv2 as cv\n'), ((1006, 1026), 'numpy.arange', 'np.arange', (['(0)', '(256)', '(1)'], {}), '(0, 256, 1)\n', (1015, 1026), True, 'import numpy as np\n')] |
import io
import os
from flask import abort
from flask import Flask
from flask import jsonify
from flask import request
from flask import send_file
from flask_cors import CORS
import numpy as np
import PIL
from PIL import Image
from scipy import misc
import tensorflow as tf
import DCSCN
from helper import args
# Flask application serving the super-resolution model; CORS is enabled so
# browser clients on other origins can call the API.
api = Flask(__name__)
CORS(api)
# Register the expected command-line flag and fix the upscaling factor to 3x.
args.flags.DEFINE_string("file", "image.jpg", "Target filename")
FLAGS = args.get()
FLAGS.scale = 3
# Path to the trained model checkpoint, supplied via the environment.
MODEL_PATH = os.environ.get("MODEL_PATH")
print(MODEL_PATH)
def load_model(flags, model_path):
    """Build and restore a DCSCN super-resolution model from a checkpoint.

    :param flags: parsed configuration flags for the model
    :param model_path: name/path of the trained model checkpoint to load
    :return: the initialized DCSCN.SuperResolution model, ready for inference
    """
    # Bug fix: use the `flags` argument instead of silently falling back to
    # the module-level FLAGS global (the parameter was previously ignored).
    model = DCSCN.SuperResolution(flags, model_name=model_path)
    model.build_graph()
    model.build_optimizer()
    model.build_summary_saver()
    model.init_all_variables()
    model.load_model(name=model_path)
    return model
# Load the model once at import time so every request reuses the same instance.
model = load_model(FLAGS, MODEL_PATH)
@api.route("/healthcheck")
def healthcheck():
    """Liveness probe: report that the service is up with HTTP 200."""
    status = {'Status': 'All good'}
    return jsonify(status), 200
@api.route("/predict", methods=['POST'])
def predict():
    """Run super-resolution on an uploaded image and return the upscaled PNG.

    Expects a multipart/form-data POST with the image under the 'image' key.
    :return: the prediction as a PNG attachment with HTTP 200, or aborts
        with HTTP 403 if no image file was sent.
    """
    # check if the post request has the file part
    if 'image' not in request.files:
        print("No file in request.")
        return abort(403)
    image_file = request.files['image']
    img = Image.open(image_file.stream)
    data = np.array(img)
    print(data.shape)
    print(data.min(), data.max())
    image = model.predict_im(data)
    print(image.shape)
    print(image.min(), image.max())
    # Convert the float prediction back to an 8-bit image. Clip to [0, 255]
    # and round, to avoid range rescaling. This replaces scipy.misc.toimage
    # (deprecated since SciPy 1.0 and removed in SciPy 1.2) with the PIL
    # equivalent; PIL is already a dependency of this module.
    img = Image.fromarray((np.clip(image, 0, 255) + 0.5).astype(np.uint8))
    img_io = io.BytesIO()
    img.save(img_io, format='PNG')
    img_io.seek(0)
    return send_file(
        img_io,
        mimetype='image/png',
        as_attachment=True,
        attachment_filename='prediction.png'), 200
if __name__ == '__main__':
    # Bind on all interfaces so the service is reachable from outside a container.
    api.run(host='0.0.0.0', port=5001)
| [
"PIL.Image.open",
"flask_cors.CORS",
"flask.Flask",
"os.environ.get",
"helper.args.flags.DEFINE_string",
"io.BytesIO",
"scipy.misc.toimage",
"numpy.array",
"flask.abort",
"flask.send_file",
"DCSCN.SuperResolution",
"helper.args.get",
"flask.jsonify"
] | [((321, 336), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (326, 336), False, 'from flask import Flask\n'), ((337, 346), 'flask_cors.CORS', 'CORS', (['api'], {}), '(api)\n', (341, 346), False, 'from flask_cors import CORS\n'), ((348, 412), 'helper.args.flags.DEFINE_string', 'args.flags.DEFINE_string', (['"""file"""', '"""image.jpg"""', '"""Target filename"""'], {}), "('file', 'image.jpg', 'Target filename')\n", (372, 412), False, 'from helper import args\n'), ((421, 431), 'helper.args.get', 'args.get', ([], {}), '()\n', (429, 431), False, 'from helper import args\n'), ((463, 491), 'os.environ.get', 'os.environ.get', (['"""MODEL_PATH"""'], {}), "('MODEL_PATH')\n", (477, 491), False, 'import os\n'), ((559, 610), 'DCSCN.SuperResolution', 'DCSCN.SuperResolution', (['FLAGS'], {'model_name': 'model_path'}), '(FLAGS, model_name=model_path)\n', (580, 610), False, 'import DCSCN\n'), ((1176, 1205), 'PIL.Image.open', 'Image.open', (['image_file.stream'], {}), '(image_file.stream)\n', (1186, 1205), False, 'from PIL import Image\n'), ((1218, 1231), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1226, 1231), True, 'import numpy as np\n'), ((1537, 1574), 'scipy.misc.toimage', 'misc.toimage', (['image'], {'cmin': '(0)', 'cmax': '(255)'}), '(image, cmin=0, cmax=255)\n', (1549, 1574), False, 'from scipy import misc\n'), ((1655, 1667), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1665, 1667), False, 'import io\n'), ((881, 912), 'flask.jsonify', 'jsonify', (["{'Status': 'All good'}"], {}), "({'Status': 'All good'})\n", (888, 912), False, 'from flask import jsonify\n'), ((1115, 1125), 'flask.abort', 'abort', (['(403)'], {}), '(403)\n', (1120, 1125), False, 'from flask import abort\n'), ((1733, 1834), 'flask.send_file', 'send_file', (['img_io'], {'mimetype': '"""image/png"""', 'as_attachment': '(True)', 'attachment_filename': '"""prediction.png"""'}), "(img_io, mimetype='image/png', as_attachment=True,\n attachment_filename='prediction.png')\n", (1742, 1834), 
False, 'from flask import send_file\n')] |
import matplotlib.pyplot as plt
import numpy as np
from keras.callbacks import TensorBoard
from keras.datasets import mnist
from keras.layers import Dense, Dropout
from keras.layers import Input
from keras.models import Model
from keras.utils import to_categorical
def main():
# this is the size of our encoded representations
encoding_dim = 32 # 32 floats -> compression of factor 24.5, assuming the input is 784 floats
# this is our input placeholder
input_img = Input(shape=(784,), name='encode-img-input')
# "encoded" is the encoded representation of the input
encoded = Dense(128, activation='relu', name='encode-128')(input_img)
encoded = Dense(64, activation='relu', name='encode-64')(encoded)
encoded = Dense(32, activation='relu', name='encode-32')(encoded)
# "decoded" is the lossy reconstruction of the input
encoded_img = Input(shape=(32,))
decoded = Dense(64, activation='relu', name='decode-img-64')(encoded_img)
decoded = Dense(128, activation='relu', name='decode-img-128')(decoded)
decoded = Dense(784, activation='sigmoid', name='decode-img-784')(decoded)
# decode the latent form to the class.
num_classes = 10
decode_cls = Dense(64, activation='relu', name='decode-cls-64')(encoded_img)
decoded = Dropout(.3)(decoded)
decode_cls = Dense(256, activation='relu', name='decode-cls-256')(decode_cls)
decode_cls = Dense(num_classes, activation='softmax', name='decode-cls-10')(decode_cls)
# Map the raw input image to "bottleneck" latent representation
encoder = Model(input_img, encoded, name='Encoder')
# Map the latent representation back to the raw image
decoder = Model(input=encoded_img, output=decoded, name='Decoder-Image')
# Map the latent form to the class.
decoder_cls = Model(input=encoded_img, output=decode_cls, name='Decoder-Class')
# this model maps an input to its reconstruction
autoencoder = Model(input=input_img,
output=[decoder(encoder.output), decoder_cls(encoder.output)])
autoencoder.compile(optimizer='adadelta', loss=['binary_crossentropy', 'categorical_crossentropy'])
print(autoencoder.summary())
print(decoder.summary())
print(decoder_cls.summary())
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
# Convert labels to binary class matrices
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
print(x_train.shape)
print(x_test.shape)
autoencoder.fit(x=x_train,
y=[x_train, y_train],
epochs=100,
batch_size=256,
shuffle=True,
validation_data=(x_test, [x_test, y_test]),
callbacks=[TensorBoard(log_dir='./tb')])
# encode and decode some digits
# note that we take them from the *test* set
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)
n = 10 # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
# display original
ax = plt.subplot(3, n, i + 1)
plt.imshow(x_test[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display encoding
ax = plt.subplot(3, n, i + 1 + n)
plt.imshow(encoded_imgs[i].reshape(4, 8))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(3, n, i + 1 + (2 * n))
plt.imshow(decoded_imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
return
if __name__ == "__main__":
main()
| [
"numpy.prod",
"matplotlib.pyplot.gray",
"keras.datasets.mnist.load_data",
"keras.utils.to_categorical",
"matplotlib.pyplot.subplot",
"keras.callbacks.TensorBoard",
"keras.layers.Input",
"matplotlib.pyplot.figure",
"keras.models.Model",
"keras.layers.Dense",
"keras.layers.Dropout",
"matplotlib.... | [((485, 529), 'keras.layers.Input', 'Input', ([], {'shape': '(784,)', 'name': '"""encode-img-input"""'}), "(shape=(784,), name='encode-img-input')\n", (490, 529), False, 'from keras.layers import Input\n'), ((879, 897), 'keras.layers.Input', 'Input', ([], {'shape': '(32,)'}), '(shape=(32,))\n', (884, 897), False, 'from keras.layers import Input\n'), ((1569, 1610), 'keras.models.Model', 'Model', (['input_img', 'encoded'], {'name': '"""Encoder"""'}), "(input_img, encoded, name='Encoder')\n", (1574, 1610), False, 'from keras.models import Model\n'), ((1684, 1746), 'keras.models.Model', 'Model', ([], {'input': 'encoded_img', 'output': 'decoded', 'name': '"""Decoder-Image"""'}), "(input=encoded_img, output=decoded, name='Decoder-Image')\n", (1689, 1746), False, 'from keras.models import Model\n'), ((1806, 1871), 'keras.models.Model', 'Model', ([], {'input': 'encoded_img', 'output': 'decode_cls', 'name': '"""Decoder-Class"""'}), "(input=encoded_img, output=decode_cls, name='Decoder-Class')\n", (1811, 1871), False, 'from keras.models import Model\n'), ((2299, 2316), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (2314, 2316), False, 'from keras.datasets import mnist\n'), ((2616, 2652), 'keras.utils.to_categorical', 'to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (2630, 2652), False, 'from keras.utils import to_categorical\n'), ((2666, 2701), 'keras.utils.to_categorical', 'to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (2680, 2701), False, 'from keras.utils import to_categorical\n'), ((3282, 3309), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 4)'}), '(figsize=(20, 4))\n', (3292, 3309), True, 'import matplotlib.pyplot as plt\n'), ((4011, 4021), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4019, 4021), True, 'import matplotlib.pyplot as plt\n'), ((603, 651), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""', 'name': 
'"""encode-128"""'}), "(128, activation='relu', name='encode-128')\n", (608, 651), False, 'from keras.layers import Dense, Dropout\n'), ((677, 723), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""encode-64"""'}), "(64, activation='relu', name='encode-64')\n", (682, 723), False, 'from keras.layers import Dense, Dropout\n'), ((747, 793), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""', 'name': '"""encode-32"""'}), "(32, activation='relu', name='encode-32')\n", (752, 793), False, 'from keras.layers import Dense, Dropout\n'), ((912, 962), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""decode-img-64"""'}), "(64, activation='relu', name='decode-img-64')\n", (917, 962), False, 'from keras.layers import Dense, Dropout\n'), ((990, 1042), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""', 'name': '"""decode-img-128"""'}), "(128, activation='relu', name='decode-img-128')\n", (995, 1042), False, 'from keras.layers import Dense, Dropout\n'), ((1066, 1121), 'keras.layers.Dense', 'Dense', (['(784)'], {'activation': '"""sigmoid"""', 'name': '"""decode-img-784"""'}), "(784, activation='sigmoid', name='decode-img-784')\n", (1071, 1121), False, 'from keras.layers import Dense, Dropout\n'), ((1213, 1263), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""decode-cls-64"""'}), "(64, activation='relu', name='decode-cls-64')\n", (1218, 1263), False, 'from keras.layers import Dense, Dropout\n'), ((1291, 1303), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (1298, 1303), False, 'from keras.layers import Dense, Dropout\n'), ((1329, 1381), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""', 'name': '"""decode-cls-256"""'}), "(256, activation='relu', name='decode-cls-256')\n", (1334, 1381), False, 'from keras.layers import Dense, Dropout\n'), ((1411, 1473), 'keras.layers.Dense', 'Dense', (['num_classes'], 
{'activation': '"""softmax"""', 'name': '"""decode-cls-10"""'}), "(num_classes, activation='softmax', name='decode-cls-10')\n", (1416, 1473), False, 'from keras.layers import Dense, Dropout\n'), ((3373, 3397), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', 'n', '(i + 1)'], {}), '(3, n, i + 1)\n', (3384, 3397), True, 'import matplotlib.pyplot as plt\n'), ((3452, 3462), 'matplotlib.pyplot.gray', 'plt.gray', ([], {}), '()\n', (3460, 3462), True, 'import matplotlib.pyplot as plt\n'), ((3588, 3616), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', 'n', '(i + 1 + n)'], {}), '(3, n, i + 1 + n)\n', (3599, 3616), True, 'import matplotlib.pyplot as plt\n'), ((3675, 3685), 'matplotlib.pyplot.gray', 'plt.gray', ([], {}), '()\n', (3683, 3685), True, 'import matplotlib.pyplot as plt\n'), ((3817, 3849), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', 'n', '(i + 1 + 2 * n)'], {}), '(3, n, i + 1 + 2 * n)\n', (3828, 3849), True, 'import matplotlib.pyplot as plt\n'), ((3912, 3922), 'matplotlib.pyplot.gray', 'plt.gray', ([], {}), '()\n', (3920, 3922), True, 'import matplotlib.pyplot as plt\n'), ((2456, 2482), 'numpy.prod', 'np.prod', (['x_train.shape[1:]'], {}), '(x_train.shape[1:])\n', (2463, 2482), True, 'import numpy as np\n'), ((2527, 2552), 'numpy.prod', 'np.prod', (['x_test.shape[1:]'], {}), '(x_test.shape[1:])\n', (2534, 2552), True, 'import numpy as np\n'), ((3023, 3050), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': '"""./tb"""'}), "(log_dir='./tb')\n", (3034, 3050), False, 'from keras.callbacks import TensorBoard\n')] |
from collections import Counter, defaultdict
import csv
import json
import os
import random
import sys
from time import time
from metal.contrib.info_extraction.mentions import RelationMention
from metal.contrib.info_extraction.utils import mark_entities
import numpy as np
import torch
from scipy.sparse import issparse
from .explanation import Explanation
class PrintTimer:
"""Prints msg at start, total time taken at end."""
def __init__(self, msg, prefix="###"):
self.msg = msg
self.prefix = prefix + " " if len(prefix) > 0 else prefix
def __enter__(self):
self.t0 = time()
print("{0}{1}".format(self.prefix, self.msg))
def __exit__(self, type, value, traceback):
print ("{0}Done in {1:.1f}s.\n".format(self.prefix, time() - self.t0))
class ProgressBar(object):
def __init__(self, N, length=40):
# Protect against division by zero (N = 0 results in full bar being printed)
self.N = max(1, N)
self.nf = float(self.N)
self.length = length
# Precalculate the i values that should trigger a write operation
self.ticks = set([round(i/100.0 * N) for i in range(101)])
self.ticks.add(N-1)
self.bar(0)
def bar(self, i):
"""Assumes i ranges through [0, N-1]"""
if i in self.ticks:
b = int(np.ceil(((i+1) / self.nf) * self.length))
sys.stdout.write(
"\r[{0}{1}] {2}%".format(
"="*b, " "*(self.length-b), int(100*((i+1) / self.nf))))
sys.stdout.flush()
def close(self):
# Move the bar to 100% before closing
self.bar(self.N-1)
sys.stdout.write("\n\n")
sys.stdout.flush()
class ExplanationIO(object):
def write(self, explanations, fpath):
explanations = explanations if isinstance(explanations, list) else [explanations]
with open(fpath, 'w') as tsvfile:
tsvwriter = csv.writer(tsvfile, delimiter='\t')
for exp in explanations:
if isinstance(exp.candidate, str):
candidate_id = exp.candidate
else:
candidate_id = exp.candidate.mention_id
tsvwriter.writerow([
exp.name,
exp.label,
candidate_id,
exp.condition,
])
fpath = fpath if len(fpath) < 50 else fpath[:20] + '...' + fpath[-30:]
print("Wrote {} explanations to {}".format(len(explanations), fpath))
def read(self, fpath):
with open(fpath, 'r') as tsvfile:
tsvreader = csv.reader(tsvfile, delimiter='\t')
num_read = 0
explanations = []
for (name, label, candidate_id, condition) in tsvreader:
explanations.append(
Explanation(
name=name,
label=int(label),
candidate=candidate_id,
condition=condition.strip(),
)
)
num_read += 1
fpath = fpath if len(fpath) < 50 else fpath[:20] + '...' + fpath[-30:]
print("Read {} explanations from {}".format(num_read, fpath))
return explanations
def link_explanation_candidates(explanations, candidates):
"""Doc string goes here."""
target_candidate_ids = set()
linked = 0
print("Building list of target candidate ids...")
for e in explanations:
if e.candidate is not None and not isinstance(e.candidate, RelationMention):
target_candidate_ids.add(e.candidate)
elif e.candidate:
linked += 1
if linked == len(explanations):
print("All {} explanations are already linked to candidates.".format(
len(explanations)))
return explanations
else:
print("Collected {} unique target candidate ids from {} explanations.".format(
len(target_candidate_ids), len(explanations)))
if not target_candidate_ids:
print("No candidate hashes were provided. Skipping linking.")
return explanations
candidate_map = {}
print("Gathering desired candidates...")
for candidate in candidates:
if candidate.mention_id in target_candidate_ids:
candidate_map[candidate.mention_id] = candidate
if len(candidate_map) < len(target_candidate_ids):
num_missing = len(target_candidate_ids) - len(candidate_map)
print("Could not find {} target candidates with the following mention_ids (first 5):".format(
num_missing))
num_reported = 0
for i, c_hash in enumerate(target_candidate_ids):
if c_hash not in candidate_map:
print(c_hash)
num_reported += 1
if num_reported >= 5:
break
print("Found {}/{} desired candidates".format(
len(candidate_map), len(target_candidate_ids)))
print("Linking explanations to candidates...")
for e in explanations:
if not isinstance(e.candidate, RelationMention):
try:
e.candidate = candidate_map[e.candidate]
linked += 1
except KeyError:
pass
print("Linked {}/{} explanations".format(linked, len(explanations)))
return explanations
def sparse_to_indices(X):
"""Converts a sparse matrix into a tensor of the nonzero indices
Args:
X: an [n, num_features] one-hot scipy.sparse matrix
Returns:
X_idx: an [n, h] tensor where X_idx[i,:] is a zero-padded 1D tesnor of
the nonzero indices of X[i,:]
"""
if not issparse(X):
raise ValueError("X must be a scipy.sparse matrix")
nonzeros = X.nonzero()
indices = defaultdict(list)
for i, v in zip(nonzeros[0], nonzeros[1]):
indices[i].append(v + 1)
max_len = max(map(lambda x: len(x), indices.values()))
X_idx = torch.zeros(X.shape[0], max_len).long()
for i, values in indices.items():
X_idx[i, :len(values)] = torch.LongTensor(values)
return X_idx
def display_candidate(candidate):
tokens = candidate.tokens
positions = list(zip(candidate.word_starts, candidate.word_ends))
markers = ['{', '}', '{', '}']
marked = mark_entities(tokens, positions, markers, style='concatenate')
print(' '.join(marked))
print()
print(marked)
class CandidateViewer(object):
def __init__(self, candidates, shuffle=False, seed=None):
if seed:
random.seed(seed)
self.candidates = candidates
self.idx = -1
self.order = list(range(len(candidates)))
# Shuffle indirectly to not mess up alignment between candidates and
# other objects in the workspace (e.g., labels).
if shuffle:
random.shuffle(self.order)
def view(self):
self.idx += 1
if self.idx > len(self.order):
print("Exhausted provided candidate set")
return
c = self.candidates[self.order[self.idx]]
display_candidate(c)
return c | [
"numpy.ceil",
"random.shuffle",
"torch.LongTensor",
"csv.writer",
"scipy.sparse.issparse",
"random.seed",
"metal.contrib.info_extraction.utils.mark_entities",
"collections.defaultdict",
"csv.reader",
"sys.stdout.flush",
"time.time",
"torch.zeros",
"sys.stdout.write"
] | [((5921, 5938), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5932, 5938), False, 'from collections import Counter, defaultdict\n'), ((6427, 6489), 'metal.contrib.info_extraction.utils.mark_entities', 'mark_entities', (['tokens', 'positions', 'markers'], {'style': '"""concatenate"""'}), "(tokens, positions, markers, style='concatenate')\n", (6440, 6489), False, 'from metal.contrib.info_extraction.utils import mark_entities\n'), ((611, 617), 'time.time', 'time', ([], {}), '()\n', (615, 617), False, 'from time import time\n'), ((1682, 1706), 'sys.stdout.write', 'sys.stdout.write', (['"""\n\n"""'], {}), "('\\n\\n')\n", (1698, 1706), False, 'import sys\n'), ((1715, 1733), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1731, 1733), False, 'import sys\n'), ((5807, 5818), 'scipy.sparse.issparse', 'issparse', (['X'], {}), '(X)\n', (5815, 5818), False, 'from scipy.sparse import issparse\n'), ((6202, 6226), 'torch.LongTensor', 'torch.LongTensor', (['values'], {}), '(values)\n', (6218, 6226), False, 'import torch\n'), ((1560, 1578), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1576, 1578), False, 'import sys\n'), ((1963, 1998), 'csv.writer', 'csv.writer', (['tsvfile'], {'delimiter': '"""\t"""'}), "(tsvfile, delimiter='\\t')\n", (1973, 1998), False, 'import csv\n'), ((2739, 2774), 'csv.reader', 'csv.reader', (['tsvfile'], {'delimiter': '"""\t"""'}), "(tsvfile, delimiter='\\t')\n", (2749, 2774), False, 'import csv\n'), ((6091, 6123), 'torch.zeros', 'torch.zeros', (['X.shape[0]', 'max_len'], {}), '(X.shape[0], max_len)\n', (6102, 6123), False, 'import torch\n'), ((6671, 6688), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (6682, 6688), False, 'import random\n'), ((6965, 6991), 'random.shuffle', 'random.shuffle', (['self.order'], {}), '(self.order)\n', (6979, 6991), False, 'import random\n'), ((1357, 1397), 'numpy.ceil', 'np.ceil', (['((i + 1) / self.nf * self.length)'], {}), '((i + 1) / self.nf * self.length)\n', 
(1364, 1397), True, 'import numpy as np\n'), ((781, 787), 'time.time', 'time', ([], {}), '()\n', (785, 787), False, 'from time import time\n')] |
import os
import matplotlib.pyplot as plt
import numpy as np
from utils.util import save_fig
#http://matplotlib.org/examples/pylab_examples/subplots_demo.html
plt.figure(figsize=(12,4))
ks = range(1,10)
ys = [1.0/k for k in ks]
print(ys)
plt.subplot(1,3,1)
plt.plot(ks, np.log(ys), color = 'r')
plt.title('Sublinear convergence')
ys = [1.0/(2**k) for k in ks]
print(ys)
plt.subplot(1,3,2)
plt.plot(ks, np.log(ys), color = 'g')
plt.title('Linear convergence')
ys = [1.0/(2**(2**k)) for k in ks]
print(ys)
plt.subplot(1,3,3)
plt.plot(ks, np.log(ys), color = 'b')
plt.title('Quadratic convergence')
#fig.subplots_adjust(hspace=0)
plt.tight_layout()
plt.draw()
plt.show()
plt.show()
fname = 'convergenceRates.pdf'
print(fname)
save_fig(fname)
if 0:
plt.figure(figsize=(12,4))
fig, (ax1, ax2, ax3) = plt.subplots(1,3)
ks = range(1,10)
ys = [1.0/k for k in ks]
print(ys)
ax1.plot(ks, np.log(ys), color = 'r')
ax1.set_title('Sublinear convergence')
ys = [1.0/(2**k) for k in ks]
print(ys)
ax2.plot(ks, np.log(ys), color = 'g')
ax2.set_title('Linear convergence')
ys = [1.0/(2**(2**k)) for k in ks]
print(ys)
ax3.plot(ks, np.log(ys), color = 'b')
ax3.set_title('Quadratic convergence')
#fig.subplots_adjust(hspace=0)
plt.tight_layout()
plt.draw()
save_fig('convergenceRates.png')
plt.show() | [
"numpy.log",
"utils.util.save_fig",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((161, 188), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (171, 188), True, 'import matplotlib.pyplot as plt\n'), ((241, 261), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (252, 261), True, 'import matplotlib.pyplot as plt\n'), ((298, 332), 'matplotlib.pyplot.title', 'plt.title', (['"""Sublinear convergence"""'], {}), "('Sublinear convergence')\n", (307, 332), True, 'import matplotlib.pyplot as plt\n'), ((374, 394), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (385, 394), True, 'import matplotlib.pyplot as plt\n'), ((431, 462), 'matplotlib.pyplot.title', 'plt.title', (['"""Linear convergence"""'], {}), "('Linear convergence')\n", (440, 462), True, 'import matplotlib.pyplot as plt\n'), ((509, 529), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (520, 529), True, 'import matplotlib.pyplot as plt\n'), ((566, 600), 'matplotlib.pyplot.title', 'plt.title', (['"""Quadratic convergence"""'], {}), "('Quadratic convergence')\n", (575, 600), True, 'import matplotlib.pyplot as plt\n'), ((633, 651), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (649, 651), True, 'import matplotlib.pyplot as plt\n'), ((652, 662), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (660, 662), True, 'import matplotlib.pyplot as plt\n'), ((663, 673), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (671, 673), True, 'import matplotlib.pyplot as plt\n'), ((679, 689), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (687, 689), True, 'import matplotlib.pyplot as plt\n'), ((734, 749), 'utils.util.save_fig', 'save_fig', (['fname'], {}), '(fname)\n', (742, 749), False, 'from utils.util import save_fig\n'), ((273, 283), 'numpy.log', 'np.log', (['ys'], {}), '(ys)\n', (279, 283), True, 'import numpy as np\n'), ((406, 416), 'numpy.log', 'np.log', (['ys'], {}), '(ys)\n', (412, 416), 
True, 'import numpy as np\n'), ((541, 551), 'numpy.log', 'np.log', (['ys'], {}), '(ys)\n', (547, 551), True, 'import numpy as np\n'), ((762, 789), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 4)'}), '(figsize=(12, 4))\n', (772, 789), True, 'import matplotlib.pyplot as plt\n'), ((816, 834), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {}), '(1, 3)\n', (828, 834), True, 'import matplotlib.pyplot as plt\n'), ((1310, 1328), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1326, 1328), True, 'import matplotlib.pyplot as plt\n'), ((1333, 1343), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1341, 1343), True, 'import matplotlib.pyplot as plt\n'), ((1353, 1385), 'utils.util.save_fig', 'save_fig', (['"""convergenceRates.png"""'], {}), "('convergenceRates.png')\n", (1361, 1385), False, 'from utils.util import save_fig\n'), ((1395, 1405), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1403, 1405), True, 'import matplotlib.pyplot as plt\n'), ((920, 930), 'numpy.log', 'np.log', (['ys'], {}), '(ys)\n', (926, 930), True, 'import numpy as np\n'), ((1058, 1068), 'numpy.log', 'np.log', (['ys'], {}), '(ys)\n', (1064, 1068), True, 'import numpy as np\n'), ((1198, 1208), 'numpy.log', 'np.log', (['ys'], {}), '(ys)\n', (1204, 1208), True, 'import numpy as np\n')] |
"""
BlackHole.py
Author: <NAME>
Affiliation: University of Colorado at Boulder
Created on: Mon Jul 8 09:56:38 MDT 2013
Description:
"""
import numpy as np
from .Star import _Planck
from .Source import Source
from types import FunctionType
from scipy.integrate import quad
from ..util.ReadData import read_lit
from ..util.SetDefaultParameterValues import BlackHoleParameters
from ..physics.CrossSections import PhotoIonizationCrossSection as sigma_E
from ..physics.Constants import s_per_myr, G, g_per_msun, c, t_edd, m_p, \
sigma_T, sigma_SB
sptypes = ['pl', 'mcd', 'simpl']
class BlackHole(Source):
def __init__(self, **kwargs):
"""
Initialize a black hole object.
Parameters
----------
pf: dict
Full parameter file.
src_pars: dict
Contains source-specific parameters.
spec_pars: dict
Contains spectrum-specific parameters.
"""
self.pf = BlackHoleParameters()
self.pf.update(kwargs)
Source.__init__(self)
self._name = 'bh'
self.M0 = self.pf['source_mass']
self.epsilon = self.pf['source_eta']
# Duty cycle parameters
self.tau = self.pf['source_lifetime'] * s_per_myr
self.fduty = self.pf['source_fduty']
self.variable = self.fduty < 1
#if self.src_pars['fduty'] == 1:
# self.variable = self.tau < self.pf['stop_time']
self.toff = self.tau * (self.fduty**-1. - 1.)
# Disk properties
self.last_renormalized = 0.0
self.r_in = self._DiskInnermostRadius(self.M0)
self.r_out = self.pf['source_rmax'] * self._GravitationalRadius(self.M0)
self.T_in = self._DiskInnermostTemperature(self.M0)
self.T_out = self._DiskTemperature(self.M0, self.r_out)
self.Lbol = self.Luminosity(0.0)
self.disk_history = {}
#if 'mcd' in self.spec_pars['type']:
# self.fcol = self.spec_pars['fcol'][self.spec_pars['type'].index('mcd')]
#if 'simpl' in self.spec_pars['type']:
# self.fcol = self.spec_pars['fcol'][self.spec_pars['type'].index('simpl')]
#if 'zebra' in self.pf['source_sed']:
# self.T = self.src_pars['temperature']#[self.spec_pars['type'].index('zebra')]
if self.pf['source_sed'] in sptypes:
pass
elif type(self.pf['source_sed']) is FunctionType:
self._UserDefined = self.pf['source_sed']
else:
from_lit = read_lit(self.pf['source_sed'])
src = from_lit.Source()
self._UserDefined = src.Spectrum
# Convert spectral types to strings
#self.N = len(self.spec_pars['type'])
#self.type_by_num = []
#self.type_by_name = []
#for i, sptype in enumerate(self.spec_pars['type']):
# if type(sptype) != int:
#
# if sptype in sptypes:
# self.type_by_name.append(sptype)
# self.type_by_num.append(sptypes[sptype])
# elif type(sptype) is FunctionType:
# self._UserDefined = sptype
# else:
# from_lit = read_lit(sptype)
# self._UserDefined = from_lit.Spectrum
#
# continue
#
# self.type_by_num.append(sptype)
# self.type_by_name.append(sptypes.keys()[sptypes.values().index(sptype)])
def _SchwartzchildRadius(self, M):
return 2. * self._GravitationalRadius(M)
def _GravitationalRadius(self, M):
""" Half the Schwartzchild radius. """
return G * M * g_per_msun / c**2
def _MassAccretionRate(self, M=None):
return self.Luminosity(0, M=M) / self.epsilon / c**2
def _DiskInnermostRadius(self, M):
"""
Inner radius of disk. Unless SourceISCO > 0, will be set to the
inner-most stable circular orbit for a BH of mass M.
"""
return self.pf['source_isco'] * self._GravitationalRadius(M)
def _DiskInnermostTemperature(self, M):
"""
Temperature (in Kelvin) at inner edge of the disk.
"""
return (3. * G * M * g_per_msun * self._MassAccretionRate(M) / \
8. / np.pi / self._DiskInnermostRadius(M)**3 / sigma_SB)**0.25
def _DiskTemperature(self, M, r):
return ((3. * G * M * g_per_msun * self._MassAccretionRate(M) / \
8. / np.pi / r**3 / sigma_SB) * \
(1. - (self._DiskInnermostRadius(M) / r)**0.5))**0.25
def _PowerLaw(self, E, t=0.0):
"""
A simple power law X-ray spectrum - this is proportional to the
*energy* emitted at E, not the number of photons.
"""
return E**self.pf['source_alpha']
def _SIMPL(self, E, t=0.0):
"""
Purpose:
--------
Convolve an input spectrum with a Comptonization kernel.
Inputs:
-------
Gamma - Power-law index, LE ~ E**(-Gamma)
fsc - Fraction of seed photons that get scattered
(assumes all input photons have same probability of being scattered
and that scattering is energy-independent)
fref - Of the photons that impact the disk after a scattering, this is the
fraction that reflect back off the disk to the observer instead of
being absorbed and thermalized (default 1)
uponly - False: SIMPL-2, non-rel Comptonization, up- and down-scattering
True: SIMPL-1, relativistic Comptoniztion, up-scattering only
Outputs: (dictionary)
--------
LE - Absorbed power-law luminosity array [keV s^-1]
E - Energy array [keV]
dE - Differential energy array [keV]
References
----------
Steiner et al. (2009). Thanks <NAME> for the code!
"""
# Input photon distribution
if self.pf['source_sed'] == 'zebra':
nin = lambda E0: _Planck(E0, self.T) / E0
else:
nin = lambda E0: self._MultiColorDisk(E0, t) / E0
fsc = self.pf['source_fsc']
# Output photon distribution - integrate in log-space
integrand = lambda E0: nin(10**E0) \
* self._GreensFunctionSIMPL(10**E0, E) * 10**E0
nout = (1.0 - fsc) * nin(E) + fsc \
* quad(integrand, np.log10(self.Emin),
np.log10(self.Emax))[0] * np.log(10.)
# Output spectrum
return nout * E
def _GreensFunctionSIMPL(self, Ein, Eout):
"""
Must perform integral transform to compute output photon distribution.
"""
# Careful with Gamma...
# In Steiner et al. 2009, Gamma is n(E) ~ E**(-Gamma),
# but n(E) and L(E) are different by a factor of E (see below)
Gamma = -self.pf['source_alpha'] + 1.0
if self.pf['source_uponly']:
if Eout >= Ein:
return (Gamma - 1.0) * (Eout / Ein)**(-1.0 * Gamma) / Ein
else:
return 0.0
else:
if Eout >= Ein:
return (Gamma - 1.0) * (Gamma + 2.0) / (1.0 + 2.0 * Gamma) * \
(Eout / Ein)**(-1.0 * Gamma) / Ein
else:
return (Gamma - 1.0) * (Gamma + 2.0) / (1.0 + 2.0 * Gamma) * \
(Eout / Ein)**(Gamma + 1.0) / Ein
def _MultiColorDisk(self, E, t=0.0):
"""
Soft component of accretion disk spectra.
References
----------
Mitsuda et al. 1984, PASJ, 36, 741.
"""
# If t > 0, re-compute mass, inner radius, and inner temperature
if t > 0 and self.pf['source_evolving'] \
and t != self.last_renormalized:
self.M = self.Mass(t)
self.r_in = self._DiskInnermostRadius(self.M)
self.r_out = self.pf['source_rmax'] * self._GravitationalRadius(self.M)
self.T_in = self._DiskInnermostTemperature(self.M)
self.T_out = self._DiskTemperature(self.M, self.r_out)
integrand = lambda T: (T / self.T_in)**(-11. / 3.) \
* _Planck(E, T) / self.T_in
return quad(integrand, self.T_out, self.T_in)[0]
def SourceOn(self, t):
""" See if source is on. Provide t in code units. """
if not self.variable:
return True
if t < self.tau:
return True
if self.fduty == 1:
return False
nacc = t / (self.tau + self.toff)
if nacc % 1 < self.fduty:
return True
else:
return False
def _Intensity(self, E, t=0, absorb=True):
"""
Return quantity *proportional* to fraction of bolometric luminosity
emitted at photon energy E. Normalization handled separately.
"""
if self.pf['source_sed'] == 'pl':
Lnu = self._PowerLaw(E, t)
elif self.pf['source_sed'] == 'mcd':
Lnu = self._MultiColorDisk(E, t)
elif self.pf['source_sed'] == 'sazonov2004':
Lnu = self._UserDefined(E, t)
elif self.pf['source_sed'] == 'simpl':
Lnu = self._SIMPL(E, t)
elif self.pf['source_sed'] == 'zebra':
Lnu = self._SIMPL(E, t)
else:
Lnu = 0.0
if self.pf['source_logN'] > 0 and absorb:
Lnu *= self._hardening_factor(E)
return Lnu
#def _NormalizeSpectrum(self, t=0.):
# Lbol = self.Luminosity()
# # Treat simPL spectrum special
# if self.pf['source_sed'] == 'simpl':
# integral, err = quad(self._MultiColorDisk,
# self.EminNorm, self.EmaxNorm, args=(t, False))
# else:
# integral, err = quad(self._Intensity,
# self.EminNorm, self.EmaxNorm, args=(t, False))
#
# norms = Lbol / integral
#
# return norms
def Luminosity(self, t=0.0, M=None):
"""
Returns the bolometric luminosity of a source in units of erg/s.
For accreting black holes, the bolometric luminosity will increase
with time, hence the optional 't' and 'M' arguments.
"""
if not self.SourceOn(t):
return 0.0
Mnow = self.Mass(t)
if M is not None:
Mnow = M
return self.epsilon * 4.0 * np.pi * G * Mnow * g_per_msun * m_p \
* c / sigma_T
def Mass(self, t):
"""
Compute black hole mass after t (seconds) have elapsed. Relies on
initial mass self.M, and (constant) radiaitive efficiency self.epsilon.
"""
if self.variable:
nlifetimes = int(t / (self.tau + self.toff))
dtacc = nlifetimes * self.tau
M0 = self.M0 * np.exp(((1.0 - self.epsilon) / self.epsilon) * dtacc / t_edd)
dt = t - nlifetimes * (self.tau + self.toff)
else:
M0 = self.M0
dt = t
return M0 * np.exp(((1.0 - self.epsilon) / self.epsilon) * dt / t_edd)
def Age(self, M):
"""
Compute age of black hole based on current time, current mass, and initial mass.
"""
return np.log(M / self.M0) * (self.epsilon / (1. - self.epsilon)) * t_edd
| [
"scipy.integrate.quad",
"numpy.exp",
"numpy.log",
"numpy.log10"
] | [((8536, 8574), 'scipy.integrate.quad', 'quad', (['integrand', 'self.T_out', 'self.T_in'], {}), '(integrand, self.T_out, self.T_in)\n', (8540, 8574), False, 'from scipy.integrate import quad\n'), ((11555, 11611), 'numpy.exp', 'np.exp', (['((1.0 - self.epsilon) / self.epsilon * dt / t_edd)'], {}), '((1.0 - self.epsilon) / self.epsilon * dt / t_edd)\n', (11561, 11611), True, 'import numpy as np\n'), ((6703, 6715), 'numpy.log', 'np.log', (['(10.0)'], {}), '(10.0)\n', (6709, 6715), True, 'import numpy as np\n'), ((11347, 11406), 'numpy.exp', 'np.exp', (['((1.0 - self.epsilon) / self.epsilon * dtacc / t_edd)'], {}), '((1.0 - self.epsilon) / self.epsilon * dtacc / t_edd)\n', (11353, 11406), True, 'import numpy as np\n'), ((11811, 11830), 'numpy.log', 'np.log', (['(M / self.M0)'], {}), '(M / self.M0)\n', (11817, 11830), True, 'import numpy as np\n'), ((6640, 6659), 'numpy.log10', 'np.log10', (['self.Emin'], {}), '(self.Emin)\n', (6648, 6659), True, 'import numpy as np\n'), ((6677, 6696), 'numpy.log10', 'np.log10', (['self.Emax'], {}), '(self.Emax)\n', (6685, 6696), True, 'import numpy as np\n')] |
from distutils.core import setup, Extension
import Cython
from Cython.Build import cythonize
import numpy
setup(
ext_modules=cythonize("special_partition.pyx"),
include_dirs=[numpy.get_include()]
)
"""
Build instructions:
------------------
> cd special_partition
> python setup.py build_ext --inplace
""" | [
"Cython.Build.cythonize",
"numpy.get_include"
] | [((130, 164), 'Cython.Build.cythonize', 'cythonize', (['"""special_partition.pyx"""'], {}), "('special_partition.pyx')\n", (139, 164), False, 'from Cython.Build import cythonize\n'), ((184, 203), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (201, 203), False, 'import numpy\n')] |
import numpy as np
from numba import jit, int32, float32, double, cfunc
from numba.experimental import jitclass
spec = [
('x', double[:]), ('dq', double[:]), ('u', double[:]),
('m', double), ('Iz', double),
('lf', double), ('lr', double),
('Bf', double), ('Cf', double), ('Df', double),
('Br', double), ('Cr', double), ('Dr', double),
('Cr0', double), ('Cr2', double),
('Cm1', double), ('Cm2', double),
('iterNum', int32), ('sim_dt', double), ('control_dt', double),
('car_shape', double[:,:]),
]
@jitclass(spec)
class VehicleSimModel(object):
def __init__(self, m=0.041, Iz=27.8E-6,
lf=0.029, lr=0.033,
Bf=2.579, Cf=1.2, Df=0.192,
Br=3.3852, Cr=1.2691, Dr=0.1737,
Cm1=0.287, Cm2=0.0545,
Cr0= 0.0518,
Cr2=0.00035, scale=1.0, control_dt = 10.0, sim_dt=1.0):
self.x = np.asfortranarray(np.zeros(3, dtype=np.float64))
self.dq = np.zeros(3, dtype=np.float64)
self.u = np.zeros(2, dtype=np.float64)
self.m = m
self.Iz= Iz
self.lf = lf
self.lr = lr
self.Bf = Bf
self.Cf = Cf
self.Df = Df
self.Br = Br
self.Cr = Cr
self.Dr = Dr
self.Cr0 = Cr0
self.Cr2 = Cr2
self.Cm1 = Cm1
self.Cm2 = Cm2
car_l = (lf + lr)/2 * scale
car_w = car_l/2
self.car_shape = np.asfortranarray(np.array([ [ car_l, car_w, 1.],
[ car_l,-car_w, 1.],
[-car_l,-car_w, 1.],
[-car_l, car_w, 1.],
[ car_l, car_w, 1.]], dtype=np.float64))
self.sim_dt = sim_dt
self.control_dt = control_dt
self.iterNum = int(self.control_dt/self.sim_dt)
@property
def shape(self):
shape = np.dot(self.car_shape,
np.asfortranarray(np.array([
[ np.cos(self.x[2]), np.sin(self.x[2]), 0.],
[-np.sin(self.x[2]), np.cos(self.x[2]), 0.],
[ self.x[0] , self.x[1] , 1.]], dtype=np.float64)))
return shape[:,:2]
def ODE_rh_eq(self, x, dq, u):
return self.ODE_rh_eq_x(x, dq, u), self.ODE_rh_eq_dq(x, dq, u)
def ODE_rh_eq_dq(self, x, dq, u):
ddq = np.zeros_like(dq)
alpha_f = np.arctan2(dq[1] + self.lf * dq[2], dq[0]) - u[1]
alpha_r = np.arctan2(dq[1] - self.lr * dq[2], dq[0])
Ffy = - self.Df * np.sin(self.Cf*np.arctan(self.Bf*alpha_f))
Fry = - self.Dr * np.sin(self.Cr*np.arctan(self.Br*alpha_r))
Frx = self.Cm1*u[0]-self.Cm2*u[0]*dq[0]-self.Cr0-self.Cr2*dq[0]**2
ddq[0] = dq[1] * dq[2] + 1.0/self.m * (Frx - Ffy*np.sin(u[1]) )
ddq[1] = -dq[0] * dq[2] + 1.0/self.m * (Fry + Ffy*np.cos(u[1]) )
ddq[2] = 1.0/self.Iz * (Ffy * self.lf * np.cos(u[1]) - Fry * self.lr)
return ddq
def ODE_rh_eq_x(self, x, dq, u):
dx = np.zeros_like(x)
dx[0:2] = np.dot(np.array([[np.cos(x[2]), -np.sin(x[2])],
[np.sin(x[2]), np.cos(x[2])]]),
dq[0:2])
dx[2] = dq[2]
return dx
def RK4(self, u, dt):
k1x, k1dq = self.ODE_rh_eq(self.x, self.dq, u)
k2x, k2dq = self.ODE_rh_eq(self.x+k1x*dt/2, self.dq+k1dq*dt/2, u)
k3x, k3dq = self.ODE_rh_eq(self.x+k2x*dt/2, self.dq+k2dq*dt/2, u)
k4x, k4dq = self.ODE_rh_eq(self.x+k3x*dt/2, self.dq+k3dq*dt/2, u)
self.x = self.x + dt * (k1x/6 + k2x/3 + k3x/3 + k4x/6)
self.dq = self.dq + dt * (k1dq/6 + k2dq/3 + k3dq/3 + k4dq/6)
def LeapFrog(self, u, dt):
dx = self.ODE_rh_eq_x(self.x, self.dq, u)
ddq = self.ODE_rh_eq_dq(self.x, self.dq, u)
self.x = self.x + dx * dt + ddq*dt**2/2
ddq2= self.ODE_rh_eq_dq(self.x, self.dq, u)
self.dq = self.dq + (ddq + ddq2)*dt/2
def sim_step(self, u):
for i in range(self.iterNum):
self.RK4(u, self.sim_dt/1000)
#self.LeapFrog(u, self.sim_dt/1000)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import matplotlib.animation as animation
v = VehicleSimModel(scale=0.01)
v.dq[0] = 0.2
v.u[1] = np.deg2rad(20)
x = np.array([v.x[0]])
y = np.array([v.x[1]])
car = []
vhit = []
for i in range(100):
v.sim_step(v.u)
x = np.append(x, v.x[0])
y = np.append(y, v.x[1])
carX, carY = v.shape.T
car.append((carX, carY))
vhit.append(v.dq[0])
plt.figure(0)
plt.plot(x, y, "ob-", label="trajectory")
for carX, carY in car:
plt.plot(carX, carY, 'k-')
plt.axis("equal")
plt.figure(1)
plt.plot(range(len(vhit)), vhit)
plt.grid(True)
plt.show()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"numba.experimental.jitclass",
"numpy.append",
"numpy.array",
"numpy.deg2rad",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.arctan2",
"numpy.arctan",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.axis",
"numpy.zeros_like",
"matplotl... | [((536, 550), 'numba.experimental.jitclass', 'jitclass', (['spec'], {}), '(spec)\n', (544, 550), False, 'from numba.experimental import jitclass\n'), ((4413, 4427), 'numpy.deg2rad', 'np.deg2rad', (['(20)'], {}), '(20)\n', (4423, 4427), True, 'import numpy as np\n'), ((4437, 4455), 'numpy.array', 'np.array', (['[v.x[0]]'], {}), '([v.x[0]])\n', (4445, 4455), True, 'import numpy as np\n'), ((4464, 4482), 'numpy.array', 'np.array', (['[v.x[1]]'], {}), '([v.x[1]])\n', (4472, 4482), True, 'import numpy as np\n'), ((4725, 4738), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (4735, 4738), True, 'import matplotlib.pyplot as plt\n'), ((4743, 4784), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""ob-"""'], {'label': '"""trajectory"""'}), "(x, y, 'ob-', label='trajectory')\n", (4751, 4784), True, 'import matplotlib.pyplot as plt\n'), ((4851, 4868), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (4859, 4868), True, 'import matplotlib.pyplot as plt\n'), ((4873, 4886), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (4883, 4886), True, 'import matplotlib.pyplot as plt\n'), ((4928, 4942), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4936, 4942), True, 'import matplotlib.pyplot as plt\n'), ((4947, 4957), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4955, 4957), True, 'import matplotlib.pyplot as plt\n'), ((1029, 1058), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.float64'}), '(3, dtype=np.float64)\n', (1037, 1058), True, 'import numpy as np\n'), ((1076, 1105), 'numpy.zeros', 'np.zeros', (['(2)'], {'dtype': 'np.float64'}), '(2, dtype=np.float64)\n', (1084, 1105), True, 'import numpy as np\n'), ((2474, 2491), 'numpy.zeros_like', 'np.zeros_like', (['dq'], {}), '(dq)\n', (2487, 2491), True, 'import numpy as np\n'), ((2581, 2623), 'numpy.arctan2', 'np.arctan2', (['(dq[1] - self.lr * dq[2])', 'dq[0]'], {}), '(dq[1] - self.lr * dq[2], dq[0])\n', (2591, 2623), 
True, 'import numpy as np\n'), ((3134, 3150), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (3147, 3150), True, 'import numpy as np\n'), ((4572, 4592), 'numpy.append', 'np.append', (['x', 'v.x[0]'], {}), '(x, v.x[0])\n', (4581, 4592), True, 'import numpy as np\n'), ((4605, 4625), 'numpy.append', 'np.append', (['y', 'v.x[1]'], {}), '(y, v.x[1])\n', (4614, 4625), True, 'import numpy as np\n'), ((4820, 4846), 'matplotlib.pyplot.plot', 'plt.plot', (['carX', 'carY', '"""k-"""'], {}), "(carX, carY, 'k-')\n", (4828, 4846), True, 'import matplotlib.pyplot as plt\n'), ((980, 1009), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.float64'}), '(3, dtype=np.float64)\n', (988, 1009), True, 'import numpy as np\n'), ((1514, 1655), 'numpy.array', 'np.array', (['[[car_l, car_w, 1.0], [car_l, -car_w, 1.0], [-car_l, -car_w, 1.0], [-car_l,\n car_w, 1.0], [car_l, car_w, 1.0]]'], {'dtype': 'np.float64'}), '([[car_l, car_w, 1.0], [car_l, -car_w, 1.0], [-car_l, -car_w, 1.0],\n [-car_l, car_w, 1.0], [car_l, car_w, 1.0]], dtype=np.float64)\n', (1522, 1655), True, 'import numpy as np\n'), ((2512, 2554), 'numpy.arctan2', 'np.arctan2', (['(dq[1] + self.lf * dq[2])', 'dq[0]'], {}), '(dq[1] + self.lf * dq[2], dq[0])\n', (2522, 2554), True, 'import numpy as np\n'), ((2666, 2694), 'numpy.arctan', 'np.arctan', (['(self.Bf * alpha_f)'], {}), '(self.Bf * alpha_f)\n', (2675, 2694), True, 'import numpy as np\n'), ((2735, 2763), 'numpy.arctan', 'np.arctan', (['(self.Br * alpha_r)'], {}), '(self.Br * alpha_r)\n', (2744, 2763), True, 'import numpy as np\n'), ((3033, 3045), 'numpy.cos', 'np.cos', (['u[1]'], {}), '(u[1])\n', (3039, 3045), True, 'import numpy as np\n'), ((2897, 2909), 'numpy.sin', 'np.sin', (['u[1]'], {}), '(u[1])\n', (2903, 2909), True, 'import numpy as np\n'), ((2970, 2982), 'numpy.cos', 'np.cos', (['u[1]'], {}), '(u[1])\n', (2976, 2982), True, 'import numpy as np\n'), ((3187, 3199), 'numpy.cos', 'np.cos', (['x[2]'], {}), '(x[2])\n', (3193, 3199), True, 'import numpy as 
np\n'), ((3253, 3265), 'numpy.sin', 'np.sin', (['x[2]'], {}), '(x[2])\n', (3259, 3265), True, 'import numpy as np\n'), ((3268, 3280), 'numpy.cos', 'np.cos', (['x[2]'], {}), '(x[2])\n', (3274, 3280), True, 'import numpy as np\n'), ((2076, 2093), 'numpy.cos', 'np.cos', (['self.x[2]'], {}), '(self.x[2])\n', (2082, 2093), True, 'import numpy as np\n'), ((2096, 2113), 'numpy.sin', 'np.sin', (['self.x[2]'], {}), '(self.x[2])\n', (2102, 2113), True, 'import numpy as np\n'), ((2169, 2186), 'numpy.cos', 'np.cos', (['self.x[2]'], {}), '(self.x[2])\n', (2175, 2186), True, 'import numpy as np\n'), ((3202, 3214), 'numpy.sin', 'np.sin', (['x[2]'], {}), '(x[2])\n', (3208, 3214), True, 'import numpy as np\n'), ((2149, 2166), 'numpy.sin', 'np.sin', (['self.x[2]'], {}), '(self.x[2])\n', (2155, 2166), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from flags import CONST
from sklearn.model_selection import train_test_split
from keras import backend as K
from tensorflow.keras.models import Sequential, model_from_json
from tensorflow.keras.layers import SimpleRNN, Embedding, Dense, Dropout
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
class Classifier():
    def __init__(self):
        """Initialize the classifier with no training history yet."""
        # Set by train() to the History object returned by model.fit().
        self.history = None
def _reshape(self, x):
"""
LSTM 계열의 레이어 사용시 필요한 (total, embedding_dim, 1) 형태의 shape로 변환
- input
: x / nparray / 변환하려는 배열
- return
: nparray
"""
return x.reshape(x.shape[0], x.shape[1], 1)
def _dataSeperator(self, data, test_size=0.33):
"""
데이터 분할
- input
: data / DataFrame / documents.csv 데이터
: test_size / float / 데이터 분할 비율
- return
: [nparray, nparray, nparray, nparray]
"""
X_train, X_test, y_train, y_test = train_test_split(data.vector,
data.label,
test_size=test_size,
random_state=321)
X_train = np.array(X_train.tolist(), dtype=np.float32)
X_test = np.array(X_test.tolist(), dtype=np.float32)
y_train = np.array(y_train.tolist(), dtype=np.int32)
y_test = np.array(y_test.tolist(), dtype=np.int32)
return X_train, X_test, np.asarray(y_train), np.asarray(y_test)
def train(self,
data,
checkpoint_path,
epochs=75,
batch_size=100,
validation_split=0.1,
verbose=0):
"""
모델 학습
- input
: data / DataFrame / documents.csv 데이터
: checkpoint_path / str / 학습 중간 결과물 저장 경로
: epochs / int / 학습 횟수
: batch_size / int / 배치 사이즈
: validation_split / float / validation data ratio
: verbose / int / 0 = silent, 1 = progress bar, 2 = one line per epoch.
- return
: classifier
- export
: ./model/classifier.json (graph)
: ./model/classifier.h5 (weights)
"""
# seperate data
X_train, X_test, y_train, y_test = self._dataSeperator(data)
# model
K.clear_session()
model = Sequential()
model.add(Dense(100, activation='relu', kernel_initializer='he_normal', input_shape=(X_train.shape[1],)))
model.add(Dense(80, activation='relu', kernel_initializer='he_normal'))
model.add(Dense(2, activation='softmax'))
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['acc',
self.f1_m,
self.precision_m,
self.recall_m])
model.summary()
# checkpoint
checkpoint = ModelCheckpoint(filepath=checkpoint_path, mode='max', monitor='val_acc', verbose=2, save_best_only=True)
# early stopping
earlystop_callback = EarlyStopping(monitor='val_acc', min_delta=0.001, patience=3)
self.history = model.fit(X_train,
y_train,
epochs=epochs,
batch_size=batch_size,
validation_split=validation_split,
callbacks=[checkpoint, earlystop_callback])
loss, accuracy, f1_score, precision, recall = model.evaluate(X_test, y_test, verbose=verbose)
print(f'🐈 loss : {loss}')
print(f'🐈 accuracy : {accuracy}')
print(f'🐈 f1_score : {f1_score}')
print(f'🐈 precision : {precision}')
print(f'🐈 recall : {recall}')
return model
def predict(self, cf_model, vector, criterion=0.5):
"""
개발관련 문서여부 예측
- input
: cf_model / classifier model
: vector / np.array / embedded vector
: criterion / float / 개발관련 문서 판단 기준
- return
: boolean / 개발문서 여부
: float / 1에 가까울수록 개발관련 문서
"""
confidence = round(cf_model.predict(vector)[0][1], 3)
is_dev_doc = confidence > criterion
return is_dev_doc, confidence
def saveModel(self, model, cf_model_path):
"""
모델의 parameter와 weights를 저장한다.
- input
: model / classifier
: cf_model_path / str / 저장할 경로
- export
: ./model/classifier.json / parameter
: ./model/classifier.h5 / weights
"""
# save model
model_json = model.to_json()
with open(cf_model_path + '.json', "w") as json_file :
json_file.write(model_json)
# save weights
model.save_weights(cf_model_path + '.h5')
def loadModel(self, cf_model_path):
"""
모델을 불러옴
- input
: cf_model_path / str / 불러올 모델
- return
: classifier
"""
# load model
with open(cf_model_path + '.json', "r") as json_file:
json_model = json_file.read()
model = model_from_json(json_model)
# load weight
model.load_weights(cf_model_path + '.h5')
return model
def showHistory(self):
"""
train history를 그래프로 나타냄
"""
if self.history == None:
print('🐈 학습내역이 없습니다.')
return
fig, loss_ax = plt.subplots()
acc_ax = loss_ax.twinx()
acc_ax.plot(self.history.history['acc'], 'b', label='train acc')
acc_ax.plot(self.history.history['val_acc'], 'g', label='val acc')
acc_ax.set_ylabel('accuracy')
acc_ax.legend(loc='upper left')
plt.show()
def recall_m(self, y_true, y_pred):
"""
재현율(실제 True인 것 중에서 모델이 True라고 예측한 것의 비율) 계산
- input
: y_true / int / 정답
: y_pred / int / 모델 예측결과
- return
: float
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(self, y_true, y_pred):
"""
정밀도(모델이 True라고 분류한 것 중에서 실제 True인 것의 비율) 계산
- input
: y_true / int / 정답
: y_pred / int / 모델 예측결과
- return
: float
"""
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(self, y_true, y_pred):
"""
F1 score(Precision과 Recall의 조화평균) 계산
- input
: y_true / int / 정답
: y_pred / int / 모델 예측결과
- return
: float
"""
precision = self.precision_m(y_true, y_pred)
recall = self.recall_m(y_true, y_pred)
return 2 * ((precision * recall)/(precision + recall + K.epsilon()))
| [
"sklearn.model_selection.train_test_split",
"tensorflow.keras.models.model_from_json",
"keras.backend.clip",
"numpy.asarray",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Dense",
"keras.backend.clear_session",
"tensorflow.keras.callbacks.ModelCheckpoint",
"keras.backend.epsil... | [((1075, 1160), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data.vector', 'data.label'], {'test_size': 'test_size', 'random_state': '(321)'}), '(data.vector, data.label, test_size=test_size, random_state=321\n )\n', (1091, 1160), False, 'from sklearn.model_selection import train_test_split\n'), ((2495, 2512), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (2510, 2512), True, 'from keras import backend as K\n'), ((2529, 2541), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2539, 2541), False, 'from tensorflow.keras.models import Sequential, model_from_json\n'), ((3139, 3247), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'checkpoint_path', 'mode': '"""max"""', 'monitor': '"""val_acc"""', 'verbose': '(2)', 'save_best_only': '(True)'}), "(filepath=checkpoint_path, mode='max', monitor='val_acc',\n verbose=2, save_best_only=True)\n", (3154, 3247), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((3307, 3368), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_acc"""', 'min_delta': '(0.001)', 'patience': '(3)'}), "(monitor='val_acc', min_delta=0.001, patience=3)\n", (3320, 3368), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((5404, 5431), 'tensorflow.keras.models.model_from_json', 'model_from_json', (['json_model'], {}), '(json_model)\n', (5419, 5431), False, 'from tensorflow.keras.models import Sequential, model_from_json\n'), ((5746, 5760), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5758, 5760), True, 'import matplotlib.pyplot as plt\n'), ((6028, 6038), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6036, 6038), True, 'import matplotlib.pyplot as plt\n'), ((1612, 1631), 'numpy.asarray', 'np.asarray', (['y_train'], {}), '(y_train)\n', (1622, 1631), True, 'import numpy as np\n'), ((1633, 1651), 
'numpy.asarray', 'np.asarray', (['y_test'], {}), '(y_test)\n', (1643, 1651), True, 'import numpy as np\n'), ((2560, 2659), 'tensorflow.keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'input_shape': '(X_train.shape[1],)'}), "(100, activation='relu', kernel_initializer='he_normal', input_shape=(\n X_train.shape[1],))\n", (2565, 2659), False, 'from tensorflow.keras.layers import SimpleRNN, Embedding, Dense, Dropout\n'), ((2674, 2734), 'tensorflow.keras.layers.Dense', 'Dense', (['(80)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""'}), "(80, activation='relu', kernel_initializer='he_normal')\n", (2679, 2734), False, 'from tensorflow.keras.layers import SimpleRNN, Embedding, Dense, Dropout\n'), ((2754, 2784), 'tensorflow.keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (2759, 2784), False, 'from tensorflow.keras.layers import SimpleRNN, Embedding, Dense, Dropout\n'), ((6327, 6356), 'keras.backend.clip', 'K.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), '(y_true * y_pred, 0, 1)\n', (6333, 6356), True, 'from keras import backend as K\n'), ((6402, 6422), 'keras.backend.clip', 'K.clip', (['y_true', '(0)', '(1)'], {}), '(y_true, 0, 1)\n', (6408, 6422), True, 'from keras import backend as K\n'), ((6481, 6492), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (6490, 6492), True, 'from keras import backend as K\n'), ((6803, 6832), 'keras.backend.clip', 'K.clip', (['(y_true * y_pred)', '(0)', '(1)'], {}), '(y_true * y_pred, 0, 1)\n', (6809, 6832), True, 'from keras import backend as K\n'), ((6879, 6899), 'keras.backend.clip', 'K.clip', (['y_pred', '(0)', '(1)'], {}), '(y_pred, 0, 1)\n', (6885, 6899), True, 'from keras import backend as K\n'), ((6962, 6973), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (6971, 6973), True, 'from keras import backend as K\n'), ((7397, 7408), 'keras.backend.epsilon', 'K.epsilon', ([], 
{}), '()\n', (7406, 7408), True, 'from keras import backend as K\n')] |
import argparse
import glob
import os
import pickle
import sys
import time
from itertools import product
import matplotlib.pyplot as plt
import multiprocessing as mp
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.nonparametric.api as smnp
import swifter
import utils
import graphs
N_PROC = 10
BASE_DIR = '/home/johnmcbride/projects/Scales/Data_compare/'
RAW_DIR = '/home/johnmcbride/projects/Scales/Toy_model/Data/Raw/'
PRO_DIR = '/home/johnmcbride/projects/Scales/Toy_model/Data/Processed/'
REAL_DIR = os.path.join(BASE_DIR, 'Processed/Real', 'Samples')
DIST_DIR = os.path.join(BASE_DIR, 'Processed/Real', 'Sample_dist')
def calc_relative_entropy(pk, qk):
    """Return the relative entropy (KL divergence) of pk with respect to qk.

    Bins where either distribution is non-positive are skipped, which
    avoids log(0) and division by zero.
    """
    total = 0.0
    for idx in range(len(pk)):
        p, q = pk[idx], qk[idx]
        if p > 0 and q > 0:
            total += p * np.log(p / q)
    return total
def calc_jensen_shannon_distance(pk, qk):
    """Return the Jensen-Shannon distance between distributions pk and qk.

    This is the square root of the JS divergence: the mean of the two KL
    divergences of pk and qk against their midpoint distribution.
    """
    midpoint = (pk + qk) / 2.0
    js_divergence = (calc_relative_entropy(pk, midpoint)
                     + calc_relative_entropy(qk, midpoint)) / 2.0
    return js_divergence ** 0.5
def smooth_dist_kde(df, cat='pair_ints', hist=False, nbins=1202):
    """Gaussian-KDE-smoothed distribution of the ';'-separated values in df[cat].

    - input
        : df / DataFrame / rows whose `cat` column holds ';'-joined floats
        : cat / str / column to aggregate ('pair_ints' or 'scale')
        : hist / bool / additionally return a normalised histogram
        : nbins / int / number of histogram bin edges (grid has nbins-1 points)
    - return
        : (grid, kde) or (grid, kde, hist)
    """
    X = [float(x) for y in df.loc[:,cat] for x in y.split(';')]
    kde = smnp.KDEUnivariate(np.array(X))
    kde.fit(kernel='gau', bw='scott', fft=1, gridsize=10000, cut=20)
    grid = np.linspace(0, 1200, num=nbins-1)
    y = np.array([kde.evaluate(x) for x in grid]).reshape(nbins-1)
    if hist:
        # Half-bin offset so that bin centres land exactly on the grid points.
        xtra = (nbins-2)/1200./2.
        bins = np.linspace(-xtra, 1200+xtra, num=nbins)
        # BUGFIX: `normed` was deprecated and removed from np.histogram
        # (NumPy >= 1.24); `density=True` is the drop-in replacement with
        # identical semantics (integral over the range equals 1).
        hist, bins = np.histogram(X, bins=bins, density=True)
        return grid, y, hist
    else:
        return grid, y
def get_KDE(df, cat):
    """Return the KDE curve of df[cat], normalised to unit area."""
    _, kde_curve = smooth_dist_kde(df, cat=cat)
    return kde_curve / np.trapz(kde_curve)
def get_dists_file(s, cat='pair_ints', nbins=1202):
    """Load (or compute and cache) the per-n distributions for sample `s`.

    For n in {5, 7}, returns {n: [grid, kde, hist]}.  Results are cached
    as .npy files under DIST_DIR so the (expensive) KDE is computed at
    most once per sample/category/n combination.
    """
    out = {}
    # BUGFIX: previously `df` was loaded only when a probe file that
    # ignored `cat` was missing, yet it was needed whenever ANY per-`cat`
    # cache file was missing -> potential NameError.  Load it lazily on
    # first cache miss instead.
    df = None
    for n in [5,7]:
        fHist = os.path.join(DIST_DIR, f"{s}_{cat}_n{n}_hist.npy")
        fKDE = os.path.join(DIST_DIR, f"{s}_{cat}_n{n}_kde.npy")
        if os.path.exists(fHist):
            X, hist = np.load(fHist)
            X, kde = np.load(fKDE)
        else:
            if df is None:
                df = pd.read_feather(os.path.join(REAL_DIR, f"{s}.feather"))
            X, kde, hist = smooth_dist_kde(df.loc[df.n_notes==n], cat=cat, hist=True, nbins=nbins)
            np.save(fHist, np.array([X, hist]))
            np.save(fKDE, np.array([X, kde]))
        out[n] = [X, kde, hist]
    return out
def how_much_real_scales_predicted(df, n_real, w, s):
    """Fraction of the n_real real scales found in column f"{s}_w{w:02d}".

    Each non-empty cell holds ';'-joined integer scale ids; the fraction
    is the number of distinct ids over n_real.
    """
    predicted_ids = set()
    for cell in df[f"{s}_w{w:02d}"]:
        if len(cell):
            predicted_ids.update(int(tok) for tok in cell.split(';'))
    return len(predicted_ids) / float(n_real)
def rename_processed_files(f, s='sample_'):
    """Return path `f` with its basename prefixed by `s`.

    The split is printed for progress logging, matching the original
    behaviour.
    """
    directory, base_name = os.path.split(f)
    print(directory, base_name)
    return os.path.join(directory, f"{s}{base_name}")
def load_model_filenames():
    """Return renamed model file paths for every (model, n) pair, n in {5, 7}.

    Reads the best-model path dictionary from BASE_DIR/best_models.pickle.
    """
    # BUGFIX: the pickle file handle was previously opened inline and never
    # closed; `with` guarantees it is released even if unpickling fails.
    with open(os.path.join(BASE_DIR, 'best_models.pickle'), 'rb') as fh:
        paths = pickle.load(fh)
    return [rename_processed_files(paths[k][n]) for k, n in product(paths.keys(), [5,7])]
def calculate_metrics(y1, y2):
    """Compare two curves: RMS error, RMS error of their first differences,
    and the geometric mean of the two.

    Returns [err_sq, deriv_es, sqrt(err_sq * deriv_es)].
    """
    flat1 = y1.reshape(y1.size)
    flat2 = y2.reshape(y2.size)
    diff = flat1 - flat2
    err_sq = np.sqrt(np.dot(diff, diff))
    # First differences approximate the derivative of each curve.
    deriv_diff = np.diff(flat1) - np.diff(flat2)
    deriv_es = np.sqrt(np.dot(deriv_diff, deriv_diff))
    return [err_sq, deriv_es, (err_sq * deriv_es) ** 0.5]
def scale_rsq(Y1, Y2):
    """Coefficient of determination (R^2) of Y2 as a predictor of Y1."""
    residual_ss = np.sum((Y1 - Y2) ** 2)
    total_ss = np.sum((Y1 - np.mean(Y1)) ** 2)
    return 1 - residual_ss / total_ss
if __name__ == "__main__":
    timeS = time.time()
    # CLI: optionally name an alternative partition database; the default
    # 'None' writes the combined sensitivity database at the end.
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('--partabase', action='store', default='None', type=str)
    args = parser.parse_args()
    categories = ['pair_ints', 'scale']
    n_arr = np.arange(4,10,dtype=int)
    # Real scale data sets: theory + instrument collections plus
    # bootstrap-resampled subsets at three sampling fractions.
    samples = ['theory', 'instrument'] + [f"sample_f{frac:3.1f}_{i:02d}" for frac in [0.4, 0.6, 0.8] for i in range(10)]
    files = [f"{s}.feather" for s in samples]
    # Interval- and scale-degree distributions (grid, kde, hist) per sample.
    int_dists = [get_dists_file(s) for s in samples]
    hist_dists = [get_dists_file(s, cat='scale', nbins=42) for s in samples]
    # print(f"Real scales loaded after {(time.time()-timeS)/60.} minutes")
    pro_files = load_model_filenames()
    def extract_stats_each_model(fName):
        # Model metadata is encoded in the file name:
        #   <...>_n<n>_<bias...>_MI<min_int>_MA<max_int>_..._<beta>.feather
        df = pd.read_feather(fName)
        bits = os.path.split(fName)[1].split('_')
        n = int(bits[1].strip('n'))
        idx = [i for i in range(len(bits)) if bits[i][0]=='M'][0]
        bias = '_'.join(bits[2:idx])
        mi = int(bits[idx].strip('MI'))
        ma = int(bits[idx+1].strip('MA'))
        beta = float(bits[-1].strip('.feather'))
        n_sample = df.n_att.sum()
        # Acceptance ratio: accepted scales / attempted scales.
        q = float(len(df))/float(n_sample)
        output = [n, mi, ma, bias, beta, q, n_sample]
        X, iKDE, iHist = smooth_dist_kde(df, cat='pair_ints', hist=True)
        X, sKDE, sHist = smooth_dist_kde(df, cat='scale', hist=True, nbins=42)
        # Compare this model's distributions against every real-data sample.
        for i, f in enumerate(files):
            df_real = pd.read_feather(os.path.join(REAL_DIR, f))
            n_real = len(df_real.loc[df_real.n_notes==n])
            frac_real = [how_much_real_scales_predicted(df, n_real, w, f'{samples[i]}_ss') for w in [10, 20]]
            metrics = calculate_metrics(int_dists[i][n][1], iKDE)
            scale_R2 = scale_rsq(sHist,hist_dists[i][n][2])
            output.extend([n_real] + frac_real + metrics + [scale_R2])
        return output + [fName]
    # Every bias model included in the sensitivity analysis.
    biases = ['none',
              'distI_1_0', 'distI_2_0', 'distI_3_0', 'distI_0_1', 'distI_0_2',
              'distI_1_1', 'distI_2_1', 'distI_1_2', 'distI_2_2',
              'opt_c', 'opt_c_I1', 'opt_c_I2', 'opt_c_s2', 'opt_c_s3'] + \
             [f"hs_n{i}_w{w:02d}" for i in range(1,4) for w in [5,10,15,20]] + \
             [f"hs_r3_w{w:02d}" for w in [5,10,15,20]] + \
             [f"ahs{i:02d}_w{w:02d}" for i in range(1,11) for w in [5,10,15,20]] + \
             [f"im5_r{r:3.1f}_w{w:02d}" for r in [0, 0.5, 1, 2] for w in [5,10,15,20]] + \
             [f"Nhs_n1_w{w:02d}" for w in [5,10,15,20]] + \
             [f"Nhs_n2_w{w:02d}" for w in [5,10,15,20]] + \
             [f"Nhs_n3_w{w:02d}" for w in [5,10,15,20]] + \
             [f"Nim5_r0.0_w{w:02d}" for w in [5,10,15,20]] + \
             [f"TRANSB_{i}" for i in [1,2,3]] + \
             [f"TRANS{a}_{b}" for a in ['A', 'B'] for b in range(1,4)] + \
             [f"HAR_{b}_{a}" for a in range(1,4) for b in range(5,25,5)] + \
             [f"{a}_{b}" for a in ['HAR', 'FIF'] for b in range(5,25,5)]
#             ['hs_r3_w05', 'hs_r3_w10', 'hs_r3_w15', 'hs_r3_w20'] + \
#             [f"im5_r0.75_w{w:02d}" for w in [5,10,15,20] +
    # Bias-model -> bias-family labels; order must match `biases` exactly.
    groups = ['none'] + ['distI']*3 + ['S#1']*2 + ['distI_S#1']*4 + \
             ['distW'] + ['distW_S#1']*2 + ['distW_S#2']*2 + ['HS']*12 + ['im5']*4 + ['AHS']*40 + ['im5']*16 + \
             ['HS']*12 + ['im5']*4 + ['TRANSB']*3 + \
             ['TRANS']*6 + ['HAR']*4 + ['HAR2']*4 + ['HAR3']*4 + ['HAR']*4 + ['FIF']*4
    bias_groups = {biases[i]:groups[i] for i in range(len(biases))}
    # Evaluate every model file in parallel worker processes.
    with mp.Pool(N_PROC) as pool:
        results = list(pool.imap_unordered(extract_stats_each_model, pro_files))
    print(f"Model comparison finished after {(time.time()-timeS)/60.} minutes")
    # One row per model: metadata, then per-sample comparison metrics.
    df = pd.DataFrame(columns=['n_notes', 'min_int', 'max_int', 'bias', 'beta', 'quantile', 'n_sample'] + \
                 [f"{s}_{a}" for s in samples for a in ['n_real', 'fr_10', 'fr_20', 'RMSD', 'dRMSD', 'met1', 'sRMSD']] + \
                 ['fName'], data=results)
    df['bias_group'] = df.bias.apply(lambda x: bias_groups[x])
    df['logq'] = np.log10(df['quantile'])
    df = graphs.rename_bias_groups(df)
    df = graphs.rename_biases(df)
    print(f"DataFrame compiled after {(time.time()-timeS)/60.} minutes")
    if args.partabase == 'None':
        df.to_feather(os.path.join(BASE_DIR, 'Processed', 'database_sensitivity.feather'))
| [
"numpy.log10",
"numpy.log",
"numpy.array",
"numpy.arange",
"os.path.exists",
"numpy.histogram",
"pandas.read_feather",
"numpy.mean",
"argparse.ArgumentParser",
"os.path.split",
"numpy.linspace",
"numpy.dot",
"pandas.DataFrame",
"numpy.trapz",
"graphs.rename_bias_groups",
"time.time",
... | [((542, 593), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""Processed/Real"""', '"""Samples"""'], {}), "(BASE_DIR, 'Processed/Real', 'Samples')\n", (554, 593), False, 'import os\n'), ((605, 660), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""Processed/Real"""', '"""Sample_dist"""'], {}), "(BASE_DIR, 'Processed/Real', 'Sample_dist')\n", (617, 660), False, 'import os\n'), ((1282, 1317), 'numpy.linspace', 'np.linspace', (['(0)', '(1200)'], {'num': '(nbins - 1)'}), '(0, 1200, num=nbins - 1)\n', (1293, 1317), True, 'import numpy as np\n'), ((2711, 2727), 'os.path.split', 'os.path.split', (['f'], {}), '(f)\n', (2724, 2727), False, 'import os\n'), ((2762, 2795), 'os.path.join', 'os.path.join', (['root', 'f"""{s}{fName}"""'], {}), "(root, f'{s}{fName}')\n", (2774, 2795), False, 'import os\n'), ((3367, 3389), 'numpy.sum', 'np.sum', (['((Y1 - Y2) ** 2)'], {}), '((Y1 - Y2) ** 2)\n', (3373, 3389), True, 'import numpy as np\n'), ((3458, 3469), 'time.time', 'time.time', ([], {}), '()\n', (3467, 3469), False, 'import time\n'), ((3484, 3545), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some integers."""'}), "(description='Process some integers.')\n", (3507, 3545), False, 'import argparse\n'), ((3711, 3738), 'numpy.arange', 'np.arange', (['(4)', '(10)'], {'dtype': 'int'}), '(4, 10, dtype=int)\n', (3720, 3738), True, 'import numpy as np\n'), ((7147, 7386), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "(['n_notes', 'min_int', 'max_int', 'bias', 'beta', 'quantile', 'n_sample'] +\n [f'{s}_{a}' for s in samples for a in ['n_real', 'fr_10', 'fr_20',\n 'RMSD', 'dRMSD', 'met1', 'sRMSD']] + ['fName'])", 'data': 'results'}), "(columns=['n_notes', 'min_int', 'max_int', 'bias', 'beta',\n 'quantile', 'n_sample'] + [f'{s}_{a}' for s in samples for a in [\n 'n_real', 'fr_10', 'fr_20', 'RMSD', 'dRMSD', 'met1', 'sRMSD']] + [\n 'fName'], data=results)\n", (7159, 7386), True, 'import pandas as pd\n'), ((7520, 7544), 'numpy.log10', 
'np.log10', (["df['quantile']"], {}), "(df['quantile'])\n", (7528, 7544), True, 'import numpy as np\n'), ((7554, 7583), 'graphs.rename_bias_groups', 'graphs.rename_bias_groups', (['df'], {}), '(df)\n', (7579, 7583), False, 'import graphs\n'), ((7593, 7617), 'graphs.rename_biases', 'graphs.rename_biases', (['df'], {}), '(df)\n', (7613, 7617), False, 'import graphs\n'), ((1189, 1200), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1197, 1200), True, 'import numpy as np\n'), ((1445, 1487), 'numpy.linspace', 'np.linspace', (['(-xtra)', '(1200 + xtra)'], {'num': 'nbins'}), '(-xtra, 1200 + xtra, num=nbins)\n', (1456, 1487), True, 'import numpy as np\n'), ((1507, 1546), 'numpy.histogram', 'np.histogram', (['X'], {'bins': 'bins', 'normed': '(True)'}), '(X, bins=bins, normed=True)\n', (1519, 1546), True, 'import numpy as np\n'), ((1697, 1711), 'numpy.trapz', 'np.trapz', (['yKDE'], {}), '(yKDE)\n', (1705, 1711), True, 'import numpy as np\n'), ((1955, 2005), 'os.path.join', 'os.path.join', (['DIST_DIR', 'f"""{s}_{cat}_n{n}_hist.npy"""'], {}), "(DIST_DIR, f'{s}_{cat}_n{n}_hist.npy')\n", (1967, 2005), False, 'import os\n'), ((2022, 2071), 'os.path.join', 'os.path.join', (['DIST_DIR', 'f"""{s}_{cat}_n{n}_kde.npy"""'], {}), "(DIST_DIR, f'{s}_{cat}_n{n}_kde.npy')\n", (2034, 2071), False, 'import os\n'), ((2083, 2104), 'os.path.exists', 'os.path.exists', (['fHist'], {}), '(fHist)\n', (2097, 2104), False, 'import os\n'), ((3111, 3135), 'numpy.dot', 'np.dot', (['(y1 - y2)', '(y1 - y2)'], {}), '(y1 - y2, y1 - y2)\n', (3117, 3135), True, 'import numpy as np\n'), ((3209, 3233), 'numpy.dot', 'np.dot', (['(d1 - d2)', '(d1 - d2)'], {}), '(d1 - d2, d1 - d2)\n', (3215, 3233), True, 'import numpy as np\n'), ((4206, 4228), 'pandas.read_feather', 'pd.read_feather', (['fName'], {}), '(fName)\n', (4221, 4228), True, 'import pandas as pd\n'), ((6951, 6966), 'multiprocessing.Pool', 'mp.Pool', (['N_PROC'], {}), '(N_PROC)\n', (6958, 6966), True, 'import multiprocessing as mp\n'), ((1805, 1847), 
'os.path.join', 'os.path.join', (['DIST_DIR', 'f"""{s}_n7_hist.npy"""'], {}), "(DIST_DIR, f'{s}_n7_hist.npy')\n", (1817, 1847), False, 'import os\n'), ((1879, 1917), 'os.path.join', 'os.path.join', (['REAL_DIR', 'f"""{s}.feather"""'], {}), "(REAL_DIR, f'{s}.feather')\n", (1891, 1917), False, 'import os\n'), ((2128, 2142), 'numpy.load', 'np.load', (['fHist'], {}), '(fHist)\n', (2135, 2142), True, 'import numpy as np\n'), ((2164, 2177), 'numpy.load', 'np.load', (['fKDE'], {}), '(fKDE)\n', (2171, 2177), True, 'import numpy as np\n'), ((2855, 2899), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""best_models.pickle"""'], {}), "(BASE_DIR, 'best_models.pickle')\n", (2867, 2899), False, 'import os\n'), ((7753, 7820), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""Processed"""', '"""database_sensitivity.feather"""'], {}), "(BASE_DIR, 'Processed', 'database_sensitivity.feather')\n", (7765, 7820), False, 'import os\n'), ((834, 855), 'numpy.log', 'np.log', (['(pk[i] / qk[i])'], {}), '(pk[i] / qk[i])\n', (840, 855), True, 'import numpy as np\n'), ((2318, 2337), 'numpy.array', 'np.array', (['[X, hist]'], {}), '([X, hist])\n', (2326, 2337), True, 'import numpy as np\n'), ((2365, 2383), 'numpy.array', 'np.array', (['[X, kde]'], {}), '([X, kde])\n', (2373, 2383), True, 'import numpy as np\n'), ((3338, 3349), 'numpy.mean', 'np.mean', (['Y1'], {}), '(Y1)\n', (3345, 3349), True, 'import numpy as np\n'), ((4912, 4937), 'os.path.join', 'os.path.join', (['REAL_DIR', 'f'], {}), '(REAL_DIR, f)\n', (4924, 4937), False, 'import os\n'), ((4244, 4264), 'os.path.split', 'os.path.split', (['fName'], {}), '(fName)\n', (4257, 4264), False, 'import os\n'), ((7104, 7115), 'time.time', 'time.time', ([], {}), '()\n', (7113, 7115), False, 'import time\n'), ((7663, 7674), 'time.time', 'time.time', ([], {}), '()\n', (7672, 7674), False, 'import time\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. module:: matrix
:platform: Unix, Windows
:synopsis: Operations on matrices.
.. moduleauthor:: hbldh <<EMAIL>>
Created on 2013-05-15, 10:45
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
from b2ac.compat import *
def inverse_symmetric_3by3_double(M):
    """C style inverse of a symmetric, flattened 3 by 3 matrix.

    Computes the adjugate and determinant, then applies

    .. math::

        M^{-1} = \\frac{1}{\\det(M)} \\cdot \\text{adj}(M).

    :param M: The matrix to find inverse to. Assumes array with shape (6,),
        packed as [m00, m01, m02, m11, m12, m22].
    :type M: :py:class:`numpy.ndarray`
    :return: The inverse matrix, flattened.
    :rtype: :py:class:`numpy.ndarray`
    """
    adj = np.zeros((9,), dtype='float')
    # Cofactors along the first row.
    adj[0] = M[3] * M[5] - M[4] * M[4]
    adj[1] = M[4] * M[2] - M[1] * M[5]
    adj[2] = M[1] * M[4] - M[3] * M[2]
    # The adjugate of a symmetric matrix is symmetric: mirror the
    # off-diagonal cofactors instead of recomputing them.
    adj[3] = adj[1]
    adj[4] = M[0] * M[5] - M[2] * M[2]
    adj[5] = M[1] * M[2] - M[0] * M[4]
    adj[6] = adj[2]
    adj[7] = adj[5]
    adj[8] = M[0] * M[3] - M[1] * M[1]
    # Determinant via expansion along the first row.
    det = M[0] * adj[0] + M[1] * adj[1] + M[2] * adj[2]
    return adj / det
def inverse_symmetric_3by3_int(M):
    """C style adjugate/determinant of a symmetric, flattened 3 by 3 matrix.

    Since

    .. math::

        M^{-1} = \\frac{1}{\\det(M)} \\cdot \\text{adj}(M),

    returning the pair keeps integer arithmetic exact and leaves the
    division to the caller.

    :param M: The matrix to find inverse to. Assumes array with shape (6,),
        packed as [m00, m01, m02, m11, m12, m22].
    :type M: :py:class:`numpy.ndarray`
    :return: The adjoint flattened matrix and the determinant.
    :rtype: tuple
    """
    adj = np.zeros((9,), dtype='int32')
    # Cofactors along the first row.
    adj[0] = M[3] * M[5] - M[4] * M[4]
    adj[1] = M[4] * M[2] - M[1] * M[5]
    adj[2] = M[1] * M[4] - M[3] * M[2]
    # Symmetric input => symmetric adjugate: mirror the off-diagonals.
    adj[3] = adj[1]
    adj[4] = M[0] * M[5] - M[2] * M[2]
    adj[5] = M[1] * M[2] - M[0] * M[4]
    adj[6] = adj[2]
    adj[7] = adj[5]
    adj[8] = M[0] * M[3] - M[1] * M[1]
    # Accumulate the determinant in 64-bit to avoid int32 overflow.
    det = (np.int64(M[0]) * np.int64(adj[0])
           + np.int64(M[1]) * np.int64(adj[1])
           + np.int64(M[2]) * np.int64(adj[2]))
    return adj, det
def inverse_3by3_int(M):
    """C style adjugate/determinant of a flattened 3 by 3 integer matrix.

    Since

    .. math::

        M^{-1} = \\frac{1}{\\det(M)} \\cdot \\text{adj}(M),

    returning the pair keeps integer arithmetic exact and leaves the
    division to the caller.

    :param M: The matrix to find inverse to (2-D input is flattened).
    :type M: :py:class:`numpy.ndarray`
    :return: The adjoint flattened matrix and the determinant.
    :rtype: tuple
    """
    if len(M.shape) > 1:
        M = M.flatten()
    adj = np.zeros((9,), 'int')
    # Signed cofactors, written with the sign folded into the operand order.
    adj[0] = M[4] * M[8] - M[7] * M[5]
    adj[1] = M[7] * M[2] - M[1] * M[8]
    adj[2] = M[1] * M[5] - M[4] * M[2]
    adj[3] = M[6] * M[5] - M[3] * M[8]
    adj[4] = M[0] * M[8] - M[6] * M[2]
    adj[5] = M[3] * M[2] - M[0] * M[5]
    adj[6] = M[3] * M[7] - M[6] * M[4]
    adj[7] = M[6] * M[1] - M[0] * M[7]
    adj[8] = M[0] * M[4] - M[3] * M[1]
    # Expand the determinant along the first row, in 64-bit arithmetic.
    det = (np.int64(M[0]) * np.int64(adj[0])
           + np.int64(M[1]) * np.int64(adj[3])
           + np.int64(M[2]) * np.int64(adj[6]))
    return adj, det
def inverse_3by3_double(M):
    """C style inverse of a flattened 3 by 3 matrix.

    .. math::

        M^{-1} = \\frac{1}{\\det(M)} \\cdot \\text{adj}(M).

    :param M: The matrix to find inverse to (2-D input is flattened).
    :type M: :py:class:`numpy.ndarray`
    :return: The inverse matrix, flattened.
    :rtype: :py:class:`numpy.ndarray`
    """
    if len(M.shape) > 1:
        M = M.flatten()
    determinant = 0
    # BUGFIX: the adjugate must be stored as floats.  The previous dtype
    # ('int') truncated every cofactor of a non-integer matrix, yielding a
    # wrong inverse (e.g. a zero determinant for 0.5 * identity).
    adj_M = np.zeros((9,), 'float')
    # First row of adjunct matrix
    adj_M[0] = (M[4] * M[8] - M[7] * M[5])  # Det #0
    adj_M[1] = -(M[1] * M[8] - M[7] * M[2])
    adj_M[2] = (M[1] * M[5] - M[4] * M[2])
    # Second row of adjunct matrix
    adj_M[3] = -(M[3] * M[8] - M[6] * M[5])  # Det #1
    adj_M[4] = (M[0] * M[8] - M[6] * M[2])
    adj_M[5] = -(M[0] * M[5] - M[3] * M[2])
    # Third row of adjunct matrix
    adj_M[6] = (M[3] * M[7] - M[6] * M[4])  # Det #2
    adj_M[7] = -(M[0] * M[7] - M[6] * M[1])
    adj_M[8] = (M[0] * M[4] - M[3] * M[1])
    determinant += M[0] * adj_M[0]
    determinant += M[1] * adj_M[3]  # Using addition since minus is integrated in adjunct matrix.
    determinant += M[2] * adj_M[6]
    return adj_M / determinant
def add_symmetric_matrix(M, M_sym):
    """Add a regular matrix and a symmetric one.

    :param M: A [3x3] matrix to add with symmetric matrix (modified in place).
    :type M: :py:class:`numpy.ndarray`
    :param M_sym: A [6x1] array to add with M, packed as the upper triangle
        [s00, s01, s02, s11, s12, s22].
    :type M_sym: :py:class:`numpy.ndarray`
    :return: The sum of the two matrices.
    :rtype: :py:class:`numpy.ndarray`
    """
    # Upper-triangle coordinates corresponding to the packed storage order;
    # off-diagonal entries are mirrored below the diagonal.
    upper_coords = [(0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (2, 2)]
    for (row, col), value in zip(upper_coords, M_sym):
        M[row, col] += value
        if row != col:
            M[col, row] += value
    return M
| [
"numpy.zeros",
"numpy.int64"
] | [((981, 1010), 'numpy.zeros', 'np.zeros', (['(9,)'], {'dtype': '"""float"""'}), "((9,), dtype='float')\n", (989, 1010), True, 'import numpy as np\n'), ((2341, 2370), 'numpy.zeros', 'np.zeros', (['(9,)'], {'dtype': '"""int32"""'}), "((9,), dtype='int32')\n", (2349, 2370), True, 'import numpy as np\n'), ((3669, 3690), 'numpy.zeros', 'np.zeros', (['(9,)', '"""int"""'], {}), "((9,), 'int')\n", (3677, 3690), True, 'import numpy as np\n'), ((4983, 5004), 'numpy.zeros', 'np.zeros', (['(9,)', '"""int"""'], {}), "((9,), 'int')\n", (4991, 5004), True, 'import numpy as np\n'), ((2859, 2873), 'numpy.int64', 'np.int64', (['M[0]'], {}), '(M[0])\n', (2867, 2873), True, 'import numpy as np\n'), ((2876, 2894), 'numpy.int64', 'np.int64', (['adj_M[0]'], {}), '(adj_M[0])\n', (2884, 2894), True, 'import numpy as np\n'), ((2914, 2928), 'numpy.int64', 'np.int64', (['M[1]'], {}), '(M[1])\n', (2922, 2928), True, 'import numpy as np\n'), ((2931, 2949), 'numpy.int64', 'np.int64', (['adj_M[1]'], {}), '(adj_M[1])\n', (2939, 2949), True, 'import numpy as np\n'), ((3032, 3046), 'numpy.int64', 'np.int64', (['M[2]'], {}), '(M[2])\n', (3040, 3046), True, 'import numpy as np\n'), ((3049, 3067), 'numpy.int64', 'np.int64', (['adj_M[2]'], {}), '(adj_M[2])\n', (3057, 3067), True, 'import numpy as np\n'), ((4238, 4252), 'numpy.int64', 'np.int64', (['M[0]'], {}), '(M[0])\n', (4246, 4252), True, 'import numpy as np\n'), ((4255, 4273), 'numpy.int64', 'np.int64', (['adj_M[0]'], {}), '(adj_M[0])\n', (4263, 4273), True, 'import numpy as np\n'), ((4293, 4307), 'numpy.int64', 'np.int64', (['M[1]'], {}), '(M[1])\n', (4301, 4307), True, 'import numpy as np\n'), ((4310, 4328), 'numpy.int64', 'np.int64', (['adj_M[3]'], {}), '(adj_M[3])\n', (4318, 4328), True, 'import numpy as np\n'), ((4411, 4425), 'numpy.int64', 'np.int64', (['M[2]'], {}), '(M[2])\n', (4419, 4425), True, 'import numpy as np\n'), ((4428, 4446), 'numpy.int64', 'np.int64', (['adj_M[6]'], {}), '(adj_M[6])\n', (4436, 4446), True, 'import numpy as 
np\n')] |
#!/usr/bin/env python
"""Newtons Cradle example using the visualizer.
This is the same example as provided in [1], but translated into Python and using the `raisimpy` library (which
is a wrapper around `raisimLib` [2] and `raisimOgre` [3]).
References:
- [1] https://github.com/leggedrobotics/raisimOgre/blob/master/examples/src/primitives/newtonsCradle.cpp
- [2] raisimLib: https://github.com/leggedrobotics/raisimLib
- [3] raisimOgre: https://github.com/leggedrobotics/raisimOgre
"""
__author__ = ["<NAME> (C++)", "<NAME> (Python)"]
__copyright__ = "Copyright (c), 2019 Robotic Systems Lab, ETH Zurich"
__credits__ = ["Robotic Systems Lab, ETH Zurich + Hwangbo (C++ example code)",
"<NAME> (Python wrapper + Python example)"]
__license__ = "MIT"
import numpy as np
import raisimpy as raisim
def setup_callback():
    """Configure the raisimOgre visualizer: lighting, materials, shadows,
    contact-point rendering and camera speed.

    Registered via ``vis.set_setup_callback`` and invoked by the visualizer
    during initialization.
    """
    vis = raisim.OgreVis.get()
    # light
    light = vis.get_light()
    light.set_diffuse_color(1, 1, 1)
    light.set_cast_shadows(True)
    vis.get_light_node().set_position(3, 3, 3)
    # load textures
    vis.add_resource_directory(vis.get_resource_dir() + "/material/gravel")
    vis.load_material("gravel.material")
    vis.add_resource_directory(vis.get_resource_dir() + "/model/monkey")
    vis.load_material("monkey.material")
    vis.add_resource_directory(vis.get_resource_dir() + "/material/checkerboard")
    vis.load_material("checkerboard.material")
    # shadow setting
    manager = vis.get_scene_manager()
    manager.set_shadow_technique(raisim.ogre.ShadowTechnique.SHADOWTYPE_TEXTURE_ADDITIVE)
    manager.set_shadow_texture_settings(2048, 3)
    # scale related settings!! Please adapt it depending on your map size
    # beyond this distance, shadow disappears
    manager.set_shadow_far_distance(60)
    # size of contact points and contact forces
    vis.set_contact_visual_object_size(0.1, 3.0)
    # speed of camera motion in freelook mode
    vis.get_camera_man().set_top_speed(10)
if __name__ == '__main__':
    # create raisim world
    world = raisim.World()
    world.set_time_step(0.001)
    world.set_erp(0.5, 0.)
    # start visualizer thread
    # these methods must be called before initApp
    vis = raisim.OgreVis.get()
    vis.set_world(world)
    vis.set_window_size(1800, 1000)
    vis.set_default_callbacks()
    vis.set_setup_callback(setup_callback)
    vis.set_anti_aliasing(2)
    # init
    vis.init_app()
    # create raisim objects
    ground = world.add_ground()
    raisim.gui.manual_stepping = True
    # create visualizer objects
    # Drop a 3x3 grid of unit-mass monkey meshes, staggered in height so
    # they fall one after another.
    for i in range(3):
        for j in range(3):
            mesh_instance = world.add_mesh(file_name=vis.get_resource_dir() + "/model/monkey/monkey.obj", mass=1.0, inertia=np.identity(3), com=np.zeros(3))
            mesh_instance.set_position(-(3.0/2.0) + i * 1.5, -(3.0/2.0) + j * 1.5, 2.0 + (i*3+j))
            vis.create_graphical_object(mesh_instance, name="monkey_mesh"+str(i)+"_"+str(j), material="red")
    # NOTE(review): this sphere gets no graphical object, so it is simulated
    # but never rendered — confirm whether that is intentional.
    sphere = world.add_sphere(0.5, 1.0)
    vis.create_graphical_object(ground, dimension=20, name="floor", material="checkerboard_green")
    # set camera
    camera = vis.get_camera_man().get_camera()
    camera.set_position(0, 15.5, 10.5)
    camera.yaw(3.14)
    camera.pitch(1.2)
    # run the app
    vis.run()
    # terminate
    vis.close_app()
| [
"numpy.identity",
"raisimpy.OgreVis.get",
"numpy.zeros",
"raisimpy.World"
] | [((859, 879), 'raisimpy.OgreVis.get', 'raisim.OgreVis.get', ([], {}), '()\n', (877, 879), True, 'import raisimpy as raisim\n'), ((2034, 2048), 'raisimpy.World', 'raisim.World', ([], {}), '()\n', (2046, 2048), True, 'import raisimpy as raisim\n'), ((2199, 2219), 'raisimpy.OgreVis.get', 'raisim.OgreVis.get', ([], {}), '()\n', (2217, 2219), True, 'import raisimpy as raisim\n'), ((2722, 2736), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (2733, 2736), True, 'import numpy as np\n'), ((2742, 2753), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2750, 2753), True, 'import numpy as np\n')] |
import cv2
import numpy as np


def nothing(x):
    """No-op trackbar callback (cv2.createTrackbar requires a callback)."""
    pass


# Interactive shape detector: threshold the webcam feed in HSV space using
# trackbar-controlled bounds, then label polygonal contours by vertex count.
cap = cv2.VideoCapture(0)
cv2.namedWindow("Settings")
cv2.createTrackbar("Lower-Hue", "Settings", 0, 180, nothing)
cv2.createTrackbar("Lower-Saturation", "Settings", 0, 255, nothing)
cv2.createTrackbar("Lower-Value", "Settings", 0, 255, nothing)
cv2.createTrackbar("Upper-Hue", "Settings", 0, 180, nothing)
cv2.createTrackbar("Upper-Saturation", "Settings", 0, 255, nothing)
cv2.createTrackbar("Upper-Value", "Settings", 0, 255, nothing)
font = cv2.FONT_HERSHEY_SIMPLEX
# 5x5 erosion kernel removes small noise specks from the mask; hoisted out of
# the loop because it never changes between frames.
kernel = np.ones((5, 5), np.uint8)
while True:
    ret, frame = cap.read()
    if not ret:
        # No frame delivered (camera unplugged / stream ended): stop cleanly
        # instead of crashing inside cv2.flip on a None frame.
        break
    frame = cv2.flip(frame, 1)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Read the current HSV bounds from the settings window.
    lh = cv2.getTrackbarPos("Lower-Hue", "Settings")
    ls = cv2.getTrackbarPos("Lower-Saturation", "Settings")
    lv = cv2.getTrackbarPos("Lower-Value", "Settings")
    uh = cv2.getTrackbarPos("Upper-Hue", "Settings")
    us = cv2.getTrackbarPos("Upper-Saturation", "Settings")
    uv = cv2.getTrackbarPos("Upper-Value", "Settings")
    lower_color = np.array([lh, ls, lv])
    upper_color = np.array([uh, us, uv])
    mask = cv2.inRange(hsv, lower_color, upper_color)
    mask = cv2.erode(mask, kernel)
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for i in contours:
        area = cv2.contourArea(i)
        # Approximate the contour to a polygon with 2% arc-length tolerance.
        epsilon = 0.02 * cv2.arcLength(i, True)
        approx = cv2.approxPolyDP(i, epsilon, True)
        # First point of the simplified polygon: anchor for the text label.
        x = approx.ravel()[0]
        y = approx.ravel()[1]
        if area > 400:  # skip tiny noise contours
            cv2.drawContours(frame, [approx], 0, (0, 0, 0), 5)
            # NOTE(review): a contour with exactly 6 vertices falls through
            # all branches and gets no label — confirm whether `> 6` should
            # be `>= 6`.
            if len(approx) == 3:
                cv2.putText(frame, "Triangle", (x, y), font, 1, (0, 0, 0))
            elif len(approx) == 4:
                cv2.putText(frame, "Rectangle", (x, y), font, 1, (0, 0, 0))
            elif len(approx) == 5:
                cv2.putText(frame, "Pentagon", (x, y), font, 1, (0, 0, 0))
            elif len(approx) > 6:
                cv2.putText(frame, "Circle", (x, y), font, 1, (0, 0, 0))
    cv2.imshow("Frame", frame)
    cv2.imshow("Mask", mask)
    if cv2.waitKey(3) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
| [
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.approxPolyDP",
"cv2.erode",
"cv2.arcLength",
"cv2.contourArea",
"cv2.waitKey",
"cv2.drawContours",
"numpy.ones",
"cv2.putText",
"cv2.cvtColor",
"cv2.createTrackbar",
"cv2.namedWindow",
"cv2.flip",
"cv2.inRange",
"cv2.VideoCap... | [((65, 84), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (81, 84), False, 'import cv2\n'), ((86, 113), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Settings"""'], {}), "('Settings')\n", (101, 113), False, 'import cv2\n'), ((114, 174), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Lower-Hue"""', '"""Settings"""', '(0)', '(180)', 'nothing'], {}), "('Lower-Hue', 'Settings', 0, 180, nothing)\n", (132, 174), False, 'import cv2\n'), ((175, 242), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Lower-Saturation"""', '"""Settings"""', '(0)', '(255)', 'nothing'], {}), "('Lower-Saturation', 'Settings', 0, 255, nothing)\n", (193, 242), False, 'import cv2\n'), ((243, 305), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Lower-Value"""', '"""Settings"""', '(0)', '(255)', 'nothing'], {}), "('Lower-Value', 'Settings', 0, 255, nothing)\n", (261, 305), False, 'import cv2\n'), ((306, 366), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Upper-Hue"""', '"""Settings"""', '(0)', '(180)', 'nothing'], {}), "('Upper-Hue', 'Settings', 0, 180, nothing)\n", (324, 366), False, 'import cv2\n'), ((367, 434), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Upper-Saturation"""', '"""Settings"""', '(0)', '(255)', 'nothing'], {}), "('Upper-Saturation', 'Settings', 0, 255, nothing)\n", (385, 434), False, 'import cv2\n'), ((435, 497), 'cv2.createTrackbar', 'cv2.createTrackbar', (['"""Upper-Value"""', '"""Settings"""', '(0)', '(255)', 'nothing'], {}), "('Upper-Value', 'Settings', 0, 255, nothing)\n", (453, 497), False, 'import cv2\n'), ((2413, 2436), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2434, 2436), False, 'import cv2\n'), ((581, 599), 'cv2.flip', 'cv2.flip', (['frame', '(1)'], {}), '(frame, 1)\n', (589, 599), False, 'import cv2\n'), ((610, 648), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (622, 648), False, 'import cv2\n'), ((659, 702), 'cv2.getTrackbarPos', 
'cv2.getTrackbarPos', (['"""Lower-Hue"""', '"""Settings"""'], {}), "('Lower-Hue', 'Settings')\n", (677, 702), False, 'import cv2\n'), ((712, 762), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Lower-Saturation"""', '"""Settings"""'], {}), "('Lower-Saturation', 'Settings')\n", (730, 762), False, 'import cv2\n'), ((772, 817), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Lower-Value"""', '"""Settings"""'], {}), "('Lower-Value', 'Settings')\n", (790, 817), False, 'import cv2\n'), ((827, 870), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Upper-Hue"""', '"""Settings"""'], {}), "('Upper-Hue', 'Settings')\n", (845, 870), False, 'import cv2\n'), ((880, 930), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Upper-Saturation"""', '"""Settings"""'], {}), "('Upper-Saturation', 'Settings')\n", (898, 930), False, 'import cv2\n'), ((940, 985), 'cv2.getTrackbarPos', 'cv2.getTrackbarPos', (['"""Upper-Value"""', '"""Settings"""'], {}), "('Upper-Value', 'Settings')\n", (958, 985), False, 'import cv2\n'), ((1005, 1027), 'numpy.array', 'np.array', (['[lh, ls, lv]'], {}), '([lh, ls, lv])\n', (1013, 1027), True, 'import numpy as np\n'), ((1046, 1068), 'numpy.array', 'np.array', (['[uh, us, uv]'], {}), '([uh, us, uv])\n', (1054, 1068), True, 'import numpy as np\n'), ((1081, 1123), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_color', 'upper_color'], {}), '(hsv, lower_color, upper_color)\n', (1092, 1123), False, 'import cv2\n'), ((1138, 1163), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (1145, 1163), True, 'import numpy as np\n'), ((1242, 1265), 'cv2.erode', 'cv2.erode', (['mask', 'kernel'], {}), '(mask, kernel)\n', (1251, 1265), False, 'import cv2\n'), ((1285, 1347), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1301, 1347), False, 'import cv2\n'), ((2286, 2312), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'frame'], {}), "('Frame', 
frame)\n", (2296, 2312), False, 'import cv2\n'), ((2317, 2341), 'cv2.imshow', 'cv2.imshow', (['"""Mask"""', 'mask'], {}), "('Mask', mask)\n", (2327, 2341), False, 'import cv2\n'), ((1387, 1405), 'cv2.contourArea', 'cv2.contourArea', (['i'], {}), '(i)\n', (1402, 1405), False, 'import cv2\n'), ((1471, 1505), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['i', 'epsilon', '(True)'], {}), '(i, epsilon, True)\n', (1487, 1505), False, 'import cv2\n'), ((1431, 1453), 'cv2.arcLength', 'cv2.arcLength', (['i', '(True)'], {}), '(i, True)\n', (1444, 1453), False, 'import cv2\n'), ((1794, 1844), 'cv2.drawContours', 'cv2.drawContours', (['frame', '[approx]', '(0)', '(0, 0, 0)', '(5)'], {}), '(frame, [approx], 0, (0, 0, 0), 5)\n', (1810, 1844), False, 'import cv2\n'), ((2349, 2363), 'cv2.waitKey', 'cv2.waitKey', (['(3)'], {}), '(3)\n', (2360, 2363), False, 'import cv2\n'), ((1894, 1952), 'cv2.putText', 'cv2.putText', (['frame', '"""Triangle"""', '(x, y)', 'font', '(1)', '(0, 0, 0)'], {}), "(frame, 'Triangle', (x, y), font, 1, (0, 0, 0))\n", (1905, 1952), False, 'import cv2\n'), ((2004, 2063), 'cv2.putText', 'cv2.putText', (['frame', '"""Rectangle"""', '(x, y)', 'font', '(1)', '(0, 0, 0)'], {}), "(frame, 'Rectangle', (x, y), font, 1, (0, 0, 0))\n", (2015, 2063), False, 'import cv2\n'), ((2115, 2173), 'cv2.putText', 'cv2.putText', (['frame', '"""Pentagon"""', '(x, y)', 'font', '(1)', '(0, 0, 0)'], {}), "(frame, 'Pentagon', (x, y), font, 1, (0, 0, 0))\n", (2126, 2173), False, 'import cv2\n'), ((2224, 2280), 'cv2.putText', 'cv2.putText', (['frame', '"""Circle"""', '(x, y)', 'font', '(1)', '(0, 0, 0)'], {}), "(frame, 'Circle', (x, y), font, 1, (0, 0, 0))\n", (2235, 2280), False, 'import cv2\n')] |
import pandas as pd
import os
import re
import numpy as np
from datetime import datetime
from sklearn.decomposition import PCA
# Plotting Packages
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.cbook as cbook
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from matplotlib import rcParams
# Global figure styling: serif font plus the report's color palette.
rcParams['font.family'] = "Times New Roman"
# Eight base colors; doubled so indexes 8-15 cycle through the same palette.
_base_palette = ['#033C5A', '#AA9868', '#0190DB', '#FFC72C',
                 '#A75523', '#008364', '#78BE20', '#C9102F']
colors = _base_palette * 2
#-----------------------------------------------------------------------------------------------------------------------
#----------------------------------------------------Import Data--------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
# Import monthly data: load the monthly sentiment/uncertainty indexes, derive
# a date column, standardized index columns, and the first two principal
# components of the three dictionary-based sentiment indexes.
monthlyIndex=pd.read_csv(r'Data\RegRelevant_MonthlySentimentIndex_Jan2021.csv')
# DataFrame.info() prints its report itself and returns None, so it must not
# be wrapped in print() (the original printed a spurious "None" line).
monthlyIndex.info()
monthlyIndex['Year-Month']=monthlyIndex['Year'].map(str)+'-'+monthlyIndex['Month'].map(str)
monthlyIndex['date']=monthlyIndex['Year-Month'].astype('datetime64[ns]').dt.date
# z-score each dictionary-based index (GI, LM, LSD). The loop variable is
# named `prefix` rather than `dict` to avoid shadowing the builtin.
for prefix in ['GI', 'LM', 'LSD']:
    col = monthlyIndex[prefix + 'index']
    monthlyIndex[prefix + 'index_standardized'] = (col - np.mean(col)) / np.std(col)
unc = monthlyIndex['UncertaintyIndex']
monthlyIndex['UncertaintyIndex_standardized'] = (unc - np.mean(unc)) / np.std(unc)
# PCA of monthly sentiment indexes
features = ['GIindex', 'LMindex', 'LSDindex']
x = monthlyIndex.loc[:, features].values
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
print("Variance explained by PC1 and PC2:", pca.explained_variance_ratio_)
print("PC1 feature weights:", pca.components_[0])
# Negate the components (PCA sign is arbitrary; presumably flipped so higher
# PC1 means more positive sentiment — confirm against feature weights above).
principalComponents_neg=principalComponents*(-1)
principalDf = pd.DataFrame(data = principalComponents_neg, columns = ['SentimentPC1', 'SentimentPC2'])
monthlyIndex = pd.concat([monthlyIndex, principalDf], axis = 1)
# Row-wise envelope of the three raw sentiment indexes.
monthlyIndex['SentimentMax']=monthlyIndex[['GIindex','LMindex','LSDindex']].max(axis=1)
monthlyIndex['SentimentMin']=monthlyIndex[['GIindex','LMindex','LSDindex']].min(axis=1)
# Import weekly data: same pipeline as the monthly block — load, derive the
# date column, standardize, and extract the first two principal components.
weeklyIndex=pd.read_csv(r'Data\RegRelevant_WeeklySentimentIndex_Jan2021.csv')
# DataFrame.info() prints its report itself and returns None, so it must not
# be wrapped in print() (the original printed a spurious "None" line).
weeklyIndex.info()
weeklyIndex['date']=weeklyIndex['StartDate'].astype('datetime64[ns]').dt.date
# z-score each dictionary-based index (GI, LM, LSD). The loop variable is
# named `prefix` rather than `dict` to avoid shadowing the builtin.
for prefix in ['GI', 'LM', 'LSD']:
    col = weeklyIndex[prefix + 'index']
    weeklyIndex[prefix + 'index_standardized'] = (col - np.mean(col)) / np.std(col)
unc = weeklyIndex['UncertaintyIndex']
weeklyIndex['UncertaintyIndex_standardized'] = (unc - np.mean(unc)) / np.std(unc)
# PCA of weekly sentiment indexes
features = ['GIindex', 'LMindex', 'LSDindex']
x = weeklyIndex.loc[:, features].values
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
print("Variance explained by PC1 and PC2:", pca.explained_variance_ratio_)
print("PC1 feature weights:", pca.components_[0])
# Negate the components (PCA sign is arbitrary; presumably flipped so higher
# PC1 means more positive sentiment — confirm against feature weights above).
principalComponents_neg=principalComponents*(-1)
principalDf = pd.DataFrame(data = principalComponents_neg, columns = ['SentimentPC1', 'SentimentPC2'])
weeklyIndex = pd.concat([weeklyIndex, principalDf], axis = 1)
#-----------------------------------------------------------------------------------------------------------------------
#---------------------------------------Plot Monthly Sentiment & Uncertainty Indexes--------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
# Figure 1: monthly uncertainty index over the Trump term, with an inset of
# the weekly index over the most recent 12 months.
# Last 49 monthly observations — matches the title span Jan 2017 - Jan 2021.
x=monthlyIndex['date'][-49:]
y=monthlyIndex['UncertaintyIndex'][-49:]
fig, ax = plt.subplots(1, figsize=(15,8))
ax.plot(x,y,color=colors[0],marker='D',markersize=8)
# Annotate notable events (y-positions are data coordinates, hand-tuned)
ax.text(datetime(2016,12,1), 0.73, 'Transition\nof power', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2020,4,1), 0.8, 'Coronavirus\noutbreak', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2020,11,1), 0.77, '2020 presidential\nelection', fontsize=13, color=colors[4],horizontalalignment='center')
# format the ticks
years = mdates.YearLocator()  # every year
months = mdates.MonthLocator()  # every month
years_fmt = mdates.DateFormatter('%Y-%m')
#
# ax.xaxis.set_major_locator(years)
# ax.xaxis.set_major_formatter(years_fmt)
# ax.xaxis.set_minor_locator(months)
#
# # round to nearest years.
# datemin = np.datetime64(min(x), 'Y')
# datemax = np.datetime64(max(x), 'Y') + np.timedelta64(1, 'Y')
# ax.set_xlim(datemin, datemax)
# format the coords message box (interactive cursor readout)
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.format_ydata = lambda x: '$%1.2f' % x
fig.autofmt_xdate()
# Set tick and label format
ax.tick_params(axis='both',which='major',labelsize=14,color='#d3d3d3')
ax.tick_params(axis='both',which='minor',color='#d3d3d3')
ax.set_ylabel('Monthly Uncertainty Index',fontsize=16)
# y ticks every 0.1, padded one step below min and above max of the data
ax.set_yticks(np.arange(round(min(y),1)-0.1,round(max(y),1)+0.2,0.1))
#ax.set_ylim(bottom=round(min(y),1))
ax.grid(color='#d3d3d3', which='major', axis='y')
# Borders: hide top/right, grey out the rest
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_color('#d3d3d3')
ax.spines['bottom'].set_color('#d3d3d3')
# Title
fig.suptitle('Figure 1: Uncertainty about Regulatory Policy',
             x=0.72, y=0.95,fontsize=20)
ax.set_title('(January 2017 - January 2021)',fontsize=18,position=(0.85,1.1))
# Inset plot: last 52 weekly observations, anchored top-left of the main axes
xins=weeklyIndex['date'][-52:]
yins=weeklyIndex['UncertaintyIndex'][-52:]
axins=inset_axes(ax, width=5, height=2.5, bbox_to_anchor=(.05, .69, .5, .5),
                 bbox_transform=ax.transAxes,loc=2)
axins.plot(xins,yins,color='#033C5A',linewidth=2,marker='D',markersize=5)
axins.format_xdata = mdates.DateFormatter('%Y-%m')
axins.set_yticks(np.arange(round(min(yins),1)-0.1, round(max(yins),1)+0.2, 0.1))
axins.grid(color='gray', which='major', axis='y', linestyle='dotted')
axins.tick_params(axis='both',which='major',labelsize=10)
axins.set_facecolor('#d3d3d3')
axins.set_alpha(0.2)
axins.set_title('Weekly Index over the Past 12 Months',fontsize=14,position=(0.5,0.85))
# Adjust plot position to leave room for the suptitle and the footnote
plt.subplots_adjust(top=0.81, bottom=0.15)
# Methodology footnote
fig.text(0.12, 0.02,'Notes: The uncertainty index was estimated using a dictionary-based sentiment analysis'
                    ' approach applied to newspaper text and fixed effects\nregressions. '
                    'For details on the methodology, refer to the latest draft of the Sinclair and Xie paper'
                    ' on "Sentiment and Uncertainty about Regulation".',
         fontsize=14,style='italic')
plt.savefig('Figures/Figure1.jpg', bbox_inches='tight')
plt.show()
#-----------------------------------------------------------------------------------------------------------------------
# Figure 3: full-sample monthly uncertainty index (1985-2021) with shaded
# bands per presidential term and annotated events.
x=monthlyIndex['date']
y=monthlyIndex['UncertaintyIndex']
fig, ax = plt.subplots(1, figsize=(15,9))
ax.plot(x,y,color='black')
# Presidential term bands: colors[7] (red) / colors[0] (blue) alternate by
# party; term boundaries drawn at February of each inauguration year.
# NOTE(review): '<NAME>' below looks like a redaction placeholder for the two
# Bush presidents — restore the names before publishing.
ax.axvspan(datetime(1985,1,1),datetime(1989,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(1987,1,1), 0.91, 'Ronald\nReagan', fontsize=13, color=colors[7],horizontalalignment='center')
ax.axvspan(datetime(1989,2,1),datetime(1993,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(1991,1,1), 0.91, '<NAME>', fontsize=13, color=colors[7],horizontalalignment='center')
ax.axvspan(datetime(1993,2,1),datetime(2001,2,1),alpha=0.1, color=colors[0])
ax.text(datetime(1997,1,1), 0.91, 'Bill\nClinton', fontsize=13, color=colors[0],horizontalalignment='center')
ax.axvspan(datetime(2001,2,1),datetime(2009,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(2005,1,1), 0.91, '<NAME>', fontsize=13, color=colors[7],horizontalalignment='center')
ax.axvspan(datetime(2009,2,1),datetime(2017,2,1),alpha=0.1, color=colors[0])
ax.text(datetime(2013,1,1), 0.91, 'Barack\nObama', fontsize=13, color=colors[0],horizontalalignment='center')
ax.axvspan(datetime(2017,2,1),datetime(2021,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(2019,1,1),0.91, 'Donald\nTrump', fontsize=13, color=colors[7],horizontalalignment='center')
# Annotate notable events (y-positions hand-tuned in data coordinates)
ax.text(datetime(2008,9,1), 0.8, 'Lehman\nBrothers', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2010,3,1), 0.855, 'Obamacare', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2010,10,1), 0.87, 'Deepwater Horizon\noil spill', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2010,7,1), 0.84, 'Dodd-Frank', fontsize=13, color=colors[4],horizontalalignment='left')
ax.text(datetime(2016,11,1),0.83 , '2016 presidential\nelection', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2020,1,1), 0.79, 'Coronavirus\noutbreak', fontsize=13, color=colors[4],horizontalalignment='center')
# format the ticks: major tick every 2 years, minor every month
years = mdates.YearLocator(2)  # every other year
months = mdates.MonthLocator()  # every month
years_fmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(months)
# round x-limits to nearest years
datemin = np.datetime64(monthlyIndex['date'].iloc[0], 'Y')
datemax = np.datetime64(monthlyIndex['date'].iloc[-1], 'Y')
ax.set_xlim(datemin, datemax)
# format the coords message box (interactive cursor readout)
ax.format_xdata = mdates.DateFormatter('%Y')
ax.format_ydata = lambda x: '$%1.2f' % x
fig.autofmt_xdate()
# Set tick and label format
ax.tick_params(axis='both',which='major',labelsize=14,color='#d3d3d3')
ax.tick_params(axis='both',which='minor',color='#d3d3d3')
ax.set_ylabel('Monthly Uncertainty Index',fontsize=16)
ax.set_yticks(np.arange(round(min(y),1),round(max(y),1)+0.1,0.1))
ax.set_ylim(bottom=round(min(y),1))
ax.grid(color='#d3d3d3', which='major', axis='y')
# Borders: hide top/right, grey out the rest
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_color('#d3d3d3')
ax.spines['bottom'].set_color('#d3d3d3')
# Title
fig.suptitle('Figure 3: Uncertainty about Regulation by Presidential Year',
             y=0.95,fontsize=20)
ax.set_title('(January 1985 - January 2021)',fontsize=18,position=(0.5,1.12))
# Methodology footnote
fig.text(0.12, 0.03,'Notes: The uncertainty index was estimated using a dictionary-based sentiment analysis'
                    ' approach applied to newspaper text and fixed effects\nregressions. '
                    'For details on the methodology, refer to the latest draft of the Sinclair and Xie paper'
                    ' on "Sentiment and Uncertainty about Regulation".',
         fontsize=14,style='italic')
# Adjust plot position to leave room for the suptitle and the footnote
plt.subplots_adjust(top=0.81, bottom=0.15)
plt.savefig('Figures/Figure3.jpg', bbox_inches='tight')
plt.show()
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
# Figure 2: first principal component of the sentiment indexes over the Trump
# term, with an inset of the weekly PC1 over the most recent 12 months.
# Last 49 monthly observations — matches the title span Jan 2017 - Jan 2021.
x = monthlyIndex['date'][-49:]
y = monthlyIndex['SentimentPC1'][-49:]
fig, ax = plt.subplots(1, figsize=(15, 8))
ax.plot(x,y,color=colors[0],marker='D',markersize=8)
# Annotate notable events (y-positions are data coordinates, hand-tuned)
#ax.text(datetime(2016,12,1), 0.73, 'Transition\nof Power', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2018,12,1), -0.45, 'Trump midterm\nelection', fontsize=13, color=colors[4],horizontalalignment='center')
#ax.text(datetime(2020,3,1), -0.15, 'Coronavirus\noutbreak', fontsize=13, color=colors[4],horizontalalignment='center')
#ax.text(datetime(2020,12,1), 0.77, '2020 Presidential Election', fontsize=13, color=colors[4],horizontalalignment='center')
# format the ticks
years = mdates.YearLocator()  # every year
months = mdates.MonthLocator()  # every month
years_fmt = mdates.DateFormatter('%Y-%m')
#
# ax.xaxis.set_major_locator(years)
# ax.xaxis.set_major_formatter(years_fmt)
# ax.xaxis.set_minor_locator(months)
#
# # round to nearest years.
# datemin = np.datetime64(min(x), 'Y')
# datemax = np.datetime64(max(x), 'Y') + np.timedelta64(1, 'Y')
# ax.set_xlim(datemin, datemax)
# format the coords message box (interactive cursor readout)
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.format_ydata = lambda x: '$%1.2f' % x
fig.autofmt_xdate()
# Set tick and label format
ax.tick_params(axis='both',which='major',labelsize=14,color='#d3d3d3')
ax.tick_params(axis='both',which='minor',color='#d3d3d3')
ax.set_ylabel('Monthly Sentiment Index',fontsize=16)
ax.set_yticks(np.arange(-0.8,1.4,0.4))
#ax.set_ylim(bottom=round(min(y),1))
ax.grid(color='#d3d3d3', which='major', axis='y')
# Borders: hide top/right, grey out the rest
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_color('#d3d3d3')
ax.spines['bottom'].set_color('#d3d3d3')
# Title
fig.suptitle('Figure 2: Sentiment about Regulatory Policy',
             x=0.26, y=0.95,fontsize=20)
ax.set_title('(January 2017 - January 2021)',fontsize=18,position=(0.1,1.13))
# Inset plot: last 52 weekly observations, anchored top-right of the main axes
xins=weeklyIndex['date'][-52:]
yins=weeklyIndex['SentimentPC1'][-52:]
axins=inset_axes(ax, width=5, height=2.5, bbox_to_anchor=(.52, .75, .5, .5),
                 bbox_transform=ax.transAxes,loc=2)
axins.plot(xins,yins,color='#033C5A',linewidth=2,marker='D',markersize=5)
axins.format_xdata = mdates.DateFormatter('%Y-%m')
axins.set_yticks(np.arange(-2, 3, 1))
axins.grid(color='gray', which='major', axis='y', linestyle='dotted')
axins.tick_params(axis='both',which='major',labelsize=10)
axins.set_facecolor('#d3d3d3')
axins.set_alpha(0.1)
axins.set_title('Weekly Index over the Past 12 Months',fontsize=14,position=(0.5,0.85))
# Adjust plot position to leave room for the suptitle and the footnote
plt.subplots_adjust(top=0.79, bottom=0.15)
# Methodology footnote
fig.text(0.12, 0.02,'Notes: The sentiment index was estimated using a dictionary-based sentiment analysis'
                    ' approach applied to newspaper text and fixed effects\nregressions. '
                    'For details on the methodology, refer to the latest draft of the Sinclair and Xie paper'
                    ' on "Sentiment and Uncertainty about Regulation".',
         fontsize=14,style='italic')
plt.savefig("Figures/Figure2.jpg", bbox_inches='tight')
plt.show()
#-----------------------------------------------------------------------------------------------------------------------
# Figure 4: full-sample monthly sentiment PC1 (1985-2021) with shaded bands
# per presidential term and annotated events.
x = monthlyIndex['date']
y = monthlyIndex['SentimentPC1']
fig, ax = plt.subplots(1, figsize=(15, 9))
ax.plot(x, y, color='black')
# Presidential term bands: colors[7] (red) / colors[0] (blue) alternate by
# party; term boundaries drawn at February of each inauguration year.
# NOTE(review): '<NAME>' below looks like a redaction placeholder for the two
# Bush presidents — restore the names before publishing.
ax.axvspan(datetime(1985,1,1),datetime(1989,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(1987,1,1), 1.6, 'Ronald\nReagan', fontsize=13, color=colors[7],horizontalalignment='center')
ax.axvspan(datetime(1989,2,1),datetime(1993,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(1991,1,1), 1.6, '<NAME>', fontsize=13, color=colors[7],horizontalalignment='center')
ax.axvspan(datetime(1993,2,1),datetime(2001,2,1),alpha=0.1, color=colors[0])
ax.text(datetime(1997,1,1), 1.6, 'Bill\nClinton', fontsize=13, color=colors[0],horizontalalignment='center')
ax.axvspan(datetime(2001,2,1),datetime(2009,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(2005,1,1), 1.6, '<NAME>', fontsize=13, color=colors[7],horizontalalignment='center')
ax.axvspan(datetime(2009,2,1),datetime(2017,2,1),alpha=0.1, color=colors[0])
ax.text(datetime(2013,1,1), 1.6, 'Barack\nObama', fontsize=13, color=colors[0],horizontalalignment='center')
ax.axvspan(datetime(2017,2,1),datetime(2021,2,1),alpha=0.1, color=colors[7])
ax.text(datetime(2019,1,1),1.6, 'Donald\nTrump', fontsize=13, color=colors[7],horizontalalignment='center')
# Annotate notable events (y-positions hand-tuned in data coordinates)
ax.text(datetime(1993,9,1), 0.75, 'Clinton\nhealth care plan', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2001,9,1), -0.75, '9/11', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2006,11,1), 0.73, 'Bush midterm\nelection', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2008,9,1), -0.6, 'Lehman\nBrothers', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2010,3,1), -1, 'Obamacare', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2010,10,1),-1.25, 'Deepwater Horizon\noil spill', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2010,12,1), -1.4, 'Dodd-Frank', fontsize=13, color=colors[4],horizontalalignment='center')
ax.text(datetime(2012,6,1), -1, 'Libor\nscandal', fontsize=13, color=colors[4],horizontalalignment='left')
ax.text(datetime(2016,11,1), 0.8 , '2016 presidential\nelection', fontsize=13, color=colors[4],horizontalalignment='center')
#ax.text(datetime(2020,1,1), -0.5, 'Coronavirus\noutbreak', fontsize=13, color=colors[4],horizontalalignment='center')
# format the ticks: major tick every 2 years, minor every month
years = mdates.YearLocator(2)  # every other year
months = mdates.MonthLocator()  # every month
years_fmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(months)
# round x-limits to nearest years
datemin = np.datetime64(x.iloc[0], 'Y')
datemax = np.datetime64(x.iloc[-1], 'Y')
ax.set_xlim(datemin, datemax)
# format the coords message box (interactive cursor readout)
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.format_ydata = lambda x: '$%1.2f' % x
fig.autofmt_xdate()
# Set tick and label format
ax.tick_params(axis='both',which='major',labelsize=14,color='#d3d3d3')
ax.tick_params(axis='both',which='minor',color='#d3d3d3')
ax.set_ylabel('Monthly Sentiment Index', fontsize=16)
ax.set_yticks(np.arange(round(min(y), 0) - 0.5, round(max(y), 0) + 1, 0.5))
ax.grid(color='#d3d3d3', which='major', axis='y')
# Borders: hide top/right, grey out the rest
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_color('#d3d3d3')
ax.spines['bottom'].set_color('#d3d3d3')
# Title
fig.suptitle("Figure 4: Sentiment about Regulation by Presidential Year",
             y=0.95, fontsize=20)
ax.set_title('(January 1985 - January 2021)', fontsize=18,position=(0.5,1.12))
# Methodology footnote
fig.text(0.12, 0.03, 'Notes: The sentiment index was estimated using a dictionary-based sentiment analysis'
                     ' approach applied to newspaper text and fixed effects\nregressions. '
                     'For details on the methodology, refer to the latest draft of the Sinclair and Xie paper'
                     ' on "Sentiment and Uncertainty about Regulation".',
         fontsize=14, style='italic')
# Adjust plot position to leave room for the suptitle and the footnote
plt.subplots_adjust(top=0.81, bottom=0.15)
plt.savefig("Figures/Figure4.jpg", bbox_inches='tight')
plt.show()
| [
"datetime.datetime",
"numpy.mean",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"matplotlib.dates.MonthLocator",
"sklearn.decomposition.PCA",
"mpl_toolkits.axes_grid1.inset_locator.inset_axes",
"matplotlib.dates.DateFormatter",
"numpy.std",
"numpy.arange",
"numpy.datetime64",
"pandas.DataFr... | [((1111, 1177), 'pandas.read_csv', 'pd.read_csv', (['"""Data\\\\RegRelevant_MonthlySentimentIndex_Jan2021.csv"""'], {}), "('Data\\\\RegRelevant_MonthlySentimentIndex_Jan2021.csv')\n", (1122, 1177), True, 'import pandas as pd\n'), ((1848, 1867), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (1851, 1867), False, 'from sklearn.decomposition import PCA\n'), ((2100, 2188), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'principalComponents_neg', 'columns': "['SentimentPC1', 'SentimentPC2']"}), "(data=principalComponents_neg, columns=['SentimentPC1',\n 'SentimentPC2'])\n", (2112, 2188), True, 'import pandas as pd\n'), ((2204, 2250), 'pandas.concat', 'pd.concat', (['[monthlyIndex, principalDf]'], {'axis': '(1)'}), '([monthlyIndex, principalDf], axis=1)\n', (2213, 2250), True, 'import pandas as pd\n'), ((2464, 2529), 'pandas.read_csv', 'pd.read_csv', (['"""Data\\\\RegRelevant_WeeklySentimentIndex_Jan2021.csv"""'], {}), "('Data\\\\RegRelevant_WeeklySentimentIndex_Jan2021.csv')\n", (2475, 2529), True, 'import pandas as pd\n'), ((3094, 3113), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (3097, 3113), False, 'from sklearn.decomposition import PCA\n'), ((3346, 3434), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'principalComponents_neg', 'columns': "['SentimentPC1', 'SentimentPC2']"}), "(data=principalComponents_neg, columns=['SentimentPC1',\n 'SentimentPC2'])\n", (3358, 3434), True, 'import pandas as pd\n'), ((3449, 3494), 'pandas.concat', 'pd.concat', (['[weeklyIndex, principalDf]'], {'axis': '(1)'}), '([weeklyIndex, principalDf], axis=1)\n', (3458, 3494), True, 'import pandas as pd\n'), ((4136, 4168), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(15, 8)'}), '(1, figsize=(15, 8))\n', (4148, 4168), True, 'import matplotlib.pyplot as plt\n'), ((4619, 4639), 'matplotlib.dates.YearLocator', 'mdates.YearLocator', ([], {}), '()\n', (4637, 4639), 
True, 'import matplotlib.dates as mdates\n'), ((4664, 4685), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {}), '()\n', (4683, 4685), True, 'import matplotlib.dates as mdates\n'), ((4713, 4742), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%Y-%m"""'], {}), "('%Y-%m')\n", (4733, 4742), True, 'import matplotlib.dates as mdates\n'), ((5076, 5108), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%Y-%m-%d"""'], {}), "('%Y-%m-%d')\n", (5096, 5108), True, 'import matplotlib.dates as mdates\n'), ((5993, 6107), 'mpl_toolkits.axes_grid1.inset_locator.inset_axes', 'inset_axes', (['ax'], {'width': '(5)', 'height': '(2.5)', 'bbox_to_anchor': '(0.05, 0.69, 0.5, 0.5)', 'bbox_transform': 'ax.transAxes', 'loc': '(2)'}), '(ax, width=5, height=2.5, bbox_to_anchor=(0.05, 0.69, 0.5, 0.5),\n bbox_transform=ax.transAxes, loc=2)\n', (6003, 6107), False, 'from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n'), ((6215, 6244), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%Y-%m"""'], {}), "('%Y-%m')\n", (6235, 6244), True, 'import matplotlib.dates as mdates\n'), ((6618, 6660), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.81)', 'bottom': '(0.15)'}), '(top=0.81, bottom=0.15)\n', (6637, 6660), True, 'import matplotlib.pyplot as plt\n'), ((7090, 7145), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Figures/Figure1.jpg"""'], {'bbox_inches': '"""tight"""'}), "('Figures/Figure1.jpg', bbox_inches='tight')\n", (7101, 7145), True, 'import matplotlib.pyplot as plt\n'), ((7146, 7156), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7154, 7156), True, 'import matplotlib.pyplot as plt\n'), ((7414, 7446), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(15, 9)'}), '(1, figsize=(15, 9))\n', (7426, 7446), True, 'import matplotlib.pyplot as plt\n'), ((9343, 9364), 'matplotlib.dates.YearLocator', 'mdates.YearLocator', (['(2)'], {}), '(2)\n', (9361, 9364), True, 'import 
matplotlib.dates as mdates\n'), ((9389, 9410), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {}), '()\n', (9408, 9410), True, 'import matplotlib.dates as mdates\n'), ((9438, 9464), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%Y"""'], {}), "('%Y')\n", (9458, 9464), True, 'import matplotlib.dates as mdates\n'), ((9612, 9660), 'numpy.datetime64', 'np.datetime64', (["monthlyIndex['date'].iloc[0]", '"""Y"""'], {}), "(monthlyIndex['date'].iloc[0], 'Y')\n", (9625, 9660), True, 'import numpy as np\n'), ((9671, 9720), 'numpy.datetime64', 'np.datetime64', (["monthlyIndex['date'].iloc[-1]", '"""Y"""'], {}), "(monthlyIndex['date'].iloc[-1], 'Y')\n", (9684, 9720), True, 'import numpy as np\n'), ((9802, 9828), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%Y"""'], {}), "('%Y')\n", (9822, 9828), True, 'import matplotlib.dates as mdates\n'), ((11071, 11113), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.81)', 'bottom': '(0.15)'}), '(top=0.81, bottom=0.15)\n', (11090, 11113), True, 'import matplotlib.pyplot as plt\n'), ((11115, 11170), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Figures/Figure3.jpg"""'], {'bbox_inches': '"""tight"""'}), "('Figures/Figure3.jpg', bbox_inches='tight')\n", (11126, 11170), True, 'import matplotlib.pyplot as plt\n'), ((11171, 11181), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11179, 11181), True, 'import matplotlib.pyplot as plt\n'), ((11548, 11580), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(15, 8)'}), '(1, figsize=(15, 8))\n', (11560, 11580), True, 'import matplotlib.pyplot as plt\n'), ((12158, 12178), 'matplotlib.dates.YearLocator', 'mdates.YearLocator', ([], {}), '()\n', (12176, 12178), True, 'import matplotlib.dates as mdates\n'), ((12203, 12224), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {}), '()\n', (12222, 12224), True, 'import matplotlib.dates as mdates\n'), ((12252, 12281), 
'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%Y-%m"""'], {}), "('%Y-%m')\n", (12272, 12281), True, 'import matplotlib.dates as mdates\n'), ((12615, 12647), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%Y-%m-%d"""'], {}), "('%Y-%m-%d')\n", (12635, 12647), True, 'import matplotlib.dates as mdates\n'), ((13493, 13607), 'mpl_toolkits.axes_grid1.inset_locator.inset_axes', 'inset_axes', (['ax'], {'width': '(5)', 'height': '(2.5)', 'bbox_to_anchor': '(0.52, 0.75, 0.5, 0.5)', 'bbox_transform': 'ax.transAxes', 'loc': '(2)'}), '(ax, width=5, height=2.5, bbox_to_anchor=(0.52, 0.75, 0.5, 0.5),\n bbox_transform=ax.transAxes, loc=2)\n', (13503, 13607), False, 'from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n'), ((13715, 13744), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%Y-%m"""'], {}), "('%Y-%m')\n", (13735, 13744), True, 'import matplotlib.dates as mdates\n'), ((14075, 14117), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.79)', 'bottom': '(0.15)'}), '(top=0.79, bottom=0.15)\n', (14094, 14117), True, 'import matplotlib.pyplot as plt\n'), ((14545, 14600), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Figures/Figure2.jpg"""'], {'bbox_inches': '"""tight"""'}), "('Figures/Figure2.jpg', bbox_inches='tight')\n", (14556, 14600), True, 'import matplotlib.pyplot as plt\n'), ((14601, 14611), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14609, 14611), True, 'import matplotlib.pyplot as plt\n'), ((14847, 14879), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(15, 9)'}), '(1, figsize=(15, 9))\n', (14859, 14879), True, 'import matplotlib.pyplot as plt\n'), ((17230, 17251), 'matplotlib.dates.YearLocator', 'mdates.YearLocator', (['(2)'], {}), '(2)\n', (17248, 17251), True, 'import matplotlib.dates as mdates\n'), ((17275, 17296), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {}), '()\n', (17294, 17296), True, 'import matplotlib.dates as 
mdates\n'), ((17324, 17350), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%Y"""'], {}), "('%Y')\n", (17344, 17350), True, 'import matplotlib.dates as mdates\n'), ((17498, 17527), 'numpy.datetime64', 'np.datetime64', (['x.iloc[0]', '"""Y"""'], {}), "(x.iloc[0], 'Y')\n", (17511, 17527), True, 'import numpy as np\n'), ((17538, 17568), 'numpy.datetime64', 'np.datetime64', (['x.iloc[-1]', '"""Y"""'], {}), "(x.iloc[-1], 'Y')\n", (17551, 17568), True, 'import numpy as np\n'), ((17650, 17682), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%Y-%m-%d"""'], {}), "('%Y-%m-%d')\n", (17670, 17682), True, 'import matplotlib.dates as mdates\n'), ((18896, 18938), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.81)', 'bottom': '(0.15)'}), '(top=0.81, bottom=0.15)\n', (18915, 18938), True, 'import matplotlib.pyplot as plt\n'), ((18940, 18995), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Figures/Figure4.jpg"""'], {'bbox_inches': '"""tight"""'}), "('Figures/Figure4.jpg', bbox_inches='tight')\n", (18951, 18995), True, 'import matplotlib.pyplot as plt\n'), ((18996, 19006), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19004, 19006), True, 'import matplotlib.pyplot as plt\n'), ((1678, 1718), 'numpy.std', 'np.std', (["monthlyIndex['UncertaintyIndex']"], {}), "(monthlyIndex['UncertaintyIndex'])\n", (1684, 1718), True, 'import numpy as np\n'), ((2927, 2966), 'numpy.std', 'np.std', (["weeklyIndex['UncertaintyIndex']"], {}), "(weeklyIndex['UncertaintyIndex'])\n", (2933, 2966), True, 'import numpy as np\n'), ((4239, 4260), 'datetime.datetime', 'datetime', (['(2016)', '(12)', '(1)'], {}), '(2016, 12, 1)\n', (4247, 4260), False, 'from datetime import datetime\n'), ((4357, 4377), 'datetime.datetime', 'datetime', (['(2020)', '(4)', '(1)'], {}), '(2020, 4, 1)\n', (4365, 4377), False, 'from datetime import datetime\n'), ((4474, 4495), 'datetime.datetime', 'datetime', (['(2020)', '(11)', '(1)'], {}), '(2020, 11, 1)\n', 
(4482, 4495), False, 'from datetime import datetime\n'), ((7505, 7525), 'datetime.datetime', 'datetime', (['(1985)', '(1)', '(1)'], {}), '(1985, 1, 1)\n', (7513, 7525), False, 'from datetime import datetime\n'), ((7524, 7544), 'datetime.datetime', 'datetime', (['(1989)', '(2)', '(1)'], {}), '(1989, 2, 1)\n', (7532, 7544), False, 'from datetime import datetime\n'), ((7579, 7599), 'datetime.datetime', 'datetime', (['(1987)', '(1)', '(1)'], {}), '(1987, 1, 1)\n', (7587, 7599), False, 'from datetime import datetime\n'), ((7694, 7714), 'datetime.datetime', 'datetime', (['(1989)', '(2)', '(1)'], {}), '(1989, 2, 1)\n', (7702, 7714), False, 'from datetime import datetime\n'), ((7713, 7733), 'datetime.datetime', 'datetime', (['(1993)', '(2)', '(1)'], {}), '(1993, 2, 1)\n', (7721, 7733), False, 'from datetime import datetime\n'), ((7768, 7788), 'datetime.datetime', 'datetime', (['(1991)', '(1)', '(1)'], {}), '(1991, 1, 1)\n', (7776, 7788), False, 'from datetime import datetime\n'), ((7875, 7895), 'datetime.datetime', 'datetime', (['(1993)', '(2)', '(1)'], {}), '(1993, 2, 1)\n', (7883, 7895), False, 'from datetime import datetime\n'), ((7894, 7914), 'datetime.datetime', 'datetime', (['(2001)', '(2)', '(1)'], {}), '(2001, 2, 1)\n', (7902, 7914), False, 'from datetime import datetime\n'), ((7949, 7969), 'datetime.datetime', 'datetime', (['(1997)', '(1)', '(1)'], {}), '(1997, 1, 1)\n', (7957, 7969), False, 'from datetime import datetime\n'), ((8063, 8083), 'datetime.datetime', 'datetime', (['(2001)', '(2)', '(1)'], {}), '(2001, 2, 1)\n', (8071, 8083), False, 'from datetime import datetime\n'), ((8082, 8102), 'datetime.datetime', 'datetime', (['(2009)', '(2)', '(1)'], {}), '(2009, 2, 1)\n', (8090, 8102), False, 'from datetime import datetime\n'), ((8137, 8157), 'datetime.datetime', 'datetime', (['(2005)', '(1)', '(1)'], {}), '(2005, 1, 1)\n', (8145, 8157), False, 'from datetime import datetime\n'), ((8244, 8264), 'datetime.datetime', 'datetime', (['(2009)', '(2)', '(1)'], {}), 
'(2009, 2, 1)\n', (8252, 8264), False, 'from datetime import datetime\n'), ((8263, 8283), 'datetime.datetime', 'datetime', (['(2017)', '(2)', '(1)'], {}), '(2017, 2, 1)\n', (8271, 8283), False, 'from datetime import datetime\n'), ((8318, 8338), 'datetime.datetime', 'datetime', (['(2013)', '(1)', '(1)'], {}), '(2013, 1, 1)\n', (8326, 8338), False, 'from datetime import datetime\n'), ((8432, 8452), 'datetime.datetime', 'datetime', (['(2017)', '(2)', '(1)'], {}), '(2017, 2, 1)\n', (8440, 8452), False, 'from datetime import datetime\n'), ((8451, 8471), 'datetime.datetime', 'datetime', (['(2021)', '(2)', '(1)'], {}), '(2021, 2, 1)\n', (8459, 8471), False, 'from datetime import datetime\n'), ((8506, 8526), 'datetime.datetime', 'datetime', (['(2019)', '(1)', '(1)'], {}), '(2019, 1, 1)\n', (8514, 8526), False, 'from datetime import datetime\n'), ((8625, 8645), 'datetime.datetime', 'datetime', (['(2008)', '(9)', '(1)'], {}), '(2008, 9, 1)\n', (8633, 8645), False, 'from datetime import datetime\n'), ((8738, 8758), 'datetime.datetime', 'datetime', (['(2010)', '(3)', '(1)'], {}), '(2010, 3, 1)\n', (8746, 8758), False, 'from datetime import datetime\n'), ((8846, 8867), 'datetime.datetime', 'datetime', (['(2010)', '(10)', '(1)'], {}), '(2010, 10, 1)\n', (8854, 8867), False, 'from datetime import datetime\n'), ((8973, 8993), 'datetime.datetime', 'datetime', (['(2010)', '(7)', '(1)'], {}), '(2010, 7, 1)\n', (8981, 8993), False, 'from datetime import datetime\n'), ((9079, 9100), 'datetime.datetime', 'datetime', (['(2016)', '(11)', '(1)'], {}), '(2016, 11, 1)\n', (9087, 9100), False, 'from datetime import datetime\n'), ((9205, 9225), 'datetime.datetime', 'datetime', (['(2020)', '(1)', '(1)'], {}), '(2020, 1, 1)\n', (9213, 9225), False, 'from datetime import datetime\n'), ((11771, 11792), 'datetime.datetime', 'datetime', (['(2018)', '(12)', '(1)'], {}), '(2018, 12, 1)\n', (11779, 11792), False, 'from datetime import datetime\n'), ((12934, 12959), 'numpy.arange', 'np.arange', 
(['(-0.8)', '(1.4)', '(0.4)'], {}), '(-0.8, 1.4, 0.4)\n', (12943, 12959), True, 'import numpy as np\n'), ((13762, 13781), 'numpy.arange', 'np.arange', (['(-2)', '(3)', '(1)'], {}), '(-2, 3, 1)\n', (13771, 13781), True, 'import numpy as np\n'), ((14941, 14961), 'datetime.datetime', 'datetime', (['(1985)', '(1)', '(1)'], {}), '(1985, 1, 1)\n', (14949, 14961), False, 'from datetime import datetime\n'), ((14960, 14980), 'datetime.datetime', 'datetime', (['(1989)', '(2)', '(1)'], {}), '(1989, 2, 1)\n', (14968, 14980), False, 'from datetime import datetime\n'), ((15015, 15035), 'datetime.datetime', 'datetime', (['(1987)', '(1)', '(1)'], {}), '(1987, 1, 1)\n', (15023, 15035), False, 'from datetime import datetime\n'), ((15129, 15149), 'datetime.datetime', 'datetime', (['(1989)', '(2)', '(1)'], {}), '(1989, 2, 1)\n', (15137, 15149), False, 'from datetime import datetime\n'), ((15148, 15168), 'datetime.datetime', 'datetime', (['(1993)', '(2)', '(1)'], {}), '(1993, 2, 1)\n', (15156, 15168), False, 'from datetime import datetime\n'), ((15203, 15223), 'datetime.datetime', 'datetime', (['(1991)', '(1)', '(1)'], {}), '(1991, 1, 1)\n', (15211, 15223), False, 'from datetime import datetime\n'), ((15309, 15329), 'datetime.datetime', 'datetime', (['(1993)', '(2)', '(1)'], {}), '(1993, 2, 1)\n', (15317, 15329), False, 'from datetime import datetime\n'), ((15328, 15348), 'datetime.datetime', 'datetime', (['(2001)', '(2)', '(1)'], {}), '(2001, 2, 1)\n', (15336, 15348), False, 'from datetime import datetime\n'), ((15383, 15403), 'datetime.datetime', 'datetime', (['(1997)', '(1)', '(1)'], {}), '(1997, 1, 1)\n', (15391, 15403), False, 'from datetime import datetime\n'), ((15496, 15516), 'datetime.datetime', 'datetime', (['(2001)', '(2)', '(1)'], {}), '(2001, 2, 1)\n', (15504, 15516), False, 'from datetime import datetime\n'), ((15515, 15535), 'datetime.datetime', 'datetime', (['(2009)', '(2)', '(1)'], {}), '(2009, 2, 1)\n', (15523, 15535), False, 'from datetime import datetime\n'), 
((15570, 15590), 'datetime.datetime', 'datetime', (['(2005)', '(1)', '(1)'], {}), '(2005, 1, 1)\n', (15578, 15590), False, 'from datetime import datetime\n'), ((15676, 15696), 'datetime.datetime', 'datetime', (['(2009)', '(2)', '(1)'], {}), '(2009, 2, 1)\n', (15684, 15696), False, 'from datetime import datetime\n'), ((15695, 15715), 'datetime.datetime', 'datetime', (['(2017)', '(2)', '(1)'], {}), '(2017, 2, 1)\n', (15703, 15715), False, 'from datetime import datetime\n'), ((15750, 15770), 'datetime.datetime', 'datetime', (['(2013)', '(1)', '(1)'], {}), '(2013, 1, 1)\n', (15758, 15770), False, 'from datetime import datetime\n'), ((15863, 15883), 'datetime.datetime', 'datetime', (['(2017)', '(2)', '(1)'], {}), '(2017, 2, 1)\n', (15871, 15883), False, 'from datetime import datetime\n'), ((15882, 15902), 'datetime.datetime', 'datetime', (['(2021)', '(2)', '(1)'], {}), '(2021, 2, 1)\n', (15890, 15902), False, 'from datetime import datetime\n'), ((15937, 15957), 'datetime.datetime', 'datetime', (['(2019)', '(1)', '(1)'], {}), '(2019, 1, 1)\n', (15945, 15957), False, 'from datetime import datetime\n'), ((16055, 16075), 'datetime.datetime', 'datetime', (['(1993)', '(9)', '(1)'], {}), '(1993, 9, 1)\n', (16063, 16075), False, 'from datetime import datetime\n'), ((16178, 16198), 'datetime.datetime', 'datetime', (['(2001)', '(9)', '(1)'], {}), '(2001, 9, 1)\n', (16186, 16198), False, 'from datetime import datetime\n'), ((16281, 16302), 'datetime.datetime', 'datetime', (['(2006)', '(11)', '(1)'], {}), '(2006, 11, 1)\n', (16289, 16302), False, 'from datetime import datetime\n'), ((16402, 16422), 'datetime.datetime', 'datetime', (['(2008)', '(9)', '(1)'], {}), '(2008, 9, 1)\n', (16410, 16422), False, 'from datetime import datetime\n'), ((16516, 16536), 'datetime.datetime', 'datetime', (['(2010)', '(3)', '(1)'], {}), '(2010, 3, 1)\n', (16524, 16536), False, 'from datetime import datetime\n'), ((16621, 16642), 'datetime.datetime', 'datetime', (['(2010)', '(10)', '(1)'], {}), 
'(2010, 10, 1)\n', (16629, 16642), False, 'from datetime import datetime\n'), ((16748, 16769), 'datetime.datetime', 'datetime', (['(2010)', '(12)', '(1)'], {}), '(2010, 12, 1)\n', (16756, 16769), False, 'from datetime import datetime\n'), ((16857, 16877), 'datetime.datetime', 'datetime', (['(2012)', '(6)', '(1)'], {}), '(2012, 6, 1)\n', (16865, 16877), False, 'from datetime import datetime\n'), ((16965, 16986), 'datetime.datetime', 'datetime', (['(2016)', '(11)', '(1)'], {}), '(2016, 11, 1)\n', (16973, 16986), False, 'from datetime import datetime\n'), ((1520, 1556), 'numpy.std', 'np.std', (["monthlyIndex[dict + 'index']"], {}), "(monthlyIndex[dict + 'index'])\n", (1526, 1556), True, 'import numpy as np\n'), ((1635, 1676), 'numpy.mean', 'np.mean', (["monthlyIndex['UncertaintyIndex']"], {}), "(monthlyIndex['UncertaintyIndex'])\n", (1642, 1676), True, 'import numpy as np\n'), ((2773, 2808), 'numpy.std', 'np.std', (["weeklyIndex[dict + 'index']"], {}), "(weeklyIndex[dict + 'index'])\n", (2779, 2808), True, 'import numpy as np\n'), ((2885, 2925), 'numpy.mean', 'np.mean', (["weeklyIndex['UncertaintyIndex']"], {}), "(weeklyIndex['UncertaintyIndex'])\n", (2892, 2925), True, 'import numpy as np\n'), ((1483, 1520), 'numpy.mean', 'np.mean', (["monthlyIndex[dict + 'index']"], {}), "(monthlyIndex[dict + 'index'])\n", (1490, 1520), True, 'import numpy as np\n'), ((2737, 2773), 'numpy.mean', 'np.mean', (["weeklyIndex[dict + 'index']"], {}), "(weeklyIndex[dict + 'index'])\n", (2744, 2773), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import torch
from torch.autograd import Variable
import torch.nn
from pyoptmat import models, flowrules, utility, hardening, damage
from pyoptmat.temperature import ConstantParameter as CP
# Double precision keeps the finite-difference derivative checks in the tests
# below well inside their 1e-4 relative/absolute tolerances.
torch.set_default_dtype(torch.float64)
class CommonModel:
    """Mixin verifying analytic model Jacobians against numeric differentiation.

    Subclasses provide the fixture attributes read here: ``times``, ``strains``,
    ``stresses``, ``temperatures``, ``state_strain``, ``state_stress``, ``t``,
    and the ``model`` under test.
    """

    def test_derivs_strain(self):
        """Strain-driven: exact Jacobian must match the numeric one."""
        dt = self.times[1:] - self.times[:-1]
        de = self.strains[1:] - self.strains[:-1]
        # Prepend a zero-rate row for the first step, then clear any NaNs
        # produced by zero time increments.
        rates = torch.cat((torch.zeros(1, self.strains.shape[1]), de / dt))
        rates[torch.isnan(rates)] = 0
        erate = utility.CheaterBatchTimeSeriesInterpolator(self.times, rates)
        temps = utility.CheaterBatchTimeSeriesInterpolator(
            self.times, self.temperatures
        )
        driven = models.StrainBasedModel(self.model, erate, temps)
        _, exact = driven.forward(self.t, self.state_strain)
        numeric = utility.new_differentiate(
            lambda x: driven.forward(self.t, x)[0], self.state_strain
        )
        self.assertTrue(np.allclose(exact, numeric, rtol=1e-4, atol=1e-4))

    def test_derivs_stress(self):
        """Stress-driven: exact Jacobian must match the numeric one."""
        dt = self.times[1:] - self.times[:-1]
        ds = self.stresses[1:] - self.stresses[:-1]
        # Same construction as the strain case, but for stress rates.
        rates = torch.cat((torch.zeros(1, self.stresses.shape[1]), ds / dt))
        rates[torch.isnan(rates)] = 0
        srate = utility.CheaterBatchTimeSeriesInterpolator(self.times, rates)
        stress = utility.CheaterBatchTimeSeriesInterpolator(self.times, self.stresses)
        temps = utility.CheaterBatchTimeSeriesInterpolator(
            self.times, self.temperatures
        )
        driven = models.StressBasedModel(self.model, srate, stress, temps)
        _, exact = driven.forward(self.t, self.state_stress)
        numeric = utility.new_differentiate(
            lambda x: driven.forward(self.t, x)[0], self.state_stress
        )
        self.assertTrue(np.allclose(exact, numeric, rtol=1e-4, atol=1e-4))
class TestPerfectViscoplasticity(unittest.TestCase, CommonModel):
    """Jacobian checks for a perfect-viscoplasticity model (no hardening)."""

    @staticmethod
    def _ramp():
        # (ntime, nbatch) = (4, 3) history ramping linearly from 0 to 1.
        rows = np.array([np.linspace(0, 1, 4) for _ in range(3)])
        return torch.transpose(torch.tensor(rows), 1, 0)

    def setUp(self):
        # Elastic and flow-rule constants.
        self.E = torch.tensor(100000.0)
        self.n = torch.tensor(5.2)
        self.eta = torch.tensor(110.0)

        # Driving histories: three batched cases over four steps.
        self.times = self._ramp()
        self.strains = self._ramp() / 10.0
        self.stresses = self._ramp() * 0
        self.temperatures = torch.zeros_like(self.strains)

        # State vectors are [stress] / [strain] only (no internal variables).
        self.state_strain = torch.tensor([[90.0], [100.0], [101.0]])
        self.state_stress = torch.tensor([[0.0], [0.0], [0.0]])
        self.t = self.times[2]

        self.flowrule = flowrules.PerfectViscoplasticity(CP(self.n), CP(self.eta))
        self.model = models.InelasticModel(CP(self.E), self.flowrule)
class TestIsoKinViscoplasticity(unittest.TestCase, CommonModel):
    """Jacobian checks for isotropic + kinematic hardening viscoplasticity."""

    @staticmethod
    def _ramp():
        # (ntime, nbatch) = (4, 3) history ramping linearly from 0 to 1.
        rows = np.array([np.linspace(0, 1, 4) for _ in range(3)])
        return torch.transpose(torch.tensor(rows), 1, 0)

    def setUp(self):
        # Elastic / flow constants.
        self.E = torch.tensor(100000.0)
        self.n = torch.tensor(5.2)
        self.eta = torch.tensor(110.0)
        self.s0 = torch.tensor(0.0)

        # Voce isotropic hardening.
        self.R = torch.tensor(101.0)
        self.d = torch.tensor(1.3)
        self.iso = hardening.VoceIsotropicHardeningModel(CP(self.R), CP(self.d))

        # Frederick-Armstrong kinematic hardening.
        self.C = torch.tensor(12000.0)
        self.g = torch.tensor(10.1)
        self.kin = hardening.FAKinematicHardeningModel(CP(self.C), CP(self.g))

        self.flowrule = flowrules.IsoKinViscoplasticity(
            CP(self.n), CP(self.eta), CP(self.s0), self.iso, self.kin
        )
        self.model = models.InelasticModel(CP(self.E), self.flowrule)

        # Driving histories: three batched cases over four steps.
        self.times = self._ramp()
        self.strains = self._ramp()
        self.stresses = self._ramp() * 200
        self.temperatures = torch.zeros_like(self.times)

        # State: [stress or strain, iso, kin, damage] per batch member.
        self.state_strain = (
            torch.tensor(
                [[90.0, 30.0, 10.0, 0], [100.0, 10.0, 15.0, 0], [101.0, 50.0, 60.0, 0]]
            )
            / 3
        )
        self.state_stress = (
            torch.tensor(
                [[0.05, 30.0, 10.0, 0], [0.07, 10.0, 15.0, 0], [0.08, 50.0, 60.0, 0]]
            )
            / 3
        )
        self.t = self.times[2]
class TestIsoKinViscoplasticityRecovery(unittest.TestCase, CommonModel):
    """Jacobian checks with static-recovery (Theta0) isotropic hardening."""

    @staticmethod
    def _ramp():
        # (ntime, nbatch) = (4, 3) history ramping linearly from 0 to 1.
        rows = np.array([np.linspace(0, 1, 4) for _ in range(3)])
        return torch.transpose(torch.tensor(rows), 1, 0)

    def setUp(self):
        # Elastic / flow constants.
        self.E = torch.tensor(100000.0)
        self.n = torch.tensor(5.2)
        self.eta = torch.tensor(110.0)
        self.s0 = torch.tensor(0.0)

        # Theta0 recovery Voce isotropic hardening.
        self.tau0 = torch.tensor(101.0)
        self.theta0 = torch.tensor(1000.0)
        self.R0 = torch.tensor(0.0)
        self.r1 = torch.tensor(1.0e-6)
        self.r2 = torch.tensor(2.0)
        self.iso = hardening.Theta0RecoveryVoceIsotropicHardeningModel(
            CP(self.tau0), CP(self.theta0), CP(self.R0), CP(self.r1), CP(self.r2)
        )

        # Frederick-Armstrong kinematic hardening.
        self.C = torch.tensor(12000.0)
        self.g = torch.tensor(10.1)
        self.kin = hardening.FAKinematicHardeningModel(CP(self.C), CP(self.g))

        self.flowrule = flowrules.IsoKinViscoplasticity(
            CP(self.n), CP(self.eta), CP(self.s0), self.iso, self.kin
        )
        self.model = models.InelasticModel(CP(self.E), self.flowrule)

        # Driving histories: three batched cases over four steps.
        self.times = self._ramp()
        self.strains = self._ramp()
        self.stresses = self._ramp() * 200
        self.temperatures = torch.zeros_like(self.times)

        # State: [stress or strain, iso, kin, damage] per batch member.
        self.state_strain = (
            torch.tensor(
                [[90.0, 30.0, 10.0, 0], [100.0, 10.0, 15.0, 0], [101.0, 50.0, 60.0, 0]]
            )
            / 3
        )
        self.state_stress = (
            torch.tensor(
                [[0.05, 30.0, 10.0, 0], [0.07, 10.0, 15.0, 0], [0.08, 50.0, 60.0, 0]]
            )
            / 3
        )
        self.t = self.times[2]
class TestDamage(unittest.TestCase, CommonModel):
    """Jacobian checks with a Hayhurst-Leckie damage model attached."""

    @staticmethod
    def _ramp():
        # (ntime, nbatch) = (4, 3) history ramping linearly from 0 to 1.
        rows = np.array([np.linspace(0, 1, 4) for _ in range(3)])
        return torch.transpose(torch.tensor(rows), 1, 0)

    def setUp(self):
        # Elastic / flow constants.
        self.E = torch.tensor(100000.0)
        self.n = torch.tensor(5.2)
        self.eta = torch.tensor(110.0)
        self.s0 = torch.tensor(0.0)

        # Voce isotropic hardening.
        self.R = torch.tensor(101.0)
        self.d = torch.tensor(1.3)
        self.iso = hardening.VoceIsotropicHardeningModel(CP(self.R), CP(self.d))

        # Frederick-Armstrong kinematic hardening.
        self.C = torch.tensor(1200.0)
        self.g = torch.tensor(10.1)
        self.kin = hardening.FAKinematicHardeningModel(CP(self.C), CP(self.g))

        # Hayhurst-Leckie damage.
        self.A = torch.tensor(3000.0)
        self.xi = torch.tensor(6.5)
        self.phi = torch.tensor(1.7)
        self.dmodel = damage.HayhurstLeckie(CP(self.A), CP(self.xi), CP(self.phi))

        self.flowrule = flowrules.IsoKinViscoplasticity(
            CP(self.n), CP(self.eta), CP(self.s0), self.iso, self.kin
        )
        self.model = models.InelasticModel(
            CP(self.E), self.flowrule, dmodel=self.dmodel
        )

        # Driving histories: three batched cases over four steps.
        self.times = self._ramp()
        self.strains = self._ramp()
        self.stresses = self._ramp() * 200
        self.temperatures = torch.zeros_like(self.strains)

        # State: [stress or strain, iso, kin, damage] per batch member.
        self.state_strain = torch.tensor(
            [[90.0, 30.0, 10.0, 0.05], [100.0, 10.0, 15.0, 0.1], [20, -10.0, -10, 0.2]]
        )
        self.state_stress = torch.tensor(
            [[0.1, 30.0, 10.0, 0.05], [0.11, 10.0, 15.0, 0.1], [0.12, -10.0, -10, 0.2]]
        )
        self.t = self.times[2]
class TestAll(unittest.TestCase, CommonModel):
    """Jacobian checks combining Chaboche backstresses with damage."""

    @staticmethod
    def _ramp():
        # (ntime, nbatch) = (4, 3) history ramping linearly from 0 to 1.
        rows = np.array([np.linspace(0, 1, 4) for _ in range(3)])
        return torch.transpose(torch.tensor(rows), 1, 0)

    def setUp(self):
        # Elastic / flow constants.
        self.E = torch.tensor(100000.0)
        self.n = torch.tensor(5.2)
        self.eta = torch.tensor(110.0)
        self.s0 = torch.tensor(0.0)

        # Voce isotropic hardening.
        self.R = torch.tensor(101.0)
        self.d = torch.tensor(1.3)
        self.iso = hardening.VoceIsotropicHardeningModel(CP(self.R), CP(self.d))

        # Chaboche kinematic hardening with three backstresses.
        self.C = torch.tensor([1200.0, 200.0, 10.0])
        self.g = torch.tensor([10.1, 100.0, 50.0])
        self.kin = hardening.ChabocheHardeningModel(CP(self.C), CP(self.g))

        # Hayhurst-Leckie damage.
        self.A = torch.tensor(3000.0)
        self.xi = torch.tensor(6.5)
        self.phi = torch.tensor(1.7)
        self.dmodel = damage.HayhurstLeckie(CP(self.A), CP(self.xi), CP(self.phi))

        self.flowrule = flowrules.IsoKinViscoplasticity(
            CP(self.n), CP(self.eta), CP(self.s0), self.iso, self.kin
        )
        self.model = models.InelasticModel(
            CP(self.E), self.flowrule, dmodel=self.dmodel
        )

        # Driving histories: three batched cases over four steps.
        self.times = self._ramp()
        self.strains = self._ramp()
        self.stresses = self._ramp() * 200
        self.temperatures = torch.zeros_like(self.strains)

        # State: [stress or strain, iso, 3 backstresses, damage] per member.
        self.state_strain = torch.tensor(
            [
                [90.0, 30.0, 10.0, 10.0, -10.0, 0.2],
                [100.0, 10.0, 15.0, 5.0, -10.0, 0.3],
                [101.0, 50.0, 60.0, -50.0, 10.0, 0.4],
            ]
        )
        self.state_stress = torch.tensor(
            [
                [0.05, 30.0, 10.0, 10.0, -10.0, 0.2],
                [0.08, 10.0, 15.0, 5.0, -10.0, 0.3],
                [0.07, 50.0, 60.0, -50.0, 10.0, 0.4],
            ]
        )
        self.t = self.times[2]
| [
"numpy.allclose",
"pyoptmat.models.StrainBasedModel",
"torch.set_default_dtype",
"pyoptmat.models.StressBasedModel",
"torch.tensor",
"numpy.linspace",
"pyoptmat.utility.CheaterBatchTimeSeriesInterpolator",
"torch.zeros_like",
"torch.isnan",
"torch.zeros",
"pyoptmat.temperature.ConstantParameter"... | [((228, 266), 'torch.set_default_dtype', 'torch.set_default_dtype', (['torch.float64'], {}), '(torch.float64)\n', (251, 266), False, 'import torch\n'), ((640, 708), 'pyoptmat.utility.CheaterBatchTimeSeriesInterpolator', 'utility.CheaterBatchTimeSeriesInterpolator', (['self.times', 'strain_rates'], {}), '(self.times, strain_rates)\n', (682, 708), False, 'from pyoptmat import models, flowrules, utility, hardening, damage\n'), ((766, 839), 'pyoptmat.utility.CheaterBatchTimeSeriesInterpolator', 'utility.CheaterBatchTimeSeriesInterpolator', (['self.times', 'self.temperatures'], {}), '(self.times, self.temperatures)\n', (808, 839), False, 'from pyoptmat import models, flowrules, utility, hardening, damage\n'), ((877, 962), 'pyoptmat.models.StrainBasedModel', 'models.StrainBasedModel', (['self.model', 'erate_interpolator', 'temperature_interpolator'], {}), '(self.model, erate_interpolator,\n temperature_interpolator)\n', (900, 962), False, 'from pyoptmat import models, flowrules, utility, hardening, damage\n'), ((1585, 1653), 'pyoptmat.utility.CheaterBatchTimeSeriesInterpolator', 'utility.CheaterBatchTimeSeriesInterpolator', (['self.times', 'stress_rates'], {}), '(self.times, stress_rates)\n', (1627, 1653), False, 'from pyoptmat import models, flowrules, utility, hardening, damage\n'), ((1706, 1775), 'pyoptmat.utility.CheaterBatchTimeSeriesInterpolator', 'utility.CheaterBatchTimeSeriesInterpolator', (['self.times', 'self.stresses'], {}), '(self.times, self.stresses)\n', (1748, 1775), False, 'from pyoptmat import models, flowrules, utility, hardening, damage\n'), ((1833, 1906), 'pyoptmat.utility.CheaterBatchTimeSeriesInterpolator', 'utility.CheaterBatchTimeSeriesInterpolator', (['self.times', 'self.temperatures'], {}), '(self.times, self.temperatures)\n', (1875, 1906), False, 'from pyoptmat import models, flowrules, utility, hardening, damage\n'), ((1944, 2056), 'pyoptmat.models.StressBasedModel', 'models.StressBasedModel', 
(['self.model', 'stress_rate_interpolator', 'stress_interpolator', 'temperature_interpolator'], {}), '(self.model, stress_rate_interpolator,\n stress_interpolator, temperature_interpolator)\n', (1967, 2056), False, 'from pyoptmat import models, flowrules, utility, hardening, damage\n'), ((2461, 2483), 'torch.tensor', 'torch.tensor', (['(100000.0)'], {}), '(100000.0)\n', (2473, 2483), False, 'import torch\n'), ((2501, 2518), 'torch.tensor', 'torch.tensor', (['(5.2)'], {}), '(5.2)\n', (2513, 2518), False, 'import torch\n'), ((2538, 2557), 'torch.tensor', 'torch.tensor', (['(110.0)'], {}), '(110.0)\n', (2550, 2557), False, 'import torch\n'), ((2902, 2932), 'torch.zeros_like', 'torch.zeros_like', (['self.strains'], {}), '(self.strains)\n', (2918, 2932), False, 'import torch\n'), ((3144, 3184), 'torch.tensor', 'torch.tensor', (['[[90.0], [100.0], [101.0]]'], {}), '([[90.0], [100.0], [101.0]])\n', (3156, 3184), False, 'import torch\n'), ((3213, 3248), 'torch.tensor', 'torch.tensor', (['[[0.0], [0.0], [0.0]]'], {}), '([[0.0], [0.0], [0.0]])\n', (3225, 3248), False, 'import torch\n'), ((3539, 3561), 'torch.tensor', 'torch.tensor', (['(100000.0)'], {}), '(100000.0)\n', (3551, 3561), False, 'import torch\n'), ((3579, 3596), 'torch.tensor', 'torch.tensor', (['(5.2)'], {}), '(5.2)\n', (3591, 3596), False, 'import torch\n'), ((3616, 3635), 'torch.tensor', 'torch.tensor', (['(110.0)'], {}), '(110.0)\n', (3628, 3635), False, 'import torch\n'), ((3654, 3671), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (3666, 3671), False, 'import torch\n'), ((3690, 3709), 'torch.tensor', 'torch.tensor', (['(101.0)'], {}), '(101.0)\n', (3702, 3709), False, 'import torch\n'), ((3727, 3744), 'torch.tensor', 'torch.tensor', (['(1.3)'], {}), '(1.3)\n', (3739, 3744), False, 'import torch\n'), ((3844, 3865), 'torch.tensor', 'torch.tensor', (['(12000.0)'], {}), '(12000.0)\n', (3856, 3865), False, 'import torch\n'), ((3883, 3901), 'torch.tensor', 'torch.tensor', (['(10.1)'], {}), 
'(10.1)\n', (3895, 3901), False, 'import torch\n'), ((4482, 4510), 'torch.zeros_like', 'torch.zeros_like', (['self.times'], {}), '(self.times)\n', (4498, 4510), False, 'import torch\n'), ((5207, 5229), 'torch.tensor', 'torch.tensor', (['(100000.0)'], {}), '(100000.0)\n', (5219, 5229), False, 'import torch\n'), ((5247, 5264), 'torch.tensor', 'torch.tensor', (['(5.2)'], {}), '(5.2)\n', (5259, 5264), False, 'import torch\n'), ((5284, 5303), 'torch.tensor', 'torch.tensor', (['(110.0)'], {}), '(110.0)\n', (5296, 5303), False, 'import torch\n'), ((5322, 5339), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (5334, 5339), False, 'import torch\n'), ((5361, 5380), 'torch.tensor', 'torch.tensor', (['(101.0)'], {}), '(101.0)\n', (5373, 5380), False, 'import torch\n'), ((5403, 5423), 'torch.tensor', 'torch.tensor', (['(1000.0)'], {}), '(1000.0)\n', (5415, 5423), False, 'import torch\n'), ((5442, 5459), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (5454, 5459), False, 'import torch\n'), ((5478, 5497), 'torch.tensor', 'torch.tensor', (['(1e-06)'], {}), '(1e-06)\n', (5490, 5497), False, 'import torch\n'), ((5517, 5534), 'torch.tensor', 'torch.tensor', (['(2.0)'], {}), '(2.0)\n', (5529, 5534), False, 'import torch\n'), ((5717, 5738), 'torch.tensor', 'torch.tensor', (['(12000.0)'], {}), '(12000.0)\n', (5729, 5738), False, 'import torch\n'), ((5756, 5774), 'torch.tensor', 'torch.tensor', (['(10.1)'], {}), '(10.1)\n', (5768, 5774), False, 'import torch\n'), ((6355, 6383), 'torch.zeros_like', 'torch.zeros_like', (['self.times'], {}), '(self.times)\n', (6371, 6383), False, 'import torch\n'), ((7057, 7079), 'torch.tensor', 'torch.tensor', (['(100000.0)'], {}), '(100000.0)\n', (7069, 7079), False, 'import torch\n'), ((7097, 7114), 'torch.tensor', 'torch.tensor', (['(5.2)'], {}), '(5.2)\n', (7109, 7114), False, 'import torch\n'), ((7134, 7153), 'torch.tensor', 'torch.tensor', (['(110.0)'], {}), '(110.0)\n', (7146, 7153), False, 'import torch\n'), ((7172, 7189), 
'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (7184, 7189), False, 'import torch\n'), ((7208, 7227), 'torch.tensor', 'torch.tensor', (['(101.0)'], {}), '(101.0)\n', (7220, 7227), False, 'import torch\n'), ((7245, 7262), 'torch.tensor', 'torch.tensor', (['(1.3)'], {}), '(1.3)\n', (7257, 7262), False, 'import torch\n'), ((7362, 7382), 'torch.tensor', 'torch.tensor', (['(1200.0)'], {}), '(1200.0)\n', (7374, 7382), False, 'import torch\n'), ((7400, 7418), 'torch.tensor', 'torch.tensor', (['(10.1)'], {}), '(10.1)\n', (7412, 7418), False, 'import torch\n'), ((7516, 7536), 'torch.tensor', 'torch.tensor', (['(3000.0)'], {}), '(3000.0)\n', (7528, 7536), False, 'import torch\n'), ((7555, 7572), 'torch.tensor', 'torch.tensor', (['(6.5)'], {}), '(6.5)\n', (7567, 7572), False, 'import torch\n'), ((7592, 7609), 'torch.tensor', 'torch.tensor', (['(1.7)'], {}), '(1.7)\n', (7604, 7609), False, 'import torch\n'), ((8236, 8266), 'torch.zeros_like', 'torch.zeros_like', (['self.strains'], {}), '(self.strains)\n', (8252, 8266), False, 'import torch\n'), ((8480, 8574), 'torch.tensor', 'torch.tensor', (['[[90.0, 30.0, 10.0, 0.05], [100.0, 10.0, 15.0, 0.1], [20, -10.0, -10, 0.2]]'], {}), '([[90.0, 30.0, 10.0, 0.05], [100.0, 10.0, 15.0, 0.1], [20, -\n 10.0, -10, 0.2]])\n', (8492, 8574), False, 'import torch\n'), ((8620, 8714), 'torch.tensor', 'torch.tensor', (['[[0.1, 30.0, 10.0, 0.05], [0.11, 10.0, 15.0, 0.1], [0.12, -10.0, -10, 0.2]]'], {}), '([[0.1, 30.0, 10.0, 0.05], [0.11, 10.0, 15.0, 0.1], [0.12, -\n 10.0, -10, 0.2]])\n', (8632, 8714), False, 'import torch\n'), ((8851, 8873), 'torch.tensor', 'torch.tensor', (['(100000.0)'], {}), '(100000.0)\n', (8863, 8873), False, 'import torch\n'), ((8891, 8908), 'torch.tensor', 'torch.tensor', (['(5.2)'], {}), '(5.2)\n', (8903, 8908), False, 'import torch\n'), ((8928, 8947), 'torch.tensor', 'torch.tensor', (['(110.0)'], {}), '(110.0)\n', (8940, 8947), False, 'import torch\n'), ((8966, 8983), 'torch.tensor', 'torch.tensor', (['(0.0)'], 
{}), '(0.0)\n', (8978, 8983), False, 'import torch\n'), ((9002, 9021), 'torch.tensor', 'torch.tensor', (['(101.0)'], {}), '(101.0)\n', (9014, 9021), False, 'import torch\n'), ((9039, 9056), 'torch.tensor', 'torch.tensor', (['(1.3)'], {}), '(1.3)\n', (9051, 9056), False, 'import torch\n'), ((9156, 9191), 'torch.tensor', 'torch.tensor', (['[1200.0, 200.0, 10.0]'], {}), '([1200.0, 200.0, 10.0])\n', (9168, 9191), False, 'import torch\n'), ((9209, 9242), 'torch.tensor', 'torch.tensor', (['[10.1, 100.0, 50.0]'], {}), '([10.1, 100.0, 50.0])\n', (9221, 9242), False, 'import torch\n'), ((9337, 9357), 'torch.tensor', 'torch.tensor', (['(3000.0)'], {}), '(3000.0)\n', (9349, 9357), False, 'import torch\n'), ((9376, 9393), 'torch.tensor', 'torch.tensor', (['(6.5)'], {}), '(6.5)\n', (9388, 9393), False, 'import torch\n'), ((9413, 9430), 'torch.tensor', 'torch.tensor', (['(1.7)'], {}), '(1.7)\n', (9425, 9430), False, 'import torch\n'), ((10057, 10087), 'torch.zeros_like', 'torch.zeros_like', (['self.strains'], {}), '(self.strains)\n', (10073, 10087), False, 'import torch\n'), ((10301, 10434), 'torch.tensor', 'torch.tensor', (['[[90.0, 30.0, 10.0, 10.0, -10.0, 0.2], [100.0, 10.0, 15.0, 5.0, -10.0, 0.3],\n [101.0, 50.0, 60.0, -50.0, 10.0, 0.4]]'], {}), '([[90.0, 30.0, 10.0, 10.0, -10.0, 0.2], [100.0, 10.0, 15.0, 5.0,\n -10.0, 0.3], [101.0, 50.0, 60.0, -50.0, 10.0, 0.4]])\n', (10313, 10434), False, 'import torch\n'), ((10544, 10675), 'torch.tensor', 'torch.tensor', (['[[0.05, 30.0, 10.0, 10.0, -10.0, 0.2], [0.08, 10.0, 15.0, 5.0, -10.0, 0.3],\n [0.07, 50.0, 60.0, -50.0, 10.0, 0.4]]'], {}), '([[0.05, 30.0, 10.0, 10.0, -10.0, 0.2], [0.08, 10.0, 15.0, 5.0,\n -10.0, 0.3], [0.07, 50.0, 60.0, -50.0, 10.0, 0.4]])\n', (10556, 10675), False, 'import torch\n'), ((579, 604), 'torch.isnan', 'torch.isnan', (['strain_rates'], {}), '(strain_rates)\n', (590, 604), False, 'import torch\n'), ((1179, 1225), 'numpy.allclose', 'np.allclose', (['dv', 'ddv'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), 
'(dv, ddv, rtol=0.0001, atol=0.0001)\n', (1190, 1225), True, 'import numpy as np\n'), ((1518, 1543), 'torch.isnan', 'torch.isnan', (['stress_rates'], {}), '(stress_rates)\n', (1529, 1543), False, 'import torch\n'), ((2311, 2357), 'numpy.allclose', 'np.allclose', (['dv', 'ddv'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(dv, ddv, rtol=0.0001, atol=0.0001)\n', (2322, 2357), True, 'import numpy as np\n'), ((3338, 3348), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.n'], {}), '(self.n)\n', (3340, 3348), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((3350, 3362), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.eta'], {}), '(self.eta)\n', (3352, 3362), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((3407, 3417), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.E'], {}), '(self.E)\n', (3409, 3417), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((3802, 3812), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.R'], {}), '(self.R)\n', (3804, 3812), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((3814, 3824), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.d'], {}), '(self.d)\n', (3816, 3824), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((3957, 3967), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.C'], {}), '(self.C)\n', (3959, 3967), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((3969, 3979), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.g'], {}), '(self.g)\n', (3971, 3979), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((4051, 4061), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.n'], {}), '(self.n)\n', (4053, 4061), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((4063, 4075), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.eta'], {}), '(self.eta)\n', (4065, 4075), True, 'from pyoptmat.temperature import 
ConstantParameter as CP\n'), ((4077, 4088), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.s0'], {}), '(self.s0)\n', (4079, 4088), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((4162, 4172), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.E'], {}), '(self.E)\n', (4164, 4172), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((4738, 4828), 'torch.tensor', 'torch.tensor', (['[[90.0, 30.0, 10.0, 0], [100.0, 10.0, 15.0, 0], [101.0, 50.0, 60.0, 0]]'], {}), '([[90.0, 30.0, 10.0, 0], [100.0, 10.0, 15.0, 0], [101.0, 50.0, \n 60.0, 0]])\n', (4750, 4828), False, 'import torch\n'), ((4922, 5010), 'torch.tensor', 'torch.tensor', (['[[0.05, 30.0, 10.0, 0], [0.07, 10.0, 15.0, 0], [0.08, 50.0, 60.0, 0]]'], {}), '([[0.05, 30.0, 10.0, 0], [0.07, 10.0, 15.0, 0], [0.08, 50.0, \n 60.0, 0]])\n', (4934, 5010), False, 'import torch\n'), ((5619, 5632), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.tau0'], {}), '(self.tau0)\n', (5621, 5632), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((5634, 5649), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.theta0'], {}), '(self.theta0)\n', (5636, 5649), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((5651, 5662), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.R0'], {}), '(self.R0)\n', (5653, 5662), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((5664, 5675), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.r1'], {}), '(self.r1)\n', (5666, 5675), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((5677, 5688), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.r2'], {}), '(self.r2)\n', (5679, 5688), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((5830, 5840), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.C'], {}), '(self.C)\n', (5832, 5840), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((5842, 5852), 
'pyoptmat.temperature.ConstantParameter', 'CP', (['self.g'], {}), '(self.g)\n', (5844, 5852), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((5924, 5934), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.n'], {}), '(self.n)\n', (5926, 5934), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((5936, 5948), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.eta'], {}), '(self.eta)\n', (5938, 5948), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((5950, 5961), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.s0'], {}), '(self.s0)\n', (5952, 5961), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((6035, 6045), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.E'], {}), '(self.E)\n', (6037, 6045), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((6611, 6701), 'torch.tensor', 'torch.tensor', (['[[90.0, 30.0, 10.0, 0], [100.0, 10.0, 15.0, 0], [101.0, 50.0, 60.0, 0]]'], {}), '([[90.0, 30.0, 10.0, 0], [100.0, 10.0, 15.0, 0], [101.0, 50.0, \n 60.0, 0]])\n', (6623, 6701), False, 'import torch\n'), ((6795, 6883), 'torch.tensor', 'torch.tensor', (['[[0.05, 30.0, 10.0, 0], [0.07, 10.0, 15.0, 0], [0.08, 50.0, 60.0, 0]]'], {}), '([[0.05, 30.0, 10.0, 0], [0.07, 10.0, 15.0, 0], [0.08, 50.0, \n 60.0, 0]])\n', (6807, 6883), False, 'import torch\n'), ((7320, 7330), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.R'], {}), '(self.R)\n', (7322, 7330), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((7332, 7342), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.d'], {}), '(self.d)\n', (7334, 7342), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((7474, 7484), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.C'], {}), '(self.C)\n', (7476, 7484), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((7486, 7496), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.g'], {}), 
'(self.g)\n', (7488, 7496), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((7654, 7664), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.A'], {}), '(self.A)\n', (7656, 7664), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((7666, 7677), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.xi'], {}), '(self.xi)\n', (7668, 7677), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((7679, 7691), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.phi'], {}), '(self.phi)\n', (7681, 7691), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((7763, 7773), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.n'], {}), '(self.n)\n', (7765, 7773), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((7775, 7787), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.eta'], {}), '(self.eta)\n', (7777, 7787), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((7789, 7800), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.s0'], {}), '(self.s0)\n', (7791, 7800), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((7887, 7897), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.E'], {}), '(self.E)\n', (7889, 7897), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9114, 9124), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.R'], {}), '(self.R)\n', (9116, 9124), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9126, 9136), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.d'], {}), '(self.d)\n', (9128, 9136), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9295, 9305), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.C'], {}), '(self.C)\n', (9297, 9305), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9307, 9317), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.g'], {}), '(self.g)\n', (9309, 9317), 
True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9475, 9485), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.A'], {}), '(self.A)\n', (9477, 9485), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9487, 9498), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.xi'], {}), '(self.xi)\n', (9489, 9498), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9500, 9512), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.phi'], {}), '(self.phi)\n', (9502, 9512), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9584, 9594), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.n'], {}), '(self.n)\n', (9586, 9594), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9596, 9608), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.eta'], {}), '(self.eta)\n', (9598, 9608), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9610, 9621), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.s0'], {}), '(self.s0)\n', (9612, 9621), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((9708, 9718), 'pyoptmat.temperature.ConstantParameter', 'CP', (['self.E'], {}), '(self.E)\n', (9710, 9718), True, 'from pyoptmat.temperature import ConstantParameter as CP\n'), ((386, 423), 'torch.zeros', 'torch.zeros', (['(1)', 'self.strains.shape[1]'], {}), '(1, self.strains.shape[1])\n', (397, 423), False, 'import torch\n'), ((1322, 1360), 'torch.zeros', 'torch.zeros', (['(1)', 'self.stresses.shape[1]'], {}), '(1, self.stresses.shape[1])\n', (1333, 1360), False, 'import torch\n'), ((2632, 2652), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (2643, 2652), True, 'import numpy as np\n'), ((4263, 4283), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (4274, 4283), True, 'import numpy as np\n'), ((4396, 4416), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 
4)\n', (4407, 4416), True, 'import numpy as np\n'), ((6136, 6156), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (6147, 6156), True, 'import numpy as np\n'), ((6269, 6289), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (6280, 6289), True, 'import numpy as np\n'), ((8017, 8037), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (8028, 8037), True, 'import numpy as np\n'), ((8150, 8170), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (8161, 8170), True, 'import numpy as np\n'), ((9838, 9858), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (9849, 9858), True, 'import numpy as np\n'), ((9971, 9991), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (9982, 9991), True, 'import numpy as np\n'), ((2783, 2803), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (2794, 2803), True, 'import numpy as np\n'), ((3027, 3047), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (3038, 3047), True, 'import numpy as np\n'), ((4605, 4625), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (4616, 4625), True, 'import numpy as np\n'), ((6478, 6498), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (6489, 6498), True, 'import numpy as np\n'), ((8361, 8381), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (8372, 8381), True, 'import numpy as np\n'), ((10182, 10202), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(4)'], {}), '(0, 1, 4)\n', (10193, 10202), True, 'import numpy as np\n')] |
"""
the code is adapted from:
https://github.com/ibab/tensorflow-wavenet/blob/master/wavenet/model.py (base model)
https://github.com/twidddj/vqvae/blob/master/wavenet/model.py
"""
import numpy as np
import tensorflow as tf
from .ops import causal_conv, mu_law_encode
from .mixture import discretized_mix_logistic_loss, sample_from_discretized_mix_logistic
# Name prefix used to tag the per-layer incremental-generation queue
# variables so they can be located later (see WaveNetModel.get_vars_q).
PREFIX_QUEUE_VAR = "Q_L"
def create_variable(name, shape):
    """Return a convolution filter variable called *name* with *shape*,
    initialized with the Xavier scheme."""
    return tf.get_variable(
        name, shape, initializer=tf.contrib.layers.xavier_initializer())
def create_bias_variable(name, shape):
    """Return a bias variable called *name* with *shape*, initialized to zero."""
    zero_init = tf.constant_initializer(value=0.0, dtype=tf.float32)
    return tf.get_variable(name, shape, initializer=zero_init)
def create_embedding_table(name, shape):
    """Return an embedding table variable.

    A square table is seeded with the identity matrix (a one-hot encoding
    of each category); any other shape falls back to Xavier initialization
    via create_variable.
    """
    if shape[0] != shape[1]:
        return create_variable(name, shape)
    one_hot_init = np.identity(n=shape[0], dtype=np.float32)
    return tf.get_variable(name, initializer=one_hot_init)
class WaveNetModel(object):
'''Implements the WaveNet network for generative audio.
Usage (with the architecture as in the DeepMind paper):
dilations = [2**i for i in range(N)] * M
filter_width = 2 # Convolutions just use 2 samples.
residual_channels = 16 # Not specified in the paper.
dilation_channels = 32 # Not specified in the paper.
skip_channels = 16 # Not specified in the paper.
net = WaveNetModel(batch_size, dilations, filter_width,
residual_channels, dilation_channels,
skip_channels)
loss = net.loss(input_batch)
'''
    def __init__(self,
                 batch_size,
                 dilations,
                 filter_width,
                 residual_channels,
                 dilation_channels,
                 skip_channels,
                 out_channels=None,
                 quantization_channels=2**8,
                 use_biases=False,
                 scalar_input=False,
                 initial_filter_width=None,
                 histograms=False,
                 global_condition_channels=None,
                 global_condition_cardinality=None,
                 local_condition_channels=80):
        '''Initializes the WaveNet model.
        Args:
            batch_size: How many audio files are supplied per batch
                (recommended: 1).
            dilations: A list with the dilation factor for each layer.
            filter_width: The samples that are included in each convolution,
                after dilating.
            residual_channels: How many filters to learn for the residual.
            dilation_channels: How many filters to learn for the dilated
                convolution.
            skip_channels: How many filters to learn that contribute to the
                quantized softmax output.
            out_channels: Number of channels of the final network output.
                Defaults to quantization_channels when not given.
            quantization_channels: How many amplitude values to use for audio
                quantization and the corresponding one-hot encoding.
                Default: 256 (8-bit quantization).
            use_biases: Whether to add a bias layer to each convolution.
                Default: False.
            scalar_input: Whether to use the quantized waveform directly as
                input to the network instead of one-hot encoding it.
                Default: False.
            initial_filter_width: The width of the initial filter of the
                initial convolution. Defaults to filter_width when not given.
            histograms: Whether to store histograms in the summary.
                Default: False.
            global_condition_channels: Number of channels in (embedding
                size) of global conditioning vector. None indicates there is
                no global conditioning.
            global_condition_cardinality: Number of mutually exclusive
                categories to be embedded in global condition embedding. If
                not None, then this implies that global_condition tensor
                specifies an integer selecting which of the N global condition
                categories, where N = global_condition_cardinality. If None,
                then the global_condition tensor is regarded as a vector which
                must have dimension global_condition_channels.
            local_condition_channels: Number of channels of the local
                conditioning features fed to every dilated layer.
                Default: 80.
        '''
        # A filter of width 1 would make the network non-causal/degenerate;
        # the rest of the code assumes at least a width of 2.
        assert filter_width > 1
        self.batch_size = batch_size
        self.dilations = dilations
        self.filter_width = filter_width
        self.residual_channels = residual_channels
        self.dilation_channels = dilation_channels
        self.quantization_channels = quantization_channels
        # Default the output width to the quantization depth when not given.
        self.out_channels = out_channels or quantization_channels
        self.use_biases = use_biases
        self.skip_channels = skip_channels
        self.scalar_input = scalar_input
        # The first causal layer may use its own filter width; default to the
        # stack-wide width.
        self.initial_filter_width = initial_filter_width or filter_width
        self.histograms = histograms
        self.global_condition_channels = global_condition_channels
        self.global_condition_cardinality = global_condition_cardinality
        self.local_condition_channels = local_condition_channels
        # Number of input samples a single output sample depends on.
        self.receptive_field = WaveNetModel.calculate_receptive_field(
            self.filter_width, self.dilations, self.scalar_input,
            self.initial_filter_width)
        self.variables = self._create_variables()
@staticmethod
def calculate_receptive_field(filter_width, dilations, scalar_input,
initial_filter_width):
receptive_field = (filter_width - 1) * sum(dilations) + 1
if scalar_input:
receptive_field += initial_filter_width - 1
else:
receptive_field += filter_width - 1
return receptive_field
    def _create_variables(self):
        '''This function creates all variables used by the network.
        This allows us to share them between multiple calls to the loss
        function and generation function.

        Returns a nested dict mirroring the network structure:
        'embeddings' (optional), 'causal_layer', 'dilated_stack' (one dict
        per layer) and 'postprocessing'. Variable scopes determine the
        checkpoint names, so the creation order/nesting must not change.
        '''
        var = dict()
        with tf.variable_scope('wavenet'):
            if self.global_condition_cardinality is not None:
                # We only look up the embedding if we are conditioning on a
                # set of mutually-exclusive categories. We can also condition
                # on an already-embedded dense vector, in which case it's
                # given to us and we don't need to do the embedding lookup.
                # Still another alternative is no global condition at all, in
                # which case we also don't do a tf.nn.embedding_lookup.
                with tf.variable_scope('embeddings'):
                    layer = dict()
                    layer['gc_embedding'] = create_embedding_table(
                        'gc_embedding',
                        [self.global_condition_cardinality,
                         self.global_condition_channels])
                    var['embeddings'] = layer
            with tf.variable_scope('causal_layer'):
                layer = dict()
                if self.scalar_input:
                    initial_channels = 1
                else:
                    initial_channels = self.quantization_channels
                layer['filter'] = create_variable(
                    'filter',
                    [self.initial_filter_width,
                     initial_channels,
                     self.residual_channels])
                var['causal_layer'] = layer
            var['dilated_stack'] = list()
            with tf.variable_scope('dilated_stack'):
                for i, dilation in enumerate(self.dilations):
                    with tf.variable_scope('layer{}'.format(i)):
                        current = dict()
                        current['filter'] = create_variable(
                            'filter',
                            [self.filter_width,
                             self.residual_channels,
                             self.dilation_channels])
                        current['gate'] = create_variable(
                            'gate',
                            [self.filter_width,
                             self.residual_channels,
                             self.dilation_channels])
                        # 1x1 projections of the local-conditioning features
                        # into the filter/gate activations.
                        current['cond_filter'] = create_variable('cond_filter', [1, self.local_condition_channels, self.dilation_channels])
                        current['cond_gate'] = create_variable('cond_gate', [1, self.local_condition_channels, self.dilation_channels])
                        if self.use_biases:
                            current['cond_filter_bias'] = create_bias_variable(
                                'cond_filter_bias',
                                [self.dilation_channels])
                            current['cond_gate_bias'] = create_bias_variable(
                                'cond_gate_bias',
                                [self.dilation_channels])
                        # 1x1 convs producing the residual and skip outputs.
                        current['dense'] = create_variable(
                            'dense',
                            [1,
                             self.dilation_channels,
                             self.residual_channels])
                        current['skip'] = create_variable(
                            'skip',
                            [1,
                             self.dilation_channels,
                             self.skip_channels])
                        if self.global_condition_channels is not None:
                            current['gc_gateweights'] = create_variable(
                                'gc_gate',
                                [1, self.global_condition_channels,
                                 self.dilation_channels])
                            current['gc_filtweights'] = create_variable(
                                'gc_filter',
                                [1, self.global_condition_channels,
                                 self.dilation_channels])
                        if self.use_biases:
                            current['filter_bias'] = create_bias_variable(
                                'filter_bias',
                                [self.dilation_channels])
                            current['gate_bias'] = create_bias_variable(
                                'gate_bias',
                                [self.dilation_channels])
                            current['dense_bias'] = create_bias_variable(
                                'dense_bias',
                                [self.residual_channels])
                            # NOTE(review): 'slip_bias' looks like a typo for
                            # 'skip_bias'; kept as-is because renaming the
                            # variable would invalidate existing checkpoints.
                            current['skip_bias'] = create_bias_variable(
                                'slip_bias',
                                [self.skip_channels])
                        var['dilated_stack'].append(current)
            with tf.variable_scope('postprocessing'):
                current = dict()
                current['postprocess1'] = create_variable(
                    'postprocess1',
                    [1, self.skip_channels, self.skip_channels])
                current['postprocess2'] = create_variable(
                    'postprocess2',
                    [1, self.skip_channels, self.out_channels])
                if self.use_biases:
                    current['postprocess1_bias'] = create_bias_variable(
                        'postprocess1_bias',
                        [self.skip_channels])
                    current['postprocess2_bias'] = create_bias_variable(
                        'postprocess2_bias',
                        [self.out_channels])
                var['postprocessing'] = current
        return var
def _create_causal_layer(self, input_batch):
'''Creates a single causal convolution layer.
The layer can change the number of channels.
'''
with tf.name_scope('causal_layer'):
weights_filter = self.variables['causal_layer']['filter']
return causal_conv(input_batch, weights_filter, 1)
    def _create_dilation_layer(self, input_batch, layer_index, dilation, local_condition_batch,
                               global_condition_batch, output_width):
        '''Creates a single causal dilated convolution layer.
        Args:
            input_batch: Input to the dilation layer.
            layer_index: Integer indicating which layer this is.
            dilation: Integer specifying the dilation size.
            local_condition_batch: Per-timestep conditioning features, or
                None. Trimmed at the front to match the causal-conv output
                length before being added.
            global_conditioning_batch: Tensor containing the global data upon
                which the output is to be conditioned upon. Shape:
                [batch size, 1, channels]. The 1 is for the axis
                corresponding to time so that the result is broadcast to
                all time steps.
            output_width: Time length of the skip output to keep.
        The layer contains a gated filter that connects to dense output
        and to a skip connection:
               |-> [gate]   -|        |-> 1x1 conv -> skip output
               |             |-> (*) -|
        input -|-> [filter] -|        |-> 1x1 conv -|
               |                                    |-> (+) -> dense output
               |------------------------------------|
        Where `[gate]` and `[filter]` are causal convolutions with a
        non-linear activation at the output. Biases and global conditioning
        are omitted due to the limits of ASCII art.
        '''
        variables = self.variables['dilated_stack'][layer_index]
        weights_filter = variables['filter']
        weights_gate = variables['gate']
        conv_filter = causal_conv(input_batch, weights_filter, dilation)
        conv_gate = causal_conv(input_batch, weights_gate, dilation)
        if local_condition_batch is not None:
            # Drop leading conditioning frames so lengths line up with the
            # (shorter) causal-conv output, then add the 1x1 projections.
            condition_cut = tf.shape(local_condition_batch)[1] - tf.shape(conv_filter)[1]
            lc = tf.slice(local_condition_batch, [0, condition_cut, 0], [-1, -1, -1])
            conv_filter += tf.nn.conv1d(lc, variables['cond_filter'], stride=1, padding="SAME", name="cond_filter")
            conv_gate += tf.nn.conv1d(lc, variables['cond_gate'], stride=1, padding="SAME", name="cond_gate")
            if self.use_biases:
                conv_filter += variables['cond_filter_bias']
                conv_gate += variables['cond_gate_bias']
        if global_condition_batch is not None:
            # Time axis is 1, so the projection broadcasts to all steps.
            conv_filter += tf.nn.conv1d(global_condition_batch, variables['gc_filtweights'], stride=1, padding="SAME",
                                        name="gc_filter")
            conv_gate += tf.nn.conv1d(global_condition_batch, variables['gc_gateweights'], stride=1, padding="SAME",
                                      name="gc_gate")
        if self.use_biases:
            filter_bias = variables['filter_bias']
            gate_bias = variables['gate_bias']
            conv_filter += filter_bias
            conv_gate += gate_bias
        # Gated activation unit: tanh(filter) * sigmoid(gate).
        out = tf.tanh(conv_filter) * tf.sigmoid(conv_gate)
        # The 1x1 conv to produce the residual output
        weights_dense = variables['dense']
        transformed = tf.nn.conv1d(
            out, weights_dense, stride=1, padding="SAME", name="dense")
        # The 1x1 conv to produce the skip output
        skip_cut = tf.shape(out)[1] - output_width
        out_skip = tf.slice(out, [0, skip_cut, 0], [-1, -1, -1])
        weights_skip = variables['skip']
        skip_contribution = tf.nn.conv1d(
            out_skip, weights_skip, stride=1, padding="SAME", name="skip")
        if self.use_biases:
            dense_bias = variables['dense_bias']
            skip_bias = variables['skip_bias']
            transformed = transformed + dense_bias
            skip_contribution = skip_contribution + skip_bias
        if self.histograms:
            layer = 'layer{}'.format(layer_index)
            tf.histogram_summary(layer + '_filter', weights_filter)
            tf.histogram_summary(layer + '_gate', weights_gate)
            tf.histogram_summary(layer + '_dense', weights_dense)
            tf.histogram_summary(layer + '_skip', weights_skip)
            if self.use_biases:
                tf.histogram_summary(layer + '_biases_filter', filter_bias)
                tf.histogram_summary(layer + '_biases_gate', gate_bias)
                tf.histogram_summary(layer + '_biases_dense', dense_bias)
                tf.histogram_summary(layer + '_biases_skip', skip_bias)
        # Trim the layer input to the residual's length before the skip add.
        input_cut = tf.shape(input_batch)[1] - tf.shape(transformed)[1]
        input_batch = tf.slice(input_batch, [0, input_cut, 0], [-1, -1, -1])
        return skip_contribution, input_batch + transformed
def _generator_conv(self, input_batch, state_batch, weights, is_initial=False):
'''Perform convolution for a single convolutional processing step.'''
if state_batch is not None:
output = tf.matmul(state_batch[0], weights[0])
filter_width = self.initial_filter_width if is_initial else self.filter_width
i = 0 # This value will be used when filter width == 2
for i in range(1, filter_width - 1):
output += tf.matmul(state_batch[i], weights[i])
i = i+1
else:
output = 0
i = 0
output += tf.matmul(input_batch, weights[i])
return output
def _generator_causal_layer(self, input_batch, state_batch):
with tf.name_scope('causal_layer'):
weights_filter = self.variables['causal_layer']['filter']
output = self._generator_conv(
input_batch, state_batch, weights_filter, is_initial=True)
return output
    def _generator_dilation_layer(self, input_batch, state_batch, layer_index,
                                  local_condition_batch, global_condition_batch):
        '''Single-step (incremental generation) version of a dilated layer.

        Mirrors _create_dilation_layer, but operates on one time step: the
        queued past activations arrive in state_batch and every 1x1
        convolution collapses to a matmul with its single filter tap.
        Returns (skip contribution, residual output).
        '''
        variables = self.variables['dilated_stack'][layer_index]
        weights_filter = variables['filter']
        weights_gate = variables['gate']
        output_filter = self._generator_conv(input_batch, state_batch, weights_filter)
        output_gate = self._generator_conv(input_batch, state_batch, weights_gate)
        if local_condition_batch is not None:
            # 1x1 conv reduces to a matmul with the [0, :, :] tap.
            output_filter += tf.matmul(local_condition_batch, variables['cond_filter'][0, :, :])
            output_gate += tf.matmul(local_condition_batch, variables['cond_gate'][0, :, :])
            if self.use_biases:
                output_filter += variables['cond_filter_bias']
                output_gate += variables['cond_gate_bias']
        if global_condition_batch is not None:
            output_filter += tf.matmul(global_condition_batch, variables['gc_filtweights'][0, :, :])
            output_gate += tf.matmul(global_condition_batch, variables['gc_gateweights'][0, :, :])
        if self.use_biases:
            output_filter = output_filter + variables['filter_bias']
            output_gate = output_gate + variables['gate_bias']
        # Gated activation unit: tanh(filter) * sigmoid(gate).
        out = tf.tanh(output_filter) * tf.sigmoid(output_gate)
        weights_dense = variables['dense']
        transformed = tf.matmul(out, weights_dense[0, :, :], name="TEST_transform")
        if self.use_biases:
            transformed = transformed + variables['dense_bias']
        weights_skip = variables['skip']
        skip_contribution = tf.matmul(out, weights_skip[0, :, :], name="TEST_skip")
        if self.use_biases:
            skip_contribution = skip_contribution + variables['skip_bias']
        # Residual connection into the next layer.
        return skip_contribution, input_batch + transformed
    def create_network(self, input_batch, local_condition_batch, global_condition_batch):
        '''Construct the WaveNet network.

        Runs the causal layer, the dilated stack (summing the per-layer
        skip outputs) and the ReLU/1x1-conv postprocessing head, returning
        the raw (pre-softmax) output tensor.
        '''
        outputs = []
        current_layer = input_batch
        current_layer = self._create_causal_layer(current_layer)
        # Number of output timesteps once the receptive field is consumed.
        output_width = tf.shape(input_batch)[1] - self.receptive_field + 1
        # Add all defined dilation layers.
        with tf.name_scope('dilated_stack'):
            for layer_index, dilation in enumerate(self.dilations):
                with tf.name_scope('layer{}'.format(layer_index)):
                    output, current_layer = self._create_dilation_layer(
                        current_layer, layer_index, dilation, local_condition_batch,
                        global_condition_batch, output_width)
                    outputs.append(output)
        with tf.name_scope('postprocessing'):
            # Perform (+) -> ReLU -> 1x1 conv -> ReLU -> 1x1 conv to
            # postprocess the output.
            w1 = self.variables['postprocessing']['postprocess1']
            w2 = self.variables['postprocessing']['postprocess2']
            if self.use_biases:
                b1 = self.variables['postprocessing']['postprocess1_bias']
                b2 = self.variables['postprocessing']['postprocess2_bias']
            if self.histograms:
                tf.histogram_summary('postprocess1_weights', w1)
                tf.histogram_summary('postprocess2_weights', w2)
                if self.use_biases:
                    tf.histogram_summary('postprocess1_biases', b1)
                    tf.histogram_summary('postprocess2_biases', b2)
            # We skip connections from the outputs of each layer, adding them
            # all up here.
            total = sum(outputs)
            transformed1 = tf.nn.relu(total)
            conv1 = tf.nn.conv1d(transformed1, w1, stride=1, padding="SAME")
            if self.use_biases:
                conv1 = tf.add(conv1, b1)
            transformed2 = tf.nn.relu(conv1)
            conv2 = tf.nn.conv1d(transformed2, w2, stride=1, padding="SAME")
            if self.use_biases:
                conv2 = tf.add(conv2, b2)
        return conv2
def _create_queue(self, dilation, n_channel, batch_size, is_initial=False, name=None):
filter_width = self.initial_filter_width if is_initial else self.filter_width
if filter_width == 1:
return None
shape = (dilation * (filter_width - 1), batch_size, n_channel)
value = tf.zeros(shape, dtype=tf.float32)
return tf.Variable(initial_value=value, name=name, trainable=False)
def _create_q_ops(self, batch_size):
qs = []
input_channels = 1 if self.scalar_input else self.quantization_channels
q = self._create_queue(1, input_channels, batch_size,
is_initial=True,
name=PREFIX_QUEUE_VAR + str(0))
qs.append(q)
with tf.name_scope('dilated_stack'):
for layer_index, dilation in enumerate(self.dilations):
with tf.name_scope('layer{}'.format(layer_index)):
q = self._create_queue(dilation, self.residual_channels, batch_size,
name=PREFIX_QUEUE_VAR + str(layer_index + 1))
qs.append(q)
return qs
def _update(self, q, current_q_idx, x, is_initial=False):
if q is None:
return None
filter_width = self.initial_filter_width if is_initial else self.filter_width
# dequeue
for i in range(1, filter_width - 1):
idx = current_q_idx + i
q = tf.scatter_update(q, idx - 1, q[idx])
# enqueue
q = tf.scatter_update(q, current_q_idx + (filter_width - 2), x)
return q
def create_update_q_ops(self, qs, initial, others, gen_num, batch_size=1):
current_q_idx = 0
# Initial queue value will be None if initial filter width == 1
if self.initial_filter_width > 1:
q = qs[0]
input_channels = 1 if self.scalar_input else self.quantization_channels
ipt = tf.reshape(initial, [batch_size, input_channels])
q = self._update(q, current_q_idx, ipt, is_initial=True)
qs[0] = q
for layer_index, dilation in enumerate(self.dilations):
q = qs[layer_index + 1]
current_q_idx = (gen_num % dilation) * (self.filter_width - 1)
ipt = tf.reshape(others[layer_index], [batch_size, self.residual_channels])
q = self._update(q, current_q_idx, ipt)
qs[layer_index + 1] = q
if self.initial_filter_width == 1:
return qs[1:]
else:
return qs
@staticmethod
def get_vars_q():
return list(filter(lambda var: var.name.split('/')[-1].startswith(PREFIX_QUEUE_VAR),
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)))
    def _create_generator(self, qs, x, gen_num, c, g):
        '''Run the network for a single generated sample.

        Args:
            qs: Queue variables holding past activations (see _create_q_ops).
            x: Current input sample.
            gen_num: Index of the sample being generated; selects the read
                window inside each dilation queue.
            c: Local conditioning for this step, or None.
            g: Global conditioning embedding, or None.
        Returns:
            (raw network output, list of per-layer inputs) — the latter is
            what create_update_q_ops pushes back into the queues.
        '''
        outputs = []
        output_layers = []
        q = qs[0]
        current_q_idx = 0
        current_data_idx = current_q_idx + (self.initial_filter_width - 1)
        current_layer = x
        if q is not None:
            current_state = q[current_q_idx:current_data_idx]
        else:
            current_state = None
        output_layers.append(current_layer)
        current_layer = self._generator_causal_layer(current_layer, current_state)
        # Add all defined dilation layers.
        with tf.name_scope('dilated_stack'):
            for layer_index, dilation in enumerate(self.dilations):
                with tf.name_scope('layer{}'.format(layer_index)):
                    q = qs[layer_index + 1]
                    # Read window for this step inside the flattened queue.
                    current_q_idx = (gen_num % dilation) * (self.filter_width - 1)
                    current_data_idx = current_q_idx + (self.filter_width - 1)
                    output_layers.append(current_layer)
                    current_state = q[current_q_idx:current_data_idx]
                    # current_layer = tf.Print(current_layer, [current_layer],
                    #                          message="current_layer{}:".format(layer_index + 1))
                    output, current_layer = self._generator_dilation_layer(current_layer, current_state, layer_index, c, g)
                    outputs.append(output)
        with tf.name_scope('postprocessing'):
            variables = self.variables['postprocessing']
            # Perform (+) -> ReLU -> 1x1 conv -> ReLU -> 1x1 conv to
            # postprocess the output.
            w1 = variables['postprocess1']
            w2 = variables['postprocess2']
            if self.use_biases:
                b1 = variables['postprocess1_bias']
                b2 = variables['postprocess2_bias']
            # We skip connections from the outputs of each layer, adding them
            # all up here.
            total = sum(outputs)
            transformed1 = tf.nn.relu(total)
            conv1 = tf.matmul(transformed1, w1[0, :, :])
            if self.use_biases:
                conv1 = conv1 + b1
            transformed2 = tf.nn.relu(conv1)
            conv2 = tf.matmul(transformed2, w2[0, :, :])
            if self.use_biases:
                conv2 = conv2 + b2
        return conv2, output_layers
def _one_hot(self, input_batch):
'''One-hot encodes the waveform amplitudes.
This allows the definition of the network as a categorical distribution
over a finite set of possible amplitudes.
'''
with tf.name_scope('one_hot_encode'):
encoded = tf.one_hot(
input_batch,
depth=self.quantization_channels,
dtype=tf.float32)
shape = [self.batch_size, -1, self.quantization_channels]
encoded = tf.reshape(encoded, shape)
return encoded
def _embed_gc(self, global_condition):
'''Returns embedding for global condition.
:param global_condition: Either ID of global condition for
tf.nn.embedding_lookup or actual embedding. The latter is
experimental.
:return: Embedding or None
'''
embedding = None
if self.global_condition_cardinality is not None:
# Only lookup the embedding if the global condition is presented
# as an integer of mutually-exclusive categories ...
embedding_table = self.variables['embeddings']['gc_embedding']
embedding = tf.nn.embedding_lookup(embedding_table,
global_condition)
elif global_condition is not None:
# ... else the global_condition (if any) is already provided
# as an embedding.
# In this case, the number of global_embedding channels must be
# equal to the the last dimension of the global_condition tensor.
gc_batch_rank = len(global_condition.get_shape())
dims_match = (global_condition.get_shape()[gc_batch_rank - 1] ==
self.global_condition_channels)
if not dims_match:
raise ValueError('Shape of global_condition {} does not'
' match global_condition_channels {}.'.
format(global_condition.get_shape(),
self.global_condition_channels))
embedding = global_condition
if embedding is not None:
embedding = tf.reshape(
embedding,
[self.batch_size, 1, self.global_condition_channels])
return embedding
    def predict_proba(self, waveform, local_condition=None, global_condition=None, name='wavenet'):
        '''Computes the probability distribution of the next sample based on
        all samples in the input waveform.
        If you want to generate audio by feeding the output of the network back
        as an input, see predict_proba_incremental for a faster alternative.'''
        with tf.name_scope(name):
            if self.scalar_input:
                # Scalar-input mode: feed raw amplitudes directly as float32.
                encoded = tf.reshape(waveform, [self.batch_size, -1, 1])
                encoded = tf.cast(encoded, tf.float32)
            else:
                # Categorical mode: one-hot encode the quantized amplitudes.
                encoded = self._one_hot(waveform)
            gc_embedding = self._embed_gc(global_condition)
            raw_output = self.create_network(encoded, local_condition, gc_embedding)
            if self.scalar_input:
                # Scalar mode: draw the next sample from the predicted
                # discretized mixture of logistics.
                out = tf.reshape(raw_output, [self.batch_size, -1, self.out_channels])
                last = sample_from_discretized_mix_logistic(out)
            else:
                out = tf.reshape(raw_output, [-1, self.out_channels])
                # Cast to float64 to avoid bug in TensorFlow
                proba = tf.cast(
                    tf.nn.softmax(tf.cast(out, tf.float64)), tf.float32)
                # Keep only the distribution for the final timestep.
                last = tf.slice(
                    proba,
                    [tf.shape(proba)[0] - 1, 0],
                    [1, self.out_channels])
            # Flatten to a 1-D tensor over the output channels.
            return tf.reshape(last, [-1])
    def predict_proba_incremental(self, waveform, gen_num, batch_size=1,
                                  local_condition=None, global_condition=None, name='wavenet'):
        '''Computes the probability distribution of the next sample
        incrementally, based on a single sample and all previously passed
        samples.

        Returns a (proba, output_layers, q_ops) tuple; q_ops are the queue
        update ops that must be run alongside the prediction so the cached
        state advances (presumably one op per dilation layer — confirm
        against _create_q_ops).
        '''
        q_ops = self._create_q_ops(batch_size)
        with tf.name_scope(name):
            if self.scalar_input:
                # Scalar-input mode: raw amplitude(s) as a float column.
                encoded = tf.cast(waveform, tf.float32)
                encoded = tf.reshape(encoded, [-1, 1])
            else:
                # Categorical mode: one-hot, flattened to (time, channels)
                # since the generator works one step at a time.
                encoded = self._one_hot(waveform)
                encoded = tf.reshape(encoded, [-1, self.quantization_channels])
            gc_embedding = self._embed_gc(global_condition)
            if gc_embedding is not None:
                # Drop the singleton time axis added by _embed_gc.
                gc_embedding = tf.squeeze(gc_embedding, [1])
            raw_output, output_layers = self._create_generator(q_ops, encoded, gen_num, local_condition, gc_embedding)
            if self.scalar_input:
                out = tf.reshape(raw_output, [batch_size, -1, self.out_channels])
                proba = sample_from_discretized_mix_logistic(out)
            else:
                # Cast to float64 for the softmax (same TF workaround as in
                # predict_proba), then back to float32.
                out = tf.reshape(raw_output, [-1, self.out_channels])
                proba = tf.cast(tf.nn.softmax(tf.cast(out, tf.float64)), tf.float32)
            return proba, output_layers, q_ops
    def loss(self,
             input_batch,
             local_condition_batch=None,
             global_condition_batch=None,
             l2_regularization_strength=None,
             name='wavenet'):
        '''Creates a WaveNet network and returns the autoencoding loss.
        The variables are all scoped to the given name.

        :param input_batch: Batch of waveform samples (raw scalars when
            self.scalar_input, otherwise values to be mu-law quantized).
        :param local_condition_batch: Optional local conditioning tensor.
        :param global_condition_batch: Optional global conditioning
            (ID or embedding, see _embed_gc).
        :param l2_regularization_strength: If given, weight of an L2
            penalty over all non-bias trainable variables.
        :return: Scalar loss tensor (regularized if strength was given).
        '''
        with tf.name_scope(name):
            gc_embedding = self._embed_gc(global_condition_batch)
            if self.scalar_input:
                # Scalar mode: feed raw amplitudes as float32.
                network_input = tf.reshape(
                    tf.cast(input_batch, tf.float32),
                    [self.batch_size, -1, 1])
            else:
                # Categorical mode: mu-law quantize, then one-hot encode.
                encoded_input = mu_law_encode(input_batch, self.quantization_channels)
                encoded = self._one_hot(encoded_input)
                network_input = encoded
            # Cut off the last sample of network input to preserve causality.
            network_input_width = tf.shape(network_input)[1] - 1
            inputs = tf.slice(network_input, [0, 0, 0],
                              [-1, network_input_width, -1])
            raw_output = self.create_network(inputs, local_condition_batch, gc_embedding)
            with tf.name_scope('loss'):
                # Cut off the samples corresponding to the receptive field
                # for the first predicted sample.
                target_output = tf.slice(
                    network_input,
                    [0, self.receptive_field, 0],
                    [-1, -1, -1])
                if self.scalar_input:
                    # Scalar mode: discretized mixture-of-logistics loss.
                    loss = discretized_mix_logistic_loss(raw_output, target_output,
                                                         num_class=2**16, reduce=False)
                    reduced_loss = tf.reduce_mean(loss)
                else:
                    # Categorical mode: cross-entropy between predicted
                    # logits and the one-hot targets, flattened over time.
                    target_output = tf.reshape(target_output, [-1, self.out_channels])
                    prediction = tf.reshape(raw_output, [-1, self.out_channels])
                    loss = tf.nn.softmax_cross_entropy_with_logits(
                        logits=prediction,
                        labels=target_output)
                    reduced_loss = tf.reduce_mean(loss)
                tf.summary.scalar('loss', reduced_loss)
                if l2_regularization_strength is None:
                    return reduced_loss
                else:
                    # L2 regularization for all trainable parameters
                    # (biases are conventionally excluded).
                    l2_loss = tf.add_n([tf.nn.l2_loss(v)
                                        for v in tf.trainable_variables()
                                        if not('bias' in v.name)])
                    # Add the regularization term to the loss
                    total_loss = (reduced_loss +
                                  l2_regularization_strength * l2_loss)
                    tf.summary.scalar('l2_loss', l2_loss)
                    tf.summary.scalar('total_loss', total_loss)
                    return total_loss
| [
"tensorflow.shape",
"tensorflow.get_variable",
"tensorflow.scatter_update",
"tensorflow.tanh",
"tensorflow.nn.conv1d",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.slice",
"tensorflow.nn.embedding_lookup",
"tensorflow.histogram_summary",
"tensorflow.matmul",
"tensorflow.nn.softmax_... | [((569, 607), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (605, 607), True, 'import tensorflow as tf\n'), ((623, 676), 'tensorflow.get_variable', 'tf.get_variable', (['name', 'shape'], {'initializer': 'initializer'}), '(name, shape, initializer=initializer)\n', (638, 676), True, 'import tensorflow as tf\n'), ((854, 906), 'tensorflow.constant_initializer', 'tf.constant_initializer', ([], {'value': '(0.0)', 'dtype': 'tf.float32'}), '(value=0.0, dtype=tf.float32)\n', (877, 906), True, 'import tensorflow as tf\n'), ((922, 975), 'tensorflow.get_variable', 'tf.get_variable', (['name', 'shape'], {'initializer': 'initializer'}), '(name, shape, initializer=initializer)\n', (937, 975), True, 'import tensorflow as tf\n'), ((1146, 1187), 'numpy.identity', 'np.identity', ([], {'n': 'shape[0]', 'dtype': 'np.float32'}), '(n=shape[0], dtype=np.float32)\n', (1157, 1187), True, 'import numpy as np\n'), ((1207, 1253), 'tensorflow.get_variable', 'tf.get_variable', (['name'], {'initializer': 'initial_val'}), '(name, initializer=initial_val)\n', (1222, 1253), True, 'import tensorflow as tf\n'), ((15278, 15350), 'tensorflow.nn.conv1d', 'tf.nn.conv1d', (['out', 'weights_dense'], {'stride': '(1)', 'padding': '"""SAME"""', 'name': '"""dense"""'}), "(out, weights_dense, stride=1, padding='SAME', name='dense')\n", (15290, 15350), True, 'import tensorflow as tf\n'), ((15485, 15530), 'tensorflow.slice', 'tf.slice', (['out', '[0, skip_cut, 0]', '[-1, -1, -1]'], {}), '(out, [0, skip_cut, 0], [-1, -1, -1])\n', (15493, 15530), True, 'import tensorflow as tf\n'), ((15600, 15675), 'tensorflow.nn.conv1d', 'tf.nn.conv1d', (['out_skip', 'weights_skip'], {'stride': '(1)', 'padding': '"""SAME"""', 'name': '"""skip"""'}), "(out_skip, weights_skip, stride=1, padding='SAME', name='skip')\n", (15612, 15675), True, 'import tensorflow as tf\n'), ((16689, 16743), 'tensorflow.slice', 'tf.slice', (['input_batch', '[0, 
input_cut, 0]', '[-1, -1, -1]'], {}), '(input_batch, [0, input_cut, 0], [-1, -1, -1])\n', (16697, 16743), True, 'import tensorflow as tf\n'), ((17430, 17464), 'tensorflow.matmul', 'tf.matmul', (['input_batch', 'weights[i]'], {}), '(input_batch, weights[i])\n', (17439, 17464), True, 'import tensorflow as tf\n'), ((19230, 19291), 'tensorflow.matmul', 'tf.matmul', (['out', 'weights_dense[0, :, :]'], {'name': '"""TEST_transform"""'}), "(out, weights_dense[0, :, :], name='TEST_transform')\n", (19239, 19291), True, 'import tensorflow as tf\n'), ((19454, 19509), 'tensorflow.matmul', 'tf.matmul', (['out', 'weights_skip[0, :, :]'], {'name': '"""TEST_skip"""'}), "(out, weights_skip[0, :, :], name='TEST_skip')\n", (19463, 19509), True, 'import tensorflow as tf\n'), ((22173, 22206), 'tensorflow.zeros', 'tf.zeros', (['shape'], {'dtype': 'tf.float32'}), '(shape, dtype=tf.float32)\n', (22181, 22206), True, 'import tensorflow as tf\n'), ((22222, 22282), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'value', 'name': 'name', 'trainable': '(False)'}), '(initial_value=value, name=name, trainable=False)\n', (22233, 22282), True, 'import tensorflow as tf\n'), ((23408, 23467), 'tensorflow.scatter_update', 'tf.scatter_update', (['q', '(current_q_idx + (filter_width - 2))', 'x'], {}), '(q, current_q_idx + (filter_width - 2), x)\n', (23425, 23467), True, 'import tensorflow as tf\n'), ((6316, 6344), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""wavenet"""'], {}), "('wavenet')\n", (6333, 6344), True, 'import tensorflow as tf\n'), ((12080, 12109), 'tensorflow.name_scope', 'tf.name_scope', (['"""causal_layer"""'], {}), "('causal_layer')\n", (12093, 12109), True, 'import tensorflow as tf\n'), ((14056, 14124), 'tensorflow.slice', 'tf.slice', (['local_condition_batch', '[0, condition_cut, 0]', '[-1, -1, -1]'], {}), '(local_condition_batch, [0, condition_cut, 0], [-1, -1, -1])\n', (14064, 14124), True, 'import tensorflow as tf\n'), ((14152, 14245), 'tensorflow.nn.conv1d', 
'tf.nn.conv1d', (['lc', "variables['cond_filter']"], {'stride': '(1)', 'padding': '"""SAME"""', 'name': '"""cond_filter"""'}), "(lc, variables['cond_filter'], stride=1, padding='SAME', name=\n 'cond_filter')\n", (14164, 14245), True, 'import tensorflow as tf\n'), ((14266, 14355), 'tensorflow.nn.conv1d', 'tf.nn.conv1d', (['lc', "variables['cond_gate']"], {'stride': '(1)', 'padding': '"""SAME"""', 'name': '"""cond_gate"""'}), "(lc, variables['cond_gate'], stride=1, padding='SAME', name=\n 'cond_gate')\n", (14278, 14355), True, 'import tensorflow as tf\n'), ((14576, 14689), 'tensorflow.nn.conv1d', 'tf.nn.conv1d', (['global_condition_batch', "variables['gc_filtweights']"], {'stride': '(1)', 'padding': '"""SAME"""', 'name': '"""gc_filter"""'}), "(global_condition_batch, variables['gc_filtweights'], stride=1,\n padding='SAME', name='gc_filter')\n", (14588, 14689), True, 'import tensorflow as tf\n'), ((14751, 14862), 'tensorflow.nn.conv1d', 'tf.nn.conv1d', (['global_condition_batch', "variables['gc_gateweights']"], {'stride': '(1)', 'padding': '"""SAME"""', 'name': '"""gc_gate"""'}), "(global_condition_batch, variables['gc_gateweights'], stride=1,\n padding='SAME', name='gc_gate')\n", (14763, 14862), True, 'import tensorflow as tf\n'), ((15113, 15133), 'tensorflow.tanh', 'tf.tanh', (['conv_filter'], {}), '(conv_filter)\n', (15120, 15133), True, 'import tensorflow as tf\n'), ((15136, 15157), 'tensorflow.sigmoid', 'tf.sigmoid', (['conv_gate'], {}), '(conv_gate)\n', (15146, 15157), True, 'import tensorflow as tf\n'), ((16018, 16073), 'tensorflow.histogram_summary', 'tf.histogram_summary', (["(layer + '_filter')", 'weights_filter'], {}), "(layer + '_filter', weights_filter)\n", (16038, 16073), True, 'import tensorflow as tf\n'), ((16086, 16137), 'tensorflow.histogram_summary', 'tf.histogram_summary', (["(layer + '_gate')", 'weights_gate'], {}), "(layer + '_gate', weights_gate)\n", (16106, 16137), True, 'import tensorflow as tf\n'), ((16150, 16203), 
'tensorflow.histogram_summary', 'tf.histogram_summary', (["(layer + '_dense')", 'weights_dense'], {}), "(layer + '_dense', weights_dense)\n", (16170, 16203), True, 'import tensorflow as tf\n'), ((16216, 16267), 'tensorflow.histogram_summary', 'tf.histogram_summary', (["(layer + '_skip')", 'weights_skip'], {}), "(layer + '_skip', weights_skip)\n", (16236, 16267), True, 'import tensorflow as tf\n'), ((17026, 17063), 'tensorflow.matmul', 'tf.matmul', (['state_batch[0]', 'weights[0]'], {}), '(state_batch[0], weights[0])\n', (17035, 17063), True, 'import tensorflow as tf\n'), ((17567, 17596), 'tensorflow.name_scope', 'tf.name_scope', (['"""causal_layer"""'], {}), "('causal_layer')\n", (17580, 17596), True, 'import tensorflow as tf\n'), ((18376, 18443), 'tensorflow.matmul', 'tf.matmul', (['local_condition_batch', "variables['cond_filter'][0, :, :]"], {}), "(local_condition_batch, variables['cond_filter'][0, :, :])\n", (18385, 18443), True, 'import tensorflow as tf\n'), ((18471, 18536), 'tensorflow.matmul', 'tf.matmul', (['local_condition_batch', "variables['cond_gate'][0, :, :]"], {}), "(local_condition_batch, variables['cond_gate'][0, :, :])\n", (18480, 18536), True, 'import tensorflow as tf\n'), ((18768, 18839), 'tensorflow.matmul', 'tf.matmul', (['global_condition_batch', "variables['gc_filtweights'][0, :, :]"], {}), "(global_condition_batch, variables['gc_filtweights'][0, :, :])\n", (18777, 18839), True, 'import tensorflow as tf\n'), ((18867, 18938), 'tensorflow.matmul', 'tf.matmul', (['global_condition_batch', "variables['gc_gateweights'][0, :, :]"], {}), "(global_condition_batch, variables['gc_gateweights'][0, :, :])\n", (18876, 18938), True, 'import tensorflow as tf\n'), ((19115, 19137), 'tensorflow.tanh', 'tf.tanh', (['output_filter'], {}), '(output_filter)\n', (19122, 19137), True, 'import tensorflow as tf\n'), ((19140, 19163), 'tensorflow.sigmoid', 'tf.sigmoid', (['output_gate'], {}), '(output_gate)\n', (19150, 19163), True, 'import tensorflow as tf\n'), 
((20066, 20096), 'tensorflow.name_scope', 'tf.name_scope', (['"""dilated_stack"""'], {}), "('dilated_stack')\n", (20079, 20096), True, 'import tensorflow as tf\n'), ((20510, 20541), 'tensorflow.name_scope', 'tf.name_scope', (['"""postprocessing"""'], {}), "('postprocessing')\n", (20523, 20541), True, 'import tensorflow as tf\n'), ((21465, 21482), 'tensorflow.nn.relu', 'tf.nn.relu', (['total'], {}), '(total)\n', (21475, 21482), True, 'import tensorflow as tf\n'), ((21503, 21559), 'tensorflow.nn.conv1d', 'tf.nn.conv1d', (['transformed1', 'w1'], {'stride': '(1)', 'padding': '"""SAME"""'}), "(transformed1, w1, stride=1, padding='SAME')\n", (21515, 21559), True, 'import tensorflow as tf\n'), ((21661, 21678), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv1'], {}), '(conv1)\n', (21671, 21678), True, 'import tensorflow as tf\n'), ((21699, 21755), 'tensorflow.nn.conv1d', 'tf.nn.conv1d', (['transformed2', 'w2'], {'stride': '(1)', 'padding': '"""SAME"""'}), "(transformed2, w2, stride=1, padding='SAME')\n", (21711, 21755), True, 'import tensorflow as tf\n'), ((22630, 22660), 'tensorflow.name_scope', 'tf.name_scope', (['"""dilated_stack"""'], {}), "('dilated_stack')\n", (22643, 22660), True, 'import tensorflow as tf\n'), ((23339, 23376), 'tensorflow.scatter_update', 'tf.scatter_update', (['q', '(idx - 1)', 'q[idx]'], {}), '(q, idx - 1, q[idx])\n', (23356, 23376), True, 'import tensorflow as tf\n'), ((23831, 23880), 'tensorflow.reshape', 'tf.reshape', (['initial', '[batch_size, input_channels]'], {}), '(initial, [batch_size, input_channels])\n', (23841, 23880), True, 'import tensorflow as tf\n'), ((24166, 24235), 'tensorflow.reshape', 'tf.reshape', (['others[layer_index]', '[batch_size, self.residual_channels]'], {}), '(others[layer_index], [batch_size, self.residual_channels])\n', (24176, 24235), True, 'import tensorflow as tf\n'), ((25209, 25239), 'tensorflow.name_scope', 'tf.name_scope', (['"""dilated_stack"""'], {}), "('dilated_stack')\n", (25222, 25239), True, 'import 
tensorflow as tf\n'), ((26101, 26132), 'tensorflow.name_scope', 'tf.name_scope', (['"""postprocessing"""'], {}), "('postprocessing')\n", (26114, 26132), True, 'import tensorflow as tf\n'), ((26686, 26703), 'tensorflow.nn.relu', 'tf.nn.relu', (['total'], {}), '(total)\n', (26696, 26703), True, 'import tensorflow as tf\n'), ((26725, 26761), 'tensorflow.matmul', 'tf.matmul', (['transformed1', 'w1[0, :, :]'], {}), '(transformed1, w1[0, :, :])\n', (26734, 26761), True, 'import tensorflow as tf\n'), ((26856, 26873), 'tensorflow.nn.relu', 'tf.nn.relu', (['conv1'], {}), '(conv1)\n', (26866, 26873), True, 'import tensorflow as tf\n'), ((26894, 26930), 'tensorflow.matmul', 'tf.matmul', (['transformed2', 'w2[0, :, :]'], {}), '(transformed2, w2[0, :, :])\n', (26903, 26930), True, 'import tensorflow as tf\n'), ((27281, 27312), 'tensorflow.name_scope', 'tf.name_scope', (['"""one_hot_encode"""'], {}), "('one_hot_encode')\n", (27294, 27312), True, 'import tensorflow as tf\n'), ((27336, 27411), 'tensorflow.one_hot', 'tf.one_hot', (['input_batch'], {'depth': 'self.quantization_channels', 'dtype': 'tf.float32'}), '(input_batch, depth=self.quantization_channels, dtype=tf.float32)\n', (27346, 27411), True, 'import tensorflow as tf\n'), ((27553, 27579), 'tensorflow.reshape', 'tf.reshape', (['encoded', 'shape'], {}), '(encoded, shape)\n', (27563, 27579), True, 'import tensorflow as tf\n'), ((28238, 28295), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embedding_table', 'global_condition'], {}), '(embedding_table, global_condition)\n', (28260, 28295), True, 'import tensorflow as tf\n'), ((29262, 29337), 'tensorflow.reshape', 'tf.reshape', (['embedding', '[self.batch_size, 1, self.global_condition_channels]'], {}), '(embedding, [self.batch_size, 1, self.global_condition_channels])\n', (29272, 29337), True, 'import tensorflow as tf\n'), ((29791, 29810), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (29804, 29810), True, 'import tensorflow as tf\n'), 
((30802, 30824), 'tensorflow.reshape', 'tf.reshape', (['last', '[-1]'], {}), '(last, [-1])\n', (30812, 30824), True, 'import tensorflow as tf\n'), ((31219, 31238), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (31232, 31238), True, 'import tensorflow as tf\n'), ((32577, 32596), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (32590, 32596), True, 'import tensorflow as tf\n'), ((33208, 33273), 'tensorflow.slice', 'tf.slice', (['network_input', '[0, 0, 0]', '[-1, network_input_width, -1]'], {}), '(network_input, [0, 0, 0], [-1, network_input_width, -1])\n', (33216, 33273), True, 'import tensorflow as tf\n'), ((7241, 7274), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""causal_layer"""'], {}), "('causal_layer')\n", (7258, 7274), True, 'import tensorflow as tf\n'), ((7792, 7826), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""dilated_stack"""'], {}), "('dilated_stack')\n", (7809, 7826), True, 'import tensorflow as tf\n'), ((11077, 11112), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""postprocessing"""'], {}), "('postprocessing')\n", (11094, 11112), True, 'import tensorflow as tf\n'), ((15434, 15447), 'tensorflow.shape', 'tf.shape', (['out'], {}), '(out)\n', (15442, 15447), True, 'import tensorflow as tf\n'), ((16316, 16375), 'tensorflow.histogram_summary', 'tf.histogram_summary', (["(layer + '_biases_filter')", 'filter_bias'], {}), "(layer + '_biases_filter', filter_bias)\n", (16336, 16375), True, 'import tensorflow as tf\n'), ((16392, 16447), 'tensorflow.histogram_summary', 'tf.histogram_summary', (["(layer + '_biases_gate')", 'gate_bias'], {}), "(layer + '_biases_gate', gate_bias)\n", (16412, 16447), True, 'import tensorflow as tf\n'), ((16464, 16521), 'tensorflow.histogram_summary', 'tf.histogram_summary', (["(layer + '_biases_dense')", 'dense_bias'], {}), "(layer + '_biases_dense', dense_bias)\n", (16484, 16521), True, 'import tensorflow as tf\n'), ((16538, 16593), 
'tensorflow.histogram_summary', 'tf.histogram_summary', (["(layer + '_biases_skip')", 'skip_bias'], {}), "(layer + '_biases_skip', skip_bias)\n", (16558, 16593), True, 'import tensorflow as tf\n'), ((16615, 16636), 'tensorflow.shape', 'tf.shape', (['input_batch'], {}), '(input_batch)\n', (16623, 16636), True, 'import tensorflow as tf\n'), ((16642, 16663), 'tensorflow.shape', 'tf.shape', (['transformed'], {}), '(transformed)\n', (16650, 16663), True, 'import tensorflow as tf\n'), ((17298, 17335), 'tensorflow.matmul', 'tf.matmul', (['state_batch[i]', 'weights[i]'], {}), '(state_batch[i], weights[i])\n', (17307, 17335), True, 'import tensorflow as tf\n'), ((21013, 21061), 'tensorflow.histogram_summary', 'tf.histogram_summary', (['"""postprocess1_weights"""', 'w1'], {}), "('postprocess1_weights', w1)\n", (21033, 21061), True, 'import tensorflow as tf\n'), ((21078, 21126), 'tensorflow.histogram_summary', 'tf.histogram_summary', (['"""postprocess2_weights"""', 'w2'], {}), "('postprocess2_weights', w2)\n", (21098, 21126), True, 'import tensorflow as tf\n'), ((21616, 21633), 'tensorflow.add', 'tf.add', (['conv1', 'b1'], {}), '(conv1, b1)\n', (21622, 21633), True, 'import tensorflow as tf\n'), ((21812, 21829), 'tensorflow.add', 'tf.add', (['conv2', 'b2'], {}), '(conv2, b2)\n', (21818, 21829), True, 'import tensorflow as tf\n'), ((24584, 24632), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (24601, 24632), True, 'import tensorflow as tf\n'), ((29872, 29918), 'tensorflow.reshape', 'tf.reshape', (['waveform', '[self.batch_size, -1, 1]'], {}), '(waveform, [self.batch_size, -1, 1])\n', (29882, 29918), True, 'import tensorflow as tf\n'), ((29945, 29973), 'tensorflow.cast', 'tf.cast', (['encoded', 'tf.float32'], {}), '(encoded, tf.float32)\n', (29952, 29973), True, 'import tensorflow as tf\n'), ((30245, 30309), 'tensorflow.reshape', 'tf.reshape', (['raw_output', '[self.batch_size, -1, 
self.out_channels]'], {}), '(raw_output, [self.batch_size, -1, self.out_channels])\n', (30255, 30309), True, 'import tensorflow as tf\n'), ((30415, 30462), 'tensorflow.reshape', 'tf.reshape', (['raw_output', '[-1, self.out_channels]'], {}), '(raw_output, [-1, self.out_channels])\n', (30425, 30462), True, 'import tensorflow as tf\n'), ((31300, 31329), 'tensorflow.cast', 'tf.cast', (['waveform', 'tf.float32'], {}), '(waveform, tf.float32)\n', (31307, 31329), True, 'import tensorflow as tf\n'), ((31356, 31384), 'tensorflow.reshape', 'tf.reshape', (['encoded', '[-1, 1]'], {}), '(encoded, [-1, 1])\n', (31366, 31384), True, 'import tensorflow as tf\n'), ((31479, 31532), 'tensorflow.reshape', 'tf.reshape', (['encoded', '[-1, self.quantization_channels]'], {}), '(encoded, [-1, self.quantization_channels])\n', (31489, 31532), True, 'import tensorflow as tf\n'), ((31666, 31695), 'tensorflow.squeeze', 'tf.squeeze', (['gc_embedding', '[1]'], {}), '(gc_embedding, [1])\n', (31676, 31695), True, 'import tensorflow as tf\n'), ((31872, 31931), 'tensorflow.reshape', 'tf.reshape', (['raw_output', '[batch_size, -1, self.out_channels]'], {}), '(raw_output, [batch_size, -1, self.out_channels])\n', (31882, 31931), True, 'import tensorflow as tf\n'), ((32038, 32085), 'tensorflow.reshape', 'tf.reshape', (['raw_output', '[-1, self.out_channels]'], {}), '(raw_output, [-1, self.out_channels])\n', (32048, 32085), True, 'import tensorflow as tf\n'), ((33420, 33441), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (33433, 33441), True, 'import tensorflow as tf\n'), ((33600, 33667), 'tensorflow.slice', 'tf.slice', (['network_input', '[0, self.receptive_field, 0]', '[-1, -1, -1]'], {}), '(network_input, [0, self.receptive_field, 0], [-1, -1, -1])\n', (33608, 33667), True, 'import tensorflow as tf\n'), ((34417, 34456), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'reduced_loss'], {}), "('loss', reduced_loss)\n", (34434, 34456), True, 'import 
tensorflow as tf\n'), ((6883, 6914), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""embeddings"""'], {}), "('embeddings')\n", (6900, 6914), True, 'import tensorflow as tf\n'), ((13977, 14008), 'tensorflow.shape', 'tf.shape', (['local_condition_batch'], {}), '(local_condition_batch)\n', (13985, 14008), True, 'import tensorflow as tf\n'), ((14014, 14035), 'tensorflow.shape', 'tf.shape', (['conv_filter'], {}), '(conv_filter)\n', (14022, 14035), True, 'import tensorflow as tf\n'), ((19957, 19978), 'tensorflow.shape', 'tf.shape', (['input_batch'], {}), '(input_batch)\n', (19965, 19978), True, 'import tensorflow as tf\n'), ((21183, 21230), 'tensorflow.histogram_summary', 'tf.histogram_summary', (['"""postprocess1_biases"""', 'b1'], {}), "('postprocess1_biases', b1)\n", (21203, 21230), True, 'import tensorflow as tf\n'), ((21251, 21298), 'tensorflow.histogram_summary', 'tf.histogram_summary', (['"""postprocess2_biases"""', 'b2'], {}), "('postprocess2_biases', b2)\n", (21271, 21298), True, 'import tensorflow as tf\n'), ((32763, 32795), 'tensorflow.cast', 'tf.cast', (['input_batch', 'tf.float32'], {}), '(input_batch, tf.float32)\n', (32770, 32795), True, 'import tensorflow as tf\n'), ((33156, 33179), 'tensorflow.shape', 'tf.shape', (['network_input'], {}), '(network_input)\n', (33164, 33179), True, 'import tensorflow as tf\n'), ((33975, 33995), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (33989, 33995), True, 'import tensorflow as tf\n'), ((34054, 34104), 'tensorflow.reshape', 'tf.reshape', (['target_output', '[-1, self.out_channels]'], {}), '(target_output, [-1, self.out_channels])\n', (34064, 34104), True, 'import tensorflow as tf\n'), ((34138, 34185), 'tensorflow.reshape', 'tf.reshape', (['raw_output', '[-1, self.out_channels]'], {}), '(raw_output, [-1, self.out_channels])\n', (34148, 34185), True, 'import tensorflow as tf\n'), ((34214, 34299), 'tensorflow.nn.softmax_cross_entropy_with_logits', 
'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'prediction', 'labels': 'target_output'}), '(logits=prediction, labels=target_output\n )\n', (34253, 34299), True, 'import tensorflow as tf\n'), ((34379, 34399), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (34393, 34399), True, 'import tensorflow as tf\n'), ((35047, 35084), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""l2_loss"""', 'l2_loss'], {}), "('l2_loss', l2_loss)\n", (35064, 35084), True, 'import tensorflow as tf\n'), ((35105, 35148), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""total_loss"""', 'total_loss'], {}), "('total_loss', total_loss)\n", (35122, 35148), True, 'import tensorflow as tf\n'), ((30591, 30615), 'tensorflow.cast', 'tf.cast', (['out', 'tf.float64'], {}), '(out, tf.float64)\n', (30598, 30615), True, 'import tensorflow as tf\n'), ((32132, 32156), 'tensorflow.cast', 'tf.cast', (['out', 'tf.float64'], {}), '(out, tf.float64)\n', (32139, 32156), True, 'import tensorflow as tf\n'), ((34684, 34700), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['v'], {}), '(v)\n', (34697, 34700), True, 'import tensorflow as tf\n'), ((30711, 30726), 'tensorflow.shape', 'tf.shape', (['proba'], {}), '(proba)\n', (30719, 30726), True, 'import tensorflow as tf\n'), ((34750, 34774), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (34772, 34774), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 16 14:50:14 2018
@author: Kaushik
"""
'''
# Known symmetric distances
locations = ["New York", "Los Angeles", "Chicago", "Minneapolis", "Denver", "Dallas", "Seattle",
"Boston", "San Francisco", "St. Louis", "Houston", "Phoenix", "Salt Lake City"]
dist_matrix = [
[ 0, 2451, 713, 1018, 1631, 1374, 2408, 213, 2571, 875, 1420, 2145, 1972], # New York
[2451, 0, 1745, 1524, 831, 1240, 959, 2596, 403, 1589, 1374, 357, 579], # Los Angeles
[ 713, 1745, 0, 355, 920, 803, 1737, 851, 1858, 262, 940, 1453, 1260], # Chicago
[1018, 1524, 355, 0, 700, 862, 1395, 1123, 1584, 466, 1056, 1280, 987], # Minneapolis
[1631, 831, 920, 700, 0, 663, 1021, 1769, 949, 796, 879, 586, 371], # Denver
[1374, 1240, 803, 862, 663, 0, 1681, 1551, 1765, 547, 225, 887, 999], # Dallas
[2408, 959, 1737, 1395, 1021, 1681, 0, 2493, 678, 1724, 1891, 1114, 701], # Seattle
[ 213, 2596, 851, 1123, 1769, 1551, 2493, 0, 2699, 1038, 1605, 2300, 2099], # Boston
[2571, 403, 1858, 1584, 949, 1765, 678, 2699, 0, 1744, 1645, 653, 600], # San Francisco
[ 875, 1589, 262, 466, 796, 547, 1724, 1038, 1744, 0, 679, 1272, 1162], # St. Louis
[1420, 1374, 940, 1056, 879, 225, 1891, 1605, 1645, 679, 0, 1017, 1200], # Houston
[2145, 357, 1453, 1280, 586, 887, 1114, 2300, 653, 1272, 1017, 0, 504], # Phoenix
[1972, 579, 1260, 987, 371, 999, 701, 2099, 600, 1162, 1200, 504, 0]] # Salt Lake City
'''
# Locations
def create_data_array():
    """Return the list of 280 [x, y] city coordinates for the TSP instance.

    The list is consumed by create_distance_matrix(); each entry is an
    integer coordinate pair. (Presumably the TSPLIB a280 layout — TODO
    confirm against the data's origin.)
    """
    locations = [[288, 149], [288, 129], [270, 133], [256, 141], [256, 157], [246, 157], [236, 169],
    [228, 169], [228, 161], [220, 169], [212, 169], [204, 169], [196, 169], [188, 169], [196, 161],
    [188, 145], [172, 145], [164, 145], [156, 145], [148, 145], [140, 145], [148, 169], [164, 169],
    [172, 169], [156, 169], [140, 169], [132, 169], [124, 169], [116, 161], [104, 153], [104, 161],
    [104, 169], [90, 165], [80, 157], [64, 157], [64, 165], [56, 169], [56, 161], [56, 153], [56, 145],
    [56, 137], [56, 129], [56, 121], [40, 121], [40, 129], [40, 137], [40, 145], [40, 153], [40, 161],
    [40, 169], [32, 169], [32, 161], [32, 153], [32, 145], [32, 137], [32, 129], [32, 121], [32, 113],
    [40, 113], [56, 113], [56, 105], [48, 99], [40, 99], [32, 97], [32, 89], [24, 89], [16, 97],
    [16, 109], [8, 109], [8, 97], [8, 89], [8, 81], [8, 73], [8, 65], [8, 57], [16, 57], [8, 49],
    [8, 41], [24, 45], [32, 41], [32, 49], [32, 57], [32, 65], [32, 73], [32, 81], [40, 83], [40, 73],
    [40, 63], [40, 51], [44, 43], [44, 35], [44, 27], [32, 25], [24, 25], [16, 25], [16, 17], [24, 17],
    [32, 17], [44, 11], [56, 9], [56, 17], [56, 25], [56, 33], [56, 41], [64, 41], [72, 41], [72, 49],
    [56, 49], [48, 51], [56, 57], [56, 65], [48, 63], [48, 73], [56, 73], [56, 81], [48, 83], [56, 89],
    [56, 97], [104, 97], [104, 105], [104, 113], [104, 121], [104, 129], [104, 137], [104, 145],
    [116, 145], [124, 145], [132, 145], [132, 137], [140, 137], [148, 137], [156, 137], [164, 137],
    [172, 125], [172, 117], [172, 109], [172, 101], [172, 93], [172, 85], [180, 85], [180, 77],
    [180, 69], [180, 61], [180, 53], [172, 53], [172, 61], [172, 69], [172, 77], [164, 81], [148, 85],
    [124, 85], [124, 93], [124, 109], [124, 125], [124, 117], [124, 101], [104, 89], [104, 81],
    [104, 73], [104, 65], [104, 49], [104, 41], [104, 33], [104, 25], [104, 17], [92, 9], [80, 9],
    [72, 9], [64, 21], [72, 25], [80, 25], [80, 25], [80, 41], [88, 49], [104, 57], [124, 69],
    [124, 77], [132, 81], [140, 65], [132, 61], [124, 61], [124, 53], [124, 45], [124, 37], [124, 29],
    [132, 21], [124, 21], [120, 9], [128, 9], [136, 9], [148, 9], [162, 9], [156, 25], [172, 21],
    [180, 21], [180, 29], [172, 29], [172, 37], [172, 45], [180, 45], [180, 37], [188, 41], [196, 49],
    [204, 57], [212, 65], [220, 73], [228, 69], [228, 77], [236, 77], [236, 69], [236, 61], [228, 61],
    [228, 53], [236, 53], [236, 45], [228, 45], [228, 37], [236, 37], [236, 29], [228, 29], [228, 21],
    [236, 21], [252, 21], [260, 29], [260, 37], [260, 45], [260, 53], [260, 61], [260, 69], [260, 77],
    [276, 77], [276, 69], [276, 61], [276, 53], [284, 53], [284, 61], [284, 69], [284, 77], [284, 85],
    [284, 93], [284, 101], [288, 109], [280, 109], [276, 101], [276, 93], [276, 85], [268, 97],
    [260, 109], [252, 101], [260, 93], [260, 85], [236, 85], [228, 85], [228, 93], [236, 93],
    [236, 101], [228, 101], [228, 109], [228, 117], [228, 125], [220, 125], [212, 117], [204, 109],
    [196, 101], [188, 93], [180, 93], [180, 101], [180, 109], [180, 117], [180, 125], [196, 145],
    [204, 145], [212, 145], [220, 145], [228, 145], [236, 145], [246, 141], [252, 125], [260, 129],
    [280, 133]]
    return locations
# Imports
import numpy as np
# from ortools.linear_solver import pywraplp
from ortools.constraint_solver import pywrapcp, routing_enums_pb2
# Euclidean distance between points.
def euclid_distance(x1, y1, x2, y2):
    """Straight-line (L2) distance between points (x1, y1) and (x2, y2)."""
    dx = x1 - x2
    dy = y1 - y2
    return np.sqrt(dx * dx + dy * dy)
# Manhattan distance between points.
def manhattan_distance(x1, y1, x2, y2):
    """City-block (L1) distance between points (x1, y1) and (x2, y2)."""
    return abs(x1 - x2) + abs(y1 - y2)
# Create the distance matrix (symmetric).
def create_distance_matrix(locations, distance_func):
    """Build a node-to-node distance lookup for every pair of locations.

    :param locations: sequence of [x, y] coordinate pairs
    :param distance_func: callable(x1, y1, x2, y2) -> distance
    :return: dict of dicts, dist[from_node][to_node] -> distance
    """
    size = len(locations)
    dist_matrix = {}
    for from_node in range(size):
        x1, y1 = locations[from_node][0], locations[from_node][1]
        dist_matrix[from_node] = {
            to_node: distance_func(x1, y1,
                                   locations[to_node][0],
                                   locations[to_node][1])
            for to_node in range(size)
        }
    return dist_matrix
# Create a callback to calculate distances between cities.
def create_distance_callback(dist_matrix):
    """Return a callback mapping (from_node, to_node) to an integer arc cost.

    OR-Tools requires integer arc costs, hence the int() truncation.
    """
    return lambda from_node, to_node: int(dist_matrix[from_node][to_node])
# Set search parameters
def set_search_parameters(guided_local_search=False, timeout=30000):
    """Build the OR-Tools routing search parameters.

    :param guided_local_search: enable the guided-local-search metaheuristic
        to help the solver escape local minima
    :param timeout: time limit in milliseconds (applied only when guided
        local search is enabled, which requires a time limit)
    """
    params = pywrapcp.RoutingModel.DefaultSearchParameters()  # Default
    if guided_local_search:
        # Guided local search to avoid a local minimum.
        params.local_search_metaheuristic = (
            routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
        )
        # Need to set a time limit for guided local search.
        params.time_limit_ms = timeout
    return params
# Display the solution
def display_solution(assignment, routing):
    """Print the objective value and the route taken by each vehicle.

    NOTE(review): this function reads the module-level ``locations`` list
    (defined in the ``__main__`` block) rather than taking it as a parameter
    — it only works when that global exists.
    """
    # Solution cost.
    print("\nMinimum total distance found: " + str(assignment.ObjectiveValue()) + " miles\n")
    # Display the routes
    for vehicle_number in range(routing.vehicles()):
        print("\nBest route found for vehicle ", vehicle_number, ":\n")
        node = routing.Start(vehicle_number) # Index of the variable for the starting node.
        route = ''
        while not routing.IsEnd(node):
            # Convert variable indices to node indices in the displayed route.
            route += str(locations[routing.IndexToNode(node)]) + ' -> '
            node = assignment.Value(routing.NextVar(node))
        # Append the final (end) node after the loop exits.
        route += str(locations[routing.IndexToNode(node)])
        print(route)
# Wrapper over Google OR Tools
def google_or_wrapper(tsp_size, num_vehicles, depot, locations, dist_matrix):
    """Build and solve the TSP with the Google OR-Tools routing solver.

    :param tsp_size: number of nodes (cities)
    :param num_vehicles: number of routes/vehicles (1 for a plain TSP)
    :param depot: index of the start/end node
    :param locations: list of [x, y] coordinates
        (NOTE(review): not used directly here — display_solution reads the
        module-level ``locations`` global instead)
    :param dist_matrix: node-to-node distance lookup (dict of dicts)
    """
    if tsp_size > 0:
        # Create the routing model
        routing = pywrapcp.RoutingModel(tsp_size, num_vehicles, depot)
        # Set the search parameters (guided local search enabled)
        search_parameters = set_search_parameters(True)
        # Create the distance callback.
        dist_callback = create_distance_callback(dist_matrix)
        routing.SetArcCostEvaluatorOfAllVehicles(dist_callback)
        # Solve
        assignment = routing.SolveWithParameters(search_parameters)
        if assignment:
            # Display the solution
            display_solution(assignment, routing)
        else:
            print('No solution found.')
    else:
        print("Specify an instance greater than 0.")
# Main
if __name__ == "__main__":
    # Create the data.
    # NOTE(review): create_data_array() is defined earlier in this file and
    # returns the list of [x, y] city coordinates.
    locations = create_data_array()
    dist_matrix = create_distance_matrix(locations, euclid_distance)
    # Initialize parameters
    tsp_size = len(locations) # Number of cities.
    num_vehicles = 1 # Number of routes (i.e. number of vehicles), which is 1 for a TSP
    depot = 0 # Start and end node of the route
    # Solve using Google OR Tools
    google_or_wrapper(tsp_size, num_vehicles, depot, locations, dist_matrix)
| [
"ortools.constraint_solver.pywrapcp.RoutingModel.DefaultSearchParameters",
"numpy.sqrt",
"ortools.constraint_solver.pywrapcp.RoutingModel"
] | [((5085, 5125), 'numpy.sqrt', 'np.sqrt', (['((x1 - x2) ** 2 + (y1 - y2) ** 2)'], {}), '((x1 - x2) ** 2 + (y1 - y2) ** 2)\n', (5092, 5125), True, 'import numpy as np\n'), ((6160, 6207), 'ortools.constraint_solver.pywrapcp.RoutingModel.DefaultSearchParameters', 'pywrapcp.RoutingModel.DefaultSearchParameters', ([], {}), '()\n', (6205, 6207), False, 'from ortools.constraint_solver import pywrapcp, routing_enums_pb2\n'), ((7509, 7561), 'ortools.constraint_solver.pywrapcp.RoutingModel', 'pywrapcp.RoutingModel', (['tsp_size', 'num_vehicles', 'depot'], {}), '(tsp_size, num_vehicles, depot)\n', (7530, 7561), False, 'from ortools.constraint_solver import pywrapcp, routing_enums_pb2\n')] |
# Copyright 2021-2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import cunumeric as num
def test():
    """Check that cunumeric.vstack matches numpy.vstack on 1-D, 2-D and 3-D inputs."""
    cases = [
        ([1, 2, 3], [4, 5, 6]),
        ([[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]),
        (
            [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]],
            [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]],
        ),
    ]
    for first, second in cases:
        stacked = num.vstack((num.array(first), num.array(second)))
        expected = np.vstack((np.array(first), np.array(second)))
        assert np.array_equal(stacked, expected)
    return
# Run the vstack checks when this file is executed as a script.
if __name__ == "__main__":
    test()
| [
"numpy.array",
"numpy.array_equal",
"numpy.vstack",
"cunumeric.vstack",
"cunumeric.array"
] | [((656, 676), 'cunumeric.array', 'num.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (665, 676), True, 'import cunumeric as num\n'), ((685, 705), 'cunumeric.array', 'num.array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (694, 705), True, 'import cunumeric as num\n'), ((714, 732), 'cunumeric.vstack', 'num.vstack', (['(x, y)'], {}), '((x, y))\n', (724, 732), True, 'import cunumeric as num\n'), ((744, 763), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (752, 763), True, 'import numpy as np\n'), ((774, 793), 'numpy.array', 'np.array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (782, 793), True, 'import numpy as np\n'), ((804, 825), 'numpy.vstack', 'np.vstack', (['(xnp, ynp)'], {}), '((xnp, ynp))\n', (813, 825), True, 'import numpy as np\n'), ((838, 860), 'numpy.array_equal', 'np.array_equal', (['z', 'znp'], {}), '(z, znp)\n', (852, 860), True, 'import numpy as np\n'), ((870, 903), 'cunumeric.array', 'num.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (879, 903), True, 'import cunumeric as num\n'), ((912, 948), 'cunumeric.array', 'num.array', (['[[7, 8, 9], [10, 11, 12]]'], {}), '([[7, 8, 9], [10, 11, 12]])\n', (921, 948), True, 'import cunumeric as num\n'), ((957, 975), 'cunumeric.vstack', 'num.vstack', (['(x, y)'], {}), '((x, y))\n', (967, 975), True, 'import cunumeric as num\n'), ((987, 1019), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (995, 1019), True, 'import numpy as np\n'), ((1030, 1065), 'numpy.array', 'np.array', (['[[7, 8, 9], [10, 11, 12]]'], {}), '([[7, 8, 9], [10, 11, 12]])\n', (1038, 1065), True, 'import numpy as np\n'), ((1076, 1097), 'numpy.vstack', 'np.vstack', (['(xnp, ynp)'], {}), '((xnp, ynp))\n', (1085, 1097), True, 'import numpy as np\n'), ((1110, 1132), 'numpy.array_equal', 'np.array_equal', (['z', 'znp'], {}), '(z, znp)\n', (1124, 1132), True, 'import numpy as np\n'), ((1142, 1204), 'cunumeric.array', 'num.array', (['[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 
11, 12]]]'], {}), '([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])\n', (1151, 1204), True, 'import cunumeric as num\n'), ((1213, 1284), 'cunumeric.array', 'num.array', (['[[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]'], {}), '([[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]])\n', (1222, 1284), True, 'import cunumeric as num\n'), ((1293, 1311), 'cunumeric.vstack', 'num.vstack', (['(x, y)'], {}), '((x, y))\n', (1303, 1311), True, 'import cunumeric as num\n'), ((1323, 1384), 'numpy.array', 'np.array', (['[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]'], {}), '([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])\n', (1331, 1384), True, 'import numpy as np\n'), ((1395, 1465), 'numpy.array', 'np.array', (['[[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]'], {}), '([[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]])\n', (1403, 1465), True, 'import numpy as np\n'), ((1490, 1511), 'numpy.vstack', 'np.vstack', (['(xnp, ynp)'], {}), '((xnp, ynp))\n', (1499, 1511), True, 'import numpy as np\n'), ((1524, 1546), 'numpy.array_equal', 'np.array_equal', (['z', 'znp'], {}), '(z, znp)\n', (1538, 1546), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os
import tempfile
import shutil
import subprocess
import itertools
import sys
from .helpers import sh
from . import Task
try:
import cPickle as pickle
except ImportError:
import pickle
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class Document(object):
    """Abstract base for documents generated automatically from a template.

    :param templates: The document template files (or file)
    :type templates: str or list
    :param depends: The list of dependencies.
    :type depends: list of :class:`anadama2.tracked.Base` or strings
    :param targets: The target(s). The document(s) to be generated.
    :type targets: :class:`anadama2.tracked.Base` or string
    :param vars: A dictionary of variables used by the template.
    :type vars: dict
    """

    def create(self, task):
        """Generate the document targets; concrete subclasses must override."""
        raise NotImplementedError()
class PweaveDocument(Document):
"""A document that is auto generated from a template using Pweave and Pandoc
:keyword templates: The document template files (or file)
:type templates: str or list
:keyword depends: The list of dependencies.
:type depends: list of :class:`anadama2.tracked.Base` or strings
:keyword targets: The target(s). The document(s) to be generated.
:type targets: :class:`anadama2.tracked.Base` or string
:keyword vars: A dictionary of variables used by the template.
:type vars: dict
:keyword table_of_contents: If set add table of contents to reports
:type table_of_contents: bool
"""
    def __init__(self, templates=None, depends=None, targets=None, vars=None, table_of_contents=None):
        """Store the report settings, create the figures/data output folders,
        verify the external tool chain (numpy/matplotlib/pweave/pdflatex) when
        a template is given, and copy file dependencies into the data folder.
        """
        # allow for a single template or multiple templates
        if templates is not None and isinstance(templates,str):
            templates=[templates]
        self.templates=templates
        self.depends=depends
        self.table_of_contents=table_of_contents
        # if targets is a single item, save as a list
        if targets is not None and isinstance(targets,str):
            targets=[targets]
        self.targets=targets
        self.vars=vars
        # set the max number of x tick labels to be shown on plots
        self.max_labels = 65
        # set the max labels for legends
        self.max_labels_legend = 30
        # set the location of the final figures and data folders
        # only create if the document has a target
        if self.targets:
            self.figures_folder = os.path.join(os.path.dirname(self.targets[0]),"figures")
            if not os.path.isdir(self.figures_folder):
                os.makedirs(self.figures_folder)
            self.data_folder = os.path.join(os.path.dirname(self.targets[0]),"data")
            if not os.path.isdir(self.data_folder):
                os.makedirs(self.data_folder)
        else:
            # if a target is not provided use the variables to try to find the folder locations
            # (this path is taken when the template itself is being woven:
            # cwd is then the temp folder holding the pickled vars)
            try:
                self.vars=self.get_vars()
            except (IndexError, EOFError):
                self.vars=None
            if self.vars:
                self.figures_folder = os.path.join(os.path.dirname(self.vars["targets"][0]),"figures")
                self.data_folder = os.path.join(os.path.dirname(self.vars["targets"][0]),"data")
            # NOTE(review): if vars is also unavailable, figures_folder and
            # data_folder are never set, and the access below would raise
            # AttributeError — confirm this path cannot occur in practice.
        # check for the required dependencies when using a pweave document
        # only check when creating an instance with a template to run
        if templates is not None:
            try:
                import numpy
            except ImportError:
                sys.exit("Please install numpy for document generation")
            try:
                # set non-interactive backend to simplify server install
                import matplotlib
                matplotlib.use('Agg')
                import matplotlib.pyplot as pyplot
            except ImportError:
                sys.exit("Please install matplotlib for document generation")
            try:
                output=subprocess.check_output(["pypublish","-h"],stderr=subprocess.STDOUT)
            except EnvironmentError:
                sys.exit("Please install pweave for document generation")
            try:
                output=subprocess.check_output(["pdflatex","--help"],stderr=subprocess.STDOUT)
            except EnvironmentError:
                sys.exit("Please install latex which includes pdflatex for document generation")
        # copy over the file dependencies to the data folder
        if self.depends and self.data_folder and os.path.isdir(self.data_folder):
            depends_files = list(filter(lambda x: not isinstance(x,Task), self.depends))
            for data_file in depends_files:
                if data_file:
                    shutil.copy(data_file,os.path.join(self.data_folder,os.path.basename(data_file)))
def print_title(self):
if self.vars["header_image"]:
print(" ")
print("  ".format("",self.vars["header_image"]))
if self.vars["project"]:
print(" "+self.vars["title"]+" for "+self.vars["project"])
else:
print(" "+self.vars["title"])
    def create(self, task):
        """ Create the documents specified as targets """
        # get the template extension
        template_extension=os.path.splitext(self.templates[0])[-1]
        # get the report and figure extensions based on the target
        report_filename, report_extension=os.path.splitext(self.targets[0])
        # create the temp file to run with when ready to create the document
        temp_directory = tempfile.mkdtemp(dir=os.path.dirname(self.targets[0]))
        temp_template_basename = os.path.join(temp_directory,os.path.basename(report_filename))
        # keep the extension of the template for pweave auto reader function
        temp_template = temp_template_basename + template_extension
        # if variables are provided, then create a pickled file to store these
        # for use when the document is created
        if self.vars is not None:
            # save the depends in the vars set
            self.vars["depends"]=self.depends
            # save the targets
            self.vars["targets"]=self.targets
            # change directories/files to the full paths (so no longer required in input)
            # this is because pweave does not have an output folder option (just cwd so a ch is needed)
            for variable in self.vars:
                try:
                    if os.path.isdir(self.vars[variable]) or os.path.isfile(self.vars[variable]):
                        self.vars[variable] = os.path.abspath(self.vars[variable])
                except (TypeError, ValueError, NameError):
                    # ignore lists and None values
                    continue
            # create a picked file with the temp template name in the same folder
            pickle.dump(self.vars, open(temp_template_basename+".pkl", "wb"))
        # merge the templates into the temp file
        templates_globals=globals()
        templates_globals["vars"]=self.vars
        templates_globals["filename"]=""
        with open(temp_template,"w") as handle:
            for file in self.templates:
                current_import_section=""
                capture_import=False
                for line in open(file):
                    # look for and process imports
                    if line.startswith("```{import"):
                        capture_import=True
                    elif line.startswith("```") and capture_import:
                        # run the captured code chunk; it may set "filename"
                        # in the globals to request a file inclusion
                        exec(current_import_section, templates_globals)
                        if "filename" in templates_globals and templates_globals["filename"]:
                            # import the file to the template
                            # NOTE(review): "filename" is only defined in
                            # templates_globals, not as a local — this looks
                            # like it would raise NameError; confirm.
                            for importline in open(os.path.join(os.path.dirname(file),filename)):
                                handle.write(importline)
                        templates_globals["filename"]=""
                        current_import_section=""
                        capture_import=False
                    elif capture_import:
                        current_import_section+=line
                    else:
                        handle.write(line)
        # create the document
        # first move to the directory with the temp output files
        # this will cause all pweave output files to be written to this folder
        # by default the tex output file is written to the same folder as the
        # template file and the pdf files are written to the current working directory
        current_working_directory = os.getcwd()
        os.chdir(temp_directory)
        # get the intermediate output file based on the initial template name
        intermediate_template = temp_template_basename+"."+"md"
        # process the template based on the extension type
        temp_report = temp_template_basename+"."+report_extension
        # set the pandoc command based on if a table of contents will be included
        pandoc_command="pandoc {0} -o {1} --variable=linkcolor:Blue "+\
            "--variable=toccolor:Blue --pdf-engine=pdflatex --standalone "+\
            "-V geometry:left=1in -V geometry:right=1in -V geometry:top=1in -V geometry:bottom=1in"
        if self.table_of_contents:
            pandoc_command+=" --toc"
        # run pweave then pandoc to generate document
        # call pweave to use class with fix
        from pweave import PwebPandocFormatter, Pweb, PwebProcessor
        from pweave.readers import PwebScriptReader
        class PwebProcessorSpaces(PwebProcessor):
            def loadinline(self, content):
                """Function from pweave slightly modified to allow for spaces in code"""
                """Evaluate code from doc chunks using ERB markup"""
                # Flags don't work with ironpython
                import re
                splitted = re.split('(<%[\w\s\W]*?%>)', content) # , flags = re.S)
                # No inline code
                if len(splitted) < 2:
                    return content
                n = len(splitted)
                for i in range(n):
                    elem = splitted[i]
                    if not elem.startswith('<%'):
                        continue
                    if elem.startswith('<%='):
                        code_str = elem.replace('<%=', '').replace('%>', '').lstrip()
                        result = self.loadstring(self.add_echo(code_str)).strip()
                        splitted[i] = result
                        continue
                    if elem.startswith('<%'):
                        code_str = elem.replace('<%', '').replace('%>', '').lstrip()
                        #result = self.loadstring(code_str).strip()
                        # small modification from original code to allow for spaces at the end
                        # spaces after figures are required for figure captions with pandoc
                        result = self.loadstring(code_str).lstrip().replace("\n","",1)
                        splitted[i] = result
                return ''.join(splitted)
        class PwebPandocFormatterFixedFigures(PwebPandocFormatter):
            def make_figure_string_size(self, figname, width, label, caption = ""):
                # new function to fix figure width string to work with pandoc format
                # only use width if pandoc installed is >= v1.16.0
                # NOTE(review): the format strings below contain fewer
                # placeholders than arguments — looks like markdown image
                # syntax was garbled; preserved as-is, verify against upstream.
                if self.new_pandoc:
                    figstring="{ width=%s }\n" % (caption, figname, width)
                else:
                    figstring="\n" % (caption, figname)
                if caption == "":
                    figstring += "\\"
                figstring += "\n"
                return figstring
            def formatfigure(self, chunk):
                fignames = chunk['figure']
                if chunk["caption"]:
                    caption = chunk["caption"]
                else:
                    caption = ""
                figstring = ""
                # increase default figure size
                if not chunk["width"]:
                    chunk["width"]="100%"
                if chunk['caption'] and len(fignames) > 0:
                    if len(fignames) > 1:
                        print("INFO: Only including the first plot in a chunk when the caption is set")
                    figstring = self.make_figure_string_size(fignames[0], chunk["width"], chunk["name"], caption)
                    return figstring
                for fig in fignames:
                    # original line which duplicates figures commented out and replaced
                    #figstring += self.make_figure_string(fignames[0], chunk["width"], chunk["name"])
                    figstring += self.make_figure_string_size(fig, chunk["width"], chunk["name"])
                return figstring
        if temp_template.endswith(".py"):
            # capture stdout messages
            original_stdout = sys.stdout
            sys.stdout = capture_stdout = StringIO()
            doc = Pweb(temp_template)
            doc.setformat(Formatter = PwebPandocFormatterFixedFigures)
            doc.detect_reader()
            doc.weave(shell=PwebProcessorSpaces)
            sys.stdout = original_stdout
        else:
            sh("pweave {0} -o {1}".format(temp_template,intermediate_template),log_command=True)()
        sh(pandoc_command.format(intermediate_template, temp_report),log_command=True)()
        # change back to original working directory
        os.chdir(current_working_directory)
        # rename to the original target name and location specified
        shutil.copy(temp_report,self.targets[0])
        # move the temp figures files
        temp_figures_folder=os.path.join(temp_directory,"figures")
        for file in os.listdir(temp_figures_folder):
            shutil.move(os.path.join(temp_figures_folder,file),os.path.join(self.figures_folder,file))
        # remove all of the temp files in the temp folder
        shutil.rmtree(temp_directory,ignore_errors=True)
        # remove the figures folder if it is empty
        if len(os.listdir(self.figures_folder)) == 0:
            os.rmdir(self.figures_folder)
        # remove the data folder if it is empty
        if len(os.listdir(self.data_folder)) == 0:
            os.rmdir(self.data_folder)
def get_vars(self):
""" Try to get the variables from the pickled file """
# the pickled file will be the same name as the template name
# the current working directory will hold the pickle file
# the current working directory is a temp folder
# find the pickle file
pickle_file = list(filter(lambda x: x.endswith(".pkl"),os.listdir(".")))
vars = pickle.load(open(pickle_file[0],"rb"))
return vars
def read_table(self, file, invert=None, delimiter="\t", only_data_columns=None, format_data=None):
""" Read the table from a text file with the first line the column
names and the first column the row names.
:param file: The file to read
:type file: str
:keyword invert: Invert the table rows/columns after reading
:type invert: bool
:keyword delimiter: The delimiter present in the file
:type delimiter: str
:keyword only_data_columns: Remove the header and row names
:type only_data_columns: bool
:keyword format_data: A function to use to format the data
:type format_data: function
"""
def try_format_data(function, data):
""" Try to format the data, except use zero """
try:
if function == int:
formatted_data=int(float(data))
else:
formatted_data=function(data)
except ValueError:
formatted_data=function(0)
return formatted_data
# if not set, format data to floats
if format_data is None:
format_data=float
data=[]
row_names=[]
with open(file) as file_handle:
column_names = file_handle.readline().rstrip().split(delimiter)[1:]
for line in file_handle:
line=line.rstrip().split(delimiter)
row_names.append(line[0])
data.append([try_format_data(format_data, i) for i in line[1:]])
# remove extra columns if not requested
if only_data_columns is not None:
column_names=[column_names[i] for i in only_data_columns]
new_data=[]
for row in data:
new_data.append([row[i] for i in only_data_columns])
data=new_data
return column_names, row_names, data
def sorted_data_numerical_or_alphabetical(self, data):
""" Sort the data numerically or alphabetically depending on data type """
# sort the data alphabetically
sorted_data = sorted(data)
try:
# allow for NA in keys
na=False
if "NA" in data:
data.remove("NA")
na=True
sorted_data = sorted(data, key=float)
if na:
sorted_data+=["NA"]
except ValueError:
pass
return sorted_data
def add_threshold(self, threshold, color, label):
""" Adds horizontal line to plot as threshold
:param threshold: float
:param color: string
:param label: string
"""
import matplotlib.pyplot as pyplot
pyplot.axhline(threshold, color=color)
pyplot.text(0, int(threshold) + 50, label)
pyplot.draw()
def add_ellipse(self,labels):
# shorten any labels over the max cutoff
MAX_LEN=37
ELLIPSE="..."
new_labels=[]
for label in labels:
if len(label) > MAX_LEN:
label=label[0:MAX_LEN]+ELLIPSE
new_labels+=[label]
return new_labels
    def plot_stacked_barchart_grouped(self, grouped_data, row_labels, column_labels_grouped, title,
        ylabel=None, legend_title=None, legend_style="normal", legend=True, legend_size=8, outfilename=None, legend_reverse=False):
        """ Plot a stacked barchart with data grouped into subplots
        :param grouped_data: A dict of lists containing the grouped data
        :type data: dict
        :param row_labels: The labels for the data rows
        :type row_labels: list
        :param column_labels_grouped: The labels for the columns grouped
        :type column_labels: dict
        :param title: The title for the plot
        :type title: str
        :keyword ylabel: The y-axis label
        :type ylabel: str
        :keyword legend_title: The title for the legend
        :type legend_title: str
        :keyword legend_style: The font style for the legend
        :type legend_style: str
        :keyword legend: Display legend
        :type legend: bool
        :keyword legend_size: The font size for the legend
        :type legend_size: int
        :keyword outfilename: If set, save the figure to this file and print a
            markdown image reference instead of drawing interactively
        :type outfilename: str
        :keyword legend_reverse: Reverse the legend entry order
        :type legend_reverse: bool
        """
        import numpy
        import matplotlib.pyplot as pyplot
        total_groups=len(grouped_data.keys())
        # one subplot per group, sharing the y axis with a small gap between them
        figure, group_axis = pyplot.subplots(1, total_groups, sharey=True, gridspec_kw = {'wspace':0.02},figsize=(10,6),dpi=150)
        # create a set of custom colors to prevent overlap
        # get only a set number of items to recycle colors through subplots
        custom_colors=list(itertools.islice(self._custom_colors(total_colors=len(row_labels)),len(row_labels)))
        # create a subplot for each group
        group_number=0
        # sort the groups prior to plotting
        sorted_group_names = self.sorted_data_numerical_or_alphabetical(list(grouped_data.keys()))
        # get the total number of columns for all groups
        total_columns_all_groups=len(list(itertools.chain.from_iterable(column_labels_grouped.values())))
        for group_name in sorted_group_names:
            data = grouped_data[group_name]
            bar_plots=[]
            # create a plot for each stacked group
            column_labels=column_labels_grouped[group_name]
            plot_indexes=numpy.arange(len(column_labels))
            y_offset=numpy.array([0.0]*len(column_labels))
            for plot_abundance, color in zip(data, custom_colors):
                bar_plots.append(group_axis[group_number].bar(plot_indexes, plot_abundance,
                    bottom=y_offset, align="center", color=color))
                # add to the y_offset which is the bottom of the stacked plot
                y_offset=y_offset+plot_abundance
            # set the current axis to this groups plot
            pyplot.sca(group_axis[group_number])
            # Add the title, labels, and legend
            pyplot.title(group_name, size=12)
            # only label the x-axis if all subplots can have labels
            if total_columns_all_groups <= self.max_labels:
                # move the bottom of the figure for larger xaxis labels
                figure.subplots_adjust(bottom=0.3)
                pyplot.xticks(plot_indexes, column_labels, fontsize=7, rotation="vertical")
            else:
                pyplot.tick_params(axis="x",which="both",bottom="off",labelbottom="off")
                pyplot.xticks([])
            pyplot.yticks(fontsize=7)
            group_number+=1
        pyplot.tight_layout()
        # add the legend to the last subplot
        if legend:
            # reduce the size of the plot to fit in the legend
            figure.subplots_adjust(right=0.65)
            if legend_reverse:
                pyplot.legend(list(reversed(bar_plots)),self.add_ellipse(list(reversed(row_labels))), loc="center left", bbox_to_anchor=(1,0.5),
                    title=legend_title, frameon=False, prop={"size":legend_size, "style":legend_style})
            else:
                pyplot.legend(bar_plots,self.add_ellipse(row_labels), loc="center left", bbox_to_anchor=(1,0.5),
                    title=legend_title, frameon=False, prop={"size":legend_size, "style":legend_style})
        figure.suptitle(title, fontsize=14)
        if outfilename:
            pyplot.savefig(outfilename)
            # NOTE(review): this print looks like a garbled markdown image
            # reference (the image path was likely stripped on extraction).
            print("\n\n{#id .class width=675px height=405px}\n\n")
            pyplot.close()
        else:
            pyplot.draw()
    def plot_grouped_barchart(self, data, row_labels, column_labels, title,
        xlabel=None, ylabel=None, legend_title=None, yaxis_in_millions=None, outfilename=None):
        """ Plot a grouped barchart
        :param data: A list of lists containing the data
        :type data: list
        :param row_labels: The labels for the data rows
        :type row_labels: list
        :param column_labels: The labels for the columns
        :type column_labels: list
        :param title: The title for the plot
        :type title: str
        :keyword xlabel: The x-axis label
        :type xlabel: str
        :keyword ylabel: The y-axis label
        :type ylabel: str
        :keyword legend_title: The title for the legend
        :type legend_title: str
        :keyword yaxis_in_millions: Show the y-axis in millions
        :type yaxis_in_millions: bool
        :keyword outfilename: If set, save the figure to this file and print a
            markdown image reference instead of drawing interactively
        :type outfilename: str
        """
        import numpy
        import matplotlib.pyplot as pyplot
        import matplotlib.ticker as ticker
        # create a figure subplot to move the legend
        figure = pyplot.figure(figsize=(10,6),dpi=150)
        subplot=pyplot.subplot(111)
        # create a set of custom colors to prevent overlap
        custom_colors=self._custom_colors(total_colors=len(row_labels))
        # change the yaxis format if set
        axis = pyplot.gca()
        if yaxis_in_millions:
            # get the max value to determine if decimals should be shown on the label
            max_value=max([max(row)for row in data])/1000000.0
            # fewer decimal places as the magnitude grows
            if max_value <= 0.5:
                yaxis_format = lambda value, position: "{:,.3f}".format(float(value/1000000.0))
            elif max_value <= 1:
                yaxis_format = lambda value, position: "{:,.2f}".format(float(value/1000000.0))
            elif max_value <= 5:
                yaxis_format = lambda value, position: "{:,.1f}".format(float(value/1000000.0))
            else:
                yaxis_format = lambda value, position: "{:,}".format(int(value/1000000))
            axis.get_yaxis().set_major_formatter(ticker.FuncFormatter(yaxis_format))
        # set the width of the bars as each total group width is one
        bar_start_point = numpy.arange(len(column_labels))
        gap = 0.1
        bar_width = (1.0 - gap) / len(data)
        # create the grouped barplots with gap offsets
        barplots=[]
        for i, data_set in enumerate(data):
            barplots.append(subplot.bar(bar_start_point + i*bar_width, data_set,
                width=bar_width, color=next(custom_colors)))
        # add labels and title
        if xlabel is not None and len(column_labels) <= self.max_labels:
            pyplot.xlabel(xlabel)
        if ylabel is not None:
            pyplot.ylabel(ylabel)
        pyplot.title(title)
        # place the xticks in the middle of each group
        if len(column_labels) <= self.max_labels:
            # move the bottom of the figure for larger xaxis labels
            # done first before adjusting the width of the figure
            figure.subplots_adjust(bottom=0.3)
            pyplot.xticks(bar_start_point + 0.5, column_labels, fontsize=7, rotation="vertical")
        else:
            pyplot.tick_params(axis="x",which="both",bottom="off",labelbottom="off")
            pyplot.xticks([])
        pyplot.yticks(fontsize=7)
        # reduce the size of the plot to fit in the legend
        subplot_position=subplot.get_position()
        subplot.set_position([subplot_position.x0, subplot_position.y0,
            subplot_position.width *0.80, subplot_position.height])
        subplot.legend(barplots,self.add_ellipse(row_labels),loc="center left", bbox_to_anchor=(1,0.5),
            fontsize=8, title=legend_title, frameon=False)
        if outfilename:
            pyplot.savefig(outfilename)
            # NOTE(review): this print looks like a garbled markdown image
            # reference (the image path was likely stripped on extraction).
            print("\n\n{#id .class width=675px height=405px}\n\n")
            pyplot.close()
        else:
            pyplot.draw()
    def plot_scatter(self, data, title, row_labels, xlabel=None, ylabel=None, trendline=None):
        """ Plot a scatter plot
        :param data: A list of (x, y) pairs of sequences, one pair per series
        :type data: list
        :param title: The title for the plot
        :type title: str
        :param row_labels: The labels for the data rows
        :type row_labels: list
        :keyword xlabel: The x-axis label
        :type xlabel: str
        :keyword ylabel: The y-axis label
        :type ylabel: str
        :keyword trendline: Add a linear trendline per series
        :type trendline: bool
        """
        import numpy
        import matplotlib.pyplot as pyplot
        # create a figure subplot to move the legend
        figure = pyplot.figure()
        subplot = pyplot.subplot(111)
        plots=[]
        for x,y in data:
            # add a scatter plot
            plots.append(subplot.scatter(x,y))
            if trendline:
                # compute linear least squares polynomial fit, returns vector of coefficients
                coeff = numpy.polyfit(x,y,1)
                trendline_function = numpy.poly1d(coeff)
                # add trendline to the plot
                pyplot.plot(x,trendline_function(x))
        if ylabel:
            pyplot.ylabel(ylabel)
        if xlabel:
            pyplot.xlabel(xlabel)
        # reduce the size of the plot to fit in the legend and the xlabels
        subplot_position=subplot.get_position()
        subplot.set_position([subplot_position.x0, subplot_position.y0,
            subplot_position.width *0.80, subplot_position.height])
        subplot.legend(plots,self.add_ellipse(row_labels),loc="center left", bbox_to_anchor=(1,0.5),
            fontsize=7, frameon=False)
        pyplot.title(title)
        pyplot.draw()
    def plot_barchart(self, data, labels=None, title=None, xlabel=None, ylabel=None):
        """ Plot a barchart
        :param data: A list of floats/ints, or a list of single-item lists
        :type data: list
        :keyword labels: The labels for the data rows
        :type labels: list
        :keyword title: The title for the plot
        :type title: str
        :keyword xlabel: The x-axis label
        :type xlabel: str
        :keyword ylabel: The y-axis label
        :type ylabel: str
        :raises ValueError: if given a list of lists with more than one item per row
        """
        import numpy
        import matplotlib.pyplot as pyplot
        figure = pyplot.figure()
        # check for a list of lists
        # if a list of lists of single items is found convert to a list
        # if lists of multiple items are found then issue error
        if isinstance(data[0], list):
            max_length=max([len(row) for row in data])
            if max_length == 1:
                data_list=[row[0] for row in data]
                data=data_list
            else:
                raise ValueError("Provide data to the AnADAMA2 document.plot_barchart as a list of floats or ints.")
        positions=numpy.arange(len(data))
        pyplot.bar(positions, data, align="center")
        if labels:
            pyplot.xticks(positions, labels, rotation="vertical")
        # move the bottom of the figure for larger xaxis labels
        figure.subplots_adjust(bottom=0.3)
        if ylabel:
            pyplot.ylabel(ylabel)
        if xlabel:
            pyplot.xlabel(xlabel)
        pyplot.title(title)
        pyplot.draw()
def _custom_colors(self,total_colors):
""" Get a set of custom colors for a matplotlib plot """
from matplotlib import cm
# create a set of custom colors
# get the max amount of colors for a few different color maps
tab10=[cm.tab10(i/10.0) for i in range(10)]
tab20=[cm.tab20(i/20.0) for i in range(20)]
tab20b=[cm.tab20b(i/20.0) for i in range(20)]
tab20c=[cm.tab20c(i/20.0) for i in range(20)]
if total_colors <= 20:
sets=tab20
else:
sets=tab20c+tab20b
for color in itertools.cycle(sets):
yield color
    def plot_stacked_barchart(self, data, row_labels, column_labels, title,
        xlabel=None, ylabel=None, legend_title=None, legend_style="normal", legend_size=8, outfilename=None, legend_reverse=False):
        """ Plot a stacked barchart
        :param data: A list of lists containing the data
        :type data: list
        :param row_labels: The labels for the data rows
        :type row_labels: list
        :param column_labels: The labels for the columns
        :type column_labels: list
        :param title: The title for the plot
        :type title: str
        :keyword xlabel: The x-axis label
        :type xlabel: str
        :keyword ylabel: The y-axis label
        :type ylabel: str
        :keyword legend_title: The title for the legend
        :type legend_title: str
        :keyword legend_style: The font style for the legend
        :type legend_style: str
        :keyword legend_size: The font size for the legend
        :type legend_size: int
        :keyword outfilename: If set, save the plot to this file instead of drawing it
        :type outfilename: str
        :keyword legend_reverse: Reverse the legend order
        :type legend_reverse: bool
        """
        import numpy
        import matplotlib.pyplot as pyplot
        figure = pyplot.figure(figsize=(10,6),dpi=150)
        subplot=pyplot.subplot(111)
        bar_plots=[]
        names=[]
        # create a set of custom colors to prevent overlap
        custom_colors=self._custom_colors(total_colors=len(row_labels))
        # create a plot for each stacked group
        plot_indexes=numpy.arange(len(column_labels))
        # y_offset tracks the running top of each column's stack; each new
        # row of bars is drawn starting at this offset
        y_offset=numpy.array([0.0]*len(column_labels))
        for name, plot_abundance, color in zip(row_labels, data, custom_colors):
            bar_plots.append(subplot.bar(plot_indexes, plot_abundance,
                bottom=y_offset, align="center", color=color))
            names.append(name)
            # add to the y_offset which is the bottom of the stacked plot
            y_offset=y_offset+plot_abundance
        # Add the title, labels, and legend
        # only label the x-axis when the column labels will actually be shown
        if xlabel is not None and len(column_labels) <= self.max_labels:
            subplot.set_xlabel(xlabel)
        if ylabel is not None:
            subplot.set_ylabel(ylabel)
        pyplot.title(title)
        if len(column_labels) <= self.max_labels:
            # move the bottom of the figure for larger xaxis labels
            # done first before adjusting the width of the figure
            figure.subplots_adjust(bottom=0.3)
            # add labels
            pyplot.xticks(plot_indexes, column_labels, fontsize=7, rotation="vertical")
        else:
            # too many columns to label legibly: hide the x-axis ticks
            pyplot.tick_params(axis="x",which="both",bottom="off",labelbottom="off")
            pyplot.xticks([])
        pyplot.tight_layout()
        # reduce the size of the plot to fit in the legend
        subplot_position=subplot.get_position()
        subplot.set_position([subplot_position.x0, subplot_position.y0,
            subplot_position.width *0.65, subplot_position.height])
        pyplot.yticks(fontsize=7)
        if legend_reverse:
            subplot.legend(list(reversed(bar_plots)),self.add_ellipse(list(reversed(names))),loc="center left", bbox_to_anchor=(1,0.5),
                title=legend_title, frameon=False, prop={"size":legend_size, "style":legend_style})
        else:
            subplot.legend(bar_plots,self.add_ellipse(names),loc="center left", bbox_to_anchor=(1,0.5),
                title=legend_title, frameon=False, prop={"size":legend_size, "style":legend_style})
        if outfilename:
            pyplot.savefig(outfilename)
            # emit a pandoc-style image reference so the report embeds the saved plot
            print("\n\n{#id .class width=675px height=405px}\n\n")
            pyplot.close()
        else:
            pyplot.draw()
    def show_table(self, data, row_labels, column_labels, title, format_data_comma=None,
        location="center", font=None, outfilename=None):
        """ Plot the data as a table
        :param data: A list of lists containing the data
        :type data: list
        :param row_labels: The labels for the data rows
        :type row_labels: list
        :param column_labels: The labels for the columns
        :type column_labels: list
        :param title: The title for the plot
        :type title: str
        :keyword format_data_comma: Format the data as comma delimited
        :type format_data_comma: bool
        :keyword location: The location for the text in the cell
        :type location: str
        :keyword font: The size of the font
        :type font: int
        :keyword outfilename: If set, save the table to this file instead of drawing it
        :type outfilename: str
        """
        import numpy
        import matplotlib.pyplot as pyplot
        from matplotlib.table import Table
        # if the option is set to format the data, add commas
        if format_data_comma:
            format_data = [list(map(lambda x: "{:,}".format(int(x)),row)) for row in data]
        else:
            format_data = [list(map(str,row)) for row in data]
        data=format_data
        # create a figure in one subplot
        figure, axis = pyplot.subplots()
        axis.set_axis_off()
        # create a new table instance
        table = Table(axis, bbox=[0,0,1,1])
        total_rows=len(row_labels)
        total_columns=len(column_labels)
        # all rows share the same height fraction of the table
        height = 1.0 / total_rows
        # get the width of the columns based on
        # the length of the labels and values
        # column 0 is the row-label column, so seed with the widest row label
        max_width_chars = [max(map(len,row_labels))]
        for i in range(total_columns):
            # widest string in this column, considering the header and every value
            current_values=[str(value) for value in [column_labels[i]]+[row[i] for row in data]]
            max_width_chars.append(max(list(map(len,current_values))))
        # compute the widths for each column
        # widths are normalized so they sum to 1 (fractions of the table width)
        total_chars=sum(max_width_chars)*1.0
        column_widths=[i/total_chars for i in max_width_chars]
        # add column labels
        for i, label in enumerate(column_labels):
            table.add_cell(0, i+1, width=column_widths[i+1], height=height, text=label, loc=location)
        # add row labels
        for i, label in enumerate(row_labels):
            table.add_cell(i+1, 0, width=column_widths[0], height=height, text=label, loc=location)
        # Add data
        for i, row in enumerate(data):
            for j, value in enumerate(row):
                table.add_cell(i+1, j+1, width=column_widths[j+1], height=height, text=value, loc=location)
        axis.add_table(table)
        # set the font size for the table
        # first must turn off the auto set font size
        table.auto_set_font_size(False)
        font_size=8
        if total_columns > 3:
            font_size=7
        # use the font if provided
        if font is not None:
            font_size=font
        table.set_fontsize(font_size)
        # add the title
        pyplot.title(title)
        if outfilename:
            pyplot.savefig(outfilename)
            # emit a pandoc-style image reference so the report embeds the saved table
            print("\n\n{#id .class width=675px height=405px}\n\n")
            pyplot.close()
        else:
            pyplot.draw()
def write_table(self, column_labels, row_labels, data, file):
""" Write a table of data to a file
:param column_labels: The labels for the columns
:type column_labels: list
:param row_labels: The labels for the data rows
:type row_labels: list
:param data: A list of lists containing the data
:type data: list
:param file: The file to write the table to
:type file: str
"""
# if the folder for the table does not exist, then create
if not os.path.isdir(os.path.dirname(file)):
os.makedirs(os.path.dirname(file))
with open(file, "w") as file_handle:
file_handle.write("\t".join(column_labels)+"\n")
for name, row in zip(row_labels, data):
file_handle.write("\t".join([name]+[str(i) for i in row])+"\n")
    def show_hclust2(self,sample_names,feature_names,data,title,log_scale=True,zscore=False,metadata_rows=None,method="correlation",outfilename=None):
        """ Create a hclust2 heatmap with dendrogram and show it in the document
        :param sample_names: The names of the samples
        :type sample_names: list
        :param feature_names: The names of the features
        :type feature_names: list
        :param data: A list of lists containing the data
        :type data: list
        :param title: The title for the plot
        :type title: str
        :keyword log_scale: Show the heatmap with the log scale
        :type log_scale: bool
        :keyword zscore: Apply the zscore to the data prior to clustering
        :type zscore: bool
        :keyword metadata_rows: A list of metadata rows
        :type metadata_rows: list
        :keyword method: The distance function for features
        :type method: str
        :keyword outfilename: The file to write the image
        :type outfilename: str
        """
        import matplotlib.pyplot as pyplot
        import numpy
        # apply zscore if requested
        if zscore:
            from scipy import stats
            # the leading metadata rows (if any) are not data and are not transformed
            total_metadata=len(metadata_rows) if metadata_rows else 0
            data[total_metadata:] = stats.zscore(numpy.array(data[total_metadata:]),axis=1).tolist()
        # write a file of the data
        handle, hclust2_input_file=tempfile.mkstemp(prefix="hclust2_input",dir=os.getcwd())
        # if output file is provided, use that instead
        if outfilename:
            heatmap_file=outfilename
            if metadata_rows:
                # derive the legend file name from the output file's name/extension
                outinfo = outfilename.split(".")
                metadata_legend_file = outinfo[0]+"_legend."+outinfo[-1]
        else:
            heatmap_file=hclust2_input_file+".png"
            if metadata_rows:
                metadata_legend_file = hclust2_input_file+"_legend.png"
        self.write_table([" "]+sample_names,feature_names,data,hclust2_input_file)
        # increase the dpi for small text
        dpi=150
        label_font="8"
        label_max="45"
        # compute the aspect ratio based on the number of samples and features
        aspect_ratio=len(sample_names)/(len(feature_names)*1.0)
        # check for hclust executable
        # fall back to the script name if the "hclust2" entry point is not on PATH
        exe_name = "hclust2"
        try:
            with open(os.devnull, 'w') as devnull:
                out=subprocess.check_call("which hclust2", shell=True, stderr=devnull, stdout=devnull)
        except subprocess.CalledProcessError:
            exe_name = "hclust2.py"
        command=[exe_name,"-i",hclust2_input_file,"-o",heatmap_file,"--title",title,
            "--title_font",str(int(label_font)*2),"--cell_aspect_ratio",str(aspect_ratio),
            "--flabel_size", label_font, "--slabel_size", label_font,
            "--colorbar_font_size",label_font,"--dpi",str(dpi),"--f_dist_f",method,"--max_flabel_len",label_max]
        if log_scale:
            command+=["--log_scale"]
        if metadata_rows:
            command+=["--metadata_rows",",".join(str(i) for i in metadata_rows)]
            command+=["--legend_file", metadata_legend_file]
            # scale the metadata band height with the number of metadata rows
            if len(metadata_rows) > 10:
                command+=["--metadata_height","0.8"]
            elif len(metadata_rows) > 4:
                command+=["--metadata_height","0.4"]
            elif len(metadata_rows) > 1:
                command+=["--metadata_height","0.1"]
        # if more than the max samples, do not include sample labels on the heatmap
        if len(sample_names) > self.max_labels:
            command+=["--no_slabels"]
        # if more than max labels, do not include the feature labels on the heatmap
        if len(feature_names) > self.max_labels:
            command+=["--no_flabels"]
        try:
            output=subprocess.check_output(command)
            # read the heatmap png file
            heatmap=pyplot.imread(heatmap_file)
        except (subprocess.CalledProcessError, OSError):
            # best-effort: report the failure and continue with an empty image
            print("Unable to generate heatmap.")
            heatmap=[]
        # if the output file is provided, then just print out a link to it in the doc
        if outfilename:
            if os.path.isfile(heatmap_file):
                print("\n\n{#id .class width=540px height=405px}\n\n")
                if metadata_rows:
                    print("\n\n{#id .class width=540px height=405px}\n\n")
        else:
            # create a subplot and remove the frame and axis labels
            # set the figure and increase the dpi for small text
            fig = pyplot.figure(figsize=(6,6),dpi=dpi)
            if metadata_rows:
                # reserve the bottom quarter of the grid for the metadata legend
                subplot1 = pyplot.subplot2grid((4,1),(0,0), rowspan=3, frame_on=False)
                subplot1.xaxis.set_visible(False)
                subplot1.yaxis.set_visible(False)
            else:
                subplot = fig.add_subplot(111, frame_on=False)
                subplot.xaxis.set_visible(False)
                subplot.yaxis.set_visible(False)
            # show but do not interpolate (as this will make the text hard to read)
            pyplot.imshow(heatmap, interpolation="none")
            if metadata_rows:
                heatmap_legend = pyplot.imread(metadata_legend_file)
                # metadata legend subplot
                subplot2 = pyplot.subplot2grid((4,1),(3,0), rowspan=1, frame_on=False)
                subplot2.xaxis.set_visible(False)
                subplot2.yaxis.set_visible(False)
                pyplot.imshow(heatmap_legend, interpolation="none")
            pyplot.draw()
            # adjust the heatmap to fit in the figure area
            # this is needed to increase the image size (to fit in the increased figure)
            pyplot.tight_layout()
def _run_r(self, commands, args=None):
""" Run R on the commands providing the arguments """
if args is None:
args=[]
proc=subprocess.Popen(["R","--vanilla","--quiet","--args"]+args,
stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = proc.communicate(input=bytearray("\n".join(commands),'utf-8'))
def filter_zero_rows(self, row_names, data):
""" Filter the rows from the data set that sum to zero
:param row_names: The names of the rows
:type row_names: list
:param data: A list of lists containing the data
:type data: list
"""
new_names=[]
new_data=[]
for name, row in zip(row_names, data):
if sum(row) != 0:
new_names.append(name)
new_data.append(row)
return new_names, new_data
def filter_zero_columns(self, column_names, data):
""" Filter the columns from the data set that sum to zero
:param column_names: The names of the columns
:type column_names: list
:param data: A list of lists containing the data
:type data: list
"""
import numpy
new_names, new_data = self.filter_zero_rows(column_names, numpy.transpose(data))
data_temp = []
for row in numpy.transpose(new_data):
data_temp.append(list(row))
new_data = data_temp
return new_names, new_data
    def compute_pcoa(self, sample_names, feature_names, data, apply_transform):
        """ Use the vegan package in R to compute a PCoA.
        Input data should be organized with samples as columns and features as rows.
        Data should be scaled to [0-1] if transform is to be applied.
        :param sample_names: The labels for the columns
        :type sample_names: list
        :param feature_names: The labels for the data rows
        :type feature_names: list
        :param data: A list of lists containing the data
        :type data: list
        :param apply_transform: Arcsin transform to be applied
        :type apply_transform: bool
        :returns: The pcoa scores plus the percent variance for axes 1 and 2
        """
        # R script: read the table, transpose so samples are rows, run capscale,
        # then write the normalized eigenvalues and site scores to files
        r_vegan_pcoa=[
            "library(vegan)",
            "args<-commandArgs(TRUE)",
            "data<-read.table(args[1],sep='\\t',header=TRUE, row.names=1)",
            "data.t<-as.data.frame(t(data))"]
        if apply_transform:
            r_vegan_pcoa+=["pcoa<-capscale(asin(sqrt(data.t))~1,distance='bray')"]
        else:
            r_vegan_pcoa+=["pcoa<-capscale(data.t~1,distance='bray')"]
        r_vegan_pcoa+=[
            "write.table(head(eigenvals(pcoa)/sum(eigenvals(pcoa))),args[2],sep='\\t')",
            "write.table(as.data.frame(scores(pcoa,display='sites')),args[3],sep='\\t')"]
        # test that the data is scaled to [0-1]
        if apply_transform:
            for row in data:
                out_of_range=list(filter(lambda x: x < 0 or x > 1, row))
                if len(out_of_range) > 0:
                    raise ValueError("Provide data to the AnADAMA2 document.show_pcoa function in the range of [0-1].")
        # test for duplicate feature names
        feature_set=set(feature_names)
        if len(list(feature_set)) < len(feature_names):
            raise ValueError("Do not provide duplicate feature names to document.show_pcoa.")
        # test samples are provided as the columns of the data
        if len(data[0]) != len(sample_names):
            raise ValueError("Provide data to the AnADAMA2 document.show_pcoa function in the form of samples as columns.")
        # test features are provided as rows of the data
        if len(data) != len(feature_names):
            raise ValueError("Provide data to the AnADAMA2 document.show_pcoa function in the form of features as rows.")
        # remove any samples from the data for which all features are zero
        sample_names, data = self.filter_zero_columns(sample_names, data)
        # remove any features from the data for which all samples have zero values
        feature_names, data = self.filter_zero_rows(feature_names, data)
        # write a file of the data
        handle, vegan_input_file=tempfile.mkstemp(prefix="vegan_input",dir=os.getcwd())
        eigenvalues_file=vegan_input_file+".eigen"
        scores_file=vegan_input_file+".scores"
        self.write_table(["# "]+sample_names,feature_names,data,vegan_input_file)
        self._run_r(r_vegan_pcoa,[vegan_input_file,eigenvalues_file,scores_file])
        # get the x and y labels
        # if R failed to write the eigenvalues, fall back to zero-percent labels
        r_run_error=False
        try:
            columns, rows, data = self.read_table(eigenvalues_file)
        except EnvironmentError:
            print("No eigenvalues found")
            data=[[0],[0]]
            r_run_error=True
        # percent of variance explained by the first and second axes
        pcoa1_x_label=int(data[0][0]*100)
        pcoa2_y_label=int(data[1][0]*100)
        # get the scores to plot
        try:
            columns, rows, pcoa_data = self.read_table(scores_file)
        except EnvironmentError:
            print("No scores found")
            r_run_error=True
            columns=[]
            rows=[]
            pcoa_data=[]
        # if there were no errors, remove the temp files
        # (on error the files are kept to aid debugging the R run)
        if not r_run_error:
            try:
                os.remove(vegan_input_file)
                os.remove(eigenvalues_file)
                os.remove(scores_file)
            except EnvironmentError:
                print("Warning: Unable to remove temp files")
        return pcoa_data, pcoa1_x_label, pcoa2_y_label
def show_pcoa_multiple_plots(self, sample_names, feature_names, data, title, abundances, legend_title="% Abundance", sample_types="samples", feature_types="species", apply_transform=False):
""" Use the vegan package in R plus matplotlib to plot a PCoA.
Input data should be organized with samples as columns and features as rows.
Data should be scaled to [0-1] if transform is to be applied.
Show multiple PCoA plots as subplots each with coloring based on abundance.
:param sample_names: The labels for the columns
:type sample_names: list
:param feature_names: The labels for the data rows
:type feature_names: list
:param data: A list of lists containing the data
:type data: list
:param title: The title for the plot
:type title: str
:param abundances: The sets of abundance data and names for the subplots
:type abundances: dict
:keyword legend_title: The title for the legend
:type legend_title: str
:keyword sample_types: What type of data are the columns
:type sample_types: str
:keyword feature_types: What type of data are the rows
:type feature_types: str
:keyword apply_transform: Arcsin transform to be applied
:type apply_transform: bool
"""
import numpy
import matplotlib.pyplot as pyplot
from matplotlib import cm
pcoa_data, pcoa1_x_label, pcoa2_y_label=self.compute_pcoa(sample_names, feature_names, data, apply_transform)
# create a figure and subplots
nrows = len(abundances.keys())/2
figure, axis = pyplot.subplots(nrows=nrows,ncols=2)
# if needed, modify matrix of axis to list
reformatted_axis = []
if isinstance(axis[0],numpy.ndarray):
for axis_list in axis:
reformatted_axis+=axis_list.tolist()
axis=reformatted_axis
figure.suptitle(title,fontsize=12,y=1.002)
x_values = [x for x,y in pcoa_data]
y_values = [y for x,y in pcoa_data]
for subplot, abundance_name in zip(axis,sorted(abundances.keys())):
pcoa_plot=subplot.scatter(x_values,y_values,c=abundances[abundance_name],cmap=cm.jet)
figure.colorbar(pcoa_plot,ax=subplot,label=legend_title)
subplot.set_title(abundance_name)
subplot.set(xlabel="PCoA 1 ("+str(pcoa1_x_label)+" %)",ylabel="PCoA 2 ("+str(pcoa2_y_label)+" %)")
subplot.tick_params(axis="both",bottom="off",labelbottom="off",left="off",labelleft="off")
# adjust spacing between subplots
figure.tight_layout()
pyplot.draw()
    def show_pcoa(self, sample_names, feature_names, data, title, sample_types="samples", feature_types="species",
        metadata=None, apply_transform=False, sort_function=None, metadata_type=None, outfilename=None):
        """ Use the vegan package in R plus matplotlib to plot a PCoA.
        Input data should be organized with samples as columns and features as rows.
        Data should be scaled to [0-1] if transform is to be applied.
        :param sample_names: The labels for the columns
        :type sample_names: list
        :param feature_names: The labels for the data rows
        :type feature_names: list
        :param data: A list of lists containing the data
        :type data: list
        :param title: The title for the plot
        :type title: str
        :keyword sample_types: What type of data are the columns
        :type sample_types: str
        :keyword feature_types: What type of data are the rows
        :type feature_types: str
        :keyword metadata: Metadata for each sample
        :type metadata: dict
        :keyword metadata_type: Type of metadata ('con' for continuous, else categorical)
        :type metadata_type: str
        :keyword apply_transform: Arcsin transform to be applied
        :type apply_transform: bool
        :keyword sort_function: The function to sort the plot data
        :type sort_function: lambda
        :keyword outfilename: If set, save the plot to this file instead of drawing it
        :type outfilename: str
        :returns: A caption describing the plot
        """
        import matplotlib.pyplot as pyplot
        import matplotlib.colors as mcolors
        import matplotlib.cm as cm
        import matplotlib.patches as mpatches
        import numpy as np
        pcoa_data, pcoa1_x_label, pcoa2_y_label = self.compute_pcoa(sample_names, feature_names, data, apply_transform)
        # create a figure subplot to move the legend
        figure = pyplot.figure(figsize=(10,6),dpi=150)
        subplot = pyplot.subplot(111)
        # color used for missing/NaN metadata values
        nancolor="grey"
        # create a set of custom colors to prevent overlap
        if metadata:
            metadata_categories = list(set(metadata.values()))
            custom_colors = self._custom_colors(total_colors=len(metadata_categories))
            if metadata_type == 'con':
                # continuous metadata: map values onto a colormap, NaNs get grey
                cleaned_array = [value for value in metadata_categories if ~np.isnan(value)]
                normalize = mcolors.Normalize(vmin=min(cleaned_array), vmax=max(cleaned_array))
                colormap = pyplot.get_cmap('jet')
                scalarmappaple = cm.ScalarMappable(norm=normalize, cmap=colormap)
                scalarmappaple.set_array(cleaned_array)
                custom_colors_cont = []
                for value in metadata_categories:
                    if np.isnan(value):
                        custom_colors_cont.append(nancolor)
                    else:
                        custom_colors_cont.append(colormap(normalize(value)))
                colors_by_metadata = dict((key, color) for key, color in zip(metadata_categories, custom_colors_cont))
            else:
                # categorical metadata: one qualitative color per category
                colors_by_metadata = dict((key, color) for key, color in zip(metadata_categories, custom_colors))
                colors_by_metadata["NA"] = nancolor
        else:
            custom_colors = self._custom_colors(total_colors=len(pcoa_data))
        # reduce the size of the plot to fit in the legend
        subplot_position = subplot.get_position()
        subplot.set_position([subplot_position.x0, subplot_position.y0,
            subplot_position.width * 0.70, subplot_position.height])
        plots = []
        metadata_plots = {}
        # with metadata, group the points per category so each category is a
        # single scatter series; without metadata, scatter each point directly
        for i, (x, y) in enumerate(pcoa_data):
            if metadata:
                if metadata[sample_names[i]] not in metadata_plots:
                    metadata_plots[metadata[sample_names[i]]] = [[x], [y]]
                else:
                    metadata_plots[metadata[sample_names[i]]][0].append(x)
                    metadata_plots[metadata[sample_names[i]]][1].append(y)
            else:
                plots.append(subplot.scatter(x, y, color=next(custom_colors)))
        # order the plots alphabetically or numerically
        # (metadata_plots is empty without metadata, so this loop is a no-op then)
        if not sort_function:
            metadata_ordered_keys = self.sorted_data_numerical_or_alphabetical(list(metadata_plots.keys()))
        else:
            metadata_ordered_keys = sort_function(metadata_plots.keys())
        for key in metadata_ordered_keys:
            plots.append(subplot.scatter(metadata_plots[key][0], metadata_plots[key][1],
                color=colors_by_metadata[key]))
        pyplot.title(title)
        pyplot.xlabel("PCoA 1 (" + str(pcoa1_x_label) + " %)")
        pyplot.ylabel("PCoA 2 (" + str(pcoa2_y_label) + " %)")
        # remove the tick marks on both axis
        pyplot.tick_params(axis="x", which="both", bottom="off", labelbottom="off")
        pyplot.tick_params(axis="y", which="both", left="off", labelleft="off")
        if not metadata and len(sample_names) <= self.max_labels_legend:
            subplot.legend(plots, self.add_ellipse(sample_names), loc="center left", bbox_to_anchor=(1, 0.5),
                fontsize=8, title="Samples", frameon=False)
        if metadata:
            if metadata_type == 'con':
                # NOTE(review): assigning the colorbar to subplot.append looks like
                # a misnomer (it shadows nothing useful) — presumably only the
                # pyplot.colorbar side effect is wanted; confirm before changing
                subplot.append = pyplot.colorbar(scalarmappaple)
                if nancolor in custom_colors_cont:
                    figure.text(0.24, 0.01, "NA/Unknown values are shown in grey.")
            else:
                if len(metadata_ordered_keys) <= self.max_labels_legend:
                    subplot.legend(plots, self.add_ellipse(metadata_ordered_keys), loc="center left", bbox_to_anchor=(1, 0.5),
                        fontsize=8, frameon=False)
        # build the caption text describing the analysis
        if apply_transform:
            caption = "\n".join(
                ["Principal coordinate analysis of variance among " + sample_types + ", based on Bray-Curtis ",
                 "dissimilarities between " + feature_types + " profiles of " + sample_types + ". Filtered " + feature_types + "' relative abundances ",
                 "were arcsin-square root transformed to approximate a normal distribution and down-weigh the effect ",
                 "of highly abundant " + feature_types + " on Bray-Curtis dissimilarities. Numbers in parenthesis on each axis ",
                 "represent the amount of variance explained by that axis."])
        else:
            caption = "\n".join(
                ["Principal coordinate analysis of variance among " + sample_types + ", based on Bray-Curtis ",
                 "dissimilarities between " + feature_types + " profiles of " + sample_types + ". Numbers in parenthesis on each axis ",
                 "represent the amount of variance explained by that axis."])
        if outfilename:
            pyplot.savefig(outfilename)
            # emit a pandoc-style image reference so the report embeds the saved plot
            print("\n\n{#id .class width=675px height=405px}\n\n")
            pyplot.close()
        else:
            pyplot.draw()
        return caption
| [
"matplotlib.pyplot.ylabel",
"numpy.polyfit",
"matplotlib.table.Table",
"numpy.array",
"matplotlib.cm.tab10",
"sys.exit",
"numpy.poly1d",
"matplotlib.pyplot.subplot2grid",
"os.remove",
"matplotlib.pyplot.imshow",
"re.split",
"os.listdir",
"matplotlib.cm.tab20c",
"matplotlib.ticker.FuncForma... | [((5670, 5703), 'os.path.splitext', 'os.path.splitext', (['self.targets[0]'], {}), '(self.targets[0])\n', (5686, 5703), False, 'import os\n'), ((8888, 8899), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8897, 8899), False, 'import os\n'), ((8908, 8932), 'os.chdir', 'os.chdir', (['temp_directory'], {}), '(temp_directory)\n', (8916, 8932), False, 'import os\n'), ((13978, 14013), 'os.chdir', 'os.chdir', (['current_working_directory'], {}), '(current_working_directory)\n', (13986, 14013), False, 'import os\n'), ((14099, 14140), 'shutil.copy', 'shutil.copy', (['temp_report', 'self.targets[0]'], {}), '(temp_report, self.targets[0])\n', (14110, 14140), False, 'import shutil\n'), ((14215, 14254), 'os.path.join', 'os.path.join', (['temp_directory', '"""figures"""'], {}), "(temp_directory, 'figures')\n", (14227, 14254), False, 'import os\n'), ((14274, 14305), 'os.listdir', 'os.listdir', (['temp_figures_folder'], {}), '(temp_figures_folder)\n', (14284, 14305), False, 'import os\n'), ((14485, 14534), 'shutil.rmtree', 'shutil.rmtree', (['temp_directory'], {'ignore_errors': '(True)'}), '(temp_directory, ignore_errors=True)\n', (14498, 14534), False, 'import shutil\n'), ((18317, 18355), 'matplotlib.pyplot.axhline', 'pyplot.axhline', (['threshold'], {'color': 'color'}), '(threshold, color=color)\n', (18331, 18355), True, 'import matplotlib.pyplot as pyplot\n'), ((18418, 18431), 'matplotlib.pyplot.draw', 'pyplot.draw', ([], {}), '()\n', (18429, 18431), True, 'import matplotlib.pyplot as pyplot\n'), ((20079, 20184), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['(1)', 'total_groups'], {'sharey': '(True)', 'gridspec_kw': "{'wspace': 0.02}", 'figsize': '(10, 6)', 'dpi': '(150)'}), "(1, total_groups, sharey=True, gridspec_kw={'wspace': 0.02},\n figsize=(10, 6), dpi=150)\n", (20094, 20184), True, 'import matplotlib.pyplot as pyplot\n'), ((22339, 22360), 'matplotlib.pyplot.tight_layout', 'pyplot.tight_layout', ([], {}), '()\n', (22358, 22360), True, 
'import matplotlib.pyplot as pyplot\n'), ((24508, 24547), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(10, 6)', 'dpi': '(150)'}), '(figsize=(10, 6), dpi=150)\n', (24521, 24547), True, 'import matplotlib.pyplot as pyplot\n'), ((24562, 24581), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['(111)'], {}), '(111)\n', (24576, 24581), True, 'import matplotlib.pyplot as pyplot\n'), ((24787, 24799), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (24797, 24799), True, 'import matplotlib.pyplot as pyplot\n'), ((26264, 26283), 'matplotlib.pyplot.title', 'pyplot.title', (['title'], {}), '(title)\n', (26276, 26283), True, 'import matplotlib.pyplot as pyplot\n'), ((26814, 26839), 'matplotlib.pyplot.yticks', 'pyplot.yticks', ([], {'fontsize': '(7)'}), '(fontsize=7)\n', (26827, 26839), True, 'import matplotlib.pyplot as pyplot\n'), ((28313, 28328), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (28326, 28328), True, 'import matplotlib.pyplot as pyplot\n'), ((28347, 28366), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['(111)'], {}), '(111)\n', (28361, 28366), True, 'import matplotlib.pyplot as pyplot\n'), ((29375, 29394), 'matplotlib.pyplot.title', 'pyplot.title', (['title'], {}), '(title)\n', (29387, 29394), True, 'import matplotlib.pyplot as pyplot\n'), ((29404, 29417), 'matplotlib.pyplot.draw', 'pyplot.draw', ([], {}), '()\n', (29415, 29417), True, 'import matplotlib.pyplot as pyplot\n'), ((30070, 30085), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (30083, 30085), True, 'import matplotlib.pyplot as pyplot\n'), ((30660, 30703), 'matplotlib.pyplot.bar', 'pyplot.bar', (['positions', 'data'], {'align': '"""center"""'}), "(positions, data, align='center')\n", (30670, 30703), True, 'import matplotlib.pyplot as pyplot\n'), ((31040, 31059), 'matplotlib.pyplot.title', 'pyplot.title', (['title'], {}), '(title)\n', (31052, 31059), True, 'import matplotlib.pyplot as pyplot\n'), ((31069, 31082), 'matplotlib.pyplot.draw', 
'pyplot.draw', ([], {}), '()\n', (31080, 31082), True, 'import matplotlib.pyplot as pyplot\n'), ((31704, 31725), 'itertools.cycle', 'itertools.cycle', (['sets'], {}), '(sets)\n', (31719, 31725), False, 'import itertools\n'), ((33032, 33071), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(10, 6)', 'dpi': '(150)'}), '(figsize=(10, 6), dpi=150)\n', (33045, 33071), True, 'import matplotlib.pyplot as pyplot\n'), ((33086, 33105), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['(111)'], {}), '(111)\n', (33100, 33105), True, 'import matplotlib.pyplot as pyplot\n'), ((34075, 34094), 'matplotlib.pyplot.title', 'pyplot.title', (['title'], {}), '(title)\n', (34087, 34094), True, 'import matplotlib.pyplot as pyplot\n'), ((34593, 34614), 'matplotlib.pyplot.tight_layout', 'pyplot.tight_layout', ([], {}), '()\n', (34612, 34614), True, 'import matplotlib.pyplot as pyplot\n'), ((34886, 34911), 'matplotlib.pyplot.yticks', 'pyplot.yticks', ([], {'fontsize': '(7)'}), '(fontsize=7)\n', (34899, 34911), True, 'import matplotlib.pyplot as pyplot\n'), ((36982, 36999), 'matplotlib.pyplot.subplots', 'pyplot.subplots', ([], {}), '()\n', (36997, 36999), True, 'import matplotlib.pyplot as pyplot\n'), ((37091, 37121), 'matplotlib.table.Table', 'Table', (['axis'], {'bbox': '[0, 0, 1, 1]'}), '(axis, bbox=[0, 0, 1, 1])\n', (37096, 37121), False, 'from matplotlib.table import Table\n'), ((38785, 38804), 'matplotlib.pyplot.title', 'pyplot.title', (['title'], {}), '(title)\n', (38797, 38804), True, 'import matplotlib.pyplot as pyplot\n'), ((46079, 46219), 'subprocess.Popen', 'subprocess.Popen', (["(['R', '--vanilla', '--quiet', '--args'] + args)"], {'stdin': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), "(['R', '--vanilla', '--quiet', '--args'] + args, stdin=\n subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n", (46095, 46219), False, 'import subprocess\n'), ((47384, 47409), 'numpy.transpose', 'numpy.transpose', (['new_data'], {}), 
'(new_data)\n', (47399, 47409), False, 'import numpy\n'), ((53427, 53464), 'matplotlib.pyplot.subplots', 'pyplot.subplots', ([], {'nrows': 'nrows', 'ncols': '(2)'}), '(nrows=nrows, ncols=2)\n', (53442, 53464), True, 'import matplotlib.pyplot as pyplot\n'), ((54461, 54474), 'matplotlib.pyplot.draw', 'pyplot.draw', ([], {}), '()\n', (54472, 54474), True, 'import matplotlib.pyplot as pyplot\n'), ((56267, 56306), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(10, 6)', 'dpi': '(150)'}), '(figsize=(10, 6), dpi=150)\n', (56280, 56306), True, 'import matplotlib.pyplot as pyplot\n'), ((56323, 56342), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['(111)'], {}), '(111)\n', (56337, 56342), True, 'import matplotlib.pyplot as pyplot\n'), ((59012, 59031), 'matplotlib.pyplot.title', 'pyplot.title', (['title'], {}), '(title)\n', (59024, 59031), True, 'import matplotlib.pyplot as pyplot\n'), ((59212, 59287), 'matplotlib.pyplot.tick_params', 'pyplot.tick_params', ([], {'axis': '"""x"""', 'which': '"""both"""', 'bottom': '"""off"""', 'labelbottom': '"""off"""'}), "(axis='x', which='both', bottom='off', labelbottom='off')\n", (59230, 59287), True, 'import matplotlib.pyplot as pyplot\n'), ((59296, 59367), 'matplotlib.pyplot.tick_params', 'pyplot.tick_params', ([], {'axis': '"""y"""', 'which': '"""both"""', 'left': '"""off"""', 'labelleft': '"""off"""'}), "(axis='y', which='both', left='off', labelleft='off')\n", (59314, 59367), True, 'import matplotlib.pyplot as pyplot\n'), ((4733, 4764), 'os.path.isdir', 'os.path.isdir', (['self.data_folder'], {}), '(self.data_folder)\n', (4746, 4764), False, 'import os\n'), ((5508, 5543), 'os.path.splitext', 'os.path.splitext', (['self.templates[0]'], {}), '(self.templates[0])\n', (5524, 5543), False, 'import os\n'), ((5923, 5956), 'os.path.basename', 'os.path.basename', (['report_filename'], {}), '(report_filename)\n', (5939, 5956), False, 'import os\n'), ((13437, 13447), 'io.StringIO', 'StringIO', ([], {}), '()\n', (13445, 
13447), False, 'from io import StringIO\n'), ((13467, 13486), 'pweave.Pweb', 'Pweb', (['temp_template'], {}), '(temp_template)\n', (13471, 13486), False, 'from pweave import PwebPandocFormatter, Pweb, PwebProcessor\n'), ((14660, 14689), 'os.rmdir', 'os.rmdir', (['self.figures_folder'], {}), '(self.figures_folder)\n', (14668, 14689), False, 'import os\n'), ((14810, 14836), 'os.rmdir', 'os.rmdir', (['self.data_folder'], {}), '(self.data_folder)\n', (14818, 14836), False, 'import os\n'), ((21607, 21643), 'matplotlib.pyplot.sca', 'pyplot.sca', (['group_axis[group_number]'], {}), '(group_axis[group_number])\n', (21617, 21643), True, 'import matplotlib.pyplot as pyplot\n'), ((21721, 21754), 'matplotlib.pyplot.title', 'pyplot.title', (['group_name'], {'size': '(12)'}), '(group_name, size=12)\n', (21733, 21754), True, 'import matplotlib.pyplot as pyplot\n'), ((22252, 22277), 'matplotlib.pyplot.yticks', 'pyplot.yticks', ([], {'fontsize': '(7)'}), '(fontsize=7)\n', (22265, 22277), True, 'import matplotlib.pyplot as pyplot\n'), ((23170, 23197), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['outfilename'], {}), '(outfilename)\n', (23184, 23197), True, 'import matplotlib.pyplot as pyplot\n'), ((23297, 23311), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (23309, 23311), True, 'import matplotlib.pyplot as pyplot\n'), ((23338, 23351), 'matplotlib.pyplot.draw', 'pyplot.draw', ([], {}), '()\n', (23349, 23351), True, 'import matplotlib.pyplot as pyplot\n'), ((26156, 26177), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['xlabel'], {}), '(xlabel)\n', (26169, 26177), True, 'import matplotlib.pyplot as pyplot\n'), ((26221, 26242), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['ylabel'], {}), '(ylabel)\n', (26234, 26242), True, 'import matplotlib.pyplot as pyplot\n'), ((26592, 26681), 'matplotlib.pyplot.xticks', 'pyplot.xticks', (['(bar_start_point + 0.5)', 'column_labels'], {'fontsize': '(7)', 'rotation': '"""vertical"""'}), "(bar_start_point + 0.5, column_labels, 
fontsize=7, rotation=\n 'vertical')\n", (26605, 26681), True, 'import matplotlib.pyplot as pyplot\n'), ((26703, 26778), 'matplotlib.pyplot.tick_params', 'pyplot.tick_params', ([], {'axis': '"""x"""', 'which': '"""both"""', 'bottom': '"""off"""', 'labelbottom': '"""off"""'}), "(axis='x', which='both', bottom='off', labelbottom='off')\n", (26721, 26778), True, 'import matplotlib.pyplot as pyplot\n'), ((26788, 26805), 'matplotlib.pyplot.xticks', 'pyplot.xticks', (['[]'], {}), '([])\n', (26801, 26805), True, 'import matplotlib.pyplot as pyplot\n'), ((27314, 27341), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['outfilename'], {}), '(outfilename)\n', (27328, 27341), True, 'import matplotlib.pyplot as pyplot\n'), ((27441, 27455), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (27453, 27455), True, 'import matplotlib.pyplot as pyplot\n'), ((27482, 27495), 'matplotlib.pyplot.draw', 'pyplot.draw', ([], {}), '()\n', (27493, 27495), True, 'import matplotlib.pyplot as pyplot\n'), ((28853, 28874), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['ylabel'], {}), '(ylabel)\n', (28866, 28874), True, 'import matplotlib.pyplot as pyplot\n'), ((28906, 28927), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['xlabel'], {}), '(xlabel)\n', (28919, 28927), True, 'import matplotlib.pyplot as pyplot\n'), ((30735, 30788), 'matplotlib.pyplot.xticks', 'pyplot.xticks', (['positions', 'labels'], {'rotation': '"""vertical"""'}), "(positions, labels, rotation='vertical')\n", (30748, 30788), True, 'import matplotlib.pyplot as pyplot\n'), ((30944, 30965), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['ylabel'], {}), '(ylabel)\n', (30957, 30965), True, 'import matplotlib.pyplot as pyplot\n'), ((30997, 31018), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['xlabel'], {}), '(xlabel)\n', (31010, 31018), True, 'import matplotlib.pyplot as pyplot\n'), ((31370, 31388), 'matplotlib.cm.tab10', 'cm.tab10', (['(i / 10.0)'], {}), '(i / 10.0)\n', (31378, 31388), True, 'import matplotlib.cm as 
cm\n'), ((31422, 31440), 'matplotlib.cm.tab20', 'cm.tab20', (['(i / 20.0)'], {}), '(i / 20.0)\n', (31430, 31440), True, 'import matplotlib.cm as cm\n'), ((31475, 31494), 'matplotlib.cm.tab20b', 'cm.tab20b', (['(i / 20.0)'], {}), '(i / 20.0)\n', (31484, 31494), True, 'import matplotlib.cm as cm\n'), ((31529, 31548), 'matplotlib.cm.tab20c', 'cm.tab20c', (['(i / 20.0)'], {}), '(i / 20.0)\n', (31538, 31548), True, 'import matplotlib.cm as cm\n'), ((34372, 34447), 'matplotlib.pyplot.xticks', 'pyplot.xticks', (['plot_indexes', 'column_labels'], {'fontsize': '(7)', 'rotation': '"""vertical"""'}), "(plot_indexes, column_labels, fontsize=7, rotation='vertical')\n", (34385, 34447), True, 'import matplotlib.pyplot as pyplot\n'), ((34474, 34549), 'matplotlib.pyplot.tick_params', 'pyplot.tick_params', ([], {'axis': '"""x"""', 'which': '"""both"""', 'bottom': '"""off"""', 'labelbottom': '"""off"""'}), "(axis='x', which='both', bottom='off', labelbottom='off')\n", (34492, 34549), True, 'import matplotlib.pyplot as pyplot\n'), ((34559, 34576), 'matplotlib.pyplot.xticks', 'pyplot.xticks', (['[]'], {}), '([])\n', (34572, 34576), True, 'import matplotlib.pyplot as pyplot\n'), ((35436, 35463), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['outfilename'], {}), '(outfilename)\n', (35450, 35463), True, 'import matplotlib.pyplot as pyplot\n'), ((35563, 35577), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (35575, 35577), True, 'import matplotlib.pyplot as pyplot\n'), ((35605, 35618), 'matplotlib.pyplot.draw', 'pyplot.draw', ([], {}), '()\n', (35616, 35618), True, 'import matplotlib.pyplot as pyplot\n'), ((38842, 38869), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['outfilename'], {}), '(outfilename)\n', (38856, 38869), True, 'import matplotlib.pyplot as pyplot\n'), ((38969, 38983), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (38981, 38983), True, 'import matplotlib.pyplot as pyplot\n'), ((39010, 39023), 'matplotlib.pyplot.draw', 'pyplot.draw', ([], 
{}), '()\n', (39021, 39023), True, 'import matplotlib.pyplot as pyplot\n'), ((43906, 43938), 'subprocess.check_output', 'subprocess.check_output', (['command'], {}), '(command)\n', (43929, 43938), False, 'import subprocess\n'), ((43999, 44026), 'matplotlib.pyplot.imread', 'pyplot.imread', (['heatmap_file'], {}), '(heatmap_file)\n', (44012, 44026), True, 'import matplotlib.pyplot as pyplot\n'), ((44282, 44310), 'os.path.isfile', 'os.path.isfile', (['heatmap_file'], {}), '(heatmap_file)\n', (44296, 44310), False, 'import os\n'), ((44708, 44746), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(6, 6)', 'dpi': 'dpi'}), '(figsize=(6, 6), dpi=dpi)\n', (44721, 44746), True, 'import matplotlib.pyplot as pyplot\n'), ((45238, 45282), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['heatmap'], {'interpolation': '"""none"""'}), "(heatmap, interpolation='none')\n", (45251, 45282), True, 'import matplotlib.pyplot as pyplot\n'), ((45693, 45706), 'matplotlib.pyplot.draw', 'pyplot.draw', ([], {}), '()\n', (45704, 45706), True, 'import matplotlib.pyplot as pyplot\n'), ((45867, 45888), 'matplotlib.pyplot.tight_layout', 'pyplot.tight_layout', ([], {}), '()\n', (45886, 45888), True, 'import matplotlib.pyplot as pyplot\n'), ((47319, 47340), 'numpy.transpose', 'numpy.transpose', (['data'], {}), '(data)\n', (47334, 47340), False, 'import numpy\n'), ((61231, 61258), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['outfilename'], {}), '(outfilename)\n', (61245, 61258), True, 'import matplotlib.pyplot as pyplot\n'), ((61358, 61372), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (61370, 61372), True, 'import matplotlib.pyplot as pyplot\n'), ((61399, 61412), 'matplotlib.pyplot.draw', 'pyplot.draw', ([], {}), '()\n', (61410, 61412), True, 'import matplotlib.pyplot as pyplot\n'), ((2593, 2625), 'os.path.dirname', 'os.path.dirname', (['self.targets[0]'], {}), '(self.targets[0])\n', (2608, 2625), False, 'import os\n'), ((2656, 2690), 'os.path.isdir', 'os.path.isdir', 
(['self.figures_folder'], {}), '(self.figures_folder)\n', (2669, 2690), False, 'import os\n'), ((2708, 2740), 'os.makedirs', 'os.makedirs', (['self.figures_folder'], {}), '(self.figures_folder)\n', (2719, 2740), False, 'import os\n'), ((2802, 2834), 'os.path.dirname', 'os.path.dirname', (['self.targets[0]'], {}), '(self.targets[0])\n', (2817, 2834), False, 'import os\n'), ((2862, 2893), 'os.path.isdir', 'os.path.isdir', (['self.data_folder'], {}), '(self.data_folder)\n', (2875, 2893), False, 'import os\n'), ((2911, 2940), 'os.makedirs', 'os.makedirs', (['self.data_folder'], {}), '(self.data_folder)\n', (2922, 2940), False, 'import os\n'), ((3923, 3944), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (3937, 3944), False, 'import matplotlib\n'), ((4163, 4233), 'subprocess.check_output', 'subprocess.check_output', (["['pypublish', '-h']"], {'stderr': 'subprocess.STDOUT'}), "(['pypublish', '-h'], stderr=subprocess.STDOUT)\n", (4186, 4233), False, 'import subprocess\n'), ((4400, 4473), 'subprocess.check_output', 'subprocess.check_output', (["['pdflatex', '--help']"], {'stderr': 'subprocess.STDOUT'}), "(['pdflatex', '--help'], stderr=subprocess.STDOUT)\n", (4423, 4473), False, 'import subprocess\n'), ((5828, 5860), 'os.path.dirname', 'os.path.dirname', (['self.targets[0]'], {}), '(self.targets[0])\n', (5843, 5860), False, 'import os\n'), ((10218, 10258), 're.split', 're.split', (['"""(<%[\\\\w\\\\s\\\\W]*?%>)"""', 'content'], {}), "('(<%[\\\\w\\\\s\\\\W]*?%>)', content)\n", (10226, 10258), False, 'import re\n'), ((14331, 14370), 'os.path.join', 'os.path.join', (['temp_figures_folder', 'file'], {}), '(temp_figures_folder, file)\n', (14343, 14370), False, 'import os\n'), ((14370, 14409), 'os.path.join', 'os.path.join', (['self.figures_folder', 'file'], {}), '(self.figures_folder, file)\n', (14382, 14409), False, 'import os\n'), ((14609, 14640), 'os.listdir', 'os.listdir', (['self.figures_folder'], {}), '(self.figures_folder)\n', (14619, 14640), 
False, 'import os\n'), ((14762, 14790), 'os.listdir', 'os.listdir', (['self.data_folder'], {}), '(self.data_folder)\n', (14772, 14790), False, 'import os\n'), ((15247, 15262), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (15257, 15262), False, 'import os\n'), ((22023, 22098), 'matplotlib.pyplot.xticks', 'pyplot.xticks', (['plot_indexes', 'column_labels'], {'fontsize': '(7)', 'rotation': '"""vertical"""'}), "(plot_indexes, column_labels, fontsize=7, rotation='vertical')\n", (22036, 22098), True, 'import matplotlib.pyplot as pyplot\n'), ((22133, 22208), 'matplotlib.pyplot.tick_params', 'pyplot.tick_params', ([], {'axis': '"""x"""', 'which': '"""both"""', 'bottom': '"""off"""', 'labelbottom': '"""off"""'}), "(axis='x', which='both', bottom='off', labelbottom='off')\n", (22151, 22208), True, 'import matplotlib.pyplot as pyplot\n'), ((22222, 22239), 'matplotlib.pyplot.xticks', 'pyplot.xticks', (['[]'], {}), '([])\n', (22235, 22239), True, 'import matplotlib.pyplot as pyplot\n'), ((25523, 25557), 'matplotlib.ticker.FuncFormatter', 'ticker.FuncFormatter', (['yaxis_format'], {}), '(yaxis_format)\n', (25543, 25557), True, 'import matplotlib.ticker as ticker\n'), ((28647, 28669), 'numpy.polyfit', 'numpy.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (28660, 28669), False, 'import numpy\n'), ((28705, 28724), 'numpy.poly1d', 'numpy.poly1d', (['coeff'], {}), '(coeff)\n', (28717, 28724), False, 'import numpy\n'), ((39626, 39647), 'os.path.dirname', 'os.path.dirname', (['file'], {}), '(file)\n', (39641, 39647), False, 'import os\n'), ((39674, 39695), 'os.path.dirname', 'os.path.dirname', (['file'], {}), '(file)\n', (39689, 39695), False, 'import os\n'), ((41520, 41531), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (41529, 41531), False, 'import os\n'), ((42461, 42548), 'subprocess.check_call', 'subprocess.check_call', (['"""which hclust2"""'], {'shell': '(True)', 'stderr': 'devnull', 'stdout': 'devnull'}), "('which hclust2', shell=True, stderr=devnull, stdout=\n 
devnull)\n", (42482, 42548), False, 'import subprocess\n'), ((44803, 44865), 'matplotlib.pyplot.subplot2grid', 'pyplot.subplot2grid', (['(4, 1)', '(0, 0)'], {'rowspan': '(3)', 'frame_on': '(False)'}), '((4, 1), (0, 0), rowspan=3, frame_on=False)\n', (44822, 44865), True, 'import matplotlib.pyplot as pyplot\n'), ((45347, 45382), 'matplotlib.pyplot.imread', 'pyplot.imread', (['metadata_legend_file'], {}), '(metadata_legend_file)\n', (45360, 45382), True, 'import matplotlib.pyplot as pyplot\n'), ((45452, 45514), 'matplotlib.pyplot.subplot2grid', 'pyplot.subplot2grid', (['(4, 1)', '(3, 0)'], {'rowspan': '(1)', 'frame_on': '(False)'}), '((4, 1), (3, 0), rowspan=1, frame_on=False)\n', (45471, 45514), True, 'import matplotlib.pyplot as pyplot\n'), ((45628, 45679), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['heatmap_legend'], {'interpolation': '"""none"""'}), "(heatmap_legend, interpolation='none')\n", (45641, 45679), True, 'import matplotlib.pyplot as pyplot\n'), ((50349, 50360), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (50358, 50360), False, 'import os\n'), ((51413, 51440), 'os.remove', 'os.remove', (['vegan_input_file'], {}), '(vegan_input_file)\n', (51422, 51440), False, 'import os\n'), ((51457, 51484), 'os.remove', 'os.remove', (['eigenvalues_file'], {}), '(eigenvalues_file)\n', (51466, 51484), False, 'import os\n'), ((51501, 51523), 'os.remove', 'os.remove', (['scores_file'], {}), '(scores_file)\n', (51510, 51523), False, 'import os\n'), ((56856, 56878), 'matplotlib.pyplot.get_cmap', 'pyplot.get_cmap', (['"""jet"""'], {}), "('jet')\n", (56871, 56878), True, 'import matplotlib.pyplot as pyplot\n'), ((56912, 56960), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'norm': 'normalize', 'cmap': 'colormap'}), '(norm=normalize, cmap=colormap)\n', (56929, 56960), True, 'import matplotlib.cm as cm\n'), ((59717, 59748), 'matplotlib.pyplot.colorbar', 'pyplot.colorbar', (['scalarmappaple'], {}), '(scalarmappaple)\n', (59732, 59748), True, 'import 
matplotlib.pyplot as pyplot\n'), ((3278, 3318), 'os.path.dirname', 'os.path.dirname', (["self.vars['targets'][0]"], {}), "(self.vars['targets'][0])\n", (3293, 3318), False, 'import os\n'), ((3378, 3418), 'os.path.dirname', 'os.path.dirname', (["self.vars['targets'][0]"], {}), "(self.vars['targets'][0])\n", (3393, 3418), False, 'import os\n'), ((3709, 3765), 'sys.exit', 'sys.exit', (['"""Please install numpy for document generation"""'], {}), "('Please install numpy for document generation')\n", (3717, 3765), False, 'import sys\n'), ((4044, 4105), 'sys.exit', 'sys.exit', (['"""Please install matplotlib for document generation"""'], {}), "('Please install matplotlib for document generation')\n", (4052, 4105), False, 'import sys\n'), ((4285, 4342), 'sys.exit', 'sys.exit', (['"""Please install pweave for document generation"""'], {}), "('Please install pweave for document generation')\n", (4293, 4342), False, 'import sys\n'), ((4525, 4610), 'sys.exit', 'sys.exit', (['"""Please install latex which includes pdflatex for document generation"""'], {}), "('Please install latex which includes pdflatex for document generation'\n )\n", (4533, 4610), False, 'import sys\n'), ((57131, 57146), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (57139, 57146), True, 'import numpy as np\n'), ((6745, 6779), 'os.path.isdir', 'os.path.isdir', (['self.vars[variable]'], {}), '(self.vars[variable])\n', (6758, 6779), False, 'import os\n'), ((6783, 6818), 'os.path.isfile', 'os.path.isfile', (['self.vars[variable]'], {}), '(self.vars[variable])\n', (6797, 6818), False, 'import os\n'), ((6866, 6902), 'os.path.abspath', 'os.path.abspath', (['self.vars[variable]'], {}), '(self.vars[variable])\n', (6881, 6902), False, 'import os\n'), ((41345, 41379), 'numpy.array', 'numpy.array', (['data[total_metadata:]'], {}), '(data[total_metadata:])\n', (41356, 41379), False, 'import numpy\n'), ((5001, 5028), 'os.path.basename', 'os.path.basename', (['data_file'], {}), '(data_file)\n', (5017, 5028), 
False, 'import os\n'), ((56716, 56731), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (56724, 56731), True, 'import numpy as np\n'), ((8089, 8110), 'os.path.dirname', 'os.path.dirname', (['file'], {}), '(file)\n', (8104, 8110), False, 'import os\n')] |
"""
hyperparams.py
====================================
It provides configuration for the tunable hyper-parameter ranges for all the algorithms.
"""
from argparse import ArgumentParser
from hyperopt import hp
from hyperopt.pyll.base import scope
import numpy as np
class HyperparamterLoader:
    """Look up the per-dataset, per-algorithm hyper-parameter settings.

    The stored settings aim to reproduce the experimental setups reported
    in each algorithm's original paper.
    """

    def __init__(self):
        # Paper-reported settings for the Freebase15k benchmark.
        fb15k_settings = {
            'transe': {'learning_rate': 0.01, 'L1_flag': True, 'hidden_size': 50, 'batch_size': 128, 'epochs': 1000, 'margin': 1.00, 'optimizer': 'sgd', 'sampling': "uniform", 'neg_rate': 1},
            'transh': {'learning_rate': 0.005, 'L1_flag': False, 'hidden_size': 50, 'batch_size': 1200, 'epochs': 1000, 'margin': 0.5, 'optimizer': 'sgd', 'sampling': "uniform", 'neg_rate': 1, 'C': 0.015625},
            'hole': {'learning_rate': 0.01, 'L1_flag': True, 'hidden_size': 50, 'batch_size': 512, 'epochs': 1000, 'margin': 1.0, 'optimizer': 'adam', 'sampling': "uniform", 'neg_rate': 1},
            'transm': {'learning_rate': 0.001, 'L1_flag': True, 'hidden_size': 50, 'batch_size': 128, 'epochs': 1000, 'margin': 1.0, 'optimizer': 'adam', 'sampling': "uniform", 'neg_rate': 1},
            'rescal': {'learning_rate': 0.001, 'L1_flag': True, 'hidden_size': 50, 'batch_size': 128, 'epochs': 1000, 'margin': 1.0, 'optimizer': 'adam', 'sampling': "uniform", 'neg_rate': 1},
            'rotate': {'learning_rate': 0.01, 'L1_flag': True, 'hidden_size': 50, 'batch_size': 128, 'epochs': 1000, 'margin': 1.0, 'optimizer': 'adam', 'sampling': "uniform", 'neg_rate': 1},
            'sme': {'learning_rate': 0.001, 'L1_flag': True, 'hidden_size': 50, 'batch_size': 128, 'epochs': 1000, 'margin': 1.0, 'optimizer': 'adam', 'sampling': "uniform", 'neg_rate': 1, 'bilinear': False},
            'transr': {'learning_rate': 0.001, 'L1_flag': True, 'ent_hidden_size': 50, 'rel_hidden_size': 50, 'batch_size': 4800, 'epochs': 1000, 'margin': 1.0, 'optimizer': 'sgd', 'sampling': "bern", 'neg_rate': 1},
            'transd': {'learning_rate': 0.001, 'L1_flag': False, 'ent_hidden_size': 50, 'rel_hidden_size': 50, 'batch_size': 200, 'epochs': 1000, 'margin': 1.0, 'optimizer': 'sgd', 'sampling': "uniform", 'neg_rate': 1},
            'ntn': {'learning_rate': 0.01, 'L1_flag': True, 'ent_hidden_size': 64, 'rel_hidden_size': 32, 'batch_size': 128, 'epochs': 1000, 'margin': 1.0, 'optimizer': 'adam', 'sampling': "uniform", 'neg_rate': 1},  # problematic
            'slm': {'learning_rate': 0.01, 'L1_flag': True, 'ent_hidden_size': 64, 'rel_hidden_size': 32, 'batch_size': 128, 'epochs': 1000, 'margin': 1.0, 'optimizer': 'adam', 'sampling': "uniform", 'neg_rate': 1},
            'kg2e': {'learning_rate': 0.01, 'L1_flag': True, 'hidden_size': 50, 'batch_size': 1440, 'epochs': 1000, 'margin': 4.0, 'optimizer': 'sgd', 'sampling': "uniform", 'distance_measure': "kl_divergence", 'cmax': 0.05, 'cmin': 5.00, 'neg_rate': 1},
            'complex': {'learning_rate': 0.5, 'hidden_size': 100, 'batch_size': 5000, 'epochs': 1000, 'optimizer': 'adagrad', 'sampling': "uniform", 'neg_rate': 10, 'lmbda': 0.0001},
            'distmult': {'learning_rate': 0.1, 'hidden_size': 100, 'batch_size': 50000, 'epochs': 1000, 'data': 'Freebase15k', 'optimizer': 'adagrad', 'sampling': "uniform", 'neg_rate': 1, 'lmbda': 0.0001},
        }
        # 'fb15k' is accepted as an alias for 'freebase15k'; both names
        # refer to the same settings table.
        self.hyperparams_paper = {
            'freebase15k': fb15k_settings,
            'fb15k': fb15k_settings,
        }

    def load_hyperparameter(self, dataset_name, algorithm):
        """Return the paper settings dict for (dataset, algorithm).

        Both names are matched case-insensitively. Raises Exception when
        the combination has no recorded experimental setting.
        """
        d_name = dataset_name.lower()
        a_name = algorithm.lower()
        dataset_table = self.hyperparams_paper.get(d_name)
        if dataset_table is None or a_name not in dataset_table:
            raise Exception("We have not explored this experimental setting! (%s, %s)"%(dataset_name, algorithm))
        return dataset_table[a_name]
class KGETuneArgParser:
    """Command-line argument parser for the bayesian hyper-parameter tuner.

    Wraps ArgumentParser and registers the options accepted when tuning
    a knowledge-graph-embedding model.

    Args:
        model (str): Name of the model/algorithm to be tuned.
        debug (bool): If True, tunes the model in debugging mode.

    Examples:
        >>> from pykg2vec.config.hyperparams import KGETuneArgParser
        >>> from pykg2vec.utils.bayesian_optimizer import BaysOptimizer
        >>> args = KGETuneArgParser().get_args()
        >>> bays_opt = BaysOptimizer(args=args)

    Todo:
        * Add more arguments!.
    """

    def __init__(self):
        self.parser = ArgumentParser(description='Knowledge Graph Embedding tunable configs.')
        add = self.parser.add_argument
        # Basic configuration flags.
        add('-mn', dest='model', default='TransE', type=str, help='Model to tune')
        # The string-to-bool conversion accepts any casing of "true".
        add('-db', dest='debug', default=False, type=lambda x: (str(x).lower() == 'true'),
            help='To use debug mode or not.')
        add('-ds', dest='dataset_name', default='Freebase15k', type=str, help='The dataset name (choice: fb15k/wn18/wn18_rr/yago/fb15k_237/ks/nations/umls)')
        add('-dsp', dest='dataset_path', default=None, type=str, help='The path to custom dataset.')
        add('-mt', dest='max_number_trials', default=100, type=int, help='The maximum times of trials for bayesian optimizer.')

    def get_args(self, args):
        """Parse the given console argument list and return the namespace."""
        return self.parser.parse_args(args)
class TransEParams:
    """Hyper-parameter search space for tuning the TransE algorithm.

    Edit the ranges below to change the space explored by the bayesian
    optimizer; each entry is a hyperopt sampling expression.
    """

    def __init__(self):
        def _int_qlog(label, low, high):
            # Quantized log-uniform integer draw in [low, high].
            return scope.int(hp.qloguniform(label, np.log(low), np.log(high), 1))

        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'L1_flag': hp.choice('L1_flag', [True, False]),
            'hidden_size': _int_qlog('hidden_size', 8, 512),
            'batch_size': _int_qlog('batch_size', 8, 4096),
            'margin': hp.uniform('margin', 0.0, 10.0),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'epochs': hp.choice('epochs', [500]),  # fixed epoch count per trial
        }
class TransHParams:
    """Hyper-parameter search space for tuning the TransH algorithm.

    Edit the ranges below to change the space explored by the bayesian
    optimizer; each entry is a hyperopt sampling expression.
    """

    def __init__(self):
        def _int_qlog(label, low, high):
            # Quantized log-uniform integer draw in [low, high].
            return scope.int(hp.qloguniform(label, np.log(low), np.log(high), 1))

        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'L1_flag': hp.choice('L1_flag', [True, False]),
            'hidden_size': _int_qlog('hidden_size', 8, 512),
            'batch_size': _int_qlog('batch_size', 8, 4096),
            'margin': hp.uniform('margin', 0.0, 2.0),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'epochs': hp.choice('epochs', [10]),  # fixed epoch count per trial
        }
class TransMParams:
    """Hyper-parameter search space for tuning the TransM algorithm.

    Edit the ranges below to change the space explored by the bayesian
    optimizer; each entry is a hyperopt sampling expression.
    """

    def __init__(self):
        def _int_qlog(label, low, high):
            # Quantized log-uniform integer draw in [low, high].
            return scope.int(hp.qloguniform(label, np.log(low), np.log(high), 1))

        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'L1_flag': hp.choice('L1_flag', [True, False]),
            'hidden_size': _int_qlog('hidden_size', 8, 512),
            'batch_size': _int_qlog('batch_size', 8, 4096),
            'margin': hp.uniform('margin', 0.0, 2.0),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'epochs': hp.choice('epochs', [10]),  # fixed epoch count per trial
        }
class RescalParams:
    """Hyper-parameter search space for tuning the Rescal algorithm.

    Edit the ranges below to change the space explored by the bayesian
    optimizer; each entry is a hyperopt sampling expression.
    """

    def __init__(self):
        def _int_qlog(label, low, high):
            # Quantized log-uniform integer draw in [low, high].
            return scope.int(hp.qloguniform(label, np.log(low), np.log(high), 1))

        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'L1_flag': hp.choice('L1_flag', [True, False]),
            'hidden_size': _int_qlog('hidden_size', 8, 512),
            'batch_size': _int_qlog('batch_size', 8, 4096),
            'margin': hp.uniform('margin', 0.0, 2.0),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'epochs': hp.choice('epochs', [10]),  # fixed epoch count per trial
        }
class SMEParams:
    """Hyper-parameter search space for tuning the SME algorithm.

    Edit the ranges below to change the space explored by the bayesian
    optimizer; each entry is a hyperopt sampling expression. SME adds a
    'bilinear' switch selecting the bilinear variant of the model.
    """

    def __init__(self):
        def _int_qlog(label, low, high):
            # Quantized log-uniform integer draw in [low, high].
            return scope.int(hp.qloguniform(label, np.log(low), np.log(high), 1))

        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'L1_flag': hp.choice('L1_flag', [True, False]),
            'hidden_size': _int_qlog('hidden_size', 8, 512),
            'batch_size': _int_qlog('batch_size', 8, 4096),
            'margin': hp.uniform('margin', 0.0, 2.0),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'bilinear': hp.choice('bilinear', [True, False]),
            'epochs': hp.choice('epochs', [10]),  # fixed epoch count per trial
        }
class TransDParams:
    """Hyper-parameter search space for tuning the TransD algorithm.

    Edit the ranges below to change the space explored by the bayesian
    optimizer; each entry is a hyperopt sampling expression.
    """

    def __init__(self):
        def _int_qlog(label, low, high):
            # Quantized log-uniform integer draw in [low, high].
            return scope.int(hp.qloguniform(label, np.log(low), np.log(high), 1))

        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'L1_flag': hp.choice('L1_flag', [True, False]),
            'hidden_size': _int_qlog('hidden_size', 8, 512),
            'batch_size': _int_qlog('batch_size', 8, 4096),
            'margin': hp.uniform('margin', 0.0, 2.0),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'epochs': hp.choice('epochs', [10]),  # fixed epoch count per trial
        }
class TransRParams:
    """Hyper-parameter search space for tuning the TransR algorithm.

    Edit the ranges below to change the space explored by the bayesian
    optimizer; each entry is a hyperopt sampling expression. TransR uses
    separate entity and relation embedding sizes.
    """

    def __init__(self):
        def _int_qlog(label, low, high):
            # Quantized log-uniform integer draw in [low, high].
            return scope.int(hp.qloguniform(label, np.log(low), np.log(high), 1))

        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'L1_flag': hp.choice('L1_flag', [True, False]),
            'ent_hidden_size': _int_qlog('ent_hidden_size', 8, 512),
            'rel_hidden_size': _int_qlog('rel_hidden_size', 8, 512),
            'batch_size': _int_qlog('batch_size', 8, 4096),
            'margin': hp.uniform('margin', 0.0, 2.0),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'epochs': hp.choice('epochs', [10]),  # fixed epoch count per trial
        }
class NTNParams:
    """Hyper-parameter search space for tuning the NTN algorithm.

    Edit the ranges below to change the space explored by the bayesian
    optimizer; each entry is a hyperopt sampling expression. NTN caps
    the entity/relation embedding sizes at 64 (the tensor layer's memory
    grows quickly with embedding size).
    """

    def __init__(self):
        def _int_qlog(label, low, high):
            # Quantized log-uniform integer draw in [low, high].
            return scope.int(hp.qloguniform(label, np.log(low), np.log(high), 1))

        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'L1_flag': hp.choice('L1_flag', [True, False]),
            'ent_hidden_size': _int_qlog('ent_hidden_size', 8, 64),
            'rel_hidden_size': _int_qlog('rel_hidden_size', 8, 64),
            'batch_size': _int_qlog('batch_size', 8, 4096),
            'margin': hp.uniform('margin', 0.0, 2.0),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'epochs': hp.choice('epochs', [10]),  # fixed epoch count per trial
        }
class SLMParams:
    """Hyperparameter search space for tuning the SLM algorithm.

    Defines a hyperopt-compatible ``search_space`` sampled during bayesian
    optimization of the hyper-parameters. Users may change the ranges below
    directly.

    Attributes:
        search_space (dict): hyperopt sampling expressions, keyed by name:
            learning_rate: log-uniform over [1e-5, 0.1].
            L1_flag: choice between True and False.
            ent_hidden_size: quantized log-uniform integer in [8, 512].
            rel_hidden_size: quantized log-uniform integer in [8, 512].
            batch_size: quantized log-uniform integer in [8, 4096].
            margin: uniform over [0.0, 2.0].
            optimizer: one of "adam", "sgd", "rms".
            epochs: fixed at 10.
    """
    def __init__(self):
        # hp and scope come from hyperopt (imported at module level); each
        # entry is a stochastic expression evaluated by the tuner, not a
        # concrete value.
        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'L1_flag': hp.choice('L1_flag', [True, False]),
            'ent_hidden_size': scope.int(hp.qloguniform('ent_hidden_size', np.log(8), np.log(512),1)),
            'rel_hidden_size': scope.int(hp.qloguniform('rel_hidden_size', np.log(8), np.log(512),1)),
            'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
            'margin': hp.uniform('margin', 0.0, 2.0),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        }
# self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
# self.L1_flag = [True, False]
# self.ent_hidden_size = [8, 16, 32, 64, 128, 256]
# self.rel_hidden_size = [8, 16, 32, 64, 128, 256]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.margin = [0.4, 1.0, 2.0]
# self.optimizer = ["adam", "sgd", 'rms']
# self.sampling = ["uniform", "bern"]
class HoLEParams:
    """Hyperparameter search space for tuning the HoLE algorithm.

    Defines a hyperopt-compatible ``search_space`` sampled during bayesian
    optimization of the hyper-parameters. Users may change the ranges below
    directly.

    Attributes:
        search_space (dict): hyperopt sampling expressions, keyed by name:
            learning_rate: log-uniform over [1e-5, 0.1].
            L1_flag: choice between True and False.
            hidden_size: quantized log-uniform integer in [8, 512].
            batch_size: quantized log-uniform integer in [8, 4096].
            margin: uniform over [0.0, 2.0].
            optimizer: one of "adam", "sgd", "rms".
            epochs: fixed at 10.
    """
    def __init__(self):
        # hp and scope come from hyperopt (imported at module level); each
        # entry is a stochastic expression evaluated by the tuner, not a
        # concrete value.
        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'L1_flag': hp.choice('L1_flag', [True, False]),
            'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
            'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
            'margin': hp.uniform('margin', 0.0, 2.0),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        }
# self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
# self.L1_flag = [True, False]
# self.hidden_size = [8, 16, 32, 64, 128, 256]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.margin = [0.4, 1.0, 2.0]
# self.optimizer = ["adam", "sgd", 'rms']
# self.sampling = ["uniform", "bern"]
class RotatEParams:
    """Hyperparameter search space for tuning the RotatE algorithm.

    Defines a hyperopt-compatible ``search_space`` sampled during bayesian
    optimization of the hyper-parameters. Users may change the ranges below
    directly.

    Attributes:
        search_space (dict): hyperopt sampling expressions, keyed by name:
            learning_rate: log-uniform over [1e-5, 0.1].
            L1_flag: choice between True and False.
            hidden_size: quantized log-uniform integer in [8, 512].
            batch_size: quantized log-uniform integer in [8, 4096].
            margin: uniform over [0.0, 2.0].
            optimizer: one of "adam", "sgd", "rms".
            epochs: fixed at 10.
    """
    def __init__(self):
        # hp and scope come from hyperopt (imported at module level); each
        # entry is a stochastic expression evaluated by the tuner, not a
        # concrete value.
        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'L1_flag': hp.choice('L1_flag', [True, False]),
            'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
            'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
            'margin': hp.uniform('margin', 0.0, 2.0),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        }
# self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
# self.L1_flag = [True, False]
# self.hidden_size = [8, 16, 32, 64, 128, 256]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.margin = [0.4, 1.0, 2.0]
# self.optimizer = ["adam", "sgd", 'rms']
# self.sampling = ["uniform", "bern"]
class ConvEParams:
    """Hyperparameter grid for tuning the ConvE algorithm.

    Every attribute holds the list of candidate values explored during
    hyper-parameter optimization; users may edit the lists directly.

    Attributes:
        lmbda (list): Regularization lambda candidates.
        feature_map_dropout (list): Dropout rates for the feature maps.
        input_dropout (list): Dropout rates for the input layer.
        hidden_dropout (list): Dropout rates for the hidden layer.
        use_bias (list): Whether to use a bias term.
        label_smoothing (list): Label smoothing factors.
        lr_decay (list): Learning-rate decay factors.
        learning_rate (list): Learning-rate candidates.
        L1_flag (list): Whether to use the L1 norm.
        hidden_size (list): Embedding sizes.
        batch_size (list): Mini-batch sizes.
        epochs (list): Training epoch counts.
        margin (list): Margin values for the ranking loss.
        optimizer (list): Optimization algorithm names.
        sampling (list): Negative-sampling strategies.
    """

    def __init__(self):
        # Candidate values per hyperparameter, laid out as one table and
        # assigned in a loop so additions stay in a single place.
        grid = {
            "lmbda": [0.1, 0.2],
            "feature_map_dropout": [0.1, 0.2, 0.5],
            "input_dropout": [0.1, 0.2, 0.5],
            "hidden_dropout": [0.1, 0.2, 0.5],
            "use_bias": [True, False],
            "label_smoothing": [0.1, 0.2, 0.5],
            "lr_decay": [0.95, 0.9, 0.8],
            "learning_rate": [0.00001, 0.0001, 0.001, 0.01, 0.1, 1],
            "L1_flag": [True, False],
            "hidden_size": [50],
            "batch_size": [200, 400, 600],
            "epochs": [2, 5, 10],
            "margin": [0.4, 1.0, 2.0],
            "optimizer": ["adam", "sgd", "rms"],
            "sampling": ["uniform", "bern"],
        }
        for name, candidates in grid.items():
            setattr(self, name, candidates)
class ProjE_pointwiseParams:
    """Hyperparameter grid for tuning the ProjE_pointwise algorithm.

    Every attribute holds the list of candidate values explored during
    hyper-parameter optimization; users may edit the lists directly.

    Attributes:
        lmbda (list): Regularization lambda candidates.
        feature_map_dropout (list): Dropout rates for the feature maps.
        input_dropout (list): Dropout rates for the input layer.
        hidden_dropout (list): Dropout rates for the hidden layer.
        use_bias (list): Whether to use a bias term.
        label_smoothing (list): Label smoothing factors.
        lr_decay (list): Learning-rate decay factors.
        learning_rate (list): Learning-rate candidates.
        L1_flag (list): Whether to use the L1 norm.
        hidden_size (list): Embedding sizes.
        batch_size (list): Mini-batch sizes.
        epochs (list): Training epoch counts.
        margin (list): Margin values for the ranking loss.
        optimizer (list): Optimization algorithm names.
        sampling (list): Negative-sampling strategies.
    """

    def __init__(self):
        # Candidate values per hyperparameter, laid out as one table and
        # assigned in a loop so additions stay in a single place.
        grid = {
            "lmbda": [0.1, 0.2],
            "feature_map_dropout": [0.1, 0.2, 0.5],
            "input_dropout": [0.1, 0.2, 0.5],
            "hidden_dropout": [0.1, 0.2, 0.5],
            "use_bias": [True, False],
            "label_smoothing": [0.1, 0.2, 0.5],
            "lr_decay": [0.95, 0.9, 0.8],
            "learning_rate": [0.00001, 0.0001, 0.001, 0.01, 0.1, 1],
            "L1_flag": [True, False],
            "hidden_size": [8, 16],
            "batch_size": [256, 512],
            "epochs": [2, 5, 10],
            "margin": [0.4, 1.0, 2.0],
            "optimizer": ["adam", "sgd", "rms"],
            "sampling": ["uniform", "bern"],
        }
        for name, candidates in grid.items():
            setattr(self, name, candidates)
class KG2EParams:
    """Hyperparameter search space for tuning the KG2E algorithm.

    Defines a hyperopt-compatible ``search_space`` sampled during bayesian
    optimization of the hyper-parameters. Users may change the ranges below
    directly.

    Attributes:
        search_space (dict): hyperopt sampling expressions, keyed by name:
            learning_rate: log-uniform over [1e-5, 0.1].
            L1_flag: choice between True and False.
            hidden_size: quantized log-uniform integer in [8, 512].
            batch_size: quantized log-uniform integer in [8, 4096].
            lmbda: log-uniform over [1e-5, 1e-3].
            optimizer: one of "adam", "sgd", "rms".
            margin: uniform over [0.5, 8.0].
            distance_measure: "kl_divergence" or "expected_likelihood".
            cmax: log-uniform over [0.05, 0.2].
            cmin: log-uniform over [1, 5].
            epochs: fixed at 10.
    """
    def __init__(self):
        # self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
        # self.L1_flag = [True, False]
        # self.hidden_size = [8, 16, 32, 64, 128, 256]
        # self.batch_size = [128, 256, 512]
        # self.epochs = [2, 5, 10]
        # self.margin = [0.4, 1.0, 2.0]
        # self.optimizer = ["adam", "sgd", 'rms']
        # self.distance_measure = ["kl_divergence", "expected_likelihood"]
        # self.cmax = [0.05, 0.1, 0.2]
        # self.cmin = [5.00, 3.00, 2.00, 1.00]
        # hp and scope come from hyperopt (imported at module level); each
        # entry is a stochastic expression evaluated by the tuner, not a
        # concrete value.
        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'L1_flag': hp.choice('L1_flag', [True, False]),
            'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
            'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
            'lmbda': hp.loguniform('lmbda', np.log(0.00001), np.log(0.001)),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'margin': hp.uniform('margin', 0.5, 8.0),
            'distance_measure': hp.choice('distance_measure', ["kl_divergence", "expected_likelihood"]),
            'cmax': hp.loguniform('cmax', np.log(0.05), np.log(0.2)),
            'cmin': hp.loguniform('cmin', np.log(1), np.log(5)),
            'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        }
class ComplexParams:
    """Hyperparameter search space for tuning the Complex algorithm.

    Defines a hyperopt-compatible ``search_space`` sampled during bayesian
    optimization of the hyper-parameters. Users may change the ranges below
    directly.

    Attributes:
        search_space (dict): hyperopt sampling expressions, keyed by name:
            learning_rate: log-uniform over [1e-5, 0.1].
            hidden_size: quantized log-uniform integer in [8, 512].
            batch_size: quantized log-uniform integer in [8, 4096].
            lmbda: log-uniform over [1e-5, 1e-3].
            optimizer: one of "adam", "sgd", "rms".
            epochs: fixed at 10.
    """
    def __init__(self):
        # hp and scope come from hyperopt (imported at module level); each
        # entry is a stochastic expression evaluated by the tuner, not a
        # concrete value.
        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
            'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
            'lmbda': hp.loguniform('lmbda', np.log(0.00001), np.log(0.001)),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        }
# self.lmbda = [0.1, 0.2]
# self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
# self.hidden_size = [8, 16, 32, 64, 128, 256]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.optimizer = ["adam", "sgd", 'rms']
# self.sampling = ["uniform", "bern"]
class DistMultParams:
    """Hyperparameter search space for tuning the DistMult algorithm.

    Defines a hyperopt-compatible ``search_space`` sampled during bayesian
    optimization of the hyper-parameters. Users may change the ranges below
    directly.

    Attributes:
        search_space (dict): hyperopt sampling expressions, keyed by name:
            learning_rate: log-uniform over [1e-5, 0.1].
            hidden_size: quantized log-uniform integer in [8, 512].
            batch_size: quantized log-uniform integer in [8, 4096].
            lmbda: log-uniform over [1e-5, 1e-3].
            optimizer: one of "adam", "sgd", "rms".
            epochs: fixed at 10.
    """
    def __init__(self):
        # hp and scope come from hyperopt (imported at module level); each
        # entry is a stochastic expression evaluated by the tuner, not a
        # concrete value.
        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
            'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
            'lmbda': hp.loguniform('lmbda', np.log(0.00001), np.log(0.001)),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        }
# self.lmbda = [0.1, 0.2]
# self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
# self.hidden_size = [8, 16, 32, 64, 128, 256]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.optimizer = ["adam", "sgd", 'rms']
# self.sampling = ["uniform", "bern"]
class TuckERParams:
    """Hyperparameter grid for tuning the TuckER algorithm.

    Every attribute holds the list of candidate values explored during
    hyper-parameter optimization; users may edit the lists directly.

    Attributes:
        lmbda (list): Regularization lambda candidates.
        feature_map_dropout (list): Dropout rates for the feature maps.
        input_dropout (list): Dropout rates for the input layer.
        hidden_dropout (list): Dropout rates for the hidden layer.
        use_bias (list): Whether to use a bias term.
        label_smoothing (list): Label smoothing factors.
        lr_decay (list): Learning-rate decay factors.
        learning_rate (list): Learning-rate candidates.
        L1_flag (list): Whether to use the L1 norm.
        hidden_size (list): Embedding sizes.
        batch_size (list): Mini-batch sizes.
        epochs (list): Training epoch counts.
        margin (list): Margin values for the ranking loss.
        optimizer (list): Optimization algorithm names.
        sampling (list): Negative-sampling strategies.
    """

    def __init__(self):
        # Candidate values per hyperparameter, laid out as one table and
        # assigned in a loop so additions stay in a single place.
        grid = {
            "lmbda": [0.1, 0.2],
            "feature_map_dropout": [0.1, 0.2, 0.5],
            "input_dropout": [0.1, 0.2, 0.5],
            "hidden_dropout": [0.1, 0.2, 0.5],
            "use_bias": [True, False],
            "label_smoothing": [0.1, 0.2, 0.5],
            "lr_decay": [0.95, 0.9, 0.8],
            "learning_rate": [0.00001, 0.0001, 0.001, 0.01, 0.1, 1],
            "L1_flag": [True, False],
            "hidden_size": [8, 16, 32, 64, 128, 256],
            "batch_size": [128, 256, 512],
            "epochs": [2, 5, 10],
            "margin": [0.4, 1.0, 2.0],
            "optimizer": ["adam", "sgd", "rms"],
            "sampling": ["uniform", "bern"],
        }
        for name, candidates in grid.items():
            setattr(self, name, candidates)
class TransGParams:
    """Hyperparameter grid for tuning the TransG algorithm.

    Every attribute holds the list of candidate values explored during
    hyper-parameter optimization; users may edit the lists directly.

    Attributes:
        learning_rate (list): Learning-rate candidates.
        L1_flag (list): Whether to use the L1 norm.
        hidden_size (list): Embedding sizes.
        batch_size (list): Mini-batch sizes.
        epochs (list): Training epoch counts.
        margin (list): Margin values for the ranking loss.
        optimizer (list): Optimization algorithm names.
        sampling (list): Negative-sampling strategies.
        training_threshold (list): Training threshold candidates.
        ncluster (list): Candidate numbers of clusters.
        CRP_factor (list): Chinese-restaurant-process factor candidates.
        weight_norm (list): Whether to normalize the weights.
    """

    def __init__(self):
        # Candidate values per hyperparameter, laid out as one table and
        # assigned in a loop so additions stay in a single place.
        grid = {
            "learning_rate": [0.00001, 0.0001, 0.001, 0.01, 0.1, 1],
            "L1_flag": [True, False],
            "hidden_size": [8, 16, 32, 64, 128, 256],
            "batch_size": [128, 256, 512],
            "epochs": [2, 5, 10],
            "margin": [0.4, 1.0, 2.0],
            "optimizer": ["adam", "sgd", "rms"],
            "sampling": ["uniform", "bern"],
            "training_threshold": [1.0, 2.0, 3.0],
            "ncluster": [3, 4, 5, 6, 7],
            "CRP_factor": [0.01, 0.05, 0.1],
            "weight_norm": [True, False],
        }
        for name, candidates in grid.items():
            setattr(self, name, candidates)
| [
"hyperopt.hp.choice",
"numpy.log",
"hyperopt.hp.uniform",
"argparse.ArgumentParser"
] | [((4484, 4556), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Knowledge Graph Embedding tunable configs."""'}), "(description='Knowledge Graph Embedding tunable configs.')\n", (4498, 4556), False, 'from argparse import ArgumentParser\n'), ((6437, 6472), 'hyperopt.hp.choice', 'hp.choice', (['"""L1_flag"""', '[True, False]'], {}), "('L1_flag', [True, False])\n", (6446, 6472), False, 'from hyperopt import hp\n'), ((6679, 6710), 'hyperopt.hp.uniform', 'hp.uniform', (['"""margin"""', '(0.0)', '(10.0)'], {}), "('margin', 0.0, 10.0)\n", (6689, 6710), False, 'from hyperopt import hp\n'), ((6735, 6781), 'hyperopt.hp.choice', 'hp.choice', (['"""optimizer"""', "['adam', 'sgd', 'rms']"], {}), "('optimizer', ['adam', 'sgd', 'rms'])\n", (6744, 6781), False, 'from hyperopt import hp\n'), ((6803, 6829), 'hyperopt.hp.choice', 'hp.choice', (['"""epochs"""', '[500]'], {}), "('epochs', [500])\n", (6812, 6829), False, 'from hyperopt import hp\n'), ((7851, 7886), 'hyperopt.hp.choice', 'hp.choice', (['"""L1_flag"""', '[True, False]'], {}), "('L1_flag', [True, False])\n", (7860, 7886), False, 'from hyperopt import hp\n'), ((8093, 8123), 'hyperopt.hp.uniform', 'hp.uniform', (['"""margin"""', '(0.0)', '(2.0)'], {}), "('margin', 0.0, 2.0)\n", (8103, 8123), False, 'from hyperopt import hp\n'), ((8148, 8194), 'hyperopt.hp.choice', 'hp.choice', (['"""optimizer"""', "['adam', 'sgd', 'rms']"], {}), "('optimizer', ['adam', 'sgd', 'rms'])\n", (8157, 8194), False, 'from hyperopt import hp\n'), ((8216, 8241), 'hyperopt.hp.choice', 'hp.choice', (['"""epochs"""', '[10]'], {}), "('epochs', [10])\n", (8225, 8241), False, 'from hyperopt import hp\n'), ((9533, 9568), 'hyperopt.hp.choice', 'hp.choice', (['"""L1_flag"""', '[True, False]'], {}), "('L1_flag', [True, False])\n", (9542, 9568), False, 'from hyperopt import hp\n'), ((9775, 9805), 'hyperopt.hp.uniform', 'hp.uniform', (['"""margin"""', '(0.0)', '(2.0)'], {}), "('margin', 0.0, 2.0)\n", (9785, 9805), False, 'from hyperopt 
import hp\n'), ((9830, 9876), 'hyperopt.hp.choice', 'hp.choice', (['"""optimizer"""', "['adam', 'sgd', 'rms']"], {}), "('optimizer', ['adam', 'sgd', 'rms'])\n", (9839, 9876), False, 'from hyperopt import hp\n'), ((9898, 9923), 'hyperopt.hp.choice', 'hp.choice', (['"""epochs"""', '[10]'], {}), "('epochs', [10])\n", (9907, 9923), False, 'from hyperopt import hp\n'), ((11319, 11354), 'hyperopt.hp.choice', 'hp.choice', (['"""L1_flag"""', '[True, False]'], {}), "('L1_flag', [True, False])\n", (11328, 11354), False, 'from hyperopt import hp\n'), ((11561, 11591), 'hyperopt.hp.uniform', 'hp.uniform', (['"""margin"""', '(0.0)', '(2.0)'], {}), "('margin', 0.0, 2.0)\n", (11571, 11591), False, 'from hyperopt import hp\n'), ((11616, 11662), 'hyperopt.hp.choice', 'hp.choice', (['"""optimizer"""', "['adam', 'sgd', 'rms']"], {}), "('optimizer', ['adam', 'sgd', 'rms'])\n", (11625, 11662), False, 'from hyperopt import hp\n'), ((11684, 11709), 'hyperopt.hp.choice', 'hp.choice', (['"""epochs"""', '[10]'], {}), "('epochs', [10])\n", (11693, 11709), False, 'from hyperopt import hp\n'), ((13143, 13178), 'hyperopt.hp.choice', 'hp.choice', (['"""L1_flag"""', '[True, False]'], {}), "('L1_flag', [True, False])\n", (13152, 13178), False, 'from hyperopt import hp\n'), ((13385, 13415), 'hyperopt.hp.uniform', 'hp.uniform', (['"""margin"""', '(0.0)', '(2.0)'], {}), "('margin', 0.0, 2.0)\n", (13395, 13415), False, 'from hyperopt import hp\n'), ((13440, 13486), 'hyperopt.hp.choice', 'hp.choice', (['"""optimizer"""', "['adam', 'sgd', 'rms']"], {}), "('optimizer', ['adam', 'sgd', 'rms'])\n", (13449, 13486), False, 'from hyperopt import hp\n'), ((13510, 13546), 'hyperopt.hp.choice', 'hp.choice', (['"""bilinear"""', '[True, False]'], {}), "('bilinear', [True, False])\n", (13519, 13546), False, 'from hyperopt import hp\n'), ((13568, 13593), 'hyperopt.hp.choice', 'hp.choice', (['"""epochs"""', '[10]'], {}), "('epochs', [10])\n", (13577, 13593), False, 'from hyperopt import hp\n'), ((15034, 15069), 
'hyperopt.hp.choice', 'hp.choice', (['"""L1_flag"""', '[True, False]'], {}), "('L1_flag', [True, False])\n", (15043, 15069), False, 'from hyperopt import hp\n'), ((15276, 15306), 'hyperopt.hp.uniform', 'hp.uniform', (['"""margin"""', '(0.0)', '(2.0)'], {}), "('margin', 0.0, 2.0)\n", (15286, 15306), False, 'from hyperopt import hp\n'), ((15331, 15377), 'hyperopt.hp.choice', 'hp.choice', (['"""optimizer"""', "['adam', 'sgd', 'rms']"], {}), "('optimizer', ['adam', 'sgd', 'rms'])\n", (15340, 15377), False, 'from hyperopt import hp\n'), ((15399, 15424), 'hyperopt.hp.choice', 'hp.choice', (['"""epochs"""', '[10]'], {}), "('epochs', [10])\n", (15408, 15424), False, 'from hyperopt import hp\n'), ((16884, 16919), 'hyperopt.hp.choice', 'hp.choice', (['"""L1_flag"""', '[True, False]'], {}), "('L1_flag', [True, False])\n", (16893, 16919), False, 'from hyperopt import hp\n'), ((17235, 17265), 'hyperopt.hp.uniform', 'hp.uniform', (['"""margin"""', '(0.0)', '(2.0)'], {}), "('margin', 0.0, 2.0)\n", (17245, 17265), False, 'from hyperopt import hp\n'), ((17290, 17336), 'hyperopt.hp.choice', 'hp.choice', (['"""optimizer"""', "['adam', 'sgd', 'rms']"], {}), "('optimizer', ['adam', 'sgd', 'rms'])\n", (17299, 17336), False, 'from hyperopt import hp\n'), ((17358, 17383), 'hyperopt.hp.choice', 'hp.choice', (['"""epochs"""', '[10]'], {}), "('epochs', [10])\n", (17367, 17383), False, 'from hyperopt import hp\n'), ((18897, 18932), 'hyperopt.hp.choice', 'hp.choice', (['"""L1_flag"""', '[True, False]'], {}), "('L1_flag', [True, False])\n", (18906, 18932), False, 'from hyperopt import hp\n'), ((19246, 19276), 'hyperopt.hp.uniform', 'hp.uniform', (['"""margin"""', '(0.0)', '(2.0)'], {}), "('margin', 0.0, 2.0)\n", (19256, 19276), False, 'from hyperopt import hp\n'), ((19301, 19347), 'hyperopt.hp.choice', 'hp.choice', (['"""optimizer"""', "['adam', 'sgd', 'rms']"], {}), "('optimizer', ['adam', 'sgd', 'rms'])\n", (19310, 19347), False, 'from hyperopt import hp\n'), ((19369, 19394), 
'hyperopt.hp.choice', 'hp.choice', (['"""epochs"""', '[10]'], {}), "('epochs', [10])\n", (19378, 19394), False, 'from hyperopt import hp\n'), ((20880, 20915), 'hyperopt.hp.choice', 'hp.choice', (['"""L1_flag"""', '[True, False]'], {}), "('L1_flag', [True, False])\n", (20889, 20915), False, 'from hyperopt import hp\n'), ((21231, 21261), 'hyperopt.hp.uniform', 'hp.uniform', (['"""margin"""', '(0.0)', '(2.0)'], {}), "('margin', 0.0, 2.0)\n", (21241, 21261), False, 'from hyperopt import hp\n'), ((21286, 21332), 'hyperopt.hp.choice', 'hp.choice', (['"""optimizer"""', "['adam', 'sgd', 'rms']"], {}), "('optimizer', ['adam', 'sgd', 'rms'])\n", (21295, 21332), False, 'from hyperopt import hp\n'), ((21354, 21379), 'hyperopt.hp.choice', 'hp.choice', (['"""epochs"""', '[10]'], {}), "('epochs', [10])\n", (21363, 21379), False, 'from hyperopt import hp\n'), ((22838, 22873), 'hyperopt.hp.choice', 'hp.choice', (['"""L1_flag"""', '[True, False]'], {}), "('L1_flag', [True, False])\n", (22847, 22873), False, 'from hyperopt import hp\n'), ((23080, 23110), 'hyperopt.hp.uniform', 'hp.uniform', (['"""margin"""', '(0.0)', '(2.0)'], {}), "('margin', 0.0, 2.0)\n", (23090, 23110), False, 'from hyperopt import hp\n'), ((23135, 23181), 'hyperopt.hp.choice', 'hp.choice', (['"""optimizer"""', "['adam', 'sgd', 'rms']"], {}), "('optimizer', ['adam', 'sgd', 'rms'])\n", (23144, 23181), False, 'from hyperopt import hp\n'), ((23203, 23228), 'hyperopt.hp.choice', 'hp.choice', (['"""epochs"""', '[10]'], {}), "('epochs', [10])\n", (23212, 23228), False, 'from hyperopt import hp\n'), ((24630, 24665), 'hyperopt.hp.choice', 'hp.choice', (['"""L1_flag"""', '[True, False]'], {}), "('L1_flag', [True, False])\n", (24639, 24665), False, 'from hyperopt import hp\n'), ((24872, 24902), 'hyperopt.hp.uniform', 'hp.uniform', (['"""margin"""', '(0.0)', '(2.0)'], {}), "('margin', 0.0, 2.0)\n", (24882, 24902), False, 'from hyperopt import hp\n'), ((24927, 24973), 'hyperopt.hp.choice', 'hp.choice', (['"""optimizer"""', 
"['adam', 'sgd', 'rms']"], {}), "('optimizer', ['adam', 'sgd', 'rms'])\n", (24936, 24973), False, 'from hyperopt import hp\n'), ((24995, 25020), 'hyperopt.hp.choice', 'hp.choice', (['"""epochs"""', '[10]'], {}), "('epochs', [10])\n", (25004, 25020), False, 'from hyperopt import hp\n'), ((30904, 30939), 'hyperopt.hp.choice', 'hp.choice', (['"""L1_flag"""', '[True, False]'], {}), "('L1_flag', [True, False])\n", (30913, 30939), False, 'from hyperopt import hp\n'), ((31224, 31270), 'hyperopt.hp.choice', 'hp.choice', (['"""optimizer"""', "['adam', 'sgd', 'rms']"], {}), "('optimizer', ['adam', 'sgd', 'rms'])\n", (31233, 31270), False, 'from hyperopt import hp\n'), ((31292, 31322), 'hyperopt.hp.uniform', 'hp.uniform', (['"""margin"""', '(0.5)', '(8.0)'], {}), "('margin', 0.5, 8.0)\n", (31302, 31322), False, 'from hyperopt import hp\n'), ((31354, 31425), 'hyperopt.hp.choice', 'hp.choice', (['"""distance_measure"""', "['kl_divergence', 'expected_likelihood']"], {}), "('distance_measure', ['kl_divergence', 'expected_likelihood'])\n", (31363, 31425), False, 'from hyperopt import hp\n'), ((31578, 31603), 'hyperopt.hp.choice', 'hp.choice', (['"""epochs"""', '[10]'], {}), "('epochs', [10])\n", (31587, 31603), False, 'from hyperopt import hp\n'), ((33290, 33336), 'hyperopt.hp.choice', 'hp.choice', (['"""optimizer"""', "['adam', 'sgd', 'rms']"], {}), "('optimizer', ['adam', 'sgd', 'rms'])\n", (33299, 33336), False, 'from hyperopt import hp\n'), ((33358, 33383), 'hyperopt.hp.choice', 'hp.choice', (['"""epochs"""', '[10]'], {}), "('epochs', [10])\n", (33367, 33383), False, 'from hyperopt import hp\n'), ((35412, 35458), 'hyperopt.hp.choice', 'hp.choice', (['"""optimizer"""', "['adam', 'sgd', 'rms']"], {}), "('optimizer', ['adam', 'sgd', 'rms'])\n", (35421, 35458), False, 'from hyperopt import hp\n'), ((35480, 35505), 'hyperopt.hp.choice', 'hp.choice', (['"""epochs"""', '[10]'], {}), "('epochs', [10])\n", (35489, 35505), False, 'from hyperopt import hp\n'), ((6385, 6398), 'numpy.log', 
'np.log', (['(1e-05)'], {}), '(1e-05)\n', (6391, 6398), True, 'import numpy as np\n'), ((6402, 6413), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (6408, 6413), True, 'import numpy as np\n'), ((7799, 7812), 'numpy.log', 'np.log', (['(1e-05)'], {}), '(1e-05)\n', (7805, 7812), True, 'import numpy as np\n'), ((7816, 7827), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (7822, 7827), True, 'import numpy as np\n'), ((9481, 9494), 'numpy.log', 'np.log', (['(1e-05)'], {}), '(1e-05)\n', (9487, 9494), True, 'import numpy as np\n'), ((9498, 9509), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (9504, 9509), True, 'import numpy as np\n'), ((11267, 11280), 'numpy.log', 'np.log', (['(1e-05)'], {}), '(1e-05)\n', (11273, 11280), True, 'import numpy as np\n'), ((11284, 11295), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (11290, 11295), True, 'import numpy as np\n'), ((13091, 13104), 'numpy.log', 'np.log', (['(1e-05)'], {}), '(1e-05)\n', (13097, 13104), True, 'import numpy as np\n'), ((13108, 13119), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (13114, 13119), True, 'import numpy as np\n'), ((14982, 14995), 'numpy.log', 'np.log', (['(1e-05)'], {}), '(1e-05)\n', (14988, 14995), True, 'import numpy as np\n'), ((14999, 15010), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (15005, 15010), True, 'import numpy as np\n'), ((16832, 16845), 'numpy.log', 'np.log', (['(1e-05)'], {}), '(1e-05)\n', (16838, 16845), True, 'import numpy as np\n'), ((16849, 16860), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (16855, 16860), True, 'import numpy as np\n'), ((18845, 18858), 'numpy.log', 'np.log', (['(1e-05)'], {}), '(1e-05)\n', (18851, 18858), True, 'import numpy as np\n'), ((18862, 18873), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (18868, 18873), True, 'import numpy as np\n'), ((20828, 20841), 'numpy.log', 'np.log', (['(1e-05)'], {}), '(1e-05)\n', (20834, 20841), True, 'import numpy as np\n'), ((20845, 20856), 'numpy.log', 'np.log', (['(0.1)'], {}), 
'(0.1)\n', (20851, 20856), True, 'import numpy as np\n'), ((22786, 22799), 'numpy.log', 'np.log', (['(1e-05)'], {}), '(1e-05)\n', (22792, 22799), True, 'import numpy as np\n'), ((22803, 22814), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (22809, 22814), True, 'import numpy as np\n'), ((24578, 24591), 'numpy.log', 'np.log', (['(1e-05)'], {}), '(1e-05)\n', (24584, 24591), True, 'import numpy as np\n'), ((24595, 24606), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (24601, 24606), True, 'import numpy as np\n'), ((30852, 30865), 'numpy.log', 'np.log', (['(1e-05)'], {}), '(1e-05)\n', (30858, 30865), True, 'import numpy as np\n'), ((30869, 30880), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (30875, 30880), True, 'import numpy as np\n'), ((31168, 31181), 'numpy.log', 'np.log', (['(1e-05)'], {}), '(1e-05)\n', (31174, 31181), True, 'import numpy as np\n'), ((31185, 31198), 'numpy.log', 'np.log', (['(0.001)'], {}), '(0.001)\n', (31191, 31198), True, 'import numpy as np\n'), ((31467, 31479), 'numpy.log', 'np.log', (['(0.05)'], {}), '(0.05)\n', (31473, 31479), True, 'import numpy as np\n'), ((31481, 31492), 'numpy.log', 'np.log', (['(0.2)'], {}), '(0.2)\n', (31487, 31492), True, 'import numpy as np\n'), ((31535, 31544), 'numpy.log', 'np.log', (['(1)'], {}), '(1)\n', (31541, 31544), True, 'import numpy as np\n'), ((31546, 31555), 'numpy.log', 'np.log', (['(5)'], {}), '(5)\n', (31552, 31555), True, 'import numpy as np\n'), ((32976, 32989), 'numpy.log', 'np.log', (['(1e-05)'], {}), '(1e-05)\n', (32982, 32989), True, 'import numpy as np\n'), ((32993, 33004), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (32999, 33004), True, 'import numpy as np\n'), ((33234, 33247), 'numpy.log', 'np.log', (['(1e-05)'], {}), '(1e-05)\n', (33240, 33247), True, 'import numpy as np\n'), ((33251, 33264), 'numpy.log', 'np.log', (['(0.001)'], {}), '(0.001)\n', (33257, 33264), True, 'import numpy as np\n'), ((35098, 35111), 'numpy.log', 'np.log', (['(1e-05)'], {}), '(1e-05)\n', 
(35104, 35111), True, 'import numpy as np\n'), ((35115, 35126), 'numpy.log', 'np.log', (['(0.1)'], {}), '(0.1)\n', (35121, 35126), True, 'import numpy as np\n'), ((35356, 35369), 'numpy.log', 'np.log', (['(1e-05)'], {}), '(1e-05)\n', (35362, 35369), True, 'import numpy as np\n'), ((35373, 35386), 'numpy.log', 'np.log', (['(0.001)'], {}), '(0.001)\n', (35379, 35386), True, 'import numpy as np\n'), ((6539, 6548), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (6545, 6548), True, 'import numpy as np\n'), ((6550, 6561), 'numpy.log', 'np.log', (['(512)'], {}), '(512)\n', (6556, 6561), True, 'import numpy as np\n'), ((6630, 6639), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (6636, 6639), True, 'import numpy as np\n'), ((6641, 6653), 'numpy.log', 'np.log', (['(4096)'], {}), '(4096)\n', (6647, 6653), True, 'import numpy as np\n'), ((7953, 7962), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (7959, 7962), True, 'import numpy as np\n'), ((7964, 7975), 'numpy.log', 'np.log', (['(512)'], {}), '(512)\n', (7970, 7975), True, 'import numpy as np\n'), ((8044, 8053), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (8050, 8053), True, 'import numpy as np\n'), ((8055, 8067), 'numpy.log', 'np.log', (['(4096)'], {}), '(4096)\n', (8061, 8067), True, 'import numpy as np\n'), ((9635, 9644), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (9641, 9644), True, 'import numpy as np\n'), ((9646, 9657), 'numpy.log', 'np.log', (['(512)'], {}), '(512)\n', (9652, 9657), True, 'import numpy as np\n'), ((9726, 9735), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (9732, 9735), True, 'import numpy as np\n'), ((9737, 9749), 'numpy.log', 'np.log', (['(4096)'], {}), '(4096)\n', (9743, 9749), True, 'import numpy as np\n'), ((11421, 11430), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (11427, 11430), True, 'import numpy as np\n'), ((11432, 11443), 'numpy.log', 'np.log', (['(512)'], {}), '(512)\n', (11438, 11443), True, 'import numpy as np\n'), ((11512, 11521), 'numpy.log', 'np.log', (['(8)'], {}), 
'(8)\n', (11518, 11521), True, 'import numpy as np\n'), ((11523, 11535), 'numpy.log', 'np.log', (['(4096)'], {}), '(4096)\n', (11529, 11535), True, 'import numpy as np\n'), ((13245, 13254), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (13251, 13254), True, 'import numpy as np\n'), ((13256, 13267), 'numpy.log', 'np.log', (['(512)'], {}), '(512)\n', (13262, 13267), True, 'import numpy as np\n'), ((13336, 13345), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (13342, 13345), True, 'import numpy as np\n'), ((13347, 13359), 'numpy.log', 'np.log', (['(4096)'], {}), '(4096)\n', (13353, 13359), True, 'import numpy as np\n'), ((15136, 15145), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (15142, 15145), True, 'import numpy as np\n'), ((15147, 15158), 'numpy.log', 'np.log', (['(512)'], {}), '(512)\n', (15153, 15158), True, 'import numpy as np\n'), ((15227, 15236), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (15233, 15236), True, 'import numpy as np\n'), ((15238, 15250), 'numpy.log', 'np.log', (['(4096)'], {}), '(4096)\n', (15244, 15250), True, 'import numpy as np\n'), ((16994, 17003), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (17000, 17003), True, 'import numpy as np\n'), ((17005, 17016), 'numpy.log', 'np.log', (['(512)'], {}), '(512)\n', (17011, 17016), True, 'import numpy as np\n'), ((17095, 17104), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (17101, 17104), True, 'import numpy as np\n'), ((17106, 17117), 'numpy.log', 'np.log', (['(512)'], {}), '(512)\n', (17112, 17117), True, 'import numpy as np\n'), ((17186, 17195), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (17192, 17195), True, 'import numpy as np\n'), ((17197, 17209), 'numpy.log', 'np.log', (['(4096)'], {}), '(4096)\n', (17203, 17209), True, 'import numpy as np\n'), ((19007, 19016), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (19013, 19016), True, 'import numpy as np\n'), ((19018, 19028), 'numpy.log', 'np.log', (['(64)'], {}), '(64)\n', (19024, 19028), True, 'import numpy as np\n'), ((19107, 
19116), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (19113, 19116), True, 'import numpy as np\n'), ((19118, 19128), 'numpy.log', 'np.log', (['(64)'], {}), '(64)\n', (19124, 19128), True, 'import numpy as np\n'), ((19197, 19206), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (19203, 19206), True, 'import numpy as np\n'), ((19208, 19220), 'numpy.log', 'np.log', (['(4096)'], {}), '(4096)\n', (19214, 19220), True, 'import numpy as np\n'), ((20990, 20999), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (20996, 20999), True, 'import numpy as np\n'), ((21001, 21012), 'numpy.log', 'np.log', (['(512)'], {}), '(512)\n', (21007, 21012), True, 'import numpy as np\n'), ((21091, 21100), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (21097, 21100), True, 'import numpy as np\n'), ((21102, 21113), 'numpy.log', 'np.log', (['(512)'], {}), '(512)\n', (21108, 21113), True, 'import numpy as np\n'), ((21182, 21191), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (21188, 21191), True, 'import numpy as np\n'), ((21193, 21205), 'numpy.log', 'np.log', (['(4096)'], {}), '(4096)\n', (21199, 21205), True, 'import numpy as np\n'), ((22940, 22949), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (22946, 22949), True, 'import numpy as np\n'), ((22951, 22962), 'numpy.log', 'np.log', (['(512)'], {}), '(512)\n', (22957, 22962), True, 'import numpy as np\n'), ((23031, 23040), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (23037, 23040), True, 'import numpy as np\n'), ((23042, 23054), 'numpy.log', 'np.log', (['(4096)'], {}), '(4096)\n', (23048, 23054), True, 'import numpy as np\n'), ((24732, 24741), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (24738, 24741), True, 'import numpy as np\n'), ((24743, 24754), 'numpy.log', 'np.log', (['(512)'], {}), '(512)\n', (24749, 24754), True, 'import numpy as np\n'), ((24823, 24832), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (24829, 24832), True, 'import numpy as np\n'), ((24834, 24846), 'numpy.log', 'np.log', (['(4096)'], {}), '(4096)\n', (24840, 24846), 
True, 'import numpy as np\n'), ((31006, 31015), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (31012, 31015), True, 'import numpy as np\n'), ((31017, 31028), 'numpy.log', 'np.log', (['(512)'], {}), '(512)\n', (31023, 31028), True, 'import numpy as np\n'), ((31097, 31106), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (31103, 31106), True, 'import numpy as np\n'), ((31108, 31120), 'numpy.log', 'np.log', (['(4096)'], {}), '(4096)\n', (31114, 31120), True, 'import numpy as np\n'), ((33072, 33081), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (33078, 33081), True, 'import numpy as np\n'), ((33083, 33094), 'numpy.log', 'np.log', (['(512)'], {}), '(512)\n', (33089, 33094), True, 'import numpy as np\n'), ((33163, 33172), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (33169, 33172), True, 'import numpy as np\n'), ((33174, 33186), 'numpy.log', 'np.log', (['(4096)'], {}), '(4096)\n', (33180, 33186), True, 'import numpy as np\n'), ((35194, 35203), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (35200, 35203), True, 'import numpy as np\n'), ((35205, 35216), 'numpy.log', 'np.log', (['(512)'], {}), '(512)\n', (35211, 35216), True, 'import numpy as np\n'), ((35285, 35294), 'numpy.log', 'np.log', (['(8)'], {}), '(8)\n', (35291, 35294), True, 'import numpy as np\n'), ((35296, 35308), 'numpy.log', 'np.log', (['(4096)'], {}), '(4096)\n', (35302, 35308), True, 'import numpy as np\n')] |
# python peripherals
import random
import os
import sys
import math
sys.path.insert(1, os.path.join(sys.path[0], '../..'))
# numpy
import numpy
# pandas
import pandas
# ipython
from IPython.display import display, HTML
# matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.lines
# pytorch
import torch
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data.sampler import SequentialSampler
from torch.utils.data import DataLoader
# deep signature
from deep_signature.utils import utils
from deep_signature.data_generation.curve_generation import LevelCurvesGenerator
from deep_signature.data_manipulation import curve_processing
from deep_signature.nn.datasets import DeepSignatureTupletsDataset
from deep_signature.nn.networks import DeepSignatureArcLengthNet
from deep_signature.nn.networks import DeepSignatureCurvatureNet
from deep_signature.nn.losses import ContrastiveLoss
from deep_signature.nn.trainers import ModelTrainer
from deep_signature.data_manipulation import curve_sampling
from deep_signature.data_manipulation import curve_processing
from deep_signature.linalg import euclidean_transform
from deep_signature.linalg import affine_transform
# common
from common import settings
from common import utils as common_utils
# notebooks
from notebooks.utils import utils as notebook_utils
# plt.style.use("dark_background")
transform_type = 'affine'
if transform_type == 'euclidean':
level_curves_arclength_tuplets_dir_path = settings.level_curves_euclidean_arclength_tuplets_dir_path
level_curves_arclength_tuplets_results_dir_path = settings.level_curves_euclidean_arclength_tuplets_results_dir_path
elif transform_type == 'equiaffine':
level_curves_arclength_tuplets_dir_path = settings.level_curves_equiaffine_arclength_tuplets_dir_path
level_curves_arclength_tuplets_results_dir_path = settings.level_curves_equiaffine_arclength_tuplets_results_dir_path
elif transform_type == 'affine':
level_curves_arclength_tuplets_dir_path = settings.level_curves_affine_arclength_tuplets_dir_path
level_curves_arclength_tuplets_results_dir_path = settings.level_curves_affine_arclength_tuplets_results_dir_path
if transform_type == 'euclidean':
level_curves_curvature_tuplets_dir_path = settings.level_curves_euclidean_curvature_tuplets_dir_path
level_curves_curvature_tuplets_results_dir_path = settings.level_curves_euclidean_curvature_tuplets_results_dir_path
elif transform_type == 'equiaffine':
level_curves_curvature_tuplets_dir_path = settings.level_curves_equiaffine_curvature_tuplets_dir_path
level_curves_curvature_tuplets_results_dir_path = settings.level_curves_equiaffine_curvature_tuplets_results_dir_path
elif transform_type == 'affine':
level_curves_curvature_tuplets_dir_path = settings.level_curves_affine_curvature_tuplets_dir_path
level_curves_curvature_tuplets_results_dir_path = settings.level_curves_affine_curvature_tuplets_results_dir_path
import warnings
warnings.filterwarnings("ignore")
# constants
true_arclength_colors = ['#FF8C00', '#444444']
predicted_arclength_colors = ['#AA0000', '#00AA00']
sample_colors = ['#AA0000', '#00AA00']
curve_colors = ['#AA0000', '#00AA00']
limit = 5
step = 60
comparison_curves_count = 1
section_supporting_points_count = 20
neighborhood_supporting_points_count = 3
curvature_sample_points = 2*neighborhood_supporting_points_count + 1
arclength_sample_points = section_supporting_points_count
sampling_ratio = 0.2
anchors_ratio = 0.2
device = torch.device('cuda')
# if we're in the equiaffine case, snap 'step' to the closest mutiple of 3 (from above)
# if transform_type == "equiaffine":
# step = int(3 * numpy.ceil(step / 3))
# package settings
torch.set_default_dtype(torch.float64)
numpy.random.seed(60)
# create models
arclength_model = DeepSignatureArcLengthNet(sample_points=arclength_sample_points).cuda()
curvature_model = DeepSignatureCurvatureNet(sample_points=curvature_sample_points).cuda()
# load arclength model state
latest_subdir = common_utils.get_latest_subdirectory(level_curves_arclength_tuplets_results_dir_path)
results = numpy.load(f"{latest_subdir}/results.npy", allow_pickle=True).item()
arclength_model.load_state_dict(torch.load(results['model_file_path'], map_location=device))
arclength_model.eval()
# load curvature model state
latest_subdir = common_utils.get_latest_subdirectory(level_curves_curvature_tuplets_results_dir_path)
results = numpy.load(f"{latest_subdir}/results.npy", allow_pickle=True).item()
curvature_model.load_state_dict(torch.load(results['model_file_path'], map_location=device))
curvature_model.eval()
# load curves (+ shuffle)
curves = LevelCurvesGenerator.load_curves(dir_path=settings.level_curves_dir_path_train)
numpy.random.shuffle(curves)
curves = curves[:limit]
# create color map
color_map = plt.get_cmap('rainbow', limit)
# generate curve records
curve_records = notebook_utils.generate_curve_records(
arclength_model=arclength_model,
curvature_model=curvature_model,
curves=curves,
transform_type=transform_type,
comparison_curves_count=comparison_curves_count,
sampling_ratio=sampling_ratio,
anchors_ratio=anchors_ratio,
step=step,
neighborhood_supporting_points_count=neighborhood_supporting_points_count,
section_supporting_points_count=section_supporting_points_count)
notebook_utils.plot_curve_signature_comparisons(
curve_records=curve_records,
curve_colors=curve_colors)
notebook_utils.plot_curve_arclength_records(
curve_records=curve_records,
true_arclength_colors=true_arclength_colors,
predicted_arclength_colors=predicted_arclength_colors,
sample_colors=sample_colors)
| [
"common.utils.get_latest_subdirectory",
"numpy.random.shuffle",
"notebooks.utils.utils.plot_curve_signature_comparisons",
"deep_signature.nn.networks.DeepSignatureArcLengthNet",
"deep_signature.data_generation.curve_generation.LevelCurvesGenerator.load_curves",
"torch.load",
"notebooks.utils.utils.plot_... | [((3022, 3055), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (3045, 3055), False, 'import warnings\n'), ((3553, 3573), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3565, 3573), False, 'import torch\n'), ((3763, 3801), 'torch.set_default_dtype', 'torch.set_default_dtype', (['torch.float64'], {}), '(torch.float64)\n', (3786, 3801), False, 'import torch\n'), ((3802, 3823), 'numpy.random.seed', 'numpy.random.seed', (['(60)'], {}), '(60)\n', (3819, 3823), False, 'import numpy\n'), ((4067, 4157), 'common.utils.get_latest_subdirectory', 'common_utils.get_latest_subdirectory', (['level_curves_arclength_tuplets_results_dir_path'], {}), '(\n level_curves_arclength_tuplets_results_dir_path)\n', (4103, 4157), True, 'from common import utils as common_utils\n'), ((4394, 4484), 'common.utils.get_latest_subdirectory', 'common_utils.get_latest_subdirectory', (['level_curves_curvature_tuplets_results_dir_path'], {}), '(\n level_curves_curvature_tuplets_results_dir_path)\n', (4430, 4484), True, 'from common import utils as common_utils\n'), ((4711, 4790), 'deep_signature.data_generation.curve_generation.LevelCurvesGenerator.load_curves', 'LevelCurvesGenerator.load_curves', ([], {'dir_path': 'settings.level_curves_dir_path_train'}), '(dir_path=settings.level_curves_dir_path_train)\n', (4743, 4790), False, 'from deep_signature.data_generation.curve_generation import LevelCurvesGenerator\n'), ((4791, 4819), 'numpy.random.shuffle', 'numpy.random.shuffle', (['curves'], {}), '(curves)\n', (4811, 4819), False, 'import numpy\n'), ((4876, 4906), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""rainbow"""', 'limit'], {}), "('rainbow', limit)\n", (4888, 4906), True, 'import matplotlib.pyplot as plt\n'), ((4949, 5385), 'notebooks.utils.utils.generate_curve_records', 'notebook_utils.generate_curve_records', ([], {'arclength_model': 'arclength_model', 'curvature_model': 'curvature_model', 
'curves': 'curves', 'transform_type': 'transform_type', 'comparison_curves_count': 'comparison_curves_count', 'sampling_ratio': 'sampling_ratio', 'anchors_ratio': 'anchors_ratio', 'step': 'step', 'neighborhood_supporting_points_count': 'neighborhood_supporting_points_count', 'section_supporting_points_count': 'section_supporting_points_count'}), '(arclength_model=arclength_model,\n curvature_model=curvature_model, curves=curves, transform_type=\n transform_type, comparison_curves_count=comparison_curves_count,\n sampling_ratio=sampling_ratio, anchors_ratio=anchors_ratio, step=step,\n neighborhood_supporting_points_count=\n neighborhood_supporting_points_count, section_supporting_points_count=\n section_supporting_points_count)\n', (4986, 5385), True, 'from notebooks.utils import utils as notebook_utils\n'), ((5401, 5508), 'notebooks.utils.utils.plot_curve_signature_comparisons', 'notebook_utils.plot_curve_signature_comparisons', ([], {'curve_records': 'curve_records', 'curve_colors': 'curve_colors'}), '(curve_records=curve_records,\n curve_colors=curve_colors)\n', (5448, 5508), True, 'from notebooks.utils import utils as notebook_utils\n'), ((5515, 5725), 'notebooks.utils.utils.plot_curve_arclength_records', 'notebook_utils.plot_curve_arclength_records', ([], {'curve_records': 'curve_records', 'true_arclength_colors': 'true_arclength_colors', 'predicted_arclength_colors': 'predicted_arclength_colors', 'sample_colors': 'sample_colors'}), '(curve_records=curve_records,\n true_arclength_colors=true_arclength_colors, predicted_arclength_colors\n =predicted_arclength_colors, sample_colors=sample_colors)\n', (5558, 5725), True, 'from notebooks.utils import utils as notebook_utils\n'), ((87, 121), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""../.."""'], {}), "(sys.path[0], '../..')\n", (99, 121), False, 'import os\n'), ((4264, 4323), 'torch.load', 'torch.load', (["results['model_file_path']"], {'map_location': 'device'}), "(results['model_file_path'], 
map_location=device)\n", (4274, 4323), False, 'import torch\n'), ((4591, 4650), 'torch.load', 'torch.load', (["results['model_file_path']"], {'map_location': 'device'}), "(results['model_file_path'], map_location=device)\n", (4601, 4650), False, 'import torch\n'), ((3859, 3923), 'deep_signature.nn.networks.DeepSignatureArcLengthNet', 'DeepSignatureArcLengthNet', ([], {'sample_points': 'arclength_sample_points'}), '(sample_points=arclength_sample_points)\n', (3884, 3923), False, 'from deep_signature.nn.networks import DeepSignatureArcLengthNet\n'), ((3949, 4013), 'deep_signature.nn.networks.DeepSignatureCurvatureNet', 'DeepSignatureCurvatureNet', ([], {'sample_points': 'curvature_sample_points'}), '(sample_points=curvature_sample_points)\n', (3974, 4013), False, 'from deep_signature.nn.networks import DeepSignatureCurvatureNet\n'), ((4163, 4224), 'numpy.load', 'numpy.load', (['f"""{latest_subdir}/results.npy"""'], {'allow_pickle': '(True)'}), "(f'{latest_subdir}/results.npy', allow_pickle=True)\n", (4173, 4224), False, 'import numpy\n'), ((4490, 4551), 'numpy.load', 'numpy.load', (['f"""{latest_subdir}/results.npy"""'], {'allow_pickle': '(True)'}), "(f'{latest_subdir}/results.npy', allow_pickle=True)\n", (4500, 4551), False, 'import numpy\n')] |
import numpy as np
def random_vec(length):
    """Return a 1-D numpy array of `length` uniform random floats in [0, 1).

    Args:
        length: a positive int. `bool` is rejected even though it subclasses
            `int`, matching the original strict type check.

    Raises:
        ValueError: if `length` is not an int or is not positive.
    """
    # isinstance is the idiomatic type check; bool is excluded explicitly
    # because it subclasses int but was rejected by the original code.
    if not isinstance(length, int) or isinstance(length, bool):
        raise ValueError("length should be int.")
    if length <= 0:
        raise ValueError("length should be a positive number.")
    return np.random.rand(length)
def normalize_vec(vector):
    """Return `vector` rescaled to unit Euclidean (L2) length."""
    magnitude = np.linalg.norm(vector)
    return vector / magnitude
"numpy.random.rand",
"numpy.linalg.norm"
] | [((228, 250), 'numpy.random.rand', 'np.random.rand', (['length'], {}), '(length)\n', (242, 250), True, 'import numpy as np\n'), ((299, 321), 'numpy.linalg.norm', 'np.linalg.norm', (['vector'], {}), '(vector)\n', (313, 321), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for core layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as onp
from tensor2tensor.trax import backend
from tensor2tensor.trax.layers import base
from tensor2tensor.trax.layers import combinators
from tensor2tensor.trax.layers import core
class CoreLayerTest(absltest.TestCase):
  """Shape and parameter-handling checks for trax core layers."""

  def test_flatten_n(self):
    input_shape = (29, 87, 10, 20, 30)
    # (n_axes_to_keep, expected output shape); None exercises the default.
    shape_cases = [
        (None, (29, 87 * 10 * 20 * 30)),
        (2, (29, 87, 10 * 20 * 30)),
        (3, (29, 87, 10, 20 * 30)),
        (4, (29, 87, 10, 20, 30)),
    ]
    for n_axes, expected_shape in shape_cases:
      if n_axes is None:
        layer = core.Flatten()
      else:
        layer = core.Flatten(n_axes_to_keep=n_axes)
      self.assertEqual(
          base.check_shape_agreement(layer, input_shape), expected_shape)
    # Not enough dimensions: keeping 5 or 6 axes of a rank-5 input must fail.
    for n_axes in (5, 6):
      with self.assertRaises(base.LayerError):
        base.check_shape_agreement(
            core.Flatten(n_axes_to_keep=n_axes), input_shape)

  def test_div(self):
    layer = core.Div(divisor=2.0)
    x = onp.array([[1, 2, 3], [4, 5, 6]], dtype=onp.float32)
    y, _ = layer(x)
    # absltest doesn't have ndarray equalities, so compare via squared error.
    self.assertAlmostEqual(0.0, onp.sum((y - x / 2.0) ** 2), delta=1e-6)

  def test_div_shapes(self):
    layer = core.Div(divisor=2.0)
    shape = (3, 2)
    # Division by a scalar leaves the shape untouched.
    self.assertEqual(base.check_shape_agreement(layer, shape), shape)

  def test_dense_param_sharing(self):
    model1 = combinators.Serial(core.Dense(32), core.Dense(32))
    shared_layer = core.Dense(32)
    model2 = combinators.Serial(shared_layer, shared_layer)
    rng = backend.random.get_prng(0)
    params1, _ = model1.initialize((1, 32), onp.float32, rng)
    params2, _ = model2.initialize((1, 32), onp.float32, rng)
    # Two distinct layers: two (32, 32) kernels.
    self.assertEqual((32, 32), params1[0][0].shape)
    self.assertEqual((32, 32), params1[1][0].shape)
    # One shared layer: a single (32, 32) kernel plus an empty placeholder.
    self.assertEqual((32, 32), params2[0][0].shape)
    self.assertEqual((), params2[1])

  def test_dropout(self):
    input_shape = (8, 7, 9)
    # Dropout never changes shapes, in either training or eval mode.
    for mode in ("train", "eval"):
      final_shape = base.check_shape_agreement(
          core.Dropout(rate=0.1, mode=mode), input_shape)
      self.assertEqual(final_shape, input_shape)
# Run the absltest runner when this file is executed as a script.
if __name__ == "__main__":
  absltest.main()
| [
"tensor2tensor.trax.layers.combinators.Serial",
"absl.testing.absltest.main",
"tensor2tensor.trax.layers.base.check_shape_agreement",
"tensor2tensor.trax.backend.random.get_prng",
"numpy.array",
"numpy.sum",
"tensor2tensor.trax.layers.core.Flatten",
"tensor2tensor.trax.layers.core.Dropout",
"tensor2... | [((3850, 3865), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (3863, 3865), False, 'from absl.testing import absltest\n'), ((1097, 1111), 'tensor2tensor.trax.layers.core.Flatten', 'core.Flatten', ([], {}), '()\n', (1109, 1111), False, 'from tensor2tensor.trax.layers import core\n'), ((1176, 1222), 'tensor2tensor.trax.layers.base.check_shape_agreement', 'base.check_shape_agreement', (['layer', 'input_shape'], {}), '(layer, input_shape)\n', (1202, 1222), False, 'from tensor2tensor.trax.layers import base\n'), ((1287, 1317), 'tensor2tensor.trax.layers.core.Flatten', 'core.Flatten', ([], {'n_axes_to_keep': '(2)'}), '(n_axes_to_keep=2)\n', (1299, 1317), False, 'from tensor2tensor.trax.layers import core\n'), ((1381, 1427), 'tensor2tensor.trax.layers.base.check_shape_agreement', 'base.check_shape_agreement', (['layer', 'input_shape'], {}), '(layer, input_shape)\n', (1407, 1427), False, 'from tensor2tensor.trax.layers import base\n'), ((1492, 1522), 'tensor2tensor.trax.layers.core.Flatten', 'core.Flatten', ([], {'n_axes_to_keep': '(3)'}), '(n_axes_to_keep=3)\n', (1504, 1522), False, 'from tensor2tensor.trax.layers import core\n'), ((1585, 1631), 'tensor2tensor.trax.layers.base.check_shape_agreement', 'base.check_shape_agreement', (['layer', 'input_shape'], {}), '(layer, input_shape)\n', (1611, 1631), False, 'from tensor2tensor.trax.layers import base\n'), ((1696, 1726), 'tensor2tensor.trax.layers.core.Flatten', 'core.Flatten', ([], {'n_axes_to_keep': '(4)'}), '(n_axes_to_keep=4)\n', (1708, 1726), False, 'from tensor2tensor.trax.layers import core\n'), ((1788, 1834), 'tensor2tensor.trax.layers.base.check_shape_agreement', 'base.check_shape_agreement', (['layer', 'input_shape'], {}), '(layer, input_shape)\n', (1814, 1834), False, 'from tensor2tensor.trax.layers import base\n'), ((2198, 2219), 'tensor2tensor.trax.layers.core.Div', 'core.Div', ([], {'divisor': '(2.0)'}), '(divisor=2.0)\n', (2206, 2219), False, 'from tensor2tensor.trax.layers 
import core\n'), ((2235, 2287), 'numpy.array', 'onp.array', (['[[1, 2, 3], [4, 5, 6]]'], {'dtype': 'onp.float32'}), '([[1, 2, 3], [4, 5, 6]], dtype=onp.float32)\n', (2244, 2287), True, 'import numpy as onp\n'), ((2570, 2591), 'tensor2tensor.trax.layers.core.Div', 'core.Div', ([], {'divisor': '(2.0)'}), '(divisor=2.0)\n', (2578, 2591), False, 'from tensor2tensor.trax.layers import core\n'), ((2664, 2710), 'tensor2tensor.trax.layers.base.check_shape_agreement', 'base.check_shape_agreement', (['layer', 'input_shape'], {}), '(layer, input_shape)\n', (2690, 2710), False, 'from tensor2tensor.trax.layers import base\n'), ((2877, 2891), 'tensor2tensor.trax.layers.core.Dense', 'core.Dense', (['(32)'], {}), '(32)\n', (2887, 2891), False, 'from tensor2tensor.trax.layers import core\n'), ((2905, 2937), 'tensor2tensor.trax.layers.combinators.Serial', 'combinators.Serial', (['layer', 'layer'], {}), '(layer, layer)\n', (2923, 2937), False, 'from tensor2tensor.trax.layers import combinators\n'), ((2948, 2974), 'tensor2tensor.trax.backend.random.get_prng', 'backend.random.get_prng', (['(0)'], {}), '(0)\n', (2971, 2974), False, 'from tensor2tensor.trax import backend\n'), ((2460, 2506), 'numpy.sum', 'onp.sum', (['((output_np - expected_output_np) ** 2)'], {}), '((output_np - expected_output_np) ** 2)\n', (2467, 2506), True, 'import numpy as onp\n'), ((2833, 2847), 'tensor2tensor.trax.layers.core.Dense', 'core.Dense', (['(32)'], {}), '(32)\n', (2843, 2847), False, 'from tensor2tensor.trax.layers import core\n'), ((2849, 2863), 'tensor2tensor.trax.layers.core.Dense', 'core.Dense', (['(32)'], {}), '(32)\n', (2859, 2863), False, 'from tensor2tensor.trax.layers import core\n'), ((3568, 3604), 'tensor2tensor.trax.layers.core.Dropout', 'core.Dropout', ([], {'rate': '(0.1)', 'mode': '"""train"""'}), "(rate=0.1, mode='train')\n", (3580, 3604), False, 'from tensor2tensor.trax.layers import core\n'), ((3721, 3756), 'tensor2tensor.trax.layers.core.Dropout', 'core.Dropout', ([], {'rate': 
'(0.1)', 'mode': '"""eval"""'}), "(rate=0.1, mode='eval')\n", (3733, 3756), False, 'from tensor2tensor.trax.layers import core\n'), ((1994, 2024), 'tensor2tensor.trax.layers.core.Flatten', 'core.Flatten', ([], {'n_axes_to_keep': '(5)'}), '(n_axes_to_keep=5)\n', (2006, 2024), False, 'from tensor2tensor.trax.layers import core\n'), ((2118, 2148), 'tensor2tensor.trax.layers.core.Flatten', 'core.Flatten', ([], {'n_axes_to_keep': '(6)'}), '(n_axes_to_keep=6)\n', (2130, 2148), False, 'from tensor2tensor.trax.layers import core\n')] |
#!python3
import numpy as np
from magLabUtilities.signalutilities.signals import SignalThread, Signal, SignalBundle
from magLabUtilities.signalutilities.hysteresis import HysteresisSignalBundle, XExpGendey101620
from magLabUtilities.uiutilities.plotting.hysteresis import MofHXofMPlotter
if __name__=='__main__':
    # Model parameters, passed to XExpGendey101620 below:
    # xInit:np.float64, mSat:np.float64, mNuc:np.float64, hCoercive:np.float64, hAnh:np.float64, hCoop:np.float64
    xInit = 63.0
    mSat = 1.60e6
    mNuc = 1.44e6
    hCoercive = 600
    hAnh = 5800
    hCoop = 750
    # Peak magnetization amplitude swept by the reversal branches.
    amplitude = 1.51637e6
    # Virgin branch: M ramps 0 -> amplitude over t in [0, 1).
    tVirginThread = SignalThread(np.linspace(0.0, 1.0, 10000, endpoint=False))
    mVirginThread = SignalThread(np.linspace(0.0, amplitude, 10000, endpoint=False))
    mVirginSignal = Signal.fromThreadPair(mVirginThread, tVirginThread)
    # Positive reversal: M ramps +amplitude -> -amplitude over t in [1, 3).
    tPRThread = SignalThread(np.linspace(1.0, 3.0, 20000, endpoint=False))
    mPRThread = SignalThread(np.linspace(amplitude, -amplitude, 20000, endpoint=False))
    mPRSignal = Signal.fromThreadPair(mPRThread, tPRThread)
    # Negative reversal: M ramps -amplitude -> +amplitude.
    # NOTE(review): this time axis starts at 2.0, overlapping the positive
    # reversal which runs to 3.0 — presumably it should start at 3.0; also
    # endpoint=False is omitted here unlike the other branches. Confirm intent.
    tNRThread = SignalThread(np.linspace(2.0, 5.0, 20000))
    mNRThread = SignalThread(np.linspace(-amplitude, amplitude, 20000))
    mNRSignal = Signal.fromThreadPair(mNRThread, tNRThread)
    # Evaluate the susceptibility model branch by branch; each reversal starts
    # from the extreme H reached by the previous branch.
    xExp = XExpGendey101620(xInit, mSat, mNuc, hCoercive, hAnh, hCoop)
    virginBundle = xExp.evaluate(mVirginSignal, mRev=0.0, hRev=0.0, curveRegion='virgin')
    prBundle = xExp.evaluate(mPRSignal, mRev=amplitude, hRev=np.amax(virginBundle.signals['H'].independentThread.data), curveRegion='reversal')
    nrBundle = xExp.evaluate(mNRSignal, mRev=-amplitude, hRev=np.amin(prBundle.signals['H'].independentThread.data), curveRegion='reversal')
    # Overlay M(H) and X(M) plots for the three branches.
    plotter = MofHXofMPlotter()
    plotter.addMofHPlot(virginBundle, 'virgin')
    plotter.addMofHPlot(prBundle, 'Positive Reversal')
    plotter.addMofHPlot(nrBundle, 'Negative Reversal')
    plotter.addXofMPlot(virginBundle, 'virgin')
    plotter.addXofMPlot(prBundle, 'Positive Reversal')
    plotter.addXofMPlot(nrBundle, 'Negative Reversal')
    print('done.')
| [
"numpy.amin",
"magLabUtilities.uiutilities.plotting.hysteresis.MofHXofMPlotter",
"magLabUtilities.signalutilities.hysteresis.XExpGendey101620",
"numpy.linspace",
"magLabUtilities.signalutilities.signals.Signal.fromThreadPair",
"numpy.amax"
] | [((766, 817), 'magLabUtilities.signalutilities.signals.Signal.fromThreadPair', 'Signal.fromThreadPair', (['mVirginThread', 'tVirginThread'], {}), '(mVirginThread, tVirginThread)\n', (787, 817), False, 'from magLabUtilities.signalutilities.signals import SignalThread, Signal, SignalBundle\n'), ((1002, 1045), 'magLabUtilities.signalutilities.signals.Signal.fromThreadPair', 'Signal.fromThreadPair', (['mPRThread', 'tPRThread'], {}), '(mPRThread, tPRThread)\n', (1023, 1045), False, 'from magLabUtilities.signalutilities.signals import SignalThread, Signal, SignalBundle\n'), ((1198, 1241), 'magLabUtilities.signalutilities.signals.Signal.fromThreadPair', 'Signal.fromThreadPair', (['mNRThread', 'tNRThread'], {}), '(mNRThread, tNRThread)\n', (1219, 1241), False, 'from magLabUtilities.signalutilities.signals import SignalThread, Signal, SignalBundle\n'), ((1256, 1315), 'magLabUtilities.signalutilities.hysteresis.XExpGendey101620', 'XExpGendey101620', (['xInit', 'mSat', 'mNuc', 'hCoercive', 'hAnh', 'hCoop'], {}), '(xInit, mSat, mNuc, hCoercive, hAnh, hCoop)\n', (1272, 1315), False, 'from magLabUtilities.signalutilities.hysteresis import HysteresisSignalBundle, XExpGendey101620\n'), ((1713, 1730), 'magLabUtilities.uiutilities.plotting.hysteresis.MofHXofMPlotter', 'MofHXofMPlotter', ([], {}), '()\n', (1728, 1730), False, 'from magLabUtilities.uiutilities.plotting.hysteresis import MofHXofMPlotter\n'), ((613, 657), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(10000)'], {'endpoint': '(False)'}), '(0.0, 1.0, 10000, endpoint=False)\n', (624, 657), True, 'import numpy as np\n'), ((693, 743), 'numpy.linspace', 'np.linspace', (['(0.0)', 'amplitude', '(10000)'], {'endpoint': '(False)'}), '(0.0, amplitude, 10000, endpoint=False)\n', (704, 743), True, 'import numpy as np\n'), ((850, 894), 'numpy.linspace', 'np.linspace', (['(1.0)', '(3.0)', '(20000)'], {'endpoint': '(False)'}), '(1.0, 3.0, 20000, endpoint=False)\n', (861, 894), True, 'import numpy as np\n'), ((926, 983), 
'numpy.linspace', 'np.linspace', (['amplitude', '(-amplitude)', '(20000)'], {'endpoint': '(False)'}), '(amplitude, -amplitude, 20000, endpoint=False)\n', (937, 983), True, 'import numpy as np\n'), ((1078, 1106), 'numpy.linspace', 'np.linspace', (['(2.0)', '(5.0)', '(20000)'], {}), '(2.0, 5.0, 20000)\n', (1089, 1106), True, 'import numpy as np\n'), ((1138, 1179), 'numpy.linspace', 'np.linspace', (['(-amplitude)', 'amplitude', '(20000)'], {}), '(-amplitude, amplitude, 20000)\n', (1149, 1179), True, 'import numpy as np\n'), ((1471, 1528), 'numpy.amax', 'np.amax', (["virginBundle.signals['H'].independentThread.data"], {}), "(virginBundle.signals['H'].independentThread.data)\n", (1478, 1528), True, 'import numpy as np\n'), ((1617, 1670), 'numpy.amin', 'np.amin', (["prBundle.signals['H'].independentThread.data"], {}), "(prBundle.signals['H'].independentThread.data)\n", (1624, 1670), True, 'import numpy as np\n')] |
#Python 2.7.9 (default, Apr 5 2015, 22:21:35)
# full env in environment.yml
import sys, os
'''
This is a full aggregation of the Pulsar Hunters project, including user weighting.
Note it's quite a simple project - basically one Yes/No question - and there is gold-standard data, so the weighting is relatively straightforward and the aggregation is just determining a single fraction for each subject.
For an example of an aggregation of a much more complex question tree, check out scripts for Galaxy Zoo. The user weighting in that project is also completely different.
Hopefully this is well-enough commented below to be useful for others.
--BDS
'''
# file with raw classifications (csv) needed
# put this way up here so if there are no inputs we exit quickly before even trying to load everything else
# Read the input classifications filename from the command line; if it's
# missing, print usage and exit cleanly.
try:
    classfile_in = sys.argv[1]
except IndexError:
    # Catch only IndexError (missing argument) rather than a bare except,
    # so unrelated failures (e.g. KeyboardInterrupt) aren't swallowed.
    # outfile_default is only assigned further down the file, so referencing
    # it here raised NameError before the usage text could print; define the
    # fallback locally (the later assignment uses the same value).
    outfile_default = 'pulsar_aggregations.csv'
    print("\nUsage: %s classifications_infile [weight_class aggregations_outfile]" % sys.argv[0])
    print("      classifications_infile is a Zooniverse (Panoptes) classifications data export CSV.")
    print("      weight_class is 1 if you want to calculate and apply user weightings, 0 otherwise.")
    print("      aggregations_outfile is the name of the file you want written. If you don't specify,")
    print("      the filename is %s by default." % outfile_default)
    sys.exit(0)
import numpy as np # using 1.10.1
import pandas as pd # using 0.13.1
#import datetime
#import dateutil.parser
import json
############ Define files and settings below ##############
# default outfile
outfile_default = 'pulsar_aggregations.csv'
rankfile_stem = 'subjects_ranked_by_weighted_class_asof_'
# file with tags left in Talk, for value-added columns below
talk_export_file = "helperfiles/project-764-tags_2016-01-15.json"
# file with master list between Zooniverse metadata image filename (no source coords) and
# original filename with source coords and additional info
# also I get to have a variable that uses "filename" twice where each means a different thing
# a filename for a file full of filenames #alliterationbiyotch
filename_master_list_filename = "helperfiles/HTRU-N_sets_keys.csv"
# this is a list of possible matches to known pulsars that was done after the fact so they
# are flagged as "cand" in the database instead of "known" etc.
poss_match_file = 'helperfiles/PossibleMatches.csv'
# later we will select on tags by the project team and possibly weight them differently
# note I've included the moderators and myself (though I didn't tag anything).
# Also note it's possible to do this in a more general fashion using a file with project users and roles
# However, hard-coding seemed the thing to do given our time constraints (and the fact that I don't think
# you can currently export the user role file from the project builder)
project_team = 'bretonr jocelynbb spindizzy Simon_Rookyard <NAME>_ilie jamesy23 <NAME> walkcr <NAME> benjamin_shaw bhaswati djchampion jwbmartin bstappers ElisabethB Capella05 vrooje'.split()
# define the active workflow - we will ignore all classifications not on this workflow
# we could make this an input but let's not get too fancy for a specific case.
# for beta test
#active_workflow_id = 1099
#active_workflow_major = 6
# for live project
active_workflow_id = 1224
active_workflow_major = 4
# do we want sum(weighted vote count) = sum(raw vote count)?
normalise_weights = True
# do we want to write an extra file with just classification counts and usernames
# (and a random color column, for treemaps)?
counts_out = True
counts_out_file = 'class_counts_colors.csv'
############ Set the other inputs now ###############
# optional command-line args: weighting scheme flag (int) and output filename.
# catch only the errors that an absent or malformed argv entry can raise, rather
# than a bare except that would also swallow SystemExit / KeyboardInterrupt.
try:
    apply_weight = int(sys.argv[2])
except (IndexError, ValueError):
    apply_weight = 0
try:
    outfile = sys.argv[3]
except IndexError:
    outfile = outfile_default
#################################################################################
#################################################################################
#################################################################################
# This is the function that actually does the aggregating
def aggregate_class(grp):
    """Aggregate one subject's classifications into vote counts and fractions.

    Returns a Series holding the raw and weighted vote fractions for each
    answer, the unweighted and weighted classification counts, the subject
    type and the filename, in a fixed column order.
    """
    # work on a real DataFrame so indexing behaves predictably inside apply()
    subject_df = pd.DataFrame(grp)
    # group by the answer given and total up the raw counts and the weights
    per_answer = subject_df.groupby('pulsar_classification')
    counts_by_answer = per_answer['count'].aggregate('sum')
    weights_by_answer = per_answer['weight'].aggregate('sum')
    # denominators for the vote fractions
    total_count = np.sum(counts_by_answer)
    total_weight = np.sum(weights_by_answer)
    # assemble the aggregate row: overall counts first, then per-answer fractions
    agg = {
        'count_unweighted': total_count,
        'count_weighted': total_weight,
        'subject_type': subject_df.subject_type.unique()[0],
        'filename': subject_df.filename.unique()[0],
    }
    for answer in subject_df.pulsar_classification.unique():
        # label columns with the answer text itself (spaces -> underscores)
        agg[('p_' + answer).replace(' ', '_')] = counts_by_answer[answer] / float(total_count)
        agg[('p_' + answer + '_weight').replace(' ', '_')] = weights_by_answer[answer] / float(total_weight)
    # hard-coded column order assumes this project's Yes/No answers
    col_order = ["filename", "p_Yes", "p_No", "p_Yes_weight", "p_No_weight",
                 "count_unweighted", "count_weighted", "subject_type"]
    return pd.Series(agg)[col_order]
#################################################################################
#################################################################################
#################################################################################
# The new weighting assignment function allows the user to choose between different weighting schemes
# though note the one in this function is not preferred for reasons explained below.
def assign_weight_old(seed):
    """Map a summed seed to a user weight of 1.0025**seed, clipped to [0.05, 3.0].

    A seed of exactly zero gets the neutral weight 1.0.  The two signs are
    handled separately in case a different base is ever wanted for each.
    """
    if seed == 0:
        return 1.0
    weight = pow(1.0025, seed)
    # clip: never below 0.05 (negative seeds) nor above 3.0 (positive seeds)
    return max(0.05, weight) if seed < 0. else min(3.0, weight)
# assigns a weight based on a seed parameter
# The weight is assigned using the seed as an exponent and the number below as the base.
# The number is just slightly offset from 1 so that it takes many classifications for
# a user's potential weight to cap out at the max weight (3) or bottom out at the min (0.05).
# Currently there are 641 "known" pulsars in the DB so the base of 1.025 is largely based on that.
# Update: there are now about 5,000 simulated pulsars in the subject set as well, and they have a
# much higher retirement limit, so that more people will have classified them and we have more info.
# Note I'd rather this did a proper analysis with a confusion matrix etc but under a time crunch
# we went with something simpler.
def assign_weight(q, which_weight):
    """Compute a user's weight from one row of the user_weights table.

    q is an (index, row) pair from DataFrame.iterrows(); the row must carry
    the summed gold-standard seed (row.seed) and the number of gold-standard
    classifications done (row.n_gs).

    which_weight == 1: w = 1.0025**seed, bounded to [0.05, 3.0].  Simple, but
        a prolific-yet-nearly-random classifier can still end up weighted > 1.
    which_weight == 2: w = c0 * (1 + log10(n_gs))**(seed/n_gs), bounded to
        [0.05, 3.0], with the floor c0 when no gold-standard info exists --
        this normalises by how many gold-standard subjects the user saw.
    anything else: unweighted, i.e. 1.0.
    """
    # default/floor weight for scheme 2: seed == 0 means either equal numbers
    # right & wrong OR no information at all
    c0 = 0.5
    row = q[1]
    seed = row.seed
    n_gs = row.n_gs
    if which_weight == 1:
        # keep the sign cases separate in case different bases are wanted later
        if seed == 0:
            return 1.0
        w = pow(1.0025, seed)
        return max([0.05, w]) if seed < 0. else min([3.0, w])
    if which_weight == 2:
        if n_gs < 1:
            # no gold-standard classifications: don't divide by or log zero,
            # just hand back the default weight
            return c0
        # the cap of 3 is unlikely to be reached, but the floor can be hit
        w = c0 * pow(1.0 + np.log10(n_gs), float(seed) / float(n_gs))
        return min([3.0, max([0.05, w])])
    # not a recognised scheme: unweighted
    return 1.0
#################################################################################
#################################################################################
#################################################################################
# Get the Gini coefficient - https://en.wikipedia.org/wiki/Gini_coefficient
# Typical values of the Gini for healthy Zooniverse projects (Cox et al. 2015) are
# in the range of 0.7-0.9.
def gini(list_of_values):
    """Return the Gini coefficient of a sequence of values.

    https://en.wikipedia.org/wiki/Gini_coefficient
    Typical values for healthy Zooniverse projects (Cox et al. 2015) are
    in the range 0.7-0.9.

    Accepts any sequence with len() (list, pandas Series, ...).  An empty
    input, or one summing to zero, has no inequality to measure, so return
    0.0 instead of raising ZeroDivisionError as the original did.
    """
    # len()-based guard (not truthiness) so pandas Series also work
    if len(list_of_values) == 0:
        return 0.0
    sorted_list = sorted(list_of_values)
    height, area = 0, 0
    for value in sorted_list:
        height += value
        area += height - value / 2.
    fair_area = height * len(list_of_values) / 2
    if fair_area == 0:
        # all values are zero: perfectly equal
        return 0.0
    return (fair_area - area) / fair_area
#################################################################################
#################################################################################
#################################################################################
# assign a color randomly if logged in, gray otherwise
def randcolor(user_label):
    """Return a random '#RRGGBB' hex color string for a user (for treemaps).

    Anonymous users ("not-logged-in-...") get a random mid-range gray so they
    visually blend together; logged-in users get a fully random RGB color.
    """
    # bug fix: the original used random without importing it anywhere in the
    # file, which raised a NameError at runtime; import it locally here.
    import random
    if user_label.startswith('not-logged-in-'):
        # keep it confined to grays, i.e. R=G=B and not too bright, not too dark
        g = random.randint(25, 150)
        return '#%02X%02X%02X' % (g, g, g)
    else:
        # the lambda generates a new int every call, so in general R != G != B
        r = lambda: random.randint(0, 255)
        return '#%02X%02X%02X' % (r(), r(), r())
#################################################################################
#################################################################################
#################################################################################
# These are functions that extract information from the various JSONs that are
# included in the classification exports. To Do: optimise these so that one .apply()
# call will extract them for everything without so many &^%@$ing loops.
def get_subject_type(q):
    """Return the '#Class' metadata value ("known"/"disc"/"fake"/"cand") for
    one (index, row) pair from classifications.iterrows(); default to "cand"
    when the metadata key (or the whole lookup chain) is absent.
    """
    try:
        return q[1].subject_json[q[1].subject_id]['#Class']
    # narrowed from a bare except: only the lookup failures we expect, so
    # real bugs (and KeyboardInterrupt/SystemExit) aren't silently swallowed
    except (KeyError, TypeError, AttributeError, IndexError):
        return "cand"
def get_filename(q):
    """Return the candidate image filename from the subject metadata of one
    (index, row) pair from classifications.iterrows().

    Subjects stored the filename under one of several keys over the life of
    the project, so try each in preference order (the same order the original
    nested try/excepts used); fall back to a placeholder name.
    """
    for key in ('CandidateFile', 'CandidateFileVertical', 'CandidateFileHorizontal'):
        try:
            return q[1].subject_json[q[1].subject_id][key]
        # narrowed from bare excepts: only expected lookup failures
        except (KeyError, TypeError, AttributeError, IndexError):
            continue
    return "filenotfound.png"
# get number of gold-standard classifications completed by a user (used if weighting)
def get_n_gs(thegrp):
    """Count how many of this user's classifications were gold-standard,
    i.e. carry a nonzero weighting seed."""
    nonzero_seed = pd.DataFrame(thegrp).seed != 0
    return sum(nonzero_seed)
# Something went weird with IP addresses, so use more info to determine unique users
# Note the user_name still has the IP address in it if the user is not logged in;
# it's just that for this specific project it's not that informative.
def get_alternate_sessioninfo(row):
    """Build a unique user label from one (index, row) pair.

    Logged-in users are identified by user_name alone.  Anonymous users keep
    their "not-logged-in-..." IP-based name but get session info appended,
    because in this dataset the session is a better tracer of uniqueness than
    the IP; if no session exists, fall back to IP + user agent + viewport
    (with a placeholder for any piece that is missing).
    """
    rowdata = row[1]
    username = rowdata['user_name']
    # logged in: save yourself all this trouble
    if not username.startswith('not-logged-in'):
        return username
    metadata = rowdata['meta_json']
    try:
        # keep the "not-logged-in" prefix so downstream checks still work
        return str(username) + "_" + str(metadata['session'])
    except:
        pass
    # no session: assemble (IP, user agent, viewport) instead
    try:
        viewport = str(metadata['viewport'])
    except:
        viewport = "NoViewport"
    try:
        user_agent = str(metadata['user_agent'])
    except:
        user_agent = "NoUserAgent"
    try:
        user_ip = str(rowdata['user_name'])
    except:
        user_ip = "NoUserIP"
    return user_ip + user_agent + viewport
#################################################################################
#################################################################################
#################################################################################
# Print out the input parameters just as a sanity check
print("Computing aggregations using:")
print(" infile: %s" % classfile_in)
print(" weighted? %d" % apply_weight)
print(" Will print to %s after processing." % outfile)
#################################################################################
#################################################################################
#################################################################################
#
#
#
#
# Begin the main work
#
#
#
#
print("Reading classifications from %s ..." % classfile_in)
classifications = pd.read_csv(classfile_in) # this step can take a few minutes for a big file
# Talk tags are not usually huge files so this doesn't usually take that long
print("Parsing Talk tag file for team tags %s ..." % talk_export_file)
# read the Talk export inside a with-block so the file handle is closed
# promptly (the original open(...).read() leaked the handle until GC)
with open(talk_export_file) as talkfile:
    talkjson = json.load(talkfile)
talktags_all = pd.DataFrame(talkjson)
# we only care about the Subject comments here, not discussions on the boards
# also we only care about tags by the research team & moderators
# .copy() so later column assignments act on an independent frame, not a view
talktags = talktags_all[(talktags_all.taggable_type == "Subject") & (talktags_all.user_login.isin(project_team))].copy()
# make a username-tag pair column
# subject id is a string in the classifications array so force it to be one here or the match won't work
talktags['subject_id'] = [str(int(q)) for q in talktags.taggable_id]
# NOTE(review): talktags.name is attribute access to the "name" column; it
# works here but can clash with DataFrame.name -- bracket access would be safer
talktags["user_tag"] = talktags.user_login+": #"+talktags.name+";"
# when we're talking about Subject tags, taggable_id is subject_id
talk_bysubj = talktags.groupby('subject_id')
# this now contains all the project-team-written tags on each subject, 1 row per subject
subj_tags = pd.DataFrame(talk_bysubj.user_tag.unique())
# if we need this as an explicit column
#subj_tags['subject_id'] = subj_tags.index
# likewise reading this matched files doesn't take long even though we have a for loop.
print("Reading master list of matched filenames %s..." % filename_master_list_filename)
matched_filenames = pd.read_csv(filename_master_list_filename)
print("Reading from list of possible matches to known pulsars %s..." % poss_match_file)
# ['Zooniverse name', 'HTRU-N name', 'Possible source']
possible_knowns = pd.read_csv(poss_match_file)
# flag every row in this file as a possible known; after the left merge below,
# subjects absent from this file end up NaN and are later mapped to False
possible_knowns['is_poss_known'] = [True for q in possible_knowns['Possible source']]
# This section takes quite a while and it's because we have so many for loops, which I think is
# in part because reading out of a dict from a column in a DataFrame needs loops when done this way
# and in part because we were in a rush.
# I think it's possible we could pass this to a function and reshape things there, then return
# a set of new columns - but I didn't have time to figure that out under the deadlines we had.
print("Making new columns and getting user labels...")
# first, parse the metadata column JSON and extract started_at / finished_at
classifications['meta_json'] = [json.loads(q) for q in classifications.metadata]
classifications['started_at_str'] = [q['started_at'] for q in classifications.meta_json]
classifications['finished_at_str'] = [q['finished_at'] for q in classifications.meta_json]
# we need to set up a new user id column that's login name if the classification is while logged in,
# session if not (right now "user_name" is login name or hashed IP and, well, read on...)
# in this particular run of this particular project, session is a better tracer of uniqueness than IP
# for anonymous users, because of a bug with some back-end stuff that someone else is fixing
# but we also want to keep the user name if it exists, so let's use this function
#classifications['user_label'] = [get_alternate_sessioninfo(q) for q in classifications.iterrows()]
# only pass the two columns the helper needs into iterrows(), to save memory
classifications['user_label'] = [get_alternate_sessioninfo(q) for q in classifications['user_name meta_json'.split()].iterrows()]
# created_day is the YYYY-MM-DD prefix of the created_at timestamp
classifications['created_day'] = [q[:10] for q in classifications.created_at]
# Get subject info into a format we can actually use
classifications['subject_json'] = [json.loads(q) for q in classifications.subject_data]
'''
ALERT: I think they may have changed the format of the subject_dict such that later projects will have a different structure to this particular json.
That will mean you'll have to adapt this part. Sorry - but hopefully it'll use the format that I note below I wish it had, or something similarly simple.
'''
# extract the subject ID because that's needed later
# Note the subject ID becomes the *index* of the dict, which is actually pretty strange versus
# everything else in the export, and I'd really rather it be included here as "subject_id":"1234567" etc.
#
# You can isolate the keys as a new column but then it's a DictKey type, but stringifying it adds
# all these other characters that you then have to take out. Thankfully all our subject IDs are numbers
# this is a little weird and there must be a better way but... it works
# the subject id is the sole key of each subject_json dict; stringify the
# dict_keys view and strip the wrapper text to recover it (all ids are numeric)
classifications['subject_id'] = [str(q.keys()).replace("dict_keys(['", "").replace("'])", '') for q in classifications.subject_json]
# extract retired status, though not sure we're actually going to use it.
# also, what a mess - you have to extract the subject ID first and then use it to call the subject_json. UGH
# update: we didn't use it and each of these lines takes ages, so commenting it out
#classifications['retired'] = [q[1].subject_json[q[1].subject_id]['retired'] for q in classifications.iterrows()]
# Get annotation info into a format we can actually use
# these annotations are just a single yes or no question, yay
classifications['annotation_json'] = [json.loads(q) for q in classifications.annotations]
# single yes/no-question workflow, so q[0]['value'] is the answer to the only task
classifications['pulsar_classification'] = [q[0]['value'] for q in classifications.annotation_json]
# create a weight parameter but set it to 1.0 for all classifications (unweighted) - may change later
classifications['weight'] = [1.0 for q in classifications.workflow_version]
# also create a count parameter, because at the time of writing this .aggregate('count') was sometimes off by 1
classifications['count'] = [1 for q in classifications.workflow_version]
#######################################################
# discard classifications not in the active workflow #
#######################################################
print("Picking classifications from the active workflow (id %d, version %d.*)" % (active_workflow_id, active_workflow_major))
# use any workflow consistent with this major version, e.g. 6.12 and 6.23 are both 6 so they're both ok
# (int() truncates the minor version part)
# also check it's the correct workflow id
the_active_workflow = [int(q) == active_workflow_major for q in classifications.workflow_version]
this_workflow = classifications.workflow_id == active_workflow_id
in_workflow = this_workflow & the_active_workflow
# note I haven't saved the full DF anywhere because of memory reasons, so if you're debugging:
# classifications_all = classifications.copy()
classifications = classifications[in_workflow]
print("Extracting filenames and subject types...")
# extract whether this is a known pulsar or a candidate that needs classifying - that info is in the
# "#Class" column in the subject metadata (where # means it can't be seen by classifiers).
# the options are "cand" for "candidate", "known" for known pulsar, "disc" for a pulsar that has been
# discovered by this team but is not yet published
# do this after you choose a workflow because #Class doesn't exist for the early subjects so it will break
# also don't send the entirety of classifications into the function, to save memory
#classifications['subject_type'] = [get_subject_type(q) for q in classifications.iterrows()]
#classifications['filename'] = [get_filename(q) for q in classifications.iterrows()]
# again pass only the two columns the helpers need, to save memory
classifications['subject_type'] = [get_subject_type(q) for q in classifications['subject_id subject_json'.split()].iterrows()]
classifications['filename'] = [get_filename(q) for q in classifications['subject_id subject_json'.split()].iterrows()]
# Let me just pause a second to rant again about the fact that subject ID is the index of the subject_json.
# Because of that, because the top-level access to that was-json-now-a-dict requires the subject id rather than
# just being label:value pairs, I have to do an iterrows() and send part of the entire classifications DF into
# a loop so that I can simultaneously access each subject ID *and* the dict, rather than just accessing the
# info from the dict directly, which would be much faster.
# this might be useful for a sanity check later
# first_class_day = min(classifications.created_day).replace(' ', '')
# last_class_day = max(classifications.created_day).replace(' ', '')
# for some reason this is reporting last-classification dates that are days after the actual last
# classification. Not sure? Might be because this is a front-end reporting, so if someone has set
# their computer's time wrong we could get the wrong time here.
# could fix that by using created_at but ... I forgot.
# filesystem-safe timestamp of the latest classification, e.g. '2016-01-15_23h59m'
# (front-end reported time, so a client with a wrong clock can skew this)
last_class_time = max(classifications.finished_at_str)[:16].replace(' ', '_').replace('T', '_').replace(':', 'h')+"m"
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #
#######################################################
# Apply weighting function (or don't) #
#######################################################
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #
# per-classification weighting seed and gold-standard flag, zeroed by default
classifications['seed'] = [0 for q in classifications.weight]
classifications['is_gs'] = [0 for q in classifications.weight]
if apply_weight > 0:
    print("  Computing user weights...")
    # for now this is assuming all subjects marked as "known" or "disc" are pulsars
    # and also "fake" are simulated pulsars
    is_known = (classifications.subject_type == 'known') | (classifications.subject_type == 'disc') | (classifications.subject_type == 'fake')
    #is_candidate = np.invert(is_known)
    # mark the gold-standard classifications (those on known/disc/fake subjects)
    classifications.loc[is_known, 'is_gs'] = 1
    ok_incr = 1.0 # upweight if correct
    oops_incr = -2.0 # downweight more if incorrect
    # find the correct classifications of known pulsars
    ok_class = (is_known) & (classifications.pulsar_classification == 'Yes')
    # find the incorrect classifications of known pulsars
    oops_class = (is_known) & (classifications.pulsar_classification == 'No')
    # set the individual seeds
    classifications.loc[ok_class, 'seed'] = ok_incr
    classifications.loc[oops_class, 'seed'] = oops_incr
    # then group classifications by user name, which will weight logged in as well as not-logged-in (the latter by session)
    by_user = classifications.groupby('user_label')
    # get the user's summed seed, which goes into the exponent for the weight
    user_exp = by_user.seed.aggregate('sum')
    # then set up the DF that will contain the weights etc, and fill it
    user_weights = pd.DataFrame(user_exp)
    user_weights.columns = ['seed']
    user_weights['user_label'] = user_weights.index
    user_weights['nclass_user'] = by_user['count'].aggregate('sum')
    user_weights['n_gs'] = by_user['is_gs'].aggregate('sum')
    user_weights['weight'] = [assign_weight(q, apply_weight) for q in user_weights.iterrows()]
    #user_weights['weight'] = [assign_weight_old(q) for q in user_exp]
    # if you want sum(unweighted classification count) == sum(weighted classification count), do this
    if normalise_weights:
        user_weights.weight *= float(len(classifications))/float(sum(user_weights.weight * user_weights.nclass_user))
    # weights are assigned, now need to match them up to the main classifications table
    # making sure that this weight keeps the name 'weight' and the other gets renamed (suffixes flag)
    # if apply_weight == 0 then we won't enter this branch and the old "weights" will stay
    # as they are, i.e. == 1 uniformly.
    classifications_old = classifications.copy()
    classifications = pd.merge(classifications_old, user_weights, how='left',
                              on='user_label',
                              sort=False, suffixes=('_2', ''), copy=True)
else:
    # just make a collated classification count array so we can print it to the screen
    by_user = classifications.groupby('user_label')
    user_exp = by_user.seed.aggregate('sum')
    user_weights = pd.DataFrame(user_exp)
    user_weights.columns = ['seed']
    #user_weights['user_label'] = user_weights.index
    user_weights['nclass_user'] = by_user['count'].aggregate('sum')
    user_weights['n_gs'] = by_user['is_gs'].aggregate('sum')
    # UNWEIGHTED
    user_weights['weight'] = [1 for q in user_exp]
# grab basic stats
n_subj_tot = len(classifications.subject_data.unique())
by_subject = classifications.groupby('subject_id')
# NOTE(review): subj_class doesn't appear to be used later in this script --
# verify before removing
subj_class = by_subject.created_at.aggregate('count')
all_users = classifications.user_label.unique()
n_user_tot = len(all_users)
n_user_unreg = sum([q.startswith('not-logged-in-') for q in all_users])
# obviously if we didn't weight then we don't need to get stats on weights
if apply_weight > 0:
    user_weight_mean = np.mean(user_weights.weight)
    user_weight_median = np.median(user_weights.weight)
    user_weight_25pct = np.percentile(user_weights.weight, 25)
    user_weight_75pct = np.percentile(user_weights.weight, 75)
    user_weight_min = min(user_weights.weight)
    user_weight_max = max(user_weights.weight)
nclass_mean = np.mean(user_weights.nclass_user)
nclass_median = np.median(user_weights.nclass_user)
nclass_tot = len(classifications)
user_weights.sort_values(['nclass_user'], ascending=False, inplace=True)
# If you want to print out a file of classification counts per user, with colors for making a treemap
# honestly I'm not sure why you wouldn't want to print this, as it's very little extra effort
if counts_out:
    print("Printing classification counts to %s..." % counts_out_file)
    # bug fix: the original wrote to an undefined name "user_weight";
    # the table is called user_weights throughout this script
    user_weights['color'] = [randcolor(q) for q in user_weights.index]
    user_weights.to_csv(counts_out_file)
## ## ## ## ## ## ## ## ## ## ## ## ## ## #
#######################################################
# Print out basic project info #
#######################################################
## ## ## ## ## ## ## ## ## ## ## ## ## ## #
print("%d classifications from %d users, %d registered and %d unregistered.\n" % (nclass_tot, n_user_tot, n_user_tot - n_user_unreg, n_user_unreg))
print("Mean n_class per user %.1f, median %.1f." % (nclass_mean, nclass_median))
if apply_weight > 0:
    print("Mean user weight %.3f, median %.3f, with the middle 50 percent of users between %.3f and %.3f." % (user_weight_mean, user_weight_median, user_weight_25pct, user_weight_75pct))
    print("The min user weight is %.3f and the max user weight is %.3f.\n" % (user_weight_min, user_weight_max))
    cols_print = 'nclass_user weight'.split()
else:
    # keep this a list (the original used a bare string here) so the
    # leaderboard below prints a DataFrame in both branches
    cols_print = ['nclass_user']
# don't make this leaderboard public unless you want to gamify your users in ways we already know
# have unintended and sometimes negative consequences. This is just for your information.
print("Classification leaderboard:")
print(user_weights[cols_print].head(20))
# bug fix: the original called gini(user_weight[...]) on an undefined name
print("Gini coefficient for project: %.3f" % gini(user_weights['nclass_user']))
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #
#######################################################
# Aggregate classifications, unweighted and weighted #
#######################################################
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #
print("\nAggregating classifications...\n")
# one aggregated row per subject; only the columns aggregate_class needs
class_agg = by_subject['weight count pulsar_classification subject_type filename'.split()].apply(aggregate_class)
# really ought to replace all the NaNs with 0.0
#######################################################
# Write to files #
#######################################################
#
# add value-added columns
#
# let people look up the subject on Talk directly from the aggregated file
# let people look up the subject on Talk directly from the aggregated file
class_agg['link'] = ['https://www.zooniverse.org/projects/zooniverse/pulsar-hunters/talk/subjects/'+str(q) for q in class_agg.index]
# after we do the merges below the new indices might not be linked to the subject id, so save it explicitly
class_agg['subject_id'] = [str(q) for q in class_agg.index]
# match up all the ancillary file data. Maybe there's a faster way to do this than with a chain but meh,
# it's actually not *that* slow compared to the clusterf*ck of for loops in the column assignment part above
# chain of left merges to attach the ancillary data (Talk tags, original
# filenames, possible-known-pulsar flags) to the aggregated table
class_agg_old = class_agg.copy()
class_agg_interm = pd.merge(class_agg_old, subj_tags, how='left', left_index=True, right_index=True, sort=False, copy=True)
class_agg_interm2 = pd.merge(class_agg_interm, matched_filenames, how='left', left_on='filename', right_on='Pulsar Hunters File', sort=False, copy=True)
class_agg = pd.merge(class_agg_interm2, possible_knowns, how='left', left_on='filename', right_on='Zooniverse name', sort=False, copy=True)
# fill in the is_poss_known column with False where it is currently NaN:
# it's either True or NaN, and pd.notnull maps True -> True, NaN -> False
# (clearer than the original np.invert(pd.isnull(...)) double negative)
class_agg['is_poss_known'] = pd.notnull(class_agg['is_poss_known'])
# make the list ranked by p_Yes_weight
class_agg.sort_values(['subject_type','p_Yes_weight'], ascending=False, inplace=True)
print("Writing aggregated output to file %s...\n" % outfile)
pd.DataFrame(class_agg).to_csv(outfile)
# Now make files ranked by p_Yes, one with all subjects classified and one with only candidates
# /Users/vrooje/anaconda/bin/ipython:1: FutureWarning: sort(columns=....) is deprecated, use sort_values(by=.....)
# #!/bin/bash /Users/vrooje/anaconda/bin/python.app
#class_agg.sort('p_Yes_weight', ascending=False, inplace=True)
class_agg.sort_values(['p_Yes_weight'], ascending=False, inplace=True)
# I'd rather note the last classification date than the date we happen to produce the file
# rightnow = datetime.datetime.now().strftime('%Y-%M-%D_%H:%M')
# rankfile_all = rankfile_stem + rightnow + ".csv"
rankfile_all = 'all_'+rankfile_stem + last_class_time + ".csv"
# there go those hard-coded columns again
rank_cols = ['subject_id', 'filename', 'p_Yes_weight', 'count_weighted', 'p_Yes', 'count_unweighted', 'subject_type', 'link', 'user_tag', 'HTRU-N File']
print("Writing full ranked list to file %s...\n" % rankfile_all)
# write just the weighted yes percentage, the weighted count, the subject type, and the link to the subject page
# the subject ID is the index so it will be written anyway
pd.DataFrame(class_agg[rank_cols]).to_csv(rankfile_all)
rankfile = 'cand_allsubj_'+rankfile_stem + last_class_time + ".csv"
print("Writing candidate-only ranked list to file %s...\n" % rankfile)
# also only include entries where there were at least 5 weighted votes tallied
# and only "cand" subject_type objects
classified_candidate = (class_agg.count_weighted > 5) & (class_agg.subject_type == 'cand')
pd.DataFrame(class_agg[rank_cols][classified_candidate]).to_csv(rankfile)
rankfile_unk = 'cand_'+rankfile_stem + last_class_time + ".csv"
print("Writing candidate-only, unknown-only ranked list to file %s...\n" % rankfile_unk)
# same cuts as above, plus drop anything flagged as a possible known pulsar
classified_unknown_candidate = (classified_candidate) & (np.invert(class_agg.is_poss_known))
pd.DataFrame(class_agg[rank_cols][classified_unknown_candidate]).to_csv(rankfile_unk)
# copy the candidate list into Google Drive so others can see it, overwriting previous versions
# Note: this is the way I instantly shared the new aggregated results with collaborators, because
# Google Drive automatically syncs with the online version. Dropbox would work too, etc. YMMV
# NOTE(review): hard-coded, user-specific paths -- this only works on the
# original author's machine; also os.system with single-quoted interpolation
# would break on a path containing a quote character
cpfile = "/Users/vrooje/Google Drive/pulsar_hunters_share/all_candidates_ranked_by_classifications_%dclass.csv" % nclass_tot
print("Copying to Google Drive folder as %s..." % cpfile)
os.system("cp -f '%s' '%s'" % (rankfile, cpfile))
# and the unknown candidate sub-list
cpfile2 = "/Users/vrooje/Google Drive/pulsar_hunters_share/unknown_candidates_ranked_by_classifications_%dclass.csv" % nclass_tot
print("Copying to Google Drive folder as %s..." % cpfile2)
os.system("cp -f '%s' '%s'" % (rankfile_unk, cpfile2))
# and just for the record, all subjects.
cpfile3 = "/Users/vrooje/Google Drive/pulsar_hunters_share/all_subjects_ranked_by_classifications_%dclass.csv" % nclass_tot
print("... and %s" % cpfile3)
os.system("cp -f '%s' '%s'" % (rankfile_all, cpfile3))
#done.
| [
"pandas.Series",
"numpy.mean",
"json.loads",
"numpy.median",
"pandas.isnull",
"numpy.log10",
"pandas.read_csv",
"pandas.merge",
"numpy.invert",
"numpy.sum",
"numpy.percentile",
"sys.exit",
"pandas.DataFrame",
"os.system"
] | [((14656, 14681), 'pandas.read_csv', 'pd.read_csv', (['classfile_in'], {}), '(classfile_in)\n', (14667, 14681), True, 'import pandas as pd\n'), ((14950, 14972), 'pandas.DataFrame', 'pd.DataFrame', (['talkjson'], {}), '(talkjson)\n', (14962, 14972), True, 'import pandas as pd\n'), ((16050, 16092), 'pandas.read_csv', 'pd.read_csv', (['filename_master_list_filename'], {}), '(filename_master_list_filename)\n', (16061, 16092), True, 'import pandas as pd\n'), ((16256, 16284), 'pandas.read_csv', 'pd.read_csv', (['poss_match_file'], {}), '(poss_match_file)\n', (16267, 16284), True, 'import pandas as pd\n'), ((27453, 27486), 'numpy.mean', 'np.mean', (['user_weights.nclass_user'], {}), '(user_weights.nclass_user)\n', (27460, 27486), True, 'import numpy as np\n'), ((27503, 27538), 'numpy.median', 'np.median', (['user_weights.nclass_user'], {}), '(user_weights.nclass_user)\n', (27512, 27538), True, 'import numpy as np\n'), ((30646, 30755), 'pandas.merge', 'pd.merge', (['class_agg_old', 'subj_tags'], {'how': '"""left"""', 'left_index': '(True)', 'right_index': '(True)', 'sort': '(False)', 'copy': '(True)'}), "(class_agg_old, subj_tags, how='left', left_index=True, right_index\n =True, sort=False, copy=True)\n", (30654, 30755), True, 'import pandas as pd\n'), ((30771, 30908), 'pandas.merge', 'pd.merge', (['class_agg_interm', 'matched_filenames'], {'how': '"""left"""', 'left_on': '"""filename"""', 'right_on': '"""Pulsar Hunters File"""', 'sort': '(False)', 'copy': '(True)'}), "(class_agg_interm, matched_filenames, how='left', left_on=\n 'filename', right_on='Pulsar Hunters File', sort=False, copy=True)\n", (30779, 30908), True, 'import pandas as pd\n'), ((30925, 31056), 'pandas.merge', 'pd.merge', (['class_agg_interm2', 'possible_knowns'], {'how': '"""left"""', 'left_on': '"""filename"""', 'right_on': '"""Zooniverse name"""', 'sort': '(False)', 'copy': '(True)'}), "(class_agg_interm2, possible_knowns, how='left', left_on='filename',\n right_on='Zooniverse name', sort=False, 
copy=True)\n", (30933, 31056), True, 'import pandas as pd\n'), ((34053, 34102), 'os.system', 'os.system', (['("cp -f \'%s\' \'%s\'" % (rankfile, cpfile))'], {}), '("cp -f \'%s\' \'%s\'" % (rankfile, cpfile))\n', (34062, 34102), False, 'import sys, os\n'), ((34330, 34384), 'os.system', 'os.system', (['("cp -f \'%s\' \'%s\'" % (rankfile_unk, cpfile2))'], {}), '("cp -f \'%s\' \'%s\'" % (rankfile_unk, cpfile2))\n', (34339, 34384), False, 'import sys, os\n'), ((34581, 34635), 'os.system', 'os.system', (['("cp -f \'%s\' \'%s\'" % (rankfile_all, cpfile3))'], {}), '("cp -f \'%s\' \'%s\'" % (rankfile_all, cpfile3))\n', (34590, 34635), False, 'import sys, os\n'), ((4533, 4550), 'pandas.DataFrame', 'pd.DataFrame', (['grp'], {}), '(grp)\n', (4545, 4550), True, 'import pandas as pd\n'), ((4977, 4995), 'numpy.sum', 'np.sum', (['ans_ct_tot'], {}), '(ans_ct_tot)\n', (4983, 4995), True, 'import numpy as np\n'), ((5046, 5064), 'numpy.sum', 'np.sum', (['ans_wt_tot'], {}), '(ans_wt_tot)\n', (5052, 5064), True, 'import numpy as np\n'), ((16964, 16977), 'json.loads', 'json.loads', (['q'], {}), '(q)\n', (16974, 16977), False, 'import json\n'), ((18063, 18076), 'json.loads', 'json.loads', (['q'], {}), '(q)\n', (18073, 18076), False, 'import json\n'), ((19634, 19647), 'json.loads', 'json.loads', (['q'], {}), '(q)\n', (19644, 19647), False, 'import json\n'), ((24923, 24945), 'pandas.DataFrame', 'pd.DataFrame', (['user_exp'], {}), '(user_exp)\n', (24935, 24945), True, 'import pandas as pd\n'), ((25968, 26088), 'pandas.merge', 'pd.merge', (['classifications_old', 'user_weights'], {'how': '"""left"""', 'on': '"""user_label"""', 'sort': '(False)', 'suffixes': "('_2', '')", 'copy': '(True)'}), "(classifications_old, user_weights, how='left', on='user_label',\n sort=False, suffixes=('_2', ''), copy=True)\n", (25976, 26088), True, 'import pandas as pd\n'), ((26357, 26379), 'pandas.DataFrame', 'pd.DataFrame', (['user_exp'], {}), '(user_exp)\n', (26369, 26379), True, 'import pandas as pd\n'), 
((27123, 27151), 'numpy.mean', 'np.mean', (['user_weights.weight'], {}), '(user_weights.weight)\n', (27130, 27151), True, 'import numpy as np\n'), ((27177, 27207), 'numpy.median', 'np.median', (['user_weights.weight'], {}), '(user_weights.weight)\n', (27186, 27207), True, 'import numpy as np\n'), ((27233, 27271), 'numpy.percentile', 'np.percentile', (['user_weights.weight', '(25)'], {}), '(user_weights.weight, 25)\n', (27246, 27271), True, 'import numpy as np\n'), ((27297, 27335), 'numpy.percentile', 'np.percentile', (['user_weights.weight', '(75)'], {}), '(user_weights.weight, 75)\n', (27310, 27335), True, 'import numpy as np\n'), ((31276, 31313), 'pandas.isnull', 'pd.isnull', (["class_agg['is_poss_known']"], {}), "(class_agg['is_poss_known'])\n", (31285, 31313), True, 'import pandas as pd\n'), ((33459, 33493), 'numpy.invert', 'np.invert', (['class_agg.is_poss_known'], {}), '(class_agg.is_poss_known)\n', (33468, 33493), True, 'import numpy as np\n'), ((1625, 1636), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1633, 1636), False, 'import sys, os\n'), ((6428, 6448), 'pandas.Series', 'pd.Series', (['class_agg'], {}), '(class_agg)\n', (6437, 6448), True, 'import pandas as pd\n'), ((31504, 31527), 'pandas.DataFrame', 'pd.DataFrame', (['class_agg'], {}), '(class_agg)\n', (31516, 31527), True, 'import pandas as pd\n'), ((32649, 32683), 'pandas.DataFrame', 'pd.DataFrame', (['class_agg[rank_cols]'], {}), '(class_agg[rank_cols])\n', (32661, 32683), True, 'import pandas as pd\n'), ((33055, 33111), 'pandas.DataFrame', 'pd.DataFrame', (['class_agg[rank_cols][classified_candidate]'], {}), '(class_agg[rank_cols][classified_candidate])\n', (33067, 33111), True, 'import pandas as pd\n'), ((33495, 33559), 'pandas.DataFrame', 'pd.DataFrame', (['class_agg[rank_cols][classified_unknown_candidate]'], {}), '(class_agg[rank_cols][classified_unknown_candidate])\n', (33507, 33559), True, 'import pandas as pd\n'), ((12526, 12546), 'pandas.DataFrame', 'pd.DataFrame', (['thegrp'], {}), 
'(thegrp)\n', (12538, 12546), True, 'import pandas as pd\n'), ((9735, 9749), 'numpy.log10', 'np.log10', (['n_gs'], {}), '(n_gs)\n', (9743, 9749), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
from pytorch.utils import flip, _ntuple, HardSigmoid, StochasticDropout, LayerNorm
class ST_round(torch.autograd.Function):
    """Straight-through rounding.

    Forward rounds to the nearest integer; backward ignores the (zero
    almost everywhere) true derivative and passes the incoming gradient
    through, scaled by ``gradient_factor``.
    """
    def __init__(self, gradient_factor=1.0):
        super().__init__()
        self.gradient_factor = gradient_factor

    def forward(self, x):
        return torch.round(x)

    def backward(self, grad_output):
        # Straight-through estimator: gradient is simply scaled, not zeroed.
        return grad_output * self.gradient_factor
class ST_ceil(torch.autograd.Function):
    """Straight-through ceiling.

    Forward applies ``torch.ceil``; backward passes the gradient through,
    scaled by ``gradient_factor`` (use 0.0 to block gradient flow).
    """
    def __init__(self, gradient_factor=1.0):
        super().__init__()
        self.gradient_factor = gradient_factor

    def forward(self, x):
        return torch.ceil(x)

    def backward(self, grad_output):
        # Straight-through estimator: gradient is simply scaled, not zeroed.
        return grad_output * self.gradient_factor
class Quantize(nn.Module):
    '''
    Quantizes the input x to specified bitwidth (bits) and polarity (signed).
    There are four phases of operations, which can be toggled with set_qmode and should be run in order.
    1. 'F': Forward, no quantization - use for standard FP32 training.
    2. 'C': Calibration - run a forward pass in this mode to calibrate an initial setting for self.lt.
    3. 'T': Quantization Threshold Training - train weights/activations and clipping threshold simultaneously.
    4. 'Q': Quantization Training - freeze clipping thresholds and only train quantized weights/activations.
    '''
    def __init__(self, bits, signed, balanced=False, momentum=0.1, percentile=0.5, epsilon=1e-8):
        super(Quantize, self).__init__()
        # Learnable log2 of the clipping threshold.
        self.lt = nn.Parameter(torch.zeros([]))
        self.bits = bits
        self.signed = signed
        # Mid-rise quantizer (levels offset by 0.5) for very low signed bitwidths.
        self.midrise = signed and (bits <= 2)
        self.balanced = balanced
        self.mom = momentum    # EMA momentum for calibration updates of self.lt
        self.pct = percentile  # percentile used to clip outliers during calibration
        self.eps = epsilon     # numerical floor inside log2
        self.qmode = 'F'
        self.hist = []         # history of self.lt values, appended on each non-'F' forward
        self.diag = {'last_in':None, 'last_out':None}
    def set_qmode(self, qmode):
        self.qmode = qmode
    def forward(self, x):
        if self.qmode != 'F':
            self.hist.append(float(self.lt.data.clone().detach().cpu().numpy()))
        if self.qmode in ['C']: # Calibrate
            values = x.clone().detach().cpu().numpy().flatten()
            lim_l = np.percentile(values, self.pct)
            lim_h = np.percentile(values, 100 - self.pct)
            lt_cur = np.log2(np.max(np.abs([lim_l, lim_h])) + self.eps)
            # Fix: removed leftover debug override `lt_cur = 3`, which discarded
            # the percentile-based calibration computed just above.
            self.lt.data = (1 - self.mom) * self.lt + self.mom * lt_cur
        if self.qmode in ['F', 'C']: # Forward pass w/ no quantization
            # Adding 0 * self.lt keeps the parameter attached to the graph
            # even though it does not affect the output in these modes.
            x = x + 0 * self.lt
        if self.qmode in ['T', 'Q']: # Forward pass w/ quantization
            t = torch.exp(ST_ceil(self.qmode == 'T')(self.lt) * np.log(2)) # Only train the threshold self.lt in mode 'T'
            qmax = 2**(self.bits - 1) if self.signed else 2**self.bits
            s = x * qmax / t
            rounded = ST_round()(s - 0.5) + 0.5 if self.midrise else ST_round()(s)
            q = torch.clamp(rounded, -qmax + self.balanced + self.midrise*0.5 if self.signed else 0, qmax - 1 + self.midrise * 0.5) * t / qmax
            if not self.training:
                self.diag = {'last_in':x, 'last_out':q}
            x = q
        return x
class FixedQuantize(nn.Module):
    """Quantizer with a fixed (non-learnable) clipping threshold.

    Modes (see :meth:`set_qmode`): 'F' and 'C' simply clamp the input to
    [-clip, clip]; 'T' and 'Q' quantize it to ``bits`` levels under the
    fixed threshold. Calibration ('C') changes nothing here because the
    threshold is fixed.
    """
    def __init__(self, bits, signed, clip=1.0):
        super(FixedQuantize, self).__init__()
        self.bits = bits
        self.signed = signed
        self.clip = clip
        # Fixed log2 threshold; the tiny offset nudges the value below the
        # exact power so the ceil() applied later does not round it up.
        self.register_buffer('lt', torch.tensor(np.log2(clip) - 1e-8))
        self.qmode = 'F'
        self.hist = []
        self.diag = {'last_in': None, 'last_out': None}

    def set_qmode(self, qmode):
        self.qmode = qmode

    def forward(self, x):
        if self.qmode != 'F':
            # Record the (constant) threshold for parity with Quantize.
            self.hist.append(float(self.lt.data.clone().detach().cpu().numpy()))
        if self.qmode not in ('T', 'Q'):
            # 'F' and 'C': no quantization, just clip to the fixed range.
            return torch.clamp(x, -self.clip, self.clip)
        # Effective threshold 2**ceil(lt); gradient reaches lt only in 'T'.
        t = torch.exp(ST_ceil(self.qmode == 'T')(self.lt) * np.log(2))
        qmax = 2**(self.bits - 1) if self.signed else 2**self.bits
        scaled = x * qmax / t
        q = torch.clamp(ST_round()(scaled), -qmax if self.signed else 0, qmax - 1) * t / qmax
        if not self.training:
            self.diag = {'last_in': x, 'last_out': q}
        return q
class QModule(nn.Module):
    """Base class for quantized modules.

    Carries a quantization-mode flag ('F'/'C'/'T'/'Q') and broadcasts mode
    changes to every submodule that also understands ``set_qmode``.
    The default forward is the identity.
    """
    def __init__(self):
        super(QModule, self).__init__()
        self.qmode = 'F'

    def set_qmode(self, qmode):
        # Propagate the new mode to every submodule exposing set_qmode.
        def propagate(module):
            if 'set_qmode' in dir(module):
                module.qmode = qmode
        return self.apply(propagate)

    def forward(self, x):
        # Identity by default; subclasses override.
        return x
class QStreamBatchnormNd(QModule):
    """Streaming batch norm whose affine parameters and output are quantized.

    Batch statistics are tracked with two exponential moving averages: a
    fast per-batch one (``mean_ba``/``msq_ba``) used while training, and a
    slow "moving" one (``mean_mv``/``var_mv``) that is refreshed every
    ``update_every`` steps and used at eval time.
    """
    def __init__(self, N, channels, qbits, update_every_ba=10, update_every_mv=1000, momentum=0.05, eps=1e-5):
        super(QStreamBatchnormNd, self).__init__()
        self.N = N
        self.channels = channels
        self.update_every = int(max(1, update_every_ba))
        # EMA decay factors derived from the two update intervals.
        self.mom_ba = 1.0 - 1.0 / update_every_ba
        self.mom_mv = 1.0 - 1.0 / (update_every_mv / update_every_ba)
        # NOTE(review): the `eps` argument (default 1e-5) is ignored and a
        # hard-coded 1e-8 is used instead -- confirm this is intentional.
        self.eps = 1e-8
        self.gamma = nn.Parameter(torch.Tensor(channels))
        self.beta = nn.Parameter(torch.Tensor(channels))
        # Diagnostic buffers holding the statistics used by the last forward.
        self.register_buffer('mu', torch.zeros(channels))
        self.register_buffer('std', torch.zeros(channels))
        # Fast (per-batch) EMA accumulators of mean and mean-square.
        self.register_buffer('mean_ba', torch.zeros(channels))
        self.register_buffer('msq_ba', torch.zeros(channels))
        # Slow (moving) statistics used in eval mode.
        self.register_buffer('mean_mv', torch.zeros(channels))
        self.register_buffer('var_mv', torch.ones(channels))
        self.register_buffer('step', torch.Tensor([0]))
        # Fixed quantizers for the affine parameters and the output activation.
        self.qgamma = FixedQuantize(qbits['b'], signed=True, clip=qbits['bmax'])
        self.qbeta = FixedQuantize(qbits['b'], signed=True, clip=qbits['bmax'])
        self.qa = FixedQuantize(qbits['a'], signed=True, clip=qbits['amax'])
        # Initialization
        nn.init.constant_(self.beta, 0.0)
        nn.init.constant_(self.gamma, 1.0)
    def forward(self, X):
        self.step += 1
        # NOTE(review): permute(0,2,3,1) assumes a 4-D NCHW input (i.e. N == 2)
        # even though N is configurable -- confirm before using other N.
        Xflat = X.permute(0,2,3,1).contiguous()
        init_shape = Xflat.shape
        Xflat = Xflat.view(-1, self.channels)
        Xflatd = Xflat.detach()
        if self.training:
            mean = Xflatd.mean(dim=0)
            msq = (Xflatd**2).mean(dim=0)
            # Update the fast EMAs of mean and second moment.
            self.mean_ba = self.mom_ba * self.mean_ba + (1 - self.mom_ba) * mean
            self.msq_ba = self.mom_ba * self.msq_ba + (1 - self.mom_ba) * msq
            # EMA bias correction (Adam-style), then variance from moments.
            bias_correct = 1 - self.mom_ba**self.step
            mu_ba = self.mean_ba / bias_correct
            # relu() guards against tiny negative variances from rounding.
            var_ba = F.relu(self.msq_ba / bias_correct - mu_ba**2)
            std_ba = torch.sqrt(var_ba + self.eps)
            if self.step % self.update_every == 0:
                # Periodically fold the fast statistics into the slow ones.
                self.mean_mv = self.mom_mv * self.mean_mv + (1 - self.mom_mv) * mu_ba
                self.var_mv = self.mom_mv * self.var_mv + (1 - self.mom_mv) * var_ba
        else:
            mu_ba = self.mean_mv
            std_ba = torch.sqrt(self.var_mv + self.eps)
        self.mu = mu_ba
        self.std = std_ba
        # Normalize, apply the quantized affine transform, quantize the result.
        Y = self.qgamma(self.gamma) * (Xflat - self.mu) / self.std + self.qbeta(self.beta)
        return self.qa(Y.view(init_shape).permute(0,3,1,2).contiguous())
    def force_quantize(self):
        # Snap the stored parameters to their quantized values in place.
        self.gamma.data = self.qgamma(self.gamma.data)
        self.beta.data = self.qbeta(self.beta.data)
class QConvNd(QModule):
    """N-dimensional quantized convolution.

    Weights, bias and activations are clipped/quantized with ``FixedQuantize``.
    After the convolution either a quantized bias is added (``post='bias'``)
    or a streaming batch norm is applied (``post='bn'``).
    """
    def __init__(self, N, in_channels, out_channels, kernel_size, qbits, stride=1, padding=0, dilation=1, transposed=False, groups=1, post='bias', relu=False):
        super(QConvNd, self).__init__()
        if in_channels % groups != 0: raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0: raise ValueError('out_channels must be divisible by groups')
        self.N = N
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _ntuple(N)(kernel_size)
        self.stride = _ntuple(N)(stride)
        self.padding = _ntuple(N)(padding)
        self.dilation = _ntuple(N)(dilation)
        self.transposed = transposed
        self.output_padding = _ntuple(N)(0)
        self.groups = groups
        self.post = post
        self.relu = relu
        self.convNd = F.conv1d if N==1 else F.conv3d if N==3 else F.conv2d
        # Fix: unpack the normalized tuple (self.kernel_size) so an int
        # kernel_size also works; `*kernel_size` failed for scalars.
        if transposed: self.weight = nn.Parameter(torch.Tensor(in_channels, out_channels // groups, *self.kernel_size))
        else: self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels // groups, *self.kernel_size))
        if post == 'bias': self.bias = nn.Parameter(torch.Tensor(out_channels))
        else: self.register_parameter('bias', None)
        # Fix: was QStreamBatchnormNd(2, 128, qbits=qbits[1:]) -- slicing a
        # dict raises TypeError and 128 ignored the actual channel count.
        if post == 'bn': self.bn = QStreamBatchnormNd(N, out_channels, qbits=qbits)
        in_size = in_channels * np.prod(self.kernel_size)
        nlvar = 2 # Assumes ReLU will follow at some point.
        # Variance-preserving scale, folded to a power of two so it composes
        # cleanly with the quantized weights.
        self.wmult = np.float32(2**(np.floor(np.log2(np.sqrt(3 * nlvar / in_size)))))
        # Quantization
        self.q_w = FixedQuantize(bits=qbits['w'], signed=True, clip=qbits['wmax'])
        if post == 'bias': self.q_b = FixedQuantize(bits=qbits['b'], signed=True, clip=qbits['bmax'])
        # Activations are unsigned on the assumption that a ReLU follows.
        # NOTE(review): QLinear uses signed=(not relu) here -- possibly the
        # original intent; kept as signed=False to preserve behavior.
        self.q_a = FixedQuantize(bits=qbits['a'], signed=False, clip=qbits['amax'])
        # Initialization
        nn.init.uniform_(self.weight, -1, 1)
        if self.bias is not None:
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / np.sqrt(fan_in)
            nn.init.uniform_(self.bias, -bound, bound)
    def forward(self, x):
        # Quantize weights on the fly; wmult restores the float dynamic range.
        self.wq = self.wmult * self.q_w(self.weight)
        if self.post == 'bias':
            bq = self.q_b(self.bias).view(1,-1,1,1)  # NOTE(review): assumes N == 2
        else:
            bq = 0
        Z = self.convNd(x, self.wq, None, self.stride, self.padding, self.dilation, self.groups)
        if self.post == 'bias':
            # Fix: q_b only exists on the bias path; previously it was applied
            # unconditionally and crashed with AttributeError for post='bn'.
            Z = self.q_b(Z) + bq
            Z = self.q_b(Z)
        if self.post == 'bn': Z = self.bn(Z)
        self.Z = Z
        return self.q_a(self.Z)
    def force_quantize(self):
        """Snap weights (and bias, if any) to their quantized values in place."""
        self.weight.data = self.q_w(self.weight.data)
        if self.post == 'bias': self.bias.data = self.q_b(self.bias.data)
    def get_weight_as_matrix(self):
        """Return the weight tensor reshaped to (out_channels, -1)."""
        return self.weight.view([self.weight.shape[0], -1])
class QLinear(QModule):
    """Quantized fully-connected layer.

    Weights, bias and activations are clipped/quantized with
    ``FixedQuantize``. With ``postact=False`` the output quantizer is an
    identity ``QModule``.
    """
    def __init__(self, in_features, out_features, qbits, bias=True, relu=False, postact=True):
        super(QLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
        if bias: self.bias = nn.Parameter(torch.Tensor(out_features))
        else: self.register_parameter('bias', None)
        self.relu = relu
        in_size = in_features
        nlvar = 2 if relu else 1
        # Variance-preserving scale, folded to a power of two so it composes
        # cleanly with the quantized weights.
        self.wmult = np.float32(2**(np.floor(np.log2(np.sqrt(3 * nlvar / in_size)))))
        # Quantization
        self.q_w = FixedQuantize(bits=qbits['w'], signed=True, clip=qbits['wmax'])
        self.q_b = FixedQuantize(bits=qbits['b'], signed=True, clip=qbits['bmax'])
        self.q_a = FixedQuantize(bits=qbits['a'], signed=(not relu), clip=qbits['amax']) if postact else QModule()
        # Initialization
        nn.init.uniform_(self.weight, -1, 1)
        if self.bias is not None:
            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / np.sqrt(fan_in)
            nn.init.uniform_(self.bias, -bound, bound)
    def forward(self, x):
        wq = self.wmult * self.q_w(self.weight)
        # Fix: guard the bias path -- previously self.bias was dereferenced
        # unconditionally and crashed with bias=False (self.bias is None).
        bq = self.q_b(self.bias).view(1,-1) if self.bias is not None else 0
        Z = F.linear(x, wq, None)
        Z = self.q_b(Z) + bq
        Z = self.q_b(Z)
        return self.q_a(Z)
    def force_quantize(self):
        """Snap weight (and bias, if present) to quantized values in place."""
        self.weight.data = self.q_w(self.weight.data)
        if self.bias is not None:
            self.bias.data = self.q_b(self.bias.data)
| [
"numpy.prod",
"numpy.sqrt",
"torch.nn.init.constant_",
"numpy.log",
"torch.sqrt",
"torch.nn.functional.linear",
"pytorch.utils._ntuple",
"torch.nn.init.uniform_",
"numpy.abs",
"torch.Tensor",
"torch.ceil",
"torch.nn.init._calculate_fan_in_and_fan_out",
"torch.round",
"torch.nn.functional.r... | [((388, 402), 'torch.round', 'torch.round', (['x'], {}), '(x)\n', (399, 402), False, 'import torch\n'), ((704, 717), 'torch.ceil', 'torch.ceil', (['x'], {}), '(x)\n', (714, 717), False, 'import torch\n'), ((6631, 6664), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.beta', '(0.0)'], {}), '(self.beta, 0.0)\n', (6648, 6664), True, 'import torch.nn as nn\n'), ((6673, 6707), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.gamma', '(1.0)'], {}), '(self.gamma, 1.0)\n', (6690, 6707), True, 'import torch.nn as nn\n'), ((10150, 10186), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['self.weight', '(-1)', '(1)'], {}), '(self.weight, -1, 1)\n', (10166, 10186), True, 'import torch.nn as nn\n'), ((12069, 12105), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['self.weight', '(-1)', '(1)'], {}), '(self.weight, -1, 1)\n', (12085, 12105), True, 'import torch.nn as nn\n'), ((12441, 12462), 'torch.nn.functional.linear', 'F.linear', (['x', 'wq', 'None'], {}), '(x, wq, None)\n', (12449, 12462), True, 'import torch.nn.functional as F\n'), ((1612, 1627), 'torch.zeros', 'torch.zeros', (['[]'], {}), '([])\n', (1623, 1627), False, 'import torch\n'), ((2283, 2314), 'numpy.percentile', 'np.percentile', (['values', 'self.pct'], {}), '(values, self.pct)\n', (2296, 2314), True, 'import numpy as np\n'), ((2335, 2372), 'numpy.percentile', 'np.percentile', (['values', '(100 - self.pct)'], {}), '(values, 100 - self.pct)\n', (2348, 2372), True, 'import numpy as np\n'), ((4999, 5036), 'torch.clamp', 'torch.clamp', (['x', '(-self.clip)', 'self.clip'], {}), '(x, -self.clip, self.clip)\n', (5010, 5036), False, 'import torch\n'), ((5844, 5866), 'torch.Tensor', 'torch.Tensor', (['channels'], {}), '(channels)\n', (5856, 5866), False, 'import torch\n'), ((5904, 5926), 'torch.Tensor', 'torch.Tensor', (['channels'], {}), '(channels)\n', (5916, 5926), False, 'import torch\n'), ((5964, 5985), 'torch.zeros', 'torch.zeros', (['channels'], {}), '(channels)\n', (5975, 
5985), False, 'import torch\n'), ((6023, 6044), 'torch.zeros', 'torch.zeros', (['channels'], {}), '(channels)\n', (6034, 6044), False, 'import torch\n'), ((6086, 6107), 'torch.zeros', 'torch.zeros', (['channels'], {}), '(channels)\n', (6097, 6107), False, 'import torch\n'), ((6149, 6170), 'torch.zeros', 'torch.zeros', (['channels'], {}), '(channels)\n', (6160, 6170), False, 'import torch\n'), ((6212, 6233), 'torch.zeros', 'torch.zeros', (['channels'], {}), '(channels)\n', (6223, 6233), False, 'import torch\n'), ((6275, 6295), 'torch.ones', 'torch.ones', (['channels'], {}), '(channels)\n', (6285, 6295), False, 'import torch\n'), ((6334, 6351), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (6346, 6351), False, 'import torch\n'), ((7307, 7354), 'torch.nn.functional.relu', 'F.relu', (['(self.msq_ba / bias_correct - mu_ba ** 2)'], {}), '(self.msq_ba / bias_correct - mu_ba ** 2)\n', (7313, 7354), True, 'import torch.nn.functional as F\n'), ((7374, 7403), 'torch.sqrt', 'torch.sqrt', (['(var_ba + self.eps)'], {}), '(var_ba + self.eps)\n', (7384, 7403), False, 'import torch\n'), ((7695, 7729), 'torch.sqrt', 'torch.sqrt', (['(self.var_mv + self.eps)'], {}), '(self.var_mv + self.eps)\n', (7705, 7729), False, 'import torch\n'), ((8654, 8664), 'pytorch.utils._ntuple', '_ntuple', (['N'], {}), '(N)\n', (8661, 8664), False, 'from pytorch.utils import flip, _ntuple, HardSigmoid, StochasticDropout, LayerNorm\n'), ((8708, 8718), 'pytorch.utils._ntuple', '_ntuple', (['N'], {}), '(N)\n', (8715, 8718), False, 'from pytorch.utils import flip, _ntuple, HardSigmoid, StochasticDropout, LayerNorm\n'), ((8757, 8767), 'pytorch.utils._ntuple', '_ntuple', (['N'], {}), '(N)\n', (8764, 8767), False, 'from pytorch.utils import flip, _ntuple, HardSigmoid, StochasticDropout, LayerNorm\n'), ((8807, 8817), 'pytorch.utils._ntuple', '_ntuple', (['N'], {}), '(N)\n', (8814, 8817), False, 'from pytorch.utils import flip, _ntuple, HardSigmoid, StochasticDropout, LayerNorm\n'), ((8899, 8909), 
'pytorch.utils._ntuple', '_ntuple', (['N'], {}), '(N)\n', (8906, 8909), False, 'from pytorch.utils import flip, _ntuple, HardSigmoid, StochasticDropout, LayerNorm\n'), ((9568, 9593), 'numpy.prod', 'np.prod', (['self.kernel_size'], {}), '(self.kernel_size)\n', (9575, 9593), True, 'import numpy as np\n'), ((10245, 10295), 'torch.nn.init._calculate_fan_in_and_fan_out', 'nn.init._calculate_fan_in_and_fan_out', (['self.weight'], {}), '(self.weight)\n', (10282, 10295), True, 'import torch.nn as nn\n'), ((10348, 10390), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['self.bias', '(-bound)', 'bound'], {}), '(self.bias, -bound, bound)\n', (10364, 10390), True, 'import torch.nn as nn\n'), ((11376, 11415), 'torch.Tensor', 'torch.Tensor', (['out_features', 'in_features'], {}), '(out_features, in_features)\n', (11388, 11415), False, 'import torch\n'), ((12164, 12214), 'torch.nn.init._calculate_fan_in_and_fan_out', 'nn.init._calculate_fan_in_and_fan_out', (['self.weight'], {}), '(self.weight)\n', (12201, 12214), True, 'import torch.nn as nn\n'), ((12267, 12309), 'torch.nn.init.uniform_', 'nn.init.uniform_', (['self.bias', '(-bound)', 'bound'], {}), '(self.bias, -bound, bound)\n', (12283, 12309), True, 'import torch.nn as nn\n'), ((9153, 9216), 'torch.Tensor', 'torch.Tensor', (['in_channels', '(out_channels // groups)', '*kernel_size'], {}), '(in_channels, out_channels // groups, *kernel_size)\n', (9165, 9216), False, 'import torch\n'), ((9259, 9322), 'torch.Tensor', 'torch.Tensor', (['out_channels', '(in_channels // groups)', '*kernel_size'], {}), '(out_channels, in_channels // groups, *kernel_size)\n', (9271, 9322), False, 'import torch\n'), ((9376, 9402), 'torch.Tensor', 'torch.Tensor', (['out_channels'], {}), '(out_channels)\n', (9388, 9402), False, 'import torch\n'), ((10320, 10335), 'numpy.sqrt', 'np.sqrt', (['fan_in'], {}), '(fan_in)\n', (10327, 10335), True, 'import numpy as np\n'), ((11459, 11485), 'torch.Tensor', 'torch.Tensor', (['out_features'], {}), 
'(out_features)\n', (11471, 11485), False, 'import torch\n'), ((12239, 12254), 'numpy.sqrt', 'np.sqrt', (['fan_in'], {}), '(fan_in)\n', (12246, 12254), True, 'import numpy as np\n'), ((2775, 2784), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (2781, 2784), True, 'import numpy as np\n'), ((3032, 3154), 'torch.clamp', 'torch.clamp', (['rounded', '(-qmax + self.balanced + self.midrise * 0.5 if self.signed else 0)', '(qmax - 1 + self.midrise * 0.5)'], {}), '(rounded, -qmax + self.balanced + self.midrise * 0.5 if self.\n signed else 0, qmax - 1 + self.midrise * 0.5)\n', (3043, 3154), False, 'import torch\n'), ((4038, 4051), 'numpy.log2', 'np.log2', (['clip'], {}), '(clip)\n', (4045, 4051), True, 'import numpy as np\n'), ((4580, 4589), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (4586, 4589), True, 'import numpy as np\n'), ((4790, 4849), 'torch.clamp', 'torch.clamp', (['rounded', '(-qmax if self.signed else 0)', '(qmax - 1)'], {}), '(rounded, -qmax if self.signed else 0, qmax - 1)\n', (4801, 4849), False, 'import torch\n'), ((2409, 2431), 'numpy.abs', 'np.abs', (['[lim_l, lim_h]'], {}), '([lim_l, lim_h])\n', (2415, 2431), True, 'import numpy as np\n'), ((9707, 9735), 'numpy.sqrt', 'np.sqrt', (['(3 * nlvar / in_size)'], {}), '(3 * nlvar / in_size)\n', (9714, 9735), True, 'import numpy as np\n'), ((11681, 11709), 'numpy.sqrt', 'np.sqrt', (['(3 * nlvar / in_size)'], {}), '(3 * nlvar / in_size)\n', (11688, 11709), True, 'import numpy as np\n')] |
"""
One-element fields
======================
"""
import numpy as np
from bfieldtools.mesh_magnetics import (
scalar_potential_coupling,
vector_potential_coupling,
)
from bfieldtools.mesh_magnetics import (
magnetic_field_coupling,
magnetic_field_coupling_analytic,
)
import trimesh
from mayavi import mlab
# Define the element: a regular hexagon made of six triangles that share a
# center vertex, lying in the z = 0 plane (rim vertices at 60-degree steps).
x = np.sin(np.pi / 6)
y = np.cos(np.pi / 6)
points0 = np.array(
    [[0, 0, 0], [1, 0, 0], [x, y, 0], [-x, y, 0], [-1, 0, 0], [-x, -y, 0], [x, -y, 0]]
)
tris0 = np.array([[0, 1, 2], [0, 2, 3], [0, 3, 4], [0, 4, 5], [0, 5, 6], [0, 6, 1]])
mesh = trimesh.Trimesh(points0, tris0)
# Stream function: 1 at the center vertex, 0 on the rim -- a single
# "one-element" hat function on the mesh.
scalars = np.zeros(7)
scalars[0] = 1
#%%
#%% Plot element
def plot_element():
    """Plot the one-element mesh: filled stream function plus contour lines."""
    # Stream function
    s1 = mlab.triangular_mesh(*points0.T, tris0, scalars=scalars, colormap="viridis")
    # Stream lines
    s2 = mlab.triangular_mesh(*points0.T, tris0, scalars=scalars, colormap="viridis")
    # Draw the same surface again as contour tubes only (no filled color).
    s2.enable_contours = True
    s2.actor.mapper.scalar_range = np.array([0.0, 1.0])
    s2.actor.mapper.scalar_visibility = False
    s2.actor.property.render_lines_as_tubes = True
    s2.actor.property.line_width = 3.0
# Render the element on a white background.
mlab.figure(bgcolor=(1, 1, 1))
plot_element()
#%%
#%% Scalar potential
# Evaluation plane just in front of the element (x ~ 0.01), spanning
# [-2, 2] in y and z; subdivided 7 times for a dense sampling grid.
points = np.array([[0.01, 1, 1], [0.01, 1, -1], [0.01, -1, -1], [0.01, -1, 1]]) * 2
tris = np.array([[0, 1, 2], [2, 3, 0]])
mesh2 = trimesh.Trimesh(points, tris)
for ii in range(7):
    mesh2 = mesh2.subdivide()
# Scalar potential of the element's stream function at the plane vertices.
U = scalar_potential_coupling(mesh, mesh2.vertices, multiply_coeff=True) @ scalars
mlab.figure(bgcolor=(1, 1, 1))
s3 = mlab.triangular_mesh(*mesh2.vertices.T, mesh2.faces, scalars=U, colormap="bwr")
# Show equipotential contours as tubes instead of the filled surface.
s3.enable_contours = True
s3.contour.minimum_contour = -5.2e-07
s3.contour.maximum_contour = 5.2e-07
s3.actor.property.render_lines_as_tubes = True
s3.actor.property.line_width = 3.0
s3.scene.x_plus_view()
plot_element()
#%%
#%% Vector potential
# Evaluation plane just above the element (z ~ 0.01), spanning [-2, 2] in
# x and y; subdivided 5 times.
points = np.array([[1, 1, 0.01], [1, -1, 0.01], [-1, -1, 0.01], [-1, 1, 0.01]]) * 2
tris = np.array([[0, 1, 2], [2, 3, 0]])
mesh3 = trimesh.Trimesh(points, tris)
for ii in range(5):
    mesh3 = mesh3.subdivide()
# Vector potential of the element's stream function at the plane vertices.
A = vector_potential_coupling(mesh, mesh3.vertices) @ scalars
mlab.figure(bgcolor=(1, 1, 1))
# Dashed line glyphs showing the vector potential field.
vectors = mlab.quiver3d(*mesh3.vertices.T, *A, mode="2ddash", color=(0, 0, 1))
vectors.glyph.glyph_source.glyph_position = "center"
vectors.actor.property.render_lines_as_tubes = True
vectors.actor.property.line_width = 3.0
plot_element()
#%%
#%% Magnetic field and its magnitude
from bfieldtools.viz import plot_data_on_vertices

# Evaluation plane at x = 0 (slightly offset in z via the 1.001 entries),
# scaled by 1.1 and subdivided 6 times.
points = (
    np.array([[0.0, 1, 1.001], [0.0, 1, -1], [0.0, -1, -1], [0.0, -1, 1.001]]) * 1.1
)
tris = np.array([[0, 1, 2], [2, 3, 0]])
mesh2 = trimesh.Trimesh(points, tris)
for ii in range(6):
    mesh2 = mesh2.subdivide()
# B0 = magnetic_field_coupling(mesh, mesh2.vertices) @ scalars
# Magnetic field of the element, computed with the analytic coupling.
B1 = magnetic_field_coupling_analytic(mesh, mesh2.vertices) @ scalars
# Color the plane by field magnitude ...
plot_data_on_vertices(
    mesh2, np.linalg.norm(B1, axis=1), ncolors=32, colormap="viridis", vmax=1.5e-6
)
# ... and overlay arrows showing the field direction.
vectors = mlab.quiver3d(
    *mesh2.vertices.T, *B1.T, mode="arrow", color=(1, 0, 1), scale_factor=5e4
)
vectors.glyph.glyph_source.glyph_position = "center"
# vectors = mlab.quiver3d(*mesh2.vertices.T, *B0.T, mode="arrow", color=(1, 0, 0))
# vectors.glyph.glyph_source.glyph_position = "center"
plot_element()
vectors.scene.x_plus_view()
| [
"numpy.linalg.norm",
"mayavi.mlab.figure",
"bfieldtools.mesh_magnetics.magnetic_field_coupling_analytic",
"numpy.array",
"numpy.zeros",
"mayavi.mlab.quiver3d",
"numpy.cos",
"trimesh.Trimesh",
"bfieldtools.mesh_magnetics.scalar_potential_coupling",
"numpy.sin",
"bfieldtools.mesh_magnetics.vector_... | [((354, 371), 'numpy.sin', 'np.sin', (['(np.pi / 6)'], {}), '(np.pi / 6)\n', (360, 371), True, 'import numpy as np\n'), ((376, 393), 'numpy.cos', 'np.cos', (['(np.pi / 6)'], {}), '(np.pi / 6)\n', (382, 393), True, 'import numpy as np\n'), ((404, 500), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 0], [x, y, 0], [-x, y, 0], [-1, 0, 0], [-x, -y, 0], [x, \n -y, 0]]'], {}), '([[0, 0, 0], [1, 0, 0], [x, y, 0], [-x, y, 0], [-1, 0, 0], [-x, -y,\n 0], [x, -y, 0]])\n', (412, 500), True, 'import numpy as np\n'), ((512, 588), 'numpy.array', 'np.array', (['[[0, 1, 2], [0, 2, 3], [0, 3, 4], [0, 4, 5], [0, 5, 6], [0, 6, 1]]'], {}), '([[0, 1, 2], [0, 2, 3], [0, 3, 4], [0, 4, 5], [0, 5, 6], [0, 6, 1]])\n', (520, 588), True, 'import numpy as np\n'), ((596, 627), 'trimesh.Trimesh', 'trimesh.Trimesh', (['points0', 'tris0'], {}), '(points0, tris0)\n', (611, 627), False, 'import trimesh\n'), ((638, 649), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (646, 649), True, 'import numpy as np\n'), ((1144, 1174), 'mayavi.mlab.figure', 'mlab.figure', ([], {'bgcolor': '(1, 1, 1)'}), '(bgcolor=(1, 1, 1))\n', (1155, 1174), False, 'from mayavi import mlab\n'), ((1307, 1339), 'numpy.array', 'np.array', (['[[0, 1, 2], [2, 3, 0]]'], {}), '([[0, 1, 2], [2, 3, 0]])\n', (1315, 1339), True, 'import numpy as np\n'), ((1348, 1377), 'trimesh.Trimesh', 'trimesh.Trimesh', (['points', 'tris'], {}), '(points, tris)\n', (1363, 1377), False, 'import trimesh\n'), ((1512, 1542), 'mayavi.mlab.figure', 'mlab.figure', ([], {'bgcolor': '(1, 1, 1)'}), '(bgcolor=(1, 1, 1))\n', (1523, 1542), False, 'from mayavi import mlab\n'), ((1548, 1627), 'mayavi.mlab.triangular_mesh', 'mlab.triangular_mesh', (['*mesh2.vertices.T', 'mesh2.faces'], {'scalars': 'U', 'colormap': '"""bwr"""'}), "(*mesh2.vertices.T, mesh2.faces, scalars=U, colormap='bwr')\n", (1568, 1627), False, 'from mayavi import mlab\n'), ((1966, 1998), 'numpy.array', 'np.array', (['[[0, 1, 2], [2, 3, 0]]'], {}), '([[0, 
1, 2], [2, 3, 0]])\n', (1974, 1998), True, 'import numpy as np\n'), ((2007, 2036), 'trimesh.Trimesh', 'trimesh.Trimesh', (['points', 'tris'], {}), '(points, tris)\n', (2022, 2036), False, 'import trimesh\n'), ((2149, 2179), 'mayavi.mlab.figure', 'mlab.figure', ([], {'bgcolor': '(1, 1, 1)'}), '(bgcolor=(1, 1, 1))\n', (2160, 2179), False, 'from mayavi import mlab\n'), ((2190, 2258), 'mayavi.mlab.quiver3d', 'mlab.quiver3d', (['*mesh3.vertices.T', '*A'], {'mode': '"""2ddash"""', 'color': '(0, 0, 1)'}), "(*mesh3.vertices.T, *A, mode='2ddash', color=(0, 0, 1))\n", (2203, 2258), False, 'from mayavi import mlab\n'), ((2617, 2649), 'numpy.array', 'np.array', (['[[0, 1, 2], [2, 3, 0]]'], {}), '([[0, 1, 2], [2, 3, 0]])\n', (2625, 2649), True, 'import numpy as np\n'), ((2658, 2687), 'trimesh.Trimesh', 'trimesh.Trimesh', (['points', 'tris'], {}), '(points, tris)\n', (2673, 2687), False, 'import trimesh\n'), ((2990, 3086), 'mayavi.mlab.quiver3d', 'mlab.quiver3d', (['*mesh2.vertices.T', '*B1.T'], {'mode': '"""arrow"""', 'color': '(1, 0, 1)', 'scale_factor': '(50000.0)'}), "(*mesh2.vertices.T, *B1.T, mode='arrow', color=(1, 0, 1),\n scale_factor=50000.0)\n", (3003, 3086), False, 'from mayavi import mlab\n'), ((738, 814), 'mayavi.mlab.triangular_mesh', 'mlab.triangular_mesh', (['*points0.T', 'tris0'], {'scalars': 'scalars', 'colormap': '"""viridis"""'}), "(*points0.T, tris0, scalars=scalars, colormap='viridis')\n", (758, 814), False, 'from mayavi import mlab\n'), ((843, 919), 'mayavi.mlab.triangular_mesh', 'mlab.triangular_mesh', (['*points0.T', 'tris0'], {'scalars': 'scalars', 'colormap': '"""viridis"""'}), "(*points0.T, tris0, scalars=scalars, colormap='viridis')\n", (863, 919), False, 'from mayavi import mlab\n'), ((985, 1005), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (993, 1005), True, 'import numpy as np\n'), ((1225, 1295), 'numpy.array', 'np.array', (['[[0.01, 1, 1], [0.01, 1, -1], [0.01, -1, -1], [0.01, -1, 1]]'], {}), '([[0.01, 1, 1], [0.01, 1, 
-1], [0.01, -1, -1], [0.01, -1, 1]])\n', (1233, 1295), True, 'import numpy as np\n'), ((1433, 1501), 'bfieldtools.mesh_magnetics.scalar_potential_coupling', 'scalar_potential_coupling', (['mesh', 'mesh2.vertices'], {'multiply_coeff': '(True)'}), '(mesh, mesh2.vertices, multiply_coeff=True)\n', (1458, 1501), False, 'from bfieldtools.mesh_magnetics import scalar_potential_coupling, vector_potential_coupling\n'), ((1884, 1954), 'numpy.array', 'np.array', (['[[1, 1, 0.01], [1, -1, 0.01], [-1, -1, 0.01], [-1, 1, 0.01]]'], {}), '([[1, 1, 0.01], [1, -1, 0.01], [-1, -1, 0.01], [-1, 1, 0.01]])\n', (1892, 1954), True, 'import numpy as np\n'), ((2091, 2138), 'bfieldtools.mesh_magnetics.vector_potential_coupling', 'vector_potential_coupling', (['mesh', 'mesh3.vertices'], {}), '(mesh, mesh3.vertices)\n', (2116, 2138), False, 'from bfieldtools.mesh_magnetics import scalar_potential_coupling, vector_potential_coupling\n'), ((2527, 2601), 'numpy.array', 'np.array', (['[[0.0, 1, 1.001], [0.0, 1, -1], [0.0, -1, -1], [0.0, -1, 1.001]]'], {}), '([[0.0, 1, 1.001], [0.0, 1, -1], [0.0, -1, -1], [0.0, -1, 1.001]])\n', (2535, 2601), True, 'import numpy as np\n'), ((2807, 2861), 'bfieldtools.mesh_magnetics.magnetic_field_coupling_analytic', 'magnetic_field_coupling_analytic', (['mesh', 'mesh2.vertices'], {}), '(mesh, mesh2.vertices)\n', (2839, 2861), False, 'from bfieldtools.mesh_magnetics import magnetic_field_coupling, magnetic_field_coupling_analytic\n'), ((2906, 2932), 'numpy.linalg.norm', 'np.linalg.norm', (['B1'], {'axis': '(1)'}), '(B1, axis=1)\n', (2920, 2932), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 <NAME>
#
# Everyone is permitted to copy and distribute verbatim or modified
# copies of this license document, and changing it is allowed as long
# as the name is changed.
#
# DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
# TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
#
# 0. You just DO WHAT THE FUCK YOU WANT TO.
"""
Test matplotlib's plot3d with a flowing data feed (using a random walk)
@author: <NAME>
@license: WTFPL
"""
import argparse
import logging
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as animation
import numpy
import time
import threading
logger = logging.getLogger(__name__)
# Update strategies: redraw via matplotlib's animation API, or from a
# dedicated background thread.
METHOD_ANIMATE = 'method:animate'
METHOD_THREAD = 'method:thread'
# Drawing primitives used to render the walkers.
DRAW_PLOT = 'draw:plot'
DRAW_SCATTER = 'draw:scatter'
# Fallbacks used when no color/marker is given on the command line.
DEFAULT_COLOR = 'b'
DEFAULT_MARKER = 'o'
def randomwalk(n=50, sigma=0.02, alpha=0.95, seed=1):
    """Yield successive positions of n 3D random walkers with memory.

    Adapted from
    http://stackoverflow.com/questions/11874767/real-time-plotting-in-while-loop-with-matplotlib

    :param n: number of independent walkers (points)
    :param sigma: step size (std deviation of each random increment)
    :param alpha: memory factor; the new step is a blend of fresh noise
                  and the previous step, so higher alpha = smoother paths
    :param seed: PRNG seed so the walk is reproducible
    :return: infinite generator yielding a (3, n) array of coordinates,
             each kept inside [0, 1)
    """
    gen = numpy.random.RandomState(seed)
    pos = gen.rand(3, n)
    old_delta = gen.randn(3, n) * sigma
    while True:
        delta = (1. - alpha) * gen.randn(3, n) * sigma + alpha * old_delta
        pos += delta
        # Wrap every coordinate back into [0, 1).  For x already in
        # [0, 1), x % 1 == x, and for any other x, x % 1 is in [0, 1),
        # so one vectorized modulo replaces the original per-element
        # test-and-fix loop with identical results (and no Python loop).
        pos %= 1.0
        old_delta = delta
        yield pos
def log_fps(frames, timediff):
    """Emit one FPS log line, throttled so messages stay infrequent."""
    # Nothing worth reporting during the first second, at frame zero,
    # or between multiples of 100 frames.
    too_early = timediff < 1 or frames == 0
    if too_early or frames % 100:
        return
    # Above 100 FPS, only every 1000th frame gets a message.
    if frames % 1000 and frames > 100 * timediff:
        return
    fps = int(frames / timediff)
    logger.info('Frame {:6d}: FPS {}'.format(frames, fps))
class FeedPlot3d(object):
    """
    Create a 3D plot which simulates a feed of continuous data with a
    random walk.

    Two update strategies are supported (``method``):
    - METHOD_ANIMATE: matplotlib's FuncAnimation drives the redraws;
    - METHOD_THREAD: a daemon thread computes data and redraws itself.
    Points can be rendered either with scatter or with plot (``draw``),
    optionally using blitting (partial redraw) for speed.
    """
    def __init__(self, method, blit=False, draw=None, color=None, marker=None):
        # Rendering configuration; fall back to module-level defaults.
        self.method = method
        self.blit = blit
        self.draw = draw or DRAW_SCATTER
        self.color = color or DEFAULT_COLOR
        self.marker = marker or DEFAULT_MARKER
        # The matplotlib artist (scatter collection or line); created
        # lazily by setup_draw().
        self.plt = None
        # Infinite generator producing (3, n) coordinate arrays.
        self.rw = randomwalk()
        # Setup figure and axes
        self.fig = pyplot.figure()
        self.ax = Axes3D(self.fig)
        self.ax.set_xlabel('X')
        self.ax.set_ylabel('Y')
        self.ax.set_zlabel('Z')
        self.ax.set_title('3D Test')
        self.ax.set_aspect('equal')
        self.ax.set_xlim3d([0, 1])
        self.ax.set_ylim3d([0, 1])
        self.ax.set_zlim3d([0, 1])
        # NOTE(review): Axes.hold() was deprecated/removed in modern
        # matplotlib; this code targets an older release -- confirm.
        self.ax.hold(True)
        logger.debug("Use {}, {}, {}".format(
            self.method, self.draw, "blit" if blit else "noblit"))
        if self.method == METHOD_ANIMATE:
            # Setup animation; fargs carries a one-element list used as a
            # mutable cell to remember the start time across callbacks.
            self.anim = animation.FuncAnimation(
                self.fig,
                self._animate_update_plot, fargs=([0],),
                init_func=self.setup_draw,
                interval=1, blit=self.blit)
        elif self.method == METHOD_THREAD:
            # Start computing thread
            self.setup_draw()
            thread = threading.Thread(target=self._computing_thread)
            thread.daemon = True
            thread.start()
        else:
            raise Exception("Unknown method {}".format(self.method))
    def setup_draw(self):
        """Create the artist on first use and return it as a 1-tuple
        (the iterable-of-artists form FuncAnimation's blitting expects)."""
        if self.plt is None:
            if self.draw == DRAW_SCATTER:
                self.plt = self.ax.scatter(
                    [], [], [],
                    c=self.color, marker=self.marker,
                    animated=(self.method == METHOD_ANIMATE))
            elif self.draw == DRAW_PLOT:
                self.plt = self.ax.plot([], [], [], self.color + self.marker)[0]
            else:
                raise Exception("Unknown drawing {}".format(self.draw))
        return self.plt,
    def _animate_update_plot(self, iframe, start_tic_ptr):
        """Animation callback: fetch the next positions and redraw.

        start_tic_ptr is a one-element list acting as a mutable cell so
        the start time survives between callbacks (set on frame 0).
        """
        if iframe == 0:
            start_tic_ptr[0] = time.time()
        else:
            log_fps(iframe, time.time() - start_tic_ptr[0])
        xyz = next(self.rw)
        if self.draw == DRAW_SCATTER:
            # 3D projection is overwriting 2D properties, which needs to be
            # reset before each drawing
            self.plt.set_alpha(1)
            self.plt.set_facecolors(self.color)
            self.plt.set_offsets(xyz[:2])
            self.plt.set_3d_properties(xyz[2], 'z')
            # Hack if blit is set: force 3D projection
            if self.blit:
                self.plt.do_3d_projection(self.ax.get_renderer_cache())
        elif self.draw == DRAW_PLOT:
            self.plt.set_data(xyz[:2])
            self.plt.set_3d_properties(xyz[2])
        return self.plt,
    def _computing_thread(self):
        """Entry point of the daemon thread which draws the plot forever."""
        # Only redraw background once per second (blit mode caches it).
        background = None
        bkg_tic = None
        start_tic = time.time()
        iframe = 0
        while True:
            tic = time.time()
            iframe += 1
            xyz = next(self.rw)
            if self.draw == DRAW_SCATTER:
                # Use mpl_toolkits.mplot3d.art3d.Patch3DCollection to
                # update everything. As do_3d_projection changes alpha and
                # offsets, they need to be reset beforehand.
                #
                # Note: self.plt.set_array(xyz[2]) may be used to only update
                # z coordinates without changing (x, y)
                self.plt.set_alpha(1)
                self.plt.set_offsets(xyz[:2])
                self.plt.set_3d_properties(xyz[2], 'z')
            elif self.draw == DRAW_PLOT:
                self.plt.set_data(xyz[:2])
                self.plt.set_3d_properties(xyz[2])
            # Without blit: plain full redraw every frame.
            if not self.blit:
                self.fig.canvas.draw()
            else:
                if not self.blit or background is None or bkg_tic + 0.5 <= tic:
                    # Basic drawing and cache the background (artist is
                    # hidden so the cached bitmap excludes the points).
                    self.plt.set_visible(False)
                    self.fig.canvas.draw()
                    self.plt.set_visible(True)
                    background = self.fig.canvas.copy_from_bbox(self.ax.bbox)
                    self.fig.canvas.draw()
                    bkg_tic = tic
                else:
                    # Use blit/partial redrawing
                    self.fig.canvas.restore_region(background)
                    # Blit drawing
                    if self.draw == DRAW_SCATTER:
                        renderer = self.ax.get_renderer_cache()
                        self.plt.do_3d_projection(renderer)
                        self.plt.draw(renderer)
                    else:
                        self.ax.draw_artist(self.plt)
                    self.fig.canvas.blit(self.ax.bbox)
            log_fps(iframe, tic - start_tic)
    @staticmethod
    def loop():
        """Blocking main loop (returns when the window is closed)."""
        pyplot.show(block=True)
def main(argv=None):
    """Parse the command line, build the plot and run it (entry point)."""
    parser = argparse.ArgumentParser(
        description="Test matplotlib's plot3d with a flowing data feed")
    # The four mode-selection flags share the same shape; declare them as
    # a table and register them in one loop.
    const_flags = [
        ('-a', '--animate', 'method', METHOD_ANIMATE,
         "use matplotlib.animation method"),
        ('-t', '--thread', 'method', METHOD_THREAD,
         "use thread method (default)"),
        ('-p', '--plot', 'draw', DRAW_PLOT,
         "draw points with plot (default)"),
        ('-s', '--scatter', 'draw', DRAW_SCATTER,
         "draw points with scatter"),
    ]
    for short_opt, long_opt, dest, const, help_text in const_flags:
        parser.add_argument(short_opt, long_opt, dest=dest,
                            action='store_const', const=const,
                            help=help_text)
    parser.add_argument(
        '-b', '--blit', dest='blit', action='store_const',
        const=True, default=False,
        help="enable blit")
    parser.add_argument(
        '-c', '--color', dest='color', action='store', type=str,
        help="color ('{}' by default)".format(DEFAULT_COLOR))
    parser.add_argument(
        '-m', '--marker', dest='marker', action='store', type=str,
        help="marker ('{}' by default)".format(DEFAULT_MARKER))
    args = parser.parse_args(argv)
    plotter = FeedPlot3d(
        args.method or METHOD_THREAD,
        blit=args.blit,
        draw=args.draw or DRAW_PLOT,
        color=args.color,
        marker=args.marker)
    plotter.loop()
    return 0
if __name__ == '__main__':
    import sys
    # Configure root logging before running so FPS/debug messages show up.
    logging.basicConfig(format='[%(levelname)5s] %(name)s: %(message)s',
                        level=logging.DEBUG)
    sys.exit(main())
| [
"logging.getLogger",
"logging.basicConfig",
"argparse.ArgumentParser",
"matplotlib.animation.FuncAnimation",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.figure",
"threading.Thread",
"time.time",
"numpy.random.RandomState",
"matplotlib.pyplot.show"
] | [((723, 750), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (740, 750), False, 'import logging\n'), ((1160, 1190), 'numpy.random.RandomState', 'numpy.random.RandomState', (['seed'], {}), '(seed)\n', (1184, 1190), False, 'import numpy\n'), ((7389, 7482), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test matplotlib\'s plot3d with a flowing data feed"""'}), '(description=\n "Test matplotlib\'s plot3d with a flowing data feed")\n', (7412, 7482), False, 'import argparse\n'), ((8870, 8964), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(levelname)5s] %(name)s: %(message)s"""', 'level': 'logging.DEBUG'}), "(format='[%(levelname)5s] %(name)s: %(message)s', level=\n logging.DEBUG)\n", (8889, 8964), False, 'import logging\n'), ((2589, 2604), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (2602, 2604), False, 'from matplotlib import pyplot\n'), ((2623, 2639), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['self.fig'], {}), '(self.fig)\n', (2629, 2639), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((5351, 5362), 'time.time', 'time.time', ([], {}), '()\n', (5360, 5362), False, 'import time\n'), ((7307, 7330), 'matplotlib.pyplot.show', 'pyplot.show', ([], {'block': '(True)'}), '(block=True)\n', (7318, 7330), False, 'from matplotlib import pyplot\n'), ((3152, 3285), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['self.fig', 'self._animate_update_plot'], {'fargs': '([0],)', 'init_func': 'self.setup_draw', 'interval': '(1)', 'blit': 'self.blit'}), '(self.fig, self._animate_update_plot, fargs=([0],),\n init_func=self.setup_draw, interval=1, blit=self.blit)\n', (3175, 3285), True, 'import matplotlib.animation as animation\n'), ((4393, 4404), 'time.time', 'time.time', ([], {}), '()\n', (4402, 4404), False, 'import time\n'), ((5420, 5431), 'time.time', 'time.time', ([], {}), '()\n', (5429, 5431), False, 'import time\n'), ((3478, 3525), 'threading.Thread', 
'threading.Thread', ([], {'target': 'self._computing_thread'}), '(target=self._computing_thread)\n', (3494, 3525), False, 'import threading\n'), ((4447, 4458), 'time.time', 'time.time', ([], {}), '()\n', (4456, 4458), False, 'import time\n')] |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math, copy
import attr
import numpy as np
import autodiff as ad
import backend as T
from utils import CharacterGetter
from tensors.quimb_tensors import rand_mps, ham_heis_mpo, load_quimb_tensors, gauge_transform_mps
from graph_ops.graph_generator import split_einsum
from graph_ops.graph_transformer import simplify
from graph_ops.graph_als_optimizer import generate_sequential_optimal_tree
from numpy.core.einsumfunc import _parse_einsum_input
from utils import update_variables
@attr.s()
class MpsGraph(object):
    """
    Produce a graph representing the MPS:
    A-A-A-A-A-A
    | | | | | |
    Each A is a tensor, each line is a leg of the tensor diagram
    representing the contracting index.
    Each tensor is arranged as left leg, right leg, downward leg.
    The left one is arranged as right leg, downward leg, and
    the right one is arranged as left leg, downward leg.
    Variables:
    -------
    1. a einsum node representing the MPS.
    2. The input nodes of the einsum node.
    """
    output = attr.ib()
    # BUGFIX: attr.ib(default=[]) shares ONE list object across every
    # instance (the classic mutable-default pitfall); attrs provides
    # factory= to build a fresh list per instance.
    inputs = attr.ib(factory=list)
    @classmethod
    def create(cls, num, ranks, size=2):
        """
        Build an MPS graph of `num` sites as an einsum over its tensors.

        Parameters
        ----------
        num: Number of sites in the MPS
        size: the size of uncontracted dimensions
        ranks: a list of the size of contracted dimensions.
            The length of the list should be num-1.
        """
        assert len(ranks) == num - 1
        # Edge tensors have 2 legs (bond, physical); middle ones have 3.
        A_left = ad.Variable(name='A0', shape=[ranks[0], size])
        A_right = ad.Variable(name=f'A{num-1}', shape=[ranks[-1], size])
        A_middle_list = []
        for i in range(1, num - 1):
            node = ad.Variable(name=f'A{i}',
                               shape=[ranks[i - 1], ranks[i], size])
            A_middle_list.append(node)
        untracted_subs_list = []
        cg = CharacterGetter()
        # set subscripts for all the nodes: neighbouring tensors share
        # their bond subscript (prev_char) so the bonds get contracted.
        A_left.subscripts = f"{cg.getchar()}{cg.getchar()}"
        prev_char = A_left.subscripts[0]
        untracted_subs_list.append(A_left.subscripts[1])
        for node in A_middle_list:
            node.subscripts = f"{prev_char}{cg.getchar()}{cg.getchar()}"
            prev_char = node.subscripts[1]
            untracted_subs_list.append(node.subscripts[2])
        A_right.subscripts = f"{prev_char}{cg.getchar()}"
        untracted_subs_list.append(A_right.subscripts[1])
        # produce output: only the physical (downward) legs survive.
        A_list = [A_left] + A_middle_list + [A_right]
        out_subs = "".join(untracted_subs_list)
        input_subs = ','.join([node.subscripts for node in A_list])
        einsum_subscripts = input_subs + '->' + out_subs
        # clear all the subscripts (they were only scratch state used to
        # assemble the einsum string above)
        for node in A_list:
            node.subscripts = None
        return cls(ad.einsum(einsum_subscripts, *A_list), A_list)
@attr.s()
class MpoGraph(object):
    """
    Produce a graph representing the MPO:
    | | | | | |
    H-H-H-H-H-H
    | | | | | |
    Each A is a tensor, each line is a leg of the tensor diagram
    representing the contracting index.
    Each tensor is arranged as left leg, right leg, upward leg, downward leg.
    The left one is arranged as right leg, upward leg, downward leg and
    the right one is arranged as left leg, upward leg, downward leg.
    Returns
    -------
    1. a einsum node representing the MPO.
    2. The input nodes of the einsum node.
    """
    output = attr.ib()
    # BUGFIX: attr.ib(default=[]) shares ONE list object across every
    # instance (the classic mutable-default pitfall); attrs provides
    # factory= to build a fresh list per instance.
    inputs = attr.ib(factory=list)
    @classmethod
    def create(cls, num, ranks, size=2):
        """
        Build an MPO graph of `num` sites as an einsum over its tensors.

        Parameters
        ----------
        num: Number of sites in the MPO
        size: the size of uncontracted dimensions
        ranks: a list of the size of contracted dimensions.
            The length of the list should be num-1.
        """
        assert len(ranks) == num - 1
        # Edge tensors have 3 legs (bond, up, down); middle ones have 4.
        # The symmetry marks the up/down physical legs as interchangeable.
        H_left = ad.Variable(name='H0',
                            shape=[ranks[0], size, size],
                            symmetry=[[1, 2]])
        H_right = ad.Variable(name=f'H{num-1}',
                             shape=[ranks[-1], size, size],
                             symmetry=[[1, 2]])
        H_middle_list = []
        for i in range(1, num - 1):
            node = ad.Variable(name=f'H{i}',
                               shape=[ranks[i - 1], ranks[i], size, size],
                               symmetry=[[2, 3]])
            H_middle_list.append(node)
        up_subs_list = []
        down_subs_list = []
        cg = CharacterGetter()
        # set subscripts for all the nodes: neighbouring tensors share
        # their bond subscript (prev_char) so the bonds get contracted.
        H_left.subscripts = f"{cg.getchar()}{cg.getchar()}{cg.getchar()}"
        prev_char = H_left.subscripts[0]
        up_subs_list.append(H_left.subscripts[1])
        down_subs_list.append(H_left.subscripts[2])
        for node in H_middle_list:
            node.subscripts = f"{prev_char}{cg.getchar()}{cg.getchar()}{cg.getchar()}"
            prev_char = node.subscripts[1]
            up_subs_list.append(node.subscripts[2])
            down_subs_list.append(node.subscripts[3])
        H_right.subscripts = f"{prev_char}{cg.getchar()}{cg.getchar()}"
        up_subs_list.append(H_right.subscripts[1])
        down_subs_list.append(H_right.subscripts[2])
        # produce output: all upward legs first, then all downward legs.
        H_list = [H_left] + H_middle_list + [H_right]
        up_subs = "".join(up_subs_list)
        down_subs = "".join(down_subs_list)
        input_subs = ','.join([node.subscripts for node in H_list])
        einsum_subscripts = input_subs + '->' + up_subs + down_subs
        # clear all the subscripts (they were only scratch state used to
        # assemble the einsum string above)
        for node in H_list:
            node.subscripts = None
        return cls(ad.einsum(einsum_subscripts, *H_list), H_list)
@attr.s()
class DmrgGraph(object):
    """
    Produce a graph representing the DMRG algorithm.
    Note: here we use hessian calculation to get the contractions
    among the mpo and the mps except the intermediates.
    Parameters
    ----------
    num: number of tensors in mpo and mps
    mpo_ranks: an array containing mpo ranks
    mps_ranks: an array containing mps ranks
    size: untracted legs dimension size in both mpo and mps
    Variables
    ---------
    mpo_inputs: inputs of the MpoGraph
    mps_inputs: inputs the MpsGraph
    intermediates: an array of einsum nodes taking hessian w.r.t.
    hessians: a list of graphs for hessians
    """
    mpo_inputs = attr.ib()
    mps_inputs = attr.ib()
    # BUGFIX: attr.ib(default=[]) shares ONE list object across every
    # instance (the classic mutable-default pitfall); attrs provides
    # factory= to build a fresh list per instance.
    intermediates = attr.ib(factory=list)
    hessians = attr.ib(factory=list)
    def update_graph(self, num, mpo_ranks, mps_ranks, size):
        """Rebuild the MPO/MPS input nodes (e.g. after ranks changed) and
        re-bind them into the cached intermediates and hessian graphs."""
        self.mpo_inputs = MpoGraph.create(num, mpo_ranks, size).inputs
        self.mps_inputs = MpsGraph.create(num, mps_ranks, size).inputs
        update_variables(self.intermediates + self.hessians,
                         self.mpo_inputs + self.mps_inputs)
    @classmethod
    def create(cls, num, mpo_ranks, mps_ranks, size):
        """Build the DMRG graph: one (intermediate, hessian) pair for each
        of the num-1 neighbouring MPS site pairs."""
        mpo_graph = MpoGraph.create(num, mpo_ranks, size)
        mps_graph = MpsGraph.create(num, mps_ranks, size)
        intermediates, hessians = [], []
        for i in range(num - 1):
            intermediate, hes = cls._get_sub_hessian(i, mpo_graph, mps_graph)
            hessians.append(hes)
            intermediates.append(intermediate)
        return cls(mpo_graph.inputs, mps_graph.inputs, intermediates, hessians)
    @classmethod
    def _get_sub_hessian(cls, index, mpo_graph, mps_graph):
        """Return the einsum intermediate merging sites index/index+1 and
        the Hessian of 0.5 * <mps| H |mps> with respect to it."""
        # rebuild mps graph so that sites index and index+1 are fused
        # into a single einsum intermediate
        intermediate_set = {
            mps_graph.inputs[index], mps_graph.inputs[index + 1]
        }
        split_input_nodes = list(set(mps_graph.inputs) - intermediate_set)
        mps = split_einsum(mps_graph.output, split_input_nodes)
        # get the intermediate node
        intermediate, = [
            node for node in mps.inputs if isinstance(node, ad.EinsumNode)
        ]
        mps_outer_product = ad.tensordot(mps, mps, axes=[[], []])
        mpo_axes = list(range(len(mpo_graph.output.shape)))
        # The 0.5 factor makes sure that the Hessian can be written as an einsum
        objective = 0.5 * ad.tensordot(
            mps_outer_product, mpo_graph.output, axes=[mpo_axes, mpo_axes])
        hes = ad.hessian(objective, [intermediate])
        return intermediate, hes[0][0]
def dmrg_local_update(intermediate, eigvec, max_mps_rank):
    """
    Perform the local DMRG update: split the two-site eigenvector back
    into two MPS site tensors via a truncated SVD.

    Parameters
    ----------
    intermediate: the input einsum node. Its inputs are two mps sites.
    eigvec: the eigenvector to get the low rank decomposition.
    max_mps_rank: maximum mps tensor rank (SVD truncation bound).

    Returns
    -------
    (left, right): the two updated site tensors, each transposed back to
    its original subscript order.
    """
    # parse intermediate strings
    inputs = intermediate.inputs
    assert len(inputs) == 2
    # Here input names are formatted as A{i}.
    index_input_0 = int(inputs[0].name[1:])
    index_input_1 = int(inputs[1].name[1:])
    in_subs, out_subs, _ = _parse_einsum_input(
        (intermediate.einsum_subscripts, *intermediate.inputs))
    if index_input_0 > index_input_1:
        # right site appears first in the einsum input string
        right_subs, left_subs = in_subs.split(',')
    else:
        left_subs, right_subs = in_subs.split(',')
    # Map each output subscript character to its axis position in eigvec.
    map_subs_indices = dict(zip(out_subs,
                                list(range(len(intermediate.shape)))))
    # The single subscript shared by both sites is the contracted bond.
    contract_char, = list(set(left_subs) - set(out_subs))
    # NOTE: set differences yield arbitrary character order, but that is
    # safe: the same ordering is used consistently below for the index
    # lists, the reshapes and the einsum strings.
    left_uncontract_chars = list(set(left_subs) - set(contract_char))
    right_uncontract_chars = list(set(right_subs) - set(contract_char))
    left_indices = [map_subs_indices[char] for char in left_uncontract_chars]
    right_indices = [map_subs_indices[char] for char in right_uncontract_chars]
    left_uncontract_str = "".join(left_uncontract_chars)
    right_uncontract_str = "".join(right_uncontract_chars)
    #############################################################
    # svd decomposition to get updated sites:
    # flatten eigvec to a (left legs) x (right legs) matrix first
    eigvec_shape = intermediate.shape
    eigvec_mat = T.transpose(eigvec, left_indices + right_indices)
    eigvec_mat = T.reshape(eigvec_mat,
                          (np.prod([eigvec_shape[i]
                                    for i in left_indices]), -1))
    U, s, VT = T.svd(eigvec_mat)
    # Truncate to at most max_mps_rank singular values.
    rank = min([max_mps_rank, eigvec_mat.shape[0], eigvec_mat.shape[1]])
    U, s, VT = U[:, :rank], s[:rank], VT[:rank, :]
    # Absorb the singular values into the right factor.
    VT = T.diag(s) @ VT
    U = T.reshape(U, [eigvec_shape[i] for i in left_indices] + [rank])
    VT = T.reshape(VT, ([rank] + [eigvec_shape[i] for i in right_indices]))
    # Transpose each factor back to the site's original subscript order.
    left = T.einsum(f"{left_uncontract_str}{contract_char}->{left_subs}", U)
    right = T.einsum(f"{contract_char}{right_uncontract_str}->{right_subs}",
                     VT)
    return left, right
def get_smallest_eigenpair(hes_val, eigvec_shape):
    """Return the smallest eigenvalue of hes_val and its eigenvector.

    hes_val is a "doubled" tensor: both the first and the second half of
    its dimensions must equal eigvec_shape. It is flattened into a square
    matrix for the eigendecomposition, and the selected eigenvector is
    reshaped back to eigvec_shape.
    """
    ndim = len(eigvec_shape)
    # Sanity check: hes_val must be eigvec_shape x eigvec_shape.
    assert len(hes_val.shape) == 2 * ndim
    assert np.array_equal(eigvec_shape, hes_val.shape[:ndim])
    assert np.array_equal(eigvec_shape, hes_val.shape[ndim:])
    # Flatten to a square matrix and diagonalize.
    matrix_dim = np.prod(eigvec_shape)
    hessian_matrix = T.reshape(hes_val, (matrix_dim, -1))
    eigenvalues, eigenvectors = T.eigh(hessian_matrix)
    # Column of the smallest eigenvalue.
    smallest = T.argmin(eigenvalues)
    vector = T.reshape(eigenvectors[:, smallest], eigvec_shape)
    return eigenvalues[smallest], vector
def dmrg(mpo_tensors,
         init_mps_tensors,
         max_mps_rank,
         num_iter=1,
         sequence='R'):
    """
    Perform DMRG iterations (two-site sweeps).

    Parameters
    ----------
    mpo_tensors: an array of mpo tensor data
    init_mps_tensors: an array of mps tensor data (not modified; a deep
        copy is taken)
    max_mps_rank: maximum mps rank in the iterations
    num_iter: total number of iterations
    sequence: str, String made of 'L' and 'R' defining the sweep sequence, e.g 'RRL'.
        The sequence will be repeated until num_iter is reached.
        Only 'R' (left-to-right sweeps) is implemented.

    Returns
    -------
    (mps_tensors, eig_val): the optimized mps tensors and the last
    (smallest) eigenvalue found. Requires num >= 2 sites.
    """
    if sequence != "R":
        raise NotImplementedError
    num = len(mpo_tensors)
    size = mpo_tensors[0].shape[1]
    # Bond dimensions read off the tensor shapes.
    mpo_ranks = [mpo_tensors[i].shape[0] for i in range(1, len(mpo_tensors))]
    mps_tensors = copy.deepcopy(init_mps_tensors)
    mps_ranks = [mps_tensors[i].shape[0] for i in range(1, len(mps_tensors))]
    dg = DmrgGraph.create(num, mpo_ranks, mps_ranks, size)
    executor = ad.Executor(dg.hessians)
    # sequence is R
    # (note: the loop variable `iter` shadows the builtin of that name)
    for iter in range(num_iter):
        # Right-canonicalize the MPS before each sweep; ranks may change.
        mps_tensors = gauge_transform_mps(mps_tensors, right=True)
        mps_ranks = [
            mps_tensors[i].shape[0] for i in range(1, len(mps_tensors))
        ]
        for i in range(num - 1):
            # Re-bind graph inputs to the current (possibly resized) ranks.
            dg.update_graph(num, mpo_ranks, mps_ranks, size)
            feed_dict = dict(zip(dg.mpo_inputs, mpo_tensors))
            feed_dict.update(dict(zip(dg.mps_inputs, mps_tensors)))
            hes_val, = executor.run(feed_dict=feed_dict,
                                    out_nodes=[dg.hessians[i]])
            # get the smallest eigenvalue and the corresponding eigenvector of the hesval
            eigvec_shape = dg.intermediates[i].shape
            eig_val, eigvec = get_smallest_eigenpair(hes_val,
                                                    dg.intermediates[i].shape)
            # Update the two sites of mps
            mps_tensors[i], mps_tensors[i + 1] = dmrg_local_update(
                dg.intermediates[i], eigvec, max_mps_rank)
            # update the rank
            mps_ranks[i] = mps_tensors[i + 1].shape[0]
        print(f'At iteration {iter} the smallest eigenvalue is: {eig_val}')
    return mps_tensors, eig_val
def dmrg_shared_exec(mpo_tensors,
                     init_mps_tensors,
                     max_mps_rank,
                     num_iter=1,
                     sequence='R'):
    """
    Perform DMRG iterations with shared executions: the per-site hessian
    graphs are simplified and arranged into a sequential tree so common
    subexpressions are computed once across the sweep. Same parameters
    and return value as `dmrg`.
    """
    if sequence != "R":
        raise NotImplementedError
    num = len(mpo_tensors)
    size = mpo_tensors[0].shape[1]
    # Bond dimensions read off the tensor shapes.
    mpo_ranks = [mpo_tensors[i].shape[0] for i in range(1, len(mpo_tensors))]
    mps_tensors = copy.deepcopy(init_mps_tensors)
    mps_ranks = [mps_tensors[i].shape[0] for i in range(1, len(mps_tensors))]
    dg = DmrgGraph.create(num, mpo_ranks, mps_ranks, size)
    for i, hes in enumerate(dg.hessians):
        dg.hessians[i] = simplify(hes)
        # NOTE(review): this asserts on the pre-simplify node `hes`, not
        # on the simplified dg.hessians[i] -- confirm which was intended.
        assert isinstance(hes, ad.EinsumNode)
    # Reorganize the hessian graphs so sequential sweeps share work.
    dg.hessians = generate_sequential_optimal_tree(dg.hessians, dg.mps_inputs)
    executor = ad.Executor(dg.hessians)
    # sequence is R
    # (note: the loop variable `iter` shadows the builtin of that name)
    for iter in range(num_iter):
        # Right-canonicalize the MPS before each sweep; ranks may change.
        mps_tensors = gauge_transform_mps(mps_tensors, right=True)
        mps_ranks = [
            mps_tensors[i].shape[0] for i in range(1, len(mps_tensors))
        ]
        for i in range(num - 1):
            # Re-bind graph inputs to the current (possibly resized) ranks.
            dg.update_graph(num, mpo_ranks, mps_ranks, size)
            feed_dict = dict(zip(dg.mpo_inputs, mpo_tensors))
            feed_dict.update(dict(zip(dg.mps_inputs, mps_tensors)))
            hes_val, = executor.run(feed_dict=feed_dict,
                                    out_nodes=[dg.hessians[i]])
            # get the smallest eigenvalue and the corresponding eigenvector of the hesval
            eigvec_shape = dg.intermediates[i].shape
            eig_val, eigvec = get_smallest_eigenpair(hes_val,
                                                    dg.intermediates[i].shape)
            # Update the two sites of mps
            mps_tensors[i], mps_tensors[i + 1] = dmrg_local_update(
                dg.intermediates[i], eigvec, max_mps_rank)
            # update the rank
            mps_ranks[i] = mps_tensors[i + 1].shape[0]
        print(f'At iteration {iter} the smallest eigenvalue is: {eig_val}')
    return mps_tensors, eig_val
if __name__ == "__main__":
    # mps = mps_graph(4, 10)
    # mpo = mpo_graph(4, 10)
    # Demo run: 6-site Heisenberg MPO, random rank-2 MPS, two sweeps.
    mpo_tensors = ham_heis_mpo(num=6)
    mps_tensors = rand_mps(num=6, rank=2, size=2)
    dmrg(mpo_tensors, mps_tensors, max_mps_rank=20, num_iter=2)
| [
"backend.svd",
"numpy.prod",
"autodiff.tensordot",
"tensors.quimb_tensors.gauge_transform_mps",
"backend.transpose",
"backend.reshape",
"copy.deepcopy",
"numpy.core.einsumfunc._parse_einsum_input",
"tensors.quimb_tensors.rand_mps",
"autodiff.hessian",
"backend.einsum",
"graph_ops.graph_transfo... | [((1067, 1075), 'attr.s', 'attr.s', ([], {}), '()\n', (1073, 1075), False, 'import attr\n'), ((3390, 3398), 'attr.s', 'attr.s', ([], {}), '()\n', (3396, 3398), False, 'import attr\n'), ((6226, 6234), 'attr.s', 'attr.s', ([], {}), '()\n', (6232, 6234), False, 'import attr\n'), ((1615, 1624), 'attr.ib', 'attr.ib', ([], {}), '()\n', (1622, 1624), False, 'import attr\n'), ((1638, 1657), 'attr.ib', 'attr.ib', ([], {'default': '[]'}), '(default=[])\n', (1645, 1657), False, 'import attr\n'), ((3980, 3989), 'attr.ib', 'attr.ib', ([], {}), '()\n', (3987, 3989), False, 'import attr\n'), ((4003, 4022), 'attr.ib', 'attr.ib', ([], {'default': '[]'}), '(default=[])\n', (4010, 4022), False, 'import attr\n'), ((6910, 6919), 'attr.ib', 'attr.ib', ([], {}), '()\n', (6917, 6919), False, 'import attr\n'), ((6937, 6946), 'attr.ib', 'attr.ib', ([], {}), '()\n', (6944, 6946), False, 'import attr\n'), ((6967, 6986), 'attr.ib', 'attr.ib', ([], {'default': '[]'}), '(default=[])\n', (6974, 6986), False, 'import attr\n'), ((7002, 7021), 'attr.ib', 'attr.ib', ([], {'default': '[]'}), '(default=[])\n', (7009, 7021), False, 'import attr\n'), ((9340, 9415), 'numpy.core.einsumfunc._parse_einsum_input', '_parse_einsum_input', (['(intermediate.einsum_subscripts, *intermediate.inputs)'], {}), '((intermediate.einsum_subscripts, *intermediate.inputs))\n', (9359, 9415), False, 'from numpy.core.einsumfunc import _parse_einsum_input\n'), ((10369, 10418), 'backend.transpose', 'T.transpose', (['eigvec', '(left_indices + right_indices)'], {}), '(eigvec, left_indices + right_indices)\n', (10380, 10418), True, 'import backend as T\n'), ((10594, 10611), 'backend.svd', 'T.svd', (['eigvec_mat'], {}), '(eigvec_mat)\n', (10599, 10611), True, 'import backend as T\n'), ((10769, 10831), 'backend.reshape', 'T.reshape', (['U', '([eigvec_shape[i] for i in left_indices] + [rank])'], {}), '(U, [eigvec_shape[i] for i in left_indices] + [rank])\n', (10778, 10831), True, 'import backend as T\n'), 
((10841, 10905), 'backend.reshape', 'T.reshape', (['VT', '([rank] + [eigvec_shape[i] for i in right_indices])'], {}), '(VT, [rank] + [eigvec_shape[i] for i in right_indices])\n', (10850, 10905), True, 'import backend as T\n'), ((10920, 10985), 'backend.einsum', 'T.einsum', (['f"""{left_uncontract_str}{contract_char}->{left_subs}"""', 'U'], {}), "(f'{left_uncontract_str}{contract_char}->{left_subs}', U)\n", (10928, 10985), True, 'import backend as T\n'), ((10998, 11066), 'backend.einsum', 'T.einsum', (['f"""{contract_char}{right_uncontract_str}->{right_subs}"""', 'VT'], {}), "(f'{contract_char}{right_uncontract_str}->{right_subs}', VT)\n", (11006, 11066), True, 'import backend as T\n'), ((11612, 11631), 'backend.eigh', 'T.eigh', (['hes_val_mat'], {}), '(hes_val_mat)\n', (11618, 11631), True, 'import backend as T\n'), ((11678, 11695), 'backend.argmin', 'T.argmin', (['eigvals'], {}), '(eigvals)\n', (11686, 11695), True, 'import backend as T\n'), ((11736, 11776), 'backend.reshape', 'T.reshape', (['eigvecs[:, idx]', 'eigvec_shape'], {}), '(eigvecs[:, idx], eigvec_shape)\n', (11745, 11776), True, 'import backend as T\n'), ((12558, 12589), 'copy.deepcopy', 'copy.deepcopy', (['init_mps_tensors'], {}), '(init_mps_tensors)\n', (12571, 12589), False, 'import math, copy\n'), ((12743, 12767), 'autodiff.Executor', 'ad.Executor', (['dg.hessians'], {}), '(dg.hessians)\n', (12754, 12767), True, 'import autodiff as ad\n'), ((14460, 14491), 'copy.deepcopy', 'copy.deepcopy', (['init_mps_tensors'], {}), '(init_mps_tensors)\n', (14473, 14491), False, 'import math, copy\n'), ((14775, 14835), 'graph_ops.graph_als_optimizer.generate_sequential_optimal_tree', 'generate_sequential_optimal_tree', (['dg.hessians', 'dg.mps_inputs'], {}), '(dg.hessians, dg.mps_inputs)\n', (14807, 14835), False, 'from graph_ops.graph_als_optimizer import generate_sequential_optimal_tree\n'), ((14851, 14875), 'autodiff.Executor', 'ad.Executor', (['dg.hessians'], {}), '(dg.hessians)\n', (14862, 14875), True, 
'import autodiff as ad\n'), ((16208, 16227), 'tensors.quimb_tensors.ham_heis_mpo', 'ham_heis_mpo', ([], {'num': '(6)'}), '(num=6)\n', (16220, 16227), False, 'from tensors.quimb_tensors import rand_mps, ham_heis_mpo, load_quimb_tensors, gauge_transform_mps\n'), ((16246, 16277), 'tensors.quimb_tensors.rand_mps', 'rand_mps', ([], {'num': '(6)', 'rank': '(2)', 'size': '(2)'}), '(num=6, rank=2, size=2)\n', (16254, 16277), False, 'from tensors.quimb_tensors import rand_mps, ham_heis_mpo, load_quimb_tensors, gauge_transform_mps\n'), ((2037, 2083), 'autodiff.Variable', 'ad.Variable', ([], {'name': '"""A0"""', 'shape': '[ranks[0], size]'}), "(name='A0', shape=[ranks[0], size])\n", (2048, 2083), True, 'import autodiff as ad\n'), ((2102, 2158), 'autodiff.Variable', 'ad.Variable', ([], {'name': 'f"""A{num - 1}"""', 'shape': '[ranks[-1], size]'}), "(name=f'A{num - 1}', shape=[ranks[-1], size])\n", (2113, 2158), True, 'import autodiff as ad\n'), ((2421, 2438), 'utils.CharacterGetter', 'CharacterGetter', ([], {}), '()\n', (2436, 2438), False, 'from utils import CharacterGetter\n'), ((4401, 4472), 'autodiff.Variable', 'ad.Variable', ([], {'name': '"""H0"""', 'shape': '[ranks[0], size, size]', 'symmetry': '[[1, 2]]'}), "(name='H0', shape=[ranks[0], size, size], symmetry=[[1, 2]])\n", (4412, 4472), True, 'import autodiff as ad\n'), ((4549, 4634), 'autodiff.Variable', 'ad.Variable', ([], {'name': 'f"""H{num - 1}"""', 'shape': '[ranks[-1], size, size]', 'symmetry': '[[1, 2]]'}), "(name=f'H{num - 1}', shape=[ranks[-1], size, size], symmetry=[[1,\n 2]])\n", (4560, 4634), True, 'import autodiff as ad\n'), ((5030, 5047), 'utils.CharacterGetter', 'CharacterGetter', ([], {}), '()\n', (5045, 5047), False, 'from utils import CharacterGetter\n'), ((7234, 7326), 'utils.update_variables', 'update_variables', (['(self.intermediates + self.hessians)', '(self.mpo_inputs + self.mps_inputs)'], {}), '(self.intermediates + self.hessians, self.mpo_inputs + self\n .mps_inputs)\n', (7250, 7326), False, 
'from utils import update_variables\n'), ((8149, 8198), 'graph_ops.graph_generator.split_einsum', 'split_einsum', (['mps_graph.output', 'split_input_nodes'], {}), '(mps_graph.output, split_input_nodes)\n', (8161, 8198), False, 'from graph_ops.graph_generator import split_einsum\n'), ((8375, 8412), 'autodiff.tensordot', 'ad.tensordot', (['mps', 'mps'], {'axes': '[[], []]'}), '(mps, mps, axes=[[], []])\n', (8387, 8412), True, 'import autodiff as ad\n'), ((8685, 8722), 'autodiff.hessian', 'ad.hessian', (['objective', '[intermediate]'], {}), '(objective, [intermediate])\n', (8695, 8722), True, 'import autodiff as ad\n'), ((10745, 10754), 'backend.diag', 'T.diag', (['s'], {}), '(s)\n', (10751, 10754), True, 'import backend as T\n'), ((12845, 12889), 'tensors.quimb_tensors.gauge_transform_mps', 'gauge_transform_mps', (['mps_tensors'], {'right': '(True)'}), '(mps_tensors, right=True)\n', (12864, 12889), False, 'from tensors.quimb_tensors import rand_mps, ham_heis_mpo, load_quimb_tensors, gauge_transform_mps\n'), ((14697, 14710), 'graph_ops.graph_transformer.simplify', 'simplify', (['hes'], {}), '(hes)\n', (14705, 14710), False, 'from graph_ops.graph_transformer import simplify\n'), ((14953, 14997), 'tensors.quimb_tensors.gauge_transform_mps', 'gauge_transform_mps', (['mps_tensors'], {'right': '(True)'}), '(mps_tensors, right=True)\n', (14972, 14997), False, 'from tensors.quimb_tensors import rand_mps, ham_heis_mpo, load_quimb_tensors, gauge_transform_mps\n'), ((2240, 2303), 'autodiff.Variable', 'ad.Variable', ([], {'name': 'f"""A{i}"""', 'shape': '[ranks[i - 1], ranks[i], size]'}), "(name=f'A{i}', shape=[ranks[i - 1], ranks[i], size])\n", (2251, 2303), True, 'import autodiff as ad\n'), ((3340, 3377), 'autodiff.einsum', 'ad.einsum', (['einsum_subscripts', '*A_list'], {}), '(einsum_subscripts, *A_list)\n', (3349, 3377), True, 'import autodiff as ad\n'), ((4772, 4864), 'autodiff.Variable', 'ad.Variable', ([], {'name': 'f"""H{i}"""', 'shape': '[ranks[i - 1], ranks[i], size, 
size]', 'symmetry': '[[2, 3]]'}), "(name=f'H{i}', shape=[ranks[i - 1], ranks[i], size, size],\n symmetry=[[2, 3]])\n", (4783, 4864), True, 'import autodiff as ad\n'), ((6176, 6213), 'autodiff.einsum', 'ad.einsum', (['einsum_subscripts', '*H_list'], {}), '(einsum_subscripts, *H_list)\n', (6185, 6213), True, 'import autodiff as ad\n'), ((8581, 8657), 'autodiff.tensordot', 'ad.tensordot', (['mps_outer_product', 'mpo_graph.output'], {'axes': '[mpo_axes, mpo_axes]'}), '(mps_outer_product, mpo_graph.output, axes=[mpo_axes, mpo_axes])\n', (8593, 8657), True, 'import autodiff as ad\n'), ((10486, 10534), 'numpy.prod', 'np.prod', (['[eigvec_shape[i] for i in left_indices]'], {}), '([eigvec_shape[i] for i in left_indices])\n', (10493, 10534), True, 'import numpy as np\n'), ((11561, 11582), 'numpy.prod', 'np.prod', (['eigvec_shape'], {}), '(eigvec_shape)\n', (11568, 11582), True, 'import numpy as np\n')] |
"""Private module for the DataIO class."""
import json
import logging
import warnings
from collections import OrderedDict
from copy import deepcopy
from datetime import datetime
from pathlib import Path
from typing import Tuple
import numpy as np
import pandas as pd
try:
import pyarrow as pa
except ImportError:
HAS_PYARROW = False
else:
HAS_PYARROW = True
from pyarrow import feather
import xtgeo
from . import _utils
# Valid file format -> file suffix mappings, one table per exported object class.
VALID_SURFACE_FORMATS = {"irap_binary": ".gri"}
VALID_GRID_FORMATS = {"hdf": ".hdf", "roff": ".roff"}
VALID_CUBE_FORMATS = {"segy": ".segy"}
VALID_TABLE_FORMATS = {"hdf": ".hdf", "csv": ".csv", "arrow": ".arrow"}
VALID_POLYGONS_FORMATS = {
    "hdf": ".hdf",
    "csv": ".csv",  # columns will be X Y Z, ID
    "csv|xtgeo": ".csv",  # use default xtgeo columns: X_UTME, Y_UTMN, Z_TVDSS, POLY_ID
    "irap_ascii": ".pol",
}
VALID_POINTS_FORMATS = {
    "hdf": ".hdf",
    "csv": ".csv",  # columns will be X Y Z
    "csv|xtgeo": ".csv",  # use default xtgeo columns: X_UTME, Y_UTMN, Z_TVDSS
    "irap_ascii": ".poi",
}

# The produced metadata must conform with the corresponding JSON schema.
# Metadata definitions are stored under schema/
# Some content types have additional input requirements.
# When value is None, a repeat field shall not be present, otherwise it may be as this:
#
# content: seismics
# seismics:
#   attribute: mean
#   zrange: 42.0
#   filter_size: 4.0
#   scaling_factor: 1.5
#
# The value for each subkey is the *required type* of that subkey's input.
ALLOWED_CONTENTS = {
    "depth": None,
    "time": None,
    "thickness": None,
    "property": {"attribute": str, "is_discrete": bool},
    "seismic": {
        "attribute": str,
        "zrange": float,
        "filter_size": float,
        "scaling_factor": float,
        "offset": str,
    },
    "fluid_contact": {"contact": str, "truncated": bool},
    "field_outline": {"contact": str},
    "regions": None,
    "pinchout": None,
    "subcrop": None,
    "fault_lines": None,
    "velocity": None,
    "volumes": None,
    "volumetrics": None,  # or?
    "khproduct": None,
    "timeseries": None,
}

# this setting will set if subkeys is required or not. If not found in list then
# assume False.
CONTENTS_REQUIRED = {
    "fluid_contact": {"contact": True},
    "field_outline": {"contact": False},
}
logger = logging.getLogger(__name__)
def _override_arg(obj, vname, proposal, default=None):
"""Return correct argument for export() keys.
The _ExportItem class can receive args that comes directly from the DataIO class
attribute *or* from the export function itself. Rules, with examples
from "name" attribute (obj = dataio):
* dataio._name == None and name == None => use name
* dataio._name == "Some" and name == None => use obj._name
* dataio._name == None and name = "Some" => use name
* dataio._name == "Some" and name = "Other" => use name
"""
instance_attr = getattr(obj, "_" + vname)
logger.info(
"Instance attribute %s has %s while proposal is %s",
vname,
instance_attr,
proposal,
)
result = None
if instance_attr is default:
result = proposal
elif instance_attr is not default and proposal is default:
result = instance_attr
elif instance_attr is not default and proposal is not default:
result = proposal
return result
class ValidationError(ValueError):
    """Raised when an item (content, parent, date, ...) fails input validation."""
class _ExportItem:
"""Export of the actual data item with metadata."""
def __init__(
self,
dataio,
obj,
subfolder=None,
verbosity="WARNING",
include_index=False,
name=None,
parent=None,
tagname=None,
description=None,
display_name=None,
unit=None,
**kwargs,
):
self.dataio = dataio
self.obj = obj
self.verbosity = _override_arg(dataio, "verbosity", verbosity)
self.name = _override_arg(dataio, "name", name)
self.parent = _override_arg(dataio, "parent", parent)
self.tagname = _override_arg(dataio, "tagname", tagname)
self.description = _override_arg(dataio, "description", description)
self.display_name = _override_arg(dataio, "display_name", display_name)
self.unit = _override_arg(dataio, "unit", unit)
self.subfolder = _override_arg(dataio, "subfolder", subfolder)
self.verbosity = _override_arg(dataio, "verbosity", verbosity)
self.include_index = _override_arg(
dataio, "include_index", include_index, default=False
)
logger.setLevel(level=self.verbosity)
self.timedata = self.dataio.timedata # the a bit complex time input
self.times = None # will be populated later as None or list of 2
if "index" in kwargs:
self.include_index = kwargs.get(
"index", self.include_index
) # bwcompatibility for deprecated "index"
warnings.warn(
"Using 'index' is deprecated and will be removed in future versions, "
"use 'include_index' instead.",
DeprecationWarning,
)
logger.info("Using Pandas INDEX is %s", self.include_index)
self.subtype = None
self.classname = "unset"
# to be populated later
self.efolder = "other"
self.valid = None
self.fmt = None
if self.verbosity is None:
self.verbosity = "WARNING" # fallback
self.realfolder = dataio.realfolder
self.iterfolder = dataio.iterfolder
self.createfolder = dataio.createfolder
if subfolder is not None:
warnings.warn(
"Exporting to a subfolder is a deviation from the standard "
"and could have consequences for later dependencies",
UserWarning,
)
    def save_to_file(self) -> str:
        """Save (export) an instance to file with rich metadata.

        Many metadata items are object independent and are treated directly in the
        dataio module. Here additional metadata (dependent on this datatype) are
        collected/processed and subsequently both 'independent' and object dependent
        metadata are collected and written to disk here (as separate file or bundled
        with data, depending on data type and format).

        Returns:
            The absolute path of the exported file as a string.
        """
        logger.info("Save to file...")
        # Map the concrete object type to subtype/class/export-folder, the
        # table of valid formats, and the configured output format.
        if isinstance(self.obj, xtgeo.RegularSurface):
            self.subtype = "RegularSurface"
            self.classname = "surface"
            self.efolder = "maps"
            self.valid = VALID_SURFACE_FORMATS
            self.fmt = self.dataio.surface_fformat
        elif isinstance(self.obj, xtgeo.Polygons):
            self.subtype = "Polygons"
            self.classname = "polygons"
            self.efolder = "polygons"
            self.valid = VALID_POLYGONS_FORMATS
            self.fmt = self.dataio.polygons_fformat
        elif isinstance(self.obj, xtgeo.Points):
            self.subtype = "Points"
            self.classname = "points"
            self.efolder = "points"
            self.valid = VALID_POINTS_FORMATS
            self.fmt = self.dataio.points_fformat
        elif isinstance(self.obj, xtgeo.Cube):
            self.subtype = "RegularCube"
            self.classname = "cube"
            self.efolder = "cubes"
            self.valid = VALID_CUBE_FORMATS
            self.fmt = self.dataio.cube_fformat
        elif isinstance(self.obj, xtgeo.Grid):
            self.subtype = "CPGrid"
            self.classname = "cpgrid"
            self.efolder = "grids"
            self.valid = VALID_GRID_FORMATS
            self.fmt = self.dataio.grid_fformat
        elif isinstance(self.obj, xtgeo.GridProperty):
            self.subtype = "CPGridProperty"
            self.classname = "cpgrid_property"
            self.efolder = "grids"
            self.valid = VALID_GRID_FORMATS
            self.fmt = self.dataio.grid_fformat
        elif isinstance(self.obj, pd.DataFrame):
            self.subtype = "DataFrame"
            self.classname = "table"
            self.efolder = "tables"
            self.valid = VALID_TABLE_FORMATS
            self.fmt = self.dataio.table_fformat
        elif HAS_PYARROW and isinstance(self.obj, pa.Table):
            # only reachable when pyarrow imported successfully at module load
            self.subtype = "ArrowTable"
            self.classname = "table"
            self.efolder = "tables"
            self.valid = VALID_TABLE_FORMATS
            self.fmt = self.dataio.arrow_fformat
        else:
            raise NotImplementedError(
                "This data type is not (yet) supported: ", type(self.obj)
            )
        logger.info("Found %s", self.subtype)
        # generic metadata, then object-dependent metadata, then display block
        self._data_process()
        self._data_process_object()
        self._display_process()
        fpath = self._item_to_file()
        return str(fpath)
def _data_process(self):
"""Process some potentially common subfields in the data block.
These subfields are:
- name
- top/base (from context)
- content
- time
- properties? Disabled!
- context
- is_observation
- is_prediction
- description
"""
self._data_process_name()
self._data_process_context()
self._data_process_content()
self._data_process_parent()
self._data_process_timedata()
self._data_process_description()
self._data_process_various()
    def _data_process_name(self):
        """Process the name and also the display_name subfield.

        First detect if name is given, or infer name from object if possible,
        then determine if name is stratigraphic and assign a "true" valid name.
        """
        logger.info("Evaluate data:name attribute")
        usename = "unknown"
        meta = self.dataio.metadata4data
        if self.name is None or self.name == "unknown":
            # no explicit name given; try the object's own name attribute
            try:
                usename = self.obj.name
            except AttributeError:
                warnings.warn(
                    "Cannot get name from object, assume 'unknown'", UserWarning
                )
                usename = "unknown"
        else:
            usename = self.name
        self.name = usename
        # next check if usename has a "truename" and/or aliases from the config
        strat = self.dataio.metadata4strat  # shortform
        logger.debug("self.dataio.metadata4strat is %s", self.dataio.metadata4strat)
        if strat is None or usename not in strat:
            # not in stratigraphy config: keep the raw name, not stratigraphic
            meta["stratigraphic"] = False
            meta["name"] = usename
        else:
            # stratigraphy config may provide the official name and aliases
            meta["name"] = strat[usename].get("name", usename)
            meta["stratigraphic"] = strat[usename].get("stratigraphic", False)
            meta["alias"] = strat[usename].get("alias", None)
            meta["stratigraphic_alias"] = strat[usename].get(
                "stratigraphic_alias", None
            )
        logger.info(
            "Evaluate data:name attribute done, true name is <%s>", meta["name"]
        )
    def _data_process_context(self):
        """Process the context input which gives offset and top/base settings.

        For example::

            context:
              offset: 3.5
              top:
                ref: TopVolantis
                offset: 2.0
              base:
                ref: BaseVolantis
                offset: 8.3

        The stratigraphic input in fmuconfig may look like this::

            TopVolantis:  <-- RMS modelling name -> ref
              stratigraphic: true
              name: VOLANTIS GP. Top  <-- SMDA / official name -> name

        So the dilemma is that in the input, it is natural for the end user
        to use the RMS modelling name, but it may be that the SMDA name also
        is applied? And what if not found? Assume OK or complain? Should one
        validate at all?
        """
        logger.info("Evaluate context (offset, top, base), if any")
        meta = self.dataio.metadata4data
        if self.dataio.context is None:
            logger.info("No context found, which may be ok")
            return  # context data are missing
        rel = self.dataio.context  # shall be a dictionary
        offset = rel.get("offset", None)
        if offset is not None:
            logger.info("Offset is found")
            meta["offset"] = offset
        # top process top and base (both must be present in case)
        top = rel.get("top", None)
        base = rel.get("base", None)
        if top is None or base is None:
            logger.info("context top and/base is missing, skip further")
            return
        topname = rel["top"].get("ref", None)
        basename = rel["base"].get("ref", None)
        if topname is None or basename is None:
            warnings.warn(
                "context top and/base is present but <ref> is missing, skip further",
                UserWarning,
            )
            return
        # finally, validate if top/base name is stratigraphic and set metadata
        group = {"top": topname, "base": basename}
        strat = self.dataio.metadata4strat
        for item, somename in group.items():
            usename = somename
            offset = 0.0
            stratigraphic = False
            if somename in strat:
                # map the modelling name to the official stratigraphy entry
                logger.info("Found <%s> in stratigraphy", somename)
                usename = strat[somename].get("name", somename)
                stratigraphic = strat[somename].get("stratigraphic", False)
                offset = rel[item].get("offset", 0.0)
            else:
                logger.error("Did not find <%s> in stratigraphy input", somename)
                raise ValueError(f"Cannot find {somename} in stratigraphy input")
            meta[item] = OrderedDict()
            meta[item]["name"] = usename
            meta[item]["stratigraphic"] = stratigraphic
            meta[item]["offset"] = offset
    def _data_process_content(self):
        """Process the content block (within data block) which can be complex.

        Accepts a plain string content, or a one-entry dict
        ``{content: {extra subkeys}}``; missing content falls back to
        'depth' with a warning.
        """
        logger.info("Evaluate content")
        content = self.dataio.content
        logger.debug("content is %s of type %s", str(content), type(content))
        meta = self.dataio.metadata4data
        usecontent = "unset"
        useextra = None
        if content is None:
            warnings.warn(
                "The <content> is not provided which defaults to 'depth'. "
                "It is strongly recommended that content is given explicitly!",
                UserWarning,
            )
            usecontent = "depth"
        elif isinstance(content, str):
            # a bare string is disallowed for contents with required subkeys
            if content in CONTENTS_REQUIRED:
                raise ValidationError(f"content {content} requires additional input")
            usecontent = content
        elif isinstance(content, dict):
            # dict form: the single key is the content, the value its subkeys
            usecontent = (list(content.keys()))[0]
            useextra = content[usecontent]
        else:
            raise ValidationError("content must be string or dict")
        if usecontent not in ALLOWED_CONTENTS.keys():
            raise ValidationError(
                f"Invalid content: <{usecontent}>! "
                f"Valid content: {', '.join(ALLOWED_CONTENTS.keys())}"
            )
        meta["content"] = usecontent
        logger.debug("outgoing content is set to %s", usecontent)
        if useextra:
            self._data_process_content_validate(usecontent, useextra)
            meta[usecontent] = useextra
        else:
            logger.debug("content has no extra information")
            logger.debug("content was %s", content)
def _data_process_parent(self):
"""Process the parent block within data block.
A parent is only required for few datatypes, in particular a GridProperty
which will need a grid geometry name.
"""
logger.info("Evaluate parent")
parent = self.parent
meta = self.dataio.metadata4data
if self.classname == "cpgrid_property" and parent is None:
raise ValidationError("Input 'parent' is required for GridProperty!")
else:
if parent is None:
return
# evaluate 'parent' which can be a str or a dict
if isinstance(parent, str):
meta["parent"] = {"name": parent}
self.parent = parent
else:
if "name" not in parent:
raise ValidationError("Input 'parent' shall have a 'name' attribute!")
meta["parent"] = parent
self.parent = parent["name"]
@staticmethod
def _data_process_content_validate(name, fields):
logger.debug("starting staticmethod _data_process_content_validate")
valid = ALLOWED_CONTENTS.get(name, None)
if valid is None:
raise ValidationError(f"Cannot validate content for <{name}>")
logger.info("name: %s", name)
for key, dtype in fields.items():
if key in valid.keys():
wanted_type = valid[key]
if not isinstance(dtype, wanted_type):
raise ValidationError(
f"Invalid type for <{key}> with value <{dtype}>, not of "
f"type <{wanted_type}>"
)
else:
raise ValidationError(f"Key <{key}> is not valid for <{name}>")
required = CONTENTS_REQUIRED.get(name, None)
if isinstance(required, dict):
rlist = list(required.items())
logger.info("rlist is %s", rlist)
logger.info("fields is %s", fields)
rkey, status = rlist.pop()
logger.info("rkey not in fields.keys(): %s", str(rkey not in fields.keys()))
logger.info("rkey: %s", rkey)
logger.info("fields.keys(): %s", str(fields.keys()))
if rkey not in fields.keys() and status is True:
raise ValidationError(
f"The subkey <{rkey}> is required for content <{name}> ",
"but is not found",
)
    def _data_process_timedata(self):
        """Process the time subfield and also construct self.times."""
        # first detect if timedata is given, the process it
        # timedata may be like:
        # None
        # [["20220101", "monitor"], ["20200101", "base"]]
        # [["20220101", None], ["20200101", None]]
        # [["20220101", "any"], None]
        logger.info("Evaluate data:name attribute")
        meta = self.dataio.metadata4data
        # sanity window for dates, as YYYYMMDD integers
        datelimits = (18140517, 33000101)
        if self.timedata is None:
            return
        # this is used in file name construction:
        self.times = []  # e.g. ["20211102", "20231101"] or ["20211102", None]
        # normally self.timedata (input) has two entries, but one may accepted,
        # implicitly meaning the second item is None
        usetimedata = deepcopy(self.timedata)
        if len(usetimedata) == 1:
            usetimedata.append(None)
        for xtime in usetimedata:
            if xtime is None:
                self.times.append(None)
                continue
            if isinstance(xtime[0], int):
                if xtime[0] < datelimits[0] or xtime[0] > datelimits[1]:
                    raise ValidationError(
                        "Integer date input seems to be outside reasonable "
                        f"limits: {datelimits}"
                    )
            tdate = str(xtime[0])
            tlabel = None
            if len(xtime) > 1:
                tlabel = xtime[1]
            tdate = tdate.replace("-", "")  # 2021-04-23 --> 20210403
            # NOTE(review): by operator precedence this reads as
            # (tdate and int(tdate) < lim0) or (int(tdate) > lim1); tdate is a
            # non-empty string here, so the net effect is a plain range check.
            if tdate and int(tdate) < datelimits[0] or int(tdate) > datelimits[1]:
                raise ValidationError(
                    f"Date input outside reasonable limits: {datelimits}"
                )
            tdate = datetime.strptime(tdate, "%Y%m%d")
            self.times.append(tdate)
            # metadata carries the ISO form of the same date
            tdate = tdate.strftime("%Y-%m-%dT%H:%M:%S")
            if "time" not in meta:
                meta["time"] = list()
            usetime = OrderedDict()
            usetime["value"] = tdate
            if tlabel:
                usetime["label"] = tlabel
            meta["time"].append(usetime)
def _data_process_description(self):
"""Process the data.description item.
Description is described as an array in the schema. But intuitively it is
provided as a string. Also need to maintain backwards compatibility for
string as input.
If description is not given, return without action
If description is array, stringify all items
If description is string, convert to single-item array
"""
meta = self.dataio.metadata4data
if self.description is None:
return
if isinstance(self.description, list):
meta["description"] = [str(item) for item in self.description]
elif isinstance(self.description, str):
meta["description"] = [self.description]
    def _data_process_various(self):
        """Process "all the rest" of the generic items.

        i.e.::

            unit,
            vertical_domain
            depth_reference
            properties (as tmp)
            grid_model
            is_prediction
            is_observation
        """
        logger.info("Process various general items in data block")
        meta = self.dataio.metadata4data
        meta["unit"] = self.unit
        # vertical_domain is given as a one-item {domain: reference} mapping;
        # unpack its single (key, value) pair
        (meta["vertical_domain"], meta["depth_reference"],) = list(
            self.dataio.vertical_domain.items()
        )[0]
        meta["is_prediction"] = self.dataio.is_prediction
        meta["is_observation"] = self.dataio.is_observation
        # tmp:
        meta["grid_model"] = None
def _data_process_object(self):
"""Process data fields which are object dependent.
I.e::
layout
spec
bbox
Note that 'format' field will be added in _item_to_file
"""
if self.subtype == "RegularSurface":
self._data_process_object_regularsurface()
elif self.subtype == "RegularCube":
self._data_process_object_regularcube()
elif self.subtype == "CPGrid":
self._data_process_cpgrid()
elif self.subtype == "CPGridProperty":
self._data_process_cpgridproperty()
elif self.subtype == "Polygons":
self._data_process_object_polygons()
elif self.subtype == "Points":
self._data_process_object_points()
elif self.subtype == "DataFrame":
self._data_process_object_dataframe()
elif self.subtype == "ArrowTable":
self._data_process_object_arrowtable()
    def _data_process_cpgrid(self):
        """Process/collect the data items for Corner Point Grid."""
        logger.info("Process data metadata for CP Grid")
        dataio = self.dataio
        grid = self.obj
        meta = dataio.metadata4data  # shortform
        meta["layout"] = "cornerpoint"
        # define spec record
        specs = grid.metadata.required
        newspecs = OrderedDict()
        for spec, val in specs.items():
            # numpy scalars are not serialization friendly; cast to plain float
            if isinstance(val, (np.float32, np.float64)):
                val = float(val)
            newspecs[spec] = val
        meta["spec"] = newspecs
        # bbox from cell-corner geometrics, rounded to 4 decimals
        geox = grid.get_geometrics(cellcenter=False, allcells=True, return_dict=True)
        meta["bbox"] = OrderedDict()
        meta["bbox"]["xmin"] = round(float(geox["xmin"]), 4)
        meta["bbox"]["xmax"] = round(float(geox["xmax"]), 4)
        meta["bbox"]["ymin"] = round(float(geox["ymin"]), 4)
        meta["bbox"]["ymax"] = round(float(geox["ymax"]), 4)
        meta["bbox"]["zmin"] = round(float(geox["zmin"]), 4)
        meta["bbox"]["zmax"] = round(float(geox["zmax"]), 4)
        logger.info("Process data metadata for Grid... done!!")
def _data_process_cpgridproperty(self):
"""Process/collect the data items for Corner Point GridProperty"""
logger.info("Process data metadata for CPGridProperty")
dataio = self.dataio
gridprop = self.obj
meta = dataio.metadata4data # shortform
meta["layout"] = "cornerpoint_property"
# define spec record
specs = OrderedDict()
specs["ncol"] = gridprop.ncol
specs["nrow"] = gridprop.nrow
specs["nlay"] = gridprop.nlay
meta["spec"] = specs
logger.info("Process data metadata for GridProperty... done!!")
    def _data_process_object_regularsurface(self):
        """Process/collect the data items for RegularSurface."""
        logger.info("Process data metadata for RegularSurface")
        dataio = self.dataio
        regsurf = self.obj
        meta = dataio.metadata4data  # shortform
        meta["layout"] = "regular"
        # define spec record
        specs = regsurf.metadata.required
        newspecs = OrderedDict()
        for spec, val in specs.items():
            # numpy scalars are not serialization friendly; cast to plain float
            if isinstance(val, (np.float32, np.float64)):
                val = float(val)
            newspecs[spec] = val
        meta["spec"] = newspecs
        meta["spec"]["undef"] = 1.0e30  # irap binary undef
        meta["bbox"] = OrderedDict()
        meta["bbox"]["xmin"] = float(regsurf.xmin)
        meta["bbox"]["xmax"] = float(regsurf.xmax)
        meta["bbox"]["ymin"] = float(regsurf.ymin)
        meta["bbox"]["ymax"] = float(regsurf.ymax)
        meta["bbox"]["zmin"] = float(regsurf.values.min())
        meta["bbox"]["zmax"] = float(regsurf.values.max())
        logger.info("Process data metadata for RegularSurface... done!!")
    def _data_process_object_regularcube(self):
        """Process/collect the data items for RegularCube."""
        logger.info("Process data metadata for RegularCube")
        dataio = self.dataio
        cube = self.obj
        meta = dataio.metadata4data  # shortform
        meta["layout"] = "regular"
        # define spec record
        specs = cube.metadata.required
        newspecs = OrderedDict()
        for spec, val in specs.items():
            # numpy scalars are not serialization friendly; cast to plain float
            if isinstance(val, (np.float32, np.float64)):
                val = float(val)
            newspecs[spec] = val
        meta["spec"] = newspecs
        meta["bbox"] = OrderedDict()
        # current xtgeo is missing xmin, xmax etc attributes for cube, so need
        # to compute (simplify when xtgeo has this):
        xmin = 1.0e23
        ymin = xmin
        xmax = -1 * xmin
        ymax = -1 * ymin
        # scan the four lateral corners (1-based i, j indexing) for extremes
        for corner in ((1, 1), (1, cube.nrow), (cube.ncol, 1), (cube.ncol, cube.nrow)):
            xco, yco = cube.get_xy_value_from_ij(*corner)
            xmin = xco if xco < xmin else xmin
            xmax = xco if xco > xmax else xmax
            ymin = yco if yco < ymin else ymin
            ymax = yco if yco > ymax else ymax
        meta["bbox"]["xmin"] = xmin
        meta["bbox"]["xmax"] = xmax
        meta["bbox"]["ymin"] = ymin
        meta["bbox"]["ymax"] = ymax
        meta["bbox"]["zmin"] = float(cube.zori)
        meta["bbox"]["zmax"] = float(cube.zori + cube.zinc * (cube.nlay - 1))
        logger.info("Process data metadata for RegularCube... done!!")
    def _data_process_object_polygons(self):
        """Process/collect the data items for Polygons."""
        logger.info("Process data metadata for Polygons/Polylines")
        dataio = self.dataio
        poly = self.obj
        meta = dataio.metadata4data  # shortform
        meta["spec"] = OrderedDict()
        # number of polygons:
        meta["spec"]["npolys"] = np.unique(poly.dataframe[poly.pname].values).size
        xmin, xmax, ymin, ymax, zmin, zmax = poly.get_boundary()
        meta["bbox"] = OrderedDict()
        meta["bbox"]["xmin"] = float(xmin)
        meta["bbox"]["xmax"] = float(xmax)
        meta["bbox"]["ymin"] = float(ymin)
        meta["bbox"]["ymax"] = float(ymax)
        meta["bbox"]["zmin"] = float(zmin)
        meta["bbox"]["zmax"] = float(zmax)
        logger.info("Process data metadata for Polygons... done!!")
    def _data_process_object_points(self):
        """Process/collect the data items for Points."""
        logger.info("Process data metadata for Points")
        dataio = self.dataio
        poi = self.obj
        meta = dataio.metadata4data
        # shortform
        meta["spec"] = OrderedDict()
        # list attributes (extra columns)
        if len(poi.dataframe.columns) > 3:
            attrnames = poi.dataframe.columns[3:]
            meta["spec"]["attributes"] = list(attrnames)
        # NOTE(review): dataframe.size counts cells (rows * columns), not the
        # number of points -- confirm this is the intended meaning of 'size'.
        meta["spec"]["size"] = int(poi.dataframe.size)
        # min, max:
        xmin = poi.dataframe[poi.xname].min()
        xmax = poi.dataframe[poi.xname].max()
        ymin = poi.dataframe[poi.yname].min()
        ymax = poi.dataframe[poi.yname].max()
        zmin = poi.dataframe[poi.zname].min()
        zmax = poi.dataframe[poi.zname].max()
        meta["bbox"] = OrderedDict()
        meta["bbox"]["xmin"] = float(xmin)
        meta["bbox"]["xmax"] = float(xmax)
        meta["bbox"]["ymin"] = float(ymin)
        meta["bbox"]["ymax"] = float(ymax)
        meta["bbox"]["zmin"] = float(zmin)
        meta["bbox"]["zmax"] = float(zmax)
        logger.info("Process data metadata for Points... done!!")
def _data_process_object_dataframe(self):
"""Process/collect the data items for DataFrame."""
logger.info("Process data metadata for DataFrame (tables)")
dataio = self.dataio
dfr = self.obj
meta = dataio.metadata4data # shortform
meta["layout"] = "table"
# define spec record
meta["spec"] = OrderedDict()
meta["spec"]["columns"] = list(dfr.columns)
meta["spec"]["size"] = int(dfr.size)
meta["bbox"] = None
logger.info("Process data metadata for DataFrame... done!!")
    def _data_process_object_arrowtable(self):
        """Process/collect the data items for pa.Table."""
        logger.info("Process data metadata for ArrowTables (tables)")
        dataio = self.dataio
        table = self.obj
        meta = dataio.metadata4data  # shortform
        meta["layout"] = "table"
        # define spec record
        meta["spec"] = OrderedDict()
        meta["spec"]["columns"] = list(table.column_names)
        # total number of cells (columns * rows); tables have no bbox
        meta["spec"]["size"] = table.num_columns * table.num_rows
        meta["bbox"] = None
        logger.info("Process data metadata for ArrowTable... done!!")
    def _display_process(self):
        """Process common subfields in the display block.

        For now, this is simply injecting a skeleton with loose
        defaults. We might want to be more elaborate in the future.
        Pending discussions and learning from usage.

        The main 'name' attribute may be related to master-data and/or
        be a reference to other things, hence it cannot be prettified
        for display on maps. The display.name serves this purpose.

        display.name can be set through the display_name argument to
        fmu.dataio.ExportData. If not set, the first fallback is the
        name argument. If that is not set either, the last fallback is
        the object name. If that is not set, display.name will be exported
        as None/null.

        The main concept followed for now is that the visualising client
        is to take the most responsibility for how a data object is
        visualized.
        """
        logger.info("Processing display")
        self._display_process_name()
        logger.info("Display has been processed.")
    def _display_process_name(self):
        """Process display.name.

        Fallback chain: explicit display_name -> name argument -> object name.
        """
        logger.info("Processing display name")
        meta = self.dataio.metadata4display
        logger.debug("self.display_name is %s", self.display_name)
        logger.debug("self.name is %s", self.name)
        meta["name"] = (
            self.display_name or self.name or _utils.get_object_name(self.obj)
        )
        logger.info("display.name is set to %s", meta["name"])
        logger.info("Processing display is done!")
def _item_to_file(self):
logger.info("Export item to file...")
logger.debug("Subtype is %s", self.subtype)
# fstem is filename without suffix
fstem, fpath = self._construct_filename_fmustandard1()
if self.fmt not in self.valid.keys():
raise ValueError(
f"The file format {self.fmt} is not supported.",
f"Valid {self.subtype} formats are: {list(self.valid.keys())}",
)
ext = self.valid.get(self.fmt, None)
if ext is None:
raise RuntimeError(f"Cannot get correct file extension for {self.fmt}")
outfile, metafile, relpath, abspath = self._verify_path(fstem, fpath, ext)
self._export_actual_object(outfile, metafile, relpath, abspath)
return abspath
    def _construct_filename_fmustandard1(self):
        """Construct filename stem according to datatype (class) and fmu style 1.

        fmu style/standard 1:

            surface:
                namehorizon--tagname
                namehorizon--tagname--t1
                namehorizon--tagname--t1_t2  # t1 is monitor time while t2 is base time
                e.g.
                topvolantis--ds_gf_extracted
                therys--facies_fraction_lowershoreface

            grid (geometry):
                gridname

            gridproperty:
                gridname--proptagname
                gridname--tagname--t1
                gridname--tagname--t1_t2
                e.g.
                geogrid_valysar--phit

        Destinations according to datatype.

        For timedata with two dates, the standard is some--monitortime_basetime.
        Hence t1 is newer than t2.

        Removing dots from filename: Currently, when multiple dots in a filename
        stem, XTgeo, using pathlib, will interpret the part after the last dot as
        the file suffix, and remove it. This causes errors in the output filenames.
        While this is being taken care of in XTgeo, we temporarily sanitize dots
        from the outgoing filename only to avoid this.

        Space will also be replaced in file names.

        Returns:
            Stem for the file name and the destination folder (created if needed).
        """
        stem = "unset"
        # observations go to share/observations, predictions to share/results
        outroot = self.dataio.runpath / "share" / "results"
        if self.dataio.is_observation:
            outroot = self.dataio.runpath / "share" / "observations"
        loc = self.efolder
        stem = self.name.lower()
        if self.tagname:
            stem += "--" + self.tagname.lower()
        if self.parent:
            stem = self.parent.lower() + "--" + stem
        if self.times:
            time0 = self.times[0]
            time1 = self.times[1]
            if time0 and not time1:
                # single date: append as YYYYMMDD
                stem += "--" + (str(time0)[0:10]).replace("-", "")
            elif time0 and time1:
                # two dates: monitor first, then base
                monitor = (str(time0)[0:10]).replace("-", "")
                base = (str(time1)[0:10]).replace("-", "")
                if monitor == base:
                    warnings.warn(
                        "The monitor date and base date are equal", UserWarning
                    )  # TODO: consider add clocktimes in such cases?
                stem += "--" + monitor + "_" + base
        # sanitize dots (see docstring) and spaces
        stem = stem.replace(".", "_").replace(" ", "_")
        dest = outroot / loc
        if self.subfolder:
            dest = dest / self.subfolder
        dest.mkdir(parents=True, exist_ok=True)
        return stem, dest
    def _verify_path(
        self, filestem: str, filepath: Path, ext: str, dryrun=False
    ) -> Tuple[Path, Path, Path, Path]:
        """Combine file name, extensions, etc, verify paths and return cleaned items.

        Returns:
            Tuple of (path, metapath, relpath, abspath): the data file path,
            the hidden ``.yml`` metadata companion, the path relative to the
            run/case root, and the fully resolved absolute path.

        Raises:
            IOError: if the destination folder does not exist (unless dryrun).
        """
        logger.info("Incoming file stem is %s", filestem)
        logger.info("Incoming file path is %s", filepath)
        logger.info("Incoming ext is %s", ext)
        path = Path(filepath) / filestem.lower()
        path = path.with_suffix(path.suffix + ext)
        # resolve() will fix ".." e.g. change /some/path/../other to /some/other
        abspath = path.resolve()
        logger.info("Path with suffix is %s", path)
        logger.info("Absolute path (resolved) is %s", abspath)
        logger.info("The RUNPATH is %s", self.dataio.runpath)
        if not dryrun:
            if path.parent.exists():
                logger.info("Folder exists")
            else:
                # this folder should have been made in _construct_filename...
                raise IOError(f"Folder {str(path.parent)} is not present.")
        # create metafile path: same stem with a leading dot and .yml suffix
        metapath = (
            (Path(filepath) / ("." + filestem.lower())).with_suffix(ext + ".yml")
        ).resolve()
        # get the relative path (relative to runptah if interactive, and to casedir
        # if this is an ERT run)
        useroot = self.dataio.runpath.resolve()
        logger.info("The useroot (initial) is %s", useroot)
        if self.iterfolder:
            # ERT run: case dir is two levels above the realization runpath
            useroot = (useroot / "../..").resolve()
            logger.info("The useroot (updated) is %s", useroot)
        relpath = abspath.relative_to(useroot)
        path = path.absolute()  # may contain "../.." in path (not resolved)
        logger.info("Full path to the actual file is: %s", path)
        logger.info("Full path to the actual file is (resolved): %s", abspath)
        logger.info("Full path to the metadata file (if used) is: %s", metapath)
        logger.info("Relative path to actual file: %s", relpath)
        return path, metapath, relpath, abspath
    def _export_actual_object(self, outfile, metafile, relpath, abspath):
        """Export to file, dependent on format and object type.

        Writes the data file, records the actual 'format' in the data
        metadata, then populates the file block and writes the sidecar
        metadata file.

        Raises:
            TypeError: if the (format, subtype) combination is unsupported.
        """
        if "irap" in self.fmt and self.subtype == "RegularSurface":
            self.obj.to_file(outfile, fformat="irap_binary")
            self.dataio.metadata4data["format"] = "irap_binary"
        elif "segy" in self.fmt:
            self.obj.to_file(outfile, fformat="segy")
            self.dataio.metadata4data["format"] = "segy"
        elif "roff" in self.fmt:
            self.obj.to_file(outfile, fformat="roff")
            self.dataio.metadata4data["format"] = "roff"
        elif "csv" in self.fmt and self.subtype == "Polygons":
            worker = self.obj.dataframe.copy()
            if "xtgeo" not in self.fmt:
                # plain csv variant renames the xtgeo columns to X, Y, Z, ID
                renamings = {
                    self.obj.xname: "X",
                    self.obj.yname: "Y",
                    self.obj.zname: "Z",
                    self.obj.pname: "ID",
                }
                worker.rename(columns=renamings, inplace=True)
            worker.to_csv(outfile, index=False)
            self.dataio.metadata4data["format"] = "csv"
        elif "csv" in self.fmt and self.subtype == "Points":
            worker = self.obj.dataframe.copy()
            if "xtgeo" not in self.fmt:
                # plain csv variant renames the xtgeo columns to X, Y, Z
                renamings = {
                    self.obj.xname: "X",
                    self.obj.yname: "Y",
                    self.obj.zname: "Z",
                }
                worker.rename(columns=renamings, inplace=True)
            worker.to_csv(outfile, index=False)
            self.dataio.metadata4data["format"] = "csv"
        elif "irap_ascii" in self.fmt and self.subtype == "Polygons":
            self.obj.to_file(outfile)
            self.dataio.metadata4data["format"] = "irap_ascii"
        elif "irap_ascii" in self.fmt and self.subtype == "Points":
            self.obj.to_file(outfile)
            self.dataio.metadata4data["format"] = "irap_ascii"
        elif self.fmt == "csv" and self.subtype == "DataFrame":
            logger.info("Exporting table as csv, with INDEX %s", self.include_index)
            self.obj.to_csv(outfile, index=self.include_index)
            self.dataio.metadata4data["format"] = "csv"
        elif self.fmt == "arrow":
            logger.info("Exporting table as arrow")
            # comment taken from equinor/webviz_subsurface/smry2arrow.py
            # Writing here is done through the feather import, but could also be
            # done using pa.RecordBatchFileWriter.write_table() with a few
            # pa.ipc.IpcWriteOptions(). It is convenient to use feather since it
            # has ready configured defaults and the actual file format is the same
            # (https://arrow.apache.org/docs/python/feather.html)
            feather.write_feather(self.obj, dest=outfile)
            self.dataio.metadata4data["format"] = "arrow"
        else:
            raise TypeError(f"Exporting {self.fmt} for {self.subtype} is not supported")
        # metadata:
        self._item_to_file_create_file_block(outfile, relpath, abspath)
        allmeta = self._item_to_file_collect_all_metadata()
        _utils.export_metadata_file(
            metafile, allmeta, verbosity=self.verbosity, savefmt=self.dataio.meta_format
        )
        return str(outfile)
def _item_to_file_collect_all_metadata(self):
"""Process all metadata for actual instance."""
logger.info("Collect all metadata")
dataio = self.dataio
allmeta = OrderedDict()
for dollar in dataio.metadata4dollars.keys():
allmeta[dollar] = dataio.metadata4dollars[dollar]
allmeta["class"] = self.classname
allmeta["file"] = dataio.metadata4file
allmeta["access"] = dataio.metadata4access
allmeta["masterdata"] = dataio.metadata4masterdata
allmeta["tracklog"] = dataio.metadata4tracklog
allmeta["fmu"] = dataio.metadata4fmu
allmeta["data"] = dataio.metadata4data
allmeta["display"] = dataio.metadata4display
logger.debug("\n%s", json.dumps(allmeta, indent=2, default=str))
logger.info("Collect all metadata, done")
return allmeta
def _item_to_file_create_file_block(self, outfile, relpath, abspath):
"""Process the file block.
The file block contains relative and absolute paths, file size
and md5 checksum. This function receives the paths, calculates
size and checksum, and populates the file block by inserting
directly to the premade dataio.metadata4file.
"""
self.dataio.metadata4file["relative_path"] = str(relpath)
self.dataio.metadata4file["absolute_path"] = str(abspath)
md5sum = _utils.md5sum(outfile)
self.dataio.metadata4file["checksum_md5"] = md5sum
size_bytes = _utils.size(outfile)
self.dataio.metadata4file["size_bytes"] = size_bytes
| [
"logging.getLogger",
"collections.OrderedDict",
"numpy.unique",
"pathlib.Path",
"datetime.datetime.strptime",
"json.dumps",
"copy.deepcopy",
"warnings.warn",
"pyarrow.feather.write_feather"
] | [((2264, 2291), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2281, 2291), False, 'import logging\n'), ((18900, 18923), 'copy.deepcopy', 'deepcopy', (['self.timedata'], {}), '(self.timedata)\n', (18908, 18923), False, 'from copy import deepcopy\n'), ((23141, 23154), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (23152, 23154), False, 'from collections import OrderedDict\n'), ((23462, 23475), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (23473, 23475), False, 'from collections import OrderedDict\n'), ((24293, 24306), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24304, 24306), False, 'from collections import OrderedDict\n'), ((24937, 24950), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24948, 24950), False, 'from collections import OrderedDict\n'), ((25231, 25244), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (25242, 25244), False, 'from collections import OrderedDict\n'), ((26040, 26053), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (26051, 26053), False, 'from collections import OrderedDict\n'), ((26274, 26287), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (26285, 26287), False, 'from collections import OrderedDict\n'), ((27489, 27502), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (27500, 27502), False, 'from collections import OrderedDict\n'), ((27705, 27718), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (27716, 27718), False, 'from collections import OrderedDict\n'), ((28334, 28347), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (28345, 28347), False, 'from collections import OrderedDict\n'), ((28917, 28930), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (28928, 28930), False, 'from collections import OrderedDict\n'), ((29620, 29633), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (29631, 29633), False, 'from collections import OrderedDict\n'), ((30196, 
30209), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (30207, 30209), False, 'from collections import OrderedDict\n'), ((41082, 41095), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (41093, 41095), False, 'from collections import OrderedDict\n'), ((4941, 5084), 'warnings.warn', 'warnings.warn', (['"""Using \'index\' is deprecated and will be removed in future versions, use \'include_index\' instead."""', 'DeprecationWarning'], {}), '(\n "Using \'index\' is deprecated and will be removed in future versions, use \'include_index\' instead."\n , DeprecationWarning)\n', (4954, 5084), False, 'import warnings\n'), ((5656, 5804), 'warnings.warn', 'warnings.warn', (['"""Exporting to a subfolder is a deviation from the standard and could have consequences for later dependencies"""', 'UserWarning'], {}), "(\n 'Exporting to a subfolder is a deviation from the standard and could have consequences for later dependencies'\n , UserWarning)\n", (5669, 5804), False, 'import warnings\n'), ((12783, 12888), 'warnings.warn', 'warnings.warn', (['"""context top and/base is present but <ref> is missing, skip further"""', 'UserWarning'], {}), "(\n 'context top and/base is present but <ref> is missing, skip further',\n UserWarning)\n", (12796, 12888), False, 'import warnings\n'), ((13758, 13771), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13769, 13771), False, 'from collections import OrderedDict\n'), ((14321, 14478), 'warnings.warn', 'warnings.warn', (['"""The <content> is not provided which defaults to \'depth\'. It is strongly recommended that content is given explicitly!"""', 'UserWarning'], {}), '(\n "The <content> is not provided which defaults to \'depth\'. 
It is strongly recommended that content is given explicitly!"\n , UserWarning)\n', (14334, 14478), False, 'import warnings\n'), ((19862, 19896), 'datetime.datetime.strptime', 'datetime.strptime', (['tdate', '"""%Y%m%d"""'], {}), "(tdate, '%Y%m%d')\n", (19879, 19896), False, 'from datetime import datetime\n'), ((20085, 20098), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (20096, 20098), False, 'from collections import OrderedDict\n'), ((27566, 27610), 'numpy.unique', 'np.unique', (['poly.dataframe[poly.pname].values'], {}), '(poly.dataframe[poly.pname].values)\n', (27575, 27610), True, 'import numpy as np\n'), ((35906, 35920), 'pathlib.Path', 'Path', (['filepath'], {}), '(filepath)\n', (35910, 35920), False, 'from pathlib import Path\n'), ((41642, 41684), 'json.dumps', 'json.dumps', (['allmeta'], {'indent': '(2)', 'default': 'str'}), '(allmeta, indent=2, default=str)\n', (41652, 41684), False, 'import json\n'), ((9999, 10074), 'warnings.warn', 'warnings.warn', (['"""Cannot get name from object, assume \'unknown\'"""', 'UserWarning'], {}), '("Cannot get name from object, assume \'unknown\'", UserWarning)\n', (10012, 10074), False, 'import warnings\n'), ((35057, 35127), 'warnings.warn', 'warnings.warn', (['"""The monitor date and base date are equal"""', 'UserWarning'], {}), "('The monitor date and base date are equal', UserWarning)\n", (35070, 35127), False, 'import warnings\n'), ((36627, 36641), 'pathlib.Path', 'Path', (['filepath'], {}), '(filepath)\n', (36631, 36641), False, 'from pathlib import Path\n'), ((40358, 40403), 'pyarrow.feather.write_feather', 'feather.write_feather', (['self.obj'], {'dest': 'outfile'}), '(self.obj, dest=outfile)\n', (40379, 40403), False, 'from pyarrow import feather\n')] |
#!/usr/bin/env python
import roslib
roslib.load_manifest('crazyflie_control')
import rospy
import sys
from geometry_msgs.msg import Vector3
from nav_msgs.msg import Odometry
from crazyflie_driver.msg import RPYT
import dynamic_reconfigure.server
from crazyflie_control.cfg import CrazyflieControlConfig
from math import *
import numpy as np
class CrazyflieControlNode(object):
    """PD position controller for a Crazyflie quadrotor.

    Subscribes to external odometry (e.g. from a Vicon system) on
    /<name>/odom, estimates velocity by filtered finite differencing,
    and publishes desired roll/pitch/yaw/thrust (RPYT) set-points plus
    the measured attitude for monitoring.
    """

    # Physical parameters and PD gains (gains and targets are
    # overridden at runtime through dynamic_reconfigure).
    mass = 1.0
    gravity = 9.801
    kpz = 1.0
    kdz = 1.0
    kpx = 1.0
    kpy = 1.0
    kdx = 1.0
    kdy = 1.0

    # Desired position set-point (from dynamic_reconfigure).
    xd = 0.0
    yd = 0.0
    zd = 0.0

    # Velocity estimates (filtered finite differences of odometry).
    xp = 0.0
    yp = 0.0
    zp = 0.0

    # Latest measured position (metres) and attitude quaternion.
    x = 0.0
    y = 0.0
    z = 0.0
    q0 = 1.0
    q1 = 0.0
    q2 = 0.0
    q3 = 0.0

    # Thrust enable flag; defaulted so spin() is safe even before the
    # first reconfigure callback has fired.
    power = False

    last_odometry_update = rospy.Time()

    def __init__(self, default_name='apollo', default_update_rate=100):
        self.default_name = default_name
        self.default_update_rate = default_update_rate
        rospy.init_node('crazyflie_control')
        self._init_params()
        self._init_pubsub()
        dynamic_reconfigure.server.Server(CrazyflieControlConfig, self.reconfigure)
        self.last_odometry_update = rospy.get_rostime()

    def _init_params(self):
        """Read the node name and loop rate from the parameter server."""
        self.name = rospy.get_param('~name', self.default_name)
        self.update_rate = rospy.get_param('~update_rate', self.default_update_rate)

    def _init_pubsub(self):
        """Wire up the odometry subscriber and the two output publishers."""
        self.vicon_sub = rospy.Subscriber('/' + self.name + '/odom', Odometry, self.set_odometry)
        self.rotation_desired_pub = rospy.Publisher('/' + self.name + '/rotation_desired', RPYT)
        self.rotation_actual_pub = rospy.Publisher('/' + self.name + '/rotation_actual', Vector3)

    def set_odometry(self, msg):
        """Cache the latest pose and update the velocity estimates."""
        now = rospy.get_rostime()
        # BUGFIX: elapsed time is now - last, not last - now (the original
        # produced a negative duration, flipping the velocity sign).
        dt = now - self.last_odometry_update
        x_old = self.x
        y_old = self.y
        z_old = self.z
        self.x = msg.pose.pose.position.x * 0.001  # mm -> m
        self.y = msg.pose.pose.position.y * 0.001
        self.z = msg.pose.pose.position.z * 0.001
        self.q1 = msg.pose.pose.orientation.x
        self.q2 = msg.pose.pose.orientation.y
        self.q3 = msg.pose.pose.orientation.z
        self.q0 = msg.pose.pose.orientation.w
        # BUGFIX: the filtered finite-difference velocity belongs in the
        # velocity state xp/yp/zp; the original overwrote the desired
        # position xd/yd/zd that reconfigure() sets, and spin() damps
        # with xp/yp/zp which were never updated.
        self.xp = (2.0 / dt.to_sec()) * (self.x - x_old) - self.xp
        self.yp = (2.0 / dt.to_sec()) * (self.y - y_old) - self.yp
        self.zp = (2.0 / dt.to_sec()) * (self.z - z_old) - self.zp
        self.last_odometry_update = now

    def reconfigure(self, config, level):
        """dynamic_reconfigure callback: refresh gains and set-points."""
        self.kpx = config['kpx']
        self.kpy = config['kpy']
        self.kpz = config['kpz']
        self.kdx = config['kdx']
        self.kdy = config['kdy']
        self.kdz = config['kdz']
        self.xd = config['xd']
        self.yd = config['yd']
        self.zd = config['zd']
        self.power = config['power']
        return config

    def spin(self):
        """Main control loop: publish attitude set-points at update_rate."""
        rospy.loginfo("Spinning")
        rate = rospy.Rate(self.update_rate)
        while not rospy.is_shutdown():
            # Gravity direction expressed in the body frame.
            gx = 2 * (self.q1 * self.q3 - self.q0 * self.q2)
            gy = 2 * (self.q0 * self.q1 + self.q2 * self.q3)
            gz = self.q0 * self.q0 - self.q1 * self.q1 - self.q2 * self.q2 + self.q3 * self.q3

            # Current attitude in degrees, published for monitoring.
            yaw = atan2(2 * self.q1 * self.q2 - 2 * self.q0 * self.q3,
                        2 * self.q0 * self.q0 + 2 * self.q1 * self.q1 - 1) * 180 / pi
            pitch = atan(gx / sqrt(gy * gy + gz * gz)) * 180 / pi
            roll = atan(gy / sqrt(gx * gx + gz * gz)) * 180 / pi

            msg_actual = Vector3()
            msg_actual.x = roll
            msg_actual.y = pitch
            msg_actual.z = yaw
            self.rotation_actual_pub.publish(msg_actual)

            # BUGFIX: [[0]*3]*3 repeats the SAME inner list three times,
            # so writing one row silently clobbered the others and
            # np.matrix(R) ended up with three identical rows. Build
            # independent rows instead.
            R = [[0.0] * 3 for _ in range(3)]
            R[0][0] = pow(self.q0, 2) + pow(self.q1, 2) - pow(self.q2, 2) - pow(self.q3, 2)
            # NOTE(review): the off-diagonal terms below use 2*q0*q1 where
            # a standard quaternion rotation matrix has 2*q1*q2 -/+ 2*q0*q3;
            # confirm against the controller derivation before changing.
            R[0][1] = 2 * self.q0 * self.q1 - 2 * self.q0 * self.q3
            R[0][2] = 2 * self.q1 * self.q3 + 2 * self.q0 * self.q2
            R[1][0] = 2 * self.q0 * self.q1 + 2 * self.q0 * self.q3
            R[1][1] = pow(self.q0, 2) - pow(self.q1, 2) + pow(self.q2, 2) - pow(self.q3, 2)
            R[1][2] = 2 * self.q2 * self.q3 - 2 * self.q0 * self.q1
            R[2][0] = 2 * self.q1 * self.q3 - 2 * self.q0 * self.q2
            R[2][1] = 2 * self.q2 * self.q3 + 2 * self.q0 * self.q1
            R[2][2] = pow(self.q0, 2) - pow(self.q1, 2) - pow(self.q2, 2) + pow(self.q3, 2)
            r_matrix = np.matrix(R)

            # Collective thrust from the altitude PD law, tilt-compensated.
            f = self.mass / R[2][2] * (self.gravity - self.kpz * (self.z - self.zd) - self.kdz * self.zp)

            # Desired third column of the rotation matrix from the x/y PD law.
            r13d = self.mass / f * (-self.kpx * (self.x - self.xd) - self.kdx * self.xp)
            r23d = self.mass / f * (-self.kpy * (self.y - self.yd) - self.kdy * self.yp)
            r33d = sqrt(1 - pow(r13d, 2) - pow(r23d, 2))

            # Axis-angle rotation taking body z onto the desired direction.
            v = [0.0] * 3
            v[0] = -r23d
            v[1] = r13d
            v[2] = 0.0
            angle = acos(r33d)
            ca = cos(angle)
            sa = sin(angle)

            # BUGFIX: same shared-row aliasing as for R above.
            A = [[0.0] * 3 for _ in range(3)]
            A[0][0] = ca + pow(v[0], 2) * (1 - ca)
            A[0][1] = v[0] * v[1] * (1 - ca) - v[2] * sa
            A[0][2] = v[0] * v[2] * (1 - ca) + v[1] * sa
            A[1][0] = v[0] * v[1] * (1 - ca) + v[2] * sa
            A[1][1] = ca + pow(v[1], 2) * (1 - ca)
            A[1][2] = v[1] * v[2] * (1 - ca) - v[0] * sa
            A[2][0] = v[0] * v[2] * (1 - ca) + v[1] * sa
            A[2][1] = v[1] * v[2] * (1 - ca) + v[0] * sa
            A[2][2] = ca + pow(v[2], 2) * (1 - ca)
            a_matrix = np.matrix(A)

            rd = [r13d, r23d, r33d]
            rd_matrix = np.matrix(rd)

            # Desired gravity direction expressed through the current attitude.
            gd = np.transpose(r_matrix) * a_matrix * np.transpose(rd_matrix)

            eulerRollDesired = atan2(gd[1], sqrt(pow(gd[1], 2) + pow(gd[2], 2))) * 180 / pi
            eulerPitchDesired = -atan(gd[0] / sqrt(pow(gd[1], 2) + pow(gd[2], 2))) * 180 / pi
            eulerYawDesired = 0.0

            msg_desired = RPYT()
            msg_desired.roll = eulerRollDesired
            msg_desired.pitch = eulerPitchDesired
            msg_desired.yaw = eulerYawDesired
            # Thrust is only applied once enabled via dynamic_reconfigure.
            if self.power:
                msg_desired.thrust = f
            else:
                msg_desired.thrust = 0.0
            self.rotation_desired_pub.publish(msg_desired)
            rate.sleep()
def crazyflie_control_main(argv):
    """Entry point: build the controller node and run its control loop."""
    node = CrazyflieControlNode()
    node.spin()
if __name__ == '__main__':
    # Run the control node when executed as a script; argv is passed
    # through unchanged.
    crazyflie_control_main(sys.argv)
| [
"rospy.Publisher",
"geometry_msgs.msg.Vector3",
"numpy.transpose",
"rospy.is_shutdown",
"rospy.init_node",
"rospy.get_param",
"rospy.get_rostime",
"roslib.load_manifest",
"rospy.Time",
"rospy.Rate",
"numpy.matrix",
"rospy.Subscriber",
"crazyflie_driver.msg.RPYT",
"rospy.loginfo"
] | [((37, 78), 'roslib.load_manifest', 'roslib.load_manifest', (['"""crazyflie_control"""'], {}), "('crazyflie_control')\n", (57, 78), False, 'import roslib\n'), ((641, 653), 'rospy.Time', 'rospy.Time', ([], {}), '()\n', (651, 653), False, 'import rospy\n'), ((814, 850), 'rospy.init_node', 'rospy.init_node', (['"""crazyflie_control"""'], {}), "('crazyflie_control')\n", (829, 850), False, 'import rospy\n'), ((1009, 1028), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (1026, 1028), False, 'import rospy\n'), ((1071, 1114), 'rospy.get_param', 'rospy.get_param', (['"""~name"""', 'self.default_name'], {}), "('~name', self.default_name)\n", (1086, 1114), False, 'import rospy\n'), ((1136, 1193), 'rospy.get_param', 'rospy.get_param', (['"""~update_rate"""', 'self.default_update_rate'], {}), "('~update_rate', self.default_update_rate)\n", (1151, 1193), False, 'import rospy\n'), ((1241, 1313), 'rospy.Subscriber', 'rospy.Subscriber', (["('/' + self.name + '/odom')", 'Odometry', 'self.set_odometry'], {}), "('/' + self.name + '/odom', Odometry, self.set_odometry)\n", (1257, 1313), False, 'import rospy\n'), ((1344, 1404), 'rospy.Publisher', 'rospy.Publisher', (["('/' + self.name + '/rotation_desired')", 'RPYT'], {}), "('/' + self.name + '/rotation_desired', RPYT)\n", (1359, 1404), False, 'import rospy\n'), ((1434, 1496), 'rospy.Publisher', 'rospy.Publisher', (["('/' + self.name + '/rotation_actual')", 'Vector3'], {}), "('/' + self.name + '/rotation_actual', Vector3)\n", (1449, 1496), False, 'import rospy\n'), ((1540, 1559), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (1557, 1559), False, 'import rospy\n'), ((2516, 2541), 'rospy.loginfo', 'rospy.loginfo', (['"""Spinning"""'], {}), "('Spinning')\n", (2529, 2541), False, 'import rospy\n'), ((2548, 2576), 'rospy.Rate', 'rospy.Rate', (['self.update_rate'], {}), '(self.update_rate)\n', (2558, 2576), False, 'import rospy\n'), ((2592, 2611), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (2609, 
2611), False, 'import rospy\n'), ((3029, 3038), 'geometry_msgs.msg.Vector3', 'Vector3', ([], {}), '()\n', (3036, 3038), False, 'from geometry_msgs.msg import Vector3\n'), ((3760, 3772), 'numpy.matrix', 'np.matrix', (['R'], {}), '(R)\n', (3769, 3772), True, 'import numpy as np\n'), ((4657, 4669), 'numpy.matrix', 'np.matrix', (['A'], {}), '(A)\n', (4666, 4669), True, 'import numpy as np\n'), ((4756, 4769), 'numpy.matrix', 'np.matrix', (['rd'], {}), '(rd)\n', (4765, 4769), True, 'import numpy as np\n'), ((5043, 5049), 'crazyflie_driver.msg.RPYT', 'RPYT', ([], {}), '()\n', (5047, 5049), False, 'from crazyflie_driver.msg import RPYT\n'), ((4811, 4834), 'numpy.transpose', 'np.transpose', (['rd_matrix'], {}), '(rd_matrix)\n', (4823, 4834), True, 'import numpy as np\n'), ((4779, 4801), 'numpy.transpose', 'np.transpose', (['r_matrix'], {}), '(r_matrix)\n', (4791, 4801), True, 'import numpy as np\n')] |
## data folder: D:\work\project\ITA Refresh\Session4 Oil Prediction
# -*- coding: utf-8 -*-
from __future__ import print_function
import time
import warnings
import numpy as np
import time
import matplotlib.pyplot as plt
from numpy import newaxis
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
warnings.filterwarnings("ignore")
def load_data(filename, seq_len, normalise_window):
    """Load a price series and build sliding windows for LSTM training.

    Args:
        filename: path to a file with one value per line, CRLF-separated.
        seq_len: window length; each sample has seq_len inputs + 1 label.
        normalise_window: if True, scale each window relative to its
            first value via normalise_windows().

    Returns:
        [x_train, y_train, x_test, y_test] where the x arrays are shaped
        (n_samples, seq_len, 1); the split is 90% train / 10% test and
        only the training rows are shuffled.
    """
    # BUGFIX: the original leaked the file handle (open().read() with no
    # close); a context manager guarantees it is released.
    with open(filename, 'rb') as fh:
        raw = fh.read()
    data = raw.decode().replace("b'", "").split("\r\n")
    print('data len:', len(data))
    print('sequence len:', seq_len)

    sequence_length = seq_len + 1
    # Sliding windows of length seq_len + 1; the last element of each
    # window is used as the label.
    result = []
    for index in range(len(data) - sequence_length):
        result.append(data[index: index + sequence_length])
    print('result len:', len(result))

    if normalise_window:
        result = normalise_windows(result)

    result = np.array(result)

    # 90/10 train/test split; shuffle the training windows only so the
    # test tail keeps its chronological order.
    row = round(0.9 * result.shape[0])
    train = result[:row, :]
    np.random.shuffle(train)
    x_train = train[:, :-1]
    y_train = train[:, -1]
    x_test = result[row:, :-1]
    y_test = result[row:, -1]

    # Reshape inputs to (samples, timesteps, features=1) as the LSTM expects.
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))

    return [x_train, y_train, x_test, y_test]
def normalise_windows(window_data):
    """Rescale each window to returns relative to its first element.

    Every value p in a window becomes p / window[0] - 1, so each window
    starts at 0.0 and later values express relative change.
    """
    return [
        [float(value) / float(window[0]) - 1 for value in window]
        for window in window_data
    ]
def build_model(layers):
    """Build and compile a stacked two-layer LSTM regression model.

    Args:
        layers: [input_dim, lstm1_units, lstm2_units, output_dim],
            e.g. [1, 50, 100, 1].

    Returns:
        A compiled Keras Sequential model (MSE loss, RMSprop optimizer).

    NOTE(review): uses the legacy Keras 1.x API (input_dim/output_dim
    keyword arguments) — will need porting for Keras 2+.
    """
    model = Sequential()
    # First LSTM returns the full sequence so it can feed the second LSTM.
    model.add(LSTM(input_dim=layers[0],output_dim=layers[1],return_sequences=True))
    model.add(Dropout(0.2))
    # Second LSTM emits only its final state for the dense output head.
    model.add(LSTM(layers[2],return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(output_dim=layers[3]))
    model.add(Activation("linear"))

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("Compilation Time : ", time.time() - start)
    return model
# Predict the whole test set directly, one step per input window.
def predict_point_by_point(model, data):
    """Predict each test window independently; return a flat 1-D array."""
    raw = model.predict(data)
    print('predicted shape:', np.array(raw).shape)
    # Collapse the (n, 1) prediction matrix into a flat vector.
    return np.reshape(raw, (raw.size,))
# Rolling prediction: feed each prediction back in as the next input.
def predict_sequence_full(model, data, window_size):
    """Roll the model forward across the whole test span.

    Starts from the first window of `data` and keeps feeding the model's
    own previous predictions back in, so errors compound over time.
    Returns a list with one prediction per test sample.
    """
    frame = data[0]  # shape (window_size, 1)
    predictions = []
    for _ in range(len(data)):
        next_value = model.predict(frame[newaxis, :, :])[0, 0]
        predictions.append(next_value)
        # Slide the window: drop the oldest sample, append the prediction.
        frame = np.insert(frame[1:], [window_size - 1], next_value, axis=0)
    return predictions
def predict_sequences_multiple(model, data, window_size, prediction_len):
    """Roll out `prediction_len` steps from evenly spaced start frames.

    Args:
        model: object with a Keras-style .predict method.
        data: array of input windows, shape (n, window_size, 1).
        window_size: length of each input window (== seq_len).
        prediction_len: steps to roll forward per starting frame.

    Returns:
        A list of prediction sequences, each of length prediction_len.
    """
    prediction_seqs = []
    # BUGFIX: use floor division; true division returns a float, which
    # range() rejects under Python 3 (TypeError).
    for i in range(len(data) // prediction_len):
        curr_frame = data[i * prediction_len]
        predicted = []
        for _ in range(prediction_len):
            predicted.append(model.predict(curr_frame[newaxis, :, :])[0, 0])
            # Slide the window: drop the oldest sample, append the
            # latest prediction.
            curr_frame = np.insert(curr_frame[1:], [window_size - 1], predicted[-1], axis=0)
        prediction_seqs.append(predicted)
    return prediction_seqs
def plot_results(predicted_data, true_data, filename):
    """Plot predictions against ground truth and save to <filename>.png."""
    fig = plt.figure(facecolor='white')
    ax = fig.add_subplot(111)
    ax.plot(true_data, label='True Data')
    plt.plot(predicted_data, label='Prediction')
    plt.legend()
    # BUGFIX: save before show() — with interactive backends show() can
    # tear down the figure, leaving an empty PNG on disk.
    plt.savefig(filename + '.png')
    plt.show()
def plot_results_multiple(predicted_data, true_data, prediction_len):
    """Plot multiple rolled-out prediction sequences at their offsets."""
    fig = plt.figure(facecolor='white')
    ax = fig.add_subplot(111)
    ax.plot(true_data, label='True Data')
    # Pad each sequence with None so it starts at its true x-offset.
    for i, data in enumerate(predicted_data):
        padding = [None for p in range(i * prediction_len)]
        plt.plot(padding + data, label='Prediction')
    # Build the legend once, after all lines exist (the original rebuilt
    # it on every loop iteration).
    plt.legend()
    # BUGFIX: save before show(), otherwise the saved PNG can be blank.
    plt.savefig('plot_results_multiple.png')
    plt.show()
if __name__=='__main__':
    # Script entry point: load the daily WTI oil price series, train the
    # LSTM for one epoch, and run point-by-point predictions on the
    # held-out tail of the data.
    global_start_time = time.time()
    epochs = 1
    seq_len = 50

    print('> Loading data... ')

    # Local CSV with one spot price per line (CRLF-separated).
    data_file_name = 'D:/software/Python/data/oil prediction data/Cushing_OK_WTI_Spot_Price_FOB __Day_Price_Only.csv'
    # data_file_name1 = 'D:/software/Python/data/oil prediction data/sp500.csv'
    X_train, y_train, X_test, y_test = load_data(data_file_name, seq_len, True)

    print('X_train shape:',X_train.shape)  # e.g. (3709, 50, 1)
    print('y_train shape:',y_train.shape)  # e.g. (3709,)
    print('X_test shape:',X_test.shape)    # e.g. (412, 50, 1)
    print('y_test shape:',y_test.shape)    # e.g. (412,)

    print('> Data Loaded. Compiling...')

    model = build_model([1, 50, 100, 1])

    model.fit(X_train,y_train,batch_size=512,nb_epoch=epochs,validation_split=0.05)

    # Alternative evaluation modes, kept for reference:
    # multiple_predictions = predict_sequences_multiple(model, X_test, seq_len, prediction_len=50)
    # print('multiple_predictions shape:',np.array(multiple_predictions).shape)
    # full_predictions = predict_sequence_full(model, X_test, seq_len)
    # print('full_predictions shape:',np.array(full_predictions).shape)

    point_by_point_predictions = predict_point_by_point(model, X_test)
    print('point_by_point_predictions shape:',np.array(point_by_point_predictions).shape)

    print('Training duration (s) : ', time.time() - global_start_time)

    # plot_results_multiple(multiple_predictions, y_test, 50)
    # plot_results(full_predictions,y_test,'full_predictions')
plot_results(point_by_point_predictions,y_test,'point_by_point_predictions') | [
"numpy.insert",
"numpy.reshape",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.show",
"keras.layers.core.Activation",
"keras.layers.recurrent.LSTM",
"matplotlib.pyplot.plot",
"keras.models.Sequential",
"numpy.array",
"matplotlib.pyplot.figure",
"keras.layers.core.Dense",
"keras.layers.core.D... | [((384, 417), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (407, 417), False, 'import warnings\n'), ((1208, 1224), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (1216, 1224), True, 'import numpy as np\n'), ((1315, 1339), 'numpy.random.shuffle', 'np.random.shuffle', (['train'], {}), '(train)\n', (1332, 1339), True, 'import numpy as np\n'), ((1471, 1531), 'numpy.reshape', 'np.reshape', (['x_train', '(x_train.shape[0], x_train.shape[1], 1)'], {}), '(x_train, (x_train.shape[0], x_train.shape[1], 1))\n', (1481, 1531), True, 'import numpy as np\n'), ((1545, 1602), 'numpy.reshape', 'np.reshape', (['x_test', '(x_test.shape[0], x_test.shape[1], 1)'], {}), '(x_test, (x_test.shape[0], x_test.shape[1], 1))\n', (1555, 1602), True, 'import numpy as np\n'), ((2009, 2021), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2019, 2021), False, 'from keras.models import Sequential\n'), ((2311, 2322), 'time.time', 'time.time', ([], {}), '()\n', (2320, 2322), False, 'import time\n'), ((2615, 2655), 'numpy.reshape', 'np.reshape', (['predicted', '(predicted.size,)'], {}), '(predicted, (predicted.size,))\n', (2625, 2655), True, 'import numpy as np\n'), ((3893, 3922), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'facecolor': '"""white"""'}), "(facecolor='white')\n", (3903, 3922), True, 'import matplotlib.pyplot as plt\n'), ((4111, 4155), 'matplotlib.pyplot.plot', 'plt.plot', (['predicted_data'], {'label': '"""Prediction"""'}), "(predicted_data, label='Prediction')\n", (4119, 4155), True, 'import matplotlib.pyplot as plt\n'), ((4160, 4172), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4170, 4172), True, 'import matplotlib.pyplot as plt\n'), ((4177, 4187), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4185, 4187), True, 'import matplotlib.pyplot as plt\n'), ((4192, 4222), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(filename + '.png')"], {}), "(filename + 
'.png')\n", (4203, 4222), True, 'import matplotlib.pyplot as plt\n'), ((4302, 4331), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'facecolor': '"""white"""'}), "(facecolor='white')\n", (4312, 4331), True, 'import matplotlib.pyplot as plt\n'), ((4668, 4678), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4676, 4678), True, 'import matplotlib.pyplot as plt\n'), ((4683, 4723), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plot_results_multiple.png"""'], {}), "('plot_results_multiple.png')\n", (4694, 4723), True, 'import matplotlib.pyplot as plt\n'), ((4774, 4785), 'time.time', 'time.time', ([], {}), '()\n', (4783, 4785), False, 'import time\n'), ((2037, 2107), 'keras.layers.recurrent.LSTM', 'LSTM', ([], {'input_dim': 'layers[0]', 'output_dim': 'layers[1]', 'return_sequences': '(True)'}), '(input_dim=layers[0], output_dim=layers[1], return_sequences=True)\n', (2041, 2107), False, 'from keras.layers.recurrent import LSTM\n'), ((2121, 2133), 'keras.layers.core.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2128, 2133), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2150, 2189), 'keras.layers.recurrent.LSTM', 'LSTM', (['layers[2]'], {'return_sequences': '(False)'}), '(layers[2], return_sequences=False)\n', (2154, 2189), False, 'from keras.layers.recurrent import LSTM\n'), ((2204, 2216), 'keras.layers.core.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2211, 2216), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2233, 2260), 'keras.layers.core.Dense', 'Dense', ([], {'output_dim': 'layers[3]'}), '(output_dim=layers[3])\n', (2238, 2260), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2276, 2296), 'keras.layers.core.Activation', 'Activation', (['"""linear"""'], {}), "('linear')\n", (2286, 2296), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((3152, 3215), 'numpy.insert', 'np.insert', (['curr_frame', '[window_size - 1]', 'predicted[-1]'], {'axis': '(0)'}), 
'(curr_frame, [window_size - 1], predicted[-1], axis=0)\n', (3161, 3215), True, 'import numpy as np\n'), ((4598, 4642), 'matplotlib.pyplot.plot', 'plt.plot', (['(padding + data)'], {'label': '"""Prediction"""'}), "(padding + data, label='Prediction')\n", (4606, 4642), True, 'import matplotlib.pyplot as plt\n'), ((4651, 4663), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4661, 4663), True, 'import matplotlib.pyplot as plt\n'), ((988, 1004), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (996, 1004), True, 'import numpy as np\n'), ((1170, 1186), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (1178, 1186), True, 'import numpy as np\n'), ((2407, 2418), 'time.time', 'time.time', ([], {}), '()\n', (2416, 2418), False, 'import time\n'), ((2560, 2579), 'numpy.array', 'np.array', (['predicted'], {}), '(predicted)\n', (2568, 2579), True, 'import numpy as np\n'), ((3696, 3759), 'numpy.insert', 'np.insert', (['curr_frame', '[window_size - 1]', 'predicted[-1]'], {'axis': '(0)'}), '(curr_frame, [window_size - 1], predicted[-1], axis=0)\n', (3705, 3759), True, 'import numpy as np\n'), ((5992, 6028), 'numpy.array', 'np.array', (['point_by_point_predictions'], {}), '(point_by_point_predictions)\n', (6000, 6028), True, 'import numpy as np\n'), ((6084, 6095), 'time.time', 'time.time', ([], {}), '()\n', (6093, 6095), False, 'import time\n')] |
r"""undocumented
这个页面的代码很大程度上参考(复制粘贴)了https://github.com/huggingface/pytorch-pretrained-BERT的代码, 如果你发现该代码对你
有用,也请引用一下他们。
"""
__all__ = [
"BertModel",
]
import copy
import json
import math
import os
import torch
from torch import nn
import numpy as np
from ...io.file_utils import _get_file_name_base_on_postfix
from ...io.file_utils import _get_bert_dir
from ...core import logger
# Standard file names inside a pretrained BERT checkpoint directory.
CONFIG_FILE = 'config.json'
WEIGHTS_NAME = 'pytorch_model.bin'

# State-dict key substitutions, applied in two passes when loading
# checkpoints whose parameter names differ from this BertModel's
# (gamma/beta from old TF conversions, distilbert.* module paths).
BERT_KEY_RENAME_MAP_1 = {
    'gamma': 'weight',
    'beta': 'bias',
    'distilbert.embeddings': 'bert.embeddings',
    'distilbert.transformer': 'bert.encoder',
}

# Pass 2: DistilBERT attention / feed-forward submodule names mapped to
# the corresponding BERT submodule names.
BERT_KEY_RENAME_MAP_2 = {
    'q_lin': 'self.query',
    'k_lin': 'self.key',
    'v_lin': 'self.value',
    'out_lin': 'output.dense',
    'sa_layer_norm': 'attention.output.LayerNorm',
    'ffn.lin1': 'intermediate.dense',
    'ffn.lin2': 'output.dense',
    'output_layer_norm': 'output.LayerNorm',
}
class BertConfig(object):
    r"""Container for the hyper-parameters of a `BertModel`.

    A config can be built either from an explicit vocabulary size plus
    keyword overrides, or by passing the path of a JSON config file as
    the first argument.
    """

    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,
                 architectures='bert'):
        r"""Create a BertConfig.

        Args:
            vocab_size_or_config_json_file: either the vocabulary size of
                `inputs_ids` (int) or a path to a JSON config file (str).
            hidden_size: width of the encoder layers and the pooler.
            num_hidden_layers: number of Transformer encoder layers.
            num_attention_heads: attention heads per encoder layer.
            intermediate_size: width of the feed-forward layer.
            hidden_act: activation name ("gelu", "relu" or "swish") or a
                callable, used in the encoder and pooler.
            hidden_dropout_prob: dropout for fully connected layers in
                the embeddings, encoder and pooler.
            attention_probs_dropout_prob: dropout on attention weights.
            max_position_embeddings: maximum supported sequence length.
            type_vocab_size: vocabulary size of `token_type_ids`.
            initializer_range: stddev of the truncated normal initializer.
            layer_norm_eps: epsilon used by LayerNorm.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Path to a JSON config file: copy every entry verbatim onto
            # this instance.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                parsed = json.loads(reader.read())
            for key, value in parsed.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
            self.architectures = architectures
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             "or the path to a pretrained model config file (str)")

    @classmethod
    def from_dict(cls, json_object):
        r"""Build a `BertConfig` directly from a Python dict."""
        config = BertConfig(vocab_size_or_config_json_file=-1)
        config.__dict__.update(json_object)
        return config

    @classmethod
    def from_json_file(cls, json_file):
        r"""Build a `BertConfig` from a JSON file on disk."""
        with open(json_file, "r", encoding='utf-8') as reader:
            contents = reader.read()
        return cls.from_dict(json.loads(contents))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        r"""Return a deep copy of this config as a plain dictionary."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        r"""Return this config serialized as sorted, indented JSON."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        r"""Write this config as JSON to a file (or into a directory)."""
        # A directory target gets the standard config file name appended.
        if os.path.isdir(json_file_path):
            json_file_path = os.path.join(json_file_path, CONFIG_FILE)
        with open(json_file_path, "w", encoding='utf-8') as writer:
            writer.write(self.to_json_string())

    def save_pretrained(self, save_directory):
        self.to_json_file(save_directory)
def gelu(x):
    """Gaussian Error Linear Unit: x * Phi(x), where Phi is the standard
    normal CDF computed via erf."""
    cdf = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * cdf
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
BertLayerNorm = torch.nn.LayerNorm
class DistilBertEmbeddings(nn.Module):
    """Embedding layer for DistilBERT-style checkpoints: word plus
    position embeddings only (no token_type embeddings), optionally with
    fixed sinusoidal position embeddings."""

    def __init__(self, config):
        super(DistilBertEmbeddings, self).__init__()

        def create_sinusoidal_embeddings(n_pos, dim, out):
            # Fill `out` in place with the fixed sin/cos positional table
            # (even columns sine, odd columns cosine) and freeze it so it
            # is never trained.
            position_enc = np.array([
                [pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)]
                for pos in range(n_pos)
            ])
            out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
            out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
            out.detach_()
            out.requires_grad = False

        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        # Optionally overwrite the learned position table with the fixed
        # sinusoidal one.
        if config.sinusoidal_pos_embds:
            create_sinusoidal_embeddings(n_pos=config.max_position_embeddings,
                                         dim=config.hidden_size,
                                         out=self.position_embeddings.weight)

        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids, position_ids=None):
        r"""Embed token ids with position information.

        Parameters
        ----------
        input_ids: torch.tensor(bs, max_seq_length)
            The token ids to embed.
        token_type_ids: not used (kept for interface compatibility).
        position_ids: optional explicit position ids; derived from the
            sequence length when omitted.

        Outputs
        -------
        embeddings: torch.tensor(bs, max_seq_length, dim)
            The embedded tokens (plus position embeddings, no token_type
            embeddings), after LayerNorm and dropout.
        """
        seq_length = input_ids.size(1)
        if position_ids is None:
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)  # (max_seq_length)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)  # (bs, max_seq_length)

        word_embeddings = self.word_embeddings(input_ids)  # (bs, max_seq_length, dim)
        position_embeddings = self.position_embeddings(position_ids)  # (bs, max_seq_length, dim)

        embeddings = word_embeddings + position_embeddings  # (bs, max_seq_length, dim)
        embeddings = self.LayerNorm(embeddings)  # (bs, max_seq_length, dim)
        embeddings = self.dropout(embeddings)  # (bs, max_seq_length, dim)
        return embeddings
class BertEmbeddings(nn.Module):
    r"""Construct the embeddings from word, position and token_type embeddings.
    """
    def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None, position_ids=None, words_embeddings=None):
        """Sum word, position and token-type embeddings, then LayerNorm + dropout.

        ``words_embeddings`` may be supplied to bypass the word-embedding
        lookup (e.g. pre-computed embeddings); its leading dimensions must
        match ``input_ids``.
        """
        seq_length = input_ids.size(1)
        if position_ids is None:
            # Default positions are simply 0..seq_length-1 for every row.
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        if words_embeddings is None:
            words_embeddings = self.word_embeddings(input_ids)
        else:
            # Caller-provided embeddings must align with the input shape.
            assert input_ids.size() == words_embeddings.size()[: -1]
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(nn.Module):
    # Multi-head scaled dot-product self-attention. The output projection
    # lives in BertSelfOutput, not here.
    def __init__(self, config):
        super(BertSelfAttention, self).__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # Reshape (bs, seq, hidden) -> (bs, heads, seq, head_size).
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel forward() function).
        attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        context_layer = torch.matmul(attention_probs, value_layer)
        # Merge heads back: (bs, heads, seq, head_size) -> (bs, seq, hidden).
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        return context_layer
class BertSelfOutput(nn.Module):
    """Projects the self-attention output back to hidden size, then applies
    dropout and a residual LayerNorm connection."""

    def __init__(self, config):
        super(BertSelfOutput, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertAttention(nn.Module):
    """Full attention sub-layer: multi-head self-attention followed by the
    residual output projection."""

    def __init__(self, config):
        super(BertAttention, self).__init__()
        self.self = BertSelfAttention(config)
        self.output = BertSelfOutput(config)

    def forward(self, input_tensor, attention_mask):
        attended = self.self(input_tensor, attention_mask)
        return self.output(attended, input_tensor)
class BertIntermediate(nn.Module):
    """Position-wise feed-forward expansion (hidden -> intermediate size)
    with a configurable activation."""

    def __init__(self, config):
        super(BertIntermediate, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # The config may name the activation (looked up in ACT2FN) or
        # supply the callable directly.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class BertOutput(nn.Module):
    """Projects the feed-forward expansion back down to hidden size, with
    dropout and a residual LayerNorm connection."""

    def __init__(self, config):
        super(BertOutput, self).__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BertLayer(nn.Module):
    """One transformer encoder layer: attention block then feed-forward
    block, each with its own residual connection."""

    def __init__(self, config):
        super(BertLayer, self).__init__()
        self.attention = BertAttention(config)
        self.intermediate = BertIntermediate(config)
        self.output = BertOutput(config)

    def forward(self, hidden_states, attention_mask):
        attended = self.attention(hidden_states, attention_mask)
        expanded = self.intermediate(attended)
        return self.output(expanded, attended)
class BertEncoder(nn.Module):
    """Stack of BertLayer modules with an optional early exit.

    ``num_output_layer`` selects how deep the stack actually runs; a
    negative value counts from the end (Python-style indexing).
    """

    def __init__(self, config, num_output_layer=-1):
        super(BertEncoder, self).__init__()
        prototype = BertLayer(config)
        self.layer = nn.ModuleList([copy.deepcopy(prototype) for _ in range(config.num_hidden_layers)])
        if num_output_layer < 0:
            num_output_layer = len(self.layer) + num_output_layer
        # Clamp to [0, num_layers].
        self.num_output_layer = max(min(num_output_layer, len(self.layer)), 0)
        if self.num_output_layer + 1 < len(self.layer):
            logger.info(f'The transformer encoder will early exit after layer-{self.num_output_layer} '
                        f'(layer 0 means embedding layer)!')

    def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
        all_encoder_layers = []
        for idx, layer_module in enumerate(self.layer):
            if idx >= self.num_output_layer:
                break  # early exit: deeper layers are skipped entirely
            hidden_states = layer_module(hidden_states, attention_mask)
            if output_all_encoded_layers:
                all_encoder_layers.append(hidden_states)
        if not output_all_encoded_layers:
            all_encoder_layers.append(hidden_states)
        return all_encoder_layers
class BertPooler(nn.Module):
    """Produces a fixed-size sentence representation from the first
    ([CLS]) token's hidden state via a dense layer and tanh."""

    def __init__(self, config):
        super(BertPooler, self).__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # "Pool" by taking only the hidden state of the first token.
        cls_state = hidden_states[:, 0]
        return self.activation(self.dense(cls_state))
class BertModel(nn.Module):
    r"""
    BERT (Bidirectional Embedding Representations from Transformers).

    Build a BERT model from pretrained weights::

        model = BertModel.from_pretrained(model_dir_or_name)

    Build a BERT model with randomly initialized weights::

        model = BertModel()

    :param int vocab_size: vocabulary size, default 30522 (the BERT English uncased vocabulary)
    :param int hidden_size: hidden size, default 768 (BERT base)
    :param int num_hidden_layers: number of hidden layers, default 12 (BERT base)
    :param int num_attention_heads: number of attention heads, default 12 (BERT base)
    :param int intermediate_size: FFN hidden size, default 3072 (BERT base)
    :param str hidden_act: FFN activation function, default ``gelu``
    :param float hidden_dropout_prob: FFN dropout probability, default 0.1
    :param float attention_probs_dropout_prob: attention dropout probability, default 0.1
    :param int max_position_embeddings: maximum sequence length, default 512
    :param int type_vocab_size: maximum number of segments, default 2
    :param int initializer_range: weight-initialization range, default 0.02
    """
    def __init__(self, config, *inputs, **kwargs):
        super(BertModel, self).__init__()
        if not isinstance(config, BertConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
                "To create a model from a Google pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        # NOTE(review): super().__init__() was already invoked above; this
        # second call is redundant (but harmless).
        super(BertModel, self).__init__()
        self.config = config
        self.hidden_size = self.config.hidden_size
        self.model_type = 'bert'
        # Early-exit depth: take the deeper of the negative-indexed and
        # positive-indexed requests.
        neg_num_output_layer = kwargs.get('neg_num_output_layer', -1)
        pos_num_output_layer = kwargs.get('pos_num_output_layer', self.config.num_hidden_layers)
        self.num_output_layer = max(neg_num_output_layer + 1 + self.config.num_hidden_layers, pos_num_output_layer)
        # DistilBert configs are detected by the extra `sinusoidal_pos_embds`
        # attribute; callers may also force the type through kwargs.
        if hasattr(config, 'sinusoidal_pos_embds'):
            self.model_type = 'distilbert'
        elif 'model_type' in kwargs:
            self.model_type = kwargs['model_type'].lower()
        if self.model_type == 'distilbert':
            self.embeddings = DistilBertEmbeddings(config)
        else:
            self.embeddings = BertEmbeddings(config)
        self.encoder = BertEncoder(config, num_output_layer=self.num_output_layer)
        if self.model_type != 'distilbert':
            self.pooler = BertPooler(config)
        else:
            logger.info('DistilBert has NOT pooler, will use hidden states of [CLS] token as pooled output.')
        self.apply(self.init_bert_weights)

    @property
    def dtype(self):
        """
        :obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
        """
        try:
            return next(self.parameters()).dtype
        except StopIteration:
            # For nn.DataParallel compatibility in PyTorch 1.5
            def find_tensor_attributes(module: nn.Module):
                tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
                return tuples
            gen = self._named_members(get_members_fn=find_tensor_attributes)
            first_tuple = next(gen)
            return first_tuple[1].dtype

    def init_bert_weights(self, module):
        r""" Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, BertLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True,
                position_ids=None):
        """
        :param torch.LongTensor input_ids: bsz x max_len input token ids
        :param torch.LongTensor token_type_ids: bsz x max_len; treated as all zeros when omitted.
            Conventionally 0 up to and including the first [SEP] and 1 afterwards.
        :param attention_mask: 1 for positions that should be attended to, 0 otherwise
        :param bool output_all_encoded_layers: whether to return every layer. If True, the token
            embeddings (bpe + position + type embeddings) and the hidden states of each layer are
            all returned; if False, only the last layer's result is returned.
        :param torch.LongTensor position_ids: bsz x max_len position ids
        :return: encode_layers: if output_all_encoded_layers is True, a list (num_layers + 1
                elements) of bsz x max_len x hidden_size tensors; otherwise a single
                bsz x max_len x hidden_size tensor;
            pooled_output: bsz x hidden_size representation of [CLS], usable for sentence
                classification
        """
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        # this will case an issue when DataParallel: https://github.com/pytorch/pytorch/issues/40457#issuecomment-648396469
        # extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
        extended_attention_mask = extended_attention_mask.to(self.dtype)
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        embedding_output = self.embeddings(input_ids, token_type_ids=token_type_ids, position_ids=position_ids)
        encoded_layers = self.encoder(embedding_output,
                                      extended_attention_mask,
                                      output_all_encoded_layers=output_all_encoded_layers)
        # Prepend the embedding output so "layer 0" means the embeddings.
        encoded_layers.insert(0, embedding_output)
        sequence_output = encoded_layers[-1]
        if self.model_type != 'distilbert':
            pooled_output = self.pooler(sequence_output)
        else:
            # DistilBert has no pooler; use the raw [CLS] hidden state.
            pooled_output = sequence_output[:, 0]
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output

    @classmethod
    def from_pretrained(cls, model_dir_or_name, *inputs, **kwargs):
        """Build a model from a local directory or model name: load the JSON
        config, load the ``.bin`` checkpoint, normalize legacy parameter
        names, and copy the weights into a freshly constructed model."""
        state_dict = kwargs.get('state_dict', None)
        kwargs.pop('state_dict', None)
        kwargs.pop('cache_dir', None)
        kwargs.pop('from_tf', None)
        # get model dir from name or dir
        pretrained_model_dir = _get_bert_dir(model_dir_or_name)
        # Load config
        config_file = _get_file_name_base_on_postfix(pretrained_model_dir, '.json')
        config = BertConfig.from_json_file(config_file)
        if state_dict is None:
            weights_path = _get_file_name_base_on_postfix(pretrained_model_dir, '.bin')
            state_dict = torch.load(weights_path, map_location='cpu')
        else:
            logger.error(f'Cannot load parameters through `state_dict` variable.')
            raise RuntimeError(f'Cannot load parameters through `state_dict` variable.')
        model_type = 'BERT'
        # Pass 1: prefix every key that lacks 'bert' with 'bert.'.
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'bert' not in key:
                new_key = 'bert.' + key
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        # Pass 2: apply BERT_KEY_RENAME_MAP_1; a 'distilbert' key also flips
        # the detected model type.
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            for key_name in BERT_KEY_RENAME_MAP_1:
                if key_name in key:
                    new_key = key.replace(key_name, BERT_KEY_RENAME_MAP_1[key_name])
                    if 'distilbert' in key:
                        model_type = 'DistilBert'
                    break
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        # Pass 3: apply BERT_KEY_RENAME_MAP_2.
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            for key_name in BERT_KEY_RENAME_MAP_2:
                if key_name in key:
                    new_key = key.replace(key_name, BERT_KEY_RENAME_MAP_2[key_name])
                    break
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        # Instantiate model.
        model = cls(config, model_type=model_type, *inputs, **kwargs)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=''):
            # Recursively copy parameters, mirroring nn.Module.load_state_dict.
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
        if len(missing_keys) > 0:
            logger.warning("Weights of {} not initialized from pretrained model: {}".format(
                model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.debug("Weights from pretrained model not used in {}: {}".format(
                model.__class__.__name__, unexpected_keys))
        logger.info(f"Load pre-trained {model_type} parameters from file {weights_path}.")
        return model

    def save_pretrained(self, save_directory):
        """Save the model (configuration + weights) into a directory.
        """
        assert os.path.isdir(
            save_directory
        ), "Saving path should be a directory where the model and configuration can be saved"
        # Only save the model itself if we are using distributed training
        model_to_save = self.module if hasattr(self, "module") else self
        # Attach architecture to the config
        model_to_save.config.architectures = [model_to_save.__class__.__name__]
        # Save configuration file
        model_to_save.config.save_pretrained(save_directory)
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        logger.debug("Model weights saved in {}".format(output_model_file))
| [
"torch.nn.Dropout",
"torch.nn.Tanh",
"math.sqrt",
"copy.deepcopy",
"numpy.sin",
"torch.arange",
"torch.nn.LayerNorm",
"os.path.isdir",
"torch.matmul",
"torch.zeros_like",
"torch.nn.Embedding",
"json.loads",
"torch.ones_like",
"torch.is_tensor",
"numpy.cos",
"torch.nn.Softmax",
"numpy... | [((5139, 5167), 'copy.deepcopy', 'copy.deepcopy', (['self.__dict__'], {}), '(self.__dict__)\n', (5152, 5167), False, 'import copy\n'), ((5461, 5490), 'os.path.isdir', 'os.path.isdir', (['json_file_path'], {}), '(json_file_path)\n', (5474, 5490), False, 'import os\n'), ((5873, 5889), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (5886, 5889), False, 'import torch\n'), ((6610, 6676), 'torch.nn.Embedding', 'nn.Embedding', (['config.vocab_size', 'config.hidden_size'], {'padding_idx': '(0)'}), '(config.vocab_size, config.hidden_size, padding_idx=0)\n', (6622, 6676), False, 'from torch import nn\n'), ((6712, 6776), 'torch.nn.Embedding', 'nn.Embedding', (['config.max_position_embeddings', 'config.hidden_size'], {}), '(config.max_position_embeddings, config.hidden_size)\n', (6724, 6776), False, 'from torch import nn\n'), ((7065, 7108), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['config.hidden_size'], {'eps': '(1e-12)'}), '(config.hidden_size, eps=1e-12)\n', (7077, 7108), False, 'from torch import nn\n'), ((7132, 7170), 'torch.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (7142, 7170), False, 'from torch import nn\n'), ((8669, 8735), 'torch.nn.Embedding', 'nn.Embedding', (['config.vocab_size', 'config.hidden_size'], {'padding_idx': '(0)'}), '(config.vocab_size, config.hidden_size, padding_idx=0)\n', (8681, 8735), False, 'from torch import nn\n'), ((8771, 8835), 'torch.nn.Embedding', 'nn.Embedding', (['config.max_position_embeddings', 'config.hidden_size'], {}), '(config.max_position_embeddings, config.hidden_size)\n', (8783, 8835), False, 'from torch import nn\n'), ((8873, 8929), 'torch.nn.Embedding', 'nn.Embedding', (['config.type_vocab_size', 'config.hidden_size'], {}), '(config.type_vocab_size, config.hidden_size)\n', (8885, 8929), False, 'from torch import nn\n'), ((9190, 9228), 'torch.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (9200, 9228), 
False, 'from torch import nn\n'), ((10827, 10876), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_size', 'self.all_head_size'], {}), '(config.hidden_size, self.all_head_size)\n', (10836, 10876), False, 'from torch import nn\n'), ((10896, 10945), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_size', 'self.all_head_size'], {}), '(config.hidden_size, self.all_head_size)\n', (10905, 10945), False, 'from torch import nn\n'), ((10967, 11016), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_size', 'self.all_head_size'], {}), '(config.hidden_size, self.all_head_size)\n', (10976, 11016), False, 'from torch import nn\n'), ((11041, 11088), 'torch.nn.Dropout', 'nn.Dropout', (['config.attention_probs_dropout_prob'], {}), '(config.attention_probs_dropout_prob)\n', (11051, 11088), False, 'from torch import nn\n'), ((12481, 12523), 'torch.matmul', 'torch.matmul', (['attention_probs', 'value_layer'], {}), '(attention_probs, value_layer)\n', (12493, 12523), False, 'import torch\n'), ((12912, 12961), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_size', 'config.hidden_size'], {}), '(config.hidden_size, config.hidden_size)\n', (12921, 12961), False, 'from torch import nn\n'), ((13071, 13109), 'torch.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (13081, 13109), False, 'from torch import nn\n'), ((13919, 13974), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_size', 'config.intermediate_size'], {}), '(config.hidden_size, config.intermediate_size)\n', (13928, 13974), False, 'from torch import nn\n'), ((14467, 14522), 'torch.nn.Linear', 'nn.Linear', (['config.intermediate_size', 'config.hidden_size'], {}), '(config.intermediate_size, config.hidden_size)\n', (14476, 14522), False, 'from torch import nn\n'), ((14632, 14670), 'torch.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (14642, 14670), False, 'from torch import nn\n'), ((16808, 16857), 'torch.nn.Linear', 'nn.Linear', 
(['config.hidden_size', 'config.hidden_size'], {}), '(config.hidden_size, config.hidden_size)\n', (16817, 16857), False, 'from torch import nn\n'), ((16884, 16893), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (16891, 16893), False, 'from torch import nn\n'), ((28024, 28053), 'os.path.isdir', 'os.path.isdir', (['save_directory'], {}), '(save_directory)\n', (28037, 28053), False, 'import os\n'), ((28643, 28685), 'os.path.join', 'os.path.join', (['save_directory', 'WEIGHTS_NAME'], {}), '(save_directory, WEIGHTS_NAME)\n', (28655, 28685), False, 'import os\n'), ((4949, 4965), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (4959, 4965), False, 'import json\n'), ((5521, 5562), 'os.path.join', 'os.path.join', (['json_file_path', 'CONFIG_FILE'], {}), '(json_file_path, CONFIG_FILE)\n', (5533, 5562), False, 'import os\n'), ((7730, 7797), 'torch.arange', 'torch.arange', (['seq_length'], {'dtype': 'torch.long', 'device': 'input_ids.device'}), '(seq_length, dtype=torch.long, device=input_ids.device)\n', (7742, 7797), False, 'import torch\n'), ((9426, 9493), 'torch.arange', 'torch.arange', (['seq_length'], {'dtype': 'torch.long', 'device': 'input_ids.device'}), '(seq_length, dtype=torch.long, device=input_ids.device)\n', (9438, 9493), False, 'import torch\n'), ((9632, 9659), 'torch.zeros_like', 'torch.zeros_like', (['input_ids'], {}), '(input_ids)\n', (9648, 9659), False, 'import torch\n'), ((11920, 11955), 'math.sqrt', 'math.sqrt', (['self.attention_head_size'], {}), '(self.attention_head_size)\n', (11929, 11955), False, 'import math\n'), ((12202, 12220), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (12212, 12220), False, 'from torch import nn\n'), ((22003, 22029), 'torch.ones_like', 'torch.ones_like', (['input_ids'], {}), '(input_ids)\n', (22018, 22029), False, 'import torch\n'), ((22094, 22121), 'torch.zeros_like', 'torch.zeros_like', (['input_ids'], {}), '(input_ids)\n', (22110, 22121), False, 'import torch\n'), ((24736, 24780), 
'torch.load', 'torch.load', (['weights_path'], {'map_location': '"""cpu"""'}), "(weights_path, map_location='cpu')\n", (24746, 24780), False, 'import torch\n'), ((6407, 6436), 'numpy.sin', 'np.sin', (['position_enc[:, 0::2]'], {}), '(position_enc[:, 0::2])\n', (6413, 6436), True, 'import numpy as np\n'), ((6483, 6512), 'numpy.cos', 'np.cos', (['position_enc[:, 1::2]'], {}), '(position_enc[:, 1::2])\n', (6489, 6512), True, 'import numpy as np\n'), ((15664, 15684), 'copy.deepcopy', 'copy.deepcopy', (['layer'], {}), '(layer)\n', (15677, 15684), False, 'import copy\n'), ((5825, 5839), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (5834, 5839), False, 'import math\n'), ((6250, 6285), 'numpy.power', 'np.power', (['(10000)', '(2 * (j // 2) / dim)'], {}), '(10000, 2 * (j // 2) / dim)\n', (6258, 6285), True, 'import numpy as np\n'), ((20256, 20274), 'torch.is_tensor', 'torch.is_tensor', (['v'], {}), '(v)\n', (20271, 20274), False, 'import torch\n')] |
import h5py
import numpy as np
import sys
def save_activity(activity, network_params, filename, folder_index, base_path, activity_key='activity', dim=2):
    """Append an activity array plus its network parameters to an HDF5 file.

    Datasets are written under the ``folder_index`` group of
    ``<base_path>pattern_formation/data{dim}d/<filename>``. When
    ``activity_key`` is 'activity', the number of events (length of the
    first axis) and every entry of ``network_params`` are stored as well.
    """
    filepath = base_path + 'pattern_formation/data{}d/'.format(dim)
    full_name = filepath + filename
    # Context manager guarantees the HDF5 handle is released even when a
    # dataset name already exists and create_dataset raises (the original
    # code leaked the handle on that path).
    with h5py.File(full_name, 'a') as f:
        f.create_dataset(folder_index + activity_key, data=activity)
        if activity_key == 'activity':
            f.create_dataset(folder_index + 'nevents', data=activity.shape[0])
            # NOTE(review): nesting of this loop under the 'activity' branch
            # reconstructed from context — params are written once per run.
            for key in network_params.keys():
                f.create_dataset(folder_index + key, data=network_params[key])
def gimme_index(filename, base_path, dim=2):
    """Return the next free integer group index in an HDF5 file.

    Opens ``<base_path>pattern_formation/data{dim}d/<filename>``, finds the
    largest existing integer-named group, creates the next one and returns
    its index. If the file cannot be opened or holds no groups yet, index 0
    is returned (and no group is created).
    """
    filepath = base_path + 'pattern_formation/data{}d/'.format(dim)
    full_name = filepath + filename
    print('save under:', full_name)
    try:
        # Context manager closes the handle on every path; the original
        # leaked the file when np.max raised on an empty key list.
        with h5py.File(full_name, 'a') as f:
            indices = [int(item) for item in sorted(f.keys())]
            max_index = np.max(indices)
            f.create_group('{}'.format(max_index + 1))
    except Exception:
        # Best-effort fallback: any failure (unreadable file, no groups,
        # non-integer key, ...) restarts the numbering at 0.
        max_index = -1
    return max_index + 1
| [
"numpy.max",
"h5py.File"
] | [((258, 283), 'h5py.File', 'h5py.File', (['full_name', '"""a"""'], {}), "(full_name, 'a')\n", (267, 283), False, 'import h5py\n'), ((751, 776), 'h5py.File', 'h5py.File', (['full_name', '"""a"""'], {}), "(full_name, 'a')\n", (760, 776), False, 'import h5py\n'), ((843, 858), 'numpy.max', 'np.max', (['indices'], {}), '(indices)\n', (849, 858), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 18 22:59:59 2020
@author: CS
Check the read-me file for a in-depth summary of the problem
"""
import numpy as np
import sys
sys.path.append('..')
import TrussAnalysis as ta
class environment:
    """
    Container for the fixed data of the truss optimization problem:
    the applied forces, boundary node coordinates, element connectivity
    and truss material index.
    """

    def __init__(self, forces=None, trussMat=10):
        """
        Parameters
        ----------
        forces : np.ndarray, optional
            Applied force vector; defaults to [1000., 1000., 0.].
        trussMat : int, optional
            Material index passed through to the truss analysis.
        """
        # Fix for a mutable-default-argument bug: the original used
        # `forces=np.array([...])`, which is evaluated once and shared by
        # every instance — mutating one environment's forces in place
        # silently changed the default for all later instances.
        if forces is None:
            forces = np.array([1000., 1000., 0.])
        self.forces = forces
        # Node coordinates and element connectivity are fixed for this problem.
        self.xNodeCoords = np.array([0., 1., 0., 1., 0., 1., 1.])
        self.yNodeCoords = np.array([0., 0., 1., 1., 2., 2., 3.])
        self.Connectivity = [[1, 2], [1, 4], [2, 4], [1, 3], [3, 4], [3, 6],
                             [3, 5], [4, 6], [5, 6], [5, 7], [6, 7]]
        self.nodeIds = np.arange(len(self.Connectivity)) + 1
        self.trussMat = trussMat
def ftest(individual, environment):
    """
    Analyze one candidate truss and return the raw analysis results.

    A thin adapter: unpacks the genotype (member areas) and the fixed
    problem data from the environment into the form expected by the
    TrussAnalysis module. Fitness is computed later from these results.

    Parameters
    ----------
    individual : Naturalize Individual
        The input individual.
    environment : Naturalize Environment
        The input environment.
    """
    areas = individual.genotype[0]
    env = environment
    result = ta.runTrussAnalysis(areas, env.forces, env.nodeIds,
                                 env.xNodeCoords, env.yNodeCoords,
                                 env.Connectivity, env.trussMat)
    disp, volume, forces = result
    return disp, volume, forces
def fitness_basic(individual, environment):
    """
    Fitness = |x-displacement|; the optimizer minimizes this directly.

    Parameters
    ----------
    individual : Naturalize Individual
        The input individual (its ``result`` holds the analysis output).
    environment : Naturalize Environment
        The input environment (unused here).

    Returns
    -------
    float
        Absolute x-displacement from the truss analysis.
    """
    disp = individual.result[0]
    return np.abs(disp[0])
def fitness_normalized(individual, environment):
    """
    Fitness = |x-displacement| * total member volume.

    Weighting the displacement by volume makes lighter trusses more
    attractive to the optimizer.

    Parameters
    ----------
    individual : Naturalize Individual
        The input individual (its ``result`` holds the analysis output).
    environment : Naturalize Environment
        The input environment (unused here).

    Returns
    -------
    float
        Volume-weighted absolute x-displacement.
    """
    disp, volumes, _ = individual.result
    return np.abs(disp[0] * np.sum(volumes))
def fitness_Volume(individual, environment):
    """
    Fitness = total member volume, provided the x-displacement stays under
    a fixed limit; otherwise the volume is penalized by a factor of 100.

    Parameters
    ----------
    individual : Naturalize Individual
        The input individual (its ``result`` holds the analysis output).
    environment : Naturalize Environment
        The input environment (unused here; the limit could live here).

    Returns
    -------
    float
        Truss volume, or 100x the volume when the displacement limit is
        exceeded.
    """
    disp, volumes, _ = individual.result
    dx = disp[0]
    lim = 0.01
    total = np.sum(volumes)
    return total if dx < lim else 100 * total
def plotIndividual(data):
    """
    Draw a candidate truss with matplotlib and apply the blue styling.

    ``data`` is the vector of member cross-section areas; node coordinates
    and connectivity are the fixed values of this problem.
    """
    areas = data
    xCoords = np.array([0., 1., 0., 1., 0., 1., 1.])
    yCoords = np.array([0., 0., 1., 1., 2., 2., 3.])
    connectivity = [[1, 2], [1, 4], [2, 4], [1, 3], [3, 4], [3, 6],
                    [3, 5], [4, 6], [5, 6], [5, 7], [6, 7]]
    nodeIds = np.arange(len(connectivity)) + 1
    fig, ax = ta.plotTruss(areas, nodeIds, xCoords, yCoords, connectivity)
    style_blue(fig, ax, areas, max(areas))
    return fig, ax
def style_blue(fig, ax, areas, maxArea):
    """
    Style an animated truss plot: member line widths scale with area, a
    blue colour scheme is applied, text labels are removed and the axes
    frame is hidden.
    """
    fig.set_figwidth(8)
    fig.set_figheight(6)
    # `ax.texts = []` (the original) is no longer allowed: the artist list
    # became read-only in matplotlib 3.7. Remove the text artists explicitly.
    for text in list(ax.texts):
        text.remove()
    for ii, line in enumerate(ax.lines):
        # Line width proportional to the member's relative cross-section.
        line.set_linewidth(5 * areas[ii] / maxArea)
        line.set_color("steelblue")
    ax.set_facecolor("skyblue")
    # Node markers: drawn on top of the members.
    ax.collections[0].set_color('cornsilk')
    ax.collections[0].set_zorder(10)
    ax.collections[0].set_linewidth(2)
    ax.set_xlim([-1.5, 2.5])
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.spines['left'].set_visible(False)
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    return fig, ax
| [
"numpy.abs",
"TrussAnalysis.plotTruss",
"numpy.array",
"numpy.sum",
"TrussAnalysis.runTrussAnalysis",
"sys.path.append"
] | [((174, 195), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (189, 195), False, 'import sys\n'), ((2180, 2277), 'TrussAnalysis.runTrussAnalysis', 'ta.runTrussAnalysis', (['Areas', 'Forces', 'nodeIds', 'xNodeCoords', 'yNodeCoords', 'Connectivity', 'trussMat'], {}), '(Areas, Forces, nodeIds, xNodeCoords, yNodeCoords,\n Connectivity, trussMat)\n', (2199, 2277), True, 'import TrussAnalysis as ta\n'), ((2928, 2938), 'numpy.abs', 'np.abs', (['dx'], {}), '(dx)\n', (2934, 2938), True, 'import numpy as np\n'), ((4624, 4669), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0]'], {}), '([0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0])\n', (4632, 4669), True, 'import numpy as np\n'), ((4675, 4720), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0]'], {}), '([0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0])\n', (4683, 4720), True, 'import numpy as np\n'), ((4871, 4939), 'TrussAnalysis.plotTruss', 'ta.plotTruss', (['areas', 'nodeIds', 'xNodeCoords', 'yNodeCoords', 'Connectivity'], {}), '(areas, nodeIds, xNodeCoords, yNodeCoords, Connectivity)\n', (4883, 4939), True, 'import TrussAnalysis as ta\n'), ((459, 490), 'numpy.array', 'np.array', (['[1000.0, 1000.0, 0.0]'], {}), '([1000.0, 1000.0, 0.0])\n', (467, 490), True, 'import numpy as np\n'), ((729, 774), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0]'], {}), '([0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0])\n', (737, 774), True, 'import numpy as np\n'), ((789, 834), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0]'], {}), '([0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0])\n', (797, 834), True, 'import numpy as np\n'), ((4371, 4386), 'numpy.sum', 'np.sum', (['volumes'], {}), '(volumes)\n', (4377, 4386), True, 'import numpy as np\n'), ((3589, 3604), 'numpy.sum', 'np.sum', (['volumes'], {}), '(volumes)\n', (3595, 3604), True, 'import numpy as np\n'), ((4464, 4479), 'numpy.sum', 'np.sum', (['volumes'], {}), '(volumes)\n', (4470, 4479), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Created by "Thieu" at 20:22, 12/06/2020 ----------%
# Email: <EMAIL> %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%
import numpy as np
from copy import deepcopy
from mealpy.optimizer import Optimizer
class BaseSMA(Optimizer):
    """
    My changed version of: Slime Mould Algorithm (SMA)

    Notes
    ~~~~~
    + Selected 2 unique and random solution to create new solution (not to create variable) --> remove third loop in original version
    + Check bound and update fitness after each individual move instead of after the whole population move in the original version
    + This version not only faster but also better than the original version

    Hyper-parameters should fine tuned in approximate range to get faster convergence toward the global optimum:
        + p_t (float): [0.01, 0.1], probability threshold (z in the paper)

    Examples
    ~~~~~~~~
    >>> import numpy as np
    >>> from mealpy.bio_based.SMA import BaseSMA
    >>>
    >>> def fitness_function(solution):
    >>>     return np.sum(solution**2)
    >>>
    >>> problem_dict1 = {
    >>>     "fit_func": fitness_function,
    >>>     "lb": [-10, -15, -4, -2, -8],
    >>>     "ub": [10, 15, 12, 8, 20],
    >>>     "minmax": "min",
    >>> }
    >>>
    >>> epoch = 1000
    >>> pop_size = 50
    >>> p_t = 0.03
    >>> model = BaseSMA(problem_dict1, epoch, pop_size, p_t)
    >>> best_position, best_fitness = model.solve()
    >>> print(f"Solution: {best_position}, Fitness: {best_fitness}")
    """
    # Index of the per-agent fitness-weight vector inside each solution list
    # (create_solution returns [position, target, weight]).
    ID_WEI = 2
    def __init__(self, problem, epoch=10000, pop_size=100, p_t=0.03, **kwargs):
        """
        Args:
            problem (dict): The problem dictionary
            epoch (int): maximum number of iterations, default = 10000
            pop_size (int): number of population size, default = 100
            p_t (float): probability threshold (z in the paper), default = 0.03
        """
        super().__init__(problem, kwargs)
        self.epoch = self.validator.check_int("epoch", epoch, [1, 100000])
        self.pop_size = self.validator.check_int("pop_size", pop_size, [10, 10000])
        self.p_t = self.validator.check_float("p_t", p_t, (0, 1.0))
        self.nfe_per_epoch = self.pop_size
        self.sort_flag = True
    def create_solution(self, lb=None, ub=None):
        """
        Create one agent: a random bound-checked position, its fitness wrapper,
        and a zero-initialized SMA weight vector.

        Args:
            lb: list of lower bound values
            ub: list of upper bound values

        Returns:
            list: [position, target, weight]
        """
        position = self.generate_position(lb, ub)
        position = self.amend_position(position, lb, ub)
        target = self.get_target_wrapper(position)
        weight = np.zeros(len(lb))
        return [position, target, weight]
    def evolve(self, epoch):
        """
        The main operations (equations) of algorithm. Inherit from Optimizer class

        Args:
            epoch (int): The current iteration
        """
        # plus eps to avoid denominator zero
        s = self.g_best[self.ID_TAR][self.ID_FIT] - self.pop[-1][self.ID_TAR][self.ID_FIT] + self.EPSILON
        # calculate the fitness weight of each slime mold
        for i in range(0, self.pop_size):
            # Eq.(2.5)
            # NOTE(review): self.sort_flag = True suggests the population is kept
            # sorted, so the first half presumably holds the better agents — confirm
            # against the Optimizer base class.
            if i <= int(self.pop_size / 2):
                self.pop[i][self.ID_WEI] = 1 + np.random.uniform(0, 1, self.problem.n_dims) * \
                            np.log10((self.g_best[self.ID_TAR][self.ID_FIT] - self.pop[i][self.ID_TAR][self.ID_FIT]) / s + 1)
            else:
                self.pop[i][self.ID_WEI] = 1 - np.random.uniform(0, 1, self.problem.n_dims) * \
                            np.log10((self.g_best[self.ID_TAR][self.ID_FIT] - self.pop[i][self.ID_TAR][self.ID_FIT]) / s + 1)
        a = np.arctanh(-((epoch + 1) / (self.epoch+1)) + 1)  # Eq.(2.4)
        b = 1 - (epoch + 1) / (self.epoch+1)
        pop_new = []
        for idx in range(0, self.pop_size):
            # Update the Position of search agent
            if np.random.uniform() < self.p_t:  # Eq.(2.7)
                # With small probability p_t, restart the agent at a random position.
                pos_new = self.generate_position(self.problem.lb, self.problem.ub)
            else:
                p = np.tanh(np.abs(self.pop[idx][self.ID_TAR][self.ID_FIT] - self.g_best[self.ID_TAR][self.ID_FIT]))  # Eq.(2.2)
                vb = np.random.uniform(-a, a, self.problem.n_dims)  # Eq.(2.3)
                vc = np.random.uniform(-b, b, self.problem.n_dims)
                # two positions randomly selected from population, apply for the whole problem size instead of 1 variable
                id_a, id_b = np.random.choice(list(set(range(0, self.pop_size)) - {idx}), 2, replace=False)
                pos_1 = self.g_best[self.ID_POS] + vb * (self.pop[idx][self.ID_WEI] * self.pop[id_a][self.ID_POS] - self.pop[id_b][self.ID_POS])
                pos_2 = vc * self.pop[idx][self.ID_POS]
                # Per-dimension choice between the two candidate moves.
                pos_new = np.where(np.random.uniform(0, 1, self.problem.n_dims) < p, pos_1, pos_2)
            # Check bound and re-calculate fitness after each individual move
            pos_new = self.amend_position(pos_new, self.problem.lb, self.problem.ub)
            pop_new.append([pos_new, None, np.zeros(self.problem.n_dims)])
        self.pop = self.update_target_wrapper_population(pop_new)
class OriginalSMA(BaseSMA):
    """
    The original version of: Slime Mould Algorithm (SMA)

    Links:
        1. https://doi.org/10.1016/j.future.2020.03.055
        2. https://www.researchgate.net/publication/340431861_Slime_mould_algorithm_A_new_method_for_stochastic_optimization

    Hyper-parameters should fine tuned in approximate range to get faster convergence toward the global optimum:
        + p_t (float): [0.01, 0.1], probability threshold (z in the paper)

    Examples
    ~~~~~~~~
    >>> import numpy as np
    >>> from mealpy.bio_based.SMA import OriginalSMA
    >>>
    >>> def fitness_function(solution):
    >>>     return np.sum(solution**2)
    >>>
    >>> problem_dict1 = {
    >>>     "fit_func": fitness_function,
    >>>     "lb": [-10, -15, -4, -2, -8],
    >>>     "ub": [10, 15, 12, 8, 20],
    >>>     "minmax": "min",
    >>> }
    >>>
    >>> epoch = 1000
    >>> pop_size = 50
    >>> p_t = 0.03
    >>> model = OriginalSMA(problem_dict1, epoch, pop_size, p_t)
    >>> best_position, best_fitness = model.solve()
    >>> print(f"Solution: {best_position}, Fitness: {best_fitness}")

    References
    ~~~~~~~~~~
    [1] Li, S., <NAME>., <NAME>., <NAME>. and <NAME>., 2020. Slime mould algorithm: A new method for
    stochastic optimization. Future Generation Computer Systems, 111, pp.300-323.
    """
    # Index of the per-agent fitness-weight vector inside each solution list.
    ID_WEI = 2
    def __init__(self, problem, epoch=10000, pop_size=100, p_t=0.03, **kwargs):
        """
        Args:
            problem (dict): The problem dictionary
            epoch (int): maximum number of iterations, default = 10000
            pop_size (int): number of population size, default = 100
            p_t (float): probability threshold (z in the paper), default = 0.03
        """
        super().__init__(problem, epoch, pop_size, p_t, **kwargs)
    def evolve(self, epoch):
        """
        The main operations (equations) of algorithm. Inherit from Optimizer class

        Unlike BaseSMA.evolve, this version follows the paper exactly and updates
        each dimension of an agent separately in an inner loop.

        Args:
            epoch (int): The current iteration
        """
        # plus eps to avoid denominator zero
        s = self.g_best[self.ID_TAR][self.ID_FIT] - self.pop[-1][self.ID_TAR][self.ID_FIT] + self.EPSILON
        # calculate the fitness weight of each slime mold
        for i in range(0, self.pop_size):
            # Eq.(2.5)
            if i <= int(self.pop_size / 2):
                self.pop[i][self.ID_WEI] = 1 + np.random.uniform(0, 1, self.problem.n_dims) * \
                            np.log10((self.g_best[self.ID_TAR][self.ID_FIT] - self.pop[i][self.ID_TAR][self.ID_FIT]) / s + 1)
            else:
                self.pop[i][self.ID_WEI] = 1 - np.random.uniform(0, 1, self.problem.n_dims) * \
                            np.log10((self.g_best[self.ID_TAR][self.ID_FIT] - self.pop[i][self.ID_TAR][self.ID_FIT]) / s + 1)
        a = np.arctanh(-((epoch + 1) / (self.epoch+1)) + 1)  # Eq.(2.4)
        b = 1 - (epoch + 1) / (self.epoch+1)
        pop_new = []
        for idx in range(0, self.pop_size):
            # Update the Position of search agent
            # Deep copy so per-dimension writes don't alias the current population.
            current_agent = deepcopy(self.pop[idx])
            if np.random.uniform() < self.p_t:  # Eq.(2.7)
                current_agent[self.ID_POS] = np.random.uniform(self.problem.lb, self.problem.ub)
            else:
                p = np.tanh(np.abs(current_agent[self.ID_TAR][self.ID_FIT] - self.g_best[self.ID_TAR][self.ID_FIT]))  # Eq.(2.2)
                vb = np.random.uniform(-a, a, self.problem.n_dims)  # Eq.(2.3)
                vc = np.random.uniform(-b, b, self.problem.n_dims)
                for j in range(0, self.problem.n_dims):
                    # two positions randomly selected from population
                    id_a, id_b = np.random.choice(list(set(range(0, self.pop_size)) - {idx}), 2, replace=False)
                    if np.random.uniform() < p:  # Eq.(2.1)
                        current_agent[self.ID_POS][j] = self.g_best[self.ID_POS][j] + \
                            vb[j] * (current_agent[self.ID_WEI][j] * self.pop[id_a][self.ID_POS][j] - self.pop[id_b][self.ID_POS][j])
                    else:
                        current_agent[self.ID_POS][j] = vc[j] * current_agent[self.ID_POS][j]
            # Check bound before deferring fitness evaluation to the batch update.
            pos_new = self.amend_position(current_agent[self.ID_POS], self.problem.lb, self.problem.ub)
            pop_new.append([pos_new, None, np.zeros(self.problem.n_dims)])
        self.pop = self.update_target_wrapper_population(pop_new)
| [
"numpy.abs",
"numpy.log10",
"copy.deepcopy",
"numpy.zeros",
"numpy.random.uniform",
"numpy.arctanh"
] | [((3792, 3841), 'numpy.arctanh', 'np.arctanh', (['(-((epoch + 1) / (self.epoch + 1)) + 1)'], {}), '(-((epoch + 1) / (self.epoch + 1)) + 1)\n', (3802, 3841), True, 'import numpy as np\n'), ((8079, 8128), 'numpy.arctanh', 'np.arctanh', (['(-((epoch + 1) / (self.epoch + 1)) + 1)'], {}), '(-((epoch + 1) / (self.epoch + 1)) + 1)\n', (8089, 8128), True, 'import numpy as np\n'), ((8328, 8351), 'copy.deepcopy', 'deepcopy', (['self.pop[idx]'], {}), '(self.pop[idx])\n', (8336, 8351), False, 'from copy import deepcopy\n'), ((4028, 4047), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (4045, 4047), True, 'import numpy as np\n'), ((4323, 4368), 'numpy.random.uniform', 'np.random.uniform', (['(-a)', 'a', 'self.problem.n_dims'], {}), '(-a, a, self.problem.n_dims)\n', (4340, 4368), True, 'import numpy as np\n'), ((4402, 4447), 'numpy.random.uniform', 'np.random.uniform', (['(-b)', 'b', 'self.problem.n_dims'], {}), '(-b, b, self.problem.n_dims)\n', (4419, 4447), True, 'import numpy as np\n'), ((8367, 8386), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (8384, 8386), True, 'import numpy as np\n'), ((8456, 8507), 'numpy.random.uniform', 'np.random.uniform', (['self.problem.lb', 'self.problem.ub'], {}), '(self.problem.lb, self.problem.ub)\n', (8473, 8507), True, 'import numpy as np\n'), ((8676, 8721), 'numpy.random.uniform', 'np.random.uniform', (['(-a)', 'a', 'self.problem.n_dims'], {}), '(-a, a, self.problem.n_dims)\n', (8693, 8721), True, 'import numpy as np\n'), ((8755, 8800), 'numpy.random.uniform', 'np.random.uniform', (['(-b)', 'b', 'self.problem.n_dims'], {}), '(-b, b, self.problem.n_dims)\n', (8772, 8800), True, 'import numpy as np\n'), ((4201, 4293), 'numpy.abs', 'np.abs', (['(self.pop[idx][self.ID_TAR][self.ID_FIT] - self.g_best[self.ID_TAR][self.\n ID_FIT])'], {}), '(self.pop[idx][self.ID_TAR][self.ID_FIT] - self.g_best[self.ID_TAR][\n self.ID_FIT])\n', (4207, 4293), True, 'import numpy as np\n'), ((5187, 5216), 'numpy.zeros', 
'np.zeros', (['self.problem.n_dims'], {}), '(self.problem.n_dims)\n', (5195, 5216), True, 'import numpy as np\n'), ((8554, 8646), 'numpy.abs', 'np.abs', (['(current_agent[self.ID_TAR][self.ID_FIT] - self.g_best[self.ID_TAR][self.\n ID_FIT])'], {}), '(current_agent[self.ID_TAR][self.ID_FIT] - self.g_best[self.ID_TAR][\n self.ID_FIT])\n', (8560, 8646), True, 'import numpy as np\n'), ((9588, 9617), 'numpy.zeros', 'np.zeros', (['self.problem.n_dims'], {}), '(self.problem.n_dims)\n', (9596, 9617), True, 'import numpy as np\n'), ((3380, 3424), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.problem.n_dims'], {}), '(0, 1, self.problem.n_dims)\n', (3397, 3424), True, 'import numpy as np\n'), ((3449, 3551), 'numpy.log10', 'np.log10', (['((self.g_best[self.ID_TAR][self.ID_FIT] - self.pop[i][self.ID_TAR][self.\n ID_FIT]) / s + 1)'], {}), '((self.g_best[self.ID_TAR][self.ID_FIT] - self.pop[i][self.ID_TAR][\n self.ID_FIT]) / s + 1)\n', (3457, 3551), True, 'import numpy as np\n'), ((3612, 3656), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.problem.n_dims'], {}), '(0, 1, self.problem.n_dims)\n', (3629, 3656), True, 'import numpy as np\n'), ((3681, 3783), 'numpy.log10', 'np.log10', (['((self.g_best[self.ID_TAR][self.ID_FIT] - self.pop[i][self.ID_TAR][self.\n ID_FIT]) / s + 1)'], {}), '((self.g_best[self.ID_TAR][self.ID_FIT] - self.pop[i][self.ID_TAR][\n self.ID_FIT]) / s + 1)\n', (3689, 3783), True, 'import numpy as np\n'), ((4916, 4960), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.problem.n_dims'], {}), '(0, 1, self.problem.n_dims)\n', (4933, 4960), True, 'import numpy as np\n'), ((7667, 7711), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.problem.n_dims'], {}), '(0, 1, self.problem.n_dims)\n', (7684, 7711), True, 'import numpy as np\n'), ((7736, 7838), 'numpy.log10', 'np.log10', (['((self.g_best[self.ID_TAR][self.ID_FIT] - self.pop[i][self.ID_TAR][self.\n ID_FIT]) / s + 1)'], {}), 
'((self.g_best[self.ID_TAR][self.ID_FIT] - self.pop[i][self.ID_TAR][\n self.ID_FIT]) / s + 1)\n', (7744, 7838), True, 'import numpy as np\n'), ((7899, 7943), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.problem.n_dims'], {}), '(0, 1, self.problem.n_dims)\n', (7916, 7943), True, 'import numpy as np\n'), ((7968, 8070), 'numpy.log10', 'np.log10', (['((self.g_best[self.ID_TAR][self.ID_FIT] - self.pop[i][self.ID_TAR][self.\n ID_FIT]) / s + 1)'], {}), '((self.g_best[self.ID_TAR][self.ID_FIT] - self.pop[i][self.ID_TAR][\n self.ID_FIT]) / s + 1)\n', (7976, 8070), True, 'import numpy as np\n'), ((9062, 9081), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (9079, 9081), True, 'import numpy as np\n')] |
"""PCA demo on the Iris dataset.

Produces two figures: a scatter plot of the data projected onto the first
two principal components, and the cumulative explained-variance curve of a
full 4-component fit.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA

iris = load_iris()

# Project onto the first two principal components and plot one cluster per class.
pca = PCA(n_components=2)
X_pca = pca.fit_transform(iris.data)
colors = ['navy', 'turquoise', 'darkorange']
plt.figure(figsize=(8, 8))
for color, i, target_name in zip(colors, [0, 1, 2], iris.target_names):
    plt.scatter(X_pca[iris.target == i, 0], X_pca[iris.target == i, 1], color=color, lw=2, label=target_name)
plt.title("PCA of iris dataset")
plt.legend(loc="best", shadow=False, scatterpoints=1)
plt.axis([-4, 4, -1.5, 1.5])
plt.show()

# Cumulative explained variance of a 4-component fit. The extra leading zero
# makes the curve start at (0, 0): "zero components explain zero variance".
# (The original script also re-computed pca4d.transform(iris.data) here and
# discarded the result; that dead call is removed.)
pca4d = PCA(n_components=4)
pca4d.fit(iris.data)
explained_variances = np.zeros(5)
np.cumsum(pca4d.explained_variance_ratio_, out=explained_variances[1:])
fig = plt.figure()
plt.plot(explained_variances)
plt.title('explained variances of Iris PCA')
ax = fig.gca()
# Component counts are integers, so force integer ticks on the x axis.
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.show()
| [
"sklearn.datasets.load_iris",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.ticker.MaxNLocator",
"numpy.cumsum",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend",
"matplo... | [((178, 189), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (187, 189), False, 'from sklearn.datasets import load_iris\n'), ((197, 216), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (200, 216), False, 'from sklearn.decomposition import PCA\n'), ((300, 326), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (310, 326), True, 'import matplotlib.pyplot as plt\n'), ((509, 541), 'matplotlib.pyplot.title', 'plt.title', (['"""PCA of iris dataset"""'], {}), "('PCA of iris dataset')\n", (518, 541), True, 'import matplotlib.pyplot as plt\n'), ((542, 595), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'shadow': '(False)', 'scatterpoints': '(1)'}), "(loc='best', shadow=False, scatterpoints=1)\n", (552, 595), True, 'import matplotlib.pyplot as plt\n'), ((596, 624), 'matplotlib.pyplot.axis', 'plt.axis', (['[-4, 4, -1.5, 1.5]'], {}), '([-4, 4, -1.5, 1.5])\n', (604, 624), True, 'import matplotlib.pyplot as plt\n'), ((625, 635), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (633, 635), True, 'import matplotlib.pyplot as plt\n'), ((646, 665), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(4)'}), '(n_components=4)\n', (649, 665), False, 'from sklearn.decomposition import PCA\n'), ((745, 756), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (753, 756), True, 'import numpy as np\n'), ((757, 828), 'numpy.cumsum', 'np.cumsum', (['pca4d.explained_variance_ratio_'], {'out': 'explained_variances[1:]'}), '(pca4d.explained_variance_ratio_, out=explained_variances[1:])\n', (766, 828), True, 'import numpy as np\n'), ((836, 848), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (846, 848), True, 'import matplotlib.pyplot as plt\n'), ((849, 878), 'matplotlib.pyplot.plot', 'plt.plot', (['explained_variances'], {}), '(explained_variances)\n', (857, 878), True, 'import matplotlib.pyplot as plt\n'), ((879, 923), 
'matplotlib.pyplot.title', 'plt.title', (['"""explained variances of Iris PCA"""'], {}), "('explained variances of Iris PCA')\n", (888, 923), True, 'import matplotlib.pyplot as plt\n'), ((993, 1003), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1001, 1003), True, 'import matplotlib.pyplot as plt\n'), ((403, 513), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_pca[iris.target == i, 0]', 'X_pca[iris.target == i, 1]'], {'color': 'color', 'lw': '(2)', 'label': 'target_name'}), '(X_pca[iris.target == i, 0], X_pca[iris.target == i, 1], color=\n color, lw=2, label=target_name)\n', (414, 513), True, 'import matplotlib.pyplot as plt\n'), ((966, 991), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (977, 991), False, 'from matplotlib.ticker import MaxNLocator\n')] |
"""Python module for COVID-19 Survival-Convolution Death Model.
In this module we focus on predicting the number of deaths with a similar
approach to modeling the number of infections.
"""
from typing import Optional, Sequence, Text, Tuple
import numpy as np
import tensorflow as tf
import piecewise_linear_infection_model as infection_model
tf.keras.backend.set_floatx('float64')
AVE_DAYS_TO_OUTCOME = 18.6
DEFAULT_MAX_INTERVENTION = 40
class Covid19DeathPredictModel(infection_model.Covid19InfectionsPredictModel):
  """Model to predict the number of cases dying from Covid-19.

  Attributes:
    w: hazard rate parameter used in defining the survival function that
      specifies the average of time until an infected patient becoming
      symptomatic and being diagnosed positive.
    death_rate: a variable that indicates probability of death given infection.
  """
  def __init__(self,
               max_intervention: int = DEFAULT_MAX_INTERVENTION,
               **kwargs):
    """Initializes an instance of Covid19DeathPredictModel.

    Args:
      max_intervention: truncation point (in days) of the onset-to-outcome
        survival distribution.
      **kwargs: "initial_guess_w", "variable_w_trainable",
        "variable_death_rate_trainable" and "initial_guess_death_rate" are
        consumed here; everything else is forwarded to the parent class.
    """
    self._max_intervention = max_intervention
    self.w = tf.Variable(
        kwargs.pop("initial_guess_w", AVE_DAYS_TO_OUTCOME),
        name="w",
        dtype=tf.float64,
        trainable=kwargs.pop("variable_w_trainable", True))
    # Pop these options *before* calling super().__init__ so the parent does
    # not receive unknown keyword arguments.
    trainable_death_rate = kwargs.pop("variable_death_rate_trainable", True)
    initial_guess_death_rate = kwargs.pop("initial_guess_death_rate", 0.04)
    super(Covid19DeathPredictModel, self).__init__(**kwargs)
    # One death-rate entry per model weight, allowing the death rate to vary
    # over time (see the tf.matmul at the end of daily_death).
    self.death_rate = tf.Variable(
        np.ones([self._n_weights, 1]) * initial_guess_death_rate,
        name="death_rate",
        dtype=tf.float64,
        trainable=trainable_death_rate)
  def get_outcome_survival_probs(self):
    """Returns survival probabilities of time from onset to final outcome.

    An exponential survival curve exp(-t / w) over `max_intervention` days,
    truncated and rescaled so the probability reaches 0 at the last day.
    """
    surv = tf.math.exp(
        tf.multiply(
            - 1. / self.w,
            tf.range(self._max_intervention, dtype=tf.float64)))
    return (surv - surv[self._max_intervention - 1]) / (
        1. - surv[self._max_intervention - 1])
  def get_death_conv_weights(self):
    """Returns the convolutional weights for modeling daily death numbers.

    These are the probability mass functions of the time from a case being
    infected until observing the final outcome (recovery / death). They are
    based on the assumption that: the incubation period, and the time between
    infection and final outcome (death / recovery) are independent.

    Implementation note: the two PMFs are obtained by differencing the two
    survival curves, then combined with tf.nn.conv1d. The filter is reversed
    (symptomatic_pmf[::-1]) so the operation is a true convolution of the two
    distributions rather than a cross-correlation.
    """
    symptomatic_surv = self.get_symptomatic_survival_probs()
    symptomatic_pmf = symptomatic_surv[:-1] - symptomatic_surv[1:]
    outcome_surv = self.get_outcome_survival_probs()
    outcome_pmf = outcome_surv[:-1] - outcome_surv[1:]
    return tf.squeeze(
        tf.nn.conv1d(
            tf.reshape(
                tf.pad(
                    outcome_pmf,
                    paddings=tf.constant(
                        [[self._max_latency, self._max_latency - 1]]
                    ),
                    mode="CONSTANT"),
                [1, 2 * self._max_latency + self._max_intervention - 2, 1]
            ),
            tf.reshape(
                tf.pad(
                    symptomatic_pmf[::-1],
                    paddings=tf.constant([[0, 1]]),
                    mode="CONSTANT"
                ),
                [self._max_latency, 1, 1]
            ), 1, "VALID"))
  def daily_death(self, inputs: np.ndarray,
                  static_tensorshape: bool = False):
    """Returns the number of daily new death cases in an array.

    Args:
      inputs: a reformulated sequence of predictors that fits tensorflow model.
        For the detailed formats, see method "_get_trainable_x()" in class
        Covid19InfectionsEstimator.
      static_tensorshape: if True, this method will return tensor with a fixed
        length in training a model. Otherwise, the length can be any positive
        integer, which is used in predicting the number of new confirmed cases
        in future.

    Raises:
      ValueError: if the convolution weights returned by
        get_death_conv_weights() do not have the expected length
        `max_latency + max_intervention - 1`.

    Returns:
      A 1d tensor for storing the number of daily new death cases. The length
      is same as the length of argument "inputs".
    """
    inputs_len = self._len_inputs if static_tensorshape else inputs.shape[0]
    daily_infected_cases = tf.reshape(
        tf.slice(
            self._daily_infected(inputs),
            begin=[0, self._max_latency - 1],
            size=[inputs_len, 1]
        ), [inputs_len])
    conv_weights_len = self._max_latency + self._max_intervention - 1
    conv_weights = self.get_death_conv_weights()
    if conv_weights.shape[0] != conv_weights_len:
      raise ValueError(
          f"The length of convolutional weights {conv_weights.shape[0]} is "
          f"different from the expected value {conv_weights_len}.")
    # Convolve daily infections with the infection-to-death PMF, then scale
    # elementwise by a time-varying death rate.
    return tf.squeeze(
        tf.nn.conv1d(
            tf.reshape(
                tf.pad(
                    daily_infected_cases,
                    paddings=tf.constant(
                        [[conv_weights_len - 1, 0]]
                    ),
                    mode="CONSTANT"),
                [1, conv_weights_len + inputs_len - 1, 1]
            ),
            tf.reshape(
                conv_weights[::-1],
                [conv_weights_len, 1, 1]
            ), 1, "VALID")) * tf.reshape(  # Allow death rate change over time.
                tf.matmul(inputs, (self.death_rate)), [inputs_len])
  def call(self, inputs: np.ndarray) -> infection_model.TensorType:
    """Returns the number of deaths on a daily basis.

    This method needs to be overriden to subclass keras.Model.

    Args:
      inputs: a reformulated sequence of predictors that fits tensorflow model.
        For the detailed formats, see method "_get_trainable_x()" in class
        Covid19InfectionsEstimator.

    Returns:
      A 1d tensor for storing the number of daily new death cases. The length
      is same as the length of argument "inputs".
    """
    return self.daily_death(inputs, True)
class Covid19DeathEstimator(infection_model.Covid19InfectionsEstimator):
  """Selects the best model to predict Covid-19 death tolls."""
  def __init__(self, **kwargs):
    """Initializes a Covid19DeathEstimator instance."""
    super(Covid19DeathEstimator, self).__init__(**kwargs)
  def _fit_with_t0(self, data: Sequence[int], t0: int, message: Text,
                   enable_tensorboard: bool = False,
                   tensorboard_logdir: Optional[Text] = None
                   ) -> Tuple[
                       Covid19DeathPredictModel, infection_model.TensorType]:
    """Returns the death toll model after training with a given t0.

    Args:
      data: training data (number of daily new death tolls) in a 1d array.
      t0: specifies the number of days between the occurrence of the first
        infected case (patient zero) and the first observed case.
      message: optionally pass a prefix string in the filenames of training
        weights (in the format of hdf5 file). We will generate a lot of such
        files in the training process.
      enable_tensorboard: whether or not use tensorboard to monitor training.
      tensorboard_logdir: directory for tensorboard logs; forwarded to
        _setup_callbacks together with enable_tensorboard.

    Returns:
      model: the best model after training with t0.
      loss: the loss of the best model after training with t0.
    """
    model = Covid19DeathPredictModel(
        n_weights=2 * len(self._knots) + 1 - sum(self._knots_connect),
        t0=t0,
        len_inputs=len(data) + t0,
        max_latency=self._estimator_args.get(
            "max_latency", infection_model.DEFAULT_MAX_LATENCY),
        max_intervention=self._estimator_args.get(
            "max_intervention", DEFAULT_MAX_INTERVENTION),
        **self._model_args)
    x = self._get_trainable_x(len(data), t0)
    # Pad t0 elements at front to be 0.
    y = np.pad(data, [t0, 0]).astype(np.float64)
    # Define the loss function for each t0 value. The first t0 (padded) days
    # are excluded from the loss.
    def custom_loss(y_actual, y_pred):
      return self._estimator_args.get(
          "loss_function", tf.keras.losses.MSE)(
              # NOTE(review): a square-root transform of both series was tried
              # and left commented out; raw counts are compared at present.
              # tf.math.sqrt(y_actual[t0:]), tf.math.sqrt(y_pred[t0:]))
              (y_actual[t0:]), (y_pred[t0:]))
    optimizer_option = self._estimator_args.get(
        "optimizer", tf.keras.optimizers.Adam)
    optimizer = optimizer_option(
        learning_rate=self._estimator_args.get("learning_rate", 0.01),
        clipnorm=1.0)
    model.compile(optimizer, custom_loss)
    callbacks, min_loss_filepath = Covid19DeathEstimator._setup_callbacks(
        message, t0, enable_tensorboard, tensorboard_logdir)
    model.fit(
        x, y, epochs=self._estimator_args.get("epochs", 100),
        batch_size=len(data) + t0, shuffle=False,
        verbose=self._estimator_args.get("verbose", 0),
        callbacks=callbacks)
    # Restore the checkpoint with minimum loss before reporting it.
    model.load_weights(min_loss_filepath)
    loss = custom_loss(y, model(x))
    return model, loss
  def predict_death(self, duration: int,
                    flatten_future: bool = False) -> Optional[
                        infection_model.TensorType]:
    """Predicts the number of new death cases reported on each day.

    Args:
      duration: specifies the number of days for prediction.
      flatten_future: this parameter takes effect in prediction only,
        indicating whether the infection rate or death rate is flattened in
        the future.

    Returns:
      The number of daily new death cases in 1d tensor (length = duration),
      or None when no model has been fitted yet.
    """
    if self._final_model is None:
      return None
    x_pred = self._get_trainable_x(
        duration, self._final_model.t0, flatten_future)
    # Drop the first t0 (unobserved) days so the output aligns with day 1.
    return self._final_model.daily_death(x_pred)[self._final_model.t0:]
| [
"numpy.ones",
"tensorflow.keras.backend.set_floatx",
"tensorflow.range",
"tensorflow.constant",
"tensorflow.matmul",
"tensorflow.reshape",
"numpy.pad"
] | [((343, 381), 'tensorflow.keras.backend.set_floatx', 'tf.keras.backend.set_floatx', (['"""float64"""'], {}), "('float64')\n", (370, 381), True, 'import tensorflow as tf\n'), ((1548, 1577), 'numpy.ones', 'np.ones', (['[self._n_weights, 1]'], {}), '([self._n_weights, 1])\n', (1555, 1577), True, 'import numpy as np\n'), ((1902, 1952), 'tensorflow.range', 'tf.range', (['self._max_intervention'], {'dtype': 'tf.float64'}), '(self._max_intervention, dtype=tf.float64)\n', (1910, 1952), True, 'import tensorflow as tf\n'), ((5342, 5376), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'self.death_rate'], {}), '(inputs, self.death_rate)\n', (5351, 5376), True, 'import tensorflow as tf\n'), ((7792, 7813), 'numpy.pad', 'np.pad', (['data', '[t0, 0]'], {}), '(data, [t0, 0])\n', (7798, 7813), True, 'import numpy as np\n'), ((5157, 5213), 'tensorflow.reshape', 'tf.reshape', (['conv_weights[::-1]', '[conv_weights_len, 1, 1]'], {}), '(conv_weights[::-1], [conv_weights_len, 1, 1])\n', (5167, 5213), True, 'import tensorflow as tf\n'), ((2869, 2926), 'tensorflow.constant', 'tf.constant', (['[[self._max_latency, self._max_latency - 1]]'], {}), '([[self._max_latency, self._max_latency - 1]])\n', (2880, 2926), True, 'import tensorflow as tf\n'), ((3222, 3243), 'tensorflow.constant', 'tf.constant', (['[[0, 1]]'], {}), '([[0, 1]])\n', (3233, 3243), True, 'import tensorflow as tf\n'), ((4946, 4986), 'tensorflow.constant', 'tf.constant', (['[[conv_weights_len - 1, 0]]'], {}), '([[conv_weights_len - 1, 0]])\n', (4957, 4986), True, 'import tensorflow as tf\n')] |
"""Training script.
usage: train.py [options]
options:
--inner_learning_rate=ilr Learning rate of inner loop [default: 1e-3]
--outer_learning_rate=olr Learning rate of outer loop [default: 1e-4]
--batch_size=bs Size of task to train with [default: 4]
--inner_epochs=ie Amount of meta epochs in the inner loop [default: 10]
--height=h Height of image [default: 32]
--length=l Length of image [default: 32]
--dataset=ds Dataset name (Mnist, Omniglot, FIGR8) [default: FIGR8]
--neural_network=nn Either ResNet or DCGAN [default: DCGAN]
-h, --help Show this help message and exit
"""
from docopt import docopt
import torch
import torch.optim as optim
import torch.autograd as autograd
from tensorboardX import SummaryWriter
import numpy as np
import os
from environnements import MnistMetaEnv, OmniglotMetaEnv, FIGR8MetaEnv
from model import ResNetDiscriminator, ResNetGenerator, DCGANGenerator, DCGANDiscriminator
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def wassertein_loss(inputs, targets):
    """Wasserstein critic loss: mean of the elementwise product of scores and labels."""
    weighted = inputs * targets
    return weighted.mean()
def calc_gradient_penalty(discriminator, real_batch, fake_batch):
    """WGAN-GP penalty: pushes the critic's gradient norm toward 1 on random
    interpolations between real and fake samples (weight 10)."""
    mix = torch.rand(real_batch.shape[0], 1, device=device).view(-1, 1, 1, 1)
    interpolated = mix * real_batch + (1 - mix) * fake_batch
    interpolated = autograd.Variable(interpolated, requires_grad=True)
    critic_scores = discriminator(interpolated)
    grads = autograd.grad(outputs=critic_scores, inputs=interpolated,
                          grad_outputs=torch.ones(critic_scores.size(), device=device),
                          create_graph=True, retain_graph=True, only_inputs=True)[0]
    per_sample_norm = grads.norm(2, dim=1)
    return ((per_sample_norm - 1) ** 2).mean() * 10
def normalize_data(data):
    """Map values from [0, 1] to [-1, 1].

    Returns a new tensor/array instead of mutating `data` in place. The
    original in-place version (`data *= 2; data -= 1`) could corrupt other
    views of the same memory — e.g. in validation_run, `data.cpu().numpy()`
    shares storage with a CPU tensor, so the logged training images were
    silently rescaled too. All callers use the return value, so the pure
    form is a drop-in replacement.
    """
    return data * 2 - 1
def unnormalize_data(data):
    """Map values from [-1, 1] back to [0, 1].

    Returns a new tensor/array instead of mutating `data` in place, avoiding
    accidental writes through aliased views (see normalize_data for the same
    fix). All callers use the return value, so behavior is unchanged for them.
    """
    return (data + 1) / 2
class FIGR:
def __init__(self, args):
self.load_args(args)
self.id_string = self.get_id_string()
self.z_shape = 100
self.writer = SummaryWriter('Runs/' + self.id_string)
self.env = eval(self.dataset + 'MetaEnv(height=self.height, length=self.length)')
self.initialize_gan()
self.load_checkpoint()
    def inner_loop(self, real_batch):
        """Run one WGAN-GP update of the fast ("meta") generator and critic.

        Args:
            real_batch: tensor of real (normalized) images for the current task.

        Returns:
            Tuple (discriminator_loss, generator_loss) as Python floats.
        """
        self.meta_g.train()
        # One fake image per real image, from Gaussian latent codes.
        fake_batch = self.meta_g(torch.tensor(np.random.normal(size=(self.batch_size, self.z_shape)), dtype=torch.float, device=device))
        training_batch = torch.cat([real_batch, fake_batch])
        # Training discriminator
        gradient_penalty = calc_gradient_penalty(self.meta_d, real_batch, fake_batch)
        discriminator_pred = self.meta_d(training_batch)
        discriminator_loss = wassertein_loss(discriminator_pred, self.discriminator_targets)
        discriminator_loss += gradient_penalty
        self.meta_d_optim.zero_grad()
        discriminator_loss.backward()
        self.meta_d_optim.step()
        # Training generator
        # A fresh latent sample is scored by the just-updated critic.
        output = self.meta_d(self.meta_g(torch.tensor(np.random.normal(size=(self.batch_size, self.z_shape)), dtype=torch.float, device=device)))
        generator_loss = wassertein_loss(output, self.generator_targets)
        self.meta_g_optim.zero_grad()
        generator_loss.backward()
        self.meta_g_optim.step()
        return discriminator_loss.item(), generator_loss.item()
    def validation_run(self):
        """Adapt the meta models to a held-out task, then log a comparison image
        (real images on top, generated samples below) and the accumulated
        inner-loop losses to tensorboard."""
        data, task = self.env.sample_validation_task(self.batch_size)
        # Keep an unnormalized copy of the real images for logging.
        training_images = data.cpu().numpy()
        training_images = np.expand_dims(np.concatenate([training_images[i] for i in range(self.batch_size)], axis=-1), 0)
        data = normalize_data(data)
        real_batch = data.to(device)
        discriminator_total_loss = 0
        generator_total_loss = 0
        # Fine-tune the fast models on this task for inner_epochs steps.
        for _ in range(self.inner_epochs):
            disc_loss, gen_loss = self.inner_loop(real_batch)
            discriminator_total_loss += disc_loss
            generator_total_loss += gen_loss
        self.meta_g.eval()
        with torch.no_grad():
            # Sample 3 generated images per task example ...
            img = self.meta_g(torch.tensor(np.random.normal(size=(self.batch_size * 3, self.z_shape)), dtype=torch.float, device=device))
            img = img.detach().cpu().numpy()
            # ... and tile them into one grid (3 rows per column).
            img = np.expand_dims(np.concatenate([np.concatenate([img[i * 3 + j] for j in range(3)], axis=-2) for i in range(self.batch_size)], axis=-1), 0)
            img = unnormalize_data(img)
            # Stack the real images above the generated grid.
            img = np.concatenate([training_images, img], axis=-2)
            self.writer.add_image('Validation_generated', img, self.eps)
        self.writer.add_scalar('Validation_discriminator_loss', discriminator_total_loss, self.eps)
        self.writer.add_scalar('Validation_generator_loss', generator_total_loss, self.eps)
    def meta_training_loop(self):
        """One meta-training step: adapt the fast models on a sampled task, then
        move the slow (meta) weights toward the adapted weights."""
        data, task = self.env.sample_training_task(self.batch_size)
        data = normalize_data(data)
        real_batch = data.to(device)
        discriminator_total_loss = 0
        generator_total_loss = 0
        # Task-specific adaptation of the fast models.
        for _ in range(self.inner_epochs):
            disc_loss, gen_loss = self.inner_loop(real_batch)
            discriminator_total_loss += disc_loss
            generator_total_loss += gen_loss
        self.writer.add_scalar('Training_discriminator_loss', discriminator_total_loss, self.eps)
        self.writer.add_scalar('Training_generator_loss', generator_total_loss, self.eps)
        # Updating both generator and discriminator
        # Reptile-style outer update: use (slow - fast) as a surrogate gradient,
        # so an SGD/Adam step moves the slow weights toward the adapted ones.
        for p, meta_p in zip(self.g.parameters(), self.meta_g.parameters()):
            diff = p - meta_p.cpu()
            p.grad = diff
        self.g_optim.step()
        for p, meta_p in zip(self.d.parameters(), self.meta_d.parameters()):
            diff = p - meta_p.cpu()
            p.grad = diff
        self.d_optim.step()
def reset_meta_model(self):
self.meta_g.train()
self.meta_d.train()
self.meta_d.load_state_dict(self.d.state_dict())
self.meta_g.load_state_dict(self.g.state_dict())
def training(self):
while self.eps <= 1000000:
self.reset_meta_model()
self.meta_training_loop()
# Validation run every 10000 training loop
if self.eps % 10000 == 0:
self.reset_meta_model()
self.validation_run()
self.checkpoint_model()
self.eps += 1
def load_args(self, args):
self.outer_learning_rate = float(args['--outer_learning_rate'])
self.inner_learning_rate = float(args['--inner_learning_rate'])
self.batch_size = int(args['--batch_size'])
self.inner_epochs = int(args['--inner_epochs'])
self.height = int(args['--height'])
self.length = int(args['--length'])
self.dataset = args['--dataset']
self.neural_network = args['--neural_network']
def load_checkpoint(self):
if os.path.isfile('Runs/' + self.id_string + '/checkpoint'):
checkpoint = torch.load('Runs/' + self.id_string + '/checkpoint')
self.d.load_state_dict(checkpoint['discriminator'])
self.g.load_state_dict(checkpoint['generator'])
self.eps = checkpoint['episode']
else:
self.eps = 0
def get_id_string(self):
return '{}_{}_olr{}_ilr{}_bsize{}_ie{}_h{}_l{}'.format(self.neural_network,
self.dataset,
str(self.outer_learning_rate),
str(self.inner_learning_rate),
str(self.batch_size),
str(self.inner_epochs),
str(self.height),
str(self.length))
    def initialize_gan(self):
        """Instantiate G/D (plus meta copies), their optimizers and target tensors.

        The class to build is chosen by name: ``self.neural_network`` is
        concatenated with 'Discriminator'/'Generator' and evaluated.
        NOTE(review): eval() of a user-supplied string is fragile and unsafe
        for untrusted input; a lookup table of constructors would be safer.
        """
        # D and G on CPU since they never do a feed forward operation
        self.d = eval(self.neural_network + 'Discriminator(self.env.channels, self.env.height, self.env.length)')
        self.g = eval(self.neural_network + 'Generator(self.z_shape, self.env.channels, self.env.height, self.env.length)')
        # Meta copies live on `device` and do all the forward/backward work.
        self.meta_d = eval(self.neural_network + 'Discriminator(self.env.channels, self.env.height, self.env.length)').to(device)
        self.meta_g = eval(self.neural_network + 'Generator(self.z_shape, self.env.channels, self.env.height, self.env.length)').to(device)
        # Adam for the (slow) outer Reptile step, SGD for the inner adaptation.
        self.d_optim = optim.Adam(params=self.d.parameters(), lr=self.outer_learning_rate)
        self.g_optim = optim.Adam(params=self.g.parameters(), lr=self.outer_learning_rate)
        self.meta_d_optim = optim.SGD(params=self.meta_d.parameters(), lr=self.inner_learning_rate)
        self.meta_g_optim = optim.SGD(params=self.meta_g.parameters(), lr=self.inner_learning_rate)
        # Targets: +1 for real, -1 for fake (discriminator); +1 (generator).
        self.discriminator_targets = torch.tensor([1] * self.batch_size + [-1] * self.batch_size, dtype=torch.float, device=device).view(-1, 1)
        self.generator_targets = torch.tensor([1] * self.batch_size, dtype=torch.float, device=device).view(-1, 1)
def checkpoint_model(self):
checkpoint = {'discriminator': self.d.state_dict(),
'generator': self.g.state_dict(),
'episode': self.eps}
torch.save(checkpoint, 'Runs/' + self.id_string + '/checkpoint')
# Script entry point: parse command-line arguments with docopt and start
# meta-training.
if __name__ == '__main__':
    args = docopt(__doc__)
    env = FIGR(args)
    env.training()
| [
"numpy.random.normal",
"tensorboardX.SummaryWriter",
"torch.mean",
"torch.load",
"os.path.isfile",
"torch.cat",
"torch.tensor",
"torch.cuda.is_available",
"numpy.concatenate",
"torch.save",
"torch.no_grad",
"torch.autograd.Variable",
"docopt.docopt",
"torch.rand"
] | [((1173, 1201), 'torch.mean', 'torch.mean', (['(inputs * targets)'], {}), '(inputs * targets)\n', (1183, 1201), False, 'import torch\n'), ((1284, 1333), 'torch.rand', 'torch.rand', (['real_batch.shape[0]', '(1)'], {'device': 'device'}), '(real_batch.shape[0], 1, device=device)\n', (1294, 1333), False, 'import torch\n'), ((1458, 1509), 'torch.autograd.Variable', 'autograd.Variable', (['interpolates'], {'requires_grad': '(True)'}), '(interpolates, requires_grad=True)\n', (1475, 1509), True, 'import torch.autograd as autograd\n'), ((9701, 9716), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (9707, 9716), False, 'from docopt import docopt\n'), ((1084, 1109), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1107, 1109), False, 'import torch\n'), ((2242, 2281), 'tensorboardX.SummaryWriter', 'SummaryWriter', (["('Runs/' + self.id_string)"], {}), "('Runs/' + self.id_string)\n", (2255, 2281), False, 'from tensorboardX import SummaryWriter\n'), ((2662, 2697), 'torch.cat', 'torch.cat', (['[real_batch, fake_batch]'], {}), '([real_batch, fake_batch])\n', (2671, 2697), False, 'import torch\n'), ((4598, 4645), 'numpy.concatenate', 'np.concatenate', (['[training_images, img]'], {'axis': '(-2)'}), '([training_images, img], axis=-2)\n', (4612, 4645), True, 'import numpy as np\n'), ((7018, 7074), 'os.path.isfile', 'os.path.isfile', (["('Runs/' + self.id_string + '/checkpoint')"], {}), "('Runs/' + self.id_string + '/checkpoint')\n", (7032, 7074), False, 'import os\n'), ((9597, 9661), 'torch.save', 'torch.save', (['checkpoint', "('Runs/' + self.id_string + '/checkpoint')"], {}), "(checkpoint, 'Runs/' + self.id_string + '/checkpoint')\n", (9607, 9661), False, 'import torch\n'), ((4200, 4215), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4213, 4215), False, 'import torch\n'), ((7101, 7153), 'torch.load', 'torch.load', (["('Runs/' + self.id_string + '/checkpoint')"], {}), "('Runs/' + self.id_string + '/checkpoint')\n", (7111, 7153), 
False, 'import torch\n'), ((2546, 2600), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(self.batch_size, self.z_shape)'}), '(size=(self.batch_size, self.z_shape))\n', (2562, 2600), True, 'import numpy as np\n'), ((9174, 9273), 'torch.tensor', 'torch.tensor', (['([1] * self.batch_size + [-1] * self.batch_size)'], {'dtype': 'torch.float', 'device': 'device'}), '([1] * self.batch_size + [-1] * self.batch_size, dtype=torch.\n float, device=device)\n', (9186, 9273), False, 'import torch\n'), ((9314, 9383), 'torch.tensor', 'torch.tensor', (['([1] * self.batch_size)'], {'dtype': 'torch.float', 'device': 'device'}), '([1] * self.batch_size, dtype=torch.float, device=device)\n', (9326, 9383), False, 'import torch\n'), ((3209, 3263), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(self.batch_size, self.z_shape)'}), '(size=(self.batch_size, self.z_shape))\n', (3225, 3263), True, 'import numpy as np\n'), ((4260, 4318), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(self.batch_size * 3, self.z_shape)'}), '(size=(self.batch_size * 3, self.z_shape))\n', (4276, 4318), True, 'import numpy as np\n')] |
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from PIL import Image
from matplotlib import pyplot as plt
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from tqdm import tqdm
# Paths to the exported frozen model, its label map and the test images.
CWD_PATH = '/home/ubuntu/rue/object_detector/open_images_fashion'
PATH_TO_CKPT = os.path.join(CWD_PATH, 'export/frozen_inference_graph.pb')
PATH_TO_LABELS = os.path.join(CWD_PATH, 'openimage_label_map.pbtxt')
IMAGE_SIZE = (12, 8)
PATH_TO_TEST_IMAGES_DIR = '/home/ubuntu/rue/object_detector/open_images_fashion/challenge2018_test'
TEST_IMAGE_PATHS = os.listdir(PATH_TO_TEST_IMAGES_DIR)
NUM_CLASSES = 493
# Build the class-id -> category dict used to turn predicted class ids
# into display names.
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# class_names.csv maps Open Images label ids (col 0) to display names
# (col 1); build both directions of the mapping.
class_names = pd.read_csv(os.path.join(CWD_PATH, 'class_names.csv'), header=None)
l2n_dict = {}
n2l_dict = {}
for i in range(class_names.shape[0]):
    label = class_names.iloc[i,0]
    name = class_names.iloc[i,1]
    l2n_dict[label] = name
    n2l_dict[name] = label
#end for
def detect_objects(image_np, sess, detection_graph):
    """Run the detection graph once on a single RGB image array.

    Args:
        image_np: HxWx3 uint8 image array.
        sess: open tf.Session bound to *detection_graph*.
        detection_graph: frozen graph using the standard TF
            object-detection tensor names.

    Returns:
        (boxes, scores, classes) arrays as produced by the graph, each
        with a leading batch dimension of 1.
    """
    # The graph expects a batch axis: [1, height, width, 3].
    batched = np.expand_dims(image_np, axis=0)

    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    # Output tensors: box coordinates, per-box confidence scores,
    # per-box class ids, and the number of detections.
    fetches = [detection_graph.get_tensor_by_name(name + ':0')
               for name in ('detection_boxes', 'detection_scores',
                            'detection_classes', 'num_detections')]

    # num_detections is computed but intentionally not returned.
    boxes, scores, classes, _ = sess.run(
        fetches, feed_dict={image_tensor: batched})

    # (An optional visualization step existed here but was disabled.)
    return boxes, scores, classes
def load_image_into_numpy_array(image):
    """Convert a PIL-style image into a (height, width, 3) uint8 array."""
    width, height = image.size
    pixels = np.array(image.getdata())
    return pixels.reshape((height, width, 3)).astype(np.uint8)
#Load a frozen TF model
# Deserialize the frozen inference graph from disk and import it into a
# fresh tf.Graph (name='' keeps the original tensor names).
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
# Keep at most TOP_K detections per image, dropping any that score at or
# below SCORE_THRESHOLD.
TOP_K = 3
SCORE_THRESHOLD = 0.15
submission_df = pd.DataFrame(columns=['ImageId', 'PredictionString'])
# Run inference over every test image and accumulate one submission row
# per image; write the result to a CSV at the end.
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        for idx, image_path in tqdm(enumerate(TEST_IMAGE_PATHS)):
            image = Image.open(PATH_TO_TEST_IMAGES_DIR + '/' + image_path)
            image_np = load_image_into_numpy_array(image)
            # Image id = file name without extension.
            image_name = image_path.split('/')[-1].split('.')[0]
            image_boxes, image_scores, image_classes = detect_objects(image_np, sess, detection_graph)
            # NOTE(review): assumes detections come back sorted by score
            # (standard for the TF object-detection API) -- confirm.
            image_scores_top = image_scores.flatten()[:TOP_K]
            image_classes_top = image_classes.flatten()[:TOP_K]
            prediction_str = ""
            for i in range(TOP_K):
                if (image_scores_top[i] > SCORE_THRESHOLD):
                    image_object_label = category_index[image_classes_top[i]]['name']
                    # Unpack order follows the graph's box layout.
                    y_min, x_min, y_max, x_max = image_boxes[0,i,:]
                    #print(image_object_label)
                    #print(n2l_dict[image_object_label])
                    #print(image_object_box)
                    # Submission format: "<label> <score> <x_min> <y_min> <x_max> <y_max> ".
                    prediction_str += n2l_dict[image_object_label] + " " + str(round(image_scores_top[i], 2)) + " " + str(round(x_min, 4)) + " " + str(round(y_min, 4)) + " " + str(round(x_max, 4)) + " " + str(round(y_max, 4)) + " "
            #end for
            print("{},{}".format(image_name, prediction_str))
            submission_df.loc[idx,'ImageId'] = image_name
            submission_df.loc[idx,'PredictionString'] = prediction_str
submission_df.to_csv("./ssd_76880_top3_t015_with_scores_ordered.csv", index=False)
| [
"tensorflow.Graph",
"os.listdir",
"PIL.Image.open",
"tensorflow.gfile.GFile",
"tensorflow.Session",
"os.path.join",
"tensorflow.GraphDef",
"object_detection.utils.label_map_util.convert_label_map_to_categories",
"numpy.expand_dims",
"pandas.DataFrame",
"tensorflow.import_graph_def",
"object_de... | [((356, 414), 'os.path.join', 'os.path.join', (['CWD_PATH', '"""export/frozen_inference_graph.pb"""'], {}), "(CWD_PATH, 'export/frozen_inference_graph.pb')\n", (368, 414), False, 'import os\n'), ((432, 483), 'os.path.join', 'os.path.join', (['CWD_PATH', '"""openimage_label_map.pbtxt"""'], {}), "(CWD_PATH, 'openimage_label_map.pbtxt')\n", (444, 483), False, 'import os\n'), ((625, 660), 'os.listdir', 'os.listdir', (['PATH_TO_TEST_IMAGES_DIR'], {}), '(PATH_TO_TEST_IMAGES_DIR)\n', (635, 660), False, 'import os\n'), ((695, 739), 'object_detection.utils.label_map_util.load_labelmap', 'label_map_util.load_labelmap', (['PATH_TO_LABELS'], {}), '(PATH_TO_LABELS)\n', (723, 739), False, 'from object_detection.utils import label_map_util\n'), ((753, 867), 'object_detection.utils.label_map_util.convert_label_map_to_categories', 'label_map_util.convert_label_map_to_categories', (['label_map'], {'max_num_classes': 'NUM_CLASSES', 'use_display_name': '(True)'}), '(label_map, max_num_classes=\n NUM_CLASSES, use_display_name=True)\n', (799, 867), False, 'from object_detection.utils import label_map_util\n'), ((880, 928), 'object_detection.utils.label_map_util.create_category_index', 'label_map_util.create_category_index', (['categories'], {}), '(categories)\n', (916, 928), False, 'from object_detection.utils import label_map_util\n'), ((2775, 2785), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2783, 2785), True, 'import tensorflow as tf\n'), ((3100, 3153), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['ImageId', 'PredictionString']"}), "(columns=['ImageId', 'PredictionString'])\n", (3112, 3153), True, 'import pandas as pd\n'), ((955, 996), 'os.path.join', 'os.path.join', (['CWD_PATH', '"""class_names.csv"""'], {}), "(CWD_PATH, 'class_names.csv')\n", (967, 996), False, 'import os\n'), ((1377, 1409), 'numpy.expand_dims', 'np.expand_dims', (['image_np'], {'axis': '(0)'}), '(image_np, axis=0)\n', (1391, 1409), True, 'import numpy as np\n'), ((2840, 2853), 
'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (2851, 2853), True, 'import tensorflow as tf\n'), ((2863, 2897), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['PATH_TO_CKPT', '"""rb"""'], {}), "(PATH_TO_CKPT, 'rb')\n", (2877, 2897), True, 'import tensorflow as tf\n'), ((3007, 3049), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (3026, 3049), True, 'import tensorflow as tf\n'), ((3199, 3232), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'detection_graph'}), '(graph=detection_graph)\n', (3209, 3232), True, 'import tensorflow as tf\n'), ((3328, 3382), 'PIL.Image.open', 'Image.open', (["(PATH_TO_TEST_IMAGES_DIR + '/' + image_path)"], {}), "(PATH_TO_TEST_IMAGES_DIR + '/' + image_path)\n", (3338, 3382), False, 'from PIL import Image\n')] |
"""
Evaluates the performance on the UKP ASPECT Corpus with hierachical clustering.
Greedy hierachical clustering.
Merges two clusters if the pairwise mean cluster similarity is larger than a threshold.
Merges clusters with highest similarity first
Uses dev set to determine the threshold for supervised systems
"""
import csv
import os
from collections import defaultdict
import numpy as np
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import AgglomerativeClustering
def hclustering(t2f_model, testfile, eval_method, threshold):
    """Cluster the unique sentences of *testfile*, grouped by topic.

    Sentences are first bucketed by topic (predicted by *t2f_model*,
    taken from the gold labels, or all lumped together, depending on
    *eval_method*), then each bucket is clustered with average-linkage
    agglomerative clustering on cosine distance.

    Args:
        t2f_model: model exposing ``documents``, ``document_vectors``
            and ``doc_topic_facet``.
        testfile: tab-separated file of (topic, sentence_a, sentence_b,
            label) rows.
        eval_method: one of "t2f", "gold_topics", "no_topic_model".
        threshold: distance threshold above which clusters stop merging.

    Returns:
        dict mapping topic -> {cluster_id -> [sentences]}.

    Raises:
        ValueError: if *eval_method* is not one of the supported modes.
    """
    # Collect the unique (sentence, gold_topic) pairs of the test file.
    unique_sentences = set()
    with open(testfile, 'r') as csvfile:
        csvreader = csv.reader(csvfile, delimiter='\t', quotechar=None)
        for splits in csvreader:
            splits = map(str.strip, splits)
            gold_topic, sentence_a, sentence_b, __ = splits
            unique_sentences.add((sentence_a, gold_topic))
            unique_sentences.add((sentence_b, gold_topic))

    # Bucket sentences by topic according to the evaluation mode.
    sentences_by_topic = {}
    for sent, gold_topic in unique_sentences:
        if eval_method == "t2f":
            idx = np.where(t2f_model.documents == sent)[0][0]
            topic = t2f_model.doc_topic_facet[idx]["topic"]
        elif eval_method == "gold_topics":
            topic = gold_topic
        elif eval_method == "no_topic_model":
            topic = 0
        else:
            # BUG FIX: an unknown eval_method previously fell through and
            # raised an opaque NameError on `topic` below.
            raise ValueError("unknown eval_method: %r" % (eval_method,))
        sentences_by_topic.setdefault(topic, []).append(sent)

    agg_cls = AgglomerativeClustering(n_clusters=None, affinity='precomputed',
                                      linkage="average", distance_threshold=threshold)

    clusters = {}
    for topic, docs in sentences_by_topic.items():
        clusters[topic] = {}
        # A singleton bucket cannot be clustered; it forms cluster 0.
        if len(docs) == 1:
            clusters[topic][0] = [docs[0]]
            continue
        t_doc_idxs = [np.where(t2f_model.documents == doc)[0][0] for doc in docs]
        dist_mat = 1 - cosine_similarity(t2f_model.document_vectors[t_doc_idxs])
        clustering = agg_cls.fit(dist_mat)
        # PERF FIX: iterate each label once (np.unique) instead of once
        # per document, which redundantly recomputed identical clusters.
        for cluster in np.unique(clustering.labels_):
            c_doc_idxs = np.where(clustering.labels_ == cluster)[0]
            clusters[topic][cluster] = [doc for idx, doc in enumerate(docs)
                                        if idx in c_doc_idxs]
    return clusters
def eval_split(clusters, labels_file):
    """Score a clustering against the pairwise similarity labels.

    A sentence pair counts as predicted-similar iff both sentences fall
    into the same cluster of the same topic.

    Returns:
        (mean F1-similar, mean F1-dissimilar, mean of the two), each
        macro-averaged over the topics in *labels_file*.
    """
    # Group the annotated sentence pairs by topic.
    test_data = defaultdict(list)
    with open(labels_file, 'r') as csvfile:
        csvreader = csv.reader(csvfile, delimiter='\t', quotechar=None)
        for splits in csvreader:
            splits = map(str.strip, splits)
            label_topic, sentence_a, sentence_b, label = splits
            # SS/HS are the "similar" labels; everything else is negative.
            label_bin = '1' if label in ['SS', 'HS'] else '0'
            test_data[label_topic].append(
                {'topic': label_topic, 'sentence_a': sentence_a,
                 'sentence_b': sentence_b, 'label': label,
                 'label_bin': label_bin})

    # Map every sentence to a globally unique "<topic>_<cluster>" id.
    sentences_cluster_id = {
        sentence: str(topic) + "_" + str(cluster_id)
        for topic, topic_clusters in clusters.items()
        for cluster_id, members in topic_clusters.items()
        for sentence in members
    }

    all_f1_sim = []
    all_f1_dissim = []
    all_f1_means = []
    for topic, annotations in test_data.items():
        y_true = np.zeros(len(annotations))
        y_pred = np.zeros(len(annotations))
        for idx, annotation in enumerate(annotations):
            if annotation['label_bin'] == '1':
                y_true[idx] = 1
            a_id = sentences_cluster_id[annotation['sentence_a']]
            b_id = sentences_cluster_id[annotation['sentence_b']]
            if a_id == b_id:
                y_pred[idx] = 1
        f_sim = f1_score(y_true, y_pred, pos_label=1)
        f_dissim = f1_score(y_true, y_pred, pos_label=0)
        all_f1_sim.append(f_sim)
        all_f1_dissim.append(f_dissim)
        all_f1_means.append(np.mean([f_sim, f_dissim]))
    return np.mean(all_f1_sim), np.mean(all_f1_dissim), np.mean(all_f1_means)
def best_clustering_split(t2f_model, eval_method, split, test_path_tplt):
    """Tune the clustering threshold on dev, then cluster the test file.

    Tries thresholds 0.00, 0.05, ..., 0.95 on the dev portion of
    *split*, keeps the one with the best mean F1, and uses it to cluster
    the test portion.
    """
    dev_file = test_path_tplt.format(split=split, mode="dev")
    test_file = test_path_tplt.format(split=split, mode="test")

    best_threshold, best_f1 = 0, 0
    for step in range(20):
        threshold = step / 20
        dev_clusters = hclustering(t2f_model, dev_file, eval_method, threshold)
        __, __, f1_mean = eval_split(dev_clusters, dev_file)
        if f1_mean > best_f1:
            best_threshold, best_f1 = threshold, f1_mean

    # Cluster the test data with the threshold chosen on dev.
    return hclustering(t2f_model, test_file, eval_method, best_threshold)
def eval_t2f_hcl(t2f_model, eval_method, project_path):
    """Evaluate *t2f_model* on all four UKP ASPECT splits and print macro F1s."""
    test_path_tplt = os.path.join(project_path, "datasets", "ukp_aspect", "splits",
                                  "{split}", "{mode}.tsv")
    scores = []  # one (f1_sim, f1_dissim, f1_mean) triple per split
    for split in (0, 1, 2, 3):
        test_file = test_path_tplt.format(split=split, mode="test")
        clusters = best_clustering_split(t2f_model, eval_method, split,
                                         test_path_tplt)
        scores.append(eval_split(clusters, test_file))
    all_f1_sim, all_f1_dissim, all_f1 = zip(*scores)
    print("F-Mean: %.4f" % (np.mean(all_f1)))
    print("F-sim: %.4f" % (np.mean(all_f1_sim)))
    print("F-dissim: %.4f" % (np.mean(all_f1_dissim)))
# def eval_t2f_full(t2f_model, project_path):
# test_file = os.path.join(project_path, "datasets", "ukp_aspect", "splits",
# "all_data.tsv")
# test_data = []
# with open(test_file, 'r') as csvfile:
# csvreader = csv.reader(csvfile, delimiter='\t', quotechar=None)
# for splits in csvreader:
# splits = map(str.strip, splits)
# __, sentence_a, sentence_b, label = splits
# label_bin = 1 if label in ['SS', 'HS'] else 0
# test_data.append((sentence_a, sentence_b, label_bin))
#
# y_true = np.zeros(len(test_data))
# y_pred = np.zeros(len(test_data))
#
# for idx, row in enumerate(test_data):
# sentence_a, sentence_b, label_bin = row
# if label_bin == 1:
# y_true[idx] = 1
#
# idx_a = np.where(t2f_model.documents == sentence_a)[0][0]
# idx_b = np.where(t2f_model.documents == sentence_b)[0][0]
#
# topic_a = t2f_model.doc_topic_facet[idx_a]["topic"]
# topic_b = t2f_model.doc_topic_facet[idx_b]["topic"]
# facet_a = t2f_model.doc_topic_facet[idx_a]["facet"]
# facet_b = t2f_model.doc_topic_facet[idx_b]["facet"]
#
# if topic_a == topic_b and facet_a == facet_b:
# y_pred[idx] = 1
#
# f_sim = f1_score(y_true, y_pred, pos_label=1)
# f_dissim = f1_score(y_true, y_pred, pos_label=0)
# f_mean = np.mean([f_sim, f_dissim])
# print("F-Mean: %.4f" % f_mean)
# print("F-sim: %.4f" % f_sim)
# print("F-dissim: %.4f" % f_dissim)
| [
"numpy.mean",
"sklearn.cluster.AgglomerativeClustering",
"sklearn.metrics.f1_score",
"sklearn.metrics.pairwise.cosine_similarity",
"numpy.where",
"os.path.join",
"collections.defaultdict",
"csv.reader"
] | [((1520, 1638), 'sklearn.cluster.AgglomerativeClustering', 'AgglomerativeClustering', ([], {'n_clusters': 'None', 'affinity': '"""precomputed"""', 'linkage': '"""average"""', 'distance_threshold': 'threshold'}), "(n_clusters=None, affinity='precomputed', linkage=\n 'average', distance_threshold=threshold)\n", (1543, 1638), False, 'from sklearn.cluster import AgglomerativeClustering\n'), ((2488, 2505), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2499, 2505), False, 'from collections import defaultdict\n'), ((5057, 5148), 'os.path.join', 'os.path.join', (['project_path', '"""datasets"""', '"""ukp_aspect"""', '"""splits"""', '"""{split}"""', '"""{mode}.tsv"""'], {}), "(project_path, 'datasets', 'ukp_aspect', 'splits', '{split}',\n '{mode}.tsv')\n", (5069, 5148), False, 'import os\n'), ((692, 743), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '"""\t"""', 'quotechar': 'None'}), "(csvfile, delimiter='\\t', quotechar=None)\n", (702, 743), False, 'import csv\n'), ((2570, 2621), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '"""\t"""', 'quotechar': 'None'}), "(csvfile, delimiter='\\t', quotechar=None)\n", (2580, 2621), False, 'import csv\n'), ((3913, 3950), 'sklearn.metrics.f1_score', 'f1_score', (['y_true', 'y_pred'], {'pos_label': '(1)'}), '(y_true, y_pred, pos_label=1)\n', (3921, 3950), False, 'from sklearn.metrics import f1_score\n'), ((3970, 4007), 'sklearn.metrics.f1_score', 'f1_score', (['y_true', 'y_pred'], {'pos_label': '(0)'}), '(y_true, y_pred, pos_label=0)\n', (3978, 4007), False, 'from sklearn.metrics import f1_score\n'), ((4025, 4051), 'numpy.mean', 'np.mean', (['[f_sim, f_dissim]'], {}), '([f_sim, f_dissim])\n', (4032, 4051), True, 'import numpy as np\n'), ((4172, 4191), 'numpy.mean', 'np.mean', (['all_f1_sim'], {}), '(all_f1_sim)\n', (4179, 4191), True, 'import numpy as np\n'), ((4193, 4215), 'numpy.mean', 'np.mean', (['all_f1_dissim'], {}), '(all_f1_dissim)\n', (4200, 4215), True, 'import numpy as np\n'), 
((4217, 4238), 'numpy.mean', 'np.mean', (['all_f1_means'], {}), '(all_f1_means)\n', (4224, 4238), True, 'import numpy as np\n'), ((1994, 2051), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['t2f_model.document_vectors[t_doc_idxs]'], {}), '(t2f_model.document_vectors[t_doc_idxs])\n', (2011, 2051), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((5930, 5945), 'numpy.mean', 'np.mean', (['all_f1'], {}), '(all_f1)\n', (5937, 5945), True, 'import numpy as np\n'), ((5975, 5994), 'numpy.mean', 'np.mean', (['all_f1_sim'], {}), '(all_f1_sim)\n', (5982, 5994), True, 'import numpy as np\n'), ((6027, 6049), 'numpy.mean', 'np.mean', (['all_f1_dissim'], {}), '(all_f1_dissim)\n', (6034, 6049), True, 'import numpy as np\n'), ((2163, 2202), 'numpy.where', 'np.where', (['(clustering.labels_ == cluster)'], {}), '(clustering.labels_ == cluster)\n', (2171, 2202), True, 'import numpy as np\n'), ((1125, 1162), 'numpy.where', 'np.where', (['(t2f_model.documents == sent)'], {}), '(t2f_model.documents == sent)\n', (1133, 1162), True, 'import numpy as np\n'), ((1911, 1947), 'numpy.where', 'np.where', (['(t2f_model.documents == doc)'], {}), '(t2f_model.documents == doc)\n', (1919, 1947), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 24 11:38:01 2017
@author: sajid
"""
import numpy as np
import numexpr as ne
import pyfftw
import dask.array as da
'''
contains functions propTF, propFF, prop1FT, propIR
'''
# Names exported by "from <module> import *".
__all__ = ['propTF',
           'prop1FT',
           'propFF',
           'propIR']
'''
Propagation using the transfer-function method.
Inputs -
u : profile of the beam at the input plane.
step : the sampling step size at the input plane.
L1 : side length of the support.
wavel : the wavelength of the light
z : the propagation distance
fft_object : optional FFT helper object
Outputs -
u : beam profile at the output plane
L1 : the side length of the support at the output plane.
'''
def propTF(u, step, L1, wavel, z, fft_object=None):
    # Angular-spectrum / transfer-function propagation over distance z.
    M, N = np.shape(u)
    # Frequency grids matching the FFT sample ordering.
    # NOTE(review): da.meshgrid's default 'xy' indexing yields (N, M)-shaped
    # grids, which only matches u's (M, N) shape when M == N -- confirm
    # callers always pass square fields.
    FX, FY = da.meshgrid(np.fft.fftfreq(M, step), np.fft.fftfreq(N, step))
    if fft_object is not None:
        # Presumably a pyfftw helper that transforms u in place -- TODO confirm.
        fft_object.run_fft2(u)
    else:
        u = np.fft.fft2(u)
    # Apply the free-space transfer function in the frequency domain;
    # ne.evaluate reads FX, FY, wavel and z from the local scope.
    u = ne.evaluate('exp(-1j*(2*pi*z/wavel)*sqrt(1-wavel**2*(FX**2+FY**2)))*u')
    if fft_object is not None:
        fft_object.run_ifft2(u)
    else:
        u = np.fft.ifft2(u)
    return u, L1
'''
Propagation using the single-Fourier-transform approach.
Input convention as above.
Inputs -
u : profile of the beam at the input plane.
step : the sampling step size at the input plane.
L1 : side length of the support.
wavel : the wavelength of the light
z : the propagation distance
Outputs -
u : beam profile at the output plane
L_out : the side length of the support at the output plane.
'''
def prop1FT(u, step, L1, wavel, z, fft_object=None):
    # Single-Fourier-transform (Fresnel) propagation over distance z.
    M, N = np.shape(u)
    # Input-plane coordinates.
    x = np.linspace(-L1/2.0, L1/2.0-step, M)
    y = np.linspace(-L1/2.0, L1/2.0-step, N)
    '''
    #Kenan's approach
    fx = np.fft.fftfreq(M,d=step)
    fy = np.fft.fftfreq(N,d=step)
    fx = pyfftw.interfaces.numpy_fft.fftshift(fx)
    fy = pyfftw.interfaces.numpy_fft.fftshift(fy)
    FX,FY = da.meshgrid((fx),(fy))
    c = np.exp((-1j*z*2*np.pi/wavel)*np.sqrt(1+wavel**2*(FX**2+FY**2)))
    '''
    # Output support scales with distance and wavelength.
    L_out = wavel*z/step
    X, Y = da.meshgrid(x, y)
    # Input-plane spherical phase factor; ne.evaluate reads X, Y, wavel
    # and z from the local scope.
    u = ne.evaluate('exp((-1j*2*pi/wavel)*sqrt(X**2+Y**2+z**2))*u')
    del X, Y
    if fft_object is not None:
        # Presumably a pyfftw helper that transforms u in place -- TODO confirm.
        fft_object.run_fft2(u)
    else:
        u = np.fft.fft2(u)
    u = np.fft.fftshift(u)
    # Output-plane coordinates.
    x2 = np.linspace(-L_out/2.0, L_out/2.0, M)
    y2 = np.linspace(-L_out/2.0, L_out/2.0, N)
    X2, Y2 = da.meshgrid(x2, y2)
    # Output-plane spherical phase factor.
    u = ne.evaluate('exp((-1j*2*pi/wavel)*sqrt(X2**2+Y2**2+z**2))*u')
    del X2, Y2
    # Overall amplitude scaling of the single-FT Fresnel integral.
    u = ne.evaluate('u*(1j/(wavel*z))*step*step')
    return u, L_out
'''
Fraunhofer propagation.
Inputs -
u : profile of the beam at the input plane.
step : the sampling step size at the input plane.
L1 : side length of the support.
wavel : the wavelength of the light
z : the propagation distance
Outputs -
u : beam profile at the output plane
L_out : the side length of the support at the output plane.
'''
def propFF(u, step, L1, wavel, z, fft_object=None):
    """Fraunhofer (far-field) propagation.

    Args:
        u: complex field at the input plane.
        step: sampling step size at the input plane.
        L1: side length of the input support (unused; kept for a uniform
            signature with the other propagators).
        wavel: wavelength of the light.
        z: propagation distance.
        fft_object: optional FFT helper; when given, its ``run_fft2`` is
            used instead of ``np.fft.fft2``.

    Returns:
        (u, L_out): field at the output plane and the side length of the
        output support.
    """
    M, N = np.shape(u)
    # Output support scales with distance and wavelength.
    L_out = wavel*z/step

    # Output-plane coordinate grids.
    x2 = np.linspace(-L_out/2.0, L_out/2.0, M)
    y2 = np.linspace(-L_out/2.0, L_out/2.0, N)
    X2, Y2 = np.meshgrid(x2, y2)

    # BUG FIX: the wavenumber k was used inside ne.evaluate() below but
    # never defined anywhere in the module, which raised a NameError at
    # runtime. k = 2*pi/lambda is the standard Fraunhofer phase factor.
    k = 2 * np.pi / wavel
    # Quadratic phase and amplitude factor of the Fraunhofer integral.
    c = ne.evaluate('exp((1j*k*(1/(2*z)))*(X2**2+Y2**2))')*(1/(1j*wavel*z))

    u = pyfftw.interfaces.numpy_fft.fftshift(u)
    if fft_object is not None:
        fft_object.run_fft2(u)
    else:
        u = np.fft.fft2(u)
    u = pyfftw.interfaces.numpy_fft.ifftshift(u)
    u = ne.evaluate('c*u')
    # Scale by the sample area (discretization of the integral).
    u *= step*step
    return u, L_out
'''
Warning: use is now deprecated!
Propagation using the impulse-response function. The convention of
shifting a function in real space before performing the Fourier
transform, which is used in the reference, is followed here. Input
convention as above. Use is deprecated since the implementation of
prop1FT covers ranges that are too large for TF but too small for FF.
'''
def propIR(u, step, L, wavel, z, fft_object=None):
    """(Deprecated) impulse-response Fresnel propagation.

    Kept for reference; prefer prop1FT for ranges that are too large for
    propTF but too small for propFF.

    Args follow the conventions of the other propagators; note this
    function returns only the propagated field ``u``.
    """
    M, N = np.shape(u)
    x = np.linspace(-L/2.0, L/2.0-step, M)
    y = np.linspace(-L/2.0, L/2.0-step, N)
    X, Y = np.meshgrid(x, y)

    # BUG FIX: the wavenumber k was referenced inside ne.evaluate() but
    # never defined anywhere in the module (NameError at runtime).
    k = 2 * np.pi / wavel
    # Fresnel impulse response h(x, y).
    h = ne.evaluate(
        '(exp(1j*k*z)/(1j*wavel*z))*exp(1j*k*(1/(2*z))*(X**2+Y**2))')
    h = pyfftw.interfaces.numpy_fft.fftshift(h)
    if fft_object is not None:
        fft_object.run_fft2(h)
    else:
        h = np.fft.fft2(h)
    # BUG FIX: H was used in ne.evaluate('H * u') below, but its defining
    # line was commented out; restore it (impulse response scaled by the
    # sample area). Also removed an unused pyfftw.empty_aligned buffer.
    H = h*step*step

    u = np.fft.fftshift(u)
    if fft_object is not None:
        fft_object.run_fft2(u)
    else:
        u = np.fft.fft2(u)
    u = ne.evaluate('H * u')
    if fft_object is not None:
        fft_object.run_ifft2(u)
    else:
        u = np.fft.ifft2(u)
    u = np.fft.ifftshift(u)
    return u
| [
"pyfftw.interfaces.numpy_fft.fftshift",
"numpy.fft.ifft2",
"numpy.fft.fftfreq",
"pyfftw.interfaces.numpy_fft.ifftshift",
"dask.array.meshgrid",
"numpy.fft.fft2",
"numpy.linspace",
"numpy.meshgrid",
"numpy.fft.ifftshift",
"numexpr.evaluate",
"numpy.fft.fftshift",
"numpy.shape"
] | [((800, 811), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (808, 811), True, 'import numpy as np\n'), ((996, 1067), 'numexpr.evaluate', 'ne.evaluate', (['"""exp(-1j*(2*pi*z/wavel)*sqrt(1-wavel**2*(FX**2+FY**2)))*u"""'], {}), "('exp(-1j*(2*pi*z/wavel)*sqrt(1-wavel**2*(FX**2+FY**2)))*u')\n", (1007, 1067), True, 'import numexpr as ne\n'), ((1677, 1688), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (1685, 1688), True, 'import numpy as np\n'), ((1697, 1739), 'numpy.linspace', 'np.linspace', (['(-L1 / 2.0)', '(L1 / 2.0 - step)', 'M'], {}), '(-L1 / 2.0, L1 / 2.0 - step, M)\n', (1708, 1739), True, 'import numpy as np\n'), ((1742, 1784), 'numpy.linspace', 'np.linspace', (['(-L1 / 2.0)', '(L1 / 2.0 - step)', 'N'], {}), '(-L1 / 2.0, L1 / 2.0 - step, N)\n', (1753, 1784), True, 'import numpy as np\n'), ((2130, 2147), 'dask.array.meshgrid', 'da.meshgrid', (['x', 'y'], {}), '(x, y)\n', (2141, 2147), True, 'import dask.array as da\n'), ((2156, 2215), 'numexpr.evaluate', 'ne.evaluate', (['"""exp((-1j*2*pi/wavel)*sqrt(X**2+Y**2+z**2))*u"""'], {}), "('exp((-1j*2*pi/wavel)*sqrt(X**2+Y**2+z**2))*u')\n", (2167, 2215), True, 'import numexpr as ne\n'), ((2338, 2356), 'numpy.fft.fftshift', 'np.fft.fftshift', (['u'], {}), '(u)\n', (2353, 2356), True, 'import numpy as np\n'), ((2367, 2408), 'numpy.linspace', 'np.linspace', (['(-L_out / 2.0)', '(L_out / 2.0)', 'M'], {}), '(-L_out / 2.0, L_out / 2.0, M)\n', (2378, 2408), True, 'import numpy as np\n'), ((2414, 2455), 'numpy.linspace', 'np.linspace', (['(-L_out / 2.0)', '(L_out / 2.0)', 'N'], {}), '(-L_out / 2.0, L_out / 2.0, N)\n', (2425, 2455), True, 'import numpy as np\n'), ((2465, 2484), 'dask.array.meshgrid', 'da.meshgrid', (['x2', 'y2'], {}), '(x2, y2)\n', (2476, 2484), True, 'import dask.array as da\n'), ((2493, 2554), 'numexpr.evaluate', 'ne.evaluate', (['"""exp((-1j*2*pi/wavel)*sqrt(X2**2+Y2**2+z**2))*u"""'], {}), "('exp((-1j*2*pi/wavel)*sqrt(X2**2+Y2**2+z**2))*u')\n", (2504, 2554), True, 'import numexpr as ne\n'), ((2579, 
2620), 'numexpr.evaluate', 'ne.evaluate', (['"""u*(1j/(wavel*z))*step*step"""'], {}), "('u*(1j/(wavel*z))*step*step')\n", (2590, 2620), True, 'import numexpr as ne\n'), ((3069, 3080), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (3077, 3080), True, 'import numpy as np\n'), ((3115, 3156), 'numpy.linspace', 'np.linspace', (['(-L_out / 2.0)', '(L_out / 2.0)', 'M'], {}), '(-L_out / 2.0, L_out / 2.0, M)\n', (3126, 3156), True, 'import numpy as np\n'), ((3162, 3203), 'numpy.linspace', 'np.linspace', (['(-L_out / 2.0)', '(L_out / 2.0)', 'N'], {}), '(-L_out / 2.0, L_out / 2.0, N)\n', (3173, 3203), True, 'import numpy as np\n'), ((3213, 3232), 'numpy.meshgrid', 'np.meshgrid', (['x2', 'y2'], {}), '(x2, y2)\n', (3224, 3232), True, 'import numpy as np\n'), ((3318, 3357), 'pyfftw.interfaces.numpy_fft.fftshift', 'pyfftw.interfaces.numpy_fft.fftshift', (['u'], {}), '(u)\n', (3354, 3357), False, 'import pyfftw\n'), ((3467, 3507), 'pyfftw.interfaces.numpy_fft.ifftshift', 'pyfftw.interfaces.numpy_fft.ifftshift', (['u'], {}), '(u)\n', (3504, 3507), False, 'import pyfftw\n'), ((3516, 3534), 'numexpr.evaluate', 'ne.evaluate', (['"""c*u"""'], {}), "('c*u')\n", (3527, 3534), True, 'import numexpr as ne\n'), ((4008, 4019), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (4016, 4019), True, 'import numpy as np\n'), ((4028, 4068), 'numpy.linspace', 'np.linspace', (['(-L / 2.0)', '(L / 2.0 - step)', 'M'], {}), '(-L / 2.0, L / 2.0 - step, M)\n', (4039, 4068), True, 'import numpy as np\n'), ((4071, 4111), 'numpy.linspace', 'np.linspace', (['(-L / 2.0)', '(L / 2.0 - step)', 'N'], {}), '(-L / 2.0, L / 2.0 - step, N)\n', (4082, 4111), True, 'import numpy as np\n'), ((4117, 4134), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (4128, 4134), True, 'import numpy as np\n'), ((4144, 4217), 'numexpr.evaluate', 'ne.evaluate', (['"""(exp(1j*k*z)/(1j*wavel*z))*exp(1j*k*(1/(2*z))*(X**2+Y**2))"""'], {}), "('(exp(1j*k*z)/(1j*wavel*z))*exp(1j*k*(1/(2*z))*(X**2+Y**2))')\n", (4155, 4217), 
True, 'import numexpr as ne\n'), ((4282, 4321), 'pyfftw.interfaces.numpy_fft.fftshift', 'pyfftw.interfaces.numpy_fft.fftshift', (['h'], {}), '(h)\n', (4318, 4321), False, 'import pyfftw\n'), ((4467, 4485), 'numpy.fft.fftshift', 'np.fft.fftshift', (['u'], {}), '(u)\n', (4482, 4485), True, 'import numpy as np\n'), ((4595, 4615), 'numexpr.evaluate', 'ne.evaluate', (['"""H * u"""'], {}), "('H * u')\n", (4606, 4615), True, 'import numexpr as ne\n'), ((4727, 4746), 'numpy.fft.ifftshift', 'np.fft.ifftshift', (['u'], {}), '(u)\n', (4743, 4746), True, 'import numpy as np\n'), ((837, 860), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['M', 'step'], {}), '(M, step)\n', (851, 860), True, 'import numpy as np\n'), ((862, 885), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['N', 'step'], {}), '(N, step)\n', (876, 885), True, 'import numpy as np\n'), ((972, 986), 'numpy.fft.fft2', 'np.fft.fft2', (['u'], {}), '(u)\n', (983, 986), True, 'import numpy as np\n'), ((1154, 1169), 'numpy.fft.ifft2', 'np.fft.ifft2', (['u'], {}), '(u)\n', (1166, 1169), True, 'import numpy as np\n'), ((2314, 2328), 'numpy.fft.fft2', 'np.fft.fft2', (['u'], {}), '(u)\n', (2325, 2328), True, 'import numpy as np\n'), ((3242, 3292), 'numexpr.evaluate', 'ne.evaluate', (['"""exp((1j*k*(1/(2*z)))*(X2**2+Y2**2))"""'], {}), "('exp((1j*k*(1/(2*z)))*(X2**2+Y2**2))')\n", (3253, 3292), True, 'import numexpr as ne\n'), ((3443, 3457), 'numpy.fft.fft2', 'np.fft.fft2', (['u'], {}), '(u)\n', (3454, 3457), True, 'import numpy as np\n'), ((4260, 4271), 'numpy.shape', 'np.shape', (['h'], {}), '(h)\n', (4268, 4271), True, 'import numpy as np\n'), ((4421, 4435), 'numpy.fft.fft2', 'np.fft.fft2', (['h'], {}), '(h)\n', (4432, 4435), True, 'import numpy as np\n'), ((4571, 4585), 'numpy.fft.fft2', 'np.fft.fft2', (['u'], {}), '(u)\n', (4582, 4585), True, 'import numpy as np\n'), ((4702, 4717), 'numpy.fft.ifft2', 'np.fft.ifft2', (['u'], {}), '(u)\n', (4714, 4717), True, 'import numpy as np\n')] |
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
# import sys
from time import time
import matplotlib.pyplot as plt
import os
import argparse as ap
# from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.svm import LinearSVC
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
# NOTE(review): the OptionParser options are declared but parse_args() is
# commented out further down; the effective configuration is the hard-coded
# `opts` dict filled in after each add_option call and wrapped in an
# argparse.Namespace at the end of this section.
op = OptionParser()
opts = {}
op.add_option("--report",
              action="store_true", dest="print_report",
              help="Print a detailed classification report.")
opts['print_report'] = True
op.add_option("--chi2_select",
              action="store", type="int", dest="select_chi2",
              help="Select some number of features using a chi-squared test")
opts['select_chi2'] = 3
op.add_option("--confusion_matrix",
              action="store_true", dest="print_cm",
              help="Print the confusion matrix.")
opts['print_cm'] = True
op.add_option("--top10",
              action="store_true", dest="print_top10",
              help="Print ten most discriminative terms per class"
                   " for every classifier.")
opts['print_top10'] = True
op.add_option("--all_categories",
              action="store_true", dest="all_categories",
              help="Whether to use all categories or not.")
opts['all_categories'] = True
op.add_option("--use_hashing",
              action="store_true",
              help="Use a hashing vectorizer.")
opts['use_hashing'] = True
op.add_option("--n_features",
              action="store", type=int, default=2 ** 16,
              help="n_features when using the hashing vectorizer.")
opts['n_features'] = 2 ** 16
op.add_option("--filtered",
              action="store_true",
              help="Remove newsgroup information that is easily overfit: "
                   "headers, signatures, and quoting.")
opts['filtered'] = False
op.add_option("--dataset",
              action="store", type=str, default="dmoz-5",
              help="n_features when using the hashing vectorizer.")
opts['dataset'] = 'dmoz-5'
# Expose the hard-coded values with attribute access (opts.use_hashing, ...).
opts = ap.Namespace(**opts)
# (opts, args) = op.parse_args()
# if len(args) > 0:
#     op.error("this script takes no arguments.")
#     sys.exit(1)
#
# print(__doc__)
# op.print_help()
# print()
###############################################################################
# Load some categories from the training set
root_path = "/Users/yuhui.lin/work/fastText/data/"
# Map the dataset name to its data directory and its number of classes.
if opts.dataset == "dmoz-5":
    data_path = os.path.join(root_path, "TFR_5-fast")
    num_cats = 5
elif opts.dataset == "dmoz-10":
    data_path = os.path.join(root_path, "TFR_10-fast")
    num_cats = 10
elif opts.dataset == "ukwa":
    data_path = os.path.join(root_path, "TFR_ukwa-fast")
    num_cats = 10
else:
    raise ValueError(opts.dataset)
# With all_categories (the hard-coded default) no category filter is applied.
if opts.all_categories:
    categories = None
else:
    categories = [
        'alt.atheism',
        'talk.religion.misc',
        'comp.graphics',
        'sci.space',
    ]
if opts.filtered:
    remove = ('headers', 'footers', 'quotes')
else:
    remove = ()
# NOTE(review): data_path and remove are only consumed by the commented-out
# fetch_20newsgroups calls below -- confirm they are used elsewhere before
# removing them.
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
# data_train = fetch_20newsgroups(subset='train', categories=categories,
#                                 shuffle=True, random_state=42,
#                                 remove=remove)
#
# data_test = fetch_20newsgroups(subset='test', categories=categories,
#                                shuffle=True, random_state=42,
#                                remove=remove)
def svm(num_cats, train_data, test_data):
    """Vectorize labelled text lines, benchmark LinearSVC (L2/L1) and plot results.

    Args:
        num_cats: number of classes; class names are just "0".."num_cats-1".
        train_data, test_data: iterables of text lines. Each line encodes its
            label at character index 9 (int(exam[9])) and its text from index
            12 onward -- presumably fastText-style "__label__N , text" lines;
            confirm against the caller.

    Side effects: prints progress/metrics to stdout and shows a matplotlib
    bar chart. Reads the module-level `opts` namespace for configuration.
    Returns None.
    """
    # train_path = os.path.join(data_path, "train")
    # test_path = os.path.join(data_path, "test")
    data_train = {}
    data_train["data"] = []
    data_train["target"] = []
    data_train["target_names"] = [str(i) for i in range(num_cats)]
    data_test = {}
    data_test["data"] = []
    data_test["target"] = []
    # with open(train_path, 'r') as train_f, open(test_path) as test_f:
    #     train_data = train_f.readlines()
    #     test_data = test_f.readlines()
    # Split each line into (label, text) by fixed character positions.
    for exam in train_data:
        data_train["data"].append(exam[12:])
        # print(exam)
        data_train["target"].append(int(exam[9]))
    for exam in test_data:
        data_test["data"].append(exam[12:])
        data_test["target"].append(int(exam[9]))
    # Namespaces give the attribute access (.data/.target) the code below uses.
    data_train = ap.Namespace(**data_train)
    data_test = ap.Namespace(**data_test)
    print('data loaded')
    categories = data_train.target_names  # for case categories == None

    def size_mb(docs):
        # Total UTF-8 size of a document collection, in megabytes.
        return sum(len(s.encode('utf-8')) for s in docs) / 1e6
    data_train_size_mb = size_mb(data_train.data)
    data_test_size_mb = size_mb(data_test.data)
    print("%d documents - %0.3fMB (training set)" % (
        len(data_train.data), data_train_size_mb))
    print("%d documents - %0.3fMB (test set)" % (
        len(data_test.data), data_test_size_mb))
    print("%d categories" % len(categories))
    print(data_train.data[:10])
    # split a training set and a test set
    y_train, y_test = data_train.target, data_test.target
    print("Extracting features from the training data using a sparse vectorizer")
    t0 = time()
    if opts.use_hashing:
        # Stateless hashing: fast, no vocabulary, but feature names are lost.
        vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
                                       n_features=opts.n_features)
        X_train = vectorizer.transform(data_train.data)
    else:
        vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
                                     stop_words='english')
        X_train = vectorizer.fit_transform(data_train.data)
    duration = time() - t0
    print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
    print("n_samples: %d, n_features: %d" % X_train.shape)
    print()
    print("Extracting features from the test data using the same vectorizer")
    t0 = time()
    X_test = vectorizer.transform(data_test.data)
    duration = time() - t0
    print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
    print("n_samples: %d, n_features: %d" % X_test.shape)
    print()
    # mapping from integer feature name to original token string
    if opts.use_hashing:
        feature_names = None
    else:
        feature_names = vectorizer.get_feature_names()
    if opts.select_chi2:
        # Univariate feature selection on the training labels.
        print("Extracting %d best features by a chi-squared test" %
              opts.select_chi2)
        t0 = time()
        ch2 = SelectKBest(chi2, k=opts.select_chi2)
        X_train = ch2.fit_transform(X_train, y_train)
        X_test = ch2.transform(X_test)
        if feature_names:
            # keep selected feature names
            feature_names = [feature_names[i] for i
                             in ch2.get_support(indices=True)]
        print("done in %fs" % (time() - t0))
        print()
    if feature_names:
        feature_names = np.asarray(feature_names)

    def trim(s):
        """Trim string to fit on terminal (assuming 80-column display)"""
        return s if len(s) <= 80 else s[:77] + "..."

    ###########################################################################
    # Benchmark classifiers
    def benchmark(clf):
        # Fit `clf` on the closed-over X_train/y_train, evaluate on X_test,
        # print the requested reports, and return summary numbers.
        print('_' * 80)
        print("Training: ")
        print(clf)
        t0 = time()
        clf.fit(X_train, y_train)
        train_time = time() - t0
        print("train time: %0.3fs" % train_time)
        t0 = time()
        pred = clf.predict(X_test)
        test_time = time() - t0
        print("test time: %0.3fs" % test_time)
        score = metrics.accuracy_score(y_test, pred)
        print("accuracy: %0.3f" % score)
        if hasattr(clf, 'coef_'):
            print("dimensionality: %d" % clf.coef_.shape[1])
            print("density: %f" % density(clf.coef_))
            if opts.print_top10 and feature_names is not None:
                print("top 10 keywords per class:")
                for i, category in enumerate(categories):
                    # Highest-weight features for this class.
                    top10 = np.argsort(clf.coef_[i])[-10:]
                    print(trim("%s: %s"
                               % (category, " ".join(feature_names[top10]))))
            print()
        if opts.print_report:
            print("classification report:")
            print(metrics.classification_report(y_test, pred,
                                                target_names=categories))
        if opts.print_cm:
            print("confusion matrix:")
            print(metrics.confusion_matrix(y_test, pred))
        print()
        clf_descr = str(clf).split('(')[0]
        return clf_descr, score, train_time, test_time

    results = []
    for penalty in ["l2", "l1"]:
        print('=' * 80)
        print("%s penalty" % penalty.upper())
        # Train Liblinear model
        results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
                                           dual=False, tol=1e-3)))
    # make some plots
    indices = np.arange(len(results))
    # Transpose list of (name, score, train_time, test_time) tuples.
    results = [[x[i] for x in results] for i in range(4)]
    clf_names, score, training_time, test_time = results
    # Normalize times to [0, 1] so they fit on the same bar chart as scores.
    training_time = np.array(training_time) / np.max(training_time)
    test_time = np.array(test_time) / np.max(test_time)
    plt.figure(figsize=(12, 8))
    plt.title("Score")
    plt.barh(indices, score, .2, label="score", color='r')
    plt.barh(indices + .3, training_time, .2, label="training time", color='g')
    plt.barh(indices + .6, test_time, .2, label="test time", color='b')
    plt.yticks(())
    plt.legend(loc='best')
    plt.subplots_adjust(left=.25)
    plt.subplots_adjust(top=.95)
    plt.subplots_adjust(bottom=.05)
    for i, c in zip(indices, clf_names):
        plt.text(-.3, i, c)
    plt.show()
| [
"sklearn.metrics.classification_report",
"sklearn.feature_selection.SelectKBest",
"numpy.array",
"numpy.argsort",
"argparse.Namespace",
"sklearn.feature_extraction.text.HashingVectorizer",
"matplotlib.pyplot.barh",
"numpy.asarray",
"numpy.max",
"matplotlib.pyplot.yticks",
"sklearn.metrics.confus... | [((575, 667), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(levelname)s %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s %(levelname)s %(message)s')\n", (594, 667), False, 'import logging\n'), ((720, 734), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (732, 734), False, 'from optparse import OptionParser\n'), ((2407, 2427), 'argparse.Namespace', 'ap.Namespace', ([], {}), '(**opts)\n', (2419, 2427), True, 'import argparse as ap\n'), ((2821, 2858), 'os.path.join', 'os.path.join', (['root_path', '"""TFR_5-fast"""'], {}), "(root_path, 'TFR_5-fast')\n", (2833, 2858), False, 'import os\n'), ((4669, 4695), 'argparse.Namespace', 'ap.Namespace', ([], {}), '(**data_train)\n', (4681, 4695), True, 'import argparse as ap\n'), ((4712, 4737), 'argparse.Namespace', 'ap.Namespace', ([], {}), '(**data_test)\n', (4724, 4737), True, 'import argparse as ap\n'), ((5501, 5507), 'time.time', 'time', ([], {}), '()\n', (5505, 5507), False, 'from time import time\n'), ((6197, 6203), 'time.time', 'time', ([], {}), '()\n', (6201, 6203), False, 'from time import time\n'), ((9517, 9544), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (9527, 9544), True, 'import matplotlib.pyplot as plt\n'), ((9549, 9567), 'matplotlib.pyplot.title', 'plt.title', (['"""Score"""'], {}), "('Score')\n", (9558, 9567), True, 'import matplotlib.pyplot as plt\n'), ((9572, 9627), 'matplotlib.pyplot.barh', 'plt.barh', (['indices', 'score', '(0.2)'], {'label': '"""score"""', 'color': '"""r"""'}), "(indices, score, 0.2, label='score', color='r')\n", (9580, 9627), True, 'import matplotlib.pyplot as plt\n'), ((9631, 9708), 'matplotlib.pyplot.barh', 'plt.barh', (['(indices + 0.3)', 'training_time', '(0.2)'], {'label': '"""training time"""', 'color': '"""g"""'}), "(indices + 0.3, training_time, 0.2, label='training time', color='g')\n", (9639, 9708), True, 'import 
matplotlib.pyplot as plt\n'), ((9711, 9780), 'matplotlib.pyplot.barh', 'plt.barh', (['(indices + 0.6)', 'test_time', '(0.2)'], {'label': '"""test time"""', 'color': '"""b"""'}), "(indices + 0.6, test_time, 0.2, label='test time', color='b')\n", (9719, 9780), True, 'import matplotlib.pyplot as plt\n'), ((9783, 9797), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (9793, 9797), True, 'import matplotlib.pyplot as plt\n'), ((9802, 9824), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (9812, 9824), True, 'import matplotlib.pyplot as plt\n'), ((9829, 9859), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.25)'}), '(left=0.25)\n', (9848, 9859), True, 'import matplotlib.pyplot as plt\n'), ((9863, 9892), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.95)'}), '(top=0.95)\n', (9882, 9892), True, 'import matplotlib.pyplot as plt\n'), ((9896, 9928), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.05)'}), '(bottom=0.05)\n', (9915, 9928), True, 'import matplotlib.pyplot as plt\n'), ((10003, 10013), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10011, 10013), True, 'import matplotlib.pyplot as plt\n'), ((2924, 2962), 'os.path.join', 'os.path.join', (['root_path', '"""TFR_10-fast"""'], {}), "(root_path, 'TFR_10-fast')\n", (2936, 2962), False, 'import os\n'), ((5554, 5645), 'sklearn.feature_extraction.text.HashingVectorizer', 'HashingVectorizer', ([], {'stop_words': '"""english"""', 'non_negative': '(True)', 'n_features': 'opts.n_features'}), "(stop_words='english', non_negative=True, n_features=opts.\n n_features)\n", (5571, 5645), False, 'from sklearn.feature_extraction.text import HashingVectorizer\n'), ((5764, 5832), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'sublinear_tf': '(True)', 'max_df': '(0.5)', 'stop_words': '"""english"""'}), "(sublinear_tf=True, max_df=0.5, 
stop_words='english')\n", (5779, 5832), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((5944, 5950), 'time.time', 'time', ([], {}), '()\n', (5948, 5950), False, 'from time import time\n'), ((6269, 6275), 'time.time', 'time', ([], {}), '()\n', (6273, 6275), False, 'from time import time\n'), ((6754, 6760), 'time.time', 'time', ([], {}), '()\n', (6758, 6760), False, 'from time import time\n'), ((6775, 6812), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', (['chi2'], {'k': 'opts.select_chi2'}), '(chi2, k=opts.select_chi2)\n', (6786, 6812), False, 'from sklearn.feature_selection import SelectKBest, chi2\n'), ((7196, 7221), 'numpy.asarray', 'np.asarray', (['feature_names'], {}), '(feature_names)\n', (7206, 7221), True, 'import numpy as np\n'), ((7590, 7596), 'time.time', 'time', ([], {}), '()\n', (7594, 7596), False, 'from time import time\n'), ((7727, 7733), 'time.time', 'time', ([], {}), '()\n', (7731, 7733), False, 'from time import time\n'), ((7866, 7902), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_test', 'pred'], {}), '(y_test, pred)\n', (7888, 7902), False, 'from sklearn import metrics\n'), ((9408, 9431), 'numpy.array', 'np.array', (['training_time'], {}), '(training_time)\n', (9416, 9431), True, 'import numpy as np\n'), ((9434, 9455), 'numpy.max', 'np.max', (['training_time'], {}), '(training_time)\n', (9440, 9455), True, 'import numpy as np\n'), ((9472, 9491), 'numpy.array', 'np.array', (['test_time'], {}), '(test_time)\n', (9480, 9491), True, 'import numpy as np\n'), ((9494, 9511), 'numpy.max', 'np.max', (['test_time'], {}), '(test_time)\n', (9500, 9511), True, 'import numpy as np\n'), ((9978, 9998), 'matplotlib.pyplot.text', 'plt.text', (['(-0.3)', 'i', 'c'], {}), '(-0.3, i, c)\n', (9986, 9998), True, 'import matplotlib.pyplot as plt\n'), ((3026, 3066), 'os.path.join', 'os.path.join', (['root_path', '"""TFR_ukwa-fast"""'], {}), "(root_path, 'TFR_ukwa-fast')\n", (3038, 3066), False, 'import os\n'), 
((7652, 7658), 'time.time', 'time', ([], {}), '()\n', (7656, 7658), False, 'from time import time\n'), ((7789, 7795), 'time.time', 'time', ([], {}), '()\n', (7793, 7795), False, 'from time import time\n'), ((8553, 8621), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['y_test', 'pred'], {'target_names': 'categories'}), '(y_test, pred, target_names=categories)\n', (8582, 8621), False, 'from sklearn import metrics\n'), ((8755, 8793), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['y_test', 'pred'], {}), '(y_test, pred)\n', (8779, 8793), False, 'from sklearn import metrics\n'), ((9097, 9157), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'loss': '"""l2"""', 'penalty': 'penalty', 'dual': '(False)', 'tol': '(0.001)'}), "(loss='l2', penalty=penalty, dual=False, tol=0.001)\n", (9106, 9157), False, 'from sklearn.svm import LinearSVC\n'), ((7119, 7125), 'time.time', 'time', ([], {}), '()\n', (7123, 7125), False, 'from time import time\n'), ((8076, 8094), 'sklearn.utils.extmath.density', 'density', (['clf.coef_'], {}), '(clf.coef_)\n', (8083, 8094), False, 'from sklearn.utils.extmath import density\n'), ((8298, 8322), 'numpy.argsort', 'np.argsort', (['clf.coef_[i]'], {}), '(clf.coef_[i])\n', (8308, 8322), True, 'import numpy as np\n')] |
import scipy.stats as st
from sklearn import metrics
from sklearn.metrics import auc
import matplotlib.pyplot as plt
#import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import tensorflow_addons as tfa
import math
import sys
import os.path as osp
import numpy as np
import PIL.Image
import time
import keras
from keras import applications
from keras import backend as K
from keras.preprocessing import image
from tensorflow.keras.utils import to_categorical
import copy
def mean_ci(x):
    """Return (mean, ci_lower, ci_upper) for `x` using a 95% Student-t interval."""
    sample_mean = np.mean(x)
    lower, upper = st.t.interval(0.95, len(x) - 1, loc=sample_mean, scale=st.sem(x))
    return (sample_mean, lower, upper)
def printResults(model_preds, y_test):
    """Print accuracy, ROC AUC and mean confidence (with 95% CI) for predictions.

    model_preds and y_test are (N, 2) arrays; column 1 is taken as the
    positive-class score for the ROC curve.
    """
    accuracy = np.mean(np.round(model_preds)[:, 0] == y_test[:, 0])
    print('Test accuracy: %0.4f' % accuracy)
    # ROC/AUC on the positive-class probabilities.
    fpr, tpr, _ = metrics.roc_curve(y_test[:, 1], model_preds[:, 1])
    print('AUC: %0.4f' % auc(fpr, tpr))
    # Confidence = max predicted probability per example.
    mean_conf, ci_low, ci_high = mean_ci(np.max(model_preds, axis=1))
    print('Avg. Confidence: ' + '{0:.6f} '.format(mean_conf)
          + '({0:.6f}'.format(ci_low) + ' - {0:.6f})'.format(ci_high))
def deprocess_inception(y):
    """Map inception-style [-1, 1] pixel data back to [0, 1] (returns a copy).

    Args:
        y: array-like in [-1, 1].
    Returns:
        A float64 copy of `y`, shifted and scaled into [0, 1].
    """
    # BUG FIX: `np.float` was removed in NumPy 1.24; the builtin `float`
    # is the same dtype (float64) and works on every NumPy version.
    x = copy.copy(y).astype(float)
    x += 1.
    x /= 2.
    # x *= 255.
    return x
class Dermatology_image_loader(object):
    """Serves preprocessed dermatology image arrays saved as .npy files."""

    def __init__(self):
        # Preprocessed image tensors and labels produced by an earlier pipeline step.
        self.X_test = np.load('data/test_x_preprocess_sample.npy')
        self.y_test = np.load('data/test_y_sample.npy')
        self.X_train = np.load('data/train_x_preprocess_sample.npy')
        self.y_train = np.load('data/train_y_sample.npy')
        self.true_labels = self.y_test

    def training_random_minibatches(self, minibatch_size):
        """Shuffle the training set and split it into (X, y) minibatches.

        Any trailing examples that do not fill a whole minibatch are dropped.
        """
        n_examples = self.X_train.shape[0]
        order = np.random.permutation(n_examples)
        shuffled_X = self.X_train[order]
        shuffled_y = self.y_train[order]
        n_batches = int(n_examples / minibatch_size)
        return [
            (shuffled_X[b * minibatch_size:(b + 1) * minibatch_size],
             shuffled_y[b * minibatch_size:(b + 1) * minibatch_size])
            for b in range(n_batches)
        ]

    def get_test_images(self, n_images):
        """Sample `n_images` test images (with replacement) and their labels.

        Returns (images, indices, labels).
        """
        n_test = self.X_test.shape[0]
        picks = np.random.randint(low=0, high=n_test, size=n_images)
        return self.X_test[picks], picks, self.y_test[picks]

    def get_test_images_opp(self, target_label):
        """ returns test images with labels that are opposite of target_label """
        keep = np.argmax(self.y_test, axis=1) != target_label
        # indices of True
        return self.X_test[keep], self.y_test[keep], np.where(keep)[0]

    def get_all_test_images_labels(self):
        """Return the full test set and its labels."""
        return self.X_test, self.y_test
class ModelContainer():
    """ Encapsulates an Imagenet model, and methods for interacting with it. """

    def __init__(self, model_name, verbose=True, peace_mask=None, peace_mask_overlay=0.0):
        """Create an isolated TF1 graph/session and build the patched model.

        model_name: identifier; 'resnet2' additionally loads black-box weights
            in _make_model_and_ops.
        peace_mask: None, 'forward' or 'backward' (see _make_model_and_ops and
            _random_overlay for how each mode is applied).
        peace_mask_overlay: blend factor used when peace_mask == 'forward'.
        """
        # Peace Mask: None, "Forward", "Backward"
        self.model_name = model_name
        # Own graph + session so several containers can coexist in one process.
        self.graph = tf.Graph()
        self.sess = tf.Session(graph=self.graph)
        self.peace_mask = peace_mask
        # PATCH_SHAPE is a module-level constant defined elsewhere in this file.
        self.patch_shape = PATCH_SHAPE
        self._peace_mask_overlay = peace_mask_overlay
        self.load_model(verbose=verbose)

    def patch(self, new_patch=None):
        """Retrieve or set the adversarial patch.

        new_patch: The new patch to set, or None to get current patch.
        Returns: Itself if it set a new patch, or the current patch."""
        if new_patch is None:
            return self._run(self._clipped_patch)
        self._run(self._assign_patch, {self._patch_placeholder: new_patch})
        return self

    def reset_patch(self):
        """Reset the adversarial patch to all zeros."""
        self.patch(np.zeros(self.patch_shape))

    def train_step(self, images=None, target_ys=None, learning_rate=5.0, scale=(0.1, 1.0), dropout=None, patch_disguise=None, disguise_alpha=None):
        """Train the model for one step.

        Args:
            images: A batch of images to train on, it loads one if not present.
            target_ys: Onehot target vector, defaults to TARGET_ONEHOT
            learning_rate: Learning rate for this train step.
            scale: Either a scalar value for the exact scale, or a (min, max) tuple for the scale range.
        Returns: Loss on the target ys.

        NOTE(review): if `images` is passed explicitly, `minibatches` is never
        assigned and the loop below raises NameError -- presumably only the
        images=None path is exercised; confirm before relying on the argument.
        """
        # if images is None:
        #     # images = image_loader.get_images()
        #     images, random_indices, true_labels = image_loader.get_training_images()
        if images is None:
            # image_loader and BATCH_SIZE are module-level names defined elsewhere.
            minibatches = image_loader.training_random_minibatches(BATCH_SIZE)
        if target_ys is None:
            target_ys = TARGET_ONEHOT
        epoch_loss = 0
        for i, minibatch in enumerate(minibatches):
            minibatch_X, minibatch_y = minibatch
            feed_dict = {self._image_input: minibatch_X,
                         self._target_ys: target_ys,
                         self._learning_rate: learning_rate}
            if patch_disguise is not None:
                if disguise_alpha is None:
                    raise ValueError("You need disguise_alpha")
                feed_dict[self.patch_disguise] = patch_disguise
                feed_dict[self.disguise_alpha] = disguise_alpha
            loss, _ = self._run([self._loss, self._train_op], feed_dict,
                                scale=scale, dropout=dropout)
            print("(minibatch %s) loss: %s" % (i, loss))
            sys.stdout.flush()
            # Average the minibatch losses into one epoch-level loss.
            epoch_loss += loss / len(minibatches)
        return epoch_loss

    def inference_batch(self, target_label, images=None, target_ys=None, scale=None):
        """Report loss and label probabilities, and patched images for a batch.

        Args:
            target_label: Scalar target label (either 1 or 0) with which the patch was designed
            images: A batch of images to train on, it loads if not present.
            target_ys: The target_ys for loss calculation, TARGET_ONEHOT if not present.

        NOTE(review): if `images` is passed explicitly, `indices` is never
        assigned and the final return raises NameError -- confirm only the
        images=None path is used.
        """
        # target_y = np.argmax(target_ys, axis=1)[0]
        if images is None:
            # Evaluate on test images whose true label differs from target_label.
            images, true_labels, indices = image_loader.get_test_images_opp(target_label)
        n_images = images.shape[0]
        # Drop the remainder so every run uses full BATCH_SIZE batches.
        n_images = n_images // BATCH_SIZE * BATCH_SIZE
        if target_ys is None:
            # target_ys = TARGET_ONEHOT
            target_ys = gen_target_ys(target_label=target_label, batch_size=n_images)
        loss_per_example_arr, ps_arr, ims_arr = [], [], []
        for i in range(n_images // BATCH_SIZE):
            feed_dict = {self._image_input: images[i * BATCH_SIZE:(i + 1) * BATCH_SIZE],
                         self._target_ys: target_ys[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]}
            loss_per_example, ps, ims = self._run(
                [self._loss_per_example, self._probabilities, self._patched_input],
                feed_dict, scale=scale)
            loss_per_example_arr.append(loss_per_example)
            ps_arr.append(ps)
            ims_arr.append(ims)
        # Stitch the per-batch results back into single arrays.
        loss_per_example_arr = np.concatenate(loss_per_example_arr, axis=0)
        ps_arr = np.concatenate(ps_arr, axis=0)
        ims_arr = np.concatenate(ims_arr, axis=0)
        return loss_per_example_arr, ps_arr, ims_arr, indices[:n_images]

    def load_model(self, verbose=True):
        # Build the graph from scratch (no pre-set patch, Keras preprocessing on).
        patch = None
        keras_mode = True
        self._make_model_and_ops(None, keras_mode, patch, verbose)

    def _run(self, target, feed_dict=None, scale=None, dropout=None):
        """Run `target` in this container's session, filling in the common feeds.

        scale may be a scalar (exact scale) or a (min, max) tuple.
        """
        K.set_session(self.sess)
        if feed_dict is None:
            feed_dict = {}
        # Always evaluate with Keras layers in inference mode.
        feed_dict[self.learning_phase] = False
        if scale is not None:
            if isinstance(scale, (tuple, list)):
                scale_min, scale_max = scale
            else:
                scale_min, scale_max = (scale, scale)
            feed_dict[self.scale_min] = scale_min
            feed_dict[self.scale_max] = scale_max
        if dropout is not None:
            feed_dict[self.dropout] = dropout
        return self.sess.run(target, feed_dict=feed_dict)

    def _make_model_and_ops(self, M, keras_mode, patch_val, verbose):
        """Build the full TF1 graph: patch variable, overlay, model, loss, train op.

        NOTE(review): the `M` parameter is unused in this body -- confirm it is
        a leftover before removing it.
        """
        def clip_to_valid_image(x):
            # Pixel values in this pipeline live in [-1, 1].
            return tf.clip_by_value(x, clip_value_min=-1., clip_value_max=1.)
        start = time.time()
        K.set_session(self.sess)
        with self.sess.graph.as_default():
            self.learning_phase = K.learning_phase()
            image_shape = (224, 224, 3)
            self._image_input = keras.layers.Input(shape=image_shape)
            # Patch-scale range, overridable per run via _run(scale=...).
            # SCALE_MIN / SCALE_MAX / BATCH_SIZE are module-level constants.
            self.scale_min = tf.placeholder_with_default(SCALE_MIN, [])
            self.scale_max = tf.placeholder_with_default(SCALE_MAX, [])
            self._scales = tf.random_uniform([BATCH_SIZE], minval=self.scale_min,
                                              maxval=self.scale_max)
            image_input = self._image_input
            # Optional "disguise" target the patch can be regularized towards.
            self.patch_disguise = tf.placeholder_with_default(tf.zeros(self.patch_shape),
                                                             shape=self.patch_shape)
            self.disguise_alpha = tf.placeholder_with_default(0.0, [])
            # The adversarial patch itself -- the only trainable variable.
            patch = tf.get_variable("patch", self.patch_shape, dtype=tf.float32,
                                    initializer=tf.zeros_initializer)
            self._patch_placeholder = tf.placeholder(dtype=tf.float32,
                                                      shape=self.patch_shape)
            self._assign_patch = tf.assign(patch, self._patch_placeholder)
            modified_patch = patch
            if self.peace_mask == 'forward':
                # Blend a peace-sign overlay into the patch itself.
                mask = get_peace_mask(self.patch_shape)
                modified_patch = patch * (1 - mask) - np.ones(self.patch_shape) \
                    * mask + (1 + patch) * mask * self._peace_mask_overlay
            self._clipped_patch = clip_to_valid_image(modified_patch)
            if keras_mode:
                image_input = tf.image.resize_images(image_input, (224, 224))
                image_shape = (224, 224, 3)
                modified_patch = tf.image.resize_images(patch, (224, 224))
            self.dropout = tf.placeholder_with_default(1.0, [])
            patch_with_dropout = tf.nn.dropout(modified_patch, keep_prob=self.dropout)
            patched_input = clip_to_valid_image(self._random_overlay(image_input,
                                                                     patch_with_dropout,
                                                                     image_shape))
            # Since this is a return point, we do it before the Keras color shifts
            # (but after the resize, so we can see what is really going on)
            self._patched_input = patched_input
            # Labels for our attack (e.g. always a toaster)
            self._target_ys = tf.placeholder(tf.float32, shape=(None, 2))
            # Load the model
            model = keras.models.load_model('models/wb_model.h5')
            if self.model_name == 'resnet2':
                model.load_weights('models/bb_weights.hdf5')
            # Rewire the loaded model so its input is the patched image tensor.
            new_input_layer = keras.layers.Input(tensor=patched_input)
            model.layers.pop(0)
            output = model(patched_input)
            model = keras.models.Model(inputs=new_input_layer, outputs=output)
            self._probabilities = model.outputs[0]
            # Recover the pre-softmax logits from the probability op's input.
            logits = self._probabilities.op.inputs[0]
            self.model = model
            self._loss_per_example = tf.nn.softmax_cross_entropy_with_logits(
                labels=self._target_ys,
                logits=logits
            )
            self._target_loss = tf.reduce_mean(self._loss_per_example)
            # L2 pull of the patch towards the disguise image (zero by default).
            self._patch_loss = tf.nn.l2_loss(patch - self.patch_disguise) * \
                self.disguise_alpha
            self._loss = self._target_loss + self._patch_loss
            # Train our attack by only training on the patch variable
            self._learning_rate = tf.placeholder(tf.float32)
            self._train_op = tf.train.GradientDescentOptimizer(self._learning_rate) \
                .minimize(self._loss, var_list=[patch])
            if patch_val is not None:
                self.patch(patch_val)
            else:
                self.reset_patch()
        elapsed = time.time() - start
        if verbose:
            print("Finished loading {}, took {:.0f}s".format(self.model_name, elapsed))

    def _pad_and_tile_patch(self, patch, image_shape):
        # Calculate the exact padding
        # Image shape req'd because it is sometimes 299 sometimes 224
        # padding is the amount of space available on either side of the centered patch
        # WARNING: This has been integer-rounded and could be off by one.
        # See _pad_and_tile_patch for usage
        return tf.stack([patch] * BATCH_SIZE)

    def _random_overlay(self, imgs, patch, image_shape):
        """Augment images with random rotation, transformation.

        Image: BATCHx299x299x3
        Patch: 50x50x3
        """
        # Circular mask restricts the patch to a disc.
        image_mask = _circle_mask(image_shape)
        if self.peace_mask == 'backward':
            peace_mask = get_peace_mask(image_shape)
            image_mask = (image_mask * peace_mask).astype(np.float32)
        image_mask = tf.stack([image_mask] * BATCH_SIZE)
        padded_patch = tf.stack([patch] * BATCH_SIZE)
        transform_vecs = []
        for i in range(BATCH_SIZE):
            # Shift and scale the patch for each image in the batch
            random_xform = tf.py_func(_random_transform,
                                      [self.scale_min, self.scale_max, image_shape[0]],
                                      tf.float32)
            random_xform.set_shape([8])
            transform_vecs.append(random_xform)
        # image_mask = tf.contrib.image.transform(image_mask, transform_vecs, "BILINEAR")
        # padded_patch = tf.contrib.image.transform(padded_patch, transform_vecs, "BILINEAR")
        image_mask = tfa.image.transform(image_mask, transform_vecs, "bilinear")
        padded_patch = tfa.image.transform(padded_patch, transform_vecs, "bilinear")
        inverted_mask = (1 - image_mask)
        # Composite: original pixels outside the mask, patch pixels inside.
        return imgs * inverted_mask + padded_patch * image_mask
def _convert(im):
return ((im + 1) * 127.5).astype(np.uint8)
def show(im):
    """Display a [-1, 1]-ranged image (converted to uint8) without axes."""
    plt.axis('off')
    plt.imshow(_convert(im), interpolation="nearest")
    plt.show()
def load_image(image_path, size=299):
    """Load an image, resize it to `size` x `size`, and scale pixels to [-1, 1].

    PNG files are read with an alpha channel (4 channels), everything else as
    3 channels; only the first three channels are returned either way.
    """
    im = PIL.Image.open(image_path)
    # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same
    # filter and has existed since Pillow 2.7.
    im = im.resize((size, size), PIL.Image.LANCZOS)
    if image_path.endswith('.png'):
        ch = 4
    else:
        ch = 3
    # The image is square after the resize, so the (size[0], size[1]) order
    # of the reshape is safe here.
    im = np.array(im.getdata()).reshape(im.size[0], im.size[1], ch)[:, :, :3]
    return im / 127.5 - 1
def _transform_vector(width, x_shift, y_shift, im_scale, rot_in_degrees):
"""
If one row of transforms is [a0, a1, a2, b0, b1, b2, c0, c1],
then it maps the output point (x, y) to a transformed input point
(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k),
where k = c0 x + c1 y + 1.
The transforms are inverted compared to the transform mapping input points to output points.
"""
rot = float(rot_in_degrees) / 90. * (math.pi/2)
# Standard rotation matrix
# (use negative rot because tf.contrib.image.transform will do the inverse)
rot_matrix = np.array(
[[math.cos(-rot), -math.sin(-rot)],
[math.sin(-rot), math.cos(-rot)]]
)
# Scale it
# (use inverse scale because tf.contrib.image.transform will do the inverse)
inv_scale = 1. / im_scale
xform_matrix = rot_matrix * inv_scale
a0, a1 = xform_matrix[0]
b0, b1 = xform_matrix[1]
# At this point, the image will have been rotated around the top left corner,
# rather than around the center of the image.
# To fix this, we will see where the center of the image got sent by our transform,
# and then undo that as part of the translation we apply.
x_origin = float(width) / 2
y_origin = float(width) / 2
x_origin_shifted, y_origin_shifted = np.matmul(
xform_matrix,
np.array([x_origin, y_origin]),
)
x_origin_delta = x_origin - x_origin_shifted
y_origin_delta = y_origin - y_origin_shifted
# Combine our desired shifts with the rotation-induced undesirable shift
a2 = x_origin_delta - (x_shift/(2*im_scale))
b2 = y_origin_delta - (y_shift/(2*im_scale))
# Return these values in the order that tf.contrib.image.transform expects
return np.array([a0, a1, a2, b0, b1, b2, 0, 0]).astype(np.float32)
def _random_transform(scale_min, scale_max, width):
    """Draw a random scale, shift and rotation and return its transform vector.

    The shift range shrinks as the scale grows so the patch stays on-image;
    the rotation is bounded by the module-level MAX_ROTATION constant.
    """
    scale = np.random.uniform(low=scale_min, high=scale_max)
    slack = (1 - scale) * width
    dx = np.random.uniform(-slack, slack)
    dy = np.random.uniform(-slack, slack)
    angle = np.random.uniform(-MAX_ROTATION, MAX_ROTATION)
    return _transform_vector(width,
                             x_shift=dx,
                             y_shift=dy,
                             im_scale=scale,
                             rot_in_degrees=angle)
def test_random_transform(scale_min=0.5, scale_max=1.0):
    """
    Visual smoke test: applies a random transform to an all-ones image
    and displays the result.
    Scales the image between min_scale and max_scale.
    """
    img_shape = [100, 100, 3]
    img = np.ones(img_shape)
    sess = tf.Session()
    image_in = tf.placeholder(dtype=tf.float32, shape=img_shape)
    width = img_shape[0]
    # BUG FIX: the original referenced an undefined name `image_shape`
    # (NameError at call time) and hard-coded 1.0 instead of honoring the
    # scale_max parameter; the local is `img_shape` and the default
    # scale_max=1.0 keeps prior behavior for existing callers.
    random_xform = tf.py_func(_random_transform, [scale_min, scale_max, width],
                              tf.float32)
    random_xform.set_shape([8])
    # output = tf.contrib.image.transform(image_in, random_xform , "BILINEAR")
    output = tfa.image.transform(image_in, random_xform, "bilinear")
    xformed_img = sess.run(output, feed_dict={
        image_in: img
    })
    show(xformed_img)
def get_peace_mask(shape):
    """Load the peace-sign PNG and return its alpha channel as a [0, 1] mask.

    The mask is broadcast to `shape` (H, W, C). DATA_DIR is a module-level
    constant defined elsewhere in this file.
    """
    path = osp.join(DATA_DIR, "peace_sign.png")
    pic = PIL.Image.open(path)
    # BUG FIX: Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same
    # filter and has existed since Pillow 2.7.
    pic = pic.resize(shape[:2], PIL.Image.LANCZOS)
    if path.endswith('.png'):
        ch = 4
    else:
        ch = 3
    pic = np.array(pic.getdata()).reshape(pic.size[0], pic.size[1], ch)
    pic = pic / 127.5 - 1
    # Keep only the alpha channel (the path always ends in .png, so ch == 4).
    pic = pic[:, :, 3]
    # Map alpha from [-1, 1] back to [0, 1].
    peace_mask = (pic + 1.0) / 2
    peace_mask = np.expand_dims(peace_mask, 2)
    peace_mask = np.broadcast_to(peace_mask, shape)
    return peace_mask
def _circle_mask(shape, sharpness = 40):
"""Return a circular mask of a given shape"""
assert shape[0] == shape[1], "circle_mask received a bad shape: " + shape
diameter = shape[0]
x = np.linspace(-1, 1, diameter)
y = np.linspace(-1, 1, diameter)
xx, yy = np.meshgrid(x, y, sparse=True)
z = (xx**2 + yy**2) ** sharpness
mask = 1 - np.clip(z, -1, 1)
mask = np.expand_dims(mask, axis=2)
mask = np.broadcast_to(mask, shape).astype(np.float32)
return mask
def gen_target_ys(batch_size, target_label=None):
    """Return a (batch_size, 2) one-hot array for `target_label`.

    Falls back to the module-level TARGET_LABEL when target_label is None.
    """
    label = TARGET_LABEL if target_label is None else target_label
    one_hot = np.zeros(2)
    one_hot[label] = 1.0
    return np.tile(one_hot, (batch_size, 1))
def _convert(im):
    """Map [-1, 1] image data to uint8 pixel values in [0, 255]."""
    # NOTE(review): duplicate of the `_convert` defined earlier in this file.
    return ((im + 1) * 127.5).astype(np.uint8)
def show(im):
    """Display a [-1, 1]-ranged image (converted to uint8) without axes."""
    # NOTE(review): duplicate of the `show` defined earlier in this file.
    plt.axis('off')
    plt.imshow(_convert(im), interpolation="nearest")
    plt.show()
def show_patch(model_or_image):
    """Display a patch (a raw array, or a container's current patch) inside a circular mask."""
    if hasattr(model_or_image, 'patch'):
        # A model container was passed; recurse with its current patch array.
        return show_patch(model_or_image.patch())
    disc = _circle_mask((299, 299, 3))
    # Patch pixels inside the disc, white (1.0) outside.
    show(disc * model_or_image + (1 - disc))
def show_patched_image(im, probs_patched_image, probs_original_image, true_label, image_index):
    """Display a patched image annotated with model predictions and labels.

    Args:
        im: Image in [-1, 1] range.
        probs_patched_image: Model probabilities for the patched image.
        probs_original_image: Model probabilities for the unpatched image.
        true_label: Integer ground-truth label.
        image_index: Index of the image in the test set.
    """
    lines = [
        'Model prediction (patched image): '
        + np.array2string(probs_patched_image, separator=', '),
        'Model prediction (original image): '
        + np.array2string(probs_original_image, separator=', '),
        'True label: %d' % true_label,
        'Image index: %d' % image_index,
    ]
    caption = '\n'.join(lines)
    plt.axis('off')
    plt.imshow(_convert(im), interpolation="nearest")
    plt.text(100, -5, caption,
             horizontalalignment='center',
             verticalalignment='bottom')
    plt.show()
def attack(model, patch, target_label, n_show=5, scale=0.5, show_indices=None,
           predict_original=False):
    """Apply the patch, predict on patched (and optionally unpatched) images,
    compute the attack success rate, and plot the resulting patched images.

    Args:
        model: Model used for prediction (ModelContainer object).
        patch: Pretrained patch; may come from a different model.
        target_label: Scalar target label (either 1 or 0) the patch was
            designed for.
        n_show: Number of images to display.
        scale: Size of the patch relative to the image.
        show_indices: Optional test-set indices to display in addition.
        predict_original: If True, predictions for unpatched images are
            recomputed; otherwise cached predictions are loaded from disk.

    Returns:
        probs_patched_images: Model probabilities for the patched images.
        probs_original_images: Model probabilities for the unpatched images.
        indices: Indices used to shuffle the test images.
        true_labels: True labels of the test images.
        winp: Attack success rate.
    """
    model.reset_patch()
    model.patch(patch)
    # random_indices are the indices for the batch being reported
    loss_per_example, probs_patched_images, patched_imgs, indices = \
        model.inference_batch(scale=scale, target_label=target_label)
    if predict_original:
        probs_original_images, true_labels = predict_original_images()
    else:
        # Fall back to cached predictions saved by a previous run.
        file_name = model.model_name + '_model_prediction_original_test_images.npy'
        probs_original_images = np.load(file_name)
        probs_original_images = probs_original_images[indices]
    # NOTE(review): this overwrites the full-test-set labels returned by
    # predict_original_images() above with the batch-aligned subset.
    true_labels = np.argmax(image_loader.y_test[indices], axis=1)
    # NOTE(review): `loss` is computed but never used or returned.
    loss = np.mean(loss_per_example)
    n_images = len(indices)
    # Attack succeeds when the patched prediction equals the target label.
    winp = (np.argmax(probs_patched_images, axis=1) == target_label).sum() / n_images
    for i in range(n_show):
        show_patched_image(patched_imgs[i], probs_patched_images[i],
                           probs_original_images[i], true_labels[i], indices[i])
    if show_indices:
        for ind in show_indices:
            # Find the position of the requested test-set index in `indices`
            #print(ind,indices)
            i = np.where(indices == ind)[0][0]
            show_patched_image(patched_imgs[i], probs_patched_images[i],
                               probs_original_images[i], true_labels[i], indices[i])
    return probs_patched_images, probs_original_images, indices, true_labels, winp
def predict_original_images(indices = None):
    """Predict class probabilities for all (unpatched) test images.

    NOTE(review): the `indices` parameter is currently unused -- the full
    test set is always predicted; confirm whether subsetting was intended.

    Returns:
        (model_prediction_original_image, true_labels): probabilities from
        the saved white-box keras model and the integer test labels.
    """
    sess = tf.Session()
    with sess.as_default():
        model = keras.models.load_model('models/wb_model.h5')
    X_test, y_test = image_loader.get_all_test_images_labels()
    # probability prediction
    model_prediction_original_image = model.predict(X_test)
    # convert from onehot to 0, 1 label
    true_labels = np.argmax(y_test, axis=1)
    return model_prediction_original_image, true_labels
def train(model, target_label=1, epochs=1, learning_rate=5.0):
    """Learn the adversarial patch for target_label.

    Args:
        model: Model to be trained (ModelContainer object).
        target_label: Target label for which the patch will be trained.
        epochs: Number of iterations through the training set.
        learning_rate: Step size passed to the model's train_step.

    Returns:
        None. The trained patch can be accessed via model.patch().
    """
    model.reset_patch()
    one_hot_targets = gen_target_ys(target_label=target_label, batch_size=BATCH_SIZE)
    for epoch in range(epochs):
        loss = model.train_step(target_ys=one_hot_targets,
                                scale=(0.1, 1.0),
                                learning_rate=learning_rate)
        print("Loss after epoch %s: %s" % (epoch, loss))
def attack_combined(model, patch_for_0, patch_for_1, n_show=1, scale=0.4,
                    show_indices0=None, show_indices1=None, predict_original=False):
    """A wrapper for attack().

    Runs attack() twice, once with target label 0 and once with target
    label 1, then concatenates the results.

    Args:
        model: Target model for the attack (ModelContainer object).
        patch_for_0: Pretrained (target_label=0) patch, possibly from a
            different model (299 x 299 x 3 np array).
        patch_for_1: Pretrained (target_label=1) patch, same shape.
        n_show: Number of images to display per attack.
        scale: Size of the patch relative to the image.
        show_indices0: Test-set indices to show for the target-0 attack.
        show_indices1: Test-set indices to show for the target-1 attack.
        predict_original: If True, predictions for unpatched images are
            recomputed instead of loaded from cache.

    Returns:
        probs_patched_images: Probabilities for the combined patched images.
        probs_original_images: Probabilities for the combined unpatched images.
        indices: Indices used to shuffle the test images.
        true_labels: True labels of the test images.
        winp: Combined attack success rate (weighted by batch sizes).
    """
    # Attack with target_label = 0
    probs_patched_images0, probs_original_images0, indices0, true_labels0, winp0 = \
        attack(model, patch_for_0, target_label=0, n_show=n_show, scale=scale,
               show_indices=show_indices0, predict_original=predict_original)
    # Attack with target_label = 1
    probs_patched_images1, probs_original_images1, indices1, true_labels1, winp1 = \
        attack(model, patch_for_1, target_label=1, n_show=n_show, scale=scale,
               show_indices=show_indices1, predict_original=predict_original)
    # Concatenate the results of two attacks (order has to be reversed)
    probs_patched_images = np.concatenate([probs_patched_images1,
                                          probs_patched_images0], axis=0)
    probs_original_images = np.concatenate([probs_original_images1,
                                           probs_original_images0], axis=0)
    indices = np.concatenate([indices1, indices0], axis=0)
    true_labels = np.concatenate([true_labels1, true_labels0], axis=0)
    # n_images0 / n_images1: batch sizes of the target-0 / target-1 attacks,
    # used to weight the combined success rate.
    n_images0 = probs_patched_images0.shape[0]
    n_images1 = probs_patched_images1.shape[0]
    winp = (winp0 * n_images0 + winp1 * n_images1) / (n_images0 + n_images1)
    return probs_patched_images, probs_original_images, indices, true_labels, winp
# Global variables
image_loader = Dermatology_image_loader()  # provides the dermatology train/test images
TARGET_LABEL = 1  # default class the patch tries to force (used by gen_target_ys)
PATCH_SHAPE = (299, 299, 3)  # patch image dimensions (H, W, C)
BATCH_SIZE = 8  # inference/training batch size
TARGET_ONEHOT = gen_target_ys(BATCH_SIZE)  # one-hot targets for the default label
SCALE_MIN = 0.3  # minimum patch scale relative to the image
SCALE_MAX = 1.5  # maximum patch scale relative to the image
MAX_ROTATION = 22.5 | [
"numpy.clip",
"tensorflow.compat.v1.disable_v2_behavior",
"sklearn.metrics.auc",
"numpy.array2string",
"keras.backend.learning_phase",
"math.cos",
"numpy.array",
"sklearn.metrics.roc_curve",
"scipy.stats.sem",
"tensorflow.compat.v1.zeros",
"tensorflow.compat.v1.train.GradientDescentOptimizer",
... | [((176, 200), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (198, 200), True, 'import tensorflow.compat.v1 as tf\n'), ((528, 538), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (535, 538), True, 'import numpy as np\n'), ((810, 860), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['y_test[:, 1]', 'model_preds[:, 1]'], {}), '(y_test[:, 1], model_preds[:, 1])\n', (827, 860), False, 'from sklearn import metrics\n'), ((875, 888), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (878, 888), False, 'from sklearn.metrics import auc\n'), ((14562, 14577), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (14570, 14577), True, 'import matplotlib.pyplot as plt\n'), ((14636, 14646), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14644, 14646), True, 'import matplotlib.pyplot as plt\n'), ((16859, 16907), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'scale_min', 'high': 'scale_max'}), '(low=scale_min, high=scale_max)\n', (16876, 16907), True, 'import numpy as np\n'), ((16971, 17035), 'numpy.random.uniform', 'np.random.uniform', (['(-padding_after_scaling)', 'padding_after_scaling'], {}), '(-padding_after_scaling, padding_after_scaling)\n', (16988, 17035), True, 'import numpy as np\n'), ((17050, 17114), 'numpy.random.uniform', 'np.random.uniform', (['(-padding_after_scaling)', 'padding_after_scaling'], {}), '(-padding_after_scaling, padding_after_scaling)\n', (17067, 17114), True, 'import numpy as np\n'), ((17125, 17171), 'numpy.random.uniform', 'np.random.uniform', (['(-MAX_ROTATION)', 'MAX_ROTATION'], {}), '(-MAX_ROTATION, MAX_ROTATION)\n', (17142, 17171), True, 'import numpy as np\n'), ((17599, 17617), 'numpy.ones', 'np.ones', (['img_shape'], {}), '(img_shape)\n', (17606, 17617), True, 'import numpy as np\n'), ((17630, 17642), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (17640, 17642), True, 'import tensorflow.compat.v1 as tf\n'), ((17658, 
17707), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': 'img_shape'}), '(dtype=tf.float32, shape=img_shape)\n', (17672, 17707), True, 'import tensorflow.compat.v1 as tf\n'), ((17752, 17827), 'tensorflow.compat.v1.py_func', 'tf.py_func', (['_random_transform', '[scale_min, 1.0, image_shape[0]]', 'tf.float32'], {}), '(_random_transform, [scale_min, 1.0, image_shape[0]], tf.float32)\n', (17762, 17827), True, 'import tensorflow.compat.v1 as tf\n'), ((17988, 18043), 'tensorflow_addons.image.transform', 'tfa.image.transform', (['image_in', 'random_xform', '"""bilinear"""'], {}), "(image_in, random_xform, 'bilinear')\n", (18007, 18043), True, 'import tensorflow_addons as tfa\n'), ((18179, 18215), 'os.path.join', 'osp.join', (['DATA_DIR', '"""peace_sign.png"""'], {}), "(DATA_DIR, 'peace_sign.png')\n", (18187, 18215), True, 'import os.path as osp\n'), ((18540, 18569), 'numpy.expand_dims', 'np.expand_dims', (['peace_mask', '(2)'], {}), '(peace_mask, 2)\n', (18554, 18569), True, 'import numpy as np\n'), ((18587, 18621), 'numpy.broadcast_to', 'np.broadcast_to', (['peace_mask', 'shape'], {}), '(peace_mask, shape)\n', (18602, 18621), True, 'import numpy as np\n'), ((18849, 18877), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'diameter'], {}), '(-1, 1, diameter)\n', (18860, 18877), True, 'import numpy as np\n'), ((18886, 18914), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'diameter'], {}), '(-1, 1, diameter)\n', (18897, 18914), True, 'import numpy as np\n'), ((18928, 18958), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'sparse': '(True)'}), '(x, y, sparse=True)\n', (18939, 18958), True, 'import numpy as np\n'), ((19041, 19069), 'numpy.expand_dims', 'np.expand_dims', (['mask'], {'axis': '(2)'}), '(mask, axis=2)\n', (19055, 19069), True, 'import numpy as np\n'), ((19310, 19321), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (19318, 19321), True, 'import numpy as np\n'), ((19365, 19400), 'numpy.tile', 'np.tile', 
(['y_one_hot', '(batch_size, 1)'], {}), '(y_one_hot, (batch_size, 1))\n', (19372, 19400), True, 'import numpy as np\n'), ((19511, 19526), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (19519, 19526), True, 'import matplotlib.pyplot as plt\n'), ((19585, 19595), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19593, 19595), True, 'import matplotlib.pyplot as plt\n'), ((20317, 20332), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (20325, 20332), True, 'import matplotlib.pyplot as plt\n'), ((20391, 20477), 'matplotlib.pyplot.text', 'plt.text', (['(100)', '(-5)', 'text'], {'horizontalalignment': '"""center"""', 'verticalalignment': '"""bottom"""'}), "(100, -5, text, horizontalalignment='center', verticalalignment=\n 'bottom')\n", (20399, 20477), True, 'import matplotlib.pyplot as plt\n'), ((20493, 20503), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20501, 20503), True, 'import matplotlib.pyplot as plt\n'), ((22320, 22345), 'numpy.mean', 'np.mean', (['loss_per_example'], {}), '(loss_per_example)\n', (22327, 22345), True, 'import numpy as np\n'), ((23137, 23149), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (23147, 23149), True, 'import tensorflow.compat.v1 as tf\n'), ((26144, 26214), 'numpy.concatenate', 'np.concatenate', (['[probs_patched_images1, probs_patched_images0]'], {'axis': '(0)'}), '([probs_patched_images1, probs_patched_images0], axis=0)\n', (26158, 26214), True, 'import numpy as np\n'), ((26286, 26358), 'numpy.concatenate', 'np.concatenate', (['[probs_original_images1, probs_original_images0]'], {'axis': '(0)'}), '([probs_original_images1, probs_original_images0], axis=0)\n', (26300, 26358), True, 'import numpy as np\n'), ((26418, 26462), 'numpy.concatenate', 'np.concatenate', (['[indices1, indices0]'], {'axis': '(0)'}), '([indices1, indices0], axis=0)\n', (26432, 26462), True, 'import numpy as np\n'), ((26481, 26533), 'numpy.concatenate', 'np.concatenate', 
(['[true_labels1, true_labels0]'], {'axis': '(0)'}), '([true_labels1, true_labels0], axis=0)\n', (26495, 26533), True, 'import numpy as np\n'), ((944, 971), 'numpy.max', 'np.max', (['model_preds'], {'axis': '(1)'}), '(model_preds, axis=1)\n', (950, 971), True, 'import numpy as np\n'), ((1313, 1357), 'numpy.load', 'np.load', (['"""data/test_x_preprocess_sample.npy"""'], {}), "('data/test_x_preprocess_sample.npy')\n", (1320, 1357), True, 'import numpy as np\n'), ((1380, 1413), 'numpy.load', 'np.load', (['"""data/test_y_sample.npy"""'], {}), "('data/test_y_sample.npy')\n", (1387, 1413), True, 'import numpy as np\n'), ((1437, 1482), 'numpy.load', 'np.load', (['"""data/train_x_preprocess_sample.npy"""'], {}), "('data/train_x_preprocess_sample.npy')\n", (1444, 1482), True, 'import numpy as np\n'), ((1506, 1540), 'numpy.load', 'np.load', (['"""data/train_y_sample.npy"""'], {}), "('data/train_y_sample.npy')\n", (1513, 1540), True, 'import numpy as np\n'), ((1701, 1725), 'numpy.random.permutation', 'np.random.permutation', (['N'], {}), '(N)\n', (1722, 1725), True, 'import numpy as np\n'), ((2253, 2305), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'n_test', 'size': 'n_images'}), '(low=0, high=n_test, size=n_images)\n', (2270, 2305), True, 'import numpy as np\n'), ((3258, 3268), 'tensorflow.compat.v1.Graph', 'tf.Graph', ([], {}), '()\n', (3266, 3268), True, 'import tensorflow.compat.v1 as tf\n'), ((3289, 3317), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {'graph': 'self.graph'}), '(graph=self.graph)\n', (3299, 3317), True, 'import tensorflow.compat.v1 as tf\n'), ((7294, 7338), 'numpy.concatenate', 'np.concatenate', (['loss_per_example_arr'], {'axis': '(0)'}), '(loss_per_example_arr, axis=0)\n', (7308, 7338), True, 'import numpy as np\n'), ((7356, 7386), 'numpy.concatenate', 'np.concatenate', (['ps_arr'], {'axis': '(0)'}), '(ps_arr, axis=0)\n', (7370, 7386), True, 'import numpy as np\n'), ((7405, 7436), 'numpy.concatenate', 'np.concatenate', 
(['ims_arr'], {'axis': '(0)'}), '(ims_arr, axis=0)\n', (7419, 7436), True, 'import numpy as np\n'), ((7745, 7769), 'keras.backend.set_session', 'K.set_session', (['self.sess'], {}), '(self.sess)\n', (7758, 7769), True, 'from keras import backend as K\n'), ((8514, 8525), 'time.time', 'time.time', ([], {}), '()\n', (8523, 8525), False, 'import time\n'), ((8534, 8558), 'keras.backend.set_session', 'K.set_session', (['self.sess'], {}), '(self.sess)\n', (8547, 8558), True, 'from keras import backend as K\n'), ((13062, 13092), 'tensorflow.compat.v1.stack', 'tf.stack', (['([patch] * BATCH_SIZE)'], {}), '([patch] * BATCH_SIZE)\n', (13070, 13092), True, 'import tensorflow.compat.v1 as tf\n'), ((13515, 13550), 'tensorflow.compat.v1.stack', 'tf.stack', (['([image_mask] * BATCH_SIZE)'], {}), '([image_mask] * BATCH_SIZE)\n', (13523, 13550), True, 'import tensorflow.compat.v1 as tf\n'), ((13574, 13604), 'tensorflow.compat.v1.stack', 'tf.stack', (['([patch] * BATCH_SIZE)'], {}), '([patch] * BATCH_SIZE)\n', (13582, 13604), True, 'import tensorflow.compat.v1 as tf\n'), ((14229, 14288), 'tensorflow_addons.image.transform', 'tfa.image.transform', (['image_mask', 'transform_vecs', '"""bilinear"""'], {}), "(image_mask, transform_vecs, 'bilinear')\n", (14248, 14288), True, 'import tensorflow_addons as tfa\n'), ((14310, 14371), 'tensorflow_addons.image.transform', 'tfa.image.transform', (['padded_patch', 'transform_vecs', '"""bilinear"""'], {}), "(padded_patch, transform_vecs, 'bilinear')\n", (14329, 14371), True, 'import tensorflow_addons as tfa\n'), ((16327, 16357), 'numpy.array', 'np.array', (['[x_origin, y_origin]'], {}), '([x_origin, y_origin])\n', (16335, 16357), True, 'import numpy as np\n'), ((19012, 19029), 'numpy.clip', 'np.clip', (['z', '(-1)', '(1)'], {}), '(z, -1, 1)\n', (19019, 19029), True, 'import numpy as np\n'), ((19987, 20039), 'numpy.array2string', 'np.array2string', (['probs_patched_image'], {'separator': '""", """'}), "(probs_patched_image, separator=', ')\n", 
(20002, 20039), True, 'import numpy as np\n'), ((20104, 20157), 'numpy.array2string', 'np.array2string', (['probs_original_image'], {'separator': '""", """'}), "(probs_original_image, separator=', ')\n", (20119, 20157), True, 'import numpy as np\n'), ((22148, 22166), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (22155, 22166), True, 'import numpy as np\n'), ((22252, 22299), 'numpy.argmax', 'np.argmax', (['image_loader.y_test[indices]'], {'axis': '(1)'}), '(image_loader.y_test[indices], axis=1)\n', (22261, 22299), True, 'import numpy as np\n'), ((23194, 23239), 'keras.models.load_model', 'keras.models.load_model', (['"""models/wb_model.h5"""'], {}), "('models/wb_model.h5')\n", (23217, 23239), False, 'import keras\n'), ((23476, 23501), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (23485, 23501), True, 'import numpy as np\n'), ((582, 592), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (589, 592), True, 'import numpy as np\n'), ((600, 609), 'scipy.stats.sem', 'st.sem', (['x'], {}), '(x)\n', (606, 609), True, 'import scipy.stats as st\n'), ((1145, 1157), 'copy.copy', 'copy.copy', (['y'], {}), '(y)\n', (1154, 1157), False, 'import copy\n'), ((2591, 2621), 'numpy.argmax', 'np.argmax', (['self.y_test'], {'axis': '(1)'}), '(self.y_test, axis=1)\n', (2600, 2621), True, 'import numpy as np\n'), ((2778, 2801), 'numpy.where', 'np.where', (['boolean_index'], {}), '(boolean_index)\n', (2786, 2801), True, 'import numpy as np\n'), ((4000, 4026), 'numpy.zeros', 'np.zeros', (['self.patch_shape'], {}), '(self.patch_shape)\n', (4008, 4026), True, 'import numpy as np\n'), ((5715, 5733), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5731, 5733), False, 'import sys\n'), ((8439, 8499), 'tensorflow.compat.v1.clip_by_value', 'tf.clip_by_value', (['x'], {'clip_value_min': '(-1.0)', 'clip_value_max': '(1.0)'}), '(x, clip_value_min=-1.0, clip_value_max=1.0)\n', (8455, 8499), True, 'import tensorflow.compat.v1 as tf\n'), 
((8645, 8663), 'keras.backend.learning_phase', 'K.learning_phase', ([], {}), '()\n', (8661, 8663), True, 'from keras import backend as K\n'), ((8736, 8773), 'keras.layers.Input', 'keras.layers.Input', ([], {'shape': 'image_shape'}), '(shape=image_shape)\n', (8754, 8773), False, 'import keras\n'), ((8804, 8846), 'tensorflow.compat.v1.placeholder_with_default', 'tf.placeholder_with_default', (['SCALE_MIN', '[]'], {}), '(SCALE_MIN, [])\n', (8831, 8846), True, 'import tensorflow.compat.v1 as tf\n'), ((8876, 8918), 'tensorflow.compat.v1.placeholder_with_default', 'tf.placeholder_with_default', (['SCALE_MAX', '[]'], {}), '(SCALE_MAX, [])\n', (8903, 8918), True, 'import tensorflow.compat.v1 as tf\n'), ((8946, 9023), 'tensorflow.compat.v1.random_uniform', 'tf.random_uniform', (['[BATCH_SIZE]'], {'minval': 'self.scale_min', 'maxval': 'self.scale_max'}), '([BATCH_SIZE], minval=self.scale_min, maxval=self.scale_max)\n', (8963, 9023), True, 'import tensorflow.compat.v1 as tf\n'), ((9324, 9360), 'tensorflow.compat.v1.placeholder_with_default', 'tf.placeholder_with_default', (['(0.0)', '[]'], {}), '(0.0, [])\n', (9351, 9360), True, 'import tensorflow.compat.v1 as tf\n'), ((9381, 9480), 'tensorflow.compat.v1.get_variable', 'tf.get_variable', (['"""patch"""', 'self.patch_shape'], {'dtype': 'tf.float32', 'initializer': 'tf.zeros_initializer'}), "('patch', self.patch_shape, dtype=tf.float32, initializer=tf\n .zeros_initializer)\n", (9396, 9480), True, 'import tensorflow.compat.v1 as tf\n'), ((9550, 9606), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': 'self.patch_shape'}), '(dtype=tf.float32, shape=self.patch_shape)\n', (9564, 9606), True, 'import tensorflow.compat.v1 as tf\n'), ((9693, 9734), 'tensorflow.compat.v1.assign', 'tf.assign', (['patch', 'self._patch_placeholder'], {}), '(patch, self._patch_placeholder)\n', (9702, 9734), True, 'import tensorflow.compat.v1 as tf\n'), ((10364, 10400), 
'tensorflow.compat.v1.placeholder_with_default', 'tf.placeholder_with_default', (['(1.0)', '[]'], {}), '(1.0, [])\n', (10391, 10400), True, 'import tensorflow.compat.v1 as tf\n'), ((10434, 10487), 'tensorflow.compat.v1.nn.dropout', 'tf.nn.dropout', (['modified_patch'], {'keep_prob': 'self.dropout'}), '(modified_patch, keep_prob=self.dropout)\n', (10447, 10487), True, 'import tensorflow.compat.v1 as tf\n'), ((11041, 11084), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, 2)'}), '(tf.float32, shape=(None, 2))\n', (11055, 11084), True, 'import tensorflow.compat.v1 as tf\n'), ((11135, 11180), 'keras.models.load_model', 'keras.models.load_model', (['"""models/wb_model.h5"""'], {}), "('models/wb_model.h5')\n", (11158, 11180), False, 'import keras\n'), ((11319, 11359), 'keras.layers.Input', 'keras.layers.Input', ([], {'tensor': 'patched_input'}), '(tensor=patched_input)\n', (11337, 11359), False, 'import keras\n'), ((11454, 11512), 'keras.models.Model', 'keras.models.Model', ([], {'inputs': 'new_input_layer', 'outputs': 'output'}), '(inputs=new_input_layer, outputs=output)\n', (11472, 11512), False, 'import keras\n'), ((11700, 11778), 'tensorflow.compat.v1.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'self._target_ys', 'logits': 'logits'}), '(labels=self._target_ys, logits=logits)\n', (11739, 11778), True, 'import tensorflow.compat.v1 as tf\n'), ((11859, 11897), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['self._loss_per_example'], {}), '(self._loss_per_example)\n', (11873, 11897), True, 'import tensorflow.compat.v1 as tf\n'), ((12180, 12206), 'tensorflow.compat.v1.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (12194, 12206), True, 'import tensorflow.compat.v1 as tf\n'), ((13769, 13865), 'tensorflow.compat.v1.py_func', 'tf.py_func', (['_random_transform', '[self.scale_min, self.scale_max, image_shape[0]]', 'tf.float32'], {}), 
'(_random_transform, [self.scale_min, self.scale_max, image_shape[\n 0]], tf.float32)\n', (13779, 13865), True, 'import tensorflow.compat.v1 as tf\n'), ((16731, 16771), 'numpy.array', 'np.array', (['[a0, a1, a2, b0, b1, b2, 0, 0]'], {}), '([a0, a1, a2, b0, b1, b2, 0, 0])\n', (16739, 16771), True, 'import numpy as np\n'), ((19081, 19109), 'numpy.broadcast_to', 'np.broadcast_to', (['mask', 'shape'], {}), '(mask, shape)\n', (19096, 19109), True, 'import numpy as np\n'), ((699, 720), 'numpy.round', 'np.round', (['model_preds'], {}), '(model_preds)\n', (707, 720), True, 'import numpy as np\n'), ((9176, 9202), 'tensorflow.compat.v1.zeros', 'tf.zeros', (['self.patch_shape'], {}), '(self.patch_shape)\n', (9184, 9202), True, 'import tensorflow.compat.v1 as tf\n'), ((10169, 10216), 'tensorflow.compat.v1.image.resize_images', 'tf.image.resize_images', (['image_input', '(224, 224)'], {}), '(image_input, (224, 224))\n', (10191, 10216), True, 'import tensorflow.compat.v1 as tf\n'), ((10294, 10335), 'tensorflow.compat.v1.image.resize_images', 'tf.image.resize_images', (['patch', '(224, 224)'], {}), '(patch, (224, 224))\n', (10316, 10335), True, 'import tensorflow.compat.v1 as tf\n'), ((11930, 11972), 'tensorflow.compat.v1.nn.l2_loss', 'tf.nn.l2_loss', (['(patch - self.patch_disguise)'], {}), '(patch - self.patch_disguise)\n', (11943, 11972), True, 'import tensorflow.compat.v1 as tf\n'), ((12523, 12534), 'time.time', 'time.time', ([], {}), '()\n', (12532, 12534), False, 'import time\n'), ((15586, 15600), 'math.cos', 'math.cos', (['(-rot)'], {}), '(-rot)\n', (15594, 15600), False, 'import math\n'), ((15629, 15643), 'math.sin', 'math.sin', (['(-rot)'], {}), '(-rot)\n', (15637, 15643), False, 'import math\n'), ((15645, 15659), 'math.cos', 'math.cos', (['(-rot)'], {}), '(-rot)\n', (15653, 15659), False, 'import math\n'), ((12236, 12290), 'tensorflow.compat.v1.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['self._learning_rate'], {}), '(self._learning_rate)\n', 
(12269, 12290), True, 'import tensorflow.compat.v1 as tf\n'), ((15603, 15617), 'math.sin', 'math.sin', (['(-rot)'], {}), '(-rot)\n', (15611, 15617), False, 'import math\n'), ((22386, 22425), 'numpy.argmax', 'np.argmax', (['probs_patched_images'], {'axis': '(1)'}), '(probs_patched_images, axis=1)\n', (22395, 22425), True, 'import numpy as np\n'), ((22796, 22820), 'numpy.where', 'np.where', (['(indices == ind)'], {}), '(indices == ind)\n', (22804, 22820), True, 'import numpy as np\n'), ((9926, 9951), 'numpy.ones', 'np.ones', (['self.patch_shape'], {}), '(self.patch_shape)\n', (9933, 9951), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import numpy as np
import pandas as pd
import datetime as dt
import netCDF4 as cdf
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import cartopy
cartopy.config['data_dir'] = '/data/project/cartopy/'
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.feature import NaturalEarthFeature, LAND, COASTLINE, LAKES
import json
import requests
#Import ICOS tools:
from icoscp.cpb.dobj import Dobj
from icoscp.sparql.runsparql import RunSparql
from icoscp.sparql import sparqls
my_home = os.getenv('HOME')
sys.path.insert(0,my_home+'/new_jupyter/modules')
from extra_sparqls import get_station_class, get_icos_stations_atc_samplingheight #, atc_station_tracer_query
# paths --- changed for new JupyterHub instance
path_stiltweb = '/data/stiltweb/'
path_stilt = '/data/stilt/'
path_edgar = '/data/stilt/Emissions/'
#path_plots = './plots/'
#------------------------------------------------------------------------------------------------------------------
def read_icos_data(f_icos, tracer, flag=True):
    """Load an ICOS data object as a TIMESTAMP-indexed DataFrame.

    Args:
        f_icos: DataFrame of Carbon Portal search results; the first row's
            `dobj` identifier is downloaded via the icoscp library.
        tracer: Tracer name (e.g. 'co2'; 'mto' denotes meteo data with
            per-column '-Flag' quality columns).
        flag: If True, mask values whose quality flag is not 'O';
            otherwise mask negative values (non-meteo tracers only).

    Returns:
        pandas.DataFrame indexed by TIMESTAMP; an empty DataFrame when
        f_icos has no rows.
    """
    if len(f_icos) > 0:
        df = Dobj(f_icos.dobj.iloc[0]).getColumns()
        df.set_index('TIMESTAMP', inplace=True)
        if flag:
            if tracer.lower() == 'mto':
                value_cols = [x.replace('-Flag', '') for x in df.columns[df.columns.str.contains('Flag')]]
                for x in value_cols:
                    # df.loc[mask, col] instead of chained df[col].loc[mask]:
                    # chained assignment operates on a possible copy and may
                    # silently fail to write (SettingWithCopyWarning).
                    df.loc[df[x + '-Flag'] != 'O', x] = np.nan
            else:
                bad = df['Flag'] != 'O'
                df.loc[bad, tracer.lower()] = np.nan
                df.loc[bad, 'Stdev'] = np.nan
        else:
            if tracer.lower() != 'mto':
                df.loc[df[tracer.lower()] < 0.0, tracer.lower()] = np.nan
                df.loc[df['Stdev'] < 0.0, 'Stdev'] = np.nan
    else:
        df = pd.DataFrame(None)
    return df
#------------------------------------------------------------------------------------
# function to convert station longitude and latitude (slat, slon) to indices of STILT model grid (ix,jy)
def lonlat_2_ixjy(slon, slat, mlon, mlat):
    """Map a station's (lon, lat) to the nearest STILT model-grid indices.

    Args:
        slon, slat: Station longitude and latitude.
        mlon, mlat: 1-D arrays of model-grid longitudes and latitudes.

    Returns:
        (ix, jy): Indices of the closest grid longitude and latitude.
    """
    ix = np.argmin(np.abs(mlon - slon))
    jy = np.argmin(np.abs(mlat - slat))
    return ix, jy
#------------------------------------------------------------------------------------
# function to read annual mean EDGAR emissions
def read_emissions(filename):
    """Read annual mean EDGAR anthropogenic emissions from a netCDF file.

    The file's latitude/longitude variables refer to the lower-left corner
    of each grid cell; they are shifted to cell centers before returning.

    Args:
        filename: Path to the EDGAR netCDF file.

    Returns:
        (emis, emis_lon, emis_lat, datetime, unit): 3-D emission array
        (presumably time x lat x lon -- confirm against the file layout),
        cell-center longitudes and latitudes, decoded time axis, and the
        emission unit string in brackets.
    """
    f = cdf.Dataset(filename)
    #print(f)
    emis=f.variables["emission"][:,:,:]
    lon_ll=f.variables["lon"][:]
    lat_ll=f.variables["lat"][:]
    # NOTE(review): `time` is read here but unused; num2date below re-reads it.
    time=f.variables["time"][:]
    # shift lat and lon from lower-left corner to cell center (half a cell)
    dlon=np.abs(lon_ll[2]-lon_ll[1])
    dlat=np.abs(lat_ll[2]-lat_ll[1])
    emis_lon=lon_ll+0.5*dlon
    emis_lat=lat_ll+0.5*dlat
    unit="["+f.variables["emission"].units+"]"
    datetime=cdf.num2date(f.variables["time"][:],units=f.variables["time"].units)
    f.close()
    return emis, emis_lon, emis_lat, datetime, unit
#------------------------------------------------------------------------------------
def create_STILT_dictionary():
    """Collect STILT station information into a dictionary (and JSON file).

    For every station directory found under stiltweb, the dictionary stores
    latitude/longitude/altitude of the tracer release, the STILT location
    identifier, the station name, the ICOS station class/name/id/coordinates
    and the ICOS sampling height.  The result is also dumped to a local
    'stationsDict' JSON file when the working directory is writable.

    Returns:
        dict keyed by STILT station id.
    """
    # get all ICOS station IDs by listing subdirectories in stiltweb
    # extract location from filename of link
    path_stations = path_stiltweb + '/stations/'
    all_stations = os.listdir(path_stations)
    # empty dictionary
    stations = {}
    # fill dictionary with ICOS station id, latitude, longitude and altitude
    for ist in sorted(list(set(all_stations))):
        stations[ist] = {}
        # get filename of link (original stiltweb directory structure)
        # and extract location information
        if os.path.exists(path_stations + ist):
            loc_ident = os.readlink(path_stations + ist)
            clon = loc_ident[-13:-6]
            # built-in float()/int() instead of np.float/np.int: those numpy
            # aliases were removed in NumPy 1.24 and raise AttributeError.
            lon = float(clon[:-1])
            if clon[-1:] == 'W':
                lon = -lon
            clat = loc_ident[-20:-14]
            lat = float(clat[:-1])
            if clat[-1:] == 'S':
                lat = -lat
            alt = int(loc_ident[-5:])
            stations[ist]['lat'] = lat
            stations[ist]['lon'] = lon
            stations[ist]['alt'] = alt
            stations[ist]['locIdent'] = os.path.split(loc_ident)[-1]
    # add information on station name (and new STILT station id) from stations.csv file used in stiltweb
    url = "https://stilt.icos-cp.eu/viewer/stationinfo"
    df = pd.read_csv(url)
    for ist in sorted(list(set(stations))):
        stationName = df.loc[df['STILT id'] == ist]['STILT name']
        if len(stationName.value_counts()) > 0:
            stations[ist]['name'] = stationName.item()
        else:
            stations[ist]['name'] = ''
    # Get list of ICOS class 1 and class 2 stations from Carbon Portal
    query = get_station_class()
    fmt = 'pandas'
    df_datatable = RunSparql(query, fmt).run()
    # add information if ICOS class 1 or class 2 site, plus ICOS name/id/coords
    for ist in sorted(list(set(stations))):
        stations[ist]['stationClass'] = np.nan
        stations[ist]['icosName'] = ''
        stations[ist]['icosId'] = ''
        stations[ist]['icosLon'] = np.nan
        stations[ist]['icosLat'] = np.nan
        for istICOS in df_datatable['stationId']:
            ic = int(df_datatable[df_datatable['stationId'] == istICOS].index.values)
            # substring match: STILT ids may carry a height suffix (e.g. 'XYZ150')
            if istICOS in ist:
                stations[ist]['stationClass'] = int(df_datatable['stationClass'][ic])
                stations[ist]['icosName'] = df_datatable['longName'][ic]
                stations[ist]['icosId'] = df_datatable['stationId'][ic]
                stations[ist]['icosLon'] = float(df_datatable['lon'][ic])
                stations[ist]['icosLat'] = float(df_datatable['lat'][ic])
    # add available sampling heights
    # find all co2 data files for station
    query = get_icos_stations_atc_samplingheight()
    fmt = 'pandas'
    df_datatable_hgt = RunSparql(query, fmt).run()
    for ist in sorted(list(set(stations))):
        stations[ist]['icosHeight'] = np.nan
        if (len(stations[ist]['icosName']) > 0):
            heights = df_datatable_hgt[df_datatable_hgt['stationId'] == ist[0:3]]['height'].unique()
            ih = np.nan
            if (len(heights) > 0):
                if (len(ist[3:]) == 0):
                    # no height suffix in the id: take the first sampling height
                    ih = int(float(heights[0]))
                else:
                    # match the height encoded in the station id within 6 m
                    ih = [int(float(el)) for el in heights if (abs(int(float(el)) - int(ist[3:])) < 6)]
            if isinstance(ih, list):
                if (len(ih) == 0):
                    stations[ist]['icosHeight'] = np.nan
                else:
                    stations[ist]['icosHeight'] = ih[0]
            else:
                stations[ist]['icosHeight'] = ih
    # write dictionary to json file for further use
    if os.access('./', os.W_OK):
        with open("stationsDict", "w") as outfile:
            json.dump(stations, outfile)
    return stations
#---------------------------------------------------------------------------------------------
def read_STILT_dictionary():
    """Load the STILT station dictionary from the local 'stationsDict' JSON file.

    Returns:
        dict of station metadata, or {} (with a message) if the file is missing.
    """
    stations = {}
    if os.path.exists("stationsDict"):
        with open("stationsDict") as jsonfile:
            stations = json.load(jsonfile)
    else:
        print("no STILT station dictionary found")
    return stations
#------------------------------------------------------------------------------------------------
def print_STILT_dictionary(stilt_stations):
    """Pretty-print a STILT station dictionary, one key/value pair per line.

    When the argument is empty or None, the dictionary is first loaded from
    disk via read_STILT_dictionary().
    """
    if not stilt_stations:
        stilt_stations = read_STILT_dictionary()

    for station_id in sorted(stilt_stations):
        print('station:', station_id)
        entry = stilt_stations[station_id]
        for key in entry:
            print(key, ':', entry[key])
    print(' ')
#------------------------------------------------------------------------------
# function to read STILT time series with hourly concentrations for RINGO T1.3
def read_stilt_timeseries_RINGO_T13(station, year, loc_ident):
    """Read hourly STILT concentration time series for RINGO Task 1.3.

    Parameters
    ----------
    station : str
        STILT station code (used in the result-file path).
    year : int
        Simulation year.
    loc_ident : str
        Location identifier embedded in the STILT result file name.

    Returns
    -------
    pandas.DataFrame
        Hourly results indexed by datetime, with derived columns
        'wind.speed' (horizontal wind speed) and 'co2.ff' (total
        fossil-fuel CO2) added.
    """
    filename = path_stilt+'Results_RINGO_T1.3_test/'+station+'/stiltresult'+str(year)+'x'+loc_ident+'.csv'
    # sep=r'\s+' replaces delim_whitespace=True, which is deprecated/removed
    # in pandas >= 2.2; behavior is identical.
    df = pd.read_csv(filename, sep=r'\s+')
    # Build the datetime column explicitly; df['date'] = ... also works when
    # the CSV has no pre-existing 'date' column (attribute assignment would
    # silently create only an instance attribute in that case).
    df['date'] = pd.to_datetime(df[['year', 'month', 'day', 'hour']])
    df.name = station    # DataFrame metadata attribute, not a column
    df.model = 'STILT'
    # derived quantities
    df['wind.speed'] = np.sqrt((df['wind.u']**2)+(df['wind.v']**2))
    df['co2.ff'] = df['co2.fuel.coal']+df['co2.fuel.oil']+df['co2.fuel.gas']
    df.set_index(['date'], inplace=True)
    return df
#------------------------------------------------------------------------------
# function to read STILT time series with hourly concentrations for RINGO T1.3, updated with more STILT runs
def read_stilt_timeseries_RINGO_T13_update(station, year, loc_ident):
    """Read hourly STILT time series for RINGO Task 1.3 (updated STILT runs).

    Same file layout and processing as read_stilt_timeseries_RINGO_T13.

    Parameters
    ----------
    station : str
        STILT station code (used in the result-file path).
    year : int
        Simulation year.
    loc_ident : str
        Location identifier embedded in the STILT result file name.

    Returns
    -------
    pandas.DataFrame
        Hourly results indexed by datetime, with derived 'wind.speed'
        and 'co2.ff' columns.
    """
    filename = path_stilt+'Results_RINGO_T1.3_test/'+station+'/stiltresult'+str(year)+'x'+loc_ident+'.csv'
    # sep=r'\s+' replaces delim_whitespace=True (deprecated in pandas >= 2.2).
    df = pd.read_csv(filename, sep=r'\s+')
    # Explicit column assignment is robust even if 'date' is not yet a column.
    df['date'] = pd.to_datetime(df[['year', 'month', 'day', 'hour']])
    df.name = station    # DataFrame metadata attribute, not a column
    df.model = 'STILT'
    df['wind.speed'] = np.sqrt((df['wind.u']**2)+(df['wind.v']**2))
    df['co2.ff'] = df['co2.fuel.coal']+df['co2.fuel.oil']+df['co2.fuel.gas']
    df.set_index(['date'], inplace=True)
    return df
"sys.path.insert",
"extra_sparqls.get_station_class",
"numpy.sqrt",
"pandas.read_csv",
"icoscp.cpb.dobj.Dobj",
"extra_sparqls.get_icos_stations_atc_samplingheight",
"pandas.to_datetime",
"os.path.exists",
"os.listdir",
"os.readlink",
"netCDF4.num2date",
"netCDF4.Dataset",
"os.path.split",
... | [((597, 614), 'os.getenv', 'os.getenv', (['"""HOME"""'], {}), "('HOME')\n", (606, 614), False, 'import os\n'), ((615, 667), 'sys.path.insert', 'sys.path.insert', (['(0)', "(my_home + '/new_jupyter/modules')"], {}), "(0, my_home + '/new_jupyter/modules')\n", (630, 667), False, 'import sys\n'), ((2593, 2614), 'netCDF4.Dataset', 'cdf.Dataset', (['filename'], {}), '(filename)\n', (2604, 2614), True, 'import netCDF4 as cdf\n'), ((2816, 2845), 'numpy.abs', 'np.abs', (['(lon_ll[2] - lon_ll[1])'], {}), '(lon_ll[2] - lon_ll[1])\n', (2822, 2845), True, 'import numpy as np\n'), ((2853, 2882), 'numpy.abs', 'np.abs', (['(lat_ll[2] - lat_ll[1])'], {}), '(lat_ll[2] - lat_ll[1])\n', (2859, 2882), True, 'import numpy as np\n'), ((3005, 3074), 'netCDF4.num2date', 'cdf.num2date', (["f.variables['time'][:]"], {'units': "f.variables['time'].units"}), "(f.variables['time'][:], units=f.variables['time'].units)\n", (3017, 3074), True, 'import netCDF4 as cdf\n'), ((3769, 3794), 'os.listdir', 'os.listdir', (['path_stations'], {}), '(path_stations)\n', (3779, 3794), False, 'import os\n'), ((4877, 4893), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (4888, 4893), True, 'import pandas as pd\n'), ((5286, 5305), 'extra_sparqls.get_station_class', 'get_station_class', ([], {}), '()\n', (5303, 5305), False, 'from extra_sparqls import get_station_class, get_icos_stations_atc_samplingheight\n'), ((6366, 6404), 'extra_sparqls.get_icos_stations_atc_samplingheight', 'get_icos_stations_atc_samplingheight', ([], {}), '()\n', (6402, 6404), False, 'from extra_sparqls import get_station_class, get_icos_stations_atc_samplingheight\n'), ((7482, 7506), 'os.access', 'os.access', (['"""./"""', 'os.W_OK'], {}), "('./', os.W_OK)\n", (7491, 7506), False, 'import os\n'), ((7814, 7844), 'os.path.exists', 'os.path.exists', (['"""stationsDict"""'], {}), "('stationsDict')\n", (7828, 7844), False, 'import os\n'), ((8854, 8898), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'delim_whitespace': 
'(True)'}), '(filename, delim_whitespace=True)\n', (8865, 8898), True, 'import pandas as pd\n'), ((8912, 8964), 'pandas.to_datetime', 'pd.to_datetime', (["df[['year', 'month', 'day', 'hour']]"], {}), "(df[['year', 'month', 'day', 'hour']])\n", (8926, 8964), True, 'import pandas as pd\n'), ((9031, 9077), 'numpy.sqrt', 'np.sqrt', (["(df['wind.u'] ** 2 + df['wind.v'] ** 2)"], {}), "(df['wind.u'] ** 2 + df['wind.v'] ** 2)\n", (9038, 9077), True, 'import numpy as np\n'), ((9591, 9635), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'delim_whitespace': '(True)'}), '(filename, delim_whitespace=True)\n', (9602, 9635), True, 'import pandas as pd\n'), ((9649, 9701), 'pandas.to_datetime', 'pd.to_datetime', (["df[['year', 'month', 'day', 'hour']]"], {}), "(df[['year', 'month', 'day', 'hour']])\n", (9663, 9701), True, 'import pandas as pd\n'), ((9768, 9814), 'numpy.sqrt', 'np.sqrt', (["(df['wind.u'] ** 2 + df['wind.v'] ** 2)"], {}), "(df['wind.u'] ** 2 + df['wind.v'] ** 2)\n", (9775, 9814), True, 'import numpy as np\n'), ((1835, 1853), 'pandas.DataFrame', 'pd.DataFrame', (['None'], {}), '(None)\n', (1847, 1853), True, 'import pandas as pd\n'), ((4116, 4151), 'os.path.exists', 'os.path.exists', (['(path_stations + ist)'], {}), '(path_stations + ist)\n', (4130, 4151), False, 'import os\n'), ((2223, 2242), 'numpy.abs', 'np.abs', (['(mlon - slon)'], {}), '(mlon - slon)\n', (2229, 2242), True, 'import numpy as np\n'), ((2261, 2280), 'numpy.abs', 'np.abs', (['(mlat - slat)'], {}), '(mlat - slat)\n', (2267, 2280), True, 'import numpy as np\n'), ((4175, 4207), 'os.readlink', 'os.readlink', (['(path_stations + ist)'], {}), '(path_stations + ist)\n', (4186, 4207), False, 'import os\n'), ((4261, 4280), 'numpy.float', 'np.float', (['clon[:-1]'], {}), '(clon[:-1])\n', (4269, 4280), True, 'import numpy as np\n'), ((4397, 4416), 'numpy.float', 'np.float', (['clat[:-1]'], {}), '(clat[:-1])\n', (4405, 4416), True, 'import numpy as np\n'), ((4495, 4517), 'numpy.int', 'np.int', 
(['loc_ident[-5:]'], {}), '(loc_ident[-5:])\n', (4501, 4517), True, 'import numpy as np\n'), ((5344, 5365), 'icoscp.sparql.runsparql.RunSparql', 'RunSparql', (['query', 'fmt'], {}), '(query, fmt)\n', (5353, 5365), False, 'from icoscp.sparql.runsparql import RunSparql\n'), ((6447, 6468), 'icoscp.sparql.runsparql.RunSparql', 'RunSparql', (['query', 'fmt'], {}), '(query, fmt)\n', (6456, 6468), False, 'from icoscp.sparql.runsparql import RunSparql\n'), ((7571, 7599), 'json.dump', 'json.dump', (['stations', 'outfile'], {}), '(stations, outfile)\n', (7580, 7599), False, 'import json\n'), ((7916, 7935), 'json.load', 'json.load', (['jsonfile'], {}), '(jsonfile)\n', (7925, 7935), False, 'import json\n'), ((1149, 1174), 'icoscp.cpb.dobj.Dobj', 'Dobj', (['f_icos.dobj.iloc[0]'], {}), '(f_icos.dobj.iloc[0])\n', (1153, 1174), False, 'from icoscp.cpb.dobj import Dobj\n'), ((4668, 4692), 'os.path.split', 'os.path.split', (['loc_ident'], {}), '(loc_ident)\n', (4681, 4692), False, 'import os\n')] |
"""
Population.py
Author: <NAME>
Affiliation: University of Colorado at Boulder
Created on: Fri May 29 18:30:49 MDT 2015
Description:
"""
import numpy as np
import matplotlib.pyplot as pl
from scipy.integrate import cumtrapz
from ..util.ReadData import read_lit
from ..physics.Constants import s_per_yr
from ..util.Aesthetics import labels
class Population(object):
    """Plotting/analysis helpers for abundance-matched (HAM) galaxy populations.

    Wraps a population object exposing the halo abundance-matching machinery
    under ``pop.ham`` (halo mass function, Lh_of_M, SFE, ...). Plotting
    methods return the matplotlib axis they drew on.
    """
    def __init__(self, pop):
        """
        Parameters
        ----------
        pop : object
            Population instance; must have ``is_ham_model == True``.
        """
        assert pop.is_ham_model, "These routines only apply for HAM models!"
        self.pop = pop

    def LuminosityFunction(self, z, ax=None, fig=1, cumulative=False, **kwargs):
        """Plot the normalized cumulative luminosity contribution vs. halo mass.

        NOTE(review): despite the name, this plots the cumulative integral of
        Lh * dn/dlnM (normalized to 1), and the `cumulative` keyword is
        currently unused.
        """
        if ax is None:
            gotax = False
            fig = pl.figure(fig)
            ax = fig.add_subplot(111)
        else:
            gotax = True

        Mh, Lh = self.pop.ham.Lh_of_M(z)

        # nearest tabulated redshift in the halo model
        k = np.argmin(np.abs(z - self.pop.ham.halos.z))
        dndlnm = self.pop.ham.halos.dndlnm[k]

        integrand = Lh * dndlnm
        Lhc = cumtrapz(integrand, x=self.pop.ham.halos.lnM, initial=0.0)

        ax.loglog(Mh, Lhc / Lhc[-1], **kwargs)
        ax.set_ylim(1e-3, 2)

        return ax

    def ObservedLF(self, source, z, ax=None, fig=1):
        """Plot an observed luminosity function from the literature.

        Fixed: the original body referenced undefined names (``o13``, ``mp``,
        ``i``) and would raise NameError; it now uses the dataset returned by
        ``read_lit`` and plots on the supplied (or freshly created) axis.
        """
        data = read_lit(source)

        assert z in data.redshifts, "Requested redshift not in source %s" % source

        if ax is None:
            fig = pl.figure(fig)
            ax = fig.add_subplot(111)

        # negative errors flag upper limits
        uplims = np.array(data.data['lf'][z]['err']) < 0

        err_lo = []; err_hi = []
        for hh, err1 in enumerate(data.data['lf'][z]['err']):
            if uplims[hh]:
                err_hi.append(0.0)
                # draw the upper-limit arrow down to 80% of phi
                err_lo.append(0.8 * data.data['lf'][z]['phi'][hh])
            else:
                err_hi.append(err1)
                err_lo.append(err1)

        ax.errorbar(data.data['lf'][z]['M'], data.data['lf'][z]['phi'],
            yerr=(err_lo, err_hi), uplims=list(uplims), fmt='o',
            color='g', zorder=10, mec='g', ms=3)

        return ax

    def MassToLight(self, z, ax=None, fig=1, scatkw=None, **kwargs):
        """
        Plot the halo mass to luminosity relationship yielded by AM.
        """
        if scatkw is None:   # avoid mutable default argument
            scatkw = {}
        if ax is None:
            gotax = False
            fig = pl.figure(fig)
            ax = fig.add_subplot(111)
        else:
            gotax = True

        Mh, Lh = self.pop.ham.Lh_of_M(z)

        ax.loglog(Mh, Lh, **kwargs)
        ax.set_ylim(1e23, 1e33)

        # overplot the tabulated abundance-matching points, if available at z
        if z in self.pop.ham.redshifts:
            k = self.pop.ham.redshifts.index(z)
            ax.scatter(self.pop.ham.MofL_tab[k], self.pop.ham.LofM_tab[k],
                **scatkw)

        ax.set_xlim(1e6, 1e15)
        ax.set_xlabel(labels['Mh'])
        ax.set_ylabel(labels['Lh'])
        pl.draw()

        return ax

    def SFE(self, z, ax=None, fig=1, scatkw=None, **kwargs):
        """Plot the star-formation efficiency (fstar) vs. halo mass at z."""
        if scatkw is None:   # avoid mutable default argument
            scatkw = {}
        if ax is None:
            gotax = False
            fig = pl.figure(fig)
            ax = fig.add_subplot(111)
        else:
            gotax = True

        Marr = np.logspace(8, 14)
        fast = self.pop.ham.SFE(z=z, M=Marr)

        ax.loglog(Marr, fast, **kwargs)

        if z in self.pop.ham.redshifts:
            k = self.pop.ham.redshifts.index(z)
            ax.scatter(self.pop.ham.MofL_tab[k], self.pop.ham.fstar_tab[k],
                **scatkw)

        ax.set_xlabel(labels['Mh'])
        ax.set_ylabel(labels['fstar'])
        pl.draw()

        return ax

    def HMF_vs_LF(self, z, ax=None, fig=1, mags=False, data=None, **kwargs):
        """
        Plot the halo mass function vs. the luminosity function.

        Parameters
        ----------
        z : int, float
            Redshift of interest.
        mags : bool
            If True, luminosity function will be plotted in AB magnitude,
            otherwise, in rest-frame 1600 Angstrom luminosity
        """
        if ax is None:
            gotax = False
            fig = pl.figure(fig)
            ax = fig.add_subplot(111)
        else:
            gotax = True

        # HMF
        Mh = self.pop.ham.halos.M
        i_z = np.argmin(np.abs(z - self.pop.ham.halos.z))
        nofm = self.pop.ham.halos.dndm[i_z]
        # NOTE(review): `self.pop.halos` here vs. `self.pop.ham.halos` above --
        # confirm both refer to the same halo model.
        above_Mmin = self.pop.halos.M >= self.pop.ham.Mmin[i_z]
        phi_hmf = nofm[0:-1] * np.diff(Mh) * above_Mmin[0:-1]

        ax.loglog(Mh[0:-1], phi_hmf)

        # Now, LF
        xLF, phi = self.pop.ham.LuminosityFunction(z, mags=mags)
        phi_lf = phi[0:-1] * np.diff(xLF)

        ax2 = ax.twiny()
        ax2.semilogy(xLF[0:-1], phi_lf, 'r')

        # Change tick colors
        ax2.spines['top'].set_color('red')
        ax2.xaxis.label.set_color('red')
        ax2.tick_params(axis='x', colors='red', which='both')

        if mags:
            ax2.set_xlabel(r'Galaxy Luminosity $(M_{\mathrm{UV}})$', color='r')
            ax2.set_xlim(-6, -25)
        else:
            ax2.set_xlabel(r'Galaxy Luminosity $(L_{\mathrm{UV}} / \mathrm{erg} \ \mathrm{s}^{-1} \ \mathrm{Hz}^{-1})$',
                color='r')
            ax2.set_xscale('log')
            ax2.set_xlim(1e25, 1e33)

        ax.set_xlabel(r'Halo Mass $(M_h / M_{\odot})$')
        ax.set_xlim(1e6, 1e13)
        ax.set_ylim(1e-9, 2)
        ax.set_ylabel(r'Number Density $(\phi / \mathrm{cMpc}^{-3})$')

        pl.draw()

        return ax, ax2

    def HMF_vs_SMF(self, z, ax=None, fig=1, **kwargs):
        """
        Plot the halo mass function vs. the stellar mass function.

        Parameters
        ----------
        z : int, float
            Redshift of interest.
        """
        if ax is None:
            gotax = False
            fig = pl.figure(fig)
            ax = fig.add_subplot(111)
        else:
            gotax = True

        # HMF
        Mh = self.pop.ham.halos.M
        i_z = np.argmin(np.abs(z - self.pop.ham.halos.z))
        nofm = self.pop.ham.halos.dndm[i_z]
        above_Mmin = self.pop.halos.M >= self.pop.ham.Mmin[i_z]

        ax.loglog(Mh, nofm * Mh * above_Mmin)

        # Stellar masses via the SMHM relation
        Mh_, Ms_ = self.SMHM(z)

        # interpolate the HMF onto the SMHM halo-mass grid (in log space)
        nofm_ = np.exp(np.interp(np.log(Mh_), np.log(self.pop.halos.M),
            np.log(self.pop.halos.dndm[i_z])))

        above_Mmin = Mh_ >= self.pop.ham.Mmin[i_z]

        ax2 = ax.twiny()
        ax2.loglog(Ms_, nofm_ * Mh_ * above_Mmin, 'r')

        ax2.set_xlabel(r'$M_{\ast} / M_{\odot}$', color='r')
        for tl in ax2.get_xticklabels():
            tl.set_color('r')
        # Fixed: the original iterated ax2.get_xticks() (an array of floats)
        # and called .set_color() on them, which raises AttributeError.
        ax2.tick_params(axis='x', colors='r', which='both')

        ax.set_xlabel(r'$M_h / M_{\odot}$')
        ax.set_xlim(0.8 * self.pop.ham.Mmin[i_z], 1e14)
        ax.set_ylim(1e-13, 1e2)
        ax.set_ylabel('Number Density')

        pl.draw()

        return ax

    def SMHM(self, z, ratio=False, Nz=100, zmax=40):
        """
        Compute the stellar-mass halo-mass (SMHM) relation.

        Integrates the star-formation history of halos formed between `z`
        and `zmax` (sampled at `Nz` formation redshifts).

        Returns
        -------
        tuple of np.ndarray
            (halo masses, stellar masses) at redshift z.

        NOTE(review): the `ratio` keyword is accepted but currently unused.
        """
        # Array of formation redshifts from high to low
        zarr = np.linspace(z, zmax, Nz)[-1::-1]

        Mh_all = []
        Mstar_all = []
        for i in range(Nz):
            # Obtain halo mass for all times since zmax = zarr[i]
            zz, Mh = self.pop.ham.Mh_of_z(zarr[i:])
            Mh_all.append(Mh[-1])

            # Compute stellar mass
            Macc_of_z = self.pop.ham.Macc(zz, Mh)
            fstar_of_z = np.array([self.pop.ham.fstar(zz[j], Mh[j]) \
                for j in range(len(zz))])

            dtdz = -self.pop.cosm.dtdz(zz)
            eta = np.interp(zz, self.pop.ham.halos.z, self.pop.ham.eta)

            sfr_of_z = fstar_of_z * self.pop.cosm.fbaryon * Macc_of_z * eta
            integrand = sfr_of_z * dtdz / s_per_yr

            Mstar = np.trapz(integrand, x=zz)

            Mstar_all.append(Mstar)

        return np.array(Mh_all)[-1::-1], np.array(Mstar_all)[-1::-1]

    def SamplePosterior(self, x, func, pars, errors, Ns=1e3):
        """
        Draw random samples from posterior distributions.

        Parameters
        ----------
        x : np.ndarray
            Independent variable of input function, `func`.
        func : function
            Function used to generate samples. Currently, support for single
            independent variable (`x`) and an arbitrary number of keyword arguments.
        pars : dict
            Dictionary of best-fit parameter values
        errors : dict
            Dictionary of 1-sigma errors on the best-fit parameters
        Ns : int
            Number of samples to draw

        Examples
        --------
        >>> import ares
        >>> import numpy as np
        >>> import matplotlib.pyplot as pl
        >>>
        >>> r15 = ares.util.read_lit('robertson2015')
        >>> z = np.arange(0, 8, 0.05)
        >>> pop = ares.analysis.Population(r15)
        >>> models = pop.SamplePosterior(z, r15.SFRD, r15.sfrd_pars, r15.sfrd_err)
        >>>
        >>> for i in range(int(models.shape[1])):
        >>>     pl.plot(z, models[:,i], color='b', alpha=0.05)

        Returns
        -------
        Array with dimensions `(len(x), Ns)`.
        """
        # np.random.normal requires an integer sample count (float sizes are
        # rejected by modern numpy)
        Ns = int(Ns)

        # Generate arrays of random values. Keep in dictionary
        kw = {key: np.random.normal(pars[key], errors[key], Ns) \
            for key in errors}

        # Handle non-vectorized case. Comprehensions replace map(), which on
        # Python 3 returns an iterator that np.array would not expand.
        try:
            return np.array([func(xx, **kw) for xx in x])
        except ValueError:
            arr = np.zeros((len(x), Ns))
            for i in range(Ns):
                new_kw = {key: kw[key][i] for key in kw}
                arr[:,i] = [func(xx, **new_kw) for xx in x]
            return arr

    def PlotLF(self, z):
        """
        Plot the luminosity function. (Not yet implemented.)
        """
        pass

    def PlotLD(self, z):
        """
        Plot the luminosity density. (Not yet implemented.)
        """
        pass

    def PlotSFRD(self, z):
        """
        Plot the star formation rate density. (Not yet implemented.)
        """
        pass
| [
"numpy.random.normal",
"numpy.abs",
"numpy.trapz",
"numpy.log",
"scipy.integrate.cumtrapz",
"numpy.diff",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.interp",
"matplotlib.pyplot.draw",
"numpy.logspace"
] | [((980, 1038), 'scipy.integrate.cumtrapz', 'cumtrapz', (['integrand'], {'x': 'self.pop.ham.halos.lnM', 'initial': '(0.0)'}), '(integrand, x=self.pop.ham.halos.lnM, initial=0.0)\n', (988, 1038), False, 'from scipy.integrate import cumtrapz\n'), ((2773, 2782), 'matplotlib.pyplot.draw', 'pl.draw', ([], {}), '()\n', (2780, 2782), True, 'import matplotlib.pyplot as pl\n'), ((3062, 3080), 'numpy.logspace', 'np.logspace', (['(8)', '(14)'], {}), '(8, 14)\n', (3073, 3080), True, 'import numpy as np\n'), ((3466, 3475), 'matplotlib.pyplot.draw', 'pl.draw', ([], {}), '()\n', (3473, 3475), True, 'import matplotlib.pyplot as pl\n'), ((5455, 5464), 'matplotlib.pyplot.draw', 'pl.draw', ([], {}), '()\n', (5462, 5464), True, 'import matplotlib.pyplot as pl\n'), ((7201, 7210), 'matplotlib.pyplot.draw', 'pl.draw', ([], {}), '()\n', (7208, 7210), True, 'import matplotlib.pyplot as pl\n'), ((660, 674), 'matplotlib.pyplot.figure', 'pl.figure', (['fig'], {}), '(fig)\n', (669, 674), True, 'import matplotlib.pyplot as pl\n'), ((845, 877), 'numpy.abs', 'np.abs', (['(z - self.pop.ham.halos.z)'], {}), '(z - self.pop.ham.halos.z)\n', (851, 877), True, 'import numpy as np\n'), ((1372, 1407), 'numpy.array', 'np.array', (["data.data['lf'][z]['err']"], {}), "(data.data['lf'][z]['err'])\n", (1380, 1407), True, 'import numpy as np\n'), ((2209, 2223), 'matplotlib.pyplot.figure', 'pl.figure', (['fig'], {}), '(fig)\n', (2218, 2223), True, 'import matplotlib.pyplot as pl\n'), ((2950, 2964), 'matplotlib.pyplot.figure', 'pl.figure', (['fig'], {}), '(fig)\n', (2959, 2964), True, 'import matplotlib.pyplot as pl\n'), ((4053, 4067), 'matplotlib.pyplot.figure', 'pl.figure', (['fig'], {}), '(fig)\n', (4062, 4067), True, 'import matplotlib.pyplot as pl\n'), ((4222, 4254), 'numpy.abs', 'np.abs', (['(z - self.pop.ham.halos.z)'], {}), '(z - self.pop.ham.halos.z)\n', (4228, 4254), True, 'import numpy as np\n'), ((4587, 4599), 'numpy.diff', 'np.diff', (['xLF'], {}), '(xLF)\n', (4594, 4599), True, 'import numpy as 
np\n'), ((6049, 6063), 'matplotlib.pyplot.figure', 'pl.figure', (['fig'], {}), '(fig)\n', (6058, 6063), True, 'import matplotlib.pyplot as pl\n'), ((6222, 6254), 'numpy.abs', 'np.abs', (['(z - self.pop.ham.halos.z)'], {}), '(z - self.pop.ham.halos.z)\n', (6228, 6254), True, 'import numpy as np\n'), ((7472, 7496), 'numpy.linspace', 'np.linspace', (['z', 'zmax', 'Nz'], {}), '(z, zmax, Nz)\n', (7483, 7496), True, 'import numpy as np\n'), ((8040, 8093), 'numpy.interp', 'np.interp', (['zz', 'self.pop.ham.halos.z', 'self.pop.ham.eta'], {}), '(zz, self.pop.ham.halos.z, self.pop.ham.eta)\n', (8049, 8093), True, 'import numpy as np\n'), ((8267, 8292), 'numpy.trapz', 'np.trapz', (['integrand'], {'x': 'zz'}), '(integrand, x=zz)\n', (8275, 8292), True, 'import numpy as np\n'), ((9769, 9813), 'numpy.random.normal', 'np.random.normal', (['pars[key]', 'errors[key]', 'Ns'], {}), '(pars[key], errors[key], Ns)\n', (9785, 9813), True, 'import numpy as np\n'), ((4396, 4407), 'numpy.diff', 'np.diff', (['Mh'], {}), '(Mh)\n', (4403, 4407), True, 'import numpy as np\n'), ((6533, 6544), 'numpy.log', 'np.log', (['Mh_'], {}), '(Mh_)\n', (6539, 6544), True, 'import numpy as np\n'), ((6546, 6570), 'numpy.log', 'np.log', (['self.pop.halos.M'], {}), '(self.pop.halos.M)\n', (6552, 6570), True, 'import numpy as np\n'), ((6585, 6617), 'numpy.log', 'np.log', (['self.pop.halos.dndm[i_z]'], {}), '(self.pop.halos.dndm[i_z])\n', (6591, 6617), True, 'import numpy as np\n'), ((8357, 8373), 'numpy.array', 'np.array', (['Mh_all'], {}), '(Mh_all)\n', (8365, 8373), True, 'import numpy as np\n'), ((8383, 8402), 'numpy.array', 'np.array', (['Mstar_all'], {}), '(Mstar_all)\n', (8391, 8402), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# encoding: utf-8
def acoustics(solver_type='classic',iplot=True,htmlplot=False,outdir='./_output',problem='figure 9.4'):
    """
    Solve the 1-dimensional variable-coefficient acoustics equations in a
    medium with a single interface (the 'figure 9.4' / 'figure 9.5' setups).

    solver_type selects 'classic' or 'sharpclaw'; iplot/htmlplot control
    post-run plotting; outdir is where pyclaw writes frames; problem picks
    the material properties of the right-hand medium.
    """
    from numpy import sqrt, abs

    import pyclaw

    # --- time-stepping solver selection ----------------------------------
    if solver_type == 'classic':
        stepper = pyclaw.ClawSolver1D()
    elif solver_type == 'sharpclaw':
        stepper = pyclaw.SharpClawSolver1D()
    else:
        raise Exception('Unrecognized value of solver_type.')

    stepper.mwaves = 2
    stepper.limiters = pyclaw.limiters.tvd.MC
    # non-reflecting boundaries on both ends, for q and aux alike
    stepper.bc_lower[0] = pyclaw.BC.outflow
    stepper.bc_upper[0] = pyclaw.BC.outflow
    stepper.aux_bc_lower[0] = pyclaw.BC.outflow
    stepper.aux_bc_upper[0] = pyclaw.BC.outflow

    # --- grid and state ---------------------------------------------------
    xdim = pyclaw.Dimension('x',-5.0,5.0,500)
    grid = pyclaw.Grid(xdim)
    num_eqn = 2
    num_aux = 2
    state = pyclaw.State(grid, num_eqn, num_aux)

    # --- material properties: (rho, c) on each side of the interface ------
    if problem == 'figure 9.4':
        rhol, cl, rhor, cr = 1.0, 1.0, 2.0, 0.5
    elif problem == 'figure 9.5':
        rhol, cl, rhor, cr = 1.0, 1.0, 4.0, 0.5

    zl = rhol*cl   # impedance, left medium
    zr = rhor*cr   # impedance, right medium

    xc = grid.x.center
    state.aux[0,:] = (xc<=0)*zl + (xc>0)*zr # Impedance
    state.aux[1,:] = (xc<=0)*cl + (xc>0)*cr # Sound speed

    # initial condition: half-ellipse pulse on [-4, -2]
    state.q[0,:] = sqrt(abs(1.-(xc+3.)**2))*(xc>-4.)*(xc<-2.)
    state.q[1,:] = state.q[0,:] + 0.

    # --- controller and run -----------------------------------------------
    claw = pyclaw.Controller()
    claw.solution = pyclaw.Solution(state)
    claw.solver = stepper
    claw.tfinal = 5.0
    claw.nout = 10

    status = claw.run()

    # Plot results
    if htmlplot: pyclaw.plot.html_plot(outdir=outdir)
    if iplot: pyclaw.plot.interactive_plot(outdir=outdir)
if __name__=="__main__":
    # Script entry point: delegate CLI handling and execution to pyclaw's
    # standard application runner.
    from pyclaw.util import run_app_from_main
    output = run_app_from_main(acoustics)
| [
"numpy.abs",
"pyclaw.Grid",
"pyclaw.State",
"pyclaw.Solution",
"pyclaw.plot.interactive_plot",
"pyclaw.SharpClawSolver1D",
"pyclaw.Controller",
"pyclaw.ClawSolver1D",
"pyclaw.plot.html_plot",
"pyclaw.Dimension",
"pyclaw.util.run_app_from_main"
] | [((809, 846), 'pyclaw.Dimension', 'pyclaw.Dimension', (['"""x"""', '(-5.0)', '(5.0)', '(500)'], {}), "('x', -5.0, 5.0, 500)\n", (825, 846), False, 'import pyclaw\n'), ((855, 869), 'pyclaw.Grid', 'pyclaw.Grid', (['x'], {}), '(x)\n', (866, 869), False, 'import pyclaw\n'), ((908, 938), 'pyclaw.State', 'pyclaw.State', (['grid', 'meqn', 'maux'], {}), '(grid, meqn, maux)\n', (920, 938), False, 'import pyclaw\n'), ((1480, 1499), 'pyclaw.Controller', 'pyclaw.Controller', ([], {}), '()\n', (1497, 1499), False, 'import pyclaw\n'), ((1520, 1542), 'pyclaw.Solution', 'pyclaw.Solution', (['state'], {}), '(state)\n', (1535, 1542), False, 'import pyclaw\n'), ((1870, 1898), 'pyclaw.util.run_app_from_main', 'run_app_from_main', (['acoustics'], {}), '(acoustics)\n', (1887, 1898), False, 'from pyclaw.util import run_app_from_main\n'), ((389, 410), 'pyclaw.ClawSolver1D', 'pyclaw.ClawSolver1D', ([], {}), '()\n', (408, 410), False, 'import pyclaw\n'), ((1686, 1722), 'pyclaw.plot.html_plot', 'pyclaw.plot.html_plot', ([], {'outdir': 'outdir'}), '(outdir=outdir)\n', (1707, 1722), False, 'import pyclaw\n'), ((1741, 1784), 'pyclaw.plot.interactive_plot', 'pyclaw.plot.interactive_plot', ([], {'outdir': 'outdir'}), '(outdir=outdir)\n', (1769, 1784), False, 'import pyclaw\n'), ((463, 489), 'pyclaw.SharpClawSolver1D', 'pyclaw.SharpClawSolver1D', ([], {}), '()\n', (487, 489), False, 'import pyclaw\n'), ((1393, 1419), 'numpy.abs', 'abs', (['(1.0 - (xc + 3.0) ** 2)'], {}), '(1.0 - (xc + 3.0) ** 2)\n', (1396, 1419), False, 'from numpy import sqrt, abs\n')] |
# coding=utf-8
##########################################################
# Authors: <NAME>, <NAME>, <NAME>
# Affiliation: University of Geneva
# Version: 1.4.5
# Date: 13.01.2022
# Downscaling of Swiss LCLU data
##########################################################
# import libraries
import numpy, math
import pandas as pd
import os
from osgeo import gdal # import GDAL
import shutil
##################################################################################################
# Step 1: create a land use grid at 100m resolution from Landuse100 statistics
# Step 2: remove from Landuse100 categories that correspond to linear features (river, road, train)
# Step 3: Rasterize the primary surfaces land cover vector base map at a 25m resolution (BaseMap25)
##################################################################################################
####################################################################################
# Step 4: Visit each BaseMap25 pixel
# Step 10: Loop from point 4 to 11 with next BaseMap25 pixel
# Input: BaseMap25 is the swisstopo layer for which we will visit each pixel
# Input: Expert table to get acceptable values and related weight
####################################################################################
#copy the input files to /scratch storage on the cluster -> accesible to all the nodes
# NOTE(review): paths are cluster-specific; /scratch must exist and be writable.
originalp = 'PRI09_25.tiff'
targetp = '/scratch/PRI09_25.tiff'
shutil.copyfile(originalp, targetp)
originala = 'AS09_72_25.tiff'
targeta = '/scratch/AS09_72_25.tiff'
shutil.copyfile(originala, targeta)
# Get the size (columns/rows) of the Base Map 25 raster
raster = gdal.Open(targetp) # open raster
cols = raster.RasterXSize # get columns
rows = raster.RasterYSize # get rows
band = raster.GetRasterBand(1) # get band
data = band.ReadAsArray(0, 0, cols, rows) # read raster at once
print('BaseMap25 - Image Size: Rows:'+str(rows)+' Columns:'+str(cols))
# Get the size (columns/rows) of the Landuse 100 raster
#LUrast = gdal.Open('AS09_72s25.tiff')
LUrast = gdal.Open(targeta)
cols2 = LUrast.RasterXSize   # columns of the land-use raster
rows2 = LUrast.RasterYSize   # rows of the land-use raster
band2 = LUrast.GetRasterBand(1)
data2 = band2.ReadAsArray(0, 0, cols2, rows2)   # full land-use array in memory
print('Landuse100 - Image Size: Rows:'+str(rows2)+' Columns:'+str(cols2))
###### Baobab - chunking ######
# Split the raster into an nR x nC grid of chunks; each SLURM array task
# processes exactly one chunk, identified by SLURM_ARRAY_TASK_ID.
nC = 30 #nr of columns
nR = 30 #nr of rows
rowst = rows / nR   # chunk height in pixels (float; cast to int below)
colst = cols / nC   # chunk width in pixels (float; cast to int below)
i = int( os.environ['SLURM_ARRAY_TASK_ID'] )   # this task's chunk index (0 .. nR*nC-1)
r = i //nC   # chunk row index
c = i % nC   # chunk column index
print (i,r,c)
# pixel bounds of this chunk (end-exclusive)
col0 = int(c*colst)
col1 = int((c+1)*colst)
row0 = int(r*rowst)
row1 = int((r+1)*rowst)
print( f"Computing chunk {i} ({r}x{c})")
print( f"rows: {row0} - {row1}" )
print( f"cols: {col0} - {col1}" )
###### create output raster file ######
# One GeoTIFF per chunk; georeferencing is copied from the BaseMap25 input.
# NOTE(review): the full-raster geotransform is copied unchanged, so the
# chunk file keeps the whole raster's origin -- confirm that downstream
# mosaicking accounts for this offset.
ds_raster = f"output/output_{r}x{c}.tif"
#ds_raster = 'LU-CH.tif' # filename
driver_tiff = gdal.GetDriverByName('GTiff') # GeoTiff
ds = driver_tiff.Create(ds_raster, col1-col0, row1-row0, 1, gdal.GDT_Byte) # create the output file
ds.SetGeoTransform(raster.GetGeoTransform()) # get the coordinate system
ds.SetProjection(raster.GetProjection()) # get the projection
ds.FlushCache() # save file
ds = None # close file
##### open expert table #####
# Expert rules: for each BaseMap25 class, which Landuse100 categories are
# acceptable and with which weight (1 = possible, 2 = unique, 3 = default).
#loc = 'expert_table_72cat_v4.xls' # path to the expert table
originalx = 'expert_table_72cat_v4.xls'
targetx = '/scratch/expert_table_72cat_v4.xls'
shutil.copyfile(originalx, targetx)
sheet = pd.read_excel(io=targetx, sheet_name="Sheet1")
xls_cols = len(sheet.columns)
print("Excel cols ", xls_cols)
xls_rows = len(sheet.index)
print("Excel rows ", xls_rows)
#iterate by lines and columns
# Main downscaling loop: for each BaseMap25 pixel of this chunk, pick the
# Landuse100 category that best matches the expert rules, weighted by the
# inverse distance to the neighbouring Landuse100 cells.
for y in range(row0, row1):
    for x in range(col0, col1):
        value = data[y, x] #get pixel value (BaseMap25)
        if value > 0 and value < 255: #only do something if the pixel value is greater than 0 (0=country mask) and smaller than 255 (no data)
            #print('BaseMap25 - Row:'+str(y), 'Column:'+str(x), 'Value:'+str(value)) #to locate current pixel and value
            ##############################################################################################################
            #Step 5: According to expert system table, select those categories that could be elected for the current pixel
            ##############################################################################################################
            BMvalue1 = [] # create an empty array to be filled by values 1 for BaspeMap25
            BMvalue2 = [] # create an empty array to be filled by values 2 for BaspeMap25
            BMvalue3 = [] # create an empty array to be filled by values 3 for BaspeMap25
            # NOTE(review): this loop rebinds `i`, shadowing the SLURM chunk
            # index computed above (only used in earlier prints, so harmless).
            # Scanning the whole expert table once per pixel is also very slow;
            # the per-class rule lists could be precomputed before the loop.
            for i in range(xls_cols): #iterate in columns to find the BaseMap25 value
                if sheet.iat[1, i] == value: #once identified the corresponding value
                    j = 2 #start at the 3rd row to remove headers
                    while j < xls_rows: #read the identified column
                        if sheet.iat[j, i] == 1: #acceptable weight values for 1, possible choices
                            BMvalue1.append(str(int(sheet.iat[j, 1]))+';'+str(sheet.iat[j, 2])+';'+str(int(sheet.iat[j, i]))) #insert [CODE, Landuse100, weight]
                        if sheet.iat[j, i] == 2: #acceptable weight values for 2, unique choice
                            BMvalue2.append(str(int(sheet.iat[j, 1]))+';'+str(sheet.iat[j, 2])+';'+str(int(sheet.iat[j, i]))) #insert [CODE, Landuse100, weight]
                        if sheet.iat[j, i] == 3: #acceptable weight values for 3, best replacement choice in case of lack of decision
                            BMvalue3.append(str(int(sheet.iat[j, 1]))+';'+str(sheet.iat[j, 2])+';'+str(int(sheet.iat[j, i]))) #insert [CODE, Landuse100, weight]
                        j = j+1 #iterate until last row of the expert table
            #print('Number of acceptable values 1 in the expert table:' + str(len(BMvalue1)))
            #print('Number of acceptable values 2 in the expert table:' + str(len(BMvalue2)))
            #print('Number of acceptable values 3 in the expert table:' + str(len(BMvalue3)))
            ############################################################################################
            # Step 6: Select among the 36 nearest Landuse100 neigbours those with acceptable categories
            # Input: Landuse100 is from geostat and for which we will look for the 36 nearest neighboors
            ############################################################################################
            sizeWin = 20 #definition of the size of the window to identify nearest neighboors, should be 24 to match 600m
            value2 = data2[y, x] # get pixel value (Landuse100)
            #print('Landuse100 - Row:'+str(y)+' Column:'+str(x)+' Value:'+str(value2))
            LUvalue = [] # create an empty array to be filled by values for Landuse100
            #iterate in the neighbours window starting from the UL corner
            # A 6x6 window with a 4-pixel stride samples the 25m grid at the
            # 100m land-use resolution around the current pixel.
            #yRow = y - round(sizeWin/2) #UL coordinate for origin of window
            yRow = round(y/4)*4 - 9
            #for a in range(sizeWin): # row
            for a in range(6):
                #xCol = x - round(sizeWin/2) # UL coordinate for origin of window
                xCol = round(x/4)*4 - 10
                #for b in range(sizeWin): # column
                for b in range(6):
                    #print("yRow, xCol :", yRow, xCol)
                    if (yRow >= 0 and xCol >= 0 and yRow < rows and xCol < cols):
                        if data2[yRow, xCol] < 255: # only pixel values inside Switzerland, nodata = 255
                            LUvalue.append(
                                str(yRow) + ';' + str(xCol) + ';' + str(data2[yRow, xCol])) # insert [Row;Column;Value]
                    xCol = xCol + 4 #move from 4 pixels to correspond to a 100 pixel
                    #print("search x", xCol)
                yRow = yRow + 4 #move form 4 pixels to correspond to a 100 pixel
                #print("search y", yRow)
            #print('Number of acceptable values in Landuse100:' + str(len(LUvalue)))
            if (len(LUvalue)) == 0: #if not acceptable values, empty array
                print('Landuse100 array is empty')
                uniqueValues = [0] #then the uniqueValues array is equal to 0 > pixelArrayValue will be empty
            ########################################################################
            # Step 7: Calculate the inverse distance to each neighbour
            # Step 8: Sum up the inverse distances for each category
            # Step 9: Assign the category with higher score to the BaseMap25 pixel
            # Input: LUvalue array; [optional] Alti 25 for Z values
            ########################################################################
            newArray = []
            pixelValueArray = []
            pixelValue = 0
            # NOTE(review): this reset makes the `uniqueValues = [0]` assignment
            # just above dead code -- confirm that is intended.
            uniqueValues = []
            ###### Case 2 #####
            #print('BMValue1 length:' + str(len(BMvalue1)))
            #print('BMValue2 length:' + str(len(BMvalue2)))
            #print('BMValue3 length:' + str(len(BMvalue3)))
            if len(BMvalue2) > 0: # unique value case; BM25 value = 2 then assign the only value possible in LU100
                pixelValue = BMvalue2[0].split(';')[0] # directly assign the value
                #print('Assigned pixel value case 2: ' + str(pixelValue))
            ###### Case 3 #####
            if len(BMvalue1) > 0 and len(BMvalue3) > 0 and len(BMvalue2)==0: #case with possible value (1) and (3); (3) = default choice
                for d in range(len(LUvalue)):
                    newArray.append(LUvalue[d].split(';')[2]) # position 2 is the value
                uniqueValues = numpy.unique(newArray) # get unique values from the array
                for m in range(len(BMvalue1)): #iterate in all possible values for BM25 class = 1
                    for n in range(len(uniqueValues)): #iterate in all possible unique values of LU100
                        if uniqueValues[n] == BMvalue1[m].split(';')[0]: #compare values from BM25 and LU100
                            pixelValueArray.append(int(uniqueValues[n])) #insert in array only acceptable values
                for m in range(len(BMvalue3)): #iterate in all possible values for BM25 class = 3
                    for n in range(len(uniqueValues)): #iterate in all possible unique values of LU100
                        if uniqueValues[n] == BMvalue3[m].split(';')[0]: #compare values from BM25 and LU100
                            pixelValueArray.append(int(uniqueValues[n])) #insert in array only acceptable values
                if len(pixelValueArray) == 1: # if only 1 value is stored in the array
                    pixelValue = int(pixelValueArray[0]) # assign the new pixel value to be written in the new raster file
                    #print('Assigned pixel value DD: ' + str(pixelValue))
                elif len(pixelValueArray) == 0: #in case the acceptable value array is empty, assign the default (3) value
                    pixelValue = BMvalue3[0].split(';')[0] # assign the default (3) value
                    #print('Assigned default pixel value case 3 ' + str(pixelValue))
                else:
                    # Several candidate categories: score each by the sum of
                    # inverse-distance weights of its occurrences in the window.
                    pxVal = [] # store class and sum of IDW
                    pxVal2 = [] # store only IDW values to identify the highest one
                    for l in range(
                            len(pixelValueArray)): # iterate in LUvalue array to get position and calculate distances
                        px = [] # array for measuring distance
                        idwClass = 0 # used for summing IDW
                        for i in range(len(LUvalue)):
                            if pixelValueArray[l] == int(LUvalue[i].split(';')[2]): # ensure that we iterate only with acceptable LU100 values
                                px.append(LUvalue[i])
                                # initial pixel position corresponds to BM25; y and x variables
                                dY = abs(y - int(LUvalue[i].split(';')[0])) # distance following rows in pixel value
                                dX = abs(x - int(LUvalue[i].split(';')[1])) # distance following columns in pixel value
                                # rangeXY normalizes distances that could be expressed in other units/dimensions,
                                # e.g. metres versus satellite-image reflectances.
                                distXYZ = math.sqrt((dX ** 2) + (dY ** 2)) # hypotenuse
                                rangeXY = 18.38 # sqrt (13^2+13^2) distance max 13 pixels
                                lissage = 0.1 # smoothing term, between 0.01 and 1
                                IDW = 1 / (distXYZ/rangeXY + lissage)
                                idwClass = idwClass + IDW # sum IDW by acceptable categories
                                #rangeXY = 13 # NOT SURE, (maybe 24) as I understand range corresponds to the extent of the window
                                #distXYZ = (dX / rangeXY) + (dY / rangeXY)
                                #distXYZ = math.sqrt((dX ** 2) + (dY ** 2)) # hypotenuse
                                #distXYZ= (dX - dY) / 36
                                #print("dx", dX, "dy", dY, "dist", distXYZ)
                                #if distXYZ >= 0 and distXYZ < 2.3: # 0 means that we are at the same location of BM25 pixel; avoid div 0
                                #    IDW = 1/(distXYZ+0.1)
                                #if distXYZ >= 2.3: # 0 means that we are at the same location of BM25 pixel; avoid div 0
                                #    IDW = 0.25/(distXYZ)
                                #idwClass = idwClass + IDW # sum IDW by acceptable categories
                        #print('Number of pixels for class ' + str(pixelValueArray[l]) + ': ' + str(len(px)))
                        #print('IDW for class ' + str(pixelValueArray[l]) + ': ' + str(idwClass))
                        pxVal.append(str(pixelValueArray[l]) + ';' + str(idwClass)) # array with class and sum of IDW
                        pxVal2.append(str(idwClass))
                    # assign pixel value to the category with highest IDW
                    # NOTE(review): the lambda parameter `x` shadows the pixel
                    # column variable `x` (only inside the lambda -- safe, but
                    # confusing to read).
                    highIDW3 = max(pxVal2, key=lambda x: float(x)) # get the highest sum of IDW
                    for g in range(len(pxVal)):
                        if highIDW3 == pxVal[g].split(';')[1]:
                            pixelValue = pxVal[g].split(';')[0]
                            #print('Assigned pixel value case 3f: ' + str(value) + ":" + str(pixelValue))
            ###########################################################################################
            #Write output raster file
            # Input: ds_raster
            ###########################################################################################
            #IDEA: instead of replacing value > fill an array and write it as once at the end
            # NOTE(review): the output GeoTIFF is reopened, fully read and fully
            # rewritten for EVERY qualifying pixel -- this dominates the runtime;
            # keeping the band array in memory and writing once per chunk (as the
            # IDEA comment above suggests) would be far faster.
            ras_out = gdal.Open(ds_raster, gdal.GA_Update)
            band1 = ras_out.GetRasterBand(1).ReadAsArray() #read the output file
            #print('Assigned pixel value final: ' + str(x) + ' ' + str(y)+ ' ' + str(pixelValue))
            #band1[0][x-col0] = 255
            band1[y-row0][x-col0] = pixelValue
            ras_out.GetRasterBand(1).WriteArray(band1) #write value
            ras_out.FlushCache() # save file
            ras_out = None #clear
            #quit() #stop after first value (only for test)
##################################################################################################################
#Step 11: [optional] Replace categories wherever river, road or train linear segments are available from BaseMap25
##################################################################################################################
| [
"osgeo.gdal.Open",
"numpy.unique",
"math.sqrt",
"shutil.copyfile",
"pandas.read_excel",
"osgeo.gdal.GetDriverByName"
] | [((1436, 1471), 'shutil.copyfile', 'shutil.copyfile', (['originalp', 'targetp'], {}), '(originalp, targetp)\n', (1451, 1471), False, 'import shutil\n'), ((1541, 1576), 'shutil.copyfile', 'shutil.copyfile', (['originala', 'targeta'], {}), '(originala, targeta)\n', (1556, 1576), False, 'import shutil\n'), ((1643, 1661), 'osgeo.gdal.Open', 'gdal.Open', (['targetp'], {}), '(targetp)\n', (1652, 1661), False, 'from osgeo import gdal\n'), ((2041, 2059), 'osgeo.gdal.Open', 'gdal.Open', (['targeta'], {}), '(targeta)\n', (2050, 2059), False, 'from osgeo import gdal\n'), ((2798, 2827), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (2818, 2827), False, 'from osgeo import gdal\n'), ((3315, 3350), 'shutil.copyfile', 'shutil.copyfile', (['originalx', 'targetx'], {}), '(originalx, targetx)\n', (3330, 3350), False, 'import shutil\n'), ((3360, 3406), 'pandas.read_excel', 'pd.read_excel', ([], {'io': 'targetx', 'sheet_name': '"""Sheet1"""'}), "(io=targetx, sheet_name='Sheet1')\n", (3373, 3406), True, 'import pandas as pd\n'), ((15017, 15053), 'osgeo.gdal.Open', 'gdal.Open', (['ds_raster', 'gdal.GA_Update'], {}), '(ds_raster, gdal.GA_Update)\n', (15026, 15053), False, 'from osgeo import gdal\n'), ((9753, 9775), 'numpy.unique', 'numpy.unique', (['newArray'], {}), '(newArray)\n', (9765, 9775), False, 'import numpy, math\n'), ((12519, 12547), 'math.sqrt', 'math.sqrt', (['(dX ** 2 + dY ** 2)'], {}), '(dX ** 2 + dY ** 2)\n', (12528, 12547), False, 'import numpy, math\n')] |
from games.abstract_state import GameState, ArraySlice
from utils import np_one_hot, one_hot_to_int, one_hot_arrays_to_list_of_ints
import numpy as np
from numba import jit
@jit(nopython=True)
def extract_tricks(full_state, first_row, num_played_cards):
    """Return the rows of ``full_state`` that hold the current trick.

    Tricks are 3 cards each, stored as consecutive rows starting at
    ``first_row``.  A just-completed trick (``num_played_cards`` divisible
    by 3) still counts as the current one until the next card is played —
    hence the ``- 1`` below.  Numba-compiled (nopython) for speed.
    """
    finished_tricks = max(0, (num_played_cards - 1) // 3)
    first_row_of_trick = first_row + 3 * finished_tricks
    num_cards_in_trick = num_played_cards - 3 * finished_tricks
    # Slice of the gameplay rows belonging to the (possibly partial) trick.
    return full_state[first_row_of_trick:first_row_of_trick + num_cards_in_trick]
class RamschState(GameState):
    """State of a Ramsch card game, backed by a single 2-D ``full_state`` array.

    Row layout (top to bottom): ``status_rows`` bookkeeping rows,
    ``hand_rows`` hand rows (the last of which is the skat, see ``skat``
    below), ``gameplay_rows`` rows (one per played card),
    ``implication_rows`` rows of card-knowledge implications, and
    ``redundant_rows`` trailing rows stripped before feeding the network.
    The ``ArraySlice`` descriptors presumably expose named views into
    ``full_state`` — they come from games.abstract_state, not visible here.
    """
    status_rows = 2
    hand_rows = 4
    gameplay_rows = 30
    implication_rows = 4
    redundant_rows = 3
    # Named views into full_state.  Row 0 packs the dealer one-hot (cols 0-2),
    # the current scores (cols 3-5) and the active player id (col 6); the
    # first cell of the last row counts played cards.
    dealer = ArraySlice(slice_in_array=(0, slice(0, 3)))
    current_scores = ArraySlice(slice_in_array=(0, slice(3, 6)))
    num_played_cards = ArraySlice(slice_in_array=(-1, 0))
    player_id_sequence = ArraySlice(slice_in_array=1)
    active_player = ArraySlice(slice_in_array=(0, 6))
    all_hands = ArraySlice(slice_in_array=slice(
        status_rows, status_rows + hand_rows))
    skat = ArraySlice(slice_in_array=status_rows + hand_rows-1)
    implications = ArraySlice(slice_in_array=slice(status_rows + hand_rows + gameplay_rows,
                                                    status_rows + hand_rows + gameplay_rows + implication_rows))
    played_cards = ArraySlice(slice_in_array=slice(
        status_rows + hand_rows, status_rows + hand_rows+gameplay_rows))
    @classmethod
    def from_initial_hands(cls, initial_hands, dealer, hands_as_ints=True):
        """Build a fresh pre-game state from dealt hands.

        ``initial_hands`` is either a list of card-int lists
        (``hands_as_ints`` True) or an already one-hot-encoded array.
        """
        instance = cls(full_state=None)
        if hands_as_ints:
            # Encode each hand as a 1x32 one-hot row, then stack them.
            np_hands = [np_one_hot(hand, 32)[None, :]
                        for hand in initial_hands]
            instance.all_hands = np.concatenate(np_hands, axis=0)
        else:
            instance.all_hands = initial_hands
        instance.dealer = np_one_hot(dealer, 3)
        # The player after the dealer leads.
        instance.active_player = (dealer + 1) % 3
        return instance
    def state_for_player(self, player_id):
        """Return a copy as seen by ``player_id``: other hand rows zeroed."""
        new_state = RamschState(self.full_state.copy())
        new_state.add_private_implications(player_id)
        for i in range(self.hand_rows):
            if i != player_id:
                # Hide every hand row except the player's own (this also
                # hides the skat row, since hand_rows covers it).
                new_state.full_state[self.status_rows + i] = 0
        return new_state
    @property
    def state_for_nn(self):
        """Normalized copy for the network, redundant rows stripped.

        NOTE(review): the ``dtype`` kwarg is forwarded to the GameState
        constructor — confirm it casts ``full_state`` to float32.
        """
        new_state = RamschState(self.full_state.copy(), dtype=np.float32)
        # Scale scores and the id sequence into small ranges for the NN.
        new_state.current_scores = new_state.current_scores / 30.0
        new_state.player_id_sequence = (
            new_state.player_id_sequence - 1.0) / 10.0
        result = new_state.full_state[:-self.redundant_rows]
        return result
    @staticmethod
    def full_state_from_partial_and_initial_hands(partial_state, initial_hands):
        """Rebuild a full state from a partial (hidden-hands) state and the
        known initial hands: hands = initial hands minus cards already played."""
        inconsistencies = partial_state.all_hands * (1-initial_hands)
        # guess on initial hands shouldn't be inconsistent
        assert inconsistencies.sum() == 0
        # 1 for every card not yet on the table.
        still_in_play = 1 - np.sum(partial_state.played_cards, axis=0)
        full_state = RamschState(partial_state.full_state.copy())
        full_state.all_hands = initial_hands * still_in_play[None, :]
        return full_state
    def recover_init_state(self, initial_hands):
        """Return a fresh pre-game state with this state's dealer and the
        given (already one-hot) hands."""
        new_state = RamschState.from_initial_hands(
            initial_hands, one_hot_to_int(self.dealer), hands_as_ints=False)
        return new_state
    @property
    def actions_taken(self):
        """Cards played so far as ints (alias of ``played_cards_as_ints``)."""
        return self.played_cards_as_ints
    def add_private_implications(self, player_id):
        """Mark the cards held by ``player_id`` as known (1) in an implication row.

        NOTE(review): the row is indexed by ``self.active_player`` while the
        cards come from ``player_id`` — confirm this asymmetry is intended.
        """
        active_row = self.status_rows + self.hand_rows + \
            self.gameplay_rows + self.active_player
        for card in self.hands_as_ints[player_id]:
            # -1 means "known NOT to have" — contradicts actually holding it.
            assert self.full_state[active_row][card] != -1
            self.full_state[active_row][card] = 1
    @property
    def hands_as_ints(self):
        """Each hand decoded from one-hot rows to arrays of card ints."""
        return [one_hot_to_int(hand) for hand in self.all_hands]
    @property
    def skat_as_ints(self):
        """The skat row decoded to card ints."""
        return one_hot_to_int(self.skat)
    @property
    def current_trick_as_ints(self):
        # returns a list of ints
        return one_hot_arrays_to_list_of_ints(self.current_trick)
    @property
    def played_cards_as_ints(self):
        """Cards played so far (in play order), decoded to ints."""
        played_cards = [one_hot_to_int(card) for card in self.played_cards]
        return [item[0] for item in played_cards[:self.num_played_cards]]
    @property
    def players_of_all_played_cards(self):
        """Player id for each card played so far, in play order."""
        return self.player_id_sequence[:self.num_played_cards]
    @property
    def starter_of_current_trick(self):
        """Id of the player who led the current trick."""
        finished_tricks = max(0, (self.num_played_cards - 1) // 3)
        return self.player_id_sequence[3 * finished_tricks]
    def play_card(self, card):
        """Play ``card`` (an int) for the active player and advance the turn."""
        # The player has the card
        assert self.all_hands[self.active_player][card] == 1
        card_as_one_hot = np_one_hot(card, 32)
        # Add card to new trick
        current_row = self.status_rows + self.hand_rows + self.num_played_cards
        self.full_state[current_row] = card_as_one_hot
        # Take the card from player
        player_row = self.status_rows + self.active_player
        self.full_state[player_row][card] = 0
        # Record who played this card.  Read-modify-write because assignment
        # presumably has to go through the ArraySlice descriptor.
        sequence = self.player_id_sequence
        sequence[self.num_played_cards] = self.active_player
        self.player_id_sequence = sequence
        self.num_played_cards = self.num_played_cards + 1
        self.active_player = (self.active_player + 1) % 3
    def apply_public_implications(self, has_cards, doesnt_have_cards):
        """Record public knowledge about the active player's cards.

        ``has_cards`` are marked 1 (known held); ``doesnt_have_cards`` are
        marked -1, but only where nothing is known yet (0).
        """
        current_row = self.status_rows + self.hand_rows + \
            self.gameplay_rows + self.active_player
        for card in has_cards:
            assert self.full_state[current_row][card] != -1
            self.full_state[current_row][card] = 1
        for card in doesnt_have_cards:
            if self.full_state[current_row][card] == 0: # If status unknown
                self.full_state[current_row][card] = -1
    @property
    def current_trick(self):
        """One-hot rows of the (possibly partial) current trick."""
        return extract_tricks(self.full_state, self.status_rows + self.hand_rows, self.num_played_cards)
    def check_sound(self):
        """Sanity check: each card appears exactly once across hands + plays."""
        card_row_start = self.status_rows
        num_card_rows = self.hand_rows + self.gameplay_rows
        card_sums = np.sum(
            self.full_state[card_row_start:card_row_start+num_card_rows], axis=0)
        assert np.abs(card_sums - 1).sum() == 0, card_sums
| [
"numpy.abs",
"utils.one_hot_arrays_to_list_of_ints",
"utils.np_one_hot",
"games.abstract_state.ArraySlice",
"numpy.sum",
"numba.jit",
"numpy.concatenate",
"utils.one_hot_to_int"
] | [((176, 194), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (179, 194), False, 'from numba import jit\n'), ((804, 838), 'games.abstract_state.ArraySlice', 'ArraySlice', ([], {'slice_in_array': '(-1, 0)'}), '(slice_in_array=(-1, 0))\n', (814, 838), False, 'from games.abstract_state import GameState, ArraySlice\n'), ((864, 892), 'games.abstract_state.ArraySlice', 'ArraySlice', ([], {'slice_in_array': '(1)'}), '(slice_in_array=1)\n', (874, 892), False, 'from games.abstract_state import GameState, ArraySlice\n'), ((913, 946), 'games.abstract_state.ArraySlice', 'ArraySlice', ([], {'slice_in_array': '(0, 6)'}), '(slice_in_array=(0, 6))\n', (923, 946), False, 'from games.abstract_state import GameState, ArraySlice\n'), ((1054, 1108), 'games.abstract_state.ArraySlice', 'ArraySlice', ([], {'slice_in_array': '(status_rows + hand_rows - 1)'}), '(slice_in_array=status_rows + hand_rows - 1)\n', (1064, 1108), False, 'from games.abstract_state import GameState, ArraySlice\n'), ((1854, 1875), 'utils.np_one_hot', 'np_one_hot', (['dealer', '(3)'], {}), '(dealer, 3)\n', (1864, 1875), False, 'from utils import np_one_hot, one_hot_to_int, one_hot_arrays_to_list_of_ints\n'), ((3909, 3934), 'utils.one_hot_to_int', 'one_hot_to_int', (['self.skat'], {}), '(self.skat)\n', (3923, 3934), False, 'from utils import np_one_hot, one_hot_to_int, one_hot_arrays_to_list_of_ints\n'), ((4035, 4085), 'utils.one_hot_arrays_to_list_of_ints', 'one_hot_arrays_to_list_of_ints', (['self.current_trick'], {}), '(self.current_trick)\n', (4065, 4085), False, 'from utils import np_one_hot, one_hot_to_int, one_hot_arrays_to_list_of_ints\n'), ((4743, 4763), 'utils.np_one_hot', 'np_one_hot', (['card', '(32)'], {}), '(card, 32)\n', (4753, 4763), False, 'from utils import np_one_hot, one_hot_to_int, one_hot_arrays_to_list_of_ints\n'), ((6137, 6215), 'numpy.sum', 'np.sum', (['self.full_state[card_row_start:card_row_start + num_card_rows]'], {'axis': '(0)'}), 
'(self.full_state[card_row_start:card_row_start + num_card_rows], axis=0)\n', (6143, 6215), True, 'import numpy as np\n'), ((1734, 1766), 'numpy.concatenate', 'np.concatenate', (['np_hands'], {'axis': '(0)'}), '(np_hands, axis=0)\n', (1748, 1766), True, 'import numpy as np\n'), ((2925, 2967), 'numpy.sum', 'np.sum', (['partial_state.played_cards'], {'axis': '(0)'}), '(partial_state.played_cards, axis=0)\n', (2931, 2967), True, 'import numpy as np\n'), ((3259, 3286), 'utils.one_hot_to_int', 'one_hot_to_int', (['self.dealer'], {}), '(self.dealer)\n', (3273, 3286), False, 'from utils import np_one_hot, one_hot_to_int, one_hot_arrays_to_list_of_ints\n'), ((3802, 3822), 'utils.one_hot_to_int', 'one_hot_to_int', (['hand'], {}), '(hand)\n', (3816, 3822), False, 'from utils import np_one_hot, one_hot_to_int, one_hot_arrays_to_list_of_ints\n'), ((4161, 4181), 'utils.one_hot_to_int', 'one_hot_to_int', (['card'], {}), '(card)\n', (4175, 4181), False, 'from utils import np_one_hot, one_hot_to_int, one_hot_arrays_to_list_of_ints\n'), ((1620, 1640), 'utils.np_one_hot', 'np_one_hot', (['hand', '(32)'], {}), '(hand, 32)\n', (1630, 1640), False, 'from utils import np_one_hot, one_hot_to_int, one_hot_arrays_to_list_of_ints\n'), ((6242, 6263), 'numpy.abs', 'np.abs', (['(card_sums - 1)'], {}), '(card_sums - 1)\n', (6248, 6263), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import threading
import operator
from random import random, randint, gauss, shuffle, choice
from math import exp
from scipy.spatial.distance import euclidean
import numpy as np
import numba as nb
# class GOThread(threading.Thread):
# def __init__(self, target, *args, **kwargs):
# self.target = operator.methodcaller(target) if isinstance(target, str) else target
# super(GOThread, self).__init__(target=target, *args, **kwargs)
# def parallel(func, individuals, *args, **kwargs):
# threads = [GOThread(target=func, args=(individual,)+args, kwargs=kwargs) for individual in individuals]
# for thread in threads:
# thread.start()
# for thread in threads:
# thread.join()
# return [thread.result for thread in threads]
def binary_select(a, b, p=0.5):
    """Return ``a`` with probability ``p``, otherwise ``b``."""
    return a if random() < p else b
from scipy.special import softmax
from scipy.stats import rv_discrete
def boltzmann_select(xs, fs, T=1):
    """Draw one element of ``xs`` with Boltzmann probabilities softmax(fs / T)."""
    probs = softmax(np.array(fs) / T)
    sampler = rv_discrete(values=(np.arange(len(xs)), probs))
    return xs[sampler.rvs()]
def choice_with_prob(xs, ps, n=1):
    """Draw ``n`` distinct elements from ``xs`` without replacement.

    Weights are renormalized over the remaining elements after each draw.

    Parameters
    ----------
    xs : sequence
        Candidates to draw from.
    ps : array-like
        Non-negative selection weights, one per element of ``xs`` (need not
        sum to 1; plain Python lists are accepted).
    n : int
        Number of distinct elements to draw (``n <= len(xs)``).

    Returns
    -------
    list
        The ``n`` selected elements, in draw order.
    """
    # Work on a private float copy: the previous in-place ``ps /= sum``
    # mutated the caller's array (and raised TypeError for plain lists).
    ps = np.asarray(ps, dtype=float)
    ps = ps / np.sum(ps)
    remaining = np.arange(len(xs))  # indices of not-yet-drawn elements
    ks = []
    for _ in range(n):
        rv = rv_discrete(values=(np.arange(len(remaining)), ps))
        k = rv.rvs()
        ks.append(remaining[k])
        remaining = np.delete(remaining, k)
        if len(ks) < n:
            # Renormalize only while more draws remain; the old code divided
            # by zero (0/0 -> NaN) after the last draw when all mass was used.
            ps = np.delete(ps, k)
            ps = ps / np.sum(ps)
    return [xs[k] for k in ks]
def choice_with_prob_replace(xs, ps, n=1):
    """Draw ``n`` elements from ``xs`` with replacement, weighted by ``ps``.

    ``ps`` may be any array-like of non-negative weights; it is copied and
    normalized internally (the previous in-place ``ps /= sum`` mutated the
    caller's array and raised TypeError for plain lists).

    Returns a list of the ``n`` drawn elements.
    """
    ps = np.asarray(ps, dtype=float)
    ps = ps / np.sum(ps)
    rv = rv_discrete(values=(np.arange(len(xs)), ps))
    ks = rv.rvs(size=n)
    return [xs[k] for k in ks]
from toolz import unique
def choice_with_prob_unique(xs, ps, n=1):
    """Draw ``n`` weighted elements with replacement, then deduplicate.

    Returns the unique selected elements in first-draw order, so the result
    may contain fewer than ``n`` items.
    """
    # Private normalized copy: don't mutate the caller's weight array.
    ps = np.asarray(ps, dtype=float)
    ps = ps / np.sum(ps)
    rv = rv_discrete(values=(np.arange(len(xs)), ps))
    ks = rv.rvs(size=n)
    # Order-preserving dedupe via dict.fromkeys — replaces the third-party
    # toolz.unique dependency with the standard library.
    seen = dict.fromkeys(int(k) for k in np.atleast_1d(ks))
    return [xs[k] for k in seen]
def choice_with_fitness(xs, fs=None, n=1, T=1):
    """Select ``n`` individuals from ``xs`` with Boltzmann (softmax) weights.

    If ``fs`` is None, fitness values are read from each ``x.fitness``.
    ``T`` is the softmax temperature.
    """
    if fs is None:
        fs = [x.fitness for x in xs]
    ps = softmax(np.array(fs) / T)
    # Bug fix: n was hard-coded to 1 here, silently ignoring the caller's n.
    return choice_with_prob(xs, ps, n=n)
def choice_uniform(xs, n=1):
    """Pick ``n`` elements of ``xs`` uniformly at random (with replacement)."""
    indices = np.random.choice(len(xs), n)
    return [xs[i] for i in indices]
def randint2(lb=0, ub=9, ordered=False):
    """Select two different numbers in [lb, ub] randomly.

    Formally i != j ~ U(lb, ub).  Applied in GA operations.

    Keyword Arguments:
        lb {number} -- lower bound of interval (default: {0})
        ub {number} -- upper bound of interval (default: {9})
        ordered {bool} -- if True, return the pair sorted ascending

    Returns:
        two numbers
    """
    first = randint(lb, ub)
    span = ub - lb
    # Draw the second value from the `span` slots following `first`, wrapping
    # around the interval — this guarantees second != first while staying
    # uniform over the remaining values.
    second = randint(first + 1, first + span)
    if second > ub:
        second -= span + 1
    if ordered and second < first:
        return second, first
    return first, second
@nb.vectorize()
def max0(x):
    """Element-wise clamp of negative values to zero (ReLU-style)."""
    if x <= 0:
        return 0
    return x
def max_lb(lb):
    """Return a vectorized function that clamps inputs to a minimum of ``lb``."""
    @nb.vectorize()
    def m(x):
        if x <= lb:
            return lb
        return x
    return m
@nb.vectorize()
def hl(x):
    """Element-wise hard limit of ``x`` into [0, 1] (saturates at both ends)."""
    if x <= 0:
        return 0
    if x >= 1:
        return 1
    return x
def metropolis_rule(D, T, epsilon=0.000001):
    """Metropolis acceptance rule.

    Always accept a non-negative change ``D``; accept a degradation
    (``D < 0``) with probability exp(D / T), where the temperature ``T`` is
    floored at ``epsilon`` to avoid division by zero.
    """
    if D >= 0:
        return True
    acceptance = exp(D / max(T, epsilon))
    return random() < acceptance
def proportion(n):
if n is None:
n = D
elif 0 < n < 1:
n = int(N * n) | [
"numba.vectorize",
"numpy.random.choice",
"numpy.delete",
"numpy.sum",
"numpy.array",
"random.random",
"random.randint",
"numpy.arange"
] | [((2760, 2774), 'numba.vectorize', 'nb.vectorize', ([], {}), '()\n', (2772, 2774), True, 'import numba as nb\n'), ((2917, 2931), 'numba.vectorize', 'nb.vectorize', ([], {}), '()\n', (2929, 2931), True, 'import numba as nb\n'), ((1226, 1236), 'numpy.sum', 'np.sum', (['ps'], {}), '(ps)\n', (1232, 1236), True, 'import numpy as np\n'), ((1245, 1257), 'numpy.arange', 'np.arange', (['L'], {}), '(L)\n', (1254, 1257), True, 'import numpy as np\n'), ((1591, 1601), 'numpy.sum', 'np.sum', (['ps'], {}), '(ps)\n', (1597, 1601), True, 'import numpy as np\n'), ((1799, 1809), 'numpy.sum', 'np.sum', (['ps'], {}), '(ps)\n', (1805, 1809), True, 'import numpy as np\n'), ((2158, 2180), 'numpy.random.choice', 'np.random.choice', (['L', 'n'], {}), '(L, n)\n', (2174, 2180), True, 'import numpy as np\n'), ((2590, 2605), 'random.randint', 'randint', (['lb', 'ub'], {}), '(lb, ub)\n', (2597, 2605), False, 'from random import random, randint, gauss, shuffle, choice\n'), ((2630, 2651), 'random.randint', 'randint', (['(i + 1)', '(d + i)'], {}), '(i + 1, d + i)\n', (2637, 2651), False, 'from random import random, randint, gauss, shuffle, choice\n'), ((2838, 2852), 'numba.vectorize', 'nb.vectorize', ([], {}), '()\n', (2850, 2852), True, 'import numba as nb\n'), ((866, 874), 'random.random', 'random', ([], {}), '()\n', (872, 874), False, 'from random import random, randint, gauss, shuffle, choice\n'), ((1402, 1417), 'numpy.delete', 'np.delete', (['X', 'k'], {}), '(X, k)\n', (1411, 1417), True, 'import numpy as np\n'), ((1431, 1447), 'numpy.delete', 'np.delete', (['ps', 'k'], {}), '(ps, k)\n', (1440, 1447), True, 'import numpy as np\n'), ((1462, 1472), 'numpy.sum', 'np.sum', (['ps'], {}), '(ps)\n', (1468, 1472), True, 'import numpy as np\n'), ((1064, 1076), 'numpy.array', 'np.array', (['fs'], {}), '(fs)\n', (1072, 1076), True, 'import numpy as np\n'), ((2044, 2056), 'numpy.array', 'np.array', (['fs'], {}), '(fs)\n', (2052, 2056), True, 'import numpy as np\n'), ((3104, 3112), 'random.random', 
'random', ([], {}), '()\n', (3110, 3112), False, 'from random import random, randint, gauss, shuffle, choice\n'), ((1110, 1122), 'numpy.arange', 'np.arange', (['L'], {}), '(L)\n', (1119, 1122), True, 'import numpy as np\n'), ((1631, 1643), 'numpy.arange', 'np.arange', (['L'], {}), '(L)\n', (1640, 1643), True, 'import numpy as np\n'), ((1839, 1851), 'numpy.arange', 'np.arange', (['L'], {}), '(L)\n', (1848, 1851), True, 'import numpy as np\n'), ((1326, 1338), 'numpy.arange', 'np.arange', (['L'], {}), '(L)\n', (1335, 1338), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2016 <NAME> (http://www.jdhp.org)
# This script is provided under the terms and conditions of the MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import copy
import datetime
import json
import os
import numpy as np
import random
import sys
import time
import traceback
from pywi.benchmark.metrics.refbased import mse
from pywi.processing.filtering.pixel_clusters import filter_pixels_clusters_stats
from pywi.processing.filtering.pixel_clusters import number_of_pixels_clusters
from pywi.io.images import image_generator
import pywi.io.images
###############################################################################
class AbstractCleaningAlgorithm(object):
    """A convenient optional wrapper to simplify the image cleaning analysis.
    Common processing to run and assess the image cleaning procedure on a set
    of images and save results. This class gather some common procedures to
    avoid code duplication in image cleaning modules:
    * call the cleaning algorithm on an image set;
    * assess the cleaning procedure using a set of estimators;
    * apply various pre-processing and post-processing procedures (e.g.
      geometry conversion);
    * collect and save metadata, results and intermediate values that are
      useful for analysis;
    * measure and save the execution time;
    * manage exceptions;
    * ...
    This abstract class is supposed to be inherited by the others image
    cleaning classes."""
    def __init__(self):
        self.label = "Unknown" # Name to show in plots
        self.verbose = False # Debug mode
    def __call__(self, *pargs, **kargs):
        """Make instances callable; delegates to ``clean_image`` (which
        concrete subclasses must implement)."""
        return self.clean_image(*pargs, **kargs)
    def __str__(self):
        # NOTE(review): __init__ sets `self.label`, but this reads
        # `self.algorithm_label` — presumably set by subclasses; confirm,
        # otherwise str(instance) raises AttributeError.
        return "{}".format(self.algorithm_label)
    def run(self,
            cleaning_function_params,
            input_file_or_dir_path_list,
            benchmark_method,
            output_file_path,
            plot=False,
            saveplot=None,
            ref_img_as_input=False, # A hack to easily produce CSV files...
            max_num_img=None,
            debug=False):
        """A convenient optional wrapper to simplify the image cleaning analysis.
        Apply the image cleaning analysis on `input_file_or_dir_path_list`,
        apply some pre-processing and post-processing procedures, collect and
        return results, intermediate values and metadata.
        Parameters
        ----------
        cleaning_function_params
            A dictionary containing the parameters required for the image
            cleaning method.
        input_file_or_dir_path_list
            A list of file to clean. Can be a list of simtel files, fits files
            or directories containing such files.
        benchmark_method
            The list of estimators to use to assess the image cleaning. If
            `None`, images are cleaned but nothing is returned (can be used
            with e.g. the `plot` and/or `saveplot` options).
        output_file_path
            The result file path (a JSON file).
        plot
            The result of each cleaning is plot if `True`.
        saveplot
            The result of each cleaning is saved if `True`.
        ref_img_as_input
            This option is a hack to easily produce a "flatten" CSV results
            files.
        max_num_img
            The number of images to process among the input set
            (`input_file_or_dir_path_list`).
        debug
            Stop the execution and print the full traceback when an exception
            is encountered if this parameter is `True`. Report exceptions and
            continue with the next input image if this parameter is `False`.
        Returns
        -------
        dict
            Results, intermediate values and metadata.
        """
        # Wall-clock start of the whole benchmark (reported in the metadata).
        launch_time = time.perf_counter()
        if benchmark_method is not None:
            io_list = [] # The list of returned dictionaries
        for image in image_generator(input_file_or_dir_path_list,
                                     max_num_images=max_num_img):
            input_file_path = image.meta['file_path']
            if self.verbose:
                print(input_file_path)
            # `image_dict` contains metadata (to be returned) on the current image
            image_dict = {"input_file_path": input_file_path}
            try:
                # READ THE INPUT FILE #####################################
                reference_img = image.reference_image
                # NOTE(review): `pixels_position` is read but never used below.
                pixels_position = image.pixels_position
                if ref_img_as_input:
                    # This option is a hack to easily produce CSV files with
                    # the "null_ref" "cleaning" module...
                    input_img = copy.deepcopy(reference_img)
                else:
                    input_img = image.input_image
                image_dict.update(image.meta)
                if benchmark_method is not None:
                    # FETCH ADDITIONAL IMAGE METADATA #####################
                    delta_pe, delta_abs_pe, delta_num_pixels = filter_pixels_clusters_stats(reference_img) # TODO: NaN
                    num_islands = number_of_pixels_clusters(reference_img) # TODO: NaN
                    image_dict["img_ref_islands_delta_pe"] = delta_pe
                    image_dict["img_ref_islands_delta_abs_pe"] = delta_abs_pe
                    image_dict["img_ref_islands_delta_num_pixels"] = delta_num_pixels
                    image_dict["img_ref_num_islands"] = num_islands
                    # NaN-aware stats of the reference and input images.
                    image_dict["img_ref_sum_pe"] = float(np.nansum(reference_img))
                    image_dict["img_ref_min_pe"] = float(np.nanmin(reference_img))
                    image_dict["img_ref_max_pe"] = float(np.nanmax(reference_img))
                    image_dict["img_ref_num_pix"] = int( (reference_img[np.isfinite(reference_img)] > 0).sum() )
                    image_dict["img_in_sum_pe"] = float(np.nansum(input_img))
                    image_dict["img_in_min_pe"] = float(np.nanmin(input_img))
                    image_dict["img_in_max_pe"] = float(np.nanmax(input_img))
                    image_dict["img_in_num_pix"] = int( (input_img[np.isfinite(input_img)] > 0).sum() )
                # CLEAN THE INPUT IMAGE ###################################
                # Copy the image (otherwise some cleaning functions like Tailcut may change it)
                #input_img_copy = copy.deepcopy(input_img)
                input_img_copy = input_img.astype('float64', copy=True)
                # The cleaning function may report extra values through this dict.
                cleaning_function_params["output_data_dict"] = {}
                initial_time = time.perf_counter()
                cleaned_img = self.clean_image(input_img_copy, **cleaning_function_params) # TODO: NaN
                full_clean_execution_time_sec = time.perf_counter() - initial_time
                if benchmark_method is not None:
                    image_dict.update(cleaning_function_params["output_data_dict"])
                    del cleaning_function_params["output_data_dict"]
                # ASSESS OR PRINT THE CLEANED IMAGE #######################
                if benchmark_method is not None:
                    # ASSESS THE CLEANING #################################
                    # NOTE(review): `kwargs` is assigned but never used.
                    kwargs = {} # TODO GEOM
                    score = mse(cleaned_img, reference_img)
                    image_dict["score"] = [score]
                    image_dict["score_name"] = ["mse"]
                    image_dict["full_clean_execution_time_sec"] = full_clean_execution_time_sec
                    image_dict["img_cleaned_sum_pe"] = float(np.nansum(cleaned_img))
                    image_dict["img_cleaned_min_pe"] = float(np.nanmin(cleaned_img))
                    image_dict["img_cleaned_max_pe"] = float(np.nanmax(cleaned_img))
                    image_dict["img_cleaned_num_pix"] = int( (cleaned_img[np.isfinite(cleaned_img)] > 0).sum() )
                # PLOT IMAGES #########################################################
                if plot or (saveplot is not None):
                    image_list = [input_img, reference_img, cleaned_img]
                    title_list = ["Input image", "Reference image", "Cleaned image"]
                    if plot:
                        pywi.io.images.plot_list(image_list,
                                                 title_list=title_list,
                                                 metadata_dict=image.meta)
                    if saveplot is not None:
                        plot_file_path = saveplot
                        print("Saving {}".format(plot_file_path))
                        pywi.io.images.mpl_save_list(image_list,
                                                     output_file_path=plot_file_path,
                                                     title_list=title_list,
                                                     metadata_dict=image.meta)
            except Exception as e:
                print("Abort image {}: {} ({})".format(input_file_path, e, type(e)))
                if debug:
                    # The following line print the full trackback
                    traceback.print_tb(e.__traceback__, file=sys.stdout)
                if benchmark_method is not None:
                    # http://docs.python.org/2/library/sys.html#sys.exc_info
                    exc_type, exc_value, exc_traceback = sys.exc_info() # most recent (if any) by default
                    '''
                    Reason this _can_ be bad: If an (unhandled) exception happens AFTER this,
                    or if we do not delete the labels on (not much) older versions of Py, the
                    reference we created can linger.
                    traceback.format_exc/print_exc do this very thing, BUT note this creates a
                    temp scope within the function.
                    '''
                    error_dict = {
                        'filename': exc_traceback.tb_frame.f_code.co_filename,
                        'lineno' : exc_traceback.tb_lineno,
                        'name' : exc_traceback.tb_frame.f_code.co_name,
                        'type' : exc_type.__name__,
                        #'message' : exc_value.message
                        'message' : str(e)
                    }
                    del(exc_type, exc_value, exc_traceback) # So we don't leave our local labels/objects dangling
                    # This still isn't "completely safe", though!
                    #error_dict = {"type": str(type(e)),
                    # "message": str(e)}
                    image_dict["error"] = error_dict
            finally:
                if benchmark_method is not None:
                    io_list.append(image_dict)
        if benchmark_method is not None:
            error_list = [image_dict["error"] for image_dict in io_list if "error" in image_dict]
            print("{} images aborted".format(len(error_list)))
            # GENERAL EXPERIMENT METADATA
            output_dict = {}
            output_dict["benchmark_execution_time_sec"] = str(time.perf_counter() - launch_time)
            output_dict["date_time"] = str(datetime.datetime.now())
            output_dict["class_name"] = self.__class__.__name__
            output_dict["algo_code_ref"] = str(self.__class__.clean_image.__code__)
            output_dict["label"] = self.label
            output_dict["cmd"] = " ".join(sys.argv)
            output_dict["algo_params"] = cleaning_function_params
            if "noise_distribution" in output_dict["algo_params"]:
                del output_dict["algo_params"]["noise_distribution"] # not JSON serializable...
            output_dict["benchmark_method"] = benchmark_method
            output_dict["system"] = " ".join(os.uname())
            output_dict["io"] = io_list
            with open(output_file_path, "w") as fd:
                json.dump(output_dict, fd, sort_keys=True, indent=4) # pretty print format
            return output_dict
| [
"pywi.io.images.image_generator",
"pywi.processing.filtering.pixel_clusters.filter_pixels_clusters_stats",
"pywi.benchmark.metrics.refbased.mse",
"time.perf_counter",
"traceback.print_tb",
"datetime.datetime.now",
"sys.exc_info",
"numpy.isfinite",
"numpy.nanmax",
"copy.deepcopy",
"numpy.nanmin",... | [((4905, 4924), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4922, 4924), False, 'import time\n'), ((5060, 5132), 'pywi.io.images.image_generator', 'image_generator', (['input_file_or_dir_path_list'], {'max_num_images': 'max_num_img'}), '(input_file_or_dir_path_list, max_num_images=max_num_img)\n', (5075, 5132), False, 'from pywi.io.images import image_generator\n'), ((7792, 7811), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (7809, 7811), False, 'import time\n'), ((12449, 12472), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12470, 12472), False, 'import datetime\n'), ((13060, 13070), 'os.uname', 'os.uname', ([], {}), '()\n', (13068, 13070), False, 'import os\n'), ((13181, 13233), 'json.dump', 'json.dump', (['output_dict', 'fd'], {'sort_keys': '(True)', 'indent': '(4)'}), '(output_dict, fd, sort_keys=True, indent=4)\n', (13190, 13233), False, 'import json\n'), ((5851, 5879), 'copy.deepcopy', 'copy.deepcopy', (['reference_img'], {}), '(reference_img)\n', (5864, 5879), False, 'import copy\n'), ((6190, 6233), 'pywi.processing.filtering.pixel_clusters.filter_pixels_clusters_stats', 'filter_pixels_clusters_stats', (['reference_img'], {}), '(reference_img)\n', (6218, 6233), False, 'from pywi.processing.filtering.pixel_clusters import filter_pixels_clusters_stats\n'), ((6286, 6326), 'pywi.processing.filtering.pixel_clusters.number_of_pixels_clusters', 'number_of_pixels_clusters', (['reference_img'], {}), '(reference_img)\n', (6311, 6326), False, 'from pywi.processing.filtering.pixel_clusters import number_of_pixels_clusters\n'), ((7965, 7984), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (7982, 7984), False, 'import time\n'), ((8481, 8512), 'pywi.benchmark.metrics.refbased.mse', 'mse', (['cleaned_img', 'reference_img'], {}), '(cleaned_img, reference_img)\n', (8484, 8512), False, 'from pywi.benchmark.metrics.refbased import mse\n'), ((12371, 12390), 'time.perf_counter', 
'time.perf_counter', ([], {}), '()\n', (12388, 12390), False, 'import time\n'), ((6744, 6768), 'numpy.nansum', 'np.nansum', (['reference_img'], {}), '(reference_img)\n', (6753, 6768), True, 'import numpy as np\n'), ((6827, 6851), 'numpy.nanmin', 'np.nanmin', (['reference_img'], {}), '(reference_img)\n', (6836, 6851), True, 'import numpy as np\n'), ((6910, 6934), 'numpy.nanmax', 'np.nanmax', (['reference_img'], {}), '(reference_img)\n', (6919, 6934), True, 'import numpy as np\n'), ((7106, 7126), 'numpy.nansum', 'np.nansum', (['input_img'], {}), '(input_img)\n', (7115, 7126), True, 'import numpy as np\n'), ((7184, 7204), 'numpy.nanmin', 'np.nanmin', (['input_img'], {}), '(input_img)\n', (7193, 7204), True, 'import numpy as np\n'), ((7262, 7282), 'numpy.nanmax', 'np.nanmax', (['input_img'], {}), '(input_img)\n', (7271, 7282), True, 'import numpy as np\n'), ((8777, 8799), 'numpy.nansum', 'np.nansum', (['cleaned_img'], {}), '(cleaned_img)\n', (8786, 8799), True, 'import numpy as np\n'), ((8862, 8884), 'numpy.nanmin', 'np.nanmin', (['cleaned_img'], {}), '(cleaned_img)\n', (8871, 8884), True, 'import numpy as np\n'), ((8947, 8969), 'numpy.nanmax', 'np.nanmax', (['cleaned_img'], {}), '(cleaned_img)\n', (8956, 8969), True, 'import numpy as np\n'), ((10345, 10397), 'traceback.print_tb', 'traceback.print_tb', (['e.__traceback__'], {'file': 'sys.stdout'}), '(e.__traceback__, file=sys.stdout)\n', (10363, 10397), False, 'import traceback\n'), ((10583, 10597), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (10595, 10597), False, 'import sys\n'), ((7008, 7034), 'numpy.isfinite', 'np.isfinite', (['reference_img'], {}), '(reference_img)\n', (7019, 7034), True, 'import numpy as np\n'), ((7351, 7373), 'numpy.isfinite', 'np.isfinite', (['input_img'], {}), '(input_img)\n', (7362, 7373), True, 'import numpy as np\n'), ((9045, 9069), 'numpy.isfinite', 'np.isfinite', (['cleaned_img'], {}), '(cleaned_img)\n', (9056, 9069), True, 'import numpy as np\n')] |
import sys
sys.path.append('..')
import argparse
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.training.model_restore import load_model_and_checkpoint_files
from fvcore.nn.flop_count import _DEFAULT_SUPPORTED_OPS, FlopCountAnalysis, flop_count
import numpy as np
import torch
import os
join = os.path.join
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', help="2d, 3d_lowres, 3d_fullres or 3d_cascade_fullres. Default: 3d_fullres", default="3d_fullres", required=False)
args = parser.parse_args()
model = args.model
model_path = join('./nnUNet_raw_data_base/RESULTS_FOLDER/nnUNet', model, 'Task000_FLARE21Baseline/nnUNetTrainerV2__nnUNetPlansv2.1')
trainer, params = load_model_and_checkpoint_files(model_path, folds='all', checkpoint_name='model_final_checkpoint')
pkl_file = join(model_path, "all/model_final_checkpoint.model.pkl")
info = load_pickle(pkl_file)
if model == '2d' or model == '3d_lowres':
patch_size = info['plans']['plans_per_stage'][0]['patch_size']
else:
patch_size = info['plans']['plans_per_stage'][1]['patch_size']
patch_size = np.append(np.array(1), patch_size)
inputs = (torch.randn(tuple(np.append(np.array(1),patch_size))).cuda(),)
flops = FlopCountAnalysis(trainer.network, inputs)
print('Total FLOPs:', flops.total())
| [
"argparse.ArgumentParser",
"nnunet.training.model_restore.load_model_and_checkpoint_files",
"fvcore.nn.flop_count.FlopCountAnalysis",
"numpy.array",
"sys.path.append"
] | [((11, 32), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (26, 32), False, 'import sys\n'), ((349, 374), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (372, 374), False, 'import argparse\n'), ((725, 828), 'nnunet.training.model_restore.load_model_and_checkpoint_files', 'load_model_and_checkpoint_files', (['model_path'], {'folds': '"""all"""', 'checkpoint_name': '"""model_final_checkpoint"""'}), "(model_path, folds='all', checkpoint_name=\n 'model_final_checkpoint')\n", (756, 828), False, 'from nnunet.training.model_restore import load_model_and_checkpoint_files\n'), ((1233, 1275), 'fvcore.nn.flop_count.FlopCountAnalysis', 'FlopCountAnalysis', (['trainer.network', 'inputs'], {}), '(trainer.network, inputs)\n', (1250, 1275), False, 'from fvcore.nn.flop_count import _DEFAULT_SUPPORTED_OPS, FlopCountAnalysis, flop_count\n'), ((1126, 1137), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (1134, 1137), True, 'import numpy as np\n'), ((1190, 1201), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (1198, 1201), True, 'import numpy as np\n')] |
'''
This file contains a dictionary with the standard value of the model parameters.
Unelss otherwise specified the parameters will take this value.
'''
import numpy as np
import copy
# ---- Evaluate mutation probability
L = 50 # lenght of AA chain involved in binding
pmut_per_base_per_division = 1e-3
p_sil = 0.5 # probability of silent mutation
p_aa = 0.2 # probability of affinity-affecting mutation
p_let = 0.3 # probability of lethal mutation
assert p_sil + p_aa + p_let == 1
# probability of at least one mutation in one nucleotide
p_mut = 1 - np.power(1 - pmut_per_base_per_division, 3 * L)
# effective probabilities of mutation per duplication event
p_sil_eff = p_sil * p_mut + (1 - p_mut) # no mutation + silent mutation
p_aa_eff = p_aa * p_mut
p_let_eff = p_let * p_mut
# High energy threshold, low and high energy cutoffs
low_en_threshold = np.log(50e-9)
high_en_exp_cutoff = np.log(500e-9)
low_en_exp_cutoff = np.log(0.1e-9)
# standard parameters value
std_par_val = {
# --- Ag dynamics
'k_decay_per_day': 1.22248e-2,
'k_consumption_per_day': 2.0664491210220598e-05,
'k_release_per_day': 0.97856,
# conversion factor Ag. C to dosage, in unit of micrograms of Ag
'alpha_C': 0.02251841057006763,
# --- GC time specifications
'days_per_turn': 0.5, # days per turn
'T_GC_formation_days': 6,
'GC_carrying_capacity': 2500, # carrying capacity
# mc seeding fraction. If set to 'pop' then the fraction depends on the mc population size
'f_mem_reinit': 'pop',
# --- mutations
'ker_xlim': 20, # numerical mutation kernel energy half-extension
'ker_ln_mu': 1.9,
'ker_ln_sigma': 0.5,
'ker_ln_offset': -3.,
'p_sil_eff': p_sil_eff,
'p_aa_eff': p_aa_eff,
'p_let_eff': p_let_eff,
'n_duplications': 2, # number of dupl. (and therefore mut.) per round
# --- B-selection
'B_sel': False,
'eps_B': -13.59,
# --- T-selection
'T_sel': True,
'a_selection': 0.11980624471882932, # selection permissivity
'b_selection': 0.6613446219950894, # selection additional rejection rate
# --- differentiation
# differentiation probability (MC + PC)
'diff_prob': 0.1,
# fraction of residual MC/PC differentiation after/before the switch
'diff_residual_fraction': 0.,
# MC/PC switch time in days. If None the switch is not performed and
# differentiation occurs with 1/2 probability for each fate.
'diff_switch_time': 11,
# width of the sigmoid in days
'diff_switch_sigma': 2,
# --- initial naive cell distribution
'mu_i': -14.59387180800652,
'sigma_i': 1.661288953177667,
'N_i': 2500,
'N_founders': 100, # n. of founder clones (only for stochastic sim.)
# --- simulation energy discretization and limits
'dx': 0.01,
'xlim_minus': -50,
'xlim_plus': 20,
# --- measurement PC/MC mixture
'g_1d': 0.5591246767438458, # MC/PC ratio, measurement 1 day after boost
'g_4d': 0.0, # MC/PC ratio, measurement 4 days after injection
}
def st_par():
'''
Returns a copy of the standard parameters dictionary. Otherwise it risks
getting modified while running the code.
'''
return copy.deepcopy(std_par_val)
| [
"numpy.log",
"copy.deepcopy",
"numpy.power"
] | [((928, 941), 'numpy.log', 'np.log', (['(5e-08)'], {}), '(5e-08)\n', (934, 941), True, 'import numpy as np\n'), ((963, 976), 'numpy.log', 'np.log', (['(5e-07)'], {}), '(5e-07)\n', (969, 976), True, 'import numpy as np\n'), ((998, 1011), 'numpy.log', 'np.log', (['(1e-10)'], {}), '(1e-10)\n', (1004, 1011), True, 'import numpy as np\n'), ((624, 671), 'numpy.power', 'np.power', (['(1 - pmut_per_base_per_division)', '(3 * L)'], {}), '(1 - pmut_per_base_per_division, 3 * L)\n', (632, 671), True, 'import numpy as np\n'), ((3252, 3278), 'copy.deepcopy', 'copy.deepcopy', (['std_par_val'], {}), '(std_par_val)\n', (3265, 3278), False, 'import copy\n')] |
# coding: utf8
# Functions used by nipype interface.
# Initiate the pipeline
def init_input_node(pet):
from clinica.utils.filemanip import get_subject_id
from clinica.utils.ux import print_begin_image
# Extract image ID
image_id = get_subject_id(pet)
print_begin_image(image_id)
return pet
# Concatenate two transformation in one transformation list
def concatenate_transforms(pet_to_t1w_tranform, t1w_to_mni_tranform):
"""Concatenate two input transformation files into a list.
Args:
transform1 (str): first transformation to apply
transform2 (str): second transformation to apply
Returns:
transform_list (list of string): both transform files path in a list
"""
return [t1w_to_mni_tranform, pet_to_t1w_tranform]
# Normalize the images based on the reference mask region
def suvr_normalization(input_img, norm_img, ref_mask):
"""Normalize the input image according to the reference region.
It uses nilearn `resample_to_img` and scipy `trim_mean` functions.
This function is different than the one in other PET pipelines
because there is a downsampling step.
Args:
input_img (str): image to be processed
norm_img (str): image used to compute the mean of the reference region
ref_mask (str): mask of the reference region
Returns:
output_img (nifty image): normalized nifty image
mask_template (nifty image): output mask on disk
"""
import os
import nibabel as nib
import numpy as np
from nilearn.image import resample_to_img
from scipy.stats import trim_mean
pet = nib.load(input_img)
norm = nib.load(norm_img)
mask = nib.load(ref_mask)
# Downsample the pet image used for normalization so we can multiply it with the mask
ds_img = resample_to_img(norm, mask, interpolation="nearest")
# Compute the mean of the region
region = np.multiply(ds_img.get_fdata(), mask.get_fdata(dtype="float32"))
array_region = np.where(region != 0, region, np.nan).flatten()
region_mean = trim_mean(array_region[~np.isnan(array_region)], 0.1)
from clinica.utils.stream import cprint
cprint(region_mean)
# Divide the value of the image voxels by the computed mean
data = pet.get_fdata(dtype="float32") / region_mean
# Create and save the normalized image
output_img = os.path.join(
os.getcwd(),
os.path.basename(input_img).split(".nii")[0] + "_suvr_normalized.nii.gz",
)
normalized_img = nib.Nifti1Image(data, pet.affine, header=pet.header)
normalized_img.to_filename(output_img)
return output_img
# It crops an image based on the reference.
def crop_nifti(input_img, ref_crop):
"""Crop input image based on the reference. It uses nilearn
`resample_to_img` function.
Args:
input_img (str): image to be processed
ref_img (str): template used to crop the image
Returns:
output_img (nifty image): crop image on disk.
crop_template (nifty image): output template on disk.
"""
import os
import nibabel as nib
import numpy as np
from nilearn.image import resample_to_img
basedir = os.getcwd()
# resample the individual MRI into the cropped template image
crop_img = resample_to_img(input_img, ref_crop, force_resample=True)
output_img = os.path.join(
basedir, os.path.basename(input_img).split(".nii")[0] + "_cropped.nii.gz"
)
crop_img.to_filename(output_img)
return output_img
def rename_into_caps(
in_bids_pet,
fname_pet,
fname_trans,
suvr_reference_region,
uncropped_image,
fname_pet_in_t1w=None,
):
"""
Rename the outputs of the pipelines into CAPS format.
Args:
in_bids_pet (str): Input BIDS PET to extract the <source_file>
fname_pet (str): Preprocessed PET file.
fname_trans (str): Transformation file from PET to MRI space
suvr_reference_region (str): SUVR mask name for file name output
uncropped_image (bool): Pipeline argument for image cropping
fname_pet_in_t1w (bool): Pipeline argument for saving intermediate file
Returns:
The different outputs in CAPS format
"""
import os
from nipype.interfaces.utility import Rename
from nipype.utils.filemanip import split_filename
_, source_file_pet, _ = split_filename(in_bids_pet)
# Rename into CAPS PET:
rename_pet = Rename()
rename_pet.inputs.in_file = fname_pet
if not uncropped_image:
suffix = f"_space-MNI152NLin2009cSym_desc-Crop_res-1x1x1_suvr-{suvr_reference_region}_pet.nii.gz"
rename_pet.inputs.format_string = source_file_pet + suffix
else:
suffix = f"_space-MNI152NLin2009cSym_res-1x1x1_suvr-{suvr_reference_region}_pet.nii.gz"
rename_pet.inputs.format_string = source_file_pet + suffix
out_caps_pet = rename_pet.run().outputs.out_file
# Rename into CAPS transformation file:
rename_trans = Rename()
rename_trans.inputs.in_file = fname_trans
rename_trans.inputs.format_string = source_file_pet + "_space-T1w_rigid.mat"
out_caps_trans = rename_trans.run().outputs.out_file
# Rename intermediate PET in T1w MRI space
if fname_pet_in_t1w is not None:
rename_pet_in_t1w = Rename()
rename_pet_in_t1w.inputs.in_file = fname_pet_in_t1w
rename_pet_in_t1w.inputs.format_string = (
source_file_pet + "_space-T1w_pet.nii.gz"
)
out_caps_pet_in_t1w = rename_pet_in_t1w.run().outputs.out_file
else:
out_caps_pet_in_t1w = None
return out_caps_pet, out_caps_trans, out_caps_pet_in_t1w
def print_end_pipeline(pet, final_file):
"""
Display end message for <subject_id> when <final_file> is connected.
"""
from clinica.utils.filemanip import get_subject_id
from clinica.utils.ux import print_end_image
print_end_image(get_subject_id(pet))
| [
"nipype.interfaces.utility.Rename",
"nibabel.load",
"numpy.where",
"nipype.utils.filemanip.split_filename",
"clinica.utils.filemanip.get_subject_id",
"clinica.utils.stream.cprint",
"os.getcwd",
"nilearn.image.resample_to_img",
"numpy.isnan",
"os.path.basename",
"nibabel.Nifti1Image",
"clinica.... | [((251, 270), 'clinica.utils.filemanip.get_subject_id', 'get_subject_id', (['pet'], {}), '(pet)\n', (265, 270), False, 'from clinica.utils.filemanip import get_subject_id\n'), ((275, 302), 'clinica.utils.ux.print_begin_image', 'print_begin_image', (['image_id'], {}), '(image_id)\n', (292, 302), False, 'from clinica.utils.ux import print_begin_image\n'), ((1627, 1646), 'nibabel.load', 'nib.load', (['input_img'], {}), '(input_img)\n', (1635, 1646), True, 'import nibabel as nib\n'), ((1658, 1676), 'nibabel.load', 'nib.load', (['norm_img'], {}), '(norm_img)\n', (1666, 1676), True, 'import nibabel as nib\n'), ((1688, 1706), 'nibabel.load', 'nib.load', (['ref_mask'], {}), '(ref_mask)\n', (1696, 1706), True, 'import nibabel as nib\n'), ((1811, 1863), 'nilearn.image.resample_to_img', 'resample_to_img', (['norm', 'mask'], {'interpolation': '"""nearest"""'}), "(norm, mask, interpolation='nearest')\n", (1826, 1863), False, 'from nilearn.image import resample_to_img\n'), ((2169, 2188), 'clinica.utils.stream.cprint', 'cprint', (['region_mean'], {}), '(region_mean)\n', (2175, 2188), False, 'from clinica.utils.stream import cprint\n'), ((2516, 2568), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['data', 'pet.affine'], {'header': 'pet.header'}), '(data, pet.affine, header=pet.header)\n', (2531, 2568), True, 'import nibabel as nib\n'), ((3185, 3196), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3194, 3196), False, 'import os\n'), ((3279, 3336), 'nilearn.image.resample_to_img', 'resample_to_img', (['input_img', 'ref_crop'], {'force_resample': '(True)'}), '(input_img, ref_crop, force_resample=True)\n', (3294, 3336), False, 'from nilearn.image import resample_to_img\n'), ((4368, 4395), 'nipype.utils.filemanip.split_filename', 'split_filename', (['in_bids_pet'], {}), '(in_bids_pet)\n', (4382, 4395), False, 'from nipype.utils.filemanip import split_filename\n'), ((4442, 4450), 'nipype.interfaces.utility.Rename', 'Rename', ([], {}), '()\n', (4448, 4450), False, 'from 
nipype.interfaces.utility import Rename\n'), ((4984, 4992), 'nipype.interfaces.utility.Rename', 'Rename', ([], {}), '()\n', (4990, 4992), False, 'from nipype.interfaces.utility import Rename\n'), ((2393, 2404), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2402, 2404), False, 'import os\n'), ((5290, 5298), 'nipype.interfaces.utility.Rename', 'Rename', ([], {}), '()\n', (5296, 5298), False, 'from nipype.interfaces.utility import Rename\n'), ((5909, 5928), 'clinica.utils.filemanip.get_subject_id', 'get_subject_id', (['pet'], {}), '(pet)\n', (5923, 5928), False, 'from clinica.utils.filemanip import get_subject_id\n'), ((1999, 2036), 'numpy.where', 'np.where', (['(region != 0)', 'region', 'np.nan'], {}), '(region != 0, region, np.nan)\n', (2007, 2036), True, 'import numpy as np\n'), ((2089, 2111), 'numpy.isnan', 'np.isnan', (['array_region'], {}), '(array_region)\n', (2097, 2111), True, 'import numpy as np\n'), ((2414, 2441), 'os.path.basename', 'os.path.basename', (['input_img'], {}), '(input_img)\n', (2430, 2441), False, 'import os\n'), ((3386, 3413), 'os.path.basename', 'os.path.basename', (['input_img'], {}), '(input_img)\n', (3402, 3413), False, 'import os\n')] |
import time
load_start_time = time.time()
import csv
import nltk
import re
import numpy as np
import pandas as pd
from dateutil import parser
import gensim, logging
from gensim.models import Word2Vec
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from sklearn.preprocessing import StandardScaler
stop_words = stopwords.words('english')
def custom_preprocessor(text):
porter = PorterStemmer()
#split into sentences
sentences = sent_tokenize(text)
final_sentences = []
for sentence in sentences:
sentence_split = sentence.split(" ")
#remove words in not in stop words, and make lowercase
words = [word.lower() for word in sentence_split if word.lower() not in stop_words]
#get rid of words with non alphanumeric characters in it
#(should we replace these with a token?)
words = [word for word in words if word.isalpha()]
#stem words
words = [porter.stem(word) for word in words]
final_sentences.append(" ".join(words))
#consider joining sentences with a stop token
return " ".join(final_sentences), final_sentences, sentences
filepaths = ['./android/ebay/total_info.txt', './android/clean_master/total_info.txt', './android/swiftkey_keyboard/total_info.txt', './android/viber/total_info.txt', './ios/noaa-radar-pro-severe-weather/total_info.txt', './ios/youtube/total_info.txt']
app_names = ['ebay', 'clean_master', 'swiftkey_keyboard', 'viber', 'noaa-radar-pro-severe-weather', 'youtube']
oses = ['android', 'android', 'android', 'android', 'ios', 'ios']
###load data
for i in range(len(filepaths)):
filepath = filepaths[i]
app_name = app_names[i]
os = oses[i]
print('Loading data for', app_name, '........')
with open(filepath) as f:
reader = csv.reader(f, delimiter="\t")
d = list(reader)
ratings = []
reviews = []
titles = []
dates = []
versions = []
#account for the different file structures for android/ios apps
if os == 'android':
for line in d:
vals = line[0].split("******")
ratings.append(float(vals[0]))
reviews.append(vals[1])
dates.append(vals[2])
versions.append(vals[3])
elif os == 'ios':
for line in d:
vals = line[0].split("******")
ratings.append(float(vals[0]))
reviews.append(vals[1])
#ios review dates are like 'Apr 01, 2017'
#we'll turn them into '2017-04-01' to make it consistent with the android ones
date = parser.parse(vals[3]).strftime('%Y-%m-%d')
dates.append(date)
versions.append(vals[4])
processed_reviews = []
for review in reviews:
processed_review = custom_preprocessor(review)
processed_reviews.append(processed_review[0])
#get rid of reviews that are empty after preprocessing
#(not that many)
processed_review_lens = np.array([len(review) for review in [r.split(" ") for r in processed_reviews]])
#if using stop tokens "<END>" then empty reviews have a length of 6
nonzero_indeces = np.where(processed_review_lens > 1)
#print(len(nonzero_indeces))
#print(min(processed_review_lens))
final_reviews_processed = np.array(processed_reviews)[nonzero_indeces]
final_reviews = [review.split(" ") for review in final_reviews_processed]
final_reviews_unprocessed = np.array(reviews)[nonzero_indeces]
final_ratings = [float(rating) for rating in np.array(ratings)[nonzero_indeces]]
#final_titles = np.array(titles)[nonzero_indeces]
final_dates = np.array(dates)[nonzero_indeces]
unique_dates = np.unique(np.array(final_dates))
unique_date_indices = []
for date in unique_dates:
date_indices = np.where(np.array(final_dates)==date)[0]
unique_date_indices.append(date_indices)
final_versions = np.array(versions)[nonzero_indeces]
model = Word2Vec(final_reviews, min_count=1)
model_filename = "../../large files/"+app_name+".model"
model.save(model_filename)
d = {'reviews_unprocessed':final_reviews_unprocessed, 'reviews_processed':final_reviews_processed, 'dates':final_dates, 'versions':final_versions, 'ratings':final_ratings}
df = pd.DataFrame(data = d)
data_filename = "../../large files/"+app_name+"_post_processing_data.csv"
df.to_csv(data_filename)
print('All done!!') | [
"dateutil.parser.parse",
"nltk.corpus.stopwords.words",
"numpy.where",
"gensim.models.Word2Vec",
"nltk.stem.porter.PorterStemmer",
"numpy.array",
"nltk.tokenize.sent_tokenize",
"csv.reader",
"pandas.DataFrame",
"time.time"
] | [((30, 41), 'time.time', 'time.time', ([], {}), '()\n', (39, 41), False, 'import time\n'), ((398, 424), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (413, 424), False, 'from nltk.corpus import stopwords\n'), ((469, 484), 'nltk.stem.porter.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (482, 484), False, 'from nltk.stem.porter import PorterStemmer\n'), ((528, 547), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['text'], {}), '(text)\n', (541, 547), False, 'from nltk.tokenize import sent_tokenize, word_tokenize\n'), ((3268, 3303), 'numpy.where', 'np.where', (['(processed_review_lens > 1)'], {}), '(processed_review_lens > 1)\n', (3276, 3303), True, 'import numpy as np\n'), ((4091, 4127), 'gensim.models.Word2Vec', 'Word2Vec', (['final_reviews'], {'min_count': '(1)'}), '(final_reviews, min_count=1)\n', (4099, 4127), False, 'from gensim.models import Word2Vec\n'), ((4407, 4427), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'd'}), '(data=d)\n', (4419, 4427), True, 'import pandas as pd\n'), ((1906, 1935), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""'}), "(f, delimiter='\\t')\n", (1916, 1935), False, 'import csv\n'), ((3407, 3434), 'numpy.array', 'np.array', (['processed_reviews'], {}), '(processed_reviews)\n', (3415, 3434), True, 'import numpy as np\n'), ((3564, 3581), 'numpy.array', 'np.array', (['reviews'], {}), '(reviews)\n', (3572, 3581), True, 'import numpy as np\n'), ((3756, 3771), 'numpy.array', 'np.array', (['dates'], {}), '(dates)\n', (3764, 3771), True, 'import numpy as np\n'), ((3818, 3839), 'numpy.array', 'np.array', (['final_dates'], {}), '(final_dates)\n', (3826, 3839), True, 'import numpy as np\n'), ((4042, 4060), 'numpy.array', 'np.array', (['versions'], {}), '(versions)\n', (4050, 4060), True, 'import numpy as np\n'), ((3648, 3665), 'numpy.array', 'np.array', (['ratings'], {}), '(ratings)\n', (3656, 3665), True, 'import numpy as np\n'), ((3936, 3957), 'numpy.array', 'np.array', 
(['final_dates'], {}), '(final_dates)\n', (3944, 3957), True, 'import numpy as np\n'), ((2690, 2711), 'dateutil.parser.parse', 'parser.parse', (['vals[3]'], {}), '(vals[3])\n', (2702, 2711), False, 'from dateutil import parser\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.