| code | apis | extract_api |
|---|---|---|
import numpy as np
import xarray as xr
from copy import deepcopy
from skimage.draw import circle
def generate_test_dist_matrix(num_A=100, num_B=100, num_C=100,
distr_AB=(10, 1), distr_random=(200, 1),
seed=None):
"""
    This function will return a random distance matrix specifying the distance between cells of
    types A and B and between cells of all other groups (type C).
    Each row and column represents a cell. The distances are drawn from Gaussian distributions.
    Ideally, the parameters for A-to-B distances will be set such that they produce a lower range
    of values than A-to-C distances.
    Note that the distance matrices created this way are non-Euclidean.
Args:
num_A (int):
the number of A cells we wish to generate. Default 100
num_B (int):
the number of B cells we wish to generate. Default 100
num_C (int):
the number of C cells we wish to generate. Default 100
distr_AB (tuple):
if specified, will be a tuple listing the mean and variance of the Gaussian
distribution we wish to generate numbers from. Default mean=10 and var=1
distr_random (tuple):
            similar to distr_AB, except it sets the distribution of all other distances.
            Default mean=200 and var=1
seed (int):
            integer used to fix the random seed for reproducibility; useful for testing.
            Default None.
Returns:
xarray.DataArray:
            The randomized distance matrix generated directly from the predefined
            distributions, where the average distance between cells of types A and B is
            smaller than the average distance between any other pair of cell types
"""
# set the mean and variance of the Gaussian distributions of both AB and AC distances
mean_ab = distr_AB[0]
var_ab = distr_AB[1]
mean_random = distr_random[0]
var_random = distr_random[1]
    # set random seed if specified
    if seed is not None:
np.random.seed(seed)
# we initialize the random distances across different types of points
# completely randomize aa, ac, bb, bc, and cc distances, use params for ab distances
random_aa = np.abs(np.random.normal(mean_random, var_random, (num_A, num_A)))
random_ab = np.abs(np.random.normal(mean_ab, var_ab, (num_A, num_B)))
random_ac = np.abs(np.random.normal(mean_random, var_random, (num_A, num_C)))
random_bb = np.abs(np.random.normal(mean_random, var_random, (num_B, num_B)))
random_bc = np.abs(np.random.normal(mean_random, var_random, (num_B, num_C)))
random_cc = np.abs(np.random.normal(mean_random, var_random, (num_C, num_C)))
# create each partition one-by-one first and ensure symmetry
a_partition = np.concatenate(((random_aa + random_aa.T) / 2, random_ab, random_ac), axis=1)
b_partition = np.concatenate((random_ab.T, (random_bb + random_bb.T) / 2, random_bc), axis=1)
c_partition = np.concatenate((random_ac.T, random_bc.T, (random_cc + random_cc.T) / 2), axis=1)
# concatenate partitions together
dist_mat = np.concatenate((a_partition, b_partition, c_partition), axis=0)
# ensure a proper dist mat
np.fill_diagonal(dist_mat, 0)
# randomly permute dist_mat to make more realistic
coords_in_order = np.arange(dist_mat.shape[0])
coords_permuted = deepcopy(coords_in_order)
np.random.shuffle(coords_permuted)
dist_mat = dist_mat[np.ix_(coords_permuted, coords_permuted)]
    # 1-index coords because cell labels start at 1
coords_dist_mat = [coords_permuted + 1, coords_permuted + 1]
dist_mat = xr.DataArray(dist_mat, coords=coords_dist_mat)
return dist_mat
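# Usage sketch (the parameter values below are illustrative, not from this module):
#     dist_mat = generate_test_dist_matrix(num_A=10, num_B=10, num_C=10, seed=42)
#     # dist_mat is a symmetric 30x30 xarray.DataArray with a zero diagonal and
#     # noticeably smaller values for A-B pairs than for every other pairing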
def generate_random_centroids(size_img=(1024, 1024), num_A=100, num_B=100, num_C=100,
mean_A_factor=None, cov_A=None, mean_B_factor=None, cov_B=None,
mean_C_factor=None, cov_C=None, seed=None):
"""
Generate a set of random centroids given distribution parameters. Used as a helper function by
generate_test_label_map.
Args:
size_img (tuple):
a tuple indicating the size of the image. Default (1024, 1024)
num_A (int):
the number of A centroids to generate. Default 100.
num_B (int):
the number of B centroids to generate. Default 100.
num_C (int):
the number of C centroids to generate. Default 100.
mean_A_factor (tuple):
            a tuple of factors to multiply the height and width by to set the center (mean)
            of the distribution used to generate A points. Set to a predefined default if
            None.
        cov_A (numpy.ndarray):
            the covariance used to generate A points as [[varXX, varXY], [varYX, varYY]].
            Set to a predefined default if None.
mean_B_factor (tuple):
similar to mean_A_factor
cov_B (numpy.ndarray):
similar to cov_A
mean_C_factor (tuple):
similar to mean_A_factor
cov_C (numpy.ndarray):
similar to cov_A
seed (int):
            integer used to fix the random seed for reproducibility; useful for testing.
            Default None.
Returns:
        numpy.ndarray:
            Array of unique, in-bounds cell centroids as (row, column) coordinates.
"""
# extract different variable factors
height = size_img[0]
width = size_img[1]
    # fall back to predefined mean factors and covariances if not specified
    mean_A_factor = mean_A_factor if mean_A_factor else (0.5, 0.5)
    mean_B_factor = mean_B_factor if mean_B_factor else (0.6, 0.6)
    mean_C_factor = mean_C_factor if mean_C_factor else (0.1, 0.1)
    a_mean = (height * mean_A_factor[0], width * mean_A_factor[1])
    b_mean = (height * mean_B_factor[0], width * mean_B_factor[1])
    c_mean = (height * mean_C_factor[0], width * mean_C_factor[1])
    a_cov = cov_A if cov_A is not None else [[200, 0], [0, 200]]
    b_cov = cov_B if cov_B is not None else [[200, 0], [0, 200]]
    c_cov = cov_C if cov_C is not None else [[200, 0], [0, 200]]
# if specified, set the random seed
    if seed is not None:
np.random.seed(seed)
# use the multivariate_normal distribution, convert to int for label mat generation
a_points = np.random.multivariate_normal(a_mean, a_cov, num_A).astype(np.int16)
b_points = np.random.multivariate_normal(b_mean, b_cov, num_B).astype(np.int16)
c_points = np.random.multivariate_normal(c_mean, c_cov, num_C).astype(np.int16)
# combine points
total_points = np.concatenate((a_points, b_points, c_points), axis=0)
    # remove out-of-range points
total_points = total_points[
np.logical_and(total_points[:, 0] >= 0, total_points[:, 1] >= 0), :]
total_points = total_points[
np.logical_and(total_points[:, 0] < size_img[0], total_points[:, 1] < size_img[1]), :]
# only keep the non-duplicate points
non_dup_points, non_dup_counts = np.unique(total_points, axis=0, return_counts=True)
total_points = non_dup_points[non_dup_counts == 1]
# randomly permute order to make more realistic
total_points = total_points[np.random.permutation(total_points.shape[0]), :]
return total_points
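# Usage sketch (illustrative values):
#     centroids = generate_random_centroids(size_img=(1024, 1024), num_A=50,
#                                           num_B=50, num_C=50, seed=1)
#     # centroids is an (N, 2) int array of unique, in-bounds coordinates, N <= 150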
def generate_test_label_map(size_img=(1024, 1024), num_A=100, num_B=100, num_C=100,
mean_A_factor=None, cov_A=None, mean_B_factor=None, cov_B=None,
mean_C_factor=None, cov_C=None, seed=None):
"""
    This function generates random centroid centers in the form of a label map such that those
    of type A will have centers closer on average to those of type B than to those of type C.
    A multivariate Gaussian distribution is used for each of the A, B, and C cell types to
    generate their respective centers.
Args:
size_img (tuple):
a tuple indicating the size of the image. Default (1024, 1024)
num_A (int):
the number of A centroids to generate. Default 100.
num_B (int):
the number of B centroids to generate. Default 100.
num_C (int):
the number of C centroids to generate. Default 100.
mean_A_factor (tuple):
            a tuple of factors to multiply the height and width by to set the center (mean)
            of the distribution used to generate A points. Set to a predefined default if
            None.
        cov_A (numpy.ndarray):
            the covariance used to generate A points as [[varXX, varXY], [varYX, varYY]].
            Set to a predefined default if None.
mean_B_factor (tuple):
similar to mean_A_factor
cov_B (numpy.ndarray):
similar to cov_A
mean_C_factor (tuple):
similar to mean_A_factor
cov_C (numpy.ndarray):
similar to cov_A
seed (int):
            integer used to fix the random seed for reproducibility; useful for testing.
            Default None.
Returns:
xarray.DataArray:
            Data in xarray format containing the randomized label matrix based on the
            generated centroid centers. The label map portion of sample_img_xr is built from
            a randomly initialized set of cell centroids where those of type A are on average
            closer to those of type B than they are to those of type C.
"""
# generate the list of centroids
all_centroids = \
generate_random_centroids(size_img=size_img, num_A=num_A, num_B=num_B, num_C=num_C,
mean_A_factor=mean_A_factor, cov_A=cov_A,
mean_B_factor=mean_B_factor, cov_B=cov_B,
mean_C_factor=mean_C_factor, cov_C=cov_C,
seed=seed)
point_x_coords, point_y_coords = zip(*all_centroids)
# assign each centroid a unique label
centroid_indices = np.arange(len(all_centroids))
label_mat = np.zeros(size_img)
label_mat[point_x_coords, point_y_coords] = centroid_indices + 1
    # wrap the label mat into a 4-D image xarray
sample_img = np.zeros((1, size_img[0], size_img[1], 1)).astype(np.int16)
sample_img[0, :, :, 0] = deepcopy(label_mat)
sample_img_xr = xr.DataArray(
sample_img,
coords=[[1], range(size_img[0]), range(size_img[1]), ['segmentation_label']],
dims=['fovs', 'rows', 'cols', 'channels']
)
return sample_img_xr
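# Usage sketch (illustrative values):
#     label_xr = generate_test_label_map(size_img=(1024, 1024), seed=1)
#     # label_xr has dims ('fovs', 'rows', 'cols', 'channels') and one positive
#     # label per generated centroid; all other pixels are 0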
def generate_two_cell_seg_mask(size_img=(1024, 1024), cell_radius=10):
"""
    This function generates a test segmentation mask containing two adjacent cells, each
    labeled with a distinct integer.
Args:
size_img (tuple):
the dimensions of the image we wish to generate
cell_radius (int):
the radius of each cell
Returns:
numpy.ndarray:
An array of dimensions size_img with two separate labeled cells that border each other
"""
# define the segmentation mask
sample_segmentation_mask = np.zeros(size_img)
# define the centers of the cells
    center_1 = (size_img[0] // 2, size_img[1] // 2)
    center_2 = (size_img[0] // 2, size_img[1] // 2 + cell_radius * 2 - 1)
# generate the coordinates of each nuclear disk
cell_region_1_x, cell_region_1_y = circle(center_1[0], center_1[1], cell_radius,
shape=size_img)
cell_region_2_x, cell_region_2_y = circle(center_2[0], center_2[1], cell_radius,
shape=size_img)
# assign the respective cells value according to their label
sample_segmentation_mask[cell_region_1_x, cell_region_1_y] = 1
sample_segmentation_mask[cell_region_2_x, cell_region_2_y] = 2
# store the centers and return for reference purposes
cell_centers = {1: center_1, 2: center_2}
return sample_segmentation_mask, cell_centers
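# Usage sketch (illustrative values):
#     seg_mask, centers = generate_two_cell_seg_mask(size_img=(1024, 1024), cell_radius=10)
#     # seg_mask holds 0 (background), 1 and 2 (the two bordering cells);
#     # centers maps each label to its (row, col) center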
def generate_two_cell_nuc_signal(segmentation_mask, cell_centers,
size_img=(1024, 1024), nuc_cell_ids=[1],
nuc_radius=3, nuc_signal_strength=10,
nuc_uncertainty_length=0):
"""
This function generates nuclear signal for the provided cells
Args:
segmentation_mask (numpy.ndarray):
an array which contains the labeled cell regions
cell_centers (dict):
a dictionary which contains the centers associated with each cell region
size_img (tuple):
the dimensions of the image we wish to generate
nuc_cell_ids (list):
            a list of cells we wish to generate nuclear signal for. Defaults to cell 1 only.
nuc_radius (int):
the radius of the nucleus of each cell
nuc_signal_strength (int):
the value we want to assign for nuclear signal
nuc_uncertainty_length (int):
will extend nuc_radius by the specified length
Returns:
numpy.ndarray:
            An array with the same dimensions as segmentation_mask, with nuclear signal
            generated for the provided cell ids
"""
# define the nuclear signal array
sample_nuclear_signal = np.zeros(segmentation_mask.shape)
for cell in nuc_cell_ids:
center = cell_centers[cell]
# generate nuclear region
nuc_region_x, nuc_region_y = circle(center[0], center[1],
nuc_radius + nuc_uncertainty_length, shape=size_img)
# set nuclear signal
sample_nuclear_signal[nuc_region_x, nuc_region_y] = nuc_signal_strength
# TODO: include jitter (probably after context-spatial randomization is done)
return sample_nuclear_signal
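# Usage sketch (reuses seg_mask and centers from generate_two_cell_seg_mask above):
#     nuc_signal = generate_two_cell_nuc_signal(seg_mask, centers, nuc_cell_ids=[1],
#                                               nuc_radius=3, nuc_signal_strength=10)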
def generate_two_cell_memb_signal(segmentation_mask, cell_centers,
size_img=(1024, 1024), cell_radius=10,
memb_cell_ids=[2], memb_thickness=5,
memb_signal_strength=10, memb_uncertainty_length=0):
"""
This function generates membrane signal for the provided cells
Args:
segmentation_mask (numpy.ndarray):
an array which contains the labeled cell regions
cell_centers (dict):
a dictionary which contains the centers associated with each cell region
size_img (tuple):
the dimensions of the image we wish to generate
cell_radius (int):
the radius of the entire cell, needed to do proper circle subtraction for a
ring-shaped membrane
memb_cell_ids (list):
            a list of cells we wish to generate membrane signal for. Defaults to cell 2 only.
memb_thickness (int):
            the thickness of the membrane ring of each cell
memb_signal_strength (int):
the value we want to assign to membrane signal
memb_uncertainty_length (int):
            will extend the cell radius (the membrane's outer edge) by the specified length
Returns:
numpy.ndarray:
            An array with the same dimensions as segmentation_mask, with membrane signal
            generated for the provided cell ids
"""
    # define the membrane signal array
sample_membrane_signal = np.zeros(segmentation_mask.shape)
for cell in memb_cell_ids:
center = cell_centers[cell]
# generate coordinates of the cell region
cell_region_x, cell_region_y = circle(center[0], center[1],
cell_radius + memb_uncertainty_length,
shape=size_img)
# generate coordinates of the non-membrane region
non_memb_region_x, non_memb_region_y = circle(center[0], center[1],
cell_radius - memb_thickness,
shape=size_img)
# perform circle subtraction to generate membrane region
sample_membrane_signal[cell_region_x, cell_region_y] = memb_signal_strength
sample_membrane_signal[non_memb_region_x, non_memb_region_y] = 0
# TODO: include jitter (probably after context-spatial randomization is done)
return sample_membrane_signal
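# Usage sketch (reuses seg_mask and centers from generate_two_cell_seg_mask above):
#     memb_signal = generate_two_cell_memb_signal(seg_mask, centers, cell_radius=10,
#                                                 memb_cell_ids=[2], memb_thickness=5)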
def generate_two_cell_chan_data(size_img=(1024, 1024), cell_radius=10,
nuc_radius=3, memb_thickness=5, nuc_cell_ids=[1],
memb_cell_ids=[2], nuc_signal_strength=10,
memb_signal_strength=10,
nuc_uncertainty_length=0,
memb_uncertainty_length=0):
"""
    This function generates the complete channel-level synthetic dataset: a segmentation mask
    plus a two-channel array holding nuclear and membrane signal.
Args:
size_img (tuple):
the dimensions of the image we wish to generate
cell_radius (int):
the radius of each cell
nuc_radius (int):
the radius of each nucleus
memb_thickness (int):
the thickness of each membrane
nuc_cell_ids (list):
            a list of which cells we wish to generate nuclear signal for. Defaults to
            cell 1 only.
memb_cell_ids (list):
            a list of which cells we wish to generate membrane signal for. Defaults to
            cell 2 only.
nuc_signal_strength (int):
defines the constant value we want to assign to nuclear signal
memb_signal_strength (int):
defines the constant value we want to assign to membrane signal
nuc_uncertainty_length (int):
will extend nuc_radius by specified length
memb_uncertainty_length (int):
will extend memb_radius by specified length
Returns:
        tuple (numpy.ndarray, numpy.ndarray):
            - an array with the labeled cell regions
            - an array of shape (size_img[0], size_img[1], 2) with the nuclear signal for
              the desired cells in channel 0 and the membrane signal in channel 1
"""
# generate the segmentation mask
sample_segmentation_mask, sample_cell_centers = generate_two_cell_seg_mask(
size_img=size_img,
cell_radius=cell_radius
)
# generate the nuclear and membrane-level signal
sample_nuclear_signal = generate_two_cell_nuc_signal(
segmentation_mask=sample_segmentation_mask,
cell_centers=sample_cell_centers,
size_img=size_img,
nuc_cell_ids=nuc_cell_ids,
nuc_radius=nuc_radius,
nuc_signal_strength=nuc_signal_strength,
nuc_uncertainty_length=nuc_uncertainty_length
)
sample_membrane_signal = generate_two_cell_memb_signal(
segmentation_mask=sample_segmentation_mask,
cell_centers=sample_cell_centers,
size_img=size_img,
cell_radius=cell_radius,
memb_cell_ids=memb_cell_ids,
memb_thickness=memb_thickness,
memb_signal_strength=memb_signal_strength,
memb_uncertainty_length=memb_uncertainty_length)
# generate the channel data matrix
sample_channel_data = np.zeros((size_img[0], size_img[1], 2))
sample_channel_data[:, :, 0] = sample_nuclear_signal
sample_channel_data[:, :, 1] = sample_membrane_signal
return sample_segmentation_mask, sample_channel_data
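# Usage sketch (illustrative values):
#     seg_mask, chan_data = generate_two_cell_chan_data(size_img=(1024, 1024))
#     # chan_data[..., 0] holds the nuclear signal, chan_data[..., 1] the membrane signal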
| [
"numpy.fill_diagonal",
"copy.deepcopy",
"numpy.random.seed",
"numpy.concatenate",
"numpy.logical_and",
"numpy.ix_",
"numpy.unique",
"numpy.zeros",
"numpy.arange",
"xarray.DataArray",
"numpy.random.normal",
"numpy.random.multivariate_normal",
"numpy.random.permutation",
"numpy.random.shuffl... | [((2829, 2906), 'numpy.concatenate', 'np.concatenate', (['((random_aa + random_aa.T) / 2, random_ab, random_ac)'], {'axis': '(1)'}), '(((random_aa + random_aa.T) / 2, random_ab, random_ac), axis=1)\n', (2843, 2906), True, 'import numpy as np\n'), ((2925, 3004), 'numpy.concatenate', 'np.concatenate', (['(random_ab.T, (random_bb + random_bb.T) / 2, random_bc)'], {'axis': '(1)'}), '((random_ab.T, (random_bb + random_bb.T) / 2, random_bc), axis=1)\n', (2939, 3004), True, 'import numpy as np\n'), ((3023, 3108), 'numpy.concatenate', 'np.concatenate', (['(random_ac.T, random_bc.T, (random_cc + random_cc.T) / 2)'], {'axis': '(1)'}), '((random_ac.T, random_bc.T, (random_cc + random_cc.T) / 2),\n axis=1)\n', (3037, 3108), True, 'import numpy as np\n'), ((3159, 3222), 'numpy.concatenate', 'np.concatenate', (['(a_partition, b_partition, c_partition)'], {'axis': '(0)'}), '((a_partition, b_partition, c_partition), axis=0)\n', (3173, 3222), True, 'import numpy as np\n'), ((3259, 3288), 'numpy.fill_diagonal', 'np.fill_diagonal', (['dist_mat', '(0)'], {}), '(dist_mat, 0)\n', (3275, 3288), True, 'import numpy as np\n'), ((3367, 3395), 'numpy.arange', 'np.arange', (['dist_mat.shape[0]'], {}), '(dist_mat.shape[0])\n', (3376, 3395), True, 'import numpy as np\n'), ((3418, 3443), 'copy.deepcopy', 'deepcopy', (['coords_in_order'], {}), '(coords_in_order)\n', (3426, 3443), False, 'from copy import deepcopy\n'), ((3448, 3482), 'numpy.random.shuffle', 'np.random.shuffle', (['coords_permuted'], {}), '(coords_permuted)\n', (3465, 3482), True, 'import numpy as np\n'), ((3693, 3739), 'xarray.DataArray', 'xr.DataArray', (['dist_mat'], {'coords': 'coords_dist_mat'}), '(dist_mat, coords=coords_dist_mat)\n', (3705, 3739), True, 'import xarray as xr\n'), ((6463, 6517), 'numpy.concatenate', 'np.concatenate', (['(a_points, b_points, c_points)'], {'axis': '(0)'}), '((a_points, b_points, c_points), axis=0)\n', (6477, 6517), True, 'import numpy as np\n'), ((6869, 6920), 'numpy.unique', 'np.unique', (['total_points'], {'axis': '(0)', 'return_counts': '(True)'}), '(total_points, axis=0, return_counts=True)\n', (6878, 6920), True, 'import numpy as np\n'), ((9930, 9948), 'numpy.zeros', 'np.zeros', (['size_img'], {}), '(size_img)\n', (9938, 9948), True, 'import numpy as np\n'), ((10150, 10169), 'copy.deepcopy', 'deepcopy', (['label_mat'], {}), '(label_mat)\n', (10158, 10169), False, 'from copy import deepcopy\n'), ((10941, 10959), 'numpy.zeros', 'np.zeros', (['size_img'], {}), '(size_img)\n', (10949, 10959), True, 'import numpy as np\n'), ((11217, 11278), 'skimage.draw.circle', 'circle', (['center_1[0]', 'center_1[1]', 'cell_radius'], {'shape': 'size_img'}), '(center_1[0], center_1[1], cell_radius, shape=size_img)\n', (11223, 11278), False, 'from skimage.draw import circle\n'), ((11364, 11425), 'skimage.draw.circle', 'circle', (['center_2[0]', 'center_2[1]', 'cell_radius'], {'shape': 'size_img'}), '(center_2[0], center_2[1], cell_radius, shape=size_img)\n', (11370, 11425), False, 'from skimage.draw import circle\n'), ((13130, 13163), 'numpy.zeros', 'np.zeros', (['segmentation_mask.shape'], {}), '(segmentation_mask.shape)\n', (13138, 13163), True, 'import numpy as np\n'), ((15156, 15189), 'numpy.zeros', 'np.zeros', (['segmentation_mask.shape'], {}), '(segmentation_mask.shape)\n', (15164, 15189), True, 'import numpy as np\n'), ((18992, 19031), 'numpy.zeros', 'np.zeros', (['(size_img[0], size_img[1], 2)'], {}), '((size_img[0], size_img[1], 2))\n', (19000, 19031), True, 'import numpy as np\n'), ((2076, 2096), 
'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2090, 2096), True, 'import numpy as np\n'), ((2284, 2341), 'numpy.random.normal', 'np.random.normal', (['mean_random', 'var_random', '(num_A, num_A)'], {}), '(mean_random, var_random, (num_A, num_A))\n', (2300, 2341), True, 'import numpy as np\n'), ((2366, 2415), 'numpy.random.normal', 'np.random.normal', (['mean_ab', 'var_ab', '(num_A, num_B)'], {}), '(mean_ab, var_ab, (num_A, num_B))\n', (2382, 2415), True, 'import numpy as np\n'), ((2440, 2497), 'numpy.random.normal', 'np.random.normal', (['mean_random', 'var_random', '(num_A, num_C)'], {}), '(mean_random, var_random, (num_A, num_C))\n', (2456, 2497), True, 'import numpy as np\n'), ((2522, 2579), 'numpy.random.normal', 'np.random.normal', (['mean_random', 'var_random', '(num_B, num_B)'], {}), '(mean_random, var_random, (num_B, num_B))\n', (2538, 2579), True, 'import numpy as np\n'), ((2604, 2661), 'numpy.random.normal', 'np.random.normal', (['mean_random', 'var_random', '(num_B, num_C)'], {}), '(mean_random, var_random, (num_B, num_C))\n', (2620, 2661), True, 'import numpy as np\n'), ((2686, 2743), 'numpy.random.normal', 'np.random.normal', (['mean_random', 'var_random', '(num_C, num_C)'], {}), '(mean_random, var_random, (num_C, num_C))\n', (2702, 2743), True, 'import numpy as np\n'), ((3507, 3547), 'numpy.ix_', 'np.ix_', (['coords_permuted', 'coords_permuted'], {}), '(coords_permuted, coords_permuted)\n', (3513, 3547), True, 'import numpy as np\n'), ((6060, 6080), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6074, 6080), True, 'import numpy as np\n'), ((13303, 13389), 'skimage.draw.circle', 'circle', (['center[0]', 'center[1]', '(nuc_radius + nuc_uncertainty_length)'], {'shape': 'size_img'}), '(center[0], center[1], nuc_radius + nuc_uncertainty_length, shape=\n size_img)\n', (13309, 13389), False, 'from skimage.draw import circle\n'), ((15348, 15436), 'skimage.draw.circle', 'circle', (['center[0]', 'center[1]', '(cell_radius + memb_uncertainty_length)'], {'shape': 'size_img'}), '(center[0], center[1], cell_radius + memb_uncertainty_length, shape=\n size_img)\n', (15354, 15436), False, 'from skimage.draw import circle\n'), ((15630, 15704), 'skimage.draw.circle', 'circle', (['center[0]', 'center[1]', '(cell_radius - memb_thickness)'], {'shape': 'size_img'}), '(center[0], center[1], cell_radius - memb_thickness, shape=size_img)\n', (15636, 15704), False, 'from skimage.draw import circle\n'), ((6185, 6236), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['a_mean', 'a_cov', 'num_A'], {}), '(a_mean, a_cov, num_A)\n', (6214, 6236), True, 'import numpy as np\n'), ((6269, 6320), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['b_mean', 'b_cov', 'num_B'], {}), '(b_mean, b_cov, num_B)\n', (6298, 6320), True, 'import numpy as np\n'), ((6353, 6404), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['c_mean', 'c_cov', 'num_C'], {}), '(c_mean, c_cov, num_C)\n', (6382, 6404), True, 'import numpy as np\n'), ((6593, 6657), 'numpy.logical_and', 'np.logical_and', (['(total_points[:, 0] >= 0)', '(total_points[:, 1] >= 0)'], {}), '(total_points[:, 0] >= 0, total_points[:, 1] >= 0)\n', (6607, 6657), True, 'import numpy as np\n'), ((6703, 6789), 'numpy.logical_and', 'np.logical_and', (['(total_points[:, 0] < size_img[0])', '(total_points[:, 1] < size_img[1])'], {}), '(total_points[:, 0] < size_img[0], total_points[:, 1] <\n size_img[1])\n', (6717, 6789), True, 'import numpy as np\n'), ((7061, 7105), 
'numpy.random.permutation', 'np.random.permutation', (['total_points.shape[0]'], {}), '(total_points.shape[0])\n', (7082, 7105), True, 'import numpy as np\n'), ((10061, 10103), 'numpy.zeros', 'np.zeros', (['(1, size_img[0], size_img[1], 1)'], {}), '((1, size_img[0], size_img[1], 1))\n', (10069, 10103), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# @Author: liuyulin
# @Date: 2018-10-16 16:36:20
# @Last Modified by: <NAME>
# @Last Modified time: 2019-06-23 21:03:01
from matplotlib.patches import Polygon
import pyproj
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import pickle
from utils import g
import os
# Borrow from https://stackoverflow.com/questions/8161144/drawing-ellipses-on-matplotlib-basemap-projections
class Basemap(Basemap):
def ellipse(self, x0, y0, a, b, n, ax=None, **kwargs):
"""
Draws a polygon centered at ``x0, y0``. The polygon approximates an
        ellipse on the surface of the Earth with semi-major axis ``a`` and
semi-minor axis ``b`` degrees longitude and latitude, made up of
``n`` vertices.
        For a description of the properties of ellipses, please refer to [1].
        The polygon is based upon code written to plot Tissot's indicatrix
found on the matplotlib mailing list at [2].
Extra keyword ``ax`` can be used to override the default axis instance.
        Other \**kwargs are passed on to matplotlib.patches.Polygon.
RETURNS
            poly : a matplotlib.patches.Polygon object.
REFERENCES
[1] : http://en.wikipedia.org/wiki/Ellipse
"""
ax = kwargs.pop('ax', None) or self._check_ax()
g = pyproj.Geod(a=self.rmajor, b=self.rminor)
# Gets forward and back azimuths, plus distances between initial
# points (x0, y0)
azf, azb, dist = g.inv([x0, x0], [y0, y0], [x0+a, x0], [y0, y0+b])
tsid = dist[0] * dist[1] # a * b
        # Initializes the list of segments, calculates the azimuth step, and
        # iterates over every vertex
seg = [self(x0+a, y0)]
AZ = np.linspace(azf[0], 360. + azf[0], n)
for i, az in enumerate(AZ):
# Skips segments along equator (Geod can't handle equatorial arcs).
if np.allclose(0., y0) and (np.allclose(90., az) or
np.allclose(270., az)):
continue
# In polar coordinates, with the origin at the center of the
# ellipse and with the angular coordinate ``az`` measured from the
# major axis, the ellipse's equation is [1]:
#
# a * b
# r(az) = ------------------------------------------
# ((b * cos(az))**2 + (a * sin(az))**2)**0.5
#
            # Azimuth angle in radial coordinates, corrected for the reference
# angle.
azr = 2. * np.pi / 360. * (az + 90.)
A = dist[0] * np.sin(azr)
B = dist[1] * np.cos(azr)
r = tsid / (B**2. + A**2.)**0.5
lon, lat, azb = g.fwd(x0, y0, az, r)
x, y = self(lon, lat)
# Add segment if it is in the map projection region.
if x < 1e20 and y < 1e20:
seg.append((x, y))
poly = Polygon(seg, **kwargs)
ax.add_patch(poly)
# Set axes limits to fit map region.
self.set_axes_limits(ax=ax)
return poly
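# Usage sketch for Basemap.ellipse (assumes an existing map instance `m` and an
# illustrative center/size; not from the original file):
#     poly = m.ellipse(x0=-85.0, y0=35.0, a=2.0, b=1.0, n=50,
#                      facecolor='green', alpha=0.25)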
def get_cov_ellipse_wh(cov, nstd = 3):
"""
    Return the width, height and rotation angle of an ellipse representing the
    covariance matrix cov, scaled by the factor nstd (number of standard deviations).
# Find and sort eigenvalues and eigenvectors into descending order
eigvals, eigvecs = np.linalg.eigh(cov)
order = eigvals.argsort()[::-1]
eigvals, eigvecs = eigvals[order], eigvecs[:, order]
# The anti-clockwise angle to rotate our ellipse by
vx, vy = eigvecs[:,0][1], eigvecs[:,0][0]
theta = np.arctan2(vy, vx)
# Width and height of ellipse to draw
width, height = 2 * nstd * np.sqrt(eigvals)
return width, height, theta
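# Usage sketch (illustrative 2x2 covariance matrix):
#     width, height, theta = get_cov_ellipse_wh(np.array([[4.0, 1.0], [1.0, 2.0]]), nstd=3)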
def plot_fp_act(FP_ID,
FP_utilize_df,
act_track_data,
flight_plan_data,
feed_track = None,
pred_track = None,
pred_track_mu = None,
pred_track_cov = None,
k = 9,
nstd = 3,
sort = True,
plot_weather_info = False,
**kwargs):
# TODO: plot error ellipse
    ori_lat = 29.98333333
    ori_lon = -95.33333333
    des_lat = 42.36666667
    des_lon = -71
fig = plt.figure(figsize=(8,6))
m = Basemap(llcrnrlon = -100,llcrnrlat = 27,urcrnrlon = -68,urcrnrlat = 46,projection='merc')
m.drawmapboundary(fill_color='#8aeaff')
m.fillcontinents(color='#c5c5c5', lake_color='#8aeaff')
m.drawcoastlines(linewidth=0.5)
m.drawcountries(linewidth=0.5)
m.drawstates(linewidth=0.5)
m.drawparallels(np.arange(10.,35.,5.))
m.drawmeridians(np.arange(-120.,-80.,10.))
x1, y1 = m(ori_lon, ori_lat)
x2, y2 = m(des_lon, des_lat)
plt.plot(x1,y1, 'r*', ms = 15, zorder = 10)
plt.plot(x2,y2, 'r*', ms = 15, zorder = 10)
fid_fp1 = FP_utilize_df.loc[FP_utilize_df.FLT_PLAN_ID == FP_ID, 'FID'].values
print('%d flights filed flight plan %s'%(fid_fp1.shape[0], FP_ID))
plot_track = act_track_data.loc[act_track_data.FID.isin(fid_fp1)]
plot_fp = flight_plan_data.loc[flight_plan_data.FLT_PLAN_ID_REAL == FP_ID]
x_fp, y_fp = m(plot_fp.LONGITUDE.values, plot_fp.LATITUDE.values)
feed_x, feed_y = m(feed_track.Lon.values, feed_track.Lat.values)
feed, = plt.plot(feed_x, feed_y, 'o-', ms = 4, linewidth = 3, color='g', label = 'Feed tracks', zorder = 9)
for gpidx, gp in plot_track.groupby('FID'):
x,y = m(gp.Lon.values, gp.Lat.values)
actual, = plt.plot(x,y,'--', linewidth = 2, color='b', label = 'Actual Tracks', zorder = 8)
fp, = plt.plot(x_fp, y_fp, '-', linewidth = 2, color='r', label = 'Flight Plans', zorder = 5)
if pred_track is not None:
if sort:
x, y = m(pred_track[k][pred_track[k][:,3].argsort()][:, 1], pred_track[k][pred_track[k][:,3].argsort()][:, 0])
else:
x, y = m(pred_track[k, :, 1], pred_track[k, :, 0])
pred_fig, = plt.plot(x,y, 'o--', ms = 3, zorder = 7)
if pred_track_mu is not None:
if sort:
x, y = m(pred_track_mu[k][pred_track_mu[k][:,3].argsort()][:, 1],
pred_track_mu[k][pred_track_mu[k][:,3].argsort()][:, 0])
else:
x, y = m(pred_track_mu[k][:, 1], pred_track_mu[k][:, 0])
plt.plot(x,y, 'mo--', ms = 4, zorder = 7, label = 'Predicted tracks')
if pred_track_cov is not None:
for t in range(pred_track_mu[k].shape[0]):
lon_a = np.sqrt(pred_track_cov[k, t, 1, 1]) * nstd
lat_b = np.sqrt(pred_track_cov[k, t, 0, 0]) * nstd
# assume independency
# cov_width, cov_height, cov_theta = get_cov_ellipse_wh(predicted_tracks_cov[k, t, :2, :2], nstd = 3)
centre_lon, centre_lat = (pred_track_mu[k, t, 1], pred_track_mu[k, t, 0])
poly = m.ellipse(centre_lon,
centre_lat,
lon_a,
lat_b,
50,
facecolor='green', zorder=6, alpha=0.25)
plt.legend(fontsize = 12, loc = 2)
if plot_weather_info:
grbs_common_info_file = kwargs.get('grbs_common_info_file', '/media/storage/DATA/filtered_weather_data/grbs_common_info.npz')
wind_file_root = kwargs.get('wind_file_root', '../../DATA/filtered_weather_data/namanl_small_npz/')
wx_file_root = kwargs.get('wx_file_root', '../../DATA/NCWF/gridded_storm_hourly/')
resolution = kwargs.get('resolution', 50)
wind_scale = kwargs.get('wind_scale', 1000)
wind_fname_list = [os.path.join(wind_file_root, wf) for wf in np.unique(plot_track['wind_fname'])]
wx_file_time = plot_track['wx_fname'].values
wx_file_time = np.unique(wx_file_time[~pd.isnull(wx_file_time)])
wx_fname_list = [os.path.join(wx_file_root, wf.replace('-', '_').replace(' ', '_').replace(':', '')[:15] + 'Z.npz') for wf in wx_file_time]
_ = plot_wx(m,
wind_fname_list = wind_fname_list,
wx_fname_list = wx_fname_list,
grbs_common_info_file = grbs_common_info_file,
resolution = resolution,
wind_scale = wind_scale)
plt.title('Flight %s with flight plan %s'%(plot_track.FID.unique(), FP_ID))
plt.show()
return plot_track, fig
def plot_wx(m,
wind_fname_list,
wx_fname_list,
grbs_common_info_file = '/media/storage/DATA/filtered_weather_data/grbs_common_info.npz',
resolution = 50,
wind_scale = 1000,
**kwargs):
from scipy.interpolate import griddata
grbs_common_info = np.load(grbs_common_info_file)
smallgrid = grbs_common_info['smallgrid']
i = 0
for wind_file in wind_fname_list:
wind_npz = np.load(wind_file)
if i == 0:
tmp_uwind = wind_npz['uwind']
tmp_vwind = wind_npz['vwind']
tmp_tempr = wind_npz['tempr']
else:
tmp_uwind += wind_npz['uwind']
tmp_vwind += wind_npz['vwind']
tmp_tempr += wind_npz['tempr']
i += 1
tmp_uwind /= i
tmp_vwind /= i
tmp_tempr /= i
    if wx_fname_list:
        wx_data = np.empty(shape = (0, smallgrid.shape[0]))
        for wx_file in wx_fname_list:
            wx_npz = np.load(wx_file)
            wx_data = np.concatenate((wx_data, wx_npz['ncwf_arr']), axis = 0)
        wx_grid = smallgrid[np.any(wx_data, axis = 0)]
        # fig = plt.figure(figsize=(8,6))
        x_grid, y_grid = m(wx_grid[:, 0], wx_grid[:, 1])
        plt.scatter(x_grid, y_grid, zorder = 5, c = 'r', s = 0.5, label = 'convective weather')
# wind m/s
t = tmp_tempr[0]
u = tmp_uwind[0]
v = tmp_vwind[0]
# z = np.sqrt(u**2 + v**2)
x = smallgrid[:, 0]
y = smallgrid[:, 1]
xi = np.linspace(min(x), max(x), resolution)
yi = np.linspace(min(y), max(y), resolution)
ui = griddata((x, y), u, (xi[None,:], yi[:,None]), method='linear')
vi = griddata((x, y), v, (xi[None,:], yi[:,None]), method='linear')
ti = griddata((x, y), t, (xi[None,:], yi[:,None]), method='linear')
xi,yi = m(xi,yi)
Q = plt.quiver(xi,yi,ui,vi, scale = wind_scale, zorder = 4, label = 'wind speed')
qk = plt.quiverkey(Q, 0, 0, 100, '100 m/s', labelpos='W', color = 'r', labelcolor='k')
plt.pcolormesh(xi, yi, ti, zorder = 3, alpha = 0.35, cmap = 'bwr', label = 'temperature')
    return
| [
"utils.g.fwd",
"numpy.load",
"numpy.arctan2",
"numpy.empty",
"numpy.allclose",
"matplotlib.pyplot.quiver",
"matplotlib.patches.Polygon",
"matplotlib.pyplot.figure",
"utils.g.inv",
"numpy.arange",
"numpy.sin",
"os.path.join",
"numpy.unique",
"numpy.linspace",
"pyproj.Geod",
"matplotlib.... | [((3452, 3471), 'numpy.linalg.eigh', 'np.linalg.eigh', (['cov'], {}), '(cov)\n', (3466, 3471), True, 'import numpy as np\n'), ((3681, 3699), 'numpy.arctan2', 'np.arctan2', (['vy', 'vx'], {}), '(vy, vx)\n', (3691, 3699), True, 'import numpy as np\n'), ((4383, 4409), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (4393, 4409), True, 'import matplotlib.pyplot as plt\n'), ((4417, 4506), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'llcrnrlon': '(-100)', 'llcrnrlat': '(27)', 'urcrnrlon': '(-68)', 'urcrnrlat': '(46)', 'projection': '"""merc"""'}), "(llcrnrlon=-100, llcrnrlat=27, urcrnrlon=-68, urcrnrlat=46,\n projection='merc')\n", (4424, 4506), False, 'from mpl_toolkits.basemap import Basemap\n'), ((4879, 4919), 'matplotlib.pyplot.plot', 'plt.plot', (['x1', 'y1', '"""r*"""'], {'ms': '(15)', 'zorder': '(10)'}), "(x1, y1, 'r*', ms=15, zorder=10)\n", (4887, 4919), True, 'import matplotlib.pyplot as plt\n'), ((4927, 4967), 'matplotlib.pyplot.plot', 'plt.plot', (['x2', 'y2', '"""r*"""'], {'ms': '(15)', 'zorder': '(10)'}), "(x2, y2, 'r*', ms=15, zorder=10)\n", (4935, 4967), True, 'import matplotlib.pyplot as plt\n'), ((5430, 5526), 'matplotlib.pyplot.plot', 'plt.plot', (['feed_x', 'feed_y', '"""o-"""'], {'ms': '(4)', 'linewidth': '(3)', 'color': '"""g"""', 'label': '"""Feed tracks"""', 'zorder': '(9)'}), "(feed_x, feed_y, 'o-', ms=4, linewidth=3, color='g', label=\n 'Feed tracks', zorder=9)\n", (5438, 5526), True, 'import matplotlib.pyplot as plt\n'), ((5739, 5824), 'matplotlib.pyplot.plot', 'plt.plot', (['x_fp', 'y_fp', '"""-"""'], {'linewidth': '(2)', 'color': '"""r"""', 'label': '"""Flight Plans"""', 'zorder': '(5)'}), "(x_fp, y_fp, '-', linewidth=2, color='r', label='Flight Plans',\n zorder=5)\n", (5747, 5824), True, 'import matplotlib.pyplot as plt\n'), ((7223, 7253), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(12)', 'loc': '(2)'}), '(fontsize=12, loc=2)\n', (7233, 7253), True, 'import matplotlib.pyplot as plt\n'), ((8477, 8487), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8485, 8487), True, 'import matplotlib.pyplot as plt\n'), ((8844, 8874), 'numpy.load', 'np.load', (['grbs_common_info_file'], {}), '(grbs_common_info_file)\n', (8851, 8874), True, 'import numpy as np\n'), ((10156, 10220), 'scipy.interpolate.griddata', 'griddata', (['(x, y)', 'u', '(xi[None, :], yi[:, None])'], {'method': '"""linear"""'}), "((x, y), u, (xi[None, :], yi[:, None]), method='linear')\n", (10164, 10220), False, 'from scipy.interpolate import griddata\n'), ((10228, 10292), 'scipy.interpolate.griddata', 'griddata', (['(x, y)', 'v', '(xi[None, :], yi[:, None])'], {'method': '"""linear"""'}), "((x, y), v, (xi[None, :], yi[:, None]), method='linear')\n", (10236, 10292), False, 'from scipy.interpolate import griddata\n'), ((10300, 10364), 'scipy.interpolate.griddata', 'griddata', (['(x, y)', 't', '(xi[None, :], yi[:, None])'], {'method': '"""linear"""'}), "((x, y), t, (xi[None, :], yi[:, None]), method='linear')\n", (10308, 10364), False, 'from scipy.interpolate import griddata\n'), ((10393, 10467), 'matplotlib.pyplot.quiver', 'plt.quiver', (['xi', 'yi', 'ui', 'vi'], {'scale': 'wind_scale', 'zorder': '(4)', 'label': '"""wind speed"""'}), "(xi, yi, ui, vi, scale=wind_scale, zorder=4, label='wind speed')\n", (10403, 10467), True, 'import matplotlib.pyplot as plt\n'), ((10480, 10559), 'matplotlib.pyplot.quiverkey', 'plt.quiverkey', (['Q', '(0)', '(0)', '(100)', '"""100 m/s"""'], {'labelpos': '"""W"""', 'color': '"""r"""', 
'labelcolor': '"""k"""'}), "(Q, 0, 0, 100, '100 m/s', labelpos='W', color='r', labelcolor='k')\n", (10493, 10559), True, 'import matplotlib.pyplot as plt\n'), ((10566, 10652), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['xi', 'yi', 'ti'], {'zorder': '(3)', 'alpha': '(0.35)', 'cmap': '"""bwr"""', 'label': '"""temperature"""'}), "(xi, yi, ti, zorder=3, alpha=0.35, cmap='bwr', label=\n 'temperature')\n", (10580, 10652), True, 'import matplotlib.pyplot as plt\n'), ((1397, 1438), 'pyproj.Geod', 'pyproj.Geod', ([], {'a': 'self.rmajor', 'b': 'self.rminor'}), '(a=self.rmajor, b=self.rminor)\n', (1408, 1438), False, 'import pyproj\n'), ((1563, 1616), 'utils.g.inv', 'g.inv', (['[x0, x0]', '[y0, y0]', '[x0 + a, x0]', '[y0, y0 + b]'], {}), '([x0, x0], [y0, y0], [x0 + a, x0], [y0, y0 + b])\n', (1568, 1616), False, 'from utils import g\n'), ((1804, 1842), 'numpy.linspace', 'np.linspace', (['azf[0]', '(360.0 + azf[0])', 'n'], {}), '(azf[0], 360.0 + azf[0], n)\n', (1815, 1842), True, 'import numpy as np\n'), ((3009, 3031), 'matplotlib.patches.Polygon', 'Polygon', (['seg'], {}), '(seg, **kwargs)\n', (3016, 3031), False, 'from matplotlib.patches import Polygon\n'), ((3774, 3790), 'numpy.sqrt', 'np.sqrt', (['eigvals'], {}), '(eigvals)\n', (3781, 3790), True, 'import numpy as np\n'), ((4734, 4760), 'numpy.arange', 'np.arange', (['(10.0)', '(35.0)', '(5.0)'], {}), '(10.0, 35.0, 5.0)\n', (4743, 4760), True, 'import numpy as np\n'), ((4777, 4807), 'numpy.arange', 'np.arange', (['(-120.0)', '(-80.0)', '(10.0)'], {}), '(-120.0, -80.0, 10.0)\n', (4786, 4807), True, 'import numpy as np\n'), ((5647, 5724), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""--"""'], {'linewidth': '(2)', 'color': '"""b"""', 'label': '"""Actual Tracks"""', 'zorder': '(8)'}), "(x, y, '--', linewidth=2, color='b', label='Actual Tracks', zorder=8)\n", (5655, 5724), True, 'import matplotlib.pyplot as plt\n'), ((6100, 6137), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""o--"""'], {'ms': '(3)', 'zorder': '(7)'}), "(x, y, 'o--', ms=3, zorder=7)\n", (6108, 6137), True, 'import matplotlib.pyplot as plt\n'), ((6440, 6504), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""mo--"""'], {'ms': '(4)', 'zorder': '(7)', 'label': '"""Predicted tracks"""'}), "(x, y, 'mo--', ms=4, zorder=7, label='Predicted tracks')\n", (6448, 6504), True, 'import matplotlib.pyplot as plt\n'), ((8989, 9007), 'numpy.load', 'np.load', (['wind_file'], {}), '(wind_file)\n', (8996, 9007), True, 'import numpy as np\n'), ((9443, 9482), 'numpy.empty', 'np.empty', ([], {'shape': '(0, smallgrid.shape[0])'}), '(shape=(0, smallgrid.shape[0]))\n', (9451, 9482), True, 'import numpy as np\n'), ((9802, 9881), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_grid', 'y_grid'], {'zorder': '(5)', 'c': '"""r"""', 's': '(0.5)', 'label': '"""convective weather"""'}), "(x_grid, y_grid, zorder=5, c='r', s=0.5, label='convective weather')\n", (9813, 9881), True, 'import matplotlib.pyplot as plt\n'), ((2799, 2819), 'utils.g.fwd', 'g.fwd', (['x0', 'y0', 'az', 'r'], {}), '(x0, y0, az, r)\n', (2804, 2819), False, 'from utils import g\n'), ((7748, 7780), 'os.path.join', 'os.path.join', (['wind_file_root', 'wf'], {}), '(wind_file_root, wf)\n', (7760, 7780), False, 'import os\n'), ((9544, 9560), 'numpy.load', 'np.load', (['wx_file'], {}), '(wx_file)\n', (9551, 9560), True, 'import numpy as np\n'), ((9583, 9636), 'numpy.concatenate', 'np.concatenate', (["(wx_data, wx_npz['ncwf_arr'])"], {'axis': '(0)'}), "((wx_data, wx_npz['ncwf_arr']), axis=0)\n", (9597, 9636), True, 'import numpy 
as np\n'), ((9667, 9690), 'numpy.any', 'np.any', (['wx_data'], {'axis': '(0)'}), '(wx_data, axis=0)\n', (9673, 9690), True, 'import numpy as np\n'), ((1973, 1993), 'numpy.allclose', 'np.allclose', (['(0.0)', 'y0'], {}), '(0.0, y0)\n', (1984, 1993), True, 'import numpy as np\n'), ((2677, 2688), 'numpy.sin', 'np.sin', (['azr'], {}), '(azr)\n', (2683, 2688), True, 'import numpy as np\n'), ((2715, 2726), 'numpy.cos', 'np.cos', (['azr'], {}), '(azr)\n', (2721, 2726), True, 'import numpy as np\n'), ((6617, 6652), 'numpy.sqrt', 'np.sqrt', (['pred_track_cov[k, t, 1, 1]'], {}), '(pred_track_cov[k, t, 1, 1])\n', (6624, 6652), True, 'import numpy as np\n'), ((6680, 6715), 'numpy.sqrt', 'np.sqrt', (['pred_track_cov[k, t, 0, 0]'], {}), '(pred_track_cov[k, t, 0, 0])\n', (6687, 6715), True, 'import numpy as np\n'), ((7791, 7826), 'numpy.unique', 'np.unique', (["plot_track['wind_fname']"], {}), "(plot_track['wind_fname'])\n", (7800, 7826), True, 'import numpy as np\n'), ((1998, 2019), 'numpy.allclose', 'np.allclose', (['(90.0)', 'az'], {}), '(90.0, az)\n', (2009, 2019), True, 'import numpy as np\n'), ((2038, 2060), 'numpy.allclose', 'np.allclose', (['(270.0)', 'az'], {}), '(270.0, az)\n', (2049, 2060), True, 'import numpy as np\n'), ((7928, 7951), 'pandas.isnull', 'pd.isnull', (['wx_file_time'], {}), '(wx_file_time)\n', (7937, 7951), True, 'import pandas as pd\n')] |
from pydnameth.infrastucture.path import get_data_base_path
import numpy as np
import os.path
import pickle
def get_line_list(line):
line_list = line.split('\t')
for val_id in range(0, len(line_list)):
line_list[val_id] = line_list[val_id].replace('"', '').rstrip()
return line_list
def load_cpg(config):
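    # Load CpG methylation data described by `config` (paths resolved via
    # get_data_base_path). Cached .pkl/.npz files are reused when present;
    # otherwise the tab-separated source file is parsed and the results cached.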
fn_dict = get_data_base_path(config) + '/' + 'cpg_dict.pkl'
fn_data = get_data_base_path(config) + '/' + config.data.name
fn_txt = fn_data + '.txt'
fn_npz = fn_data + '.npz'
if os.path.isfile(fn_dict) and os.path.isfile(fn_npz):
f = open(fn_dict, 'rb')
config.cpg_dict = pickle.load(f)
f.close()
data = np.load(fn_npz)
config.cpg_data = data['cpg_data']
else:
config.cpg_dict = {}
f = open(fn_txt)
f.readline()
cpg_id = 0
for line in f:
line_list = get_line_list(line)
cpg = line_list[0]
config.cpg_dict[cpg] = cpg_id
cpg_id += 1
f.close()
f = open(fn_dict, 'wb')
pickle.dump(config.cpg_dict, f, pickle.HIGHEST_PROTOCOL)
f.close()
num_cpgs = cpg_id
f = open(fn_txt)
header_line = f.readline()
headers = header_line.split('\t')
headers = [x.rstrip() for x in headers]
subjects = headers[1:len(headers)]
config.cpg_data = np.zeros((num_cpgs, len(subjects)), dtype=np.float32)
cpg_id = 0
for line in f:
line_list = get_line_list(line)
curr_data = list(map(np.float32, line_list[1::]))
config.cpg_data[cpg_id] = curr_data
cpg_id += 1
f.close()
np.savez_compressed(fn_npz, cpg_data=config.cpg_data)
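# Usage sketch (assumes a config object exposing the attributes used above,
# e.g. config.data.name and whatever get_data_base_path expects):
#     load_cpg(config)
#     # config.cpg_dict maps CpG id -> row index; config.cpg_data is a float32
#     # matrix of shape (num_cpgs, num_subjects)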
| [
"numpy.load",
"pickle.dump",
"numpy.savez_compressed",
"pickle.load",
"pydnameth.infrastucture.path.get_data_base_path"
] | [((638, 652), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (649, 652), False, 'import pickle\n'), ((687, 702), 'numpy.load', 'np.load', (['fn_npz'], {}), '(fn_npz)\n', (694, 702), True, 'import numpy as np\n'), ((1076, 1132), 'pickle.dump', 'pickle.dump', (['config.cpg_dict', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(config.cpg_dict, f, pickle.HIGHEST_PROTOCOL)\n', (1087, 1132), False, 'import pickle\n'), ((1701, 1754), 'numpy.savez_compressed', 'np.savez_compressed', (['fn_npz'], {'cpg_data': 'config.cpg_data'}), '(fn_npz, cpg_data=config.cpg_data)\n', (1720, 1754), True, 'import numpy as np\n'), ((343, 369), 'pydnameth.infrastucture.path.get_data_base_path', 'get_data_base_path', (['config'], {}), '(config)\n', (361, 369), False, 'from pydnameth.infrastucture.path import get_data_base_path\n'), ((407, 433), 'pydnameth.infrastucture.path.get_data_base_path', 'get_data_base_path', (['config'], {}), '(config)\n', (425, 433), False, 'from pydnameth.infrastucture.path import get_data_base_path\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
import os
import numpy as np
from sharkpylib.file.file_handlers import Directory
from sharkpylib.geography import latlon_distance
class MaskAreasDirectory(object):
def __init__(self):
self.files_object = Directory(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data'), prefix='mask_areas')
def get_file_object(self, file_id):
file_path = self.files_object.get_path(file_id)
return MaskAreas(file_path)
class MaskAreas(object):
def __init__(self, file_path):
self.file_path = file_path
self.data = []
self._load_file()
def _load_file(self):
with open(self.file_path) as fid:
for r, line in enumerate(fid):
line = line.strip()
if not line:
continue
split_line = [item.strip() for item in line.split('\t')]
if r == 0:
header = split_line
else:
self.data.append(dict(zip(header, map(float, split_line))))
def get_masked_boolean(self, lat_list, lon_list):
        if len(lat_list) != len(lon_list):
            raise ValueError('Input lists are not the same length!')
combined_boolean = np.zeros(len(lat_list), dtype=bool)
        for item in self.data:
            # reset per mask area: distances in meters to every input point
            dist = []
            for la, lo in zip(lat_list, lon_list):
                dist.append(latlon_distance((item['lat'], item['lon']), (la, lo))*1000)
            boolean = np.array(dist) <= float(item['radius'])
combined_boolean = combined_boolean | boolean
return combined_boolean
if __name__ == '__main__':
mask_dir = MaskAreasDirectory()
mask_obj = mask_dir.get_file_object('mask_areas_tavastland.txt')
lat_list = [63.41, 66.31, 63.36, 63.11, 63.61, 65.31]
lon_list = [19.1, 19.2, 19.14, 19.3, 18.14, 19.9]
b = mask_obj.get_masked_boolean(lat_list, lon_list)
print(b)
| [
"sharkpylib.geography.latlon_distance",
"os.path.realpath",
"numpy.array"
] | [((1713, 1727), 'numpy.array', 'np.array', (['dist'], {}), '(dist)\n', (1721, 1727), True, 'import numpy as np\n'), ((463, 489), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (479, 489), False, 'import os\n'), ((1631, 1684), 'sharkpylib.geography.latlon_distance', 'latlon_distance', (["(item['lat'], item['lon'])", '(la, lo)'], {}), "((item['lat'], item['lon']), (la, lo))\n", (1646, 1684), False, 'from sharkpylib.geography import latlon_distance\n')] |
import random
import numpy as np
import pandas as pd
import plotnine as g
from IPython.display import display
def trace_plot(y, title):
p = (g.qplot(y = y) +
g.geom_line() +
g.ggtitle(title)
)
return p
class MCMC:
"""A class to execute a Gibbs Sampler for MCMC"""
def __init__(self, data, niter=500):
# data = dictionary with key=hour and value = past values
self.data = data
# ni is the count of data points for a given hour
# normally, this will be the same across every hour in Campaign
# But if you only run a campaign at certain hours on certain days,
# then this would differ across hours
self.ni = [len(self.data[k]) for k, v in self.data.items()]
# number of hours in a day. Add 1 index at before time=0 and 1 index
# after time = 0 for the model to work
self.k = len(self.data.keys())
self.niter = niter
# Hyperpriors
# Distribution of the variance of theta: sigma^2
self.a_sig = 3
self.b_sig = 1
# Distribution of the variance of theta i minus 1: tau
self.a_tau = 3
self.b_tau = 1
# the sampler's estimates aren't reliable until they've stabilized
# This is a hyperparameter that I've fixed at 10%
self.burnin = int(self.niter * .1)
# assumes theta starts at 1
self.theta = np.ones((self.niter, self.k))
self.sig2 = np.array([1.0] * self.niter)
self.tau2 = np.array([1.0] * self.niter)
def _theta_sample(self, i):
"""Sampler for theta"""
# I noticed the sampler wasn't casting a wide-enough net, so I had to increase
# the variance (multiply by 10)
tau2prev = self.tau2[i - 1] * 10
sig2prev = self.sig2[i - 1]
        # estimate theta for each hour
for j in range(0, self.k):
if j == 0:
# value for theta 0 (invented data point)
mu = self.theta[i - 1, (self.k - 1)] + self.theta[i - 1, j + 1]
elif j == self.k - 1:
# value for theta k (invented data point)
# + self.theta[i - 1, 0] = 0th point
mu = self.theta[i, j - 1] + self.theta[i - 1, 0]
else:
# Add the prior row's estimate with the subsequent row's estimate
mu = self.theta[i, j - 1] + self.theta[i - 1, j + 1]
denom = (self.ni[j] * tau2prev + 2 * sig2prev)
mustar = (np.mean(self.data[j]) * self.ni[j] * tau2prev + mu * sig2prev) / denom
sigstar = (tau2prev * sig2prev) / denom
theta_sample = np.random.normal(mustar, np.sqrt(sigstar), 1)
if theta_sample < 0:
theta_sample = 0
self.theta[i, j] = theta_sample
def _update_ssq(self, i):
"""Update sum of squares (ssq) for the model after fitting theta"""
self.ssq = 0
for j in range(0, self.k):
self.ssq = self.ssq + np.sum((self.data[j] - self.theta[i, j]) ** 2)
def _sig2_sample(self, i):
"""Sampler for sig2"""
s_shape = self.a_sig + sum(self.ni) * 0.5
s_scale = (1/self.b_sig + self.ssq/2) ** (-1)
assert s_scale > 0, print(self.b_sig, self.ssq)
denom = np.random.gamma(shape=s_shape,
scale=s_scale,
size=1)
sig2_sample = 1.0/denom
if sig2_sample <= 0:
sig2_sample = 0.001
self.sig2[i] = sig2_sample
def _tau2_sample(self, i):
"""Sampler for tau2"""
# Calculate sstau for tau2
sstau = np.sum((self.theta[i,] - self.theta[i - 1,])**2)
#generate tau2
t_shape = self.a_tau + (self.k * 0.5)
t_scale = (1/self.b_tau + .5 * sstau)**(-1)
denom = np.random.gamma(shape=t_shape,
scale=t_scale,
size=1)
tau2_sample = 1.0/denom
        if tau2_sample <= 0:
            tau2_sample = self.tau2[i - 1]
self.tau2[i] = tau2_sample
def fit(self):
"""Return the MCMC for input parameters"""
for i in range(1, self.niter):
self._theta_sample(i)
self._update_ssq(i)
self._sig2_sample(i)
self._tau2_sample(i)
self.results = {'theta': self.theta,
'sig2': self.sig2,
'tau2': self.tau2}
self.mu = {i: np.mean(self.results['theta'][self.burnin:, i]) for i in range(0, self.k)}
def estimates_as_json(self):
return self.results
def estimates_as_json_noburn(self):
return {k: v[self.burnin:] for k, v in self.results.items()}
def mu_theta(self):
return self.mu
def plot_theta(self, i, burnin=True):
y = self.results['theta'][:, i]
if not burnin:
y = y[self.burnin:]
p = (trace_plot(y = y, title=f'theta {i}') +
g.geom_hline(yintercept = np.mean(self.data[i]), color='blue') +
g.geom_hline(yintercept = np.mean(self.mu[i]), color='red') +
g.geom_line()
)
display(p)
def plot_tau2(self, burnin=True):
y = self.results['tau2']
if burnin:
y = y[self.burnin:]
p = trace_plot(y = y, title='tau2')
display(p)
def plot_sig2(self, burnin=True):
y = self.results['sig2']
if burnin:
y = y[self.burnin:]
p = trace_plot(y = y, title='sig2')
display(p)
def plot_day(self, i):
t = self.results['theta']
p = g.qplot(y = t[i, ]) + g.geom_line() + g.labs(title=f'day {i}', x='hour', y='theta')
display(p)
def plot_hours(self, burnin=True):
"""Plot mean for the hour and error bars"""
        t = self.results['theta']
        if not burnin:
            t = t[self.burnin:, :]
        dat = pd.DataFrame(t).melt()
dat.columns = ['hour', 'estimate']
# https://gist.github.com/HenrikEckermann/1d334a44f61349ac71f0e235b3443a69
p = (g.ggplot(dat, g.aes(x='hour', y='estimate')) +
#g.stat_summary(fun_data = np.max, geom = 'point', fill='blue') +
#g.stat_summary(fun_data = np.min, geom = 'point', fill='blue') +
g.stat_summary(fun_data = 'mean_sdl', fun_args = {'mult':1}, geom = 'errorbar') +
g.stat_summary(fun_y = np.mean, geom = 'point', fill = 'red')
)
display(p)
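# Usage sketch (illustrative synthetic data; keys must be the hour indices 0..k-1):
#     data = {h: np.random.poisson(5, size=30).astype(float) for h in range(24)}
#     sampler = MCMC(data, niter=500)
#     sampler.fit()
#     hourly_means = sampler.mu_theta()  # posterior mean of theta per hour, post burn-in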
| [
"pandas.DataFrame",
"plotnine.stat_summary",
"plotnine.geom_line",
"numpy.sum",
"plotnine.labs",
"plotnine.qplot",
"plotnine.ggtitle",
"numpy.ones",
"numpy.random.gamma",
"numpy.mean",
"numpy.array",
"plotnine.aes",
"numpy.sqrt"
] | [((168, 184), 'plotnine.ggtitle', 'g.ggtitle', (['title'], {}), '(title)\n', (177, 184), True, 'import plotnine as g\n'), ((1388, 1417), 'numpy.ones', 'np.ones', (['(self.niter, self.k)'], {}), '((self.niter, self.k))\n', (1395, 1417), True, 'import numpy as np\n'), ((1438, 1466), 'numpy.array', 'np.array', (['([1.0] * self.niter)'], {}), '([1.0] * self.niter)\n', (1446, 1466), True, 'import numpy as np\n'), ((1487, 1515), 'numpy.array', 'np.array', (['([1.0] * self.niter)'], {}), '([1.0] * self.niter)\n', (1495, 1515), True, 'import numpy as np\n'), ((3259, 3312), 'numpy.random.gamma', 'np.random.gamma', ([], {'shape': 's_shape', 'scale': 's_scale', 'size': '(1)'}), '(shape=s_shape, scale=s_scale, size=1)\n', (3274, 3312), True, 'import numpy as np\n'), ((3621, 3671), 'numpy.sum', 'np.sum', (['((self.theta[i,] - self.theta[i - 1,]) ** 2)'], {}), '((self.theta[i,] - self.theta[i - 1,]) ** 2)\n', (3627, 3671), True, 'import numpy as np\n'), ((3808, 3861), 'numpy.random.gamma', 'np.random.gamma', ([], {'shape': 't_shape', 'scale': 't_scale', 'size': '(1)'}), '(shape=t_shape, scale=t_scale, size=1)\n', (3823, 3861), True, 'import numpy as np\n'), ((111, 123), 'plotnine.qplot', 'g.qplot', ([], {'y': 'y'}), '(y=y)\n', (118, 123), True, 'import plotnine as g\n'), ((140, 153), 'plotnine.geom_line', 'g.geom_line', ([], {}), '()\n', (151, 153), True, 'import plotnine as g\n'), ((4460, 4507), 'numpy.mean', 'np.mean', (["self.results['theta'][self.burnin:, i]"], {}), "(self.results['theta'][self.burnin:, i])\n", (4467, 4507), True, 'import numpy as np\n'), ((5122, 5135), 'plotnine.geom_line', 'g.geom_line', ([], {}), '()\n', (5133, 5135), True, 'import plotnine as g\n'), ((5657, 5702), 'plotnine.labs', 'g.labs', ([], {'title': 'f"""day {i}"""', 'x': '"""hour"""', 'y': '"""theta"""'}), "(title=f'day {i}', x='hour', y='theta')\n", (5663, 5702), True, 'import plotnine as g\n'), ((6393, 6448), 'plotnine.stat_summary', 'g.stat_summary', ([], {'fun_y': 'np.mean', 'geom': '"""point"""', 'fill': '"""red"""'}), "(fun_y=np.mean, geom='point', fill='red')\n", (6407, 6448), True, 'import plotnine as g\n'), ((2644, 2660), 'numpy.sqrt', 'np.sqrt', (['sigstar'], {}), '(sigstar)\n', (2651, 2660), True, 'import numpy as np\n'), ((2973, 3019), 'numpy.sum', 'np.sum', (['((self.data[j] - self.theta[i, j]) ** 2)'], {}), '((self.data[j] - self.theta[i, j]) ** 2)\n', (2979, 3019), True, 'import numpy as np\n'), ((5619, 5635), 'plotnine.qplot', 'g.qplot', ([], {'y': 't[i,]'}), '(y=t[i,])\n', (5626, 5635), True, 'import plotnine as g\n'), ((5641, 5654), 'plotnine.geom_line', 'g.geom_line', ([], {}), '()\n', (5652, 5654), True, 'import plotnine as g\n'), ((5828, 5863), 'pandas.DataFrame', 'pd.DataFrame', (["self.results['theta']"], {}), "(self.results['theta'])\n", (5840, 5863), True, 'import pandas as pd\n'), ((6299, 6373), 'plotnine.stat_summary', 'g.stat_summary', ([], {'fun_data': '"""mean_sdl"""', 'fun_args': "{'mult': 1}", 'geom': '"""errorbar"""'}), "(fun_data='mean_sdl', fun_args={'mult': 1}, geom='errorbar')\n", (6313, 6373), True, 'import plotnine as g\n'), ((6098, 6127), 'plotnine.aes', 'g.aes', ([], {'x': '"""hour"""', 'y': '"""estimate"""'}), "(x='hour', y='estimate')\n", (6103, 6127), True, 'import plotnine as g\n'), ((5070, 5089), 'numpy.mean', 'np.mean', (['self.mu[i]'], {}), '(self.mu[i])\n', (5077, 5089), True, 'import numpy as np\n'), ((2469, 2490), 'numpy.mean', 'np.mean', (['self.data[j]'], {}), '(self.data[j])\n', (2476, 2490), True, 'import numpy as np\n'), ((4989, 5010), 'numpy.mean', 'np.mean', 
(['self.data[i]'], {}), '(self.data[i])\n', (4996, 5010), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 15 21:37:42 2019
@author: leandrohirai
"""
import os
import numpy as np
from shutil import copyfile
path = os.path.dirname(os.path.realpath(__file__))
#path = path+'/data/daySegData/JPEGImages'
path = path+'/data/day2nightData/JPEGImages'
txtPath = os.path.dirname(os.path.realpath(__file__))
#txtPath = txtPath+'/data/daySegData/ImageSets/Segmentation'
txtPath = txtPath+'/data/day2nightData/ImageSets/Segmentation'
imageNames = []
for r, d, f in os.walk(path):
for file in f:
if '.png' in file or '.jpg' in file:
imageNames.append(file)
datasetSize = len(imageNames)
validation_split = 0.3
indices = list(range(datasetSize))
split = int(np.floor(validation_split*datasetSize))
np.random.seed(42)
np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
trainNames = [imageNames[i] for i in train_indices]
valNames =[imageNames[i] for i in val_indices]
train_txt = open(txtPath+'/train.txt',"w")
val_txt = open(txtPath+'/val.txt',"w")
trainval_txt = open(txtPath+'/trainval.txt',"w")
for i in range(0,len(trainNames)):
train_txt.write(trainNames[i]+'\n')
trainval_txt.write(trainNames[i]+'\n')
for i in range(0, len(valNames)):
val_txt.write(valNames[i]+'\n')
trainval_txt.write(valNames[i]+'\n')
train_txt.close()
val_txt.close()
trainval_txt.close()
| [
"numpy.random.seed",
"os.path.realpath",
"os.walk",
"numpy.floor",
"numpy.random.shuffle"
] | [((522, 535), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (529, 535), False, 'import os\n'), ((790, 808), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (804, 808), True, 'import numpy as np\n'), ((809, 835), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (826, 835), True, 'import numpy as np\n'), ((196, 222), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (212, 222), False, 'import os\n'), ((338, 364), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (354, 364), False, 'import os\n'), ((750, 790), 'numpy.floor', 'np.floor', (['(validation_split * datasetSize)'], {}), '(validation_split * datasetSize)\n', (758, 790), True, 'import numpy as np\n')] |
#-*- coding: utf8
from __future__ import division, print_function
import numpy as np
def _compute_centroids(X, assign, num_clusters):
C = np.zeros(shape=(num_clusters, X.shape[1]), dtype='d')
    for k in range(num_clusters):
if not (assign == k).any():
continue
K = X[assign == k]
if K.ndim == 1:
K = K[np.newaxis]
        C[k] = K.mean(axis=0)
return C
def _surprisal_mat(X):
    # Some elements have zero probability; ignore and suppress the warnings
with np.errstate(divide='ignore', invalid='ignore'):
L = np.log2(X)
L[np.isnan(L)] = 0
L[np.isinf(L)] = 0
return L
def _dist_all(X, C):
S_x = _surprisal_mat(X)
S_c = _surprisal_mat(C)
D = (X * (S_x - S_c[:, np.newaxis,: ])).sum(axis=2).T
return D
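# Each entry D[i, k] = sum_j X[i, j] * (log2 X[i, j] - log2 C[k, j]), i.e. the
# KL divergence KL(X_i || C_k) in bits, which is the distance this KL k-means
# minimizes (assuming each row of X is a probability distribution).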
def _base_kmeans(X, C, n_iters=-1):
num_clusters = C.shape[0]
n = X.shape[0]
C_final = C
#KMeans algorithm
cent_dists = None
assign = None
prev_assign = None
best_shift = None
iters = n_iters
converged = False
while iters != 0 and not converged:
#assign elements to new clusters
D = _dist_all(X, C)
assign = D.argmin(axis=1)
#check if converged, if not compute new centroids
if prev_assign is not None and not (prev_assign - assign).any():
converged = True
else:
C_final = _compute_centroids(X, assign, num_clusters)
prev_assign = assign
iters -= 1
return C_final, assign
def cost(X, C, assign):
cost = 0
for k in set(assign):
idx = assign == k
cost += _dist_all(X[idx], C[k][np.newaxis]).sum()
return cost
def klkmeans(X, num_clusters, n_iters=-1, n_runs=10):
min_cost = float('+inf')
best_C = None
best_assign = None
    for _ in range(n_runs):
assign = np.random.randint(0, num_clusters, X.shape[0])
C = _compute_centroids(X, assign, num_clusters)
C, assign = _base_kmeans(X, C, n_iters)
clust_cost = cost(X, C, assign)
        if clust_cost < min_cost:
            min_cost = clust_cost
            best_C = C
            best_assign = assign
return best_C, best_assign
if __name__ == '__main__':
np.seterr(all='raise')
X = np.zeros((200, 1000))
X[0:100] = 1
X[100:200, 500:] = 1
X += 1e-20
X = (X.T / X.sum(axis=1)).T
C, assign = klkmeans(X, 2)
assert ((C.sum(axis=1) - 1) < 1e-10).all()
assert (assign[0:100] != assign[100:]).all()
import os
dir_ = os.path.dirname(__file__)
fpath = os.path.join(dir_, 'testdata.dat')
X = np.genfromtxt(fpath)
C, assign = klkmeans(X, 5)
assert ((C.sum(axis=1) - 1) < 1e-10).all()
assert len(set(assign)) == 5
| [
"numpy.seterr",
"numpy.log2",
"os.path.dirname",
"numpy.zeros",
"numpy.genfromtxt",
"numpy.errstate",
"numpy.isnan",
"numpy.isinf",
"numpy.random.randint",
"os.path.join"
] | [((144, 197), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_clusters, X.shape[1])', 'dtype': '"""d"""'}), "(shape=(num_clusters, X.shape[1]), dtype='d')\n", (152, 197), True, 'import numpy as np\n'), ((2238, 2260), 'numpy.seterr', 'np.seterr', ([], {'all': '"""raise"""'}), "(all='raise')\n", (2247, 2260), True, 'import numpy as np\n'), ((2269, 2290), 'numpy.zeros', 'np.zeros', (['(200, 1000)'], {}), '((200, 1000))\n', (2277, 2290), True, 'import numpy as np\n'), ((2547, 2572), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2562, 2572), False, 'import os\n'), ((2585, 2619), 'os.path.join', 'os.path.join', (['dir_', '"""testdata.dat"""'], {}), "(dir_, 'testdata.dat')\n", (2597, 2619), False, 'import os\n'), ((2628, 2648), 'numpy.genfromtxt', 'np.genfromtxt', (['fpath'], {}), '(fpath)\n', (2641, 2648), True, 'import numpy as np\n'), ((524, 570), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (535, 570), True, 'import numpy as np\n'), ((584, 594), 'numpy.log2', 'np.log2', (['X'], {}), '(X)\n', (591, 594), True, 'import numpy as np\n'), ((1891, 1937), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_clusters', 'X.shape[0]'], {}), '(0, num_clusters, X.shape[0])\n', (1908, 1937), True, 'import numpy as np\n'), ((605, 616), 'numpy.isnan', 'np.isnan', (['L'], {}), '(L)\n', (613, 616), True, 'import numpy as np\n'), ((632, 643), 'numpy.isinf', 'np.isinf', (['L'], {}), '(L)\n', (640, 643), True, 'import numpy as np\n')] |
import random
import numpy as np
import rltorch
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
import gym
class Policy(nn.Module):
def __init__(self, state_size, action_size):
super(Policy, self).__init__()
self.state_size = state_size
self.action_size = action_size
self.fc1 = nn.Linear(state_size, 125)
self.fc_norm = nn.LayerNorm(125)
self.fc2 = nn.Linear(125, 125)
self.fc2_norm = nn.LayerNorm(125)
self.action_prob = nn.Linear(125, action_size)
def forward(self, x):
x = F.relu(self.fc_norm(self.fc1(x)))
x = F.relu(self.fc2_norm(self.fc2(x)))
x = F.softmax(self.action_prob(x), dim = 1)
return x
env = gym.make("Acrobot-v1")
def fitness(model_dict):
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
model = Policy(state_size, action_size)
model.load_state_dict(model_dict)
state = torch.from_numpy(env.reset()).float().unsqueeze(0)
total_reward = 0
done = False
while not done:
action_probabilities = model(state)
distribution = Categorical(action_probabilities)
action = distribution.sample().item()
next_state, reward, done, _ = env.step(action)
total_reward += reward
state = torch.from_numpy(next_state).float().unsqueeze(0)
return total_reward
# make_model should be a function that returns a nn.Module
class Population:
def __init__(self, model, population_size, fitness_fn, keep_best = 1, mutation_rate = 0.01, sigma = 0.1):
self.model = model
self.population_size = population_size
self.mutation_rate = mutation_rate
self.keep_best = keep_best
self.sigma = sigma
assert self.sigma >= 0
assert self.keep_best >= 0
assert self.population_size > 0
assert self.keep_best < self.population_size
self.pop = self._generate_population(model, population_size)
# Probability that an individual will last to the next generation
self.survivability = np.full(shape=(population_size), fill_value = 1 / population_size)
self.calculate_fitness = fitness_fn
def _generate_population(self, model, population_size):
pop = []
for i in range(population_size):
member = {}
for key, value in model.state_dict().items():
member[key] = value + self.sigma * torch.randn(*value.shape)
pop.append(member)
return pop
def _calculate_survivability(self, pop):
fitness = np.array(list(map(self.calculate_fitness, pop)))
# Make fitness non-negative
if fitness.min() <= 0:
            fitness += (-1 * fitness.min()) + 1e-10 # Add a small constant to avoid zero probabilities
return fitness / fitness.sum()
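    # Survivability is a fitness-proportionate selection distribution: fitness is
    # shifted to be strictly positive and normalized so the entries sum to 1,
    # ready to be used as probabilities in np.random.choice.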
def _select_survivors(self, population, survivability):
population_size = len(population)
survivors_indices = np.random.choice(range(0, population_size), size=(population_size - self.keep_best) * 2, p=survivability)
return [population[i] for i in survivors_indices]
def _crossover(self, parents):
parent_ind = np.array(range(0, len(parents)))
parent1_ind = np.random.choice(parent_ind, size = len(parents) // 2, replace=False)
parent2_ind = np.setdiff1d(parent_ind, parent1_ind)
parent1 = [parents[i] for i in parent1_ind]
        parent2 = [parents[i] for i in parent2_ind]
children = []
for parent1, parent2 in zip(parent1, parent2):
child = {}
for key in parent1.keys():
crossover_ind = random.randint(0, len(parent1[key]))
child_value = torch.cat((parent1[key][:crossover_ind], parent2[key][crossover_ind:]))
child_value = self._mutate(child_value)
child[key] = child_value
children.append(child)
return children
def _mutate(self, child):
if np.random.rand() < self.mutation_rate:
child += self.sigma * torch.randn(*child.shape)
return child
def __iter__(self):
return self
    # This function advances the population to the next generation
def __next__(self):
survivability = self._calculate_survivability(self.pop)
        survivor_ind = []
        if self.keep_best > 0:
            survivor_ind = np.argsort(survivability)[-self.keep_best:]
parents = self._select_survivors(self.pop, survivability)
children = self._crossover(parents)
next_pop = [self.pop[i] for i in survivor_ind] + children
self.pop = next_pop
return next_pop
    def solution(self):
        # Return the member with the highest survival probability; the original
        # float indexing into self.pop would raise a TypeError.
        best_ind = int(np.argmax(self._calculate_survivability(self.pop)))
        return self.pop[best_ind]
def test():
p = Population(Policy(6, 3), 100, fitness)
for i in range(100):
next(p)
return p.solution() | [
"numpy.full",
"torch.distributions.Categorical",
"gym.make",
"numpy.setdiff1d",
"torch.cat",
"torch.randn",
"numpy.argsort",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"numpy.random.rand",
"torch.from_numpy"
] | [((735, 757), 'gym.make', 'gym.make', (['"""Acrobot-v1"""'], {}), "('Acrobot-v1')\n", (743, 757), False, 'import gym\n'), ((362, 388), 'torch.nn.Linear', 'nn.Linear', (['state_size', '(125)'], {}), '(state_size, 125)\n', (371, 388), True, 'import torch.nn as nn\n'), ((408, 425), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['(125)'], {}), '(125)\n', (420, 425), True, 'import torch.nn as nn\n'), ((446, 465), 'torch.nn.Linear', 'nn.Linear', (['(125)', '(125)'], {}), '(125, 125)\n', (455, 465), True, 'import torch.nn as nn\n'), ((486, 503), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['(125)'], {}), '(125)\n', (498, 503), True, 'import torch.nn as nn\n'), ((528, 555), 'torch.nn.Linear', 'nn.Linear', (['(125)', 'action_size'], {}), '(125, action_size)\n', (537, 555), True, 'import torch.nn as nn\n'), ((1138, 1171), 'torch.distributions.Categorical', 'Categorical', (['action_probabilities'], {}), '(action_probabilities)\n', (1149, 1171), False, 'from torch.distributions import Categorical\n'), ((2102, 2164), 'numpy.full', 'np.full', ([], {'shape': 'population_size', 'fill_value': '(1 / population_size)'}), '(shape=population_size, fill_value=1 / population_size)\n', (2109, 2164), True, 'import numpy as np\n'), ((3370, 3407), 'numpy.setdiff1d', 'np.setdiff1d', (['parent_ind', 'parent1_ind'], {}), '(parent_ind, parent1_ind)\n', (3382, 3407), True, 'import numpy as np\n'), ((4041, 4057), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4055, 4057), True, 'import numpy as np\n'), ((3750, 3821), 'torch.cat', 'torch.cat', (['(parent1[key][:crossover_ind], parent2[key][crossover_ind:])'], {}), '((parent1[key][:crossover_ind], parent2[key][crossover_ind:]))\n', (3759, 3821), False, 'import torch\n'), ((4114, 4139), 'torch.randn', 'torch.randn', (['*child.shape'], {}), '(*child.shape)\n', (4125, 4139), False, 'import torch\n'), ((4426, 4451), 'numpy.argsort', 'np.argsort', (['survivability'], {}), '(survivability)\n', (4436, 4451), True, 'import numpy as np\n'), ((1320, 1348), 'torch.from_numpy', 'torch.from_numpy', (['next_state'], {}), '(next_state)\n', (1336, 1348), False, 'import torch\n'), ((2465, 2490), 'torch.randn', 'torch.randn', (['*value.shape'], {}), '(*value.shape)\n', (2476, 2490), False, 'import torch\n')] |
# pylint: skip-file
import datetime
import json
import logging
import traceback
import numpy as np
from api.infrastructure.mysql import connection
logger = logging.getLogger(__name__)
class ChurnRiskHistory:
def __init__(self, database_name, kam=None):
super().__init__()
self.results_db = "results_{}".format(database_name)
self.data_db = "data_{}".format(database_name)
self.mysql_connection = connection.MySQLConnection(self.results_db)
self.con = self.mysql_connection.connect()
self.cur = self.con.cursor()
self.kam = kam
self.results = self.read_history()
def read_history(self):
today = datetime.datetime.now()
try:
if self.kam is None:
query = "SELECT ROUND(cr.risk, 2) as risk FROM {results_db}.critters AS cr " \
"LEFT JOIN {data_db}.customers AS c ON cr.name=c.id".format(
results_db=self.results_db,
data_db=self.data_db
)
elif isinstance(self.kam, list):
customer_ids = [customer.id for k in self.kam for customer in k.customers]
query = "SELECT ROUND(cr.risk, 2) as risk FROM {results_db}.critters AS cr " \
"LEFT JOIN {data_db}.customers AS c ON cr.name=c.id " \
"WHERE c.id IN {customer_ids}" \
.format(
results_db=self.results_db,
data_db=self.data_db,
customer_ids=str(tuple(customer_ids))
)
self.cur.execute(query)
risk_data = np.asarray(self.cur.fetchall())
risk_data_cols = [desc[0] for desc in self.cur.description]
results = dict()
for i in range(len(risk_data_cols)):
values = np.ravel(risk_data[:, i])
c = risk_data_cols[i]
if c == 'risk':
results['rawRisk'] = values.tolist()
values[values == -np.inf] = 0
results[c] = values.tolist()
return {
'churn_risk': results,
}
except (
NameError,
TypeError,
KeyError,
ValueError,
AttributeError,
IndexError
) as exception:
logger.error(
"message %s, error %s",
exception,
traceback.format_exc(),
extra={
'type': 'Login'
}
)
def as_json(self):
return json.dumps(self.results)
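# Example usage (a sketch; "prod" is a placeholder database suffix, and `kam`
# objects are assumed to expose a `.customers` list of objects with an `.id`):
#   history = ChurnRiskHistory("prod")
#   payload = history.as_json()  # JSON string holding the 'churn_risk' series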
| [
"numpy.ravel",
"json.dumps",
"api.infrastructure.mysql.connection.MySQLConnection",
"traceback.format_exc",
"datetime.datetime.now",
"logging.getLogger"
] | [((160, 187), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (177, 187), False, 'import logging\n'), ((441, 484), 'api.infrastructure.mysql.connection.MySQLConnection', 'connection.MySQLConnection', (['self.results_db'], {}), '(self.results_db)\n', (467, 484), False, 'from api.infrastructure.mysql import connection\n'), ((686, 709), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (707, 709), False, 'import datetime\n'), ((2709, 2733), 'json.dumps', 'json.dumps', (['self.results'], {}), '(self.results)\n', (2719, 2733), False, 'import json\n'), ((1917, 1942), 'numpy.ravel', 'np.ravel', (['risk_data[:, i]'], {}), '(risk_data[:, i])\n', (1925, 1942), True, 'import numpy as np\n'), ((2554, 2576), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2574, 2576), False, 'import traceback\n')] |
import unittest
import numpy as np
import numpy.testing as npt
from uts import gradient
class TestGradient(unittest.TestCase):
def test_gradient_cfd_even(self):
x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
y = np.array([1, 4, 9, 16, 25, 36, 49, 64, 81])
result = gradient.cfd(x, y)
desired = np.array([2., 4., 6., 8., 10., 12., 14., 16., 18.])
npt.assert_almost_equal(result, desired, decimal=2)
def test_gradient_cfd_even_2(self):
x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
y = x**3
result = gradient.cfd(x, y)
desired = np.array([1, 13, 28, 49, 76, 109, 148, 193, 241])
npt.assert_almost_equal(result, desired, decimal=2)
def test_gradient_csd_even(self):
x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
y = np.array([1, 4, 9, 16, 25, 36, 49, 64, 81])
result = gradient.csd(x, y)
desired = np.array([2, 2, 2, 2, 2, 2, 2, 2, 2])
npt.assert_almost_equal(result, desired, decimal=2)
def test_gradient_cfd_uneven(self):
x = np.array([1, 3, 4, 5, 6, 8, 9])
y = np.array([1, 9, 16, 25, 36, 64, 81])
result = gradient.cfd(x, y)
desired = np.array([2, 6, 8, 10, 12, 16, 18])
npt.assert_almost_equal(result, desired, decimal=2)
def test_gradient_csd_uneven(self):
x = np.array([1, 3, 4, 5, 6, 8, 9])
y = np.array([1, 9, 16, 25, 36, 64, 81])
result = gradient.csd(x, y)
desired = np.array([2, 2, 2, 2, 2, 2, 2])
npt.assert_almost_equal(result, desired, decimal=2)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"uts.gradient.csd",
"numpy.testing.assert_almost_equal",
"numpy.array",
"uts.gradient.cfd"
] | [((1619, 1634), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1632, 1634), False, 'import unittest\n'), ((180, 217), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9])\n', (188, 217), True, 'import numpy as np\n'), ((230, 273), 'numpy.array', 'np.array', (['[1, 4, 9, 16, 25, 36, 49, 64, 81]'], {}), '([1, 4, 9, 16, 25, 36, 49, 64, 81])\n', (238, 273), True, 'import numpy as np\n'), ((291, 309), 'uts.gradient.cfd', 'gradient.cfd', (['x', 'y'], {}), '(x, y)\n', (303, 309), False, 'from uts import gradient\n'), ((328, 388), 'numpy.array', 'np.array', (['[2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0]'], {}), '([2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0])\n', (336, 388), True, 'import numpy as np\n'), ((391, 442), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['result', 'desired'], {'decimal': '(2)'}), '(result, desired, decimal=2)\n', (414, 442), True, 'import numpy.testing as npt\n'), ((496, 533), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9])\n', (504, 533), True, 'import numpy as np\n'), ((568, 586), 'uts.gradient.cfd', 'gradient.cfd', (['x', 'y'], {}), '(x, y)\n', (580, 586), False, 'from uts import gradient\n'), ((605, 654), 'numpy.array', 'np.array', (['[1, 13, 28, 49, 76, 109, 148, 193, 241]'], {}), '([1, 13, 28, 49, 76, 109, 148, 193, 241])\n', (613, 654), True, 'import numpy as np\n'), ((666, 717), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['result', 'desired'], {'decimal': '(2)'}), '(result, desired, decimal=2)\n', (689, 717), True, 'import numpy.testing as npt\n'), ((769, 806), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9])\n', (777, 806), True, 'import numpy as np\n'), ((819, 862), 'numpy.array', 'np.array', (['[1, 4, 9, 16, 25, 36, 49, 64, 81]'], {}), '([1, 4, 9, 16, 25, 36, 49, 64, 81])\n', (827, 862), True, 'import numpy as np\n'), ((880, 898), 'uts.gradient.csd', 'gradient.csd', (['x', 'y'], {}), '(x, y)\n', (892, 898), False, 'from uts import gradient\n'), ((917, 954), 'numpy.array', 'np.array', (['[2, 2, 2, 2, 2, 2, 2, 2, 2]'], {}), '([2, 2, 2, 2, 2, 2, 2, 2, 2])\n', (925, 954), True, 'import numpy as np\n'), ((963, 1014), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['result', 'desired'], {'decimal': '(2)'}), '(result, desired, decimal=2)\n', (986, 1014), True, 'import numpy.testing as npt\n'), ((1068, 1099), 'numpy.array', 'np.array', (['[1, 3, 4, 5, 6, 8, 9]'], {}), '([1, 3, 4, 5, 6, 8, 9])\n', (1076, 1099), True, 'import numpy as np\n'), ((1112, 1148), 'numpy.array', 'np.array', (['[1, 9, 16, 25, 36, 64, 81]'], {}), '([1, 9, 16, 25, 36, 64, 81])\n', (1120, 1148), True, 'import numpy as np\n'), ((1166, 1184), 'uts.gradient.cfd', 'gradient.cfd', (['x', 'y'], {}), '(x, y)\n', (1178, 1184), False, 'from uts import gradient\n'), ((1203, 1238), 'numpy.array', 'np.array', (['[2, 6, 8, 10, 12, 16, 18]'], {}), '([2, 6, 8, 10, 12, 16, 18])\n', (1211, 1238), True, 'import numpy as np\n'), ((1249, 1300), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['result', 'desired'], {'decimal': '(2)'}), '(result, desired, decimal=2)\n', (1272, 1300), True, 'import numpy.testing as npt\n'), ((1354, 1385), 'numpy.array', 'np.array', (['[1, 3, 4, 5, 6, 8, 9]'], {}), '([1, 3, 4, 5, 6, 8, 9])\n', (1362, 1385), True, 'import numpy as np\n'), ((1398, 1434), 'numpy.array', 'np.array', (['[1, 9, 16, 25, 36, 64, 81]'], {}), '([1, 9, 16, 25, 36, 64, 81])\n', 
(1406, 1434), True, 'import numpy as np\n'), ((1452, 1470), 'uts.gradient.csd', 'gradient.csd', (['x', 'y'], {}), '(x, y)\n', (1464, 1470), False, 'from uts import gradient\n'), ((1489, 1520), 'numpy.array', 'np.array', (['[2, 2, 2, 2, 2, 2, 2]'], {}), '([2, 2, 2, 2, 2, 2, 2])\n', (1497, 1520), True, 'import numpy as np\n'), ((1534, 1585), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['result', 'desired'], {'decimal': '(2)'}), '(result, desired, decimal=2)\n', (1557, 1585), True, 'import numpy.testing as npt\n')] |
"""
LALinference posterior samples class and methods
<NAME>, <NAME>
"""
import numpy as np
import healpy as hp
from scipy.stats import gaussian_kde
from scipy import integrate, interpolate, random
from astropy import units as u
from astropy import constants as const
from astropy.table import Table
import h5py
from bilby.core.prior import Uniform, PowerLaw, PriorDict, Constraint
from bilby import gw
from ..utilities.standard_cosmology import z_dlH0, fast_cosmology
from ..prior.priors import *
from astropy.cosmology import FlatLambdaCDM, z_at_value
import astropy.units as u
import astropy.constants as constants
from scipy.interpolate import interp1d
Om0 = 0.308
zmin = 0.0001
zmax = 10
zs = np.linspace(zmin, zmax, 10000)
cosmo = fast_cosmology(Omega_m=Om0)
def constrain_m1m2(parameters):
converted_parameters = parameters.copy()
converted_parameters['m1m2'] = parameters['mass_1'] - parameters['mass_2']
return converted_parameters
class posterior_samples(object):
"""
Posterior samples class and methods.
Parameters
----------
posterior_samples : Path to posterior samples file to be loaded.
"""
def __init__(self, posterior_samples=None):
self.posterior_samples = posterior_samples
try:
self.load_posterior_samples()
        except Exception:
print("No posterior samples were specified")
def E(self,z,Om):
return np.sqrt(Om*(1+z)**3 + (1.0-Om))
def dL_by_z_H0(self,z,H0,Om0):
speed_of_light = constants.c.to('km/s').value
cosmo = fast_cosmology(Omega_m=Om0)
return cosmo.dl_zH0(z, H0)/(1+z) + speed_of_light*(1+z)/(H0*self.E(z,Om0))
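    # For a flat universe d_L = (1+z) * D_C, so d(d_L)/dz = D_C + (1+z)*c/(H0*E(z))
    # = d_L/(1+z) + c*(1+z)/(H0*E(z)); dL_by_z_H0 above returns exactly this derivative.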
def jacobian_times_prior(self,z,H0,Om0=0.308):
cosmo = fast_cosmology(Omega_m=Om0)
jacobian = np.power(1+z,2)*self.dL_by_z_H0(z,H0,Om0)
dl = cosmo.dl_zH0(z, H0)
return jacobian*(dl**2)
def load_posterior_samples(self):
"""
Method to handle different types of posterior samples file formats.
Currently it supports .dat (LALinference), .hdf5 (GWTC-1),
.h5 (PESummary) and .hdf (pycbcinference) formats.
"""
if self.posterior_samples[-3:] == 'dat':
samples = np.genfromtxt(self.posterior_samples, names=True)
try:
self.distance = samples['dist']
except KeyError:
try:
self.distance = samples['distance']
except KeyError:
print("No distance samples found.")
self.ra = samples['ra']
self.dec = samples['dec']
self.mass_1 = samples['mass_1']
self.mass_2 = samples['mass_2']
self.nsamples = len(self.distance)
if self.posterior_samples[-4:] == 'hdf5':
if self.posterior_samples[-11:] == 'GWTC-1.hdf5':
if self.posterior_samples[-20:] == 'GW170817_GWTC-1.hdf5':
dataset_name = 'IMRPhenomPv2NRT_lowSpin_posterior'
else:
dataset_name = 'IMRPhenomPv2_posterior'
file = h5py.File(self.posterior_samples, 'r')
data = file[dataset_name]
self.distance = data['luminosity_distance_Mpc']
self.ra = data['right_ascension']
self.dec = data['declination']
self.mass_1 = data['m1_detector_frame_Msun']
self.mass_2 = data['m2_detector_frame_Msun']
self.nsamples = len(self.distance)
file.close()
if self.posterior_samples[-2:] == 'h5':
file = h5py.File(self.posterior_samples, 'r')
approximants = ['C01:PhenomPNRT-HS', 'C01:NRSur7dq4',
'C01:IMRPhenomPv3HM', 'C01:IMRPhenomPv2',
'C01:IMRPhenomD']
for approximant in approximants:
try:
data = file[approximant]
print("Using "+approximant+" posterior")
break
except KeyError:
continue
self.distance = data['posterior_samples']['luminosity_distance']
self.ra = data['posterior_samples']['ra']
self.dec = data['posterior_samples']['dec']
self.mass_1 = data['posterior_samples']['mass_1']
self.mass_2 = data['posterior_samples']['mass_2']
self.nsamples = len(self.distance)
file.close()
if self.posterior_samples[-3:] == 'hdf':
file = h5py.File(self.posterior_samples, 'r')
self.distance = file['samples/distance'][:]
self.ra = file['samples/ra'][:]
self.dec = file['samples/dec'][:]
self.mass_1 = file['samples/mass_1'][:]
self.mass_2 = file['samples/mass_2'][:]
self.nsamples = len(self.distance)
file.close()
def marginalized_sky(self):
"""
Computes the marginalized sky localization posterior KDE.
"""
return gaussian_kde(np.vstack((self.ra, self.dec)))
def compute_source_frame_samples(self, H0):
cosmo = FlatLambdaCDM(H0=H0, Om0=Om0)
dLs = cosmo.luminosity_distance(zs).to(u.Mpc).value
z_at_dL = interp1d(dLs,zs)
redshift = z_at_dL(self.distance)
mass_1_source = self.mass_1/(1+redshift)
mass_2_source = self.mass_2/(1+redshift)
return redshift, mass_1_source, mass_2_source
def reweight_samples(self, H0, name, alpha=1.6, mmin=5, mmax=100, seed=1):
# Prior distribution used in the LVC analysis
prior = distance_distribution(name=name)
# Prior distribution used in this work
new_prior = mass_distribution(name=name, alpha=alpha, mmin=mmin, mmax=mmax)
# Get source frame masses
redshift, mass_1_source, mass_2_source = self.compute_source_frame_samples(H0)
# Re-weight
weights = new_prior.joint_prob(mass_1_source,mass_2_source)/ prior.prob(self.distance)
np.random.seed(seed)
draws = np.random.uniform(0, max(weights), weights.shape)
keep = weights > draws
m1det = self.mass_1[keep]
m2det = self.mass_2[keep]
dl = self.distance[keep]
return dl, weights
def marginalized_redshift_reweight(self, H0, name, alpha=1.6, mmin=5, mmax=100):
"""
        Computes the marginalized redshift posterior KDE, re-weighted by the mass prior.
"""
# Prior distribution used in this work
new_prior = mass_distribution(name=name, alpha=alpha, mmin=mmin, mmax=mmax)
# Get source frame masses
redshift, mass_1_source, mass_2_source = self.compute_source_frame_samples(H0)
# Re-weight
weights = new_prior.joint_prob(mass_1_source,mass_2_source)/self.jacobian_times_prior(redshift,H0)
norm = np.sum(weights)
return gaussian_kde(redshift,weights=weights), norm
def marginalized_redshift(self, H0):
"""
        Computes the marginalized redshift posterior KDE.
"""
# Get source frame masses
redshift, mass_1_source, mass_2_source = self.compute_source_frame_samples(H0)
# remove dl^2 prior and include dz/ddL jacobian
weights = 1/(self.dL_by_z_H0(redshift,H0,Om0)*cosmo.dl_zH0(redshift,H0)**2)
norm = np.sum(weights)
return gaussian_kde(redshift,weights=weights), norm
def marginalized_distance_reweight(self, H0, name, alpha=1.6, mmin=5, mmax=100, seed=1):
"""
Computes the marginalized distance posterior KDE.
"""
dl, weights = self.reweight_samples(H0, name, alpha=alpha, mmin=mmin, mmax=mmax, seed=seed)
norm = np.sum(weights)/len(weights)
return gaussian_kde(dl), norm
def marginalized_distance(self, H0):
"""
Computes the marginalized distance posterior KDE.
"""
norm = 1
return gaussian_kde(self.distance), norm
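# Example usage (a sketch; the file name is a placeholder for any of the
# supported .dat/.hdf5/.h5/.hdf posterior formats, H0 in km/s/Mpc):
#   samples = posterior_samples('GW170817_GWTC-1.hdf5')
#   kde, norm = samples.marginalized_distance(H0=70)
#   density = norm * kde(40.0)  # posterior density at d_L = 40 Mpc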
| [
"astropy.cosmology.FlatLambdaCDM",
"h5py.File",
"numpy.random.seed",
"numpy.sum",
"astropy.constants.c.to",
"numpy.power",
"scipy.stats.gaussian_kde",
"numpy.genfromtxt",
"numpy.linspace",
"scipy.interpolate.interp1d",
"numpy.vstack",
"numpy.sqrt"
] | [((699, 729), 'numpy.linspace', 'np.linspace', (['zmin', 'zmax', '(10000)'], {}), '(zmin, zmax, 10000)\n', (710, 729), True, 'import numpy as np\n'), ((1413, 1452), 'numpy.sqrt', 'np.sqrt', (['(Om * (1 + z) ** 3 + (1.0 - Om))'], {}), '(Om * (1 + z) ** 3 + (1.0 - Om))\n', (1420, 1452), True, 'import numpy as np\n'), ((5159, 5188), 'astropy.cosmology.FlatLambdaCDM', 'FlatLambdaCDM', ([], {'H0': 'H0', 'Om0': 'Om0'}), '(H0=H0, Om0=Om0)\n', (5172, 5188), False, 'from astropy.cosmology import FlatLambdaCDM, z_at_value\n'), ((5267, 5284), 'scipy.interpolate.interp1d', 'interp1d', (['dLs', 'zs'], {}), '(dLs, zs)\n', (5275, 5284), False, 'from scipy.interpolate import interp1d\n'), ((6038, 6058), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6052, 6058), True, 'import numpy as np\n'), ((6849, 6864), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (6855, 6864), True, 'import numpy as np\n'), ((7326, 7341), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (7332, 7341), True, 'import numpy as np\n'), ((1506, 1528), 'astropy.constants.c.to', 'constants.c.to', (['"""km/s"""'], {}), "('km/s')\n", (1520, 1528), True, 'import astropy.constants as constants\n'), ((1778, 1796), 'numpy.power', 'np.power', (['(1 + z)', '(2)'], {}), '(1 + z, 2)\n', (1786, 1796), True, 'import numpy as np\n'), ((2221, 2270), 'numpy.genfromtxt', 'np.genfromtxt', (['self.posterior_samples'], {'names': '(True)'}), '(self.posterior_samples, names=True)\n', (2234, 2270), True, 'import numpy as np\n'), ((3616, 3654), 'h5py.File', 'h5py.File', (['self.posterior_samples', '"""r"""'], {}), "(self.posterior_samples, 'r')\n", (3625, 3654), False, 'import h5py\n'), ((4550, 4588), 'h5py.File', 'h5py.File', (['self.posterior_samples', '"""r"""'], {}), "(self.posterior_samples, 'r')\n", (4559, 4588), False, 'import h5py\n'), ((5062, 5092), 'numpy.vstack', 'np.vstack', (['(self.ra, self.dec)'], {}), '((self.ra, self.dec))\n', (5071, 5092), True, 'import numpy as np\n'), ((6880, 6919), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['redshift'], {'weights': 'weights'}), '(redshift, weights=weights)\n', (6892, 6919), False, 'from scipy.stats import gaussian_kde\n'), ((7357, 7396), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['redshift'], {'weights': 'weights'}), '(redshift, weights=weights)\n', (7369, 7396), False, 'from scipy.stats import gaussian_kde\n'), ((7693, 7708), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (7699, 7708), True, 'import numpy as np\n'), ((7737, 7753), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['dl'], {}), '(dl)\n', (7749, 7753), False, 'from scipy.stats import gaussian_kde\n'), ((7916, 7943), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['self.distance'], {}), '(self.distance)\n', (7928, 7943), False, 'from scipy.stats import gaussian_kde\n'), ((3104, 3142), 'h5py.File', 'h5py.File', (['self.posterior_samples', '"""r"""'], {}), "(self.posterior_samples, 'r')\n", (3113, 3142), False, 'import h5py\n')] |
import math
import h5py
import matplotlib.pylab as plt
import numpy as np
from matplotlib import rc
# rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
# for Palatino and other serif fonts use:
rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
plt.rcParams['figure.dpi'] = 100
tableau_colors=['tab:blue', 'tab:orange', 'tab:green',
'tab:red', 'tab:purple', 'tab:brown',
'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan']
pic_run = "driving_test_dipole"
pic_run_dir = "/global/cscratch1/sd/xiaocan/tail_problem/" + pic_run + "/"
def get_vpic_info(pic_run_dir):
    """Get information of the VPIC simulation
    """
    with open(pic_run_dir + '/info') as f:
        content = f.readlines()
    vpic_info = {}
    for line in content[1:]:
        if "=" in line:
            line_splits = line.split("=")
        elif ":" in line:
            line_splits = line.split(":")
        else:
            continue
        tail = line_splits[1].split("\n")
        vpic_info[line_splits[0].strip()] = float(tail[0])
    return vpic_info

# Call after the definition so the module-level lookup does not raise NameError.
vpic_info = get_vpic_info(pic_run_dir)
def plot_jy(tframe, yslice=0, show_plot=True):
"""Plot the y-component of the current density
"""
fields_interval = int(vpic_info["fields_interval"])
tindex = fields_interval * tframe
smime = math.sqrt(vpic_info["mi/me"])
lx_de = vpic_info["Lx/di"] * smime
lz_de = vpic_info["Lz/di"] * smime
xmin, xmax = 0, lx_de
zmin, zmax = -0.5 * lz_de, 0.5 * lz_de
fname = (pic_run_dir + "hydro_hdf5/T." + str(tindex) +
"/hydro_electron_" + str(tindex) + ".h5")
with h5py.File(fname, 'r') as fh:
group = fh["Timestep_" + str(tindex)]
dset = group["jy"]
jey = dset[:, yslice, :]
fname = (pic_run_dir + "hydro_hdf5/T." + str(tindex) +
"/hydro_ion_" + str(tindex) + ".h5")
with h5py.File(fname, 'r') as fh:
group = fh["Timestep_" + str(tindex)]
dset = group["jy"]
jiy = dset[:, yslice, :]
nx = int(vpic_info["nx"])
nz = int(vpic_info["nz"])
xgrid = np.linspace(xmin, xmax, nx)
zgrid = np.linspace(zmin, zmax, nz)
len0 = 10
fig = plt.figure(figsize=[len0, len0*lz_de/lx_de])
rect = [0.12, 0.14, 0.78, 0.78]
ax = fig.add_axes(rect)
jy = np.squeeze(jey + jiy)
im1 = ax.imshow(jy.T,
extent=[xmin, xmax, zmin, zmax],
vmin=-0.06, vmax=0.06,
cmap=plt.cm.coolwarm, aspect='auto',
origin='lower', interpolation='bicubic')
# Magnetic field lines
fname = (pic_run_dir + "field_hdf5/T." + str(tindex) +
"/fields_" + str(tindex) + ".h5")
bvec = {}
with h5py.File(fname, 'r') as fh:
group = fh["Timestep_" + str(tindex)]
for var in ["cbx", "cbz"]:
dset = group[var]
bvec[var] = dset[:, 0, :]
xmesh, zmesh = np.meshgrid(xgrid, zgrid)
xmesh_r, zmesh_r = np.meshgrid(xgrid[::16], zgrid[::16])
start_points = np.vstack([xmesh_r.flatten(), zmesh_r.flatten()]).T
ax.streamplot(xmesh, zmesh, np.squeeze(bvec["cbx"]).T,
np.squeeze(bvec["cbz"]).T, color='k',
linewidth=0.5, density=2)
ax.set_xlim([xmin, xmax])
ax.set_ylim([zmin, zmax])
ax.set_xlabel(r'$x/d_e$', fontsize=20)
ax.set_ylabel(r'$z/d_e$', fontsize=20)
ax.tick_params(labelsize=16)
rect_cbar = np.copy(rect)
rect_cbar[0] += rect[2] + 0.01
rect_cbar[2] = 0.02
rect_cbar[1] += rect[3] * 0.25
rect_cbar[3] = rect[3] * 0.5
cbar_ax = fig.add_axes(rect_cbar)
cbar = fig.colorbar(im1, cax=cbar_ax, extend='both')
cbar_ax.set_title(r'$j_y$', fontsize=20)
cbar.ax.tick_params(labelsize=12)
twpe = math.ceil(tindex * vpic_info["dt*wpe"] / 0.1) * 0.1
text1 = r'$t\omega_{pe}=' + ("{%0.0f}" % twpe) + '$'
fig.suptitle(text1, fontsize=20)
# img_dir = '../img/rate_problem/absj/' + pic_run + '/'
# mkdir_p(img_dir)
# fname = img_dir + "absj_" + str(tframe) + ".jpg"
# fig.savefig(fname, dpi=200)
tframe = 0  # frame index to plot; set to the desired output time frame
plot_jy(tframe, yslice=0)
| [
"matplotlib.rc",
"numpy.meshgrid",
"h5py.File",
"math.sqrt",
"numpy.copy",
"math.ceil",
"numpy.linspace",
"numpy.squeeze",
"matplotlib.pylab.figure"
] | [((210, 266), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **{'family': 'serif', 'serif': ['Palatino']})\n", (212, 266), False, 'from matplotlib import rc\n'), ((263, 286), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (265, 286), False, 'from matplotlib import rc\n'), ((1339, 1368), 'math.sqrt', 'math.sqrt', (["vpic_info['mi/me']"], {}), "(vpic_info['mi/me'])\n", (1348, 1368), False, 'import math\n'), ((2102, 2129), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'nx'], {}), '(xmin, xmax, nx)\n', (2113, 2129), True, 'import numpy as np\n'), ((2142, 2169), 'numpy.linspace', 'np.linspace', (['zmin', 'zmax', 'nz'], {}), '(zmin, zmax, nz)\n', (2153, 2169), True, 'import numpy as np\n'), ((2195, 2243), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '[len0, len0 * lz_de / lx_de]'}), '(figsize=[len0, len0 * lz_de / lx_de])\n', (2205, 2243), True, 'import matplotlib.pylab as plt\n'), ((2313, 2334), 'numpy.squeeze', 'np.squeeze', (['(jey + jiy)'], {}), '(jey + jiy)\n', (2323, 2334), True, 'import numpy as np\n'), ((2928, 2953), 'numpy.meshgrid', 'np.meshgrid', (['xgrid', 'zgrid'], {}), '(xgrid, zgrid)\n', (2939, 2953), True, 'import numpy as np\n'), ((2977, 3014), 'numpy.meshgrid', 'np.meshgrid', (['xgrid[::16]', 'zgrid[::16]'], {}), '(xgrid[::16], zgrid[::16])\n', (2988, 3014), True, 'import numpy as np\n'), ((3441, 3454), 'numpy.copy', 'np.copy', (['rect'], {}), '(rect)\n', (3448, 3454), True, 'import numpy as np\n'), ((1640, 1661), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (1649, 1661), False, 'import h5py\n'), ((1894, 1915), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (1903, 1915), False, 'import h5py\n'), ((2731, 2752), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (2740, 2752), False, 'import h5py\n'), ((3771, 3816), 'math.ceil', 'math.ceil', (["(tindex * vpic_info['dt*wpe'] / 0.1)"], {}), "(tindex * vpic_info['dt*wpe'] / 0.1)\n", (3780, 3816), False, 'import math\n'), ((3118, 3141), 'numpy.squeeze', 'np.squeeze', (["bvec['cbx']"], {}), "(bvec['cbx'])\n", (3128, 3141), True, 'import numpy as np\n'), ((3163, 3186), 'numpy.squeeze', 'np.squeeze', (["bvec['cbz']"], {}), "(bvec['cbz'])\n", (3173, 3186), True, 'import numpy as np\n')] |
import torch
import simplecv._impl.metric.function as mF
from simplecv.util.logger import get_console_file_logger
import logging
import prettytable as pt
import numpy as np
from scipy import sparse
class THMeanIntersectionOverUnion(object):
def __init__(self, num_classes):
self.num_classes = num_classes
self._total_cm = torch.zeros(num_classes, num_classes).to_sparse()
def __call__(self, y_true, y_pred):
sparse_cm = mF.th_confusion_matrix(y_true.view(-1), y_pred.view(-1), self.num_classes, to_dense=False)
self._total_cm += sparse_cm
def summary(self, log_dir=None):
iou_per_class = mF.intersection_over_union_per_class(self._total_cm.to_dense())
miou = iou_per_class.mean()
tb = pt.PrettyTable()
tb.field_names = ['class', 'iou']
for idx, iou in enumerate(iou_per_class):
tb.add_row([idx, iou])
tb.add_row(['mIoU', miou])
if log_dir is not None:
logger = get_console_file_logger('mIoU', logging.INFO, log_dir)
logger.info('\n' + tb.get_string())
else:
print(tb)
return iou_per_class, miou
class NPMeanIntersectionOverUnion(object):
def __init__(self, num_classes, logdir=None):
self.num_classes = num_classes
self._total = sparse.coo_matrix((num_classes, num_classes), dtype=np.float32)
self.logdir = logdir
@staticmethod
def compute_iou_per_class(confusion_matrix):
"""
Args:
confusion_matrix: numpy array [num_classes, num_classes] row - gt, col - pred
Returns:
iou_per_class: float32 [num_classes, ]
"""
sum_over_row = np.sum(confusion_matrix, axis=0)
sum_over_col = np.sum(confusion_matrix, axis=1)
diag = np.diag(confusion_matrix)
denominator = sum_over_row + sum_over_col - diag
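        # denominator = (TP + FP) + (TP + FN) - TP = TP + FP + FN per class, so
        # the division below gives IoU_c = TP_c / (TP_c + FP_c + FN_c).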
iou_per_class = diag / denominator
return iou_per_class
def forward(self, y_true, y_pred):
if isinstance(y_pred, torch.Tensor):
y_pred = y_pred.cpu().numpy()
if isinstance(y_true, torch.Tensor):
y_true = y_true.cpu().numpy()
y_pred = y_pred.reshape((-1,))
y_true = y_true.reshape((-1,))
v = np.ones_like(y_pred)
cm = sparse.coo_matrix((v, (y_true, y_pred)), shape=(self.num_classes, self.num_classes), dtype=np.float32)
self._total += cm
def summary(self):
dense_cm = self._total.toarray()
iou_per_class = NPMeanIntersectionOverUnion.compute_iou_per_class(dense_cm)
miou = iou_per_class.mean()
tb = pt.PrettyTable()
tb.field_names = ['class', 'iou']
for idx, iou in enumerate(iou_per_class):
tb.add_row([idx, iou])
tb.add_row(['mIoU', miou])
if self.logdir is not None:
logger = get_console_file_logger('mIoU', logging.INFO, self.logdir)
logger.info('\n' + tb.get_string())
else:
print(tb)
return iou_per_class, miou
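# Example usage (a sketch with tiny label arrays):
#   metric = NPMeanIntersectionOverUnion(num_classes=3)
#   metric.forward(np.array([0, 1, 2, 2]), np.array([0, 1, 1, 2]))
#   iou_per_class, miou = metric.summary()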
| [
"numpy.sum",
"numpy.ones_like",
"simplecv.util.logger.get_console_file_logger",
"scipy.sparse.coo_matrix",
"prettytable.PrettyTable",
"torch.zeros",
"numpy.diag"
] | [((758, 774), 'prettytable.PrettyTable', 'pt.PrettyTable', ([], {}), '()\n', (772, 774), True, 'import prettytable as pt\n'), ((1320, 1383), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(num_classes, num_classes)'], {'dtype': 'np.float32'}), '((num_classes, num_classes), dtype=np.float32)\n', (1337, 1383), False, 'from scipy import sparse\n'), ((1700, 1732), 'numpy.sum', 'np.sum', (['confusion_matrix'], {'axis': '(0)'}), '(confusion_matrix, axis=0)\n', (1706, 1732), True, 'import numpy as np\n'), ((1756, 1788), 'numpy.sum', 'np.sum', (['confusion_matrix'], {'axis': '(1)'}), '(confusion_matrix, axis=1)\n', (1762, 1788), True, 'import numpy as np\n'), ((1804, 1829), 'numpy.diag', 'np.diag', (['confusion_matrix'], {}), '(confusion_matrix)\n', (1811, 1829), True, 'import numpy as np\n'), ((2267, 2287), 'numpy.ones_like', 'np.ones_like', (['y_pred'], {}), '(y_pred)\n', (2279, 2287), True, 'import numpy as np\n'), ((2301, 2408), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(v, (y_true, y_pred))'], {'shape': '(self.num_classes, self.num_classes)', 'dtype': 'np.float32'}), '((v, (y_true, y_pred)), shape=(self.num_classes, self.\n num_classes), dtype=np.float32)\n', (2318, 2408), False, 'from scipy import sparse\n'), ((2629, 2645), 'prettytable.PrettyTable', 'pt.PrettyTable', ([], {}), '()\n', (2643, 2645), True, 'import prettytable as pt\n'), ((990, 1044), 'simplecv.util.logger.get_console_file_logger', 'get_console_file_logger', (['"""mIoU"""', 'logging.INFO', 'log_dir'], {}), "('mIoU', logging.INFO, log_dir)\n", (1013, 1044), False, 'from simplecv.util.logger import get_console_file_logger\n'), ((2865, 2923), 'simplecv.util.logger.get_console_file_logger', 'get_console_file_logger', (['"""mIoU"""', 'logging.INFO', 'self.logdir'], {}), "('mIoU', logging.INFO, self.logdir)\n", (2888, 2923), False, 'from simplecv.util.logger import get_console_file_logger\n'), ((344, 381), 'torch.zeros', 'torch.zeros', (['num_classes', 'num_classes'], {}), '(num_classes, num_classes)\n', (355, 381), False, 'import torch\n')] |
# coding: utf-8
import os
import re
import numpy as np
import pandas as pd
import seaborn as sns
from kerasy.utils import findLowerUpper
# set the `plotly.io.orca.config.executable` property to the full path of your orca executable.
# If you use `environment.yml` to create anaconda environment, this command automatically correct the path.
import plotly
conda_env_name = "triple-net-diagnosis"
result = re.findall(pattern=fr"\/.*\/versions\/{conda_env_name}\/", string=plotly.__path__[0])
if len(result)>0:
plotly.io.orca.config.executable = os.path.join(result[0], "bin/orca")
import plotly.express as px
def volcanoplot(df, logFC=1.2, p_val=0.05, hover_data=[],
FC_col="FC.Value", p_val_col="P.Value", logFC_col="logFC", log_p_val_col="-log10(P.Value)"):
""" Volcano plot.
    @params df         : (pd.DataFrame) Input data.
@params logFC : (float) threshold value.
@params p_val : (float) threshold value.
@params hover_data : (list) colnames for hover labels.
"""
col_names = df.columns
hover_data = [col for col in hover_data if col in col_names]
# Add necessary data.
if log_p_val_col not in col_names:
df.loc[:, log_p_val_col] = -np.log10(df[p_val_col].values)
if logFC_col not in col_names:
df.loc[:, logFC_col] = np.log2(df[FC_col].values)
# Add coloring labels.
df.loc[:, "p_significance"] = df[p_val_col] < p_val
df.loc[:, "Significant"] = "normal"
df.loc[df["p_significance"] & (df[logFC_col] > logFC), "Significant"] = "up"
df.loc[df["p_significance"] & (df[logFC_col] < -logFC), "Significant"] = "down"
color_discrete_map={"up": "red", "down": "blue", "normal": "lightgray"}
# Fill NaN & get data info.
df = df.fillna("-")
(x_min,y_min),(x_max,y_max) = findLowerUpper(df[[logFC_col, log_p_val_col]].values, margin=0)
# Plot.
fig = px.scatter(df, x=logFC_col, y=log_p_val_col, color="Significant", color_discrete_map=color_discrete_map, hover_data=hover_data)
fig.add_shape(type="line", y0=y_min, y1=y_max, x0=-logFC, x1=-logFC, line=dict(color="black", width=2, dash="dot"))
fig.add_shape(type="line", y0=y_min, y1=y_max, x0=logFC, x1=logFC, line=dict(color="black", width=2, dash="dot"))
fig.add_shape(type="line", x0=x_min, x1=x_max, y0=-np.log10(p_val), y1=-np.log10(p_val), line=dict(color="black", width=2, dash="dot"))
return fig
def MAthresholding(x, logFC=1.2, p_val=0.05, verbose=True,
plot_volcano=False, hover_data=[], title="", save_path=None):
"""
@params x : (str/pd.DataFrame) Input data.
@params logFC : (float) threshold value.
@params p_val : (float) threshold value.
@params verbose : (bool) Whether print information or not.
@params plot_volcano : (bool) Whethre plot volcanoplot or not.
@params hover_data : (list) colnames for hover labels.
@params title : (str) title for figure.
@params save_path : (str) A string representing a local file path.
"""
# Load original DataFrame.
df = x if isinstance(x, pd.DataFrame) else pd.read_csv(str(x), sep="\t")
before = len(df)
# Drop NaN data.
df = df.dropna(axis=0, how="any", subset=["logFC", "P.Value"])
middle = len(df)
if plot_volcano:
fig = volcanoplot(df=df, logFC=logFC, p_val=p_val, hover_data=hover_data)
fig.update_layout(title=title, font=dict(family="Monaco", size=18, color="#7f7f7f"))
fig.show()
# Save figure.
if save_path is not None:
fig.write_image(save_path)
if verbose: print(f"volcano plot was saved at `{save_path}`")
# Thresholding
df = df[(abs(df["logFC"])>logFC) & (df["P.Value"]<p_val)]
after = len(df)
# print result.
if verbose:
title = title + " : " if len(title)>0 else title
print(f"{title}{before} --[drop NaN]-> {middle} --[threshold]-> {after} ({after/before:>6.1%})")
return df
def clustermapplot(eset_path, thred_path, colnames=[], fillna=0):
df_thred = pd.read_csv(thred_path, sep="\t")
colname = [col for col in colnames if col in df_thred.columns][0]
symbol = df_thred[colname]
col_colors = np.where(df_thred["logFC"]>=0, "red", "blue")
df_eset = pd.read_csv(eset_path, sep="\t")
data = df_eset.loc[df_thred.ID, :].fillna(fillna)
data = data.transpose()
data.columns = symbol
fig = sns.clustermap(data=data, cmap="bwr", figsize=(18,6), col_colors=col_colors)
return fig
# def applyMAthresh2all(gen, colnames=[], logFC=1.2, p_val=0.05, verbose=True,
# plot_volcano=False, volcano_save_dir=".",
# plot_clustermap=False, clustermap_save_dir=".",):
# """Apply `MAthresholding` to all data.
# @params colnames : (list) Column Names for features you want to extract.
# @params logFC : (float) threshold value.
# @params p_val : (float) threshold value.
# @params verbose : (bool) Whether print info or not.
# @params plot_* : (bool) Whether plot or not.
# @params *_save_dir : (str) Where you want to save *plot.
# """
# if verbose:
# print("Apply MicroArray thresholding to all data.")
# print(f"|logFC| > {logFC}")
# print(f"p_val < {p_val}")
# print("="*42)
# if isinstance(colnames, str):
# colnames = [colnames]
# lst = []
# for path in gen:
# if isinstance(path, tuple) and len(path)==2:
# path, eset_path = path
# fn = str(path).split("/")[-1]
# volcano_fig_path = os.path.join(volcano_save_dir, fn + ".png")
# df = MAthresholding(x=path, logFC=logFC, p_val=p_val, verbose=verbose, title=fn, plot_volcano=True, hover_data=colnames, save_path=volcano_fig_path)
# feature_col = [col for col in colnames if col in df.columns][0]
# extracted_ids = df[feature_col].values.tolist()
# lst.extend(extracted_ids)
# if plot_clustermap:
# df_eset = pd.read_csv(eset_path, sep="\t")
# col_colors = np.where(df["logFC"]>=0, "red", "blue")
# data = df_eset.loc[df.ID, :].fillna(0).T
# data.columns = extracted_ids
# fig = sns.clustermap(data=data, cmap="bwr", figsize=(18,6), col_colors=col_colors)
# clustermap_fig_path = os.path.join(clustermap_save_dir, fn + ".png")
# fig.savefig(clustermap_fig_path)
# before = len(lst)
# num_nan_data = len([e for e in lst if str(e)=="nan"])
# unique_list = list(set(lst))
# after = len(unique_list)
# print(f"Not annotated data : {num_nan_data} ({num_nan_data/before:>6.1%})")
# print(f"Unique data : {before} -> {after} ({after/before:>6.1%})")
# return unique_list
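# Example usage (a sketch; 'diff_expr.tsv' and the hover column are placeholders
# for a tab-separated table containing 'logFC' and 'P.Value' columns):
#   df_sig = MAthresholding('diff_expr.tsv', logFC=1.2, p_val=0.05,
#                           plot_volcano=True, hover_data=['Gene.symbol'])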
| [
"kerasy.utils.findLowerUpper",
"seaborn.clustermap",
"pandas.read_csv",
"numpy.log2",
"re.findall",
"numpy.where",
"numpy.log10",
"os.path.join",
"plotly.express.scatter"
] | [((405, 498), 're.findall', 're.findall', ([], {'pattern': 'f"""\\\\/.*\\\\/versions\\\\/{conda_env_name}\\\\/"""', 'string': 'plotly.__path__[0]'}), "(pattern=f'\\\\/.*\\\\/versions\\\\/{conda_env_name}\\\\/', string=plotly\n .__path__[0])\n", (415, 498), False, 'import re\n'), ((548, 583), 'os.path.join', 'os.path.join', (['result[0]', '"""bin/orca"""'], {}), "(result[0], 'bin/orca')\n", (560, 583), False, 'import os\n'), ((1802, 1865), 'kerasy.utils.findLowerUpper', 'findLowerUpper', (['df[[logFC_col, log_p_val_col]].values'], {'margin': '(0)'}), '(df[[logFC_col, log_p_val_col]].values, margin=0)\n', (1816, 1865), False, 'from kerasy.utils import findLowerUpper\n'), ((1888, 2019), 'plotly.express.scatter', 'px.scatter', (['df'], {'x': 'logFC_col', 'y': 'log_p_val_col', 'color': '"""Significant"""', 'color_discrete_map': 'color_discrete_map', 'hover_data': 'hover_data'}), "(df, x=logFC_col, y=log_p_val_col, color='Significant',\n color_discrete_map=color_discrete_map, hover_data=hover_data)\n", (1898, 2019), True, 'import plotly.express as px\n'), ((4117, 4150), 'pandas.read_csv', 'pd.read_csv', (['thred_path'], {'sep': '"""\t"""'}), "(thred_path, sep='\\t')\n", (4128, 4150), True, 'import pandas as pd\n'), ((4269, 4316), 'numpy.where', 'np.where', (["(df_thred['logFC'] >= 0)", '"""red"""', '"""blue"""'], {}), "(df_thred['logFC'] >= 0, 'red', 'blue')\n", (4277, 4316), True, 'import numpy as np\n'), ((4330, 4362), 'pandas.read_csv', 'pd.read_csv', (['eset_path'], {'sep': '"""\t"""'}), "(eset_path, sep='\\t')\n", (4341, 4362), True, 'import pandas as pd\n'), ((4481, 4558), 'seaborn.clustermap', 'sns.clustermap', ([], {'data': 'data', 'cmap': '"""bwr"""', 'figsize': '(18, 6)', 'col_colors': 'col_colors'}), "(data=data, cmap='bwr', figsize=(18, 6), col_colors=col_colors)\n", (4495, 4558), True, 'import seaborn as sns\n'), ((1318, 1344), 'numpy.log2', 'np.log2', (['df[FC_col].values'], {}), '(df[FC_col].values)\n', (1325, 1344), True, 'import numpy as np\n'), ((1221, 1251), 'numpy.log10', 'np.log10', (['df[p_val_col].values'], {}), '(df[p_val_col].values)\n', (1229, 1251), True, 'import numpy as np\n'), ((2351, 2366), 'numpy.log10', 'np.log10', (['p_val'], {}), '(p_val)\n', (2359, 2366), True, 'import numpy as np\n'), ((2372, 2387), 'numpy.log10', 'np.log10', (['p_val'], {}), '(p_val)\n', (2380, 2387), True, 'import numpy as np\n')] |
import numpy as np
from tabulate import tabulate
class TruncatedDisplay(object):
""" Performs similar functionality as less command in unix OS where stdout is chunked up into a set number of
lines and user needs to provide input to continue displaying lines """
def __init__(self, num_lines=10):
# TODO make num_lines config driven
self.num_lines = num_lines
def __ror__(self, other):
s = str(other).split("\n")
# Print only self.num_lines from the string to print
for i in range(0, len(s), self.num_lines):
print(*s[i: i + self.num_lines], sep="\n")
# Prompt the user if they want to continue displaying line batches or not
val = input('Press <Enter> to continue or <q> to quit\n')
if val == 'q':
exit(0)
class DataFrameTabularDisplay(object):
""" Displays entire data frame in psql command line format """
def __init__(self, data_frame):
self.data_frame = data_frame
def run(self):
printable_df = self.data_frame.reset_index().drop(columns='index')
print(tabulate(printable_df, headers='keys', tablefmt='psql'))
class DataFrameTruncatedDisplay(object):
""" Displays batches of lines from a data frame """
def __init__(self, data_frame, num_lines=10):
# TODO make num_lines config driven
self.data_frame = data_frame
self.num_lines = num_lines
def run(self):
        # The column names are re-printed at the top of each batch of rows
string_to_print = ''
row_count = 0
# Then print batches of self.num_lines
for index, row in self.data_frame.iterrows():
if row_count % (self.num_lines - 1) == 0:
string_to_print += ' '.join([column.upper() for column in self.data_frame.columns])
string_to_print += '\n'
row_dict = dict(np.ndenumerate(row))
string_to_print += ' '.join(map(str, row_dict.values()))
string_to_print += '\n'
row_count += 1
# Given text with strings separated by \n, this will print each line similar to the unix less command
display = TruncatedDisplay(self.num_lines)
string_to_print[:-1] | display
def get_display(display_option="truncated_df"):
""" Factory function to retrieve a particular display type from a given display option
:param string display_option: a config driven option for how to display results to the user
:returns a display class corresponding to the display_option given
"""
# TODO make display_option config driven
available_displays = {
"truncated_df": DataFrameTruncatedDisplay,
"truncated": TruncatedDisplay,
"tabular_df": DataFrameTabularDisplay
}
if display_option not in available_displays:
raise KeyError("{0} display not one of {1}".format(display_option, set(available_displays)))
return available_displays[display_option]
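# Example usage (a sketch; `df` stands in for any pandas DataFrame):
#   display_cls = get_display('tabular_df')
#   display_cls(df).run()
# The plain 'truncated' display is driven by the pipe operator instead:
#   long_text | TruncatedDisplay(num_lines=10)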
| [
"tabulate.tabulate",
"numpy.ndenumerate"
] | [((1122, 1177), 'tabulate.tabulate', 'tabulate', (['printable_df'], {'headers': '"""keys"""', 'tablefmt': '"""psql"""'}), "(printable_df, headers='keys', tablefmt='psql')\n", (1130, 1177), False, 'from tabulate import tabulate\n'), ((1908, 1927), 'numpy.ndenumerate', 'np.ndenumerate', (['row'], {}), '(row)\n', (1922, 1927), True, 'import numpy as np\n')] |
import numpy as np
try:
from scipy.cluster.hierarchy import DisjointSet
except ImportError:
pass
from .common import Benchmark
class Bench(Benchmark):
params = [[100, 1000, 10000]]
param_names = ['n']
def setup(self, n):
# Create random edges
rng = np.random.RandomState(seed=0)
self.edges = rng.randint(0, 10 * n, (n, 2))
self.nodes = np.unique(self.edges)
self.disjoint_set = DisjointSet(self.nodes)
self.pre_merged = DisjointSet(self.nodes)
for a, b in self.edges:
self.pre_merged.merge(a, b)
self.pre_merged_found = DisjointSet(self.nodes)
for a, b in self.edges:
self.pre_merged_found.merge(a, b)
for x in self.nodes:
self.pre_merged_found[x]
def time_merge(self, n):
dis = self.disjoint_set
for a, b in self.edges:
dis.merge(a, b)
def time_merge_already_merged(self, n):
dis = self.pre_merged
for a, b in self.edges:
dis.merge(a, b)
def time_find(self, n):
dis = self.pre_merged
return [dis[i] for i in self.nodes]
def time_find_already_found(self, n):
dis = self.pre_merged_found
return [dis[i] for i in self.nodes]
def time_contains(self, n):
assert self.nodes[0] in self.pre_merged
assert self.nodes[n // 2] in self.pre_merged
assert self.nodes[-1] in self.pre_merged
def time_absence(self, n):
# Test for absence
assert None not in self.pre_merged
assert "dummy" not in self.pre_merged
assert (1, 2, 3) not in self.pre_merged
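# These benchmarks follow the airspeed velocity (asv) layout implied by the
# `Benchmark` base class above; running them through `asv run` is an assumption.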
| [
"numpy.unique",
"numpy.random.RandomState",
"scipy.cluster.hierarchy.DisjointSet"
] | [((290, 319), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(0)'}), '(seed=0)\n', (311, 319), True, 'import numpy as np\n'), ((393, 414), 'numpy.unique', 'np.unique', (['self.edges'], {}), '(self.edges)\n', (402, 414), True, 'import numpy as np\n'), ((443, 466), 'scipy.cluster.hierarchy.DisjointSet', 'DisjointSet', (['self.nodes'], {}), '(self.nodes)\n', (454, 466), False, 'from scipy.cluster.hierarchy import DisjointSet\n'), ((494, 517), 'scipy.cluster.hierarchy.DisjointSet', 'DisjointSet', (['self.nodes'], {}), '(self.nodes)\n', (505, 517), False, 'from scipy.cluster.hierarchy import DisjointSet\n'), ((623, 646), 'scipy.cluster.hierarchy.DisjointSet', 'DisjointSet', (['self.nodes'], {}), '(self.nodes)\n', (634, 646), False, 'from scipy.cluster.hierarchy import DisjointSet\n')] |
import numpy as np
from cv2 import cv2
import os
import pafy
import argparse
from tensorflow.keras.models import load_model
output_directory = 'Youtube_Videos'
os.makedirs(output_directory, exist_ok = True)
categories = ["Biking", "Drumming", "Basketball", "Diving","Billiards","HorseRiding","Mixing","PushUps","Skiing","Swing"]
image_height = 64
image_width = 64
#Function to download videos from youtube with pafy
def download_youtube_video(youtube_video_url, output_directory):
video = pafy.new(youtube_video_url)
# Getting the best available quality
video_best = video.getbest()
output_file_path = f'{output_directory}/{video.title}.mp4'
video_best.download(filepath = output_file_path, quiet = True)
return video.title
def predict(video, count,model):
    ''' Return the predicted probabilities for the activities in a video.
    A prediction is made on each of "count" evenly spaced frames, and the final
    prediction is obtained by averaging all the per-frame predictions.'''
model_ouput_size = 10
predictions = {}
    probabilities_np = np.zeros((count, model_ouput_size), dtype = np.float64)  # np.float is removed in recent NumPy
video_reader = cv2.VideoCapture(video)
video_frames_count = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
    # sample one frame per window of size total//count; leftover frames at the end are skipped
window = video_frames_count // count
for frame_counter in range(count):
#control the video position to read every "count " slice of frames
video_reader.set(cv2.CAP_PROP_POS_FRAMES, frame_counter * window)
_ , frame = video_reader.read()
resized_frame = cv2.resize(frame, (image_height, image_width))
normalized_frame = resized_frame / 255
#predict with the model
probabilities = model.predict(np.expand_dims(normalized_frame, axis = 0))[0]
probabilities_np[frame_counter] = probabilities
#calculate the average probabilities for the chosen frames
average_probabilities = probabilities_np.mean(axis = 0)
# Sorting the Probabilities
final_probabilities_indexes = np.argsort(average_probabilities)[::-1]
for label in final_probabilities_indexes:
category = categories[label]
category_probability = average_probabilities[label]
predictions[category] = category_probability
video_reader.release()
return predictions
model = load_model("model_VGG16_CNN_LSTM.h5")
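# Example usage (a sketch; 'clip.mp4' is a placeholder local video path):
#   predictions = predict('clip.mp4', count=50, model=model)
#   top_activity = max(predictions, key=predictions.get)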
if __name__ == '__main__':
frames_count = 50
    parser=argparse.ArgumentParser(description='This program predicts the human activity in a youtube video \n current list of Activities: \n [Biking, Drumming, Basketball, Diving,Billiards,HorseRiding,Mixing,PushUps,Skiing,Swing] ')
parser.add_argument('youtube_link',help='give a youtube link with one of those activities \n please give a link to a short video with good quality that contains one person doing that activity')
args=parser.parse_args()
title = download_youtube_video(args.youtube_link,output_directory)
path = f'{output_directory}/{title}.mp4'
predictions = predict(path,frames_count,model)
#get the first activity(first key in dict)
activity = list(predictions.keys())[0]
print('\n Human Activity Recognition model predicts the following: \n')
print(f' The Activity being done in your video is mostly: {activity} ')
print('\n The probabilities for all the activities are given as following:\n')
for activity,probability in predictions.items():
print(activity, ' ',round(probability,3)) | [
"cv2.cv2.VideoCapture",
"tensorflow.keras.models.load_model",
"os.makedirs",
"argparse.ArgumentParser",
"pafy.new",
"numpy.zeros",
"numpy.expand_dims",
"cv2.cv2.resize",
"numpy.argsort"
] | [((161, 205), 'os.makedirs', 'os.makedirs', (['output_directory'], {'exist_ok': '(True)'}), '(output_directory, exist_ok=True)\n', (172, 205), False, 'import os\n'), ((2394, 2431), 'tensorflow.keras.models.load_model', 'load_model', (['"""model_VGG16_CNN_LSTM.h5"""'], {}), "('model_VGG16_CNN_LSTM.h5')\n", (2404, 2431), False, 'from tensorflow.keras.models import load_model\n'), ((498, 525), 'pafy.new', 'pafy.new', (['youtube_video_url'], {}), '(youtube_video_url)\n', (506, 525), False, 'import pafy\n'), ((1070, 1121), 'numpy.zeros', 'np.zeros', (['(count, model_ouput_size)'], {'dtype': 'np.float'}), '((count, model_ouput_size), dtype=np.float)\n', (1078, 1121), True, 'import numpy as np\n'), ((1143, 1166), 'cv2.cv2.VideoCapture', 'cv2.VideoCapture', (['video'], {}), '(video)\n', (1159, 1166), False, 'from cv2 import cv2\n'), ((2498, 2729), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""This program predicts the human ctivity in a youtube video \n current list Activities: \n [Biking, Drumming, Basketball, Diving,Billiards,HorseRiding,Mixing,PushUps,Skiing,Swing] """'}), '(description=\n """This program predicts the human ctivity in a youtube video \n current list Activities: \n [Biking, Drumming, Basketball, Diving,Billiards,HorseRiding,Mixing,PushUps,Skiing,Swing] """\n )\n', (2521, 2729), False, 'import argparse\n'), ((1641, 1687), 'cv2.cv2.resize', 'cv2.resize', (['frame', '(image_height, image_width)'], {}), '(frame, (image_height, image_width))\n', (1651, 1687), False, 'from cv2 import cv2\n'), ((2097, 2130), 'numpy.argsort', 'np.argsort', (['average_probabilities'], {}), '(average_probabilities)\n', (2107, 2130), True, 'import numpy as np\n'), ((1805, 1845), 'numpy.expand_dims', 'np.expand_dims', (['normalized_frame'], {'axis': '(0)'}), '(normalized_frame, axis=0)\n', (1819, 1845), True, 'import numpy as np\n')] |
"""Robust influence maximization.
This module implements an algorithm for robust influence
maximization.
"""
import sys
import argparse
import ast
import numpy as np
import robinmax_bac as bac
import robinmax_graph as gr
import robinmax_cover_generator as cg
import robinmax_utils as util
import robinmax_heuristics as heurs
import time
def register_options(parser):
"""Add options to the command line parser.
Register all the options for the optimization algorithm.
Parameters
----------
parser : argparse.ArgumentParser
The parser.
"""
parser.add_argument('graph_file', action = 'store', type = str,
help = 'File containing graph connectivity information.')
parser.add_argument('num_seeds', action= 'store', type = int,
help = 'Number of seed nodes.')
parser.add_argument('--debug', action = 'store', dest = 'debug',
default = False, type = ast.literal_eval,
help = 'Print debug info. Default False.')
parser.add_argument('--robust_thresh_budget', '-rtb', type = float,
default = 0.0, help = 'Node threshold budget robustness.')
parser.add_argument('--max_thresh_dev', '-td', type = float,
default = 0.0, help = 'Maximum node threshold deviation.')
parser.add_argument('--robust_weight_budget', '-rwb', type = float,
default = 0.0, help = 'Arc weight budget robustness.')
parser.add_argument('--max_weight_dev', '-wd', type = float,
default = 0.0, help = 'Maximum arc weight deviation.')
parser.add_argument('--max_cover_size', '-cs', type = int,
default = -1, help = 'Maximum size for the ' +
'activation covers generated. Default -1,' +
' in which case it is the number of nodes.')
parser.add_argument('--time', '-t', type = float,
default = 3600,
help = 'Max time. Default 3600.')
parser.add_argument('--heuristics', '-heurs', action='store',
dest='heuristics',
default=-1, type=int,
help='Solve the problem using heuristics.' +
' 1 = Column generation, 2 = Random,' +
' 3 = Two-opt.' +
' Default -1, no heuristic.')
parser.add_argument('--disable_cuts', '-dc', action='store',
dest='disable_cuts',
default=False, type=ast.literal_eval,
help='Disable CPLEX cuts. Default False.')
parser.add_argument('--cg_init_iters', '-cgi', action='store',
dest='cg_init_iters', default=20, type=int,
help='Number of CG initialization interations. ' +
'Default 20.')
parser.add_argument('--max_columns_per_round', '-mcr', action='store',
dest='max_columns_per_round', default=0,
type=int, help='Maximum number of generated columns ' +
'per round as a fraction of the number of nodes (n). ' +
'Default 2n.')
parser.add_argument('--max_col_iters_per_round', '-mci', action='store',
dest='max_col_iters_per_round', default=0,
type=int, help='Maximum number of iterations when ' +
'trying to generate columns at each round ' +
'as a fraction of the number of nodes (n). ' +
'Default 2n.')
parser.add_argument('--max_pricing_iters', '-mpi', action='store',
dest='max_pricing_iters', default=0,
type=int, help='Maximum number of pricing problems ' +
'as a fraction of the number of nodes (n). ' +
'Default 0.025n.')
parser.add_argument('--num_init_covers', '-nic', action='store',
dest='num_init_covers', default=2500,
type=int, help='The initial number of generated ' +
'covers. Default 2500.')
parser.add_argument('--random_seed', '-rs', action='store',
dest='random_seed', default=1981231712,
type=int, help='Random seed. Default 1981231712.')
parser.add_argument('--lp', '-lp', action='store',
dest='lp',
default=False, type=ast.literal_eval,
help='Solve the relaxed problem. Default False.')
# -- end function
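# Side note (editor's illustration, not part of the original module): argparse
# has no built-in boolean type, which is why the flags above pass
# type=ast.literal_eval; the literal strings 'True'/'False' are parsed into
# real booleans, and anything else fails loudly:
#
#     >>> import ast
#     >>> ast.literal_eval('False')
#     False
#     >>> ast.literal_eval('not a literal')
#     ValueError: malformed node or string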
def robinmax(graph, num_seeds, max_cover_size, thresh_budget=0,
max_thresh_dev=0.0, weight_budget=0.0,
max_weight_dev=0.0, max_time=3600,
heuristics=-1, cg_init_iters=20, max_columns_per_round=0,
max_col_iters_per_round=0, max_pricing_iters=0,
num_init_covers=2500, debugging=False, disable_cuts=False,
lp=False, out_f=sys.__stdout__):
# Compute the epsilon to use throughout the algorithm
epsilon = util.epsilon(graph)
# Print info
print('\nGRAPH')
print('Name: {:s}'.format(graph.name))
print('Nodes: {:d}'.format(graph.num_nodes))
print('Arcs: {:d}'.format(graph.num_arcs))
print('')
print('PARAMETERS')
print('Seeds: {:d}'.format(int(num_seeds)))
print('Cover size: {:d}'.format(int(max_cover_size)))
print('Robustness threshold budget: {:.2f}'.format(thresh_budget))
print('Max threshold deviation: {:.2f}'.format(max_thresh_dev))
print('Robustness weight budget: {:.2f}'.format(weight_budget))
print('Max weight deviation: {:.2f}'.format(max_weight_dev))
print('Time limit: {:.1f}'.format(max_time))
print('Disable cuts: {:s}'.format(str(disable_cuts)))
print('Solve as LP: {:s}'.format(str(lp)))
print('Epsilon: {:.2e}'.format(np.mean(epsilon)))
print('Debugging: {:s}'.format(str(debugging)))
print('Heuristics: {:d}'.format(heuristics))
print('Max columns per round: {:d}'.format(max_columns_per_round))
print('Max column iters per round: {:d}'.format(max_col_iters_per_round))
print('Max pricing iters: {:d}'.format(max_pricing_iters))
print('Number initial covers: {:d}'.format(num_init_covers))
print('Output file: {:s}'.format(str(out_f.name)))
print('')
str_args = ';'.join(['Name', graph.name, 'Nodes', str(graph.num_nodes),
'Arcs', str(graph.num_arcs), 'Seeds', str(num_seeds),
'Max cover size', str(max_cover_size),
'Robustness threshold budget', str(thresh_budget),
'max threshold deviation', str(max_thresh_dev),
'Robustness weight budget', str(weight_budget),
'Max weight deviation', str(max_weight_dev),
'Time limit', str(max_time),
'Disable cuts', str(disable_cuts),
'Solve as LP', str(lp), 'Epsilon', str(np.mean(epsilon)),
'Debugging', str(debugging), 'Heuristics', str(heuristics)])
try:
if (heuristics == 2):
start_time = time.time()
best_obj, it = heurs.random_heuristic(graph, num_seeds, thresh_budget,
max_thresh_dev, weight_budget, max_weight_dev,
epsilon, debugging, out_f, max_time)
str_results = ';'.join(['Elapsed time', str(time.time() - start_time),
'Iterations', str(it), 'Best objective', str(best_obj)])
elif (heuristics == 3):
start_time = time.time()
best_obj, iterations = heurs.two_opt_heuristic(
graph, num_seeds, thresh_budget,
max_thresh_dev, weight_budget, max_weight_dev,
epsilon, max_time, debugging, out_f)
str_results = ';'.join([
'Elapsed time', str(time.time() - start_time),
'Iterations', str(iterations),
'Best objective', str(best_obj)])
elif (heuristics == 1):
start_time = time.time()
covers = [list() for _ in range(graph.num_nodes)]
thresholds = [list() for _ in range(graph.num_nodes)]
max_size_covers = 2
num_covers = sum([len(c) for c in covers])
while (num_covers <= num_init_covers and
max_size_covers < graph.num_nodes):
max_size_covers += 1
covers, thresholds = cg.generate_minimal_covers(
graph, max_size_covers, thresh_budget, max_thresh_dev,
weight_budget, max_weight_dev)
new_num_covers = sum([len(c) for c in covers])
if (new_num_covers == num_covers and new_num_covers > 0):
max_size_covers = graph.num_nodes
num_covers = new_num_covers
time_left = max(0, (max_time - (time.time() - start_time)))
results = bac.bac_restart(graph, num_seeds, max_cover_size, thresh_budget,
max_thresh_dev, weight_budget, max_weight_dev,
time_left, epsilon, debugging, disable_cuts, lp,
covers=covers, thresholds=thresholds, save_pruned=False,
run_as_heuristic=True, cg_init_iters=cg_init_iters,
max_columns_per_round=max_columns_per_round,
max_pricing_iters=max_pricing_iters,
max_col_iters_per_round=max_col_iters_per_round, out_f=out_f)
str_results = ';'.join(['Elapsed time', str(time.time() - start_time),
'Iterations', str(results[11]), 'Generated covers',
str(sum([len(covers[i]) for i in range(graph.num_nodes)])),
'Best bound', str(results[3]),
'Best objective', str(results[4])])
else:
start_time = time.time()
covers, thresholds = cg.generate_minimal_covers(
graph, max_cover_size, thresh_budget, max_thresh_dev,
weight_budget, max_weight_dev)
time_left = max(0, (max_time - (time.time() - start_time)))
results = bac.bac_restart(graph, num_seeds, max_cover_size, thresh_budget,
max_thresh_dev, weight_budget, max_weight_dev,
time_left, epsilon, debugging, disable_cuts, lp, covers=covers,
thresholds=thresholds, save_pruned=False,
run_as_heuristic=False, out_f=out_f)
print('Cover time (s): {:.2f}'.format(max_time - time_left), file=out_f)
print('')
str_results = ';'.join(['Elapsed time', str(time.time() - start_time),
'CPLEX time', str(results[0]),
'Cover time', str(max_time - time_left),
'Nodes (#)', str(results[1]), 'Gap (%)', str(results[2]),
'Best bound', str(results[3]), 'Best objective', str(results[4]),
'Covers (#)', str(results[5]), 'Lazy cuts', str(results[6]),
'Nonzero theta at optimum (#)', str(results[7]),
'Max theta at optimum', str(results[8]),
'Nonzero phi at optimum (#)', str(results[9]),
'Max phi at optimum', str(results[10])])
except Exception as e:
print('Problem with graph: {:s}. \n'.format(graph.name) + str(e))
str_results = '{:s}'.format(str(e))
raise
print("")
print(str_args + ';' + str_results, flush=True)
return
# -- end function
if (__name__ == '__main__'):
if (sys.version_info[0] < 3):
print('Error: this software requires Python 3 or later')
exit()
parser = argparse.ArgumentParser(description = 'Branch-and-Cut for ' +
'robust influence maximization.')
# Add options to parser and parse arguments
register_options(parser)
args = parser.parse_args()
# Set random seed
np.random.seed(args.random_seed)
graph = gr.read_text_graph(args.graph_file)
# Setting default value of max_cover_size
max_cover_size = args.max_cover_size
if (max_cover_size == -1):
max_cover_size = graph.num_nodes
if args.heuristics not in [-1, 1, 2, 3]:
print('Invalid value for heuristics parameter.' +
' Check python3 robinmax.py --help.')
exit()
robinmax(graph, args.num_seeds, max_cover_size,
args.robust_thresh_budget, args.max_thresh_dev,
args.robust_weight_budget, args.max_weight_dev,
args.time, args.heuristics, args.cg_init_iters,
args.max_columns_per_round, args.max_col_iters_per_round,
args.max_pricing_iters, args.num_init_covers, args.debug,
args.disable_cuts, args.lp)
#y_names = ['y_' + str(i) for i in range(graph.num_nodes)]
#for i, name in enumerate(y_names):
# print(name, data.best_incumbent[i])
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"robinmax_graph.read_text_graph",
"time.time",
"robinmax_cover_generator.generate_minimal_covers",
"robinmax_heuristics.random_heuristic",
"numpy.mean",
"robinmax_utils.epsilon",
"robinmax_heuristics.two_opt_heuristic",
"robinmax_bac.bac_restart"
] | [((5220, 5239), 'robinmax_utils.epsilon', 'util.epsilon', (['graph'], {}), '(graph)\n', (5232, 5239), True, 'import robinmax_utils as util\n'), ((11735, 11832), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': "('Branch-and-Cut for ' + 'robust influence maximization.')"}), "(description='Branch-and-Cut for ' +\n 'robust influence maximization.')\n", (11758, 11832), False, 'import argparse\n'), ((12003, 12035), 'numpy.random.seed', 'np.random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (12017, 12035), True, 'import numpy as np\n'), ((12049, 12084), 'robinmax_graph.read_text_graph', 'gr.read_text_graph', (['args.graph_file'], {}), '(args.graph_file)\n', (12067, 12084), True, 'import robinmax_graph as gr\n'), ((6023, 6039), 'numpy.mean', 'np.mean', (['epsilon'], {}), '(epsilon)\n', (6030, 6039), True, 'import numpy as np\n'), ((7180, 7191), 'time.time', 'time.time', ([], {}), '()\n', (7189, 7191), False, 'import time\n'), ((7220, 7363), 'robinmax_heuristics.random_heuristic', 'heurs.random_heuristic', (['graph', 'num_seeds', 'thresh_budget', 'max_thresh_dev', 'weight_budget', 'max_weight_dev', 'epsilon', 'debugging', 'out_f', 'max_time'], {}), '(graph, num_seeds, thresh_budget, max_thresh_dev,\n weight_budget, max_weight_dev, epsilon, debugging, out_f, max_time)\n', (7242, 7363), True, 'import robinmax_heuristics as heurs\n'), ((7026, 7042), 'numpy.mean', 'np.mean', (['epsilon'], {}), '(epsilon)\n', (7033, 7042), True, 'import numpy as np\n'), ((7630, 7641), 'time.time', 'time.time', ([], {}), '()\n', (7639, 7641), False, 'import time\n'), ((7678, 7822), 'robinmax_heuristics.two_opt_heuristic', 'heurs.two_opt_heuristic', (['graph', 'num_seeds', 'thresh_budget', 'max_thresh_dev', 'weight_budget', 'max_weight_dev', 'epsilon', 'max_time', 'debugging', 'out_f'], {}), '(graph, num_seeds, thresh_budget, max_thresh_dev,\n weight_budget, max_weight_dev, epsilon, max_time, debugging, out_f)\n', (7701, 7822), True, 'import robinmax_heuristics as heurs\n'), ((8132, 8143), 'time.time', 'time.time', ([], {}), '()\n', (8141, 8143), False, 'import time\n'), ((9028, 9466), 'robinmax_bac.bac_restart', 'bac.bac_restart', (['graph', 'num_seeds', 'max_cover_size', 'thresh_budget', 'max_thresh_dev', 'weight_budget', 'max_weight_dev', 'time_left', 'epsilon', 'debugging', 'disable_cuts', 'lp'], {'covers': 'covers', 'thresholds': 'thresholds', 'save_pruned': '(False)', 'run_as_heuristic': '(True)', 'cg_init_iters': 'cg_init_iters', 'max_columns_per_round': 'max_columns_per_round', 'max_pricing_iters': 'max_pricing_iters', 'max_col_iters_per_round': 'max_col_iters_per_round', 'out_f': 'out_f'}), '(graph, num_seeds, max_cover_size, thresh_budget,\n max_thresh_dev, weight_budget, max_weight_dev, time_left, epsilon,\n debugging, disable_cuts, lp, covers=covers, thresholds=thresholds,\n save_pruned=False, run_as_heuristic=True, cg_init_iters=cg_init_iters,\n max_columns_per_round=max_columns_per_round, max_pricing_iters=\n max_pricing_iters, max_col_iters_per_round=max_col_iters_per_round,\n out_f=out_f)\n', (9043, 9466), True, 'import robinmax_bac as bac\n'), ((9923, 9934), 'time.time', 'time.time', ([], {}), '()\n', (9932, 9934), False, 'import time\n'), ((9969, 10084), 'robinmax_cover_generator.generate_minimal_covers', 'cg.generate_minimal_covers', (['graph', 'max_cover_size', 'thresh_budget', 'max_thresh_dev', 'weight_budget', 'max_weight_dev'], {}), '(graph, max_cover_size, thresh_budget,\n max_thresh_dev, weight_budget, max_weight_dev)\n', (9995, 10084), True, 'import 
robinmax_cover_generator as cg\n'), ((10210, 10476), 'robinmax_bac.bac_restart', 'bac.bac_restart', (['graph', 'num_seeds', 'max_cover_size', 'thresh_budget', 'max_thresh_dev', 'weight_budget', 'max_weight_dev', 'time_left', 'epsilon', 'debugging', 'disable_cuts', 'lp'], {'covers': 'covers', 'thresholds': 'thresholds', 'save_pruned': '(False)', 'run_as_heuristic': '(False)', 'out_f': 'out_f'}), '(graph, num_seeds, max_cover_size, thresh_budget,\n max_thresh_dev, weight_budget, max_weight_dev, time_left, epsilon,\n debugging, disable_cuts, lp, covers=covers, thresholds=thresholds,\n save_pruned=False, run_as_heuristic=False, out_f=out_f)\n', (10225, 10476), True, 'import robinmax_bac as bac\n'), ((8543, 8659), 'robinmax_cover_generator.generate_minimal_covers', 'cg.generate_minimal_covers', (['graph', 'max_size_covers', 'thresh_budget', 'max_thresh_dev', 'weight_budget', 'max_weight_dev'], {}), '(graph, max_size_covers, thresh_budget,\n max_thresh_dev, weight_budget, max_weight_dev)\n', (8569, 8659), True, 'import robinmax_cover_generator as cg\n'), ((7473, 7484), 'time.time', 'time.time', ([], {}), '()\n', (7482, 7484), False, 'import time\n'), ((7942, 7953), 'time.time', 'time.time', ([], {}), '()\n', (7951, 7953), False, 'import time\n'), ((8977, 8988), 'time.time', 'time.time', ([], {}), '()\n', (8986, 8988), False, 'import time\n'), ((10159, 10170), 'time.time', 'time.time', ([], {}), '()\n', (10168, 10170), False, 'import time\n'), ((9612, 9623), 'time.time', 'time.time', ([], {}), '()\n', (9621, 9623), False, 'import time\n'), ((10695, 10706), 'time.time', 'time.time', ([], {}), '()\n', (10704, 10706), False, 'import time\n')] |
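For readers unfamiliar with the domain, here is a hedged sketch of linear-threshold influence spread, the quantity robust influence maximization optimizes; the dict-based graph encoding is an assumption for illustration and is not the robinmax_graph structure used above:

def spread(weights, thresholds, seeds):
    # weights: {(u, v): w} directed arc weights; thresholds: {node: t}.
    # A node activates once the total weight of its active in-neighbors
    # reaches its threshold; repeat until no further activations occur.
    active = set(seeds)
    changed = True
    while changed:
        changed = False
        for node, thresh in thresholds.items():
            if node in active:
                continue
            incoming = sum(w for (u, v), w in weights.items()
                           if v == node and u in active)
            if incoming >= thresh:
                active.add(node)
                changed = True
    return len(active)

# spread({(0, 1): 0.6, (1, 2): 0.5}, {0: 0.0, 1: 0.5, 2: 0.4}, seeds=[0]) -> 3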
import cv2
import os
import numpy as np
import tensorflow as tf
import random
def brightness_level(files, path):
    count = 1
    while count <= len(files) // 4:
        image = random.choice(files)
        img = cv2.imread(os.path.join(path, image))
        increment = 1
        while increment <= 2:
            file_path = path + 'brightness' + str(increment) + '_' + image
            # Jitter the brightness of the freshly loaded image each pass;
            # reassigning img here would compound the adjustment on the
            # second iteration.
            bright = tf.keras.preprocessing.image.random_brightness(img, (0.6, 0.9))
            cv2.imwrite(file_path, np.array(bright))
            increment = increment + 1
        count = count + 1
| [
"tensorflow.keras.preprocessing.image.random_brightness",
"cv2.imwrite",
"random.choice",
"numpy.array",
"os.path.join"
] | [((176, 196), 'random.choice', 'random.choice', (['files'], {}), '(files)\n', (189, 196), False, 'import random\n'), ((222, 247), 'os.path.join', 'os.path.join', (['path', 'image'], {}), '(path, image)\n', (234, 247), False, 'import os\n'), ((388, 451), 'tensorflow.keras.preprocessing.image.random_brightness', 'tf.keras.preprocessing.image.random_brightness', (['img', '(0.6, 0.9)'], {}), '(img, (0.6, 0.9))\n', (434, 451), True, 'import tensorflow as tf\n'), ((501, 514), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (509, 514), True, 'import numpy as np\n'), ((527, 554), 'cv2.imwrite', 'cv2.imwrite', (['file_path', 'img'], {}), '(file_path, img)\n', (538, 554), False, 'import cv2\n')] |
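A hypothetical driver for brightness_level above; the directory path and the .jpg filter are assumptions, not part of the original file:

import os

if __name__ == '__main__':
    path = 'data/train/'  # assumed image directory
    files = [f for f in os.listdir(path) if f.endswith('.jpg')]
    brightness_level(files, path)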
import matplotlib.pyplot as plt
from collections import Counter
import numpy as np
import pandas as pd
# Use LaTeX
# import matplotlib
# matplotlib.rcParams['text.usetex'] = True
with open("data/ex2") as f:
data = f.readlines()
## Convert to int
data = [int(d) for d in data]
## Old stuff - don't use.
# plt.plot(data)
# plt.savefig("data/ex2-plot.png")
# plt.close()
# plt.hist(data, bins = 32-1) # read docs
# plt.savefig("data/ex2-hist.png")
## Make Histrogram
n = len(data)
maxsize = 16
width = 0.2
count = Counter(data)
vals = [count[i]/n for i in range(maxsize)]
labels = [i for i in range(maxsize)]
indexes = np.arange(len(vals))
theoretical = [0.5*2**(-i) for i in labels]
plt.bar(indexes, vals, width, label = "Experimental Distribution")
plt.xticks(indexes + width * 0.5, labels)
plt.plot(labels, theoretical,"r--", label = "Theoretical Distribution")
plt.title("Distribution of p(h(x))", size = 18)
plt.xlabel("Value",size = 16)
plt.ylabel("Frequency",size = 16)
plt.legend()
plt.savefig("../tex/figs/ex2-hist.png")
### Exercise 4
df = pd.read_csv("../results/results.csv")
n = 1000000
ms = [16,256,1024]
for m in ms:
d = df[df["m"] == m]
h = plt.hist(d["Estimation"],bins = 100)
plt.xlabel("HyperLogLog Estimation",size = 16)
plt.ylabel("Frequency",size = 16)
sigma = 1.04/np.sqrt(m)
plt.title(f"m = {m}",size = 18)
# normal_distribution = np.exp(-((x - 1000000) ** 2) / (2 * sigma ** 2)) / np.sqrt(2 * np.pi * sigma ** 2)
a = min(d["Estimation"])
b = max(d["Estimation"])
std = np.std(d["Estimation"])
scale = max(h[0])
nu = np.mean(d["Estimation"])
x = np.linspace(a,b,1000)
y = scale*(np.exp(-((x-1000000)**2)/(2*std**2)))
plt.axvline(x=1000000, c = "b")
plt.axvline(x=n*(1+sigma), c = "y")
plt.axvline(x=n*(1-sigma), c = "y")
plt.axvline(x=n*(1+2*sigma), c = "r")
plt.axvline(x=n*(1-2*sigma), c = "r")
plt.plot(x,y)
plt.savefig(f"data/ex4{m}.png")
plt.show()
## Calculate fractions
for m in ms:
    d = df[df["m"] == m]
    sigma = 1.04 / np.sqrt(m)
    estimations = d["Estimation"].values
    count_sigma1 = 0
    count_sigma2 = 0
    for i in range(len(estimations)):
        if n * (1 - sigma) < estimations[i] < n * (1 + sigma):
            count_sigma1 += 1
        if n * (1 - 2 * sigma) < estimations[i] < n * (1 + 2 * sigma):
            count_sigma2 += 1
    print("m", m)
    print("Fraction 1 sigma", count_sigma1 / len(estimations))
    print("Fraction 2 sigma", count_sigma2 / len(estimations))
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.hist",
"pandas.read_csv",
"numpy.std",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"numpy.mean",
"numpy.exp",
"numpy.linspace",
... | [((521, 534), 'collections.Counter', 'Counter', (['data'], {}), '(data)\n', (528, 534), False, 'from collections import Counter\n'), ((692, 756), 'matplotlib.pyplot.bar', 'plt.bar', (['indexes', 'vals', 'width'], {'label': '"""Experimental Distribution"""'}), "(indexes, vals, width, label='Experimental Distribution')\n", (699, 756), True, 'import matplotlib.pyplot as plt\n'), ((759, 800), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(indexes + width * 0.5)', 'labels'], {}), '(indexes + width * 0.5, labels)\n', (769, 800), True, 'import matplotlib.pyplot as plt\n'), ((801, 871), 'matplotlib.pyplot.plot', 'plt.plot', (['labels', 'theoretical', '"""r--"""'], {'label': '"""Theoretical Distribution"""'}), "(labels, theoretical, 'r--', label='Theoretical Distribution')\n", (809, 871), True, 'import matplotlib.pyplot as plt\n'), ((873, 918), 'matplotlib.pyplot.title', 'plt.title', (['"""Distribution of p(h(x))"""'], {'size': '(18)'}), "('Distribution of p(h(x))', size=18)\n", (882, 918), True, 'import matplotlib.pyplot as plt\n'), ((921, 949), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Value"""'], {'size': '(16)'}), "('Value', size=16)\n", (931, 949), True, 'import matplotlib.pyplot as plt\n'), ((951, 983), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {'size': '(16)'}), "('Frequency', size=16)\n", (961, 983), True, 'import matplotlib.pyplot as plt\n'), ((985, 997), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (995, 997), True, 'import matplotlib.pyplot as plt\n'), ((998, 1037), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../tex/figs/ex2-hist.png"""'], {}), "('../tex/figs/ex2-hist.png')\n", (1009, 1037), True, 'import matplotlib.pyplot as plt\n'), ((1062, 1099), 'pandas.read_csv', 'pd.read_csv', (['"""../results/results.csv"""'], {}), "('../results/results.csv')\n", (1073, 1099), True, 'import pandas as pd\n'), ((1179, 1214), 'matplotlib.pyplot.hist', 'plt.hist', (["d['Estimation']"], {'bins': '(100)'}), "(d['Estimation'], bins=100)\n", (1187, 1214), True, 'import matplotlib.pyplot as plt\n'), ((1221, 1266), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""HyperLogLog Estimation"""'], {'size': '(16)'}), "('HyperLogLog Estimation', size=16)\n", (1231, 1266), True, 'import matplotlib.pyplot as plt\n'), ((1272, 1304), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {'size': '(16)'}), "('Frequency', size=16)\n", (1282, 1304), True, 'import matplotlib.pyplot as plt\n'), ((1339, 1369), 'matplotlib.pyplot.title', 'plt.title', (['f"""m = {m}"""'], {'size': '(18)'}), "(f'm = {m}', size=18)\n", (1348, 1369), True, 'import matplotlib.pyplot as plt\n'), ((1540, 1563), 'numpy.std', 'np.std', (["d['Estimation']"], {}), "(d['Estimation'])\n", (1546, 1563), True, 'import numpy as np\n'), ((1595, 1619), 'numpy.mean', 'np.mean', (["d['Estimation']"], {}), "(d['Estimation'])\n", (1602, 1619), True, 'import numpy as np\n'), ((1628, 1651), 'numpy.linspace', 'np.linspace', (['a', 'b', '(1000)'], {}), '(a, b, 1000)\n', (1639, 1651), True, 'import numpy as np\n'), ((1708, 1737), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(1000000)', 'c': '"""b"""'}), "(x=1000000, c='b')\n", (1719, 1737), True, 'import matplotlib.pyplot as plt\n'), ((1744, 1781), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(n * (1 + sigma))', 'c': '"""y"""'}), "(x=n * (1 + sigma), c='y')\n", (1755, 1781), True, 'import matplotlib.pyplot as plt\n'), ((1784, 1821), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(n * (1 - sigma))', 'c': '"""y"""'}), "(x=n * (1 
- sigma), c='y')\n", (1795, 1821), True, 'import matplotlib.pyplot as plt\n'), ((1824, 1865), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(n * (1 + 2 * sigma))', 'c': '"""r"""'}), "(x=n * (1 + 2 * sigma), c='r')\n", (1835, 1865), True, 'import matplotlib.pyplot as plt\n'), ((1866, 1907), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(n * (1 - 2 * sigma))', 'c': '"""r"""'}), "(x=n * (1 - 2 * sigma), c='r')\n", (1877, 1907), True, 'import matplotlib.pyplot as plt\n'), ((1908, 1922), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (1916, 1922), True, 'import matplotlib.pyplot as plt\n'), ((1926, 1957), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""data/ex4{m}.png"""'], {}), "(f'data/ex4{m}.png')\n", (1937, 1957), True, 'import matplotlib.pyplot as plt\n'), ((1962, 1972), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1970, 1972), True, 'import matplotlib.pyplot as plt\n'), ((1324, 1334), 'numpy.sqrt', 'np.sqrt', (['m'], {}), '(m)\n', (1331, 1334), True, 'import numpy as np\n'), ((1665, 1709), 'numpy.exp', 'np.exp', (['(-(x - 1000000) ** 2 / (2 * std ** 2))'], {}), '(-(x - 1000000) ** 2 / (2 * std ** 2))\n', (1671, 1709), True, 'import numpy as np\n'), ((2053, 2063), 'numpy.sqrt', 'np.sqrt', (['m'], {}), '(m)\n', (2060, 2063), True, 'import numpy as np\n')] |
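Since results.csv is produced elsewhere, here is a toy HyperLogLog estimator for intuition only; the hashing choice and the 0.7213 / (1 + 1.079 / m) bias constant follow the standard large-m formula, and nothing below claims to be the code that generated the data:

import hashlib
import numpy as np

def hll_estimate(items, m=1024):
    registers = np.zeros(m, dtype=int)
    for item in items:
        h = int(hashlib.sha1(str(item).encode()).hexdigest(), 16)
        j = h % m   # register index
        w = h // m  # remaining hash bits
        rank = 1
        while w & 1 == 0 and rank < 64:  # position of the first set bit
            rank += 1
            w >>= 1
        registers[j] = max(registers[j], rank)
    alpha = 0.7213 / (1 + 1.079 / m)  # bias correction for large m
    return alpha * m * m / np.sum(2.0 ** -registers)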
# %% import packages
import warnings
import numpy as np
import pandas as pd
from autokeras import StructuredDataRegressor
from dask import delayed, compute
from dask.distributed import Client
from autogluon import TabularPrediction as task
from ngboost import NGBRegressor
from pydfs_lineup_optimizer import get_optimizer, Site, Sport, Player, LineupOptimizerException
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.preprocessing import MaxAbsScaler
from sqlalchemy import create_engine
from xgboost import XGBRegressor
# %% PGA Simulator
class PgaSimulator(object):
def __init__(self, model='ngb', limit_features=False, transform_target=False,
add_projections=False, advanced_stats=True, advanced_only=False, roll=False, window=4, diff=False,
start_tournament=381, entry_tournament=392, training_tournaments=4, site='DraftKings', mode='Classic',
predictions_only=True, n_lineups=10, exposure= .2, min_salary=True, salary=49800, unique=True, uniques=2, random=False,
dev=False, min_dev=0, max_dev=.3, min_cost=4, max_cost=10, min_entries=10, max_entries=150,
min_payout=5000, contest_name=None):
self.model = model
self.limit_features = limit_features
self.transform_target = transform_target
self.add_projections = add_projections
self.advanced_stats = advanced_stats
self.advanced_only = advanced_only
self.roll = roll
self.window = window
self.diff = diff
self.start_tournament = start_tournament
self.entry_tournament = entry_tournament
self.training_tournaments = training_tournaments
self.site = site
self.mode = mode
self.predictions_only = predictions_only
self.n_lineups = n_lineups
self.exposure = exposure
self.min_salary = min_salary
self.salary = salary
self.unique = unique
self.uniques = uniques
self.random = random
self.dev = dev
self.min_dev = min_dev
self.max_dev = max_dev
self.min_cost = min_cost
self.max_cost = max_cost
self.min_entries = min_entries
self.max_entries = max_entries
self.min_payout = min_payout
self.contest_name = contest_name
self.ids = ['playerid', 'tournamentid']
self.slate_info = ['slateid', 'playertournamentprojectionid', 'operatorslateplayerid', 'operatorposition',
'operatorsalary']
self.optimizer_info = ['TeamAbbrev', 'ID', 'Name', 'Position', 'Salary', 'AvgPointsPercontest', 'projection',
'actual']
self.target = ['fantasypoints', 'fantasypointsdraftkings', 'fantasypointsfanduel', 'fantasypointsyahoo',
'fantasypointsfantasydraft']
self.salaries = ['draftkingssalary', 'fanduelsalary', 'yahoosalary', 'fantasydraftsalary']
self.tournaments = None
self.tourneys = None
self.t = {}
self.start_date = None
self.entry_date = None
self.players = None
self.predictions = None
self.lineup_results = None
self.contest_results = None
self.contests = None
self.cost = None
self.winnings = None
def simulate(self, verbose=False, tracking=False):
engine = create_engine(
'postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga')
conn = engine.connect()
self.tournaments = pd.read_sql("SELECT tournamentid FROM tournament", conn).values[::-1]
for x in range(len(self.tournaments)):
self.t[int(self.tournaments[x])] = x
self.tourneys = [int(x) for x in
self.tournaments[int(np.where(self.tournaments == self.start_tournament)[0]):int(
np.where(self.tournaments == self.entry_tournament)[0]) + 1]]
self.start_date = \
conn.execute(f'SELECT startdate FROM tournament WHERE tournamentid = {self.start_tournament}').fetchone()[0]
self.entry_date = \
conn.execute(f'SELECT startdate FROM tournament WHERE tournamentid = {self.entry_tournament}').fetchone()[0]
conn.close()
warnings.simplefilter(action='ignore', category=(FutureWarning, UserWarning, DeprecationWarning))
np.random.seed(seed=0)
self._clean_data()
self._run_historical_simulation()
if not self.predictions_only:
self._backtest_contest_results()
if verbose:
self._print_results()
def _clean_data(self):
def _datagolf():
datagolf_query = "SELECT t.tournamentid, p.playerid, d.sg_app as approach, d.sg_arg as short_contest, d.sg_ott \
as driving, d.sg_putt as putting, d.sg_t2g as tee_to_green, d.sg_total FROM datagolf d \
INNER JOIN player p on d.player_num = p.pgatourplayerid \
INNER JOIN tournament t on d.tournamentid = t.tournamentid \
WHERE startdate >= '2018-10-04'"
engine = create_engine(
'postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga')
conn = engine.connect()
datagolf_players = pd.read_sql(datagolf_query, conn)
conn.close()
return datagolf_players
engine = create_engine(
'postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga')
conn = engine.connect()
players_query = "SELECT pt.* FROM playertournament pt INNER JOIN playertournamentprojection ptj on \
pt.playertournamentid = ptj.playertournamentid WHERE (pt.teetime >= '2018-10-04' or pt.teetime is null) \
and ptj.iswithdrawn is False"
print('Grabbing data......')
self.players = pd.read_sql(players_query, conn)
conn.close()
if self.advanced_stats:
datagolf_players = _datagolf()
self.players = self.players.merge(datagolf_players, on=['tournamentid', 'playerid'])
dg_cols = ['approach', 'short_contest', 'driving', 'putting', 'tee_to_green', 'sg_total']
self.players[dg_cols] = self.players[dg_cols].fillna(0)
self.players['fpdk'] = self.players.fantasypointsdraftkings
cols = ['country', 'totalthrough', 'tournamentstatus', 'isalternate', 'oddstowin', 'fantasydraftsalary',
'madecutdidnotfinish', 'oddstowindescription', 'iswithdrawn', 'teetime']
self.players.drop(cols, axis=1, inplace=True)
if self.advanced_only:
self.players = self.players[
dg_cols + self.target + self.ids + self.salaries[:2] + ['playertournamentid', 'fpdk', 'name']].copy()
def _transform_data(self, slate_players):
df = slate_players.groupby(['playerid', 'tourney']).mean()
d = []
if self.roll:
rolling = df.drop(self.target + self.salaries[:2], axis=1).rolling(
window=self.window)
d.append(rolling.mean())
lagged_stats = pd.concat(d, axis=1)
new = lagged_stats.groupby(level=0).shift(1)
else:
new = df.drop(self.target + self.salaries[:2], axis=1).groupby(level=0).shift(1)
df = df[self.target + self.salaries[:2]].merge(new, left_index=True, right_index=True)
df['y_diff'] = pd.concat(
[z.diff(1) for z in [y for x, y in df.fantasypointsdraftkings.groupby(level=0)]],
axis=0)
if not self.advanced_only:
df.dropna(axis=0, subset=['totalstrokes'], inplace=True)
df.fillna(0, inplace=True)
return df
def _grab_slate_data(self, tournament):
engine = create_engine(
'postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga')
conn = engine.connect()
slate_info = [('projection', 'projection') if tournament == self.entry_tournament else ('actual', '')][0]
slate = f"SELECT t.slateid, \
t.tournamentid, \
a.operatorslateplayerid, \
a.playertournamentprojectionid, \
p.playerid, \
a.operatorplayername, \
a.operatorposition, \
a.operatorsalary, \
p.fantasypoints{self.site.lower()} as {slate_info[0]} \
FROM dfsslate s \
INNER JOIN dfsslatetournament t on s.slateid = t.slateid \
INNER JOIN dfsslateplayer a on s.slateid = a.slateid \
INNER JOIN playertournament{slate_info[1]} p on a.playerid = p.playerid AND p.tournamentid = t.tournamentid \
WHERE t.tournamentid = {tournament} AND s.operator = '{self.site}' AND s.operatorgametype = '{self.mode}'"
slate_data = pd.read_sql(slate, conn)
slate_data.sort_values(['operatorplayername', 'operatorsalary'], ascending=False).reset_index(
inplace=True, drop=True)
conn.close()
player_data = slate_data.drop(
self.slate_info + ['actual' if tournament != self.entry_tournament else 'projection'],
axis=1).drop_duplicates()
slate_players = player_data.merge(self.players, how='outer')
slate_players['tourney'] = [self.t[x] for x in slate_players.tournamentid]
slate_players.drop(['tournamentid', 'playertournamentid'], axis=1, inplace=True)
slate_players.operatorplayername.fillna(slate_players.name, inplace=True)
if self.limit_features:
feature_cols = ['earnings', 'sg_total', 'tee_to_green', 'pars', 'fedexpoints',
'streaksofthreebirdiesorbetter']
slate_players = slate_players[feature_cols + ['playerid', 'tourney'] + self.target + self.salaries[:2]]
return slate_data, slate_players
def _train_predict(self, slate_data, player_data, tournament):
np.random.seed(0)
df = player_data
if self.add_projections:
engine = create_engine(
'postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga')
conn = engine.connect()
projections = pd.read_sql("SELECT tournamentid, \"Player\", \"Own\", \"FC Proj\" FROM \"cruncher-rewind\"",
conn)
players = pd.read_sql('SELECT playerid, CONCAT(firstname, \' \', lastname) as \"Player\" FROM player', conn)
projections = projections.merge(players, on='Player').drop('Player', axis=1)
projections['tourney'] = [self.t[x] for x in projections.tournamentid]
projections = projections.groupby(['playerid', 'tourney']).mean().dropna(
axis=0)
df = df.join(projections, rsuffix='proj')
conn.close()
if len(slate_data) > 0:
tourney = int(np.where(self.tournaments == tournament)[0])
if self.training_tournaments == 0:
df_train = df.loc(axis=0)[:, :(tourney - 1)].copy()
else:
df_train = df.loc(axis=0)[:, (tourney - self.training_tournaments):(tourney - 1)].copy()
for i in range(1, 20):
if len(df_train) == 0:
df_train = df.loc(axis=0)[:, (tourney - self.training_tournaments - i):(tourney - 1)].copy()
else:
break
df_train.dropna(inplace=True)
X_train = df_train.drop(self.target + ['fpdk', 'y_diff'], axis=1)
if self.site == 'DraftKings':
y_train = df_train.fantasypointsdraftkings
elif self.site == 'FanDuel':
y_train = df_train.fantasypointsfanduel
if self.diff:
y_train = df_train.y_diff
df_pred = df.loc(axis=0)[:, tourney].copy()
X_pred = df_pred.drop(self.target + ['fpdk', 'y_diff'], axis=1)
X_pred.dropna(inplace=True)
y_pred = pd.Series()
if len(X_pred) > 0:
ss = MaxAbsScaler()
ys = MaxAbsScaler()
X_trains = ss.fit_transform(X_train)
# transform (not refit) the prediction rows so both splits share one scaling
X_preds = ss.transform(X_pred)
if self.transform_target:
y_trains = ys.fit_transform(y_train.values.reshape(-1, 1))
y_trains = [y[0] for y in y_trains]
y_trains = pd.Series(y_trains)
else:
y_trains = y_train
np.random.seed(seed=0)
if self.model == 'autogluon':
df_train.drop(['fpdk', 'y_diff'], axis=1).to_csv(f'/mnt/efs/{tourney}-df_train.csv', index=False)
X_pred.to_csv(f'/mnt/efs/{tourney}-X_pred.csv', index=False)
model = task.fit(train_data=task.Dataset(file_path=f'/mnt/efs/{tourney}-df_train.csv'),
label='fantasypointsdraftkings', output_directory='/mnt/efs/', auto_stack=True)
if self.model == 'xgb':
model = XGBRegressor(verbose=False)
model.fit(X_trains, y_trains)
y_preds = model.predict(X_preds)
if self.model == 'ngb':
model = NGBRegressor(verbose=False)
model.fit(X_trains, y_trains)
y_preds = model.predict(X_preds)
if self.model == 'autokeras':
model = StructuredDataRegressor(max_trials=100, seed=0)
model.fit(x=X_trains, y=y_trains, epochs=10)
y_preds = model.predict(X_preds)
y_preds = [y[0] for y in y_preds]
if self.model == 'autogluon':
y_preds = model.predict(dataset=task.Dataset(file_path=f'/mnt/efs/{tourney}-X_pred.csv'))
y_preds = [y[0] for y in y_preds]
if self.transform_target:
y_preds = pd.DataFrame(y_preds)
y_preds = ys.inverse_transform(y_preds)
y_preds = [y[0] for y in y_preds]
y_preds = pd.Series(y_preds, index=X_pred.index, name='AvgPointsPerGame')
y_preds.fillna(0, inplace=True)
y_pred = pd.Series(y_preds, index=X_pred.index, name='AvgPointsPerGame')
if self.diff:
y_pred += df_pred.fpdk
return y_pred.reset_index()
else:
return pd.Series()
def _generate_lineups(self, slate_data, predictions, tournament):
def _optimize(predictions, slate_data, slate):
slate_projections = predictions[predictions.slateid == slate].copy()
slate_projections.sort_values(by=['operatorplayername', 'operatorsalary'], ascending=True).reset_index(
drop=True, inplace=True)
if tournament == self.entry_tournament:
export = []
fantasy_points = 'projection'
else:
fantasy_points = 'actual'
optimizer_projections = slate_projections.drop(
['slateid', 'tournamentid', 'tourney', 'playertournamentprojectionid', 'playerid', fantasy_points],
axis=1).copy()
optimizer_projections.columns = self.optimizer_info[1:6]
players = []
for row in optimizer_projections.iterrows():
row = row[1]
players.append(
Player(str(row.ID), row.Name.split(' ')[0], row.Name.split(' ')[1], [row.Position], '',
row.Salary,
row.AvgPointsPercontest))
if self.site == 'DraftKings':
operator_site = Site.DRAFTKINGS
elif self.site == 'FanDuel':
operator_site = Site.FANDUEL
optimizer = get_optimizer(operator_site, Sport.GOLF)
if self.site == 'FanDuel':
optimizer.settings.min_teams = None
optimizer.load_players(players)
lineups, points, pts_summary = [], [], []
optimizer_lineups = optimizer.optimize(n=self.n_lineups, max_exposure=self.exposure, randomness=self.random)
if self.dev:
optimizer.set_deviation(min_deviation=self.min_dev, max_deviation=self.max_dev)
if self.unique:
optimizer.set_max_repeating_players(6 - self.uniques)
if self.min_salary:
optimizer.set_min_salary_cap(self.salary)
try:
for lineup in optimizer_lineups:
p_id = []
[p_id.append(player._player.id) for player in lineup.lineup]
lineups.append(
optimizer_projections[optimizer_projections.ID.isin(p_id)][['Name', 'ID']].values)
if tournament == self.entry_tournament:
export.append(p_id)
except LineupOptimizerException:
pass
if tournament != self.entry_tournament:
[points.append(slate_data[slate_data.operatorslateplayerid.isin([x[1] for x in lineup])].actual.sum())
for
lineup in lineups]
try:
pts_summary.append((float(max(points)), float(sum(points) / len(points))))
except ValueError:
pass
if tournament == self.entry_tournament:
exports = pd.DataFrame(export, columns=['G'] * 6)
exports.to_csv('/mnt/efs/' + str(tournament) + '-' + str(slate) + 'lineupexports.csv', index=False)
lineup_result = [tournament, slate, lineups, points, pts_summary]
return lineup_result
lineup_results = []
if len(predictions) > 0:
predictions = slate_data.merge(predictions)
if tournament != self.entry_tournament:
rmse = np.sqrt(mean_squared_error(predictions.actual, predictions.AvgPointsPerGame))
mae = mean_absolute_error(predictions.actual, predictions.AvgPointsPerGame)
metrics = rmse, mae
else:
metrics = 0, 0
slates = slate_data.slateid.unique()
for slate in slates:
results = _optimize(predictions, slate_data, slate)
results.append(metrics)
lineup_results.append(results)
return lineup_results, predictions
@delayed
def _tournament_simulation(self, tournament):
slate_data, slate_players = self._grab_slate_data(tournament)
player_data = self._transform_data(slate_players)
prediction = []
predictions = self._train_predict(slate_data, player_data, tournament)
if not predictions.empty:
prediction.append(predictions)
predictions = pd.concat(prediction, axis=0)
predictions = slate_data.merge(predictions)
if tournament != self.entry_tournament:
rmse = np.sqrt(mean_squared_error(predictions.actual, predictions.AvgPointsPerGame))
mae = mean_absolute_error(predictions.actual, predictions.AvgPointsPerGame)
metrics = rmse, mae
else:
metrics = 0, 0
if self.predictions_only:
results = [predictions, metrics]
else:
results = self._generate_lineups(slate_data, predictions, tournament)
return results
def _simulations_wrapper(self):
np.random.seed(0)
results = []
print('Simulating tournaments...')
for tournament in self.tourneys:
result = self._tournament_simulation(tournament)
results.append(result)
return results
def _run_historical_simulation(self):
np.random.seed(0)
results = compute(self._simulations_wrapper())
if self.predictions_only:
self.lineup_results = results
else:
self.lineup_results = results[0]
self.lineup_results = [(x, y) for x, y in self.lineup_results if x]
self.predictions = [x[1].sort_values(by='AvgPointsPerGame', axis=0, ascending=False) for x in
self.lineup_results]
self.lineup_results = [x[0] for x in [x[0] for x in self.lineup_results]]
@delayed
def _tally_winnings(self, contestid):
engine = create_engine(
'postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga')
conn = engine.connect()
contest_info = f"SELECT l.contestid, \
l.rank as place, \
l.points, \
l.cash_amt, \
t.tournamentid, \
t.slateid, \
c.period, \
c.cost, \
c.max_entries, \
c.prizepool, \
c.winning_score, \
c.mincash_score, \
c.winning_payout, \
c.mincash_payout \
FROM contest c \
INNER JOIN dfsslate s on c.slate = s.operatorslateid \
INNER JOIN dfsslatetournament t on s.slateid = t.slateid \
INNER JOIN contestlineup l on c.contestid = l.contestid \
WHERE l.contestid = {contestid} and l.cash_amt > 0"
contest = pd.read_sql(contest_info, conn).sort_values(by='place')
payout = pd.read_sql(
f'SELECT rank as place, by_rank, ranges, last_paid_rank, contestid FROM contestpayout WHERE contestid = {contestid}',
conn)
conn.close()
scores = []
try:
scores = \
[x[3] for x in self.lineup_results if x[0] == contest.tournamentid.unique()
and x[1] == contest.slateid.unique()][0]
except IndexError:
pass
costs, winnings = 0, 0
if not payout.empty and not contest.empty and scores:
payout['place'] = [int(x) for x in payout['place'].values]
payout = payout.sort_values(by='place').fillna(method='ffill').drop(
['last_paid_rank', 'contestid'], axis=1)
payout[['ranges', 'by_rank']] = payout[['ranges', 'by_rank']].apply(lambda x: x / 100)
ranks = payout['place'].values
places = [i for i in range(1, ranks[-1] + 1)]
ranks = pd.Series([x if x in ranks else None for x in places]).fillna(method='ffill')
ranks = [x for x in ranks.values]
ranges = [[int(x) for x in payout[payout['place'] == i]['ranges'].values] for i in ranks]
ranges = [int(r[0]) for r in ranges]
wins_or_ties = []
contest_points = contest[['place', 'points']].copy()
n_entries = int(contest.max_entries.unique())
scores = scores[:n_entries]
scores = sorted(scores, reverse=True)
for score in scores:
cost = float(contest.cost.unique())
if cost == 0:
cost = .25
costs += cost
for row in contest_points.iterrows():
row = row[1]
if score > row.points:
wins_or_ties.append((row.place + len(wins_or_ties), score, 0))
break
elif score == row.points:
wins_or_ties.append((row.place + len(wins_or_ties), score, 1))
break
for place in wins_or_ties:
if place[2] == 0:
contest_points[contest_points.place >= place[0]].place.apply(lambda x: x + 1)
contest_points = contest_points.append({'place': place[0], 'points': place[1]},
ignore_index=True)
if place[2] == 1:
contest_points[contest_points.place > place[1]].place.apply(lambda x: x + 1)
contest_points = contest_points.append({'place': place[0], 'points': place[1]},
ignore_index=True)
contest_points = contest_points[contest_points.place <= contest.place.values[-1]].copy()
new_rankings = places, ranges, [x for x in contest_points['place'].values]
new_rankings = pd.DataFrame(new_rankings).T
new_rankings = new_rankings.rename(columns={0: 'place', 1: 'ranges', 2: 'rank'}) \
.sort_values(by='place').dropna(axis=0)
new_payout = []
for rank in [x for x in new_rankings['rank'].unique()]:
ties = new_rankings[new_rankings['rank'] == rank].copy()
tie_amount = len(ties)
[new_payout.append(sum(ties.ranges.values) / tie_amount) for _ in range(tie_amount)]
new_rankings['payout'] = new_payout
placements = wins_or_ties
if placements and not new_rankings.empty:
for placement in placements:
try:
winnings += new_rankings[new_rankings.place == placement[0]].payout.values[0]
except IndexError:
pass
return costs, winnings
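# Worked example of the tie handling above: tied scores split the combined
# payout of the ranks they occupy, so two entries tied at rank 3 in a contest
# paying 10/6/4/2 for ranks 1-4 each receive (4 + 2) / 2 = 3.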
def _run_historical_winnings_calculation(self):
contests, cost, winnings = [], [], []
print('Simulating contests...')
for contest in [x for x in self.contests.contestid.unique()]:
contests.append(contest)
tally = self._tally_winnings(contest)
cost.append(tally[0])
winnings.append(tally[1])
contest_results = [contests, cost, winnings]
cost = sum(cost)
winnings = sum(winnings)
return [contest_results, cost, winnings]
def _backtest_contest_results(self):
engine = create_engine(
'postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga')
conn = engine.connect()
contests = f"SELECT DISTINCT(l.contestid), \
t.tournamentid, \
c.name as contest_name\
FROM contest c \
INNER JOIN dfsslate s on c.slate = s.operatorslateid \
INNER JOIN dfsslatetournament t on s.slateid = t.slateid \
INNER JOIN contestlineup l on c.contestid = l.contestid \
WHERE c.site = '{self.site.lower()}' and s.operatorgametype = '{self.mode}' \
and c.cost >= {self.min_cost} and c.cost <= {self.max_cost} \
and c.winning_payout >={self.min_payout} and c.max_entries >= {self.min_entries} \
and c.max_entries <={self.max_entries} and c.period >= '{self.start_date}' \
and c.period < '{self.entry_date}'"
self.contests = pd.read_sql(contests, conn)
conn.close()
if self.contest_name:
self.contests = self.contests[self.contests.contest_name.str.contains(f'{self.contest_name}')]
contest_results = compute(self._run_historical_winnings_calculation())
contest_results = contest_results[0]
self.contest_results = contest_results[0]
self.cost = contest_results[1]
self.winnings = contest_results[2]
def _print_results(self):
try:
print(
'\n+++++++++++++++++++++++++++++++++++++++++++++CONTEST '
'RESULTS++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')
print('Tournaments: {}'.format(int(self.contests.tournamentid.nunique())), end=' ')
print('Contests: {}'.format(int(self.contests.contestid.nunique())), end=' ')
print('Cost: ${:.0f}'.format(self.cost), end=' ')
print('Winnings: ${:.0f}'.format(self.winnings), end=' ')
print('Profit: ${:.0f}'.format(self.winnings - self.cost))
print('\nCost/tournament: ${:.0f}/tournament'.format(self.cost / int(self.contests.tournamentid.nunique())),
end=' ')
print('Profit/tournament: ${:.0f}/tournament'.format(
(self.winnings - self.cost) / int(self.contests.tournamentid.nunique())))
print('Cost/contest: ${:.0f}/contest'.format(self.cost / int(self.contests.contestid.nunique())),
end=' ')
print('Profit/contest: ${:.0f}/contest'.format(
(self.winnings - self.cost) / int(self.contests.contestid.nunique())))
except ZeroDivisionError:
print('No contests to calculate!')
# %%
if __name__ == '__main__':
client = Client()
pga = PgaSimulator(training_tournaments=4)
pga.simulate()
| [
"pandas.DataFrame",
"dask.distributed.Client",
"pydfs_lineup_optimizer.get_optimizer",
"numpy.random.seed",
"warnings.simplefilter",
"autokeras.StructuredDataRegressor",
"autogluon.TabularPrediction.Dataset",
"sklearn.metrics.mean_absolute_error",
"sklearn.preprocessing.MaxAbsScaler",
"ngboost.NGB... | [((29254, 29262), 'dask.distributed.Client', 'Client', ([], {}), '()\n', (29260, 29262), False, 'from dask.distributed import Client\n'), ((3412, 3529), 'sqlalchemy.create_engine', 'create_engine', (['"""postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga"""'], {}), "(\n 'postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga'\n )\n", (3425, 3529), False, 'from sqlalchemy import create_engine\n'), ((4328, 4429), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': '(FutureWarning, UserWarning, DeprecationWarning)'}), "(action='ignore', category=(FutureWarning, UserWarning,\n DeprecationWarning))\n", (4349, 4429), False, 'import warnings\n'), ((4434, 4456), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(0)'}), '(seed=0)\n', (4448, 4456), True, 'import numpy as np\n'), ((5548, 5665), 'sqlalchemy.create_engine', 'create_engine', (['"""postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga"""'], {}), "(\n 'postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga'\n )\n", (5561, 5665), False, 'from sqlalchemy import create_engine\n'), ((6022, 6054), 'pandas.read_sql', 'pd.read_sql', (['players_query', 'conn'], {}), '(players_query, conn)\n', (6033, 6054), True, 'import pandas as pd\n'), ((7920, 8037), 'sqlalchemy.create_engine', 'create_engine', (['"""postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga"""'], {}), "(\n 'postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga'\n )\n", (7933, 8037), False, 'from sqlalchemy import create_engine\n'), ((9052, 9076), 'pandas.read_sql', 'pd.read_sql', (['slate', 'conn'], {}), '(slate, conn)\n', (9063, 9076), True, 'import pandas as pd\n'), ((10159, 10176), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (10173, 10176), True, 'import numpy as np\n'), ((19165, 19194), 'pandas.concat', 'pd.concat', (['prediction'], {'axis': '(0)'}), '(prediction, axis=0)\n', (19174, 19194), True, 'import pandas as pd\n'), ((19798, 19815), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (19812, 19815), True, 'import numpy as np\n'), ((20092, 20109), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (20106, 20109), True, 'import numpy as np\n'), ((20699, 20816), 'sqlalchemy.create_engine', 'create_engine', (['"""postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga"""'], {}), "(\n 'postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga'\n )\n", (20712, 20816), False, 'from sqlalchemy import create_engine\n'), ((21839, 21984), 'pandas.read_sql', 'pd.read_sql', (['f"""SELECT rank as place, by_rank, ranges, last_paid_rank, contestid FROM contestpayout WHERE contestid = {contestid}"""', 'conn'], {}), "(\n f'SELECT rank as place, by_rank, ranges, last_paid_rank, contestid FROM contestpayout WHERE contestid = {contestid}'\n , conn)\n", (21850, 21984), True, 'import pandas as pd\n'), ((26272, 26389), 'sqlalchemy.create_engine', 'create_engine', (['"""postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga"""'], {}), "(\n 'postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga'\n )\n", (26285, 26389), False, 'from sqlalchemy import create_engine\n'), ((27393, 27420), 'pandas.read_sql', 'pd.read_sql', (['contests', 
'conn'], {}), '(contests, conn)\n', (27404, 27420), True, 'import pandas as pd\n'), ((5242, 5359), 'sqlalchemy.create_engine', 'create_engine', (['"""postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga"""'], {}), "(\n 'postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga'\n )\n", (5255, 5359), False, 'from sqlalchemy import create_engine\n'), ((5434, 5467), 'pandas.read_sql', 'pd.read_sql', (['datagolf_query', 'conn'], {}), '(datagolf_query, conn)\n', (5445, 5467), True, 'import pandas as pd\n'), ((7269, 7289), 'pandas.concat', 'pd.concat', (['d'], {'axis': '(1)'}), '(d, axis=1)\n', (7278, 7289), True, 'import pandas as pd\n'), ((10257, 10374), 'sqlalchemy.create_engine', 'create_engine', (['"""postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga"""'], {}), "(\n 'postgresql://username:password@pga-postgresql.cxmbk6ooy1lu.us-east-1.rds.amazonaws.com/pga'\n )\n", (10270, 10374), False, 'from sqlalchemy import create_engine\n'), ((10445, 10546), 'pandas.read_sql', 'pd.read_sql', (['"""SELECT tournamentid, "Player", "Own", "FC Proj" FROM "cruncher-rewind\\""""', 'conn'], {}), '(\n \'SELECT tournamentid, "Player", "Own", "FC Proj" FROM "cruncher-rewind"\',\n conn)\n', (10456, 10546), True, 'import pandas as pd\n'), ((10606, 10712), 'pandas.read_sql', 'pd.read_sql', (['"""SELECT playerid, CONCAT(firstname, \' \', lastname) as "Player" FROM player"""', 'conn'], {}), '(\n \'SELECT playerid, CONCAT(firstname, \\\' \\\', lastname) as "Player" FROM player\'\n , conn)\n', (10617, 10712), True, 'import pandas as pd\n'), ((12227, 12238), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (12236, 12238), True, 'import pandas as pd\n'), ((14745, 14756), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (14754, 14756), True, 'import pandas as pd\n'), ((16133, 16173), 'pydfs_lineup_optimizer.get_optimizer', 'get_optimizer', (['operator_site', 'Sport.GOLF'], {}), '(operator_site, Sport.GOLF)\n', (16146, 16173), False, 'from pydfs_lineup_optimizer import get_optimizer, Site, Sport, Player, LineupOptimizerException\n'), ((19411, 19480), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['predictions.actual', 'predictions.AvgPointsPerGame'], {}), '(predictions.actual, predictions.AvgPointsPerGame)\n', (19430, 19480), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((3592, 3648), 'pandas.read_sql', 'pd.read_sql', (['"""SELECT tournamentid FROM tournament"""', 'conn'], {}), "('SELECT tournamentid FROM tournament', conn)\n", (3603, 3648), True, 'import pandas as pd\n'), ((12294, 12308), 'sklearn.preprocessing.MaxAbsScaler', 'MaxAbsScaler', ([], {}), '()\n', (12306, 12308), False, 'from sklearn.preprocessing import MaxAbsScaler\n'), ((12330, 12344), 'sklearn.preprocessing.MaxAbsScaler', 'MaxAbsScaler', ([], {}), '()\n', (12342, 12344), False, 'from sklearn.preprocessing import MaxAbsScaler\n'), ((12757, 12779), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(0)'}), '(seed=0)\n', (12771, 12779), True, 'import numpy as np\n'), ((14533, 14596), 'pandas.Series', 'pd.Series', (['y_preds'], {'index': 'X_pred.index', 'name': '"""AvgPointsPerGame"""'}), "(y_preds, index=X_pred.index, name='AvgPointsPerGame')\n", (14542, 14596), True, 'import pandas as pd\n'), ((17770, 17809), 'pandas.DataFrame', 'pd.DataFrame', (['export'], {'columns': "(['G'] * 6)"}), "(export, columns=['G'] * 6)\n", (17782, 17809), True, 'import pandas as pd\n'), ((18332, 18401), 
'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['predictions.actual', 'predictions.AvgPointsPerGame'], {}), '(predictions.actual, predictions.AvgPointsPerGame)\n', (18351, 18401), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((19323, 19391), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['predictions.actual', 'predictions.AvgPointsPerGame'], {}), '(predictions.actual, predictions.AvgPointsPerGame)\n', (19341, 19391), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((21766, 21797), 'pandas.read_sql', 'pd.read_sql', (['contest_info', 'conn'], {}), '(contest_info, conn)\n', (21777, 21797), True, 'import pandas as pd\n'), ((24780, 24806), 'pandas.DataFrame', 'pd.DataFrame', (['new_rankings'], {}), '(new_rankings)\n', (24792, 24806), True, 'import pandas as pd\n'), ((11125, 11165), 'numpy.where', 'np.where', (['(self.tournaments == tournament)'], {}), '(self.tournaments == tournament)\n', (11133, 11165), True, 'import numpy as np\n'), ((12659, 12678), 'pandas.Series', 'pd.Series', (['y_trains'], {}), '(y_trains)\n', (12668, 12678), True, 'import pandas as pd\n'), ((13318, 13345), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {'verbose': '(False)'}), '(verbose=False)\n', (13330, 13345), False, 'from xgboost import XGBRegressor\n'), ((13517, 13544), 'ngboost.NGBRegressor', 'NGBRegressor', ([], {'verbose': '(False)'}), '(verbose=False)\n', (13529, 13544), False, 'from ngboost import NGBRegressor\n'), ((13722, 13769), 'autokeras.StructuredDataRegressor', 'StructuredDataRegressor', ([], {'max_trials': '(100)', 'seed': '(0)'}), '(max_trials=100, seed=0)\n', (13745, 13769), False, 'from autokeras import StructuredDataRegressor\n'), ((14225, 14246), 'pandas.DataFrame', 'pd.DataFrame', (['y_preds'], {}), '(y_preds)\n', (14237, 14246), True, 'import pandas as pd\n'), ((14391, 14454), 'pandas.Series', 'pd.Series', (['y_preds'], {'index': 'X_pred.index', 'name': '"""AvgPointsPerGame"""'}), "(y_preds, index=X_pred.index, name='AvgPointsPerGame')\n", (14400, 14454), True, 'import pandas as pd\n'), ((18240, 18308), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['predictions.actual', 'predictions.AvgPointsPerGame'], {}), '(predictions.actual, predictions.AvgPointsPerGame)\n', (18258, 18308), False, 'from sklearn.metrics import mean_squared_error, mean_absolute_error\n'), ((22797, 22853), 'pandas.Series', 'pd.Series', (['[(x if x in ranks else None) for x in places]'], {}), '([(x if x in ranks else None) for x in places])\n', (22806, 22853), True, 'import pandas as pd\n'), ((13073, 13131), 'autogluon.TabularPrediction.Dataset', 'task.Dataset', ([], {'file_path': 'f"""/mnt/efs/{tourney}-df_train.csv"""'}), "(file_path=f'/mnt/efs/{tourney}-df_train.csv')\n", (13085, 13131), True, 'from autogluon import TabularPrediction as task\n'), ((14040, 14096), 'autogluon.TabularPrediction.Dataset', 'task.Dataset', ([], {'file_path': 'f"""/mnt/efs/{tourney}-X_pred.csv"""'}), "(file_path=f'/mnt/efs/{tourney}-X_pred.csv')\n", (14052, 14096), True, 'from autogluon import TabularPrediction as task\n'), ((3847, 3898), 'numpy.where', 'np.where', (['(self.tournaments == self.start_tournament)'], {}), '(self.tournaments == self.start_tournament)\n', (3855, 3898), True, 'import numpy as np\n'), ((3937, 3988), 'numpy.where', 'np.where', (['(self.tournaments == self.entry_tournament)'], {}), '(self.tournaments == self.entry_tournament)\n', (3945, 3988), True, 'import numpy as np\n')] |
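The simulator above leans on dask's delayed/compute pattern (methods decorated with @delayed, results gathered with compute); a minimal standalone illustration with a hypothetical work function:

from dask import delayed, compute

@delayed
def square(x):
    return x * x

tasks = [square(i) for i in range(4)]
print(compute(tasks))  # -> ([0, 1, 4, 9],)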
#!/usr/bin/env python3
import asyncio
import json
import keras.preprocessing
import numpy as np
import re
import spacy
import sys
import tensorflow as tf
from pathlib import Path
from sklearn.feature_extraction.text import TfidfVectorizer
from spacy import displacy
from spacy.matcher import Matcher
from textblob import TextBlob
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
class ProcessText():
def __init__(self):
self.nlp = spacy.load("en_core_web_md")
self.model = tf.keras.models.load_model(Path(__file__).parents[0].joinpath("model.h5"))
self.uncontracter = RegexpReplacer()
self.unpunctuator = re.compile(r"[!#$%&'()*+,-./:;<=>?@[\]^_`{|}~]")
self.tokenizer = keras.preprocessing.text.Tokenizer(num_words=None)
self.tokenizer.fit_on_texts(list(np.genfromtxt(
Path(__file__).parents[0].joinpath("vocab.txt"), dtype="str", delimiter="\n"
)))
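        # fitting on the saved vocab file rebuilds the same word->index map the
        # model was trained with, so texts_to_sequences stays consistent with it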
self.sentiments = [
"empty",
"sadness",
"enthusiasm",
"neutral",
"worry",
"surprise",
"love",
"fun",
"hate",
"happiness",
"boredom",
"relief",
"anger"
]
async def process(self, text: str):
self.doc = self.nlp(text)
self.blob = TextBlob(text)
sentiment, fmt_ents, (tf_idf, word_associations) = await asyncio.gather(
self.get_sentiment(self.doc, self.blob),
self.get_formatted_entities(self.doc),
self.get_word_associations(self.doc)
)
return json.dumps({
"ents": fmt_ents,
"tf_idf": tf_idf,
"word_associations": word_associations,
"sentiment": sentiment
})
async def get_formatted_entities(self, doc: spacy.tokens.Doc):
return displacy.render(doc, style="ent")
async def get_word_associations(self, doc: spacy.tokens.Doc):
async def get_tfidf():
vectorizer = TfidfVectorizer(
ngram_range=(1, 1), max_features=10,
stop_words=self.nlp.Defaults.stop_words)
vectorizer.fit_transform([span.text for span in doc.sents])
tf_idf = dict(
feature for feature in zip(vectorizer.get_feature_names(), vectorizer.idf_)
if not feature[0].isnumeric()
)
return tf_idf
tf_idf = await get_tfidf()
matcher = Matcher(self.nlp.vocab)
for token in tf_idf.keys(): # transform tokens to similarity matrix
matcher.add(token, None, [{"LOWER": token}])
spanlist = [doc[match[1]:match[2]] for match in matcher(doc)]
word_associations = {}
for span in spanlist[:-1]:
word_associations[span.text] = [
{otherspan.text: str(span.similarity(otherspan))} for otherspan in spanlist
if otherspan.text != span.text and otherspan.text not in word_associations
]
return (tf_idf, word_associations)
async def get_sentiment(self, doc: spacy.tokens.Doc, blob: TextBlob):
async def sanitize(text: str):
text = text.lower() # lowercase string
            text = str(TextBlob(text).correct())  # spellcheck; blob.correct() would replace this sentence with the whole corrected document
text = await self.uncontracter.replace(text) # replace contractions with regex
text = self.unpunctuator.sub("", text)
text = " ".join(
[word for word in text.split() if word not in self.nlp.Defaults.stop_words]
) # remove stopwords
return text
async def predict_sentence_mood(text: str):
seq = self.tokenizer.texts_to_sequences([text])
seq = keras.preprocessing.sequence.pad_sequences(seq, maxlen=160, dtype="int32")
sentiment = self.model.predict(seq, batch_size=1, verbose=2)
sentiment = np.round(np.dot(sentiment, 100).tolist(), 0)[0] # convert to percentage
return sentiment
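        # average the 13 per-sentence emotion scores over all sentences to get a
        # document-level distribution; zip(*...) regroups the scores by emotion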
mood = dict(
zip(
self.sentiments,
((sum(val)/len(list(doc.sents))) for val in zip(
*[await predict_sentence_mood(await sanitize(span.text))
for span in doc.sents]))
)
)
return {
"mood": mood, # 12 emotions + 1 empty
"polarity": self.blob.sentiment.polarity, # [-1, +1]
"subjectivity": self.blob.sentiment.subjectivity # [0, 1]
}
class RegexpReplacer():
replacement_patterns = [
(r'won\'t', 'will not'),
(r'can\'t', 'cannot'),
(r'i\'m', 'i am'),
(r'ain\'t', 'is not'),
(r'(\w+)\'ll', r'\g<1> will'),
(r'(\w+)n\'t', r'\g<1> not'),
(r'(\w+)\'ve', r'\g<1> have'),
(r'(\w+)\'s', r'\g<1> is'),
(r'(\w+)\'re', r'\g<1> are'),
(r'(\w+)\'d', r'\g<1> would')
]
def __init__(self, patterns=replacement_patterns):
self.patterns = [(re.compile(regex), repl) for (regex, repl) in patterns]
async def replace(self, text):
s = text
for (pattern, repl) in self.patterns:
s = re.sub(pattern, repl, s)
return s
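# Example (illustrative):
#   asyncio.run(RegexpReplacer().replace("he won't and she can't"))
#   -> "he will not and she cannot"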
if __name__ == "__main__":
proc = ProcessText()
import time
start = time.perf_counter()
print(asyncio.run(proc.process(sys.argv[1])))
end = time.perf_counter()
print(f"Time elapsed: {end - start}")
| [
"warnings.filterwarnings",
"sklearn.feature_extraction.text.TfidfVectorizer",
"spacy.matcher.Matcher",
"time.perf_counter",
"json.dumps",
"spacy.load",
"pathlib.Path",
"textblob.TextBlob",
"numpy.dot",
"re.sub",
"spacy.displacy.render",
"re.compile"
] | [((347, 402), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (370, 402), False, 'import warnings\n'), ((5330, 5349), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5347, 5349), False, 'import time\n'), ((5410, 5429), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5427, 5429), False, 'import time\n'), ((469, 497), 'spacy.load', 'spacy.load', (['"""en_core_web_md"""'], {}), "('en_core_web_md')\n", (479, 497), False, 'import spacy\n'), ((667, 715), 're.compile', 're.compile', (['"""[!#$%&\'()*+,-./:;<=>?@[\\\\]^_`{|}~]"""'], {}), '("[!#$%&\'()*+,-./:;<=>?@[\\\\]^_`{|}~]")\n', (677, 715), False, 'import re\n'), ((1369, 1383), 'textblob.TextBlob', 'TextBlob', (['text'], {}), '(text)\n', (1377, 1383), False, 'from textblob import TextBlob\n'), ((1643, 1759), 'json.dumps', 'json.dumps', (["{'ents': fmt_ents, 'tf_idf': tf_idf, 'word_associations': word_associations,\n 'sentiment': sentiment}"], {}), "({'ents': fmt_ents, 'tf_idf': tf_idf, 'word_associations':\n word_associations, 'sentiment': sentiment})\n", (1653, 1759), False, 'import json\n'), ((1901, 1934), 'spacy.displacy.render', 'displacy.render', (['doc'], {'style': '"""ent"""'}), "(doc, style='ent')\n", (1916, 1934), False, 'from spacy import displacy\n'), ((2515, 2538), 'spacy.matcher.Matcher', 'Matcher', (['self.nlp.vocab'], {}), '(self.nlp.vocab)\n', (2522, 2538), False, 'from spacy.matcher import Matcher\n'), ((2058, 2156), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'ngram_range': '(1, 1)', 'max_features': '(10)', 'stop_words': 'self.nlp.Defaults.stop_words'}), '(ngram_range=(1, 1), max_features=10, stop_words=self.nlp.\n Defaults.stop_words)\n', (2073, 2156), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((5206, 5230), 're.sub', 're.sub', (['pattern', 'repl', 's'], {}), '(pattern, repl, s)\n', (5212, 5230), False, 'import re\n'), ((5035, 5052), 're.compile', 're.compile', (['regex'], {}), '(regex)\n', (5045, 5052), False, 'import re\n'), ((546, 560), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (550, 560), False, 'from pathlib import Path\n'), ((3947, 3969), 'numpy.dot', 'np.dot', (['sentiment', '(100)'], {}), '(sentiment, 100)\n', (3953, 3969), True, 'import numpy as np\n'), ((860, 874), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (864, 874), False, 'from pathlib import Path\n')] |
import numpy as np
from ._ReadCDF import _ReadCDF
import DateTimeTools as TT
def ReadDef(Date):
'''
Reads the 'def' position (l2) files for a given date
'''
#read the CDF file
data,meta = _ReadCDF(Date,'def')
if data is None:
return None
#create an output array
dtype = [ ('Date','int32'), #Date in format yyyymmdd
('ut','float32'), #Hours from beginning of the day
('utc','float64'), #Hours from beginning of 19500101
('Epoch','int64'), #CDF Epoch time
('LatLonR','float32',(3,)), #Geocentric latitude, longitude, and radial distance of the satellite position in unit of deg, deg, and km, respectively
('PosGSE','float32',(3,)), #position in GSE coords
('PosGSM','float32',(3,)), #position in GSM coords
('PosSM','float32',(3,)), #position in SM coords
('PosRMLatMLon','float32',(3,)), #R, magnetic latitude, and magnetic local time of spacecraft positions (using IGRF model)
('PosEq','float32',(2,)), #Equatorial footprint
('NFPLatLon','float32',(2,)), #Northern ionospheric lat and long
('SFPLatLon','float32',(2,)), #Southern ionospheric lat and long
('BvecLocal','float32',(3,)), #Local magnetic model field vector
('BmagLocal','float32'), #Local magnetic model field magnitude
('BvecEq','float32',(3,)), #Equatorial magnetic model field vector
('BmagEq','float32'), #Equatorial magnetic model field magnitude
('Lm','float32',(3,)), #McIlwain's L parameter for pitch angles of 30, 60, and 90 deg, in descending order
('VelGSE','float32',(3,)), #Velocity in GSE coords
('VelGSM','float32',(3,)), #Velocity in GSM coords
('VelSM','float32',(3,)), #Velocity in SM coords
('SpinNo','int16'), #Spin number
('ManPrep','int8'), #Flag for maneuver preparation
('ManOnOff','int8'), #Flag for maneuver on/off
('Eclipse','int8')] #Flag for eclipse
n = data['epoch'].size
out = np.recarray(n,dtype=dtype)
#list the field mappings
fields = { 'pos_llr' : 'LatLonR',
'pos_gse' : 'PosGSE',
'pos_gsm' : 'PosGSM',
'pos_sm' : 'PosSM',
'pos_rmlatmlt' : 'PosRMLatMLon',
'pos_eq' : 'PosEq',
'pos_iono_north' : 'NFPLatLon',
'pos_iono_south' : 'SFPLatLon',
'pos_blocal' : 'BvecLocal',
'pos_blocal_mag' : 'BmagLocal',
'pos_beq' : 'BvecEq',
'pos_beq_mag' : 'BmagEq',
'pos_Lm' : 'Lm',
'vel_gse' : 'VelGSE',
'vel_gsm' : 'VelGSM',
'vel_sm' : 'VelSM',
'spn_num' : 'SpinNo',
'man_prep_flag' : 'ManPrep',
'man_on_flag' : 'ManOnOff',
'eclipse_flag' : 'Eclipse'}
#convert dates and times
out.Date,out.ut = TT.CDFEpochtoDate(data['epoch'])
out.utc = TT.ContUT(out.Date,out.ut)
#move the data into the recarray
for f in list(fields.keys()):
out[fields[f]] = data[f]
if 'FILLVAL' in list(meta[f].keys()):
badval = np.float32(meta[f]['FILLVAL'])
bad = np.where(out[fields[f]] == badval)
			try:
				out[fields[f]][bad] = np.nan
			except (ValueError, TypeError):
				pass  # integer fields cannot hold NaN; leave the fill values in place
return out
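# Example use (illustrative): pos = ReadDef(20170101); pos.PosGSM then holds the
# GSM spacecraft position for every epoch on that date (Date format yyyymmdd).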
| [
"DateTimeTools.ContUT",
"numpy.float32",
"numpy.recarray",
"DateTimeTools.CDFEpochtoDate",
"numpy.where"
] | [((1969, 1996), 'numpy.recarray', 'np.recarray', (['n'], {'dtype': 'dtype'}), '(n, dtype=dtype)\n', (1980, 1996), True, 'import numpy as np\n'), ((2681, 2713), 'DateTimeTools.CDFEpochtoDate', 'TT.CDFEpochtoDate', (["data['epoch']"], {}), "(data['epoch'])\n", (2698, 2713), True, 'import DateTimeTools as TT\n'), ((2725, 2752), 'DateTimeTools.ContUT', 'TT.ContUT', (['out.Date', 'out.ut'], {}), '(out.Date, out.ut)\n', (2734, 2752), True, 'import DateTimeTools as TT\n'), ((2898, 2928), 'numpy.float32', 'np.float32', (["meta[f]['FILLVAL']"], {}), "(meta[f]['FILLVAL'])\n", (2908, 2928), True, 'import numpy as np\n'), ((2938, 2972), 'numpy.where', 'np.where', (['(out[fields[f]] == badval)'], {}), '(out[fields[f]] == badval)\n', (2946, 2972), True, 'import numpy as np\n')] |
#<NAME>
#<NAME>
from numpy import asarray
from numpy import exp
from numpy import sqrt
from numpy import cos
from numpy import e
from numpy import pi
from numpy import argsort
from numpy.random import randn
from numpy.random import rand
from numpy.random import seed
def objective(v):
x, y = v
return (x**4) + (y**4) + (x**4) - (x**2) + (2*(y**2)) -(3*(x**2)) + (2*(x**2)) - y - x
def in_bounds(point, bounds):
for d in range(len(bounds)):
if point[d] < bounds[d, 0] or point[d] > bounds[d, 1]:
return False
return True
def es_comma(objective, bounds, n_iter, step_size, mu, lam):
best, best_eval = None, 1e+10
n_children = int(lam / mu)
population = list()
for _ in range(lam):
candidate = None
while candidate is None or not in_bounds(candidate, bounds):
candidate = bounds[:, 0] + rand(len(bounds)) * (bounds[:, 1] - bounds[:, 0])
population.append(candidate)
for epoch in range(n_iter):
scores = [objective(c) for c in population]
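		# ranks via double argsort: argsort(scores) orders indices by score, and a
		# second argsort inverts that permutation, giving each candidate its rank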
ranks = argsort(argsort(scores))
selected = [i for i,_ in enumerate(ranks) if ranks[i] < mu]
children = list()
for i in selected:
if scores[i] < best_eval:
best, best_eval = population[i], scores[i]
print('%d, Best: f(%s) = %.5f' % (epoch, best, best_eval))
for _ in range(n_children):
child = None
while child is None or not in_bounds(child, bounds):
child = population[i] + randn(len(bounds)) * step_size
children.append(child)
population = children
return [best, best_eval]
seed(1)
bounds = asarray([[-5.0, 5.0], [-5.0, 5.0]])
n_iter = 5000
step_size = 0.15
mu = 5
lam = 100
best, score = es_comma(objective, bounds, n_iter, step_size, mu, lam)
print('Done!')
print('f(%s) = %f' % (best, score)) | [
"numpy.argsort",
"numpy.asarray",
"numpy.random.seed"
] | [((1561, 1568), 'numpy.random.seed', 'seed', (['(1)'], {}), '(1)\n', (1565, 1568), False, 'from numpy.random import seed\n'), ((1579, 1614), 'numpy.asarray', 'asarray', (['[[-5.0, 5.0], [-5.0, 5.0]]'], {}), '([[-5.0, 5.0], [-5.0, 5.0]])\n', (1586, 1614), False, 'from numpy import asarray\n'), ((1021, 1036), 'numpy.argsort', 'argsort', (['scores'], {}), '(scores)\n', (1028, 1036), False, 'from numpy import argsort\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import datetime
import time
from visdom import Visdom
class VisPlot:
"""Plots to Visdom"""
def __init__(self, env='main'):
try:
self.vis = Visdom() # global
except ConnectionError as e:
mlb.red(
"Visdom Server not running, please launch it with `visdom` in the terminal")
exit(1)
self.env = env
print(f"View visdom results on env '{env}'")
self.plots = {} # name -> visdom window str
# if not self.vis.win_exists('last_updated'):
# self.vis.win_exists
def plot(self, title, series, x, y, setup=None, xlabel='Epochs', ylabel=None):
"""
title = 'loss' etc
series = 'train' etc
"""
hr_min = datetime.datetime.now().strftime("%I:%M")
timestamp = datetime.datetime.now().strftime("%A, %B %d, %Y %I:%M%p")
self.vis.text(
            f'<b>LAST UPDATED</b><br>{timestamp}', env=self.env, win='last_updated')
# if setup.expname != 'NoName':
# title += f" ({setup.expname})"
# if setup.has_suggestion:
# title += f" ({setup.sugg_id})"
#title += f" (Phase {setup.phaser.idx}) "
# if setup.config.sigopt:
# display_title = f"{display_title}:{setup.sugg_id}"
# if setup.config.mode is not None:
# display_title += f" ({setup.config.mode})"
display_title = f"{title} ({hr_min})"
if title in self.plots: # update existing plot
self.vis.line(
X=np.array([x]),
Y=np.array([y]),
env=self.env,
win=self.plots[title],
name=series,
update='append'
)
else: # new plot
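            # the first point is duplicated below, presumably to seed the window
            # with a drawable line so that later update='append' calls can extend it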
self.plots[title] = self.vis.line(
X=np.array([x, x]),
Y=np.array([y, y]),
env=self.env,
opts={
'legend': [series],
'title': display_title,
'xlabel': xlabel,
'ylabel': ylabel,
})
#mlb.gray("[plotted to visdom]")
    def heat(self, input, title=""):
        """Recursively render tensors (or containers/modules of tensors) as heatmaps."""
        if isinstance(input, (list, tuple)):
            return [self.heat(x, title=title+f'[{i}]') for i, x in enumerate(input)]
        if isinstance(input, dict):
            return {k: self.heat(v, title=title+k) for k, v in input.items()}
        if isinstance(input, nn.Module):
            for name, tens in input.named_parameters():
                self.heat(tens, title=title+' '+name)
            return
        elif torch.is_tensor(input):
            title += ' '+datetime.datetime.now().strftime("%I:%M%p")
            if input.dim() == 1:
                input = input.unsqueeze(1)
            if input.dim() != 2:
                print(f"Can't display tensor {title} of dim {input.dim()}")
                return
            self.vis.heatmap(input, env='heat', opts=dict(title=title))
    def gradheat(self, input, title=""):
        """Like heat(), but renders the .grad of each tensor instead of its values."""
        if isinstance(input, (list, tuple)):
            return [self.gradheat(x, title=title+f'[{i}]') for i, x in enumerate(input)]
        if isinstance(input, dict):
            return {k: self.gradheat(v, title=title+k) for k, v in input.items()}
        if isinstance(input, nn.Module):
            for name, tens in input.named_parameters():
                self.gradheat(tens, title=title+' '+name)
            return
        elif torch.is_tensor(input):
            if not input.requires_grad:
                return
            input = input.grad
            title += ' '+datetime.datetime.now().strftime(" ∆∆∆ %I:%M%p ")
            if input.dim() == 1:
                input = input.unsqueeze(1)
            if input.dim() != 2:
                print(f"Can't display tensor {title} of dim {input.dim()}")
                return
            self.vis.heatmap(input, env='heat', opts=dict(title=title))
class Input:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class Module(nn.Module):
def __init__(self, input):
super().__init__()
assert isinstance(input, Input)
self.nn = NNProxy()
class NNProxy:
def __getattr__(self, key):
        val = getattr(torch.nn, key)
        assert issubclass(val, nn.Module)  # getattr returns the layer class, not an instance
        def fn(*args, **kwargs):
            return val(*args, in_features=self.trial.shape, **kwargs)
        return fn
class Test(Module):
def __init__(self, input):
def encode(x):
with self.label('conv_encode'):
x.nn.Conv1d(s.o1, s.k1, stride=s.stride)
x.nn.ReLU(True)
x.nn.Conv1d(s.o2, s.k2, stride=s.stride)
x.nn.ReLU(True)
x.nn.Conv1d(s.o3, s.k3, stride=s.stride)
x.nn.ReLU(True)
# these labels apply to `self` globally
x.view(-1).shapes('unflat', 'flat')
x.nn.Linear(s.e2_o)
x.F.relu()
# with x.activation(x.F.relu)
def decode(x):
x.Linear(s.e2_o).relu()
x.Linear('flat').relu()
x.view('unflat')
x.deconv('conv_encode')
x.shape('s').view(-1)
def code():
print("hi")
x(code)
# can do factor=2 for input->output double size etc
x.Linear(factor=1)
x.view('s')
x.transpose(1, 2)
x.F.softmax(dim=2)
x.transpose(1, 2)
def sample(mu, logvar):
with self.constant(): # hold everything constant
std = self.torch.exp(.5*logvar)
epsilon = self.torch.randn_like(std)*.01
return epsilon.mul(std).add_(mu)
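        # sample() above is the VAE reparameterization trick: drawing
        # z = mu + eps*std keeps the draw differentiable w.r.t. mu and logvar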
def forward(x):
            mu, logvar = encode(x)
            z = sample(mu, logvar)  # avoid shadowing the sample() helper
            decoded = decode(z)
return decoded, mu, logvar
self.build(input[0])
class Trial:
def __init__(self, net, shape, silent=False, print_params=False):
self.BATCH = 23 # ideally a weird number we won't come across
shape = list(shape) # for mutability
if -1 in shape:
shape[shape.index(-1)] = self.BATCH
self.t = torch.zeros(shape)
self.net = net
self.shapes = {}
self.silent = silent
self.print_params = print_params
self.original_shape = self.t.shape
# the shape of the first instance of Trial called on a net determines its input shape
if not hasattr(net, '_inputshape'):
net._inputshape = self.original_shape
assert not hasattr(net, '_shapes')
net._shapes = {}
def build_deconv(self, encoder, encoder_input_shape=None):
assert encoder_input_shape or (encoder in self.net._shapes.keys(
)), "Must run trial.apply(encoder) before building its deconv network so that the input shape is known, or specify shape with build_deconv(encoder_input_shape=...)"
if not encoder_input_shape:
encoder_input_shape = self.net._shapes[encoder][0]
assert isinstance(
encoder, nn.Sequential), "todo implement nn.Conv*d support outside of Sequentials"
res = []
trial = Trial(self, encoder_input_shape, silent=True)
forward_shapes = [trial.shape()]
for layer in encoder:
trial.apply(layer)
forward_shapes.append(trial.shape())
backward_shapes = forward_shapes[::-1]
# now backward_shapes is [final_encoder_output,...,final_encoder_input] == [final_decoder_input,...,final_decoder_output]
backward_shapes = backward_shapes[1:]
# now backward_shapes is [first_decoder_output,...,final_decoder_output]
for i, layer in enumerate(encoder[::-1]):
if not isinstance(layer, nn.Conv1d):
if isinstance(layer, nn.Conv2d):
raise NotImplementedError
inverse_layer = layer
else: # nn.Conv1d case
# initial guess is to just swap in and out channels
kwargs = {
'in_channels': layer.out_channels,
'out_channels': layer.in_channels,
'kernel_size': layer.kernel_size,
'stride': layer.stride,
'padding': layer.padding,
'dilation': layer.dilation,
'groups': layer.groups,
'output_padding': 0
}
# careful idk how to feed in bias and padding mode
# bias=layer.bias
# padding_mode=layer.padding_mode
# build initial guess
inverse_layer = nn.ConvTranspose1d(**kwargs)
# inverse_layer = nn.ConvTranspose1d(
# in_channels=in_channels,
# out_channels=out_channels,
# kernel_size=kernel_size,
# stride=stride,
# padding=padding,
# dilation=dilation,
# groups=groups,
# output_padding=0,
# #bias=bias,
# #padding_mode=padding_mode
# )
test = trial.clone()
test.apply(inverse_layer)
out_shape = test.shape()
# if our first guess failed
if out_shape != backward_shapes[i]:
diff = backward_shapes[i][-1] - out_shape[-1]
assert diff != 0
assert out_shape[:-1] == backward_shapes[i][:-1]
if diff > 0: # we are `diff` short of ideal
kwargs['output_padding'] = diff
# we are `-diff` over ideal (note diff==0 case never happens)
else:
# we are trying for: diff = output_padding - 2*padding
# every bit of `padding` we add decreases us by 2, so lets do that and overshoot then make up the remaining bit with output padding
# important question: if we add padding to the deconv like this, should we also be adding that padding to the original conv?
padding_to_add = -diff//2 + 1 # this may always be 1, not sure
output_padding = diff + 2*padding_to_add
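                    # worked example: diff = -3 gives padding_to_add = 2 and
                    # output_padding = -3 + 2*2 = 1, recovering the target length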
assert padding_to_add >= 0
                    assert output_padding >= 0
# not implemented yet
assert len(kwargs['padding']) == 1, breakpoint()
kwargs['padding'] = tuple(
[kwargs['padding'][0] + padding_to_add])
kwargs['output_padding'] = output_padding
inverse_layer = nn.ConvTranspose1d(**kwargs)
test = trial.clone()
test.apply(inverse_layer)
assert test.shape() == backward_shapes[i], breakpoint()
trial.apply(inverse_layer)
res.append(inverse_layer)
assert trial.shape() == backward_shapes[i]
return nn.Sequential(*res)
def reset(self):
self.t = torch.zeros(self.original_shape)
# tr_idx is None if model.forward() returns a tensor, 0 if it returns a tuple with the first element being the desired tensor, etc.
def forward(self, reset=True, assert_same=False, tr_idx=None):
self.print("Running full forward pass...")
# reset to original shape
shape_before_reset = self.t.shape
if reset:
self.reset()
old_shape = self.t.shape
with torch.no_grad():
res = self.net.forward(self.t)
if tr_idx is None:
self.t = res
else:
self.t = res[tr_idx]
self.log("forward()", old_shape)
if assert_same:
assert shape_before_reset == self.t.shape
def new_shape(self, shape, reason=None, batch_included=False):
if isinstance(shape, int):
shape = tuple([shape])
if batch_included:
assert -1 in shape
shape = list(shape)
shape[shape.index(-1)] = self.BATCH
shape = tuple(shape)
else:
shape = (self.BATCH, *shape)
old_shape = self.t.shape
self.t = torch.zeros(shape)
if reason is None:
reason = 'manual trial.new_shape'
self.log(reason, old_shape)
def get_inshape(self, layer):
return self.net._shapes[layer][0]
def get_outshape(self, layer):
return self.net._shapes[layer][1]
def print(self, msg):
if not self.silent:
print(msg)
def log(self, reason, oldshape, newshape=None, depth=0):
if not newshape:
newshape = self.t.shape
        batchidx_old = tuple(oldshape).index(self.BATCH)
        batchidx_new = tuple(newshape).index(self.BATCH)
#assert oldshape[0] == newshape[0] and newshape[0] == self.BATCH
oldshape = tuple([*oldshape[:batchidx_old], -
1, *oldshape[batchidx_old+1:]])
newshape = tuple([*newshape[:batchidx_new], -
1, *newshape[batchidx_new+1:]])
if oldshape == newshape:
body = "(no effect)"
else:
body = "{} -> {}".format(oldshape, newshape)
self.print("{}{}: {}".format('\t'*depth, reason, body))
# convenience function
def view(self, shape):
old_shape = self.t.shape
self.t = self.t.view(shape)
self.log("view", old_shape)
def clone(self):
return Trial(self.net, self.t.shape, silent=self.silent)
# usage:
# shape = trial.shape()
# last_dim = trial.shape(-1)
# batches = trial.shape(0)
# returns shape with batch size self.BATCH
def shape(self, dim=None, warn=True):
if dim == None:
return self.t.shape
if dim == 0 and warn:
print(
"[warn] Accessing batch count, this is an arbitrary number. Disable warning with .shape(0,warn=False)")
return self.t.shape[dim]
# usage:
# self.flatten, self.unflatten = trial.flat_shape()
# trial.view(self.flatten)
# converts NABCDE to NF where F=A*B*C*D*E
def flat_shapes(self):
flat = 1
for dimsize in self.t.shape[1:]:
flat = flat*dimsize
return (-1, flat), (-1, *self.t.shape[1:])
# note if you wanted to do a method like x.view that didn't have a trial.____ equivalent you could do either:
# trial.t = trial.t.view(...)
# or
# trial.apply(lambda t: t.view(...))
# or
# trial.apply(torch.Tensor.view,...)
# note this works with functions not just layers
def apply(self, callable_obj, *args, name=None, depth=0, tr_idx=None, **kwargs):
        if isinstance(callable_obj, list):
            for item in callable_obj:
                self.apply(item, depth=depth, name=name, tr_idx=tr_idx)
            return self  # don't fall through to the single-callable path below
assert callable(callable_obj)
saved = self.clone() # for use in nn.Sequential case
old_shape = self.t.shape
# apply the function
with torch.no_grad():
res = callable_obj(self.t, *args, **kwargs)
if tr_idx is not None:
res = res[tr_idx] # do a deref to get the tensor
assert isinstance(
res, torch.Tensor), "you should be using apply_noassign. The function ({}) you used in apply() returns a nontensor".format(callable_obj)
self.t = res
if isinstance(callable_obj, nn.ReLU):
# abort early. you dont want this printed at all, really.
return self
if name is not None:
self.net._shapes[name] = self.t.shape
self.net._shapes[callable_obj] = (old_shape, self.t.shape)
# make printed name pretty
if name is None:
if '__name__' in dir(callable_obj):
name = callable_obj.__name__
elif isinstance(callable_obj, nn.Module):
name = str(callable_obj)
name = name[:name.find('(')]
else:
name = str(callable_obj)
self.log(name, old_shape, depth=depth)
# print out params if desired
if self.print_params and isinstance(callable_obj, nn.Module):
print("params: {}".format(
sum([x.view(-1, 1).shape[0] for x in callable_obj.parameters()])))
# deal with sequentials
if isinstance(callable_obj, nn.Sequential):
for layer in callable_obj:
saved.apply(layer, depth=depth+1)
return self
def apply_noassign(self, callable_obj):
return callable_obj(self.t)
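# Illustrative usage sketch (the layer names and shapes here are hypothetical):
#   trial = Trial(net, (-1, 1, 16000))   # trace a batch of 1-channel signals
#   trial.apply(net.encoder)             # prints the shape after each sub-layer
#   decoder = trial.build_deconv(net.encoder)  # build a mirrored deconv stack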
| [
"torch.nn.Sequential",
"visdom.Visdom",
"torch.nn.ConvTranspose1d",
"numpy.array",
"torch.is_tensor",
"torch.zeros",
"torch.no_grad",
"datetime.datetime.now"
] | [((6294, 6312), 'torch.zeros', 'torch.zeros', (['shape'], {}), '(shape)\n', (6305, 6312), False, 'import torch\n'), ((11258, 11277), 'torch.nn.Sequential', 'nn.Sequential', (['*res'], {}), '(*res)\n', (11271, 11277), True, 'import torch.nn as nn\n'), ((11317, 11349), 'torch.zeros', 'torch.zeros', (['self.original_shape'], {}), '(self.original_shape)\n', (11328, 11349), False, 'import torch\n'), ((12480, 12498), 'torch.zeros', 'torch.zeros', (['shape'], {}), '(shape)\n', (12491, 12498), False, 'import torch\n'), ((257, 265), 'visdom.Visdom', 'Visdom', ([], {}), '()\n', (263, 265), False, 'from visdom import Visdom\n'), ((2680, 2702), 'torch.is_tensor', 'torch.is_tensor', (['input'], {}), '(input)\n', (2695, 2702), False, 'import torch\n'), ((3513, 3535), 'torch.is_tensor', 'torch.is_tensor', (['input'], {}), '(input)\n', (3528, 3535), False, 'import torch\n'), ((11772, 11787), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11785, 11787), False, 'import torch\n'), ((15314, 15329), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15327, 15329), False, 'import torch\n'), ((841, 864), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (862, 864), False, 'import datetime\n'), ((903, 926), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (924, 926), False, 'import datetime\n'), ((8797, 8825), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', ([], {}), '(**kwargs)\n', (8815, 8825), True, 'import torch.nn as nn\n'), ((1626, 1639), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (1634, 1639), True, 'import numpy as np\n'), ((1659, 1672), 'numpy.array', 'np.array', (['[y]'], {}), '([y])\n', (1667, 1672), True, 'import numpy as np\n'), ((1909, 1925), 'numpy.array', 'np.array', (['[x, x]'], {}), '([x, x])\n', (1917, 1925), True, 'import numpy as np\n'), ((1945, 1961), 'numpy.array', 'np.array', (['[y, y]'], {}), '([y, y])\n', (1953, 1961), True, 'import numpy as np\n'), ((10917, 10945), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', ([], {}), '(**kwargs)\n', (10935, 10945), True, 'import torch.nn as nn\n'), ((2729, 2752), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2750, 2752), False, 'import datetime\n'), ((3656, 3679), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3677, 3679), False, 'import datetime\n')] |
# Copyright (c) 2020, <NAME>
# See LICENSE file for details: <https://github.com/moble/scri/blob/master/LICENSE>
### NOTE: The functions in this file are intended purely for inclusion in the AsymptoticBondData
### class. In particular, they assume that the first argument, `self` is an instance of
### AsymptoticBondData. They should probably not be used outside of that class.
import numpy as np
from math import sqrt, pi
def mass_aspect(self, truncate_ell=max):
"""Compute the Bondi mass aspect of the AsymptoticBondiData
The Bondi mass aspect is given by
    \\Psi = \\psi_2 + \\eth \\eth \\bar{\\sigma} + \\sigma * \\dot{\\bar{\\sigma}}
Note that the last term is a product between two fields. If, for example, these both have
ell_max=8, then their full product would have ell_max=16, meaning that we would go from
tracking 77 modes to 289. This shows that deciding how to truncate the output ell is
important, which is why this function has the extra argument that it does.
Parameters
==========
truncate_ell: int, or callable [defaults to `max`]
Determines how the ell_max value of the output is determined. If an integer is passed,
each term in the output is truncated to have at most that ell_max. (In particular,
terms that will not be used in the output are simply not computed, without incurring any
errors due to aliasing.) If a callable is passed, it is passed on to the
spherical_functions.Modes.multiply method. See that function's docstring for details.
The default behavior will result in the output having ell_max equal to the largest of
any of the individual Modes objects in the equation for \\Psi above -- but not the
product.
"""
if callable(truncate_ell):
return self.psi2 + self.sigma.bar.eth.eth + self.sigma.multiply(self.sigma.bar.dot, truncator=truncate_ell)
elif truncate_ell:
return (
self.psi2.truncate_ell(truncate_ell)
+ self.sigma.bar.eth.eth.truncate_ell(truncate_ell)
+ self.sigma.multiply(self.sigma.bar.dot, truncator=lambda tup: truncate_ell)
)
else:
return self.psi2 + self.sigma.bar.eth.eth + self.sigma * self.sigma.bar.dot
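# Illustrative usage (assuming `abd` is an AsymptoticBondiData instance):
#   Psi = abd.mass_aspect(truncate_ell=8)  # cap every term at ell_max = 8
#   Psi = abd.mass_aspect()                # default: ell_max of the largest term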
def bondi_four_momentum(self):
"""Compute the Bondi four-momentum of the AsymptoticBondiData"""
import spherical_functions as sf
P_restricted = -self.mass_aspect(1).view(np.ndarray) / sqrt(4 * pi) # Compute only the parts we need, ell<=1
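    # the ell=1 modes of the mass aspect encode the momentum: the m=0 mode gives
    # the z component, and real combinations of m=+/-1 give the x and y components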
four_momentum = np.empty(P_restricted.shape, dtype=float)
four_momentum[..., 0] = P_restricted[..., 0].real
four_momentum[..., 1] = (P_restricted[..., 3] - P_restricted[..., 1]).real / sqrt(6)
four_momentum[..., 2] = (1j * (P_restricted[..., 3] + P_restricted[..., 1])).real / sqrt(6)
four_momentum[..., 3] = -P_restricted[..., 2].real / sqrt(3)
return four_momentum
def bondi_angular_momentum(self):
"""Compute the Bondi angular momentum vector via Eq. (8) in T. Dray (1985) [DOI:10.1088/0264-9381/2/1/002]"""
from spherical_functions import LM_index
Q = (
self.psi1
+ self.sigma.grid_multiply(self.sigma.bar.eth_GHP)
+ 0.5 * (self.sigma.grid_multiply(self.sigma.bar)).eth_GHP
).ndarray
angular_momentum = np.real(
1j
/ np.sqrt(24 * np.pi)
* np.array(
[
Q[:, LM_index(-1, 1, 0)] - Q[:, LM_index(1, 1, 0)],
-1j * (Q[:, LM_index(-1, 1, 0)] + Q[:, LM_index(1, 1, 0)]),
np.sqrt(2) * Q[:, LM_index(1, 0, 0)],
]
)
)
return angular_momentum
def bondi_spin(self):
"""Computes the Bondi spin angular momentum vector ASSUMING that the orbital part of the angular momentum
computed from the Bondi data is zero."""
from spherical_functions import LM_index as lm
four_momentum = self.bondi_four_momentum()
    rest_mass_squared = four_momentum[-1, 0] ** 2 - np.sum(four_momentum[-1, 1:] ** 2)
    angular_momentum = self.bondi_angular_momentum()
    return (angular_momentum / rest_mass_squared).T
def bondi_boost_charge(self):
"""Compute the Bondi boost charge vector via Eq. (8) in T. Dray (1985) [DOI:10.1088/0264-9381/2/1/002].
This gives the boost charge corresponding to the boost with origin at t=0."""
from spherical_functions import LM_index
Q = (
self.psi1
+ self.sigma.grid_multiply(self.sigma.bar.eth_GHP)
+ 0.5 * (self.sigma.grid_multiply(self.sigma.bar)).eth_GHP
- self.t * (
self.psi2 + self.sigma.grid_multiply(self.sigma.bar.dot)
).real.eth_GHP
).ndarray
boost_charge = np.real(
-1
/ np.sqrt(24 * np.pi)
* np.array(
[
Q[:, LM_index(-1, 1, 0)] - Q[:, LM_index(1, 1, 0)],
-1j * (Q[:, LM_index(-1, 1, 0)] + Q[:, LM_index(1, 1, 0)]),
np.sqrt(2) * Q[:, LM_index(1, 0, 0)],
]
)
)
return boost_charge
def bondi_comoving_CoM(self):
"""Compute the comoving center of mass vector defined as K^i + t*P^i where K^i is the boost charge and
P^i is the momentum. See discussion in arXiv:1912.03164. """
from spherical_functions import LM_index
Q = (
self.psi1
+ self.sigma.grid_multiply(self.sigma.bar.eth_GHP)
+ 0.5 * (self.sigma.grid_multiply(self.sigma.bar)).eth_GHP
).ndarray
charge = np.real(
-1
/ np.sqrt(24 * np.pi)
* np.array(
[
Q[:, LM_index(-1, 1, 0)] - Q[:, LM_index(1, 1, 0)],
-1j * (Q[:, LM_index(-1, 1, 0)] + Q[:, LM_index(1, 1, 0)]),
np.sqrt(2) * Q[:, LM_index(1, 0, 0)],
]
)
)
return charge
def supermomentum(self, supermomentum_def, integrated=False):
"""Computes the supermomentum of the asymptotic Bondi data. Allows for several different definitions
of the supermomentum. These differences only apply to ell > 1 modes, so they do not affect the Bondi
four-momentum. See Eqs. (7-9) in arXiv:1404.2475 for the different supermomentum definitions and links
to further references.
Parameters
----------
supermomentum_def : str
The definition of the supermomentum to be computed. One of the following options (case insensitive)
can be specified:
* 'Bondi-Sachs' or 'BS'
* 'Moreschi' or 'M'
* 'Geroch' or 'G'
* 'Geroch-Winicour' or 'GW'
integrated : bool, default: False
If true, then return the integrated form of the supermomentum. See Eq. (5) in arXiv:1404.2475.
Returns
-------
ModesTimeSeries
"""
if supermomentum_def.lower() in ["bondi-sachs", "bs"]:
supermomentum = self.psi2 + self.sigma.grid_multiply(self.sigma.bar.dot)
elif supermomentum_def.lower() in ["moreschi", "m"]:
supermomentum = self.psi2 + self.sigma.grid_multiply(self.sigma.bar.dot) + self.sigma.bar.eth_GHP.eth_GHP
elif supermomentum_def.lower() in ["geroch", "g"]:
supermomentum = (
self.psi2
+ self.sigma.grid_multiply(self.sigma.bar.dot)
+ 0.5 * (self.sigma.bar.eth_GHP.eth_GHP - self.sigma.ethbar_GHP.ethbar_GHP)
)
elif supermomentum_def.lower() in ["geroch-winicour", "gw"]:
supermomentum = self.psi2 + self.sigma.grid_multiply(self.sigma.bar.dot) - self.sigma.ethbar_GHP.ethbar_GHP
else:
raise ValueError(
f"Supermomentum defintion '{supermomentum_def}' not recognized. Please choose one of "
"the following options:\n"
" * 'Bondi-Sachs' or 'BS'\n"
" * 'Moreschi' or 'M'\n"
" * 'Geroch' or 'G'\n"
" * 'Geroch-Winicour' or 'GW'"
)
if integrated:
return -0.5 * supermomentum.bar / np.sqrt(np.pi)
else:
return supermomentum
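# Illustrative usage (assuming `abd` is an AsymptoticBondiData instance):
#   Psi_M = abd.supermomentum('Moreschi')              # Moreschi definition
#   Psi_BS = abd.supermomentum('BS', integrated=True)  # integrated Bondi-Sachs form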
def transform_moreschi_supermomentum(supermomentum, **kwargs):
"""Apply a BMS transformation to the Moreschi supermomentum using the Moreschi formula,
Eq. (9) of arXiv:gr-qc/0203075. NOTE: This transformation only holds for the Moreschi
supermomentum!
It is important to note that the input transformation parameters are applied in this order:
1. (Super)Translations
2. Rotation (about the origin)
3. Boost (about the origin)
All input parameters refer to the transformation required to take the input data's inertial
frame onto the inertial frame of the output data's inertial observers. In what follows, the
coordinates of and functions in the input inertial frame will be unprimed, while corresponding
values of the output inertial frame will be primed.
The translations (space, time, spacetime, or super) can be given in various ways, which may
override each other. Ultimately, however, they are essentially combined into a single function
`α`, representing the supertranslation, which transforms the asymptotic time variable `u` as
u'(u, θ, ϕ) = u(u, θ, ϕ) - α(θ, ϕ)
A simple time translation by δt would correspond to
α(θ, ϕ) = δt # Independent of (θ, ϕ)
A pure spatial translation δx would correspond to
α(θ, ϕ) = -δx · n̂(θ, ϕ)
where `·` is the usual dot product, and `n̂` is the unit vector in the given direction.
Parameters
----------
supermomentum: ModesTimeSeries
The object storing the modes of the original data, which will be transformed in this
function. This is the only required argument to this function.
time_translation: float, optional
Defaults to zero. Nonzero overrides corresponding components of `spacetime_translation` and
`supertranslation` parameters. Note that this is the actual change in the coordinate value,
rather than the corresponding mode weight (which is what `supertranslation` represents).
space_translation : float array of length 3, optional
Defaults to empty (no translation). Non-empty overrides corresponding components of
`spacetime_translation` and `supertranslation` parameters. Note that this is the actual
change in the coordinate value, rather than the corresponding mode weight (which is what
`supertranslation` represents).
spacetime_translation : float array of length 4, optional
Defaults to empty (no translation). Non-empty overrides corresponding components of
`supertranslation`. Note that this is the actual change in the coordinate value, rather
than the corresponding mode weight (which is what `supertranslation` represents).
supertranslation : complex array [defaults to 0]
This gives the complex components of the spherical-harmonic expansion of the
supertranslation in standard form, starting from ell=0 up to some ell_max, which may be
different from the ell_max of the input `supermomentum`. Supertranslations must be real, so
these values should obey the condition
α^{ℓ,m} = (-1)^m ᾱ^{ℓ,-m}
This condition is actually imposed on the input data, so imaginary parts of α(θ, ϕ) will
essentially be discarded. Defaults to empty, which causes no supertranslation. Note that
some components may be overridden by the parameters above.
frame_rotation : quaternion [defaults to 1]
Transformation applied to (x,y,z) basis of the input mode's inertial frame. For example,
the basis z vector of the new frame may be written as
z' = frame_rotation * z * frame_rotation.inverse()
Defaults to 1, corresponding to the identity transformation (no rotation).
boost_velocity : float array of length 3 [defaults to (0, 0, 0)]
This is the three-velocity vector of the new frame relative to the input frame. The norm of
this vector is required to be smaller than 1.
output_ell_max: int [defaults to supermomentum.ell_max]
Maximum ell value in the output data.
working_ell_max: int [defaults to 2 * supermomentum.ell_max]
Maximum ell value to use during the intermediate calculations. Rotations and time
translations do not require this to be any larger than supermomentum.ell_max, but other
transformations will require more values of ell for accurate results. In particular, boosts
are multiplied by time, meaning that a large boost of data with large values of time will
lead to very large power in higher modes. Similarly, large (super)translations will couple
power through a lot of modes. To avoid aliasing, this value should be large, to accomodate
power in higher modes.
Returns
-------
ModesTimeSeries
"""
from quaternion import rotate_vectors
from scipy.interpolate import CubicSpline
import spherical_functions as sf
import spinsfast
import math
from .transformations import _process_transformation_kwargs, boosted_grid, conformal_factors
from ..modes_time_series import ModesTimeSeries
# Parse the input arguments, and define the basic parameters for this function
frame_rotation, boost_velocity, supertranslation, working_ell_max, output_ell_max, = _process_transformation_kwargs(
supermomentum.ell_max, **kwargs
)
n_theta = 2 * working_ell_max + 1
n_phi = n_theta
β = np.linalg.norm(boost_velocity)
γ = 1 / math.sqrt(1 - β ** 2)
# Make this into a Modes object, so it can keep track of its spin weight, etc., through the
# various operations needed below.
supertranslation = sf.Modes(supertranslation, spin_weight=0).real
# This is a 2-d array of unit quaternions, which are what the spin-weighted functions should be
# evaluated on (even for spin 0 functions, for simplicity). That will be equivalent to
# evaluating the spin-weighted functions with respect to the transformed grid -- although on the
# original time slices.
distorted_grid_rotors = boosted_grid(frame_rotation, boost_velocity, n_theta, n_phi)
# Compute u, α, Δα, k, ðk/k, 1/k, and 1/k³ on the distorted grid, including new axes to
# enable broadcasting with time-dependent functions. Note that the first axis should represent
# variation in u, the second axis variation in θ', and the third axis variation in ϕ'.
u = supermomentum.u
α = sf.Grid(supertranslation.evaluate(distorted_grid_rotors), spin_weight=0).real[np.newaxis, :, :]
# The factor of 0.25 comes from using the GHP eth instead of the NP eth.
Δα = sf.Grid(0.25 * supertranslation.ethbar.ethbar.eth.eth.evaluate(distorted_grid_rotors), spin_weight=α.s)[
np.newaxis, :, :
]
k, ðk_over_k, one_over_k, one_over_k_cubed = conformal_factors(boost_velocity, distorted_grid_rotors)
# Ψ(u, θ', ϕ') exp(2iλ)
Ψ = sf.Grid(supermomentum.evaluate(distorted_grid_rotors), spin_weight=0)
### The following calculations are done using in-place Horner form. I suspect this will be the
### most efficient form of this calculation, within reason. Note that the factors of exp(isλ)
### were computed automatically by evaluating in terms of quaternions.
#
# Ψ'(u, θ', ϕ') = k⁻³ (Ψ - ð²ðbar²α)
Ψprime_of_timenaught_directionprime = Ψ.copy() - Δα
Ψprime_of_timenaught_directionprime *= one_over_k_cubed
# Determine the new time slices. The set timeprime is chosen so that on each slice of constant
# u'_i, the average value of u=(u'/k)+α is precisely <u>=u'γ+<α>=u_i. But then, we have to
# narrow that set down, so that every grid point on all the u'_i' slices correspond to data in
# the range of input data.
timeprime = (u - sf.constant_from_ell_0_mode(supertranslation[0]).real) / γ
timeprime_of_initialtime_directionprime = k * (u[0] - α)
timeprime_of_finaltime_directionprime = k * (u[-1] - α)
earliest_complete_timeprime = np.max(timeprime_of_initialtime_directionprime.view(np.ndarray))
latest_complete_timeprime = np.min(timeprime_of_finaltime_directionprime.view(np.ndarray))
timeprime = timeprime[(timeprime >= earliest_complete_timeprime) & (timeprime <= latest_complete_timeprime)]
# This will store the values of Ψ'(u', θ', ϕ')
Ψprime_of_timeprime_directionprime = np.zeros((timeprime.size, n_theta, n_phi), dtype=complex)
# Interpolate the various transformed function values on the transformed grid from the original
# time coordinate to the new set of time coordinates, independently for each direction.
for i in range(n_theta):
for j in range(n_phi):
k_i_j = k[0, i, j]
α_i_j = α[0, i, j]
# u'(u, θ', ϕ')
timeprime_of_timenaught_directionprime_i_j = k_i_j * (u - α_i_j)
# Ψ'(u', θ', ϕ')
Ψprime_of_timeprime_directionprime[:, i, j] = CubicSpline(
timeprime_of_timenaught_directionprime_i_j, Ψprime_of_timenaught_directionprime[:, i, j], axis=0
)(timeprime)
# Finally, transform back from the distorted grid to the SWSH mode weights as measured in that
# grid. I'll abuse notation slightly here by indicating those "distorted" mode weights with
# primes, so that Ψ'(u')_{ℓ', m'} = ∫ Ψ'(u', θ', ϕ') sȲ_{ℓ', m'}(θ', ϕ') sin(θ') dθ' dϕ'
supermomentum_prime = spinsfast.map2salm(Ψprime_of_timeprime_directionprime, 0, output_ell_max)
supermomentum_prime = ModesTimeSeries(
sf.SWSH_modes.Modes(
supermomentum_prime, spin_weight=0, ell_min=0, ell_max=output_ell_max, multiplication_truncator=max
),
time=timeprime,
)
return supermomentum_prime
| [
"numpy.sum",
"spinsfast.map2salm",
"math.sqrt",
"numpy.empty",
"scipy.interpolate.CubicSpline",
"spherical_functions.constant_from_ell_0_mode",
"numpy.zeros",
"spherical_functions.LM_index",
"spherical_functions.Modes",
"numpy.linalg.norm",
"spherical_functions.SWSH_modes.Modes",
"numpy.sqrt"
... | [((2543, 2584), 'numpy.empty', 'np.empty', (['P_restricted.shape'], {'dtype': 'float'}), '(P_restricted.shape, dtype=float)\n', (2551, 2584), True, 'import numpy as np\n'), ((13369, 13399), 'numpy.linalg.norm', 'np.linalg.norm', (['boost_velocity'], {}), '(boost_velocity)\n', (13383, 13399), True, 'import numpy as np\n'), ((16264, 16321), 'numpy.zeros', 'np.zeros', (['(timeprime.size, n_theta, n_phi)'], {'dtype': 'complex'}), '((timeprime.size, n_theta, n_phi), dtype=complex)\n', (16272, 16321), True, 'import numpy as np\n'), ((17295, 17368), 'spinsfast.map2salm', 'spinsfast.map2salm', (['Ψprime_of_timeprime_directionprime', '(0)', 'output_ell_max'], {}), '(Ψprime_of_timeprime_directionprime, 0, output_ell_max)\n', (17313, 17368), False, 'import spinsfast\n'), ((2468, 2480), 'math.sqrt', 'sqrt', (['(4 * pi)'], {}), '(4 * pi)\n', (2472, 2480), False, 'from math import sqrt, pi\n'), ((2720, 2727), 'math.sqrt', 'sqrt', (['(6)'], {}), '(6)\n', (2724, 2727), False, 'from math import sqrt, pi\n'), ((2816, 2823), 'math.sqrt', 'sqrt', (['(6)'], {}), '(6)\n', (2820, 2823), False, 'from math import sqrt, pi\n'), ((2881, 2888), 'math.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (2885, 2888), False, 'from math import sqrt, pi\n'), ((3963, 3997), 'numpy.sum', 'np.sum', (['(four_momentum[-1, 1:] ** 2)'], {}), '(four_momentum[-1, 1:] ** 2)\n', (3969, 3997), True, 'import numpy as np\n'), ((13412, 13433), 'math.sqrt', 'math.sqrt', (['(1 - β ** 2)'], {}), '(1 - β ** 2)\n', (13421, 13433), False, 'import math\n'), ((13592, 13633), 'spherical_functions.Modes', 'sf.Modes', (['supertranslation'], {'spin_weight': '(0)'}), '(supertranslation, spin_weight=0)\n', (13600, 13633), True, 'import spherical_functions as sf\n'), ((17420, 17545), 'spherical_functions.SWSH_modes.Modes', 'sf.SWSH_modes.Modes', (['supermomentum_prime'], {'spin_weight': '(0)', 'ell_min': '(0)', 'ell_max': 'output_ell_max', 'multiplication_truncator': 'max'}), '(supermomentum_prime, spin_weight=0, ell_min=0, ell_max=\n output_ell_max, multiplication_truncator=max)\n', (17439, 17545), True, 'import spherical_functions as sf\n'), ((7852, 7866), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (7859, 7866), True, 'import numpy as np\n'), ((3331, 3350), 'numpy.sqrt', 'np.sqrt', (['(24 * np.pi)'], {}), '(24 * np.pi)\n', (3338, 3350), True, 'import numpy as np\n'), ((4697, 4716), 'numpy.sqrt', 'np.sqrt', (['(24 * np.pi)'], {}), '(24 * np.pi)\n', (4704, 4716), True, 'import numpy as np\n'), ((5464, 5483), 'numpy.sqrt', 'np.sqrt', (['(24 * np.pi)'], {}), '(24 * np.pi)\n', (5471, 5483), True, 'import numpy as np\n'), ((15683, 15731), 'spherical_functions.constant_from_ell_0_mode', 'sf.constant_from_ell_0_mode', (['supertranslation[0]'], {}), '(supertranslation[0])\n', (15710, 15731), True, 'import spherical_functions as sf\n'), ((16829, 16942), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['timeprime_of_timenaught_directionprime_i_j', 'Ψprime_of_timenaught_directionprime[:, i, j]'], {'axis': '(0)'}), '(timeprime_of_timenaught_directionprime_i_j,\n Ψprime_of_timenaught_directionprime[:, i, j], axis=0)\n', (16840, 16942), False, 'from scipy.interpolate import CubicSpline\n'), ((3545, 3555), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3552, 3555), True, 'import numpy as np\n'), ((4911, 4921), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4918, 4921), True, 'import numpy as np\n'), ((5678, 5688), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5685, 5688), True, 'import numpy as np\n'), ((3406, 3424), 'spherical_functions.LM_index', 
'LM_index', (['(-1)', '(1)', '(0)'], {}), '(-1, 1, 0)\n', (3414, 3424), False, 'from spherical_functions import LM_index\n'), ((3433, 3450), 'spherical_functions.LM_index', 'LM_index', (['(1)', '(1)', '(0)'], {}), '(1, 1, 0)\n', (3441, 3450), False, 'from spherical_functions import LM_index\n'), ((3563, 3580), 'spherical_functions.LM_index', 'LM_index', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (3571, 3580), False, 'from spherical_functions import LM_index\n'), ((4772, 4790), 'spherical_functions.LM_index', 'LM_index', (['(-1)', '(1)', '(0)'], {}), '(-1, 1, 0)\n', (4780, 4790), False, 'from spherical_functions import LM_index\n'), ((4799, 4816), 'spherical_functions.LM_index', 'LM_index', (['(1)', '(1)', '(0)'], {}), '(1, 1, 0)\n', (4807, 4816), False, 'from spherical_functions import LM_index\n'), ((4929, 4946), 'spherical_functions.LM_index', 'LM_index', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (4937, 4946), False, 'from spherical_functions import LM_index\n'), ((5539, 5557), 'spherical_functions.LM_index', 'LM_index', (['(-1)', '(1)', '(0)'], {}), '(-1, 1, 0)\n', (5547, 5557), False, 'from spherical_functions import LM_index\n'), ((5566, 5583), 'spherical_functions.LM_index', 'LM_index', (['(1)', '(1)', '(0)'], {}), '(1, 1, 0)\n', (5574, 5583), False, 'from spherical_functions import LM_index\n'), ((5696, 5713), 'spherical_functions.LM_index', 'LM_index', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (5704, 5713), False, 'from spherical_functions import LM_index\n'), ((3481, 3499), 'spherical_functions.LM_index', 'LM_index', (['(-1)', '(1)', '(0)'], {}), '(-1, 1, 0)\n', (3489, 3499), False, 'from spherical_functions import LM_index\n'), ((3508, 3525), 'spherical_functions.LM_index', 'LM_index', (['(1)', '(1)', '(0)'], {}), '(1, 1, 0)\n', (3516, 3525), False, 'from spherical_functions import LM_index\n'), ((4847, 4865), 'spherical_functions.LM_index', 'LM_index', (['(-1)', '(1)', '(0)'], {}), '(-1, 1, 0)\n', (4855, 4865), False, 'from spherical_functions import LM_index\n'), ((4874, 4891), 'spherical_functions.LM_index', 'LM_index', (['(1)', '(1)', '(0)'], {}), '(1, 1, 0)\n', (4882, 4891), False, 'from spherical_functions import LM_index\n'), ((5614, 5632), 'spherical_functions.LM_index', 'LM_index', (['(-1)', '(1)', '(0)'], {}), '(-1, 1, 0)\n', (5622, 5632), False, 'from spherical_functions import LM_index\n'), ((5641, 5658), 'spherical_functions.LM_index', 'LM_index', (['(1)', '(1)', '(0)'], {}), '(1, 1, 0)\n', (5649, 5658), False, 'from spherical_functions import LM_index\n')] |
#!/usr/bin/env python3
# Copyright 2014 <NAME>, <EMAIL>
#
# This file is part of the gammatone toolkit, and is licensed under the 3-clause
# BSD license: https://github.com/detly/gammatone/blob/master/COPYING
import nose
import numpy as np
import scipy.io
from pkg_resources import resource_stream
import gammatone.filters
REF_DATA_FILENAME = 'data/test_erb_filter_data.mat'
INPUT_KEY = 'erb_filter_inputs'
RESULT_KEY = 'erb_filter_results'
INPUT_COLS = ('fs', 'cfs')
RESULT_COLS = ('fcoefs',)
def load_reference_data():
""" Load test data generated from the reference code """
# Load test data
with resource_stream(__name__, REF_DATA_FILENAME) as test_data:
data = scipy.io.loadmat(test_data, squeeze_me=False)
zipped_data = zip(data[INPUT_KEY], data[RESULT_KEY])
for inputs, refs in zipped_data:
input_dict = dict(zip(INPUT_COLS, map(np.squeeze, inputs)))
ref_dict = dict(zip(RESULT_COLS, map(np.squeeze, refs)))
yield (input_dict, ref_dict)
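# nose-style test generator: each tester yielded by the function below is
# collected as its own test case, one per (fs, cfs) row in the reference data.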
def test_make_ERB_filters_known_values():
for inputs, refs in load_reference_data():
args = (
inputs['fs'],
inputs['cfs'],
)
expected = (refs['fcoefs'],)
yield MakeERBFiltersTester(args, expected)
class MakeERBFiltersTester:
def __init__(self, args, expected):
self.fs = args[0]
self.cfs = args[1]
self.expected = expected[0]
self.description = (
"Gammatone filters for {:f}, {:.1f} ... {:.1f}".format(
float(self.fs),
float(self.cfs[0]),
float(self.cfs[-1])
))
def __call__(self):
result = gammatone.filters.make_erb_filters(self.fs, self.cfs)
assert np.allclose(result, self.expected, rtol=1e-6, atol=1e-12)
if __name__ == '__main__':
nose.main()
| [
"pkg_resources.resource_stream",
"nose.main",
"numpy.allclose"
] | [((1872, 1883), 'nose.main', 'nose.main', ([], {}), '()\n', (1881, 1883), False, 'import nose\n'), ((620, 664), 'pkg_resources.resource_stream', 'resource_stream', (['__name__', 'REF_DATA_FILENAME'], {}), '(__name__, REF_DATA_FILENAME)\n', (635, 664), False, 'from pkg_resources import resource_stream\n'), ((1782, 1840), 'numpy.allclose', 'np.allclose', (['result', 'self.expected'], {'rtol': '(1e-06)', 'atol': '(1e-12)'}), '(result, self.expected, rtol=1e-06, atol=1e-12)\n', (1793, 1840), True, 'import numpy as np\n')] |
import numpy as np
import tile
import imageio
CONTOUR_REGION_SIZE = 5
colormap = np.array(
[
[ 47, 0, 135, ],
[ 50, 0, 138, ],
[ 53, 0, 140, ],
[ 56, 0, 142, ],
[ 59, 0, 144, ],
[ 62, 0, 146, ],
[ 65, 0, 148, ],
[ 68, 0, 150, ],
[ 70, 0, 151, ],
[ 73, 0, 153, ],
[ 76, 0, 154, ],
[ 79, 0, 156, ],
[ 81, 0, 157, ],
[ 84, 0, 158, ],
[ 87, 0, 160, ],
[ 89, 0, 161, ],
[ 92, 0, 162, ],
[ 95, 0, 163, ],
[ 98, 0, 164, ],
[100, 0, 165, ],
[103, 0, 166, ],
[106, 0, 166, ],
[109, 0, 167, ],
[111, 0, 168, ],
[114, 0, 168, ],
[117, 0, 168, ],
[119, 0, 169, ],
[122, 0, 169, ],
[125, 0, 169, ],
[127, 0, 169, ],
[130, 0, 169, ],
[133, 0, 169, ],
[135, 0, 168, ],
[138, 0, 168, ],
[140, 0, 167, ],
[143, 0, 167, ],
[145, 0, 166, ],
[148, 0, 165, ],
[150, 0, 164, ],
[152, 0, 163, ],
[155, 0, 162, ],
[157, 3, 161, ],
[160, 7, 159, ],
[162, 11, 158, ],
[164, 15, 156, ],
[166, 19, 155, ],
[168, 22, 153, ],
[171, 26, 151, ],
[173, 29, 150, ],
[175, 32, 148, ],
[177, 35, 146, ],
[179, 37, 144, ],
[181, 40, 142, ],
[183, 43, 141, ],
[185, 45, 139, ],
[187, 48, 137, ],
[189, 51, 135, ],
[190, 53, 133, ],
[192, 56, 131, ],
[194, 58, 129, ],
[196, 61, 127, ],
[198, 63, 125, ],
[199, 66, 124, ],
[201, 68, 122, ],
[203, 70, 120, ],
[204, 73, 118, ],
[206, 75, 116, ],
[208, 78, 114, ],
[209, 80, 113, ],
[211, 82, 111, ],
[212, 85, 109, ],
[214, 87, 107, ],
[215, 90, 106, ],
[217, 92, 104, ],
[218, 94, 102, ],
[220, 97, 100, ],
[221, 99, 99, ],
[222, 102, 97, ],
[224, 104, 95, ],
[225, 107, 93, ],
[226, 109, 92, ],
[228, 112, 90, ],
[229, 114, 88, ],
[230, 117, 86, ],
[231, 119, 85, ],
[232, 122, 83, ],
[234, 124, 81, ],
[235, 127, 79, ],
[236, 129, 77, ],
[237, 132, 76, ],
[238, 135, 74, ],
[239, 137, 72, ],
[239, 140, 70, ],
[240, 143, 68, ],
[241, 146, 67, ],
[242, 148, 65, ],
[242, 151, 63, ],
[243, 154, 61, ],
[244, 157, 59, ],
[244, 160, 57, ],
[245, 163, 55, ],
[245, 166, 54, ],
[245, 168, 52, ],
[246, 171, 50, ],
[246, 174, 48, ],
[246, 178, 46, ],
[246, 181, 44, ],
[246, 184, 43, ],
[246, 187, 41, ],
[246, 190, 39, ],
[245, 193, 37, ],
[245, 196, 36, ],
[245, 200, 34, ],
[244, 203, 33, ],
[244, 206, 32, ],
[243, 209, 31, ],
[242, 213, 30, ],
[241, 216, 30, ],
[240, 219, 29, ],
[239, 223, 29, ],
[238, 226, 29, ],
[237, 230, 29, ],
[235, 233, 30, ],
[234, 237, 30, ],
[232, 240, 30, ],
[231, 244, 29, ],
[229, 247, 27, ],
[228, 250, 21, ],
]
).astype(np.uint8)
BLACK = np.array([0, 0, 0]).astype(np.uint8)
GRAY = np.array([72, 72, 72]).astype(np.uint8)
WHITE = np.array([255, 255, 255]).astype(np.uint8)
# Corners are ordered (MSb first)
# 0 1
# +---+
# | |
# +---+
# 3 2
contours = np.zeros((16, CONTOUR_REGION_SIZE, CONTOUR_REGION_SIZE))
contours[0] = np.array( # 0000 (0)
[
[0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, ],
]
)
contours[1] = np.array( # 0001 (1)
[
[0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, ],
[1, 0, 0, 0, 0, ],
[0, 1, 0, 0, 0, ],
[0, 0, 1, 0, 0, ],
]
)
contours[2] = np.array( # 0010 (2)
[
[0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 1, ],
[0, 0, 0, 1, 0, ],
[0, 0, 1, 0, 0, ],
]
)
contours[3] = np.array( # 0011 (3)
[
[0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, ],
[1, 1, 1, 1, 1, ],
[0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, ],
]
)
contours[4] = np.array( # 0100 (4)
[
[0, 0, 1, 0, 0, ],
[0, 0, 0, 1, 0, ],
[0, 0, 0, 0, 1, ],
[0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, ],
]
)
contours[5] = np.array( # 0101 (5)
[
[0, 0, 1, 0, 0, ],
[0, 1, 0, 0, 0, ],
[1, 0, 0, 0, 1, ],
[0, 0, 0, 1, 0, ],
[0, 0, 1, 0, 0, ],
]
)
contours[6] = np.array( # 0110 (6)
[
[0, 0, 1, 0, 0, ],
[0, 0, 1, 0, 0, ],
[0, 0, 1, 0, 0, ],
[0, 0, 1, 0, 0, ],
[0, 0, 1, 0, 0, ],
]
)
contours[7] = np.array( # 0111 (7)
[
[0, 0, 1, 0, 0, ],
[0, 1, 0, 0, 0, ],
[1, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, ],
[0, 0, 0, 0, 0, ],
]
)
contours[8] = contours[7] # 1000 (8)
contours[9] = contours[6] # 1001 (9)
contours[10] = np.array( # 1010 (10)
[
[0, 0, 1, 0, 0, ],
[0, 0, 0, 1, 0, ],
[1, 0, 0, 0, 1, ],
[0, 1, 0, 0, 0, ],
[0, 0, 1, 0, 0, ],
]
)
contours[11] = contours[4] # 1011 (11)
contours[12] = contours[3] # 1100 (12)
contours[13] = contours[2] # 1101 (13)
contours[14] = contours[1] # 1110 (14)
contours[15] = contours[0] # 1111 (15)
def get_case(thresholded_region):
i, j = [0, 0, 1, 1], [0, 1, 1, 0]
bits = thresholded_region[i, j].astype(np.uint8)
return np.packbits(bits)[0] >> 4
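# Worked example (illustrative): corners (TL, TR, BR, BL) = (1, 0, 0, 1) pack
# into the byte 0b10010000 == 144; shifting right by 4 yields case 9, whose
# stencil above is the vertical line separating the high left column from the
# low right column.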
if __name__ == '__main__':
corner1 = (46.915952, -121.818551)
corner2 = (46.792267, -121.687303)
zoom = 12
tmap = tile.TileMap(corner1, corner2, zoom)
num_levels = 50
min_elevation = tmap.elevation.min()
max_elevation = tmap.elevation.max()
elevations = np.linspace(min_elevation, max_elevation, num_levels + 2)[1:-1] # Drop first and last
color_indices = np.linspace(0, len(colormap) - 1, num_levels).round().astype(np.int_)
colors = colormap[color_indices]
nrows, ncols = tmap.elevation.shape
image = np.ones((nrows * CONTOUR_REGION_SIZE, ncols * CONTOUR_REGION_SIZE, 3)).astype(np.uint8) * WHITE
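    # Marching squares: for each contour level, threshold the elevation grid and
    # stamp the matching 5x5 edge stencil into every 2x2 cell. Note that the
    # per-level `color` from the zip is computed but the stencil is drawn in BLACK.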
for elev, color in zip(elevations, colors):
thresholded = tmap.elevation >= elev
for i in range(nrows - 1):
for j in range(ncols - 1):
ii = i * CONTOUR_REGION_SIZE
jj = j * CONTOUR_REGION_SIZE
thresholded_region = thresholded[i:i+2, j:j+2]
case = get_case(thresholded_region)
threshi, threshj = np.where(contours[case] == 1)
threshi += ii
threshj += jj
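                # every contour level is drawn in BLACK below; the per-level
                # `color` from the zip above is currently unused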
image[threshi, threshj] = BLACK
print(f'Done with elevation {elev}', flush=True)
imageio.imwrite('contour.png', image) | [
"tile.TileMap",
"numpy.zeros",
"numpy.packbits",
"numpy.ones",
"numpy.where",
"numpy.array",
"numpy.linspace",
"imageio.imwrite"
] | [((3820, 3876), 'numpy.zeros', 'np.zeros', (['(16, CONTOUR_REGION_SIZE, CONTOUR_REGION_SIZE)'], {}), '((16, CONTOUR_REGION_SIZE, CONTOUR_REGION_SIZE))\n', (3828, 3876), True, 'import numpy as np\n'), ((3891, 3991), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0\n ], [0, 0, 0, 0, 0]])\n', (3899, 3991), True, 'import numpy as np\n'), ((4076, 4176), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0,\n 1, 0, 0]]'], {}), '([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 0, 0, 0, 0], [0, 1, 0, 0, 0\n ], [0, 0, 1, 0, 0]])\n', (4084, 4176), True, 'import numpy as np\n'), ((4261, 4361), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 1], [0, 0, 0, 1, 0], [0, 0,\n 1, 0, 0]]'], {}), '([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 1], [0, 0, 0, 1, 0\n ], [0, 0, 1, 0, 0]])\n', (4269, 4361), True, 'import numpy as np\n'), ((4446, 4546), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [0, 0, 0, 0, 0\n ], [0, 0, 0, 0, 0]])\n', (4454, 4546), True, 'import numpy as np\n'), ((4631, 4731), 'numpy.array', 'np.array', (['[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0]]'], {}), '([[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 0\n ], [0, 0, 0, 0, 0]])\n', (4639, 4731), True, 'import numpy as np\n'), ((4816, 4916), 'numpy.array', 'np.array', (['[[0, 0, 1, 0, 0], [0, 1, 0, 0, 0], [1, 0, 0, 0, 1], [0, 0, 0, 1, 0], [0, 0,\n 1, 0, 0]]'], {}), '([[0, 0, 1, 0, 0], [0, 1, 0, 0, 0], [1, 0, 0, 0, 1], [0, 0, 0, 1, 0\n ], [0, 0, 1, 0, 0]])\n', (4824, 4916), True, 'import numpy as np\n'), ((5001, 5101), 'numpy.array', 'np.array', (['[[0, 0, 1, 0, 0], [0, 0, 1, 0, 0], [0, 0, 1, 0, 0], [0, 0, 1, 0, 0], [0, 0,\n 1, 0, 0]]'], {}), '([[0, 0, 1, 0, 0], [0, 0, 1, 0, 0], [0, 0, 1, 0, 0], [0, 0, 1, 0, 0\n ], [0, 0, 1, 0, 0]])\n', (5009, 5101), True, 'import numpy as np\n'), ((5186, 5286), 'numpy.array', 'np.array', (['[[0, 0, 1, 0, 0], [0, 1, 0, 0, 0], [1, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0]]'], {}), '([[0, 0, 1, 0, 0], [0, 1, 0, 0, 0], [1, 0, 0, 0, 0], [0, 0, 0, 0, 0\n ], [0, 0, 0, 0, 0]])\n', (5194, 5286), True, 'import numpy as np\n'), ((5448, 5548), 'numpy.array', 'np.array', (['[[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [1, 0, 0, 0, 1], [0, 1, 0, 0, 0], [0, 0,\n 1, 0, 0]]'], {}), '([[0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [1, 0, 0, 0, 1], [0, 1, 0, 0, 0\n ], [0, 0, 1, 0, 0]])\n', (5456, 5548), True, 'import numpy as np\n'), ((6116, 6152), 'tile.TileMap', 'tile.TileMap', (['corner1', 'corner2', 'zoom'], {}), '(corner1, corner2, zoom)\n', (6128, 6152), False, 'import tile\n'), ((7246, 7283), 'imageio.imwrite', 'imageio.imwrite', (['"""contour.png"""', 'image'], {}), "('contour.png', image)\n", (7261, 7283), False, 'import imageio\n'), ((83, 2201), 'numpy.array', 'np.array', (['[[47, 0, 135], [50, 0, 138], [53, 0, 140], [56, 0, 142], [59, 0, 144], [62,\n 0, 146], [65, 0, 148], [68, 0, 150], [70, 0, 151], [73, 0, 153], [76, 0,\n 154], [79, 0, 156], [81, 0, 157], [84, 0, 158], [87, 0, 160], [89, 0, \n 161], [92, 0, 162], [95, 0, 163], [98, 0, 164], [100, 0, 165], [103, 0,\n 166], [106, 0, 166], [109, 0, 167], [111, 0, 168], [114, 0, 168], [117,\n 0, 168], [119, 0, 169], [122, 0, 169], [125, 0, 169], [127, 0, 169], [\n 130, 0, 
169], [133, 0, 169], [135, 0, 168], [138, 0, 168], [140, 0, 167\n ], [143, 0, 167], [145, 0, 166], [148, 0, 165], [150, 0, 164], [152, 0,\n 163], [155, 0, 162], [157, 3, 161], [160, 7, 159], [162, 11, 158], [164,\n 15, 156], [166, 19, 155], [168, 22, 153], [171, 26, 151], [173, 29, 150\n ], [175, 32, 148], [177, 35, 146], [179, 37, 144], [181, 40, 142], [183,\n 43, 141], [185, 45, 139], [187, 48, 137], [189, 51, 135], [190, 53, 133\n ], [192, 56, 131], [194, 58, 129], [196, 61, 127], [198, 63, 125], [199,\n 66, 124], [201, 68, 122], [203, 70, 120], [204, 73, 118], [206, 75, 116\n ], [208, 78, 114], [209, 80, 113], [211, 82, 111], [212, 85, 109], [214,\n 87, 107], [215, 90, 106], [217, 92, 104], [218, 94, 102], [220, 97, 100\n ], [221, 99, 99], [222, 102, 97], [224, 104, 95], [225, 107, 93], [226,\n 109, 92], [228, 112, 90], [229, 114, 88], [230, 117, 86], [231, 119, 85\n ], [232, 122, 83], [234, 124, 81], [235, 127, 79], [236, 129, 77], [237,\n 132, 76], [238, 135, 74], [239, 137, 72], [239, 140, 70], [240, 143, 68\n ], [241, 146, 67], [242, 148, 65], [242, 151, 63], [243, 154, 61], [244,\n 157, 59], [244, 160, 57], [245, 163, 55], [245, 166, 54], [245, 168, 52\n ], [246, 171, 50], [246, 174, 48], [246, 178, 46], [246, 181, 44], [246,\n 184, 43], [246, 187, 41], [246, 190, 39], [245, 193, 37], [245, 196, 36\n ], [245, 200, 34], [244, 203, 33], [244, 206, 32], [243, 209, 31], [242,\n 213, 30], [241, 216, 30], [240, 219, 29], [239, 223, 29], [238, 226, 29\n ], [237, 230, 29], [235, 233, 30], [234, 237, 30], [232, 240, 30], [231,\n 244, 29], [229, 247, 27], [228, 250, 21]]'], {}), '([[47, 0, 135], [50, 0, 138], [53, 0, 140], [56, 0, 142], [59, 0, \n 144], [62, 0, 146], [65, 0, 148], [68, 0, 150], [70, 0, 151], [73, 0, \n 153], [76, 0, 154], [79, 0, 156], [81, 0, 157], [84, 0, 158], [87, 0, \n 160], [89, 0, 161], [92, 0, 162], [95, 0, 163], [98, 0, 164], [100, 0, \n 165], [103, 0, 166], [106, 0, 166], [109, 0, 167], [111, 0, 168], [114,\n 0, 168], [117, 0, 168], [119, 0, 169], [122, 0, 169], [125, 0, 169], [\n 127, 0, 169], [130, 0, 169], [133, 0, 169], [135, 0, 168], [138, 0, 168\n ], [140, 0, 167], [143, 0, 167], [145, 0, 166], [148, 0, 165], [150, 0,\n 164], [152, 0, 163], [155, 0, 162], [157, 3, 161], [160, 7, 159], [162,\n 11, 158], [164, 15, 156], [166, 19, 155], [168, 22, 153], [171, 26, 151\n ], [173, 29, 150], [175, 32, 148], [177, 35, 146], [179, 37, 144], [181,\n 40, 142], [183, 43, 141], [185, 45, 139], [187, 48, 137], [189, 51, 135\n ], [190, 53, 133], [192, 56, 131], [194, 58, 129], [196, 61, 127], [198,\n 63, 125], [199, 66, 124], [201, 68, 122], [203, 70, 120], [204, 73, 118\n ], [206, 75, 116], [208, 78, 114], [209, 80, 113], [211, 82, 111], [212,\n 85, 109], [214, 87, 107], [215, 90, 106], [217, 92, 104], [218, 94, 102\n ], [220, 97, 100], [221, 99, 99], [222, 102, 97], [224, 104, 95], [225,\n 107, 93], [226, 109, 92], [228, 112, 90], [229, 114, 88], [230, 117, 86\n ], [231, 119, 85], [232, 122, 83], [234, 124, 81], [235, 127, 79], [236,\n 129, 77], [237, 132, 76], [238, 135, 74], [239, 137, 72], [239, 140, 70\n ], [240, 143, 68], [241, 146, 67], [242, 148, 65], [242, 151, 63], [243,\n 154, 61], [244, 157, 59], [244, 160, 57], [245, 163, 55], [245, 166, 54\n ], [245, 168, 52], [246, 171, 50], [246, 174, 48], [246, 178, 46], [246,\n 181, 44], [246, 184, 43], [246, 187, 41], [246, 190, 39], [245, 193, 37\n ], [245, 196, 36], [245, 200, 34], [244, 203, 33], [244, 206, 32], [243,\n 209, 31], [242, 213, 30], [241, 216, 30], [240, 219, 29], [239, 223, 29\n ], [238, 226, 29], [237, 
230, 29], [235, 233, 30], [234, 237, 30], [232,\n 240, 30], [231, 244, 29], [229, 247, 27], [228, 250, 21]])\n', (91, 2201), True, 'import numpy as np\n'), ((3589, 3608), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (3597, 3608), True, 'import numpy as np\n'), ((3633, 3655), 'numpy.array', 'np.array', (['[72, 72, 72]'], {}), '([72, 72, 72])\n', (3641, 3655), True, 'import numpy as np\n'), ((3681, 3706), 'numpy.array', 'np.array', (['[255, 255, 255]'], {}), '([255, 255, 255])\n', (3689, 3706), True, 'import numpy as np\n'), ((6273, 6330), 'numpy.linspace', 'np.linspace', (['min_elevation', 'max_elevation', '(num_levels + 2)'], {}), '(min_elevation, max_elevation, num_levels + 2)\n', (6284, 6330), True, 'import numpy as np\n'), ((5957, 5974), 'numpy.packbits', 'np.packbits', (['bits'], {}), '(bits)\n', (5968, 5974), True, 'import numpy as np\n'), ((6541, 6611), 'numpy.ones', 'np.ones', (['(nrows * CONTOUR_REGION_SIZE, ncols * CONTOUR_REGION_SIZE, 3)'], {}), '((nrows * CONTOUR_REGION_SIZE, ncols * CONTOUR_REGION_SIZE, 3))\n', (6548, 6611), True, 'import numpy as np\n'), ((7046, 7075), 'numpy.where', 'np.where', (['(contours[case] == 1)'], {}), '(contours[case] == 1)\n', (7054, 7075), True, 'import numpy as np\n')] |
# general
# =======
#
# ... general purpose tasks and helper functions
from affine import Affine
import geopandas as gp
import luigi
import math
import numpy as np
import os
from rasterio import features
import xarray as xr
# helper functions ------------------------------------------------
def bbox_to_latlon(bbox, res):
"""Create coords for netcdf file.
bbox format: [c1_lon, c1_lat, c2_lon, c2_lat]"""
lon1, lat1, lon2, lat2 = bbox
# flip bbox coords if in wrong order
lat1, lat2 = (lat1, lat2) if lat1 < lat2 else (lat2, lat1)
lon1, lon2 = (lon1, lon2) if lon1 < lon2 else (lon2, lon1)
lons = np.arange(lon1 + res/2, lon2, res)
lats = np.arange(lat1 + res/2, lat2, res)
return (lats, lons)
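# Quick sanity check (illustrative, made-up bbox): cell centers sit half a
# pixel inside the box, and a flipped corner order is handled.
_chk_lats, _chk_lons = bbox_to_latlon([12.0, 41.0, 10.0, 40.0], 0.5)
assert list(_chk_lats) == [40.25, 40.75]
assert list(_chk_lons) == [10.25, 10.75, 11.25, 11.75]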
def transform_from_latlon(lat, lon):
lat = np.asarray(lat)
lon = np.asarray(lon)
trans = Affine.translation(lon[0], lat[0])
scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
return trans * scale
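# Quick sanity check (illustrative, made-up grid): on a 0.5-degree grid
# anchored at (lat=40.25, lon=10.25), pixel (col=2, row=1) maps to
# (lon=11.25, lat=40.75).
assert transform_from_latlon(np.arange(40.25, 42.0, 0.5),
                             np.arange(10.25, 12.0, 0.5)) * (2, 1) == (11.25, 40.75)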
def calc_area(lat, pixeldegree):
area_km2 = (110.45 * pixeldegree) * (111.1944 * pixeldegree) * math.cos(lat * (math.pi / 180.0))
area_ha = area_km2 * 100
area_m2 = area_km2 * 1000000
# calculate gridcell areas THOMAS
# mean radius of the earth (km)
radius_Earth = 6367.425
    lat_upper = lat + pixeldegree / 2.0
    lat_lower = lat - pixeldegree / 2.0
    h1 = radius_Earth * math.sin(lat_upper * math.pi / 180.0)
    h2 = radius_Earth * math.sin(lat_lower * math.pi / 180.0)
    area_band = 2.0 * math.pi * radius_Earth * (h1 - h2)  # area of this latitude band
area_km2_TH = area_band * (pixeldegree / 360.0)
return area_ha
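# Quick sanity check (illustrative): at the equator cos(0) == 1, so the
# returned value is just the flat-cell area converted to hectares.
assert math.isclose(calc_area(0.0, 1.0), 110.45 * 111.1944 * 100)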
def add_attributes(da):
da['lat'].attrs['long_name'] = 'latitude'
da['lat'].attrs['units'] = 'degrees_north'
da['lon'].attrs['long_name'] = 'longitude'
da['lon'].attrs['units'] = 'degrees_east'
return da
# general purpose tasks ---------------------------------------------
class MakeDirectory(luigi.Task):
path = luigi.Parameter()
def output(self):
return luigi.LocalTarget(self.path)
def run(self):
os.makedirs(self.path)
class RasterizeShapefile(luigi.Task):
shp_file = luigi.Parameter()
attr = luigi.Parameter(default="NONE")
bbox = luigi.ListParameter()
res = luigi.FloatParameter()
name = luigi.Parameter(default="variable")
file_path = luigi.Parameter(default="data.nc")
def run(self):
fill = np.nan
# load and potentially reproject shapefile
shp = gp.read_file(self.shp_file)
        shp = shp.to_crs('EPSG:4326')
# use existing geometry of raster file to use
lats_, lons_ = bbox_to_latlon(self.bbox, self.res)
# create cin variable
data = np.ones((len(lats_), len(lons_)))
da = xr.DataArray(data, coords=(lats_, lons_), dims=('lat', 'lon'), name=self.name)
da.attrs['units'] = '-'
# create mask or burn attr feature values
if self.attr == 'NONE':
shapes = [(feature['geometry'], 1) for feature in shp.iterfeatures()]
else:
shapes = ((geom, value) for geom, value in zip(shp.geometry, shp[self.attr]))
raster_data = features.rasterize(shapes, out_shape=data.shape, fill=fill,
transform=transform_from_latlon(da.coords['lat'], da.coords['lon']))
da = xr.DataArray(raster_data, coords=da.coords, dims=('lat', 'lon'), name=self.name)
da = add_attributes(da)
da.to_dataset().to_netcdf(self.output().path, format='NETCDF4_CLASSIC')
def output(self):
return luigi.LocalTarget(self.file_path) | [
"os.makedirs",
"luigi.FloatParameter",
"numpy.asarray",
"affine.Affine.translation",
"affine.Affine.scale",
"math.sin",
"numpy.arange",
"math.cos",
"luigi.LocalTarget",
"xarray.DataArray",
"luigi.Parameter",
"luigi.ListParameter",
"geopandas.read_file"
] | [((634, 670), 'numpy.arange', 'np.arange', (['(lon1 + res / 2)', 'lon2', 'res'], {}), '(lon1 + res / 2, lon2, res)\n', (643, 670), True, 'import numpy as np\n'), ((680, 716), 'numpy.arange', 'np.arange', (['(lat1 + res / 2)', 'lat2', 'res'], {}), '(lat1 + res / 2, lat2, res)\n', (689, 716), True, 'import numpy as np\n'), ((788, 803), 'numpy.asarray', 'np.asarray', (['lat'], {}), '(lat)\n', (798, 803), True, 'import numpy as np\n'), ((814, 829), 'numpy.asarray', 'np.asarray', (['lon'], {}), '(lon)\n', (824, 829), True, 'import numpy as np\n'), ((842, 876), 'affine.Affine.translation', 'Affine.translation', (['lon[0]', 'lat[0]'], {}), '(lon[0], lat[0])\n', (860, 876), False, 'from affine import Affine\n'), ((889, 935), 'affine.Affine.scale', 'Affine.scale', (['(lon[1] - lon[0])', '(lat[1] - lat[0])'], {}), '(lon[1] - lon[0], lat[1] - lat[0])\n', (901, 935), False, 'from affine import Affine\n'), ((1972, 1989), 'luigi.Parameter', 'luigi.Parameter', ([], {}), '()\n', (1987, 1989), False, 'import luigi\n'), ((2162, 2179), 'luigi.Parameter', 'luigi.Parameter', ([], {}), '()\n', (2177, 2179), False, 'import luigi\n'), ((2195, 2226), 'luigi.Parameter', 'luigi.Parameter', ([], {'default': '"""NONE"""'}), "(default='NONE')\n", (2210, 2226), False, 'import luigi\n'), ((2242, 2263), 'luigi.ListParameter', 'luigi.ListParameter', ([], {}), '()\n', (2261, 2263), False, 'import luigi\n'), ((2279, 2301), 'luigi.FloatParameter', 'luigi.FloatParameter', ([], {}), '()\n', (2299, 2301), False, 'import luigi\n'), ((2317, 2352), 'luigi.Parameter', 'luigi.Parameter', ([], {'default': '"""variable"""'}), "(default='variable')\n", (2332, 2352), False, 'import luigi\n'), ((2369, 2403), 'luigi.Parameter', 'luigi.Parameter', ([], {'default': '"""data.nc"""'}), "(default='data.nc')\n", (2384, 2403), False, 'import luigi\n'), ((1063, 1096), 'math.cos', 'math.cos', (['(lat * (math.pi / 180.0))'], {}), '(lat * (math.pi / 180.0))\n', (1071, 1096), False, 'import math\n'), ((1369, 1406), 'math.sin', 'math.sin', (['(lat_upper * math.pi / 180.0)'], {}), '(lat_upper * math.pi / 180.0)\n', (1377, 1406), False, 'import math\n'), ((1432, 1469), 'math.sin', 'math.sin', (['(lat_lower * math.pi / 180.0)'], {}), '(lat_lower * math.pi / 180.0)\n', (1440, 1469), False, 'import math\n'), ((2028, 2056), 'luigi.LocalTarget', 'luigi.LocalTarget', (['self.path'], {}), '(self.path)\n', (2045, 2056), False, 'import luigi\n'), ((2085, 2107), 'os.makedirs', 'os.makedirs', (['self.path'], {}), '(self.path)\n', (2096, 2107), False, 'import os\n'), ((2516, 2543), 'geopandas.read_file', 'gp.read_file', (['self.shp_file'], {}), '(self.shp_file)\n', (2528, 2543), True, 'import geopandas as gp\n'), ((2799, 2877), 'xarray.DataArray', 'xr.DataArray', (['data'], {'coords': '(lats_, lons_)', 'dims': "('lat', 'lon')", 'name': 'self.name'}), "(data, coords=(lats_, lons_), dims=('lat', 'lon'), name=self.name)\n", (2811, 2877), True, 'import xarray as xr\n'), ((3400, 3485), 'xarray.DataArray', 'xr.DataArray', (['raster_data'], {'coords': 'da.coords', 'dims': "('lat', 'lon')", 'name': 'self.name'}), "(raster_data, coords=da.coords, dims=('lat', 'lon'), name=self.name\n )\n", (3412, 3485), True, 'import xarray as xr\n'), ((3631, 3664), 'luigi.LocalTarget', 'luigi.LocalTarget', (['self.file_path'], {}), '(self.file_path)\n', (3648, 3664), False, 'import luigi\n')] |
import torch
import numpy as np
from .exp import VaeSmExperiment
import scanpy as sc
import pandas as pd
def define_exp(
x_fname, s_fname,
model_params = {
'x_dim': 100,
'z_dim': 10,
'enc_z_h_dim': 50, 'enc_d_h_dim': 50, 'dec_z_h_dim': 50,
'num_enc_z_layers': 2, 'num_enc_d_layers': 2,
'num_dec_z_layers': 2
},
lr=0.001, val_ratio=0.01, test_ratio=0.01, batch_ratio=0.05, num_workers=2, device='auto'):
x = torch.tensor(np.loadtxt(x_fname))
s = torch.tensor(np.loadtxt(s_fname))
model_params['x_dim'] = x.size()[1]
model_params['s_dim'] = s.size()[1]
x_batch_size = int(x.size()[0] * batch_ratio)
s_batch_size = int(s.size()[0] * batch_ratio)
    vaesm_exp = VaeSmExperiment(model_params, lr, x, s, test_ratio, x_batch_size, s_batch_size, num_workers, validation_ratio=val_ratio, device=device)
return(vaesm_exp)
# helper functions
def select_slide(adata, s, s_col='sample', s_sub=None):
    if s_sub is None:
slide = adata[adata.obs[s_col].isin([s]), :]
else:
slide = adata[adata.obs[s_col].isin([s_sub]), :]
s_keys = list(slide.uns['spatial'].keys())
s_spatial = s_keys[0]
slide.uns['spatial'] = {s_spatial: slide.uns['spatial'][s]}
return slide
def conduct_umap(adata, key):
sc.pp.neighbors(adata, use_rep=key, n_neighbors=30)
sc.tl.umap(adata)
return(adata)
def convert2array(mat):
if isinstance(mat, np.ndarray):
return(mat)
else:
return(mat.toarray())
def plot_mapped_sc(sc_adata, mapping, ax):
embed = sc_adata.obsm['X_umap']
ax.scatter(embed[:, 0], embed[:, 1], c='gray', s=5)
ax.scatter(embed[:, 0], embed[:, 1], c=mapping, s=30 * mapping / np.max(mapping))
def calculate_roc_df(pred, target):
stats_df = pd.DataFrame({'pred': pred, 'target': target})
stats_df['pos_target'] = (stats_df['target'] > 0).astype(int)
stats_df['neg_target'] = (stats_df['target'] == 0).astype(int)
stats_df['pos_target'] = stats_df['pos_target'] / stats_df['pos_target'].sum()
stats_df['neg_target'] = stats_df['neg_target'] / stats_df['neg_target'].sum()
stats_df = stats_df.groupby('pred', as_index=False).sum()
stats_df = stats_df.sort_values('pred', ascending=False)
stats_df['tpr'] = np.cumsum(stats_df['pos_target'])
stats_df['fpr'] = np.cumsum(stats_df['neg_target'])
return(stats_df)
def calculate_auc(pred, target):
stats_df = pd.DataFrame({'pred': pred, 'target': target})
stats_df['norm_target'] = stats_df['target'].div(stats_df['target'].sum())
stats_df['ompr'] = stats_df['pred'].rank(method='average').div(stats_df.shape[0])
auc = (stats_df['norm_target'] * stats_df['ompr']).sum()
return(auc)
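# Quick sanity check (illustrative, made-up scores): all target mass on the
# top-ranked prediction gives the maximum score of 1.0.
assert calculate_auc(np.array([1.0, 2.0, 3.0, 4.0]), np.array([0.0, 0.0, 0.0, 1.0])) == 1.0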
def calculate_recall(pred, target, q):
thresh = np.quantile(pred, 1 - q)
target = target / np.sum(target)
recall = np.sum(target[pred > thresh])
return(recall)
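# Quick sanity check (illustrative, made-up values): with q=0.5 the threshold
# is the median of pred, so recall is the target mass in the top half.
assert calculate_recall(np.array([0.1, 0.2, 0.3, 0.4]), np.array([0.0, 1.0, 1.0, 0.0]), 0.5) == 0.5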
def process_each_ensembl(symbol, info_df):
val = info_df.loc[symbol]['ensembl']
if type(val) == dict:
return([val['gene']])
elif type(val) == list:
ensembls = [d['gene'] for d in val]
return(ensembls)
else:
return([])
def cut_unmapped(adata, q):
contrib_vec = np.sort(adata.obsm['map2sp'].sum(axis=1))
cum_contrib_vec = np.cumsum(contrib_vec)
cum_contrib_vec = cum_contrib_vec / np.max(cum_contrib_vec)
map_thresh = np.max(contrib_vec[cum_contrib_vec < q])
adata = adata[adata.obsm['map2sp'].sum(axis=1) > map_thresh]
return(adata)
def make_celltype_coloc(sc_adata, celltypes, celltype_label, thresh=2.0):
mapped_cells = cut_unmapped(sc_adata, 0.05).obs_names
map_vec = sc_adata.obs_names.isin(mapped_cells).astype(int)
p_mat = sc_adata.obsm['map2sp'] / np.sum(sc_adata.obsm['map2sp'], axis=1).reshape((-1, 1))
p_mat = p_mat * map_vec.reshape((-1, 1))
coloc_mat = p_mat @ p_mat.transpose()
coloc_mat = coloc_mat * p_mat.shape[1]
bcoloc_mat = (coloc_mat > thresh).astype(int)
celltype_coloc_props = np.array(
[[
np.sum(coloc_mat[sc_adata.obs[celltype_label] == celltype1][:, sc_adata.obs[celltype_label] == celltype2]) /
(np.sum(sc_adata.obs[celltype_label] == celltype1) * np.sum(sc_adata.obs[celltype_label] == celltype2))
for celltype2 in celltypes]
for celltype1 in celltypes])
return(celltype_coloc_props)
def make_df_col_category(df, col, categories):
df[col] = pd.Categorical(df[col], categories=categories)
return(df)
def categolize_method(methods):
methods_cat = pd.Categorical(methods, categories=['scoloc', 'cell2loc', 'tangram'])
return(methods_cat)
def make_edge_df(sc_adata, large_celltype_label, sub_sample=True, tot_size=5000, exclude_reverse=True, edge_thresh=1):
    if sub_sample:
        sub_sc_adata = sc_adata[np.random.choice(sc_adata.obs_names, tot_size, replace=False)]
else:
sub_sc_adata = sc_adata
p_mat = sub_sc_adata.obsm['map2sp'] / np.sum(sub_sc_adata.obsm['map2sp'], axis=1).reshape((-1, 1))
coloc_mat = p_mat @ p_mat.transpose()
coloc_mat = np.log2(coloc_mat) + np.log2(p_mat.shape[1])
thresh = edge_thresh
## thresh = np.quantile(coloc_mat, 0.8)
high_coloc_index = np.argwhere(coloc_mat > thresh)
if exclude_reverse:
high_coloc_index = high_coloc_index[high_coloc_index[:, 0] < high_coloc_index[:, 1]]
ocell1_types = sub_sc_adata.obs[large_celltype_label].iloc[high_coloc_index[:, 0]].values
ocell2_types = sub_sc_adata.obs[large_celltype_label].iloc[high_coloc_index[:, 1]].values
high_coloc_index = high_coloc_index[ocell1_types != ocell2_types]
cell1_types = ocell1_types[ocell1_types != ocell2_types]
cell2_types = ocell2_types[ocell1_types != ocell2_types]
edge_idx = np.arange(cell1_types.shape[0])
orig_edge_df = pd.DataFrame({'edge': edge_idx, 'cell1': sub_sc_adata.obs_names[high_coloc_index[:, 0]], 'cell2': sub_sc_adata.obs_names[high_coloc_index[:, 1]], 'cell1_type': cell1_types, 'cell2_type': cell2_types}, index=edge_idx)
return(orig_edge_df)
def calc_signature_score(adata, orig_genes, label, min_count=100):
genes = orig_genes[np.isin(orig_genes, adata.var_names)]
tot_vec = np.array(adata[:, genes].layers['count'].sum(axis=0))
genes = genes[tot_vec > min_count]
sc.tl.score_genes(adata, genes, score_name=label)
return(adata)
def trancate_ext_val(vec, q=0.01):
high_val = np.quantile(vec, 1 - q)
vec[vec > high_val] = high_val
return(vec)
def make_count_vec(X, axis):
count_vec = np.array(X.sum(axis=axis)).reshape(-1)
return(count_vec)
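# Quick sanity check (illustrative, tiny dense matrix): axis=0 sums columns.
assert list(make_count_vec(np.array([[1, 2], [3, 4]]), axis=0)) == [4, 6]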
| [
"pandas.DataFrame",
"numpy.isin",
"numpy.quantile",
"scanpy.tl.umap",
"numpy.sum",
"scanpy.tl.score_genes",
"numpy.log2",
"scanpy.pp.neighbors",
"numpy.cumsum",
"numpy.max",
"numpy.arange",
"numpy.loadtxt",
"pandas.Categorical",
"numpy.random.choice",
"numpy.argwhere"
] | [((1334, 1385), 'scanpy.pp.neighbors', 'sc.pp.neighbors', (['adata'], {'use_rep': 'key', 'n_neighbors': '(30)'}), '(adata, use_rep=key, n_neighbors=30)\n', (1349, 1385), True, 'import scanpy as sc\n'), ((1390, 1407), 'scanpy.tl.umap', 'sc.tl.umap', (['adata'], {}), '(adata)\n', (1400, 1407), True, 'import scanpy as sc\n'), ((1822, 1868), 'pandas.DataFrame', 'pd.DataFrame', (["{'pred': pred, 'target': target}"], {}), "({'pred': pred, 'target': target})\n", (1834, 1868), True, 'import pandas as pd\n'), ((2313, 2346), 'numpy.cumsum', 'np.cumsum', (["stats_df['pos_target']"], {}), "(stats_df['pos_target'])\n", (2322, 2346), True, 'import numpy as np\n'), ((2369, 2402), 'numpy.cumsum', 'np.cumsum', (["stats_df['neg_target']"], {}), "(stats_df['neg_target'])\n", (2378, 2402), True, 'import numpy as np\n'), ((2474, 2520), 'pandas.DataFrame', 'pd.DataFrame', (["{'pred': pred, 'target': target}"], {}), "({'pred': pred, 'target': target})\n", (2486, 2520), True, 'import pandas as pd\n'), ((2820, 2844), 'numpy.quantile', 'np.quantile', (['pred', '(1 - q)'], {}), '(pred, 1 - q)\n', (2831, 2844), True, 'import numpy as np\n'), ((2895, 2924), 'numpy.sum', 'np.sum', (['target[pred > thresh]'], {}), '(target[pred > thresh])\n', (2901, 2924), True, 'import numpy as np\n'), ((3332, 3354), 'numpy.cumsum', 'np.cumsum', (['contrib_vec'], {}), '(contrib_vec)\n', (3341, 3354), True, 'import numpy as np\n'), ((3436, 3476), 'numpy.max', 'np.max', (['contrib_vec[cum_contrib_vec < q]'], {}), '(contrib_vec[cum_contrib_vec < q])\n', (3442, 3476), True, 'import numpy as np\n'), ((4496, 4542), 'pandas.Categorical', 'pd.Categorical', (['df[col]'], {'categories': 'categories'}), '(df[col], categories=categories)\n', (4510, 4542), True, 'import pandas as pd\n'), ((4609, 4678), 'pandas.Categorical', 'pd.Categorical', (['methods'], {'categories': "['scoloc', 'cell2loc', 'tangram']"}), "(methods, categories=['scoloc', 'cell2loc', 'tangram'])\n", (4623, 4678), True, 'import pandas as pd\n'), ((5302, 5333), 'numpy.argwhere', 'np.argwhere', (['(coloc_mat > thresh)'], {}), '(coloc_mat > thresh)\n', (5313, 5333), True, 'import numpy as np\n'), ((5846, 5877), 'numpy.arange', 'np.arange', (['cell1_types.shape[0]'], {}), '(cell1_types.shape[0])\n', (5855, 5877), True, 'import numpy as np\n'), ((5897, 6127), 'pandas.DataFrame', 'pd.DataFrame', (["{'edge': edge_idx, 'cell1': sub_sc_adata.obs_names[high_coloc_index[:, 0]],\n 'cell2': sub_sc_adata.obs_names[high_coloc_index[:, 1]], 'cell1_type':\n cell1_types, 'cell2_type': cell2_types}"], {'index': 'edge_idx'}), "({'edge': edge_idx, 'cell1': sub_sc_adata.obs_names[\n high_coloc_index[:, 0]], 'cell2': sub_sc_adata.obs_names[\n high_coloc_index[:, 1]], 'cell1_type': cell1_types, 'cell2_type':\n cell2_types}, index=edge_idx)\n", (5909, 6127), True, 'import pandas as pd\n'), ((6380, 6429), 'scanpy.tl.score_genes', 'sc.tl.score_genes', (['adata', 'genes'], {'score_name': 'label'}), '(adata, genes, score_name=label)\n', (6397, 6429), True, 'import scanpy as sc\n'), ((6500, 6523), 'numpy.quantile', 'np.quantile', (['vec', '(1 - q)'], {}), '(vec, 1 - q)\n', (6511, 6523), True, 'import numpy as np\n'), ((518, 537), 'numpy.loadtxt', 'np.loadtxt', (['x_fname'], {}), '(x_fname)\n', (528, 537), True, 'import numpy as np\n'), ((560, 579), 'numpy.loadtxt', 'np.loadtxt', (['s_fname'], {}), '(s_fname)\n', (570, 579), True, 'import numpy as np\n'), ((2867, 2881), 'numpy.sum', 'np.sum', (['target'], {}), '(target)\n', (2873, 2881), True, 'import numpy as np\n'), ((3395, 3418), 'numpy.max', 'np.max', 
(['cum_contrib_vec'], {}), '(cum_contrib_vec)\n', (3401, 3418), True, 'import numpy as np\n'), ((5165, 5183), 'numpy.log2', 'np.log2', (['coloc_mat'], {}), '(coloc_mat)\n', (5172, 5183), True, 'import numpy as np\n'), ((5186, 5209), 'numpy.log2', 'np.log2', (['p_mat.shape[1]'], {}), '(p_mat.shape[1])\n', (5193, 5209), True, 'import numpy as np\n'), ((6231, 6267), 'numpy.isin', 'np.isin', (['orig_genes', 'adata.var_names'], {}), '(orig_genes, adata.var_names)\n', (6238, 6267), True, 'import numpy as np\n'), ((4899, 4960), 'numpy.random.choice', 'np.random.choice', (['sc_adata.obs_names', 'tot_size'], {'replace': '(False)'}), '(sc_adata.obs_names, tot_size, replace=False)\n', (4915, 4960), True, 'import numpy as np\n'), ((1753, 1768), 'numpy.max', 'np.max', (['mapping'], {}), '(mapping)\n', (1759, 1768), True, 'import numpy as np\n'), ((3800, 3839), 'numpy.sum', 'np.sum', (["sc_adata.obsm['map2sp']"], {'axis': '(1)'}), "(sc_adata.obsm['map2sp'], axis=1)\n", (3806, 3839), True, 'import numpy as np\n'), ((5046, 5089), 'numpy.sum', 'np.sum', (["sub_sc_adata.obsm['map2sp']"], {'axis': '(1)'}), "(sub_sc_adata.obsm['map2sp'], axis=1)\n", (5052, 5089), True, 'import numpy as np\n'), ((4097, 4208), 'numpy.sum', 'np.sum', (['coloc_mat[sc_adata.obs[celltype_label] == celltype1][:, sc_adata.obs[\n celltype_label] == celltype2]'], {}), '(coloc_mat[sc_adata.obs[celltype_label] == celltype1][:, sc_adata.obs\n [celltype_label] == celltype2])\n', (4103, 4208), True, 'import numpy as np\n'), ((4219, 4268), 'numpy.sum', 'np.sum', (['(sc_adata.obs[celltype_label] == celltype1)'], {}), '(sc_adata.obs[celltype_label] == celltype1)\n', (4225, 4268), True, 'import numpy as np\n'), ((4271, 4320), 'numpy.sum', 'np.sum', (['(sc_adata.obs[celltype_label] == celltype2)'], {}), '(sc_adata.obs[celltype_label] == celltype2)\n', (4277, 4320), True, 'import numpy as np\n')] |
import argparse
import cv2
import math
import time
import numpy as np
import util
from config_reader import config_reader
from scipy.ndimage.filters import gaussian_filter
from model import get_testing_model
# find connection in the specified sequence, center 29 is in the position 15
limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
[10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
[1, 16], [16, 18], [3, 17], [6, 18]]
# the middle joints heatmap correpondence
mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22], \
[23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52], \
[55, 56], [37, 38], [45, 46]]
# visualize
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0],
[0, 255, 0], \
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255],
[85, 0, 255], \
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
w = 256
h = 256
size = (256,256)
def process(input_image, params, model_params, pose_scale):
oriImg = input_image # B,G,R order
multiplier = [x * model_params['boxsize'] / oriImg.shape[0] for x in params['scale_search']]
heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
#for m in range(len(multiplier)):
for m in range(1):
scale = multiplier[m]
imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
imageToTest_padded, pad = util.padRightDownCorner(imageToTest, model_params['stride'],
model_params['padValue'])
input_img = np.transpose(np.float32(imageToTest_padded[:,:,:,np.newaxis]), (3,0,1,2)) # required shape (1, width, height, channels)
output_blobs = model.predict(input_img)
# extract outputs, resize, and remove padding
heatmap = np.squeeze(output_blobs[1]) # output 1 is heatmaps
heatmap = cv2.resize(heatmap, (0, 0), fx=model_params['stride'], fy=model_params['stride'],
interpolation=cv2.INTER_CUBIC)
heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
paf = np.squeeze(output_blobs[0]) # output 0 is PAFs
paf = cv2.resize(paf, (0, 0), fx=model_params['stride'], fy=model_params['stride'],
interpolation=cv2.INTER_CUBIC)
paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
heatmap_avg = heatmap_avg + heatmap / len(multiplier)
paf_avg = paf_avg + paf / len(multiplier)
all_peaks = []
peak_counter = 0
for part in range(18):
map_ori = heatmap_avg[:, :, part]
map = gaussian_filter(map_ori, sigma=3)
map_left = np.zeros(map.shape)
map_left[1:, :] = map[:-1, :]
map_right = np.zeros(map.shape)
map_right[:-1, :] = map[1:, :]
map_up = np.zeros(map.shape)
map_up[:, 1:] = map[:, :-1]
map_down = np.zeros(map.shape)
map_down[:, :-1] = map[:, 1:]
peaks_binary = np.logical_and.reduce(
(map >= map_left, map >= map_right, map >= map_up, map >= map_down, map > params['thre1']))
peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse
peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
id = range(peak_counter, peak_counter + len(peaks))
peaks_with_score_and_id = [peaks_with_score[i] + (id[i],) for i in range(len(id))]
all_peaks.append(peaks_with_score_and_id)
peak_counter += len(peaks)
connection_all = []
special_k = []
mid_num = 10
for k in range(len(mapIdx)):
score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
candA = all_peaks[limbSeq[k][0] - 1]
candB = all_peaks[limbSeq[k][1] - 1]
nA = len(candA)
nB = len(candB)
indexA, indexB = limbSeq[k]
if (nA != 0 and nB != 0):
connection_candidate = []
for i in range(nA):
for j in range(nB):
vec = np.subtract(candB[j][:2], candA[i][:2])
norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
                    # failure case when 2 body parts overlap
if norm == 0:
continue
vec = np.divide(vec, norm)
startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
np.linspace(candA[i][1], candB[j][1], num=mid_num)))
vec_x = np.array(
[score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
for I in range(len(startend))])
vec_y = np.array(
[score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
for I in range(len(startend))])
score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(
0.5 * oriImg.shape[0] / norm - 1, 0)
criterion1 = len(np.nonzero(score_midpts > params['thre2'])[0]) > 0.8 * len(
score_midpts)
criterion2 = score_with_dist_prior > 0
if criterion1 and criterion2:
connection_candidate.append([i, j, score_with_dist_prior,
score_with_dist_prior + candA[i][2] + candB[j][2]])
connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
connection = np.zeros((0, 5))
for c in range(len(connection_candidate)):
i, j, s = connection_candidate[c][0:3]
if (i not in connection[:, 3] and j not in connection[:, 4]):
connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
if (len(connection) >= min(nA, nB)):
break
connection_all.append(connection)
else:
special_k.append(k)
connection_all.append([])
# last number in each row is the total parts number of that person
# the second last number in each row is the score of the overall configuration
subset = -1 * np.ones((0, 20))
candidate = np.array([item for sublist in all_peaks for item in sublist])
for k in range(len(mapIdx)):
if k not in special_k:
partAs = connection_all[k][:, 0]
partBs = connection_all[k][:, 1]
indexA, indexB = np.array(limbSeq[k]) - 1
for i in range(len(connection_all[k])): # = 1:size(temp,1)
found = 0
subset_idx = [-1, -1]
for j in range(len(subset)): # 1:size(subset,1):
if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
subset_idx[found] = j
found += 1
if found == 1:
j = subset_idx[0]
if (subset[j][indexB] != partBs[i]):
subset[j][indexB] = partBs[i]
subset[j][-1] += 1
subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
elif found == 2: # if found 2 and disjoint, merge them
j1, j2 = subset_idx
membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
if len(np.nonzero(membership == 2)[0]) == 0: # merge
subset[j1][:-2] += (subset[j2][:-2] + 1)
subset[j1][-2:] += subset[j2][-2:]
subset[j1][-2] += connection_all[k][i][2]
subset = np.delete(subset, j2, 0)
else: # as like found == 1
subset[j1][indexB] = partBs[i]
subset[j1][-1] += 1
subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
# if find no partA in the subset, create a new subset
elif not found and k < 17:
row = -1 * np.ones(20)
row[indexA] = partAs[i]
row[indexB] = partBs[i]
row[-1] = 2
row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + \
connection_all[k][i][2]
subset = np.vstack([subset, row])
# delete some rows of subset which has few parts occur
deleteIdx = []
for i in range(len(subset)):
if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
deleteIdx.append(i)
subset = np.delete(subset, deleteIdx, axis=0)
    # create a black canvas with numpy
poseFrame = np.zeros((h, w, 3), np.uint8)
#fill the image with black
poseFrame.fill(1)
# draw 18 keypoints
keypoints = []
for i in range(18):
for j in range(len(all_peaks[i])):
# loc = all_peaks[i][j][0:2]
# print('x:', loc[0], ', y:', loc[1])
# cv2.circle(poseFrame, all_peaks[i][j][0:2], 4, colors[i], thickness=-1)
keypoints.append(all_peaks[i][j][0:2])
keypoints = normalize(keypoints, pose_scale)
for i in range(len(keypoints) if len(keypoints) < 18 else 18):
cv2.circle(poseFrame, keypoints[i], 4, colors[i], thickness=-1)
# draw 17 parts of a body
stickwidth = 4
for i in range(17):
for n in range(len(subset)):
index = subset[n][np.array(limbSeq[i]) - 1]
if -1 in index:
continue
cur_poseFrame = poseFrame.copy()
Y = candidate[index.astype(int), 0]
X = candidate[index.astype(int), 1]
# normalize parts
X = X * pose_scale[0]
Y = Y * pose_scale[1]
mX = np.mean(X)
mY = np.mean(Y)
length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
cv2.fillConvexPoly(cur_poseFrame, polygon, colors[i])
poseFrame = cv2.addWeighted(poseFrame, 0.4, cur_poseFrame, 0.6, 0)
poseFrame, cur_radius = move_pose_center(input_image.shape, poseFrame)
return poseFrame, cur_radius
# normalize keypoints
def normalize(src_points, scale):
normalized_points = []
# 缩放
mean_x = 0
mean_y = 0
for i in range(len(src_points)):
x = src_points[i][0] * scale[0]
y = src_points[i][1] * scale[1]
normalized_points.append((int(x), int(y)))
# mean_x += x
# mean_y += y
    # (disabled) translate the pose to the canvas center
    # mean_x = mean_x / len(normalized_points)
    # mean_y = mean_y / len(normalized_points)
    # move_x = img_size[1] / 2 - mean_x # shape[1] = width
    # move_y = img_size[0] / 2 - mean_y - 20 # shape[0] = height, extra offset to balance the pose
# for i in range(len(normalized_points)):
# x = normalized_points[i][0] + move_x
# y = normalized_points[i][1] + move_y
# normalized_points[i] = (int(x), int(y))
return normalized_points
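# Quick sanity check (illustrative, made-up point): coordinates are scaled
# and truncated to ints.
assert normalize([(10, 20)], (0.5, 0.5)) == [(5, 10)]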
# translate the pose to the canvas center
def move_pose_center(img_size, poseFrame):
# convert image to grayscale image
gray_image = cv2.cvtColor(poseFrame, cv2.COLOR_BGR2GRAY)
# convert the grayscale image to binary image
ret, thresh = cv2.threshold(gray_image, 5, 255, cv2.THRESH_BINARY)
# cv2.imshow('thresh', thresh)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnt = contours[0]
(x, y), radius = cv2.minEnclosingCircle(cnt)
center = (int(x), int(y))
print('>>> radius:', radius)
radius = int(radius)
# cv2.circle(poseFrame, center, radius, (255, 0, 0), 2)
    # translation matrix M: [[1, 0, x], [0, 1, y]]
M = np.float32([[1, 0, img_size[0]/2-x], [0, 1, img_size[1]/2-y]])
dst = cv2.warpAffine(poseFrame, M, (img_size[1], img_size[0]))
return dst, min(radius, img_size[0]/2, img_size[1]/2)
# compute the scale factor from the enclosing-circle radius
def getScale(pose_radius, model_radius=104.0):
s = model_radius / pose_radius
return (s, s)
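# Quick sanity check (illustrative): a pose circle half the model radius
# should be scaled up by a factor of 2 on both axes.
assert getScale(52.0) == (2.0, 2.0)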
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, default='../../images/pbug_man_450x420.avi', help='input video')
parser.add_argument('--output', type=str, default='../../result/pose_out.avi', help='output pose video')
parser.add_argument('--model', type=str, default='model/keras/model.h5', help='path to the weights file')
args = parser.parse_args()
keras_weights_file = args.model
# load model
tic = time.time()
print('load model...')
# authors of original model don't use
# vgg normalization (subtracting mean) on input images
model = get_testing_model()
model.load_weights(keras_weights_file)
    print('* h5 model load time: {:.2f}s.'.format(time.time() - tic))
cap = cv2.VideoCapture(args.input if args.input else 0)
    # total number of frames in the video
frameNum = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    # video writer
    # fourcc = cv.VideoWriter_fourcc('m', 'p', '4', 'v')
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # the saved frame size must match the writer size, otherwise nothing is written to the file
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
size = (w, h)
poseout = cv2.VideoWriter(args.output, fourcc, 20.0, size)
start_time = time.time()
print('start processing...')
    print('Processing {} frames in total; estimated time {:.2f} min.'.format(frameNum, frameNum * 1.75 / 60))
j = 1
    scale = (0.7, 0.7)  # x and y must be equal here; unequal scales are not supported yet
while(1):
ret, frame = cap.read()
if not ret:
break
params, model_params = config_reader()
# generate image with body parts
poseFrame, pose_radius = process(frame, params, model_params, scale)
scale = getScale(pose_radius)
print('>>> scale = ', scale)
        # concatenate cur_pose and cur_frame side by side as the pix2pix input
cur_pairs = np.concatenate([poseFrame, frame], axis=1)
# write to pix2pix workdir
cv2.imwrite('../pytorch_pix2pix/datasets/pbug_full/test/curPose.jpg', cur_pairs)
# write in video
poseout.write(poseFrame)
end_time = time.time()
cv2.imshow('frame', frame)
cv2.imshow('poseFrame, scale', poseFrame)
j += 1
if j % 20 == 0:
            # log timing
            end_time = time.time()
            print('Processed {}/{} frames in {:.4f}s, {:.4f}s per frame on average'.format(j, int(frameNum), end_time - start_time, (end_time - start_time) / j))
        if (cv2.waitKey(1) & 0xFF) == ord('q'):
break
end_time = time.time()
    print('Finished processing {} frames in {:.4f}s.'.format(j, end_time - start_time))
cap.release()
poseout.release()
cv2.destroyAllWindows()
| [
"argparse.ArgumentParser",
"cv2.VideoWriter_fourcc",
"math.atan2",
"numpy.ones",
"cv2.warpAffine",
"numpy.mean",
"cv2.VideoWriter",
"cv2.imshow",
"numpy.multiply",
"model.get_testing_model",
"cv2.cvtColor",
"cv2.imwrite",
"numpy.logical_and.reduce",
"numpy.linspace",
"cv2.destroyAllWindo... | [((1337, 1385), 'numpy.zeros', 'np.zeros', (['(oriImg.shape[0], oriImg.shape[1], 19)'], {}), '((oriImg.shape[0], oriImg.shape[1], 19))\n', (1345, 1385), True, 'import numpy as np\n'), ((1400, 1448), 'numpy.zeros', 'np.zeros', (['(oriImg.shape[0], oriImg.shape[1], 38)'], {}), '((oriImg.shape[0], oriImg.shape[1], 38))\n', (1408, 1448), True, 'import numpy as np\n'), ((6933, 6994), 'numpy.array', 'np.array', (['[item for sublist in all_peaks for item in sublist]'], {}), '([item for sublist in all_peaks for item in sublist])\n', (6941, 6994), True, 'import numpy as np\n'), ((9396, 9432), 'numpy.delete', 'np.delete', (['subset', 'deleteIdx'], {'axis': '(0)'}), '(subset, deleteIdx, axis=0)\n', (9405, 9432), True, 'import numpy as np\n'), ((9480, 9509), 'numpy.zeros', 'np.zeros', (['(h, w, 3)', 'np.uint8'], {}), '((h, w, 3), np.uint8)\n', (9488, 9509), True, 'import numpy as np\n'), ((12053, 12096), 'cv2.cvtColor', 'cv2.cvtColor', (['poseFrame', 'cv2.COLOR_BGR2GRAY'], {}), '(poseFrame, cv2.COLOR_BGR2GRAY)\n', (12065, 12096), False, 'import cv2\n'), ((12165, 12217), 'cv2.threshold', 'cv2.threshold', (['gray_image', '(5)', '(255)', 'cv2.THRESH_BINARY'], {}), '(gray_image, 5, 255, cv2.THRESH_BINARY)\n', (12178, 12217), False, 'import cv2\n'), ((12279, 12341), 'cv2.findContours', 'cv2.findContours', (['thresh', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_NONE'], {}), '(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n', (12295, 12341), False, 'import cv2\n'), ((12385, 12412), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['cnt'], {}), '(cnt)\n', (12407, 12412), False, 'import cv2\n'), ((12599, 12669), 'numpy.float32', 'np.float32', (['[[1, 0, img_size[0] / 2 - x], [0, 1, img_size[1] / 2 - y]]'], {}), '([[1, 0, img_size[0] / 2 - x], [0, 1, img_size[1] / 2 - y]])\n', (12609, 12669), True, 'import numpy as np\n'), ((12672, 12728), 'cv2.warpAffine', 'cv2.warpAffine', (['poseFrame', 'M', '(img_size[1], img_size[0])'], {}), '(poseFrame, M, (img_size[1], img_size[0]))\n', (12686, 12728), False, 'import cv2\n'), ((12951, 12976), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (12974, 12976), False, 'import argparse\n'), ((13402, 13413), 'time.time', 'time.time', ([], {}), '()\n', (13411, 13413), False, 'import time\n'), ((13554, 13573), 'model.get_testing_model', 'get_testing_model', ([], {}), '()\n', (13571, 13573), False, 'from model import get_testing_model\n'), ((13692, 13741), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(args.input if args.input else 0)'], {}), '(args.input if args.input else 0)\n', (13708, 13741), False, 'import cv2\n'), ((13892, 13923), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (13914, 13923), False, 'import cv2\n'), ((14090, 14138), 'cv2.VideoWriter', 'cv2.VideoWriter', (['args.output', 'fourcc', '(20.0)', 'size'], {}), '(args.output, fourcc, 20.0, size)\n', (14105, 14138), False, 'import cv2\n'), ((14157, 14168), 'time.time', 'time.time', ([], {}), '()\n', (14166, 14168), False, 'import time\n'), ((15373, 15384), 'time.time', 'time.time', ([], {}), '()\n', (15382, 15384), False, 'import time\n'), ((15503, 15526), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (15524, 15526), False, 'import cv2\n'), ((1564, 1641), 'cv2.resize', 'cv2.resize', (['oriImg', '(0, 0)'], {'fx': 'scale', 'fy': 'scale', 'interpolation': 'cv2.INTER_CUBIC'}), '(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)\n', (1574, 1641), False, 'import cv2\n'), ((1676, 1767), 
'util.padRightDownCorner', 'util.padRightDownCorner', (['imageToTest', "model_params['stride']", "model_params['padValue']"], {}), "(imageToTest, model_params['stride'], model_params[\n 'padValue'])\n", (1699, 1767), False, 'import util\n'), ((2084, 2111), 'numpy.squeeze', 'np.squeeze', (['output_blobs[1]'], {}), '(output_blobs[1])\n', (2094, 2111), True, 'import numpy as np\n'), ((2154, 2271), 'cv2.resize', 'cv2.resize', (['heatmap', '(0, 0)'], {'fx': "model_params['stride']", 'fy': "model_params['stride']", 'interpolation': 'cv2.INTER_CUBIC'}), "(heatmap, (0, 0), fx=model_params['stride'], fy=model_params[\n 'stride'], interpolation=cv2.INTER_CUBIC)\n", (2164, 2271), False, 'import cv2\n'), ((2421, 2512), 'cv2.resize', 'cv2.resize', (['heatmap', '(oriImg.shape[1], oriImg.shape[0])'], {'interpolation': 'cv2.INTER_CUBIC'}), '(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.\n INTER_CUBIC)\n', (2431, 2512), False, 'import cv2\n'), ((2523, 2550), 'numpy.squeeze', 'np.squeeze', (['output_blobs[0]'], {}), '(output_blobs[0])\n', (2533, 2550), True, 'import numpy as np\n'), ((2585, 2698), 'cv2.resize', 'cv2.resize', (['paf', '(0, 0)'], {'fx': "model_params['stride']", 'fy': "model_params['stride']", 'interpolation': 'cv2.INTER_CUBIC'}), "(paf, (0, 0), fx=model_params['stride'], fy=model_params['stride'\n ], interpolation=cv2.INTER_CUBIC)\n", (2595, 2698), False, 'import cv2\n'), ((2832, 2919), 'cv2.resize', 'cv2.resize', (['paf', '(oriImg.shape[1], oriImg.shape[0])'], {'interpolation': 'cv2.INTER_CUBIC'}), '(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.\n INTER_CUBIC)\n', (2842, 2919), False, 'import cv2\n'), ((3157, 3190), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['map_ori'], {'sigma': '(3)'}), '(map_ori, sigma=3)\n', (3172, 3190), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((3211, 3230), 'numpy.zeros', 'np.zeros', (['map.shape'], {}), '(map.shape)\n', (3219, 3230), True, 'import numpy as np\n'), ((3289, 3308), 'numpy.zeros', 'np.zeros', (['map.shape'], {}), '(map.shape)\n', (3297, 3308), True, 'import numpy as np\n'), ((3365, 3384), 'numpy.zeros', 'np.zeros', (['map.shape'], {}), '(map.shape)\n', (3373, 3384), True, 'import numpy as np\n'), ((3440, 3459), 'numpy.zeros', 'np.zeros', (['map.shape'], {}), '(map.shape)\n', (3448, 3459), True, 'import numpy as np\n'), ((3522, 3640), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (["(map >= map_left, map >= map_right, map >= map_up, map >= map_down, map >\n params['thre1'])"], {}), "((map >= map_left, map >= map_right, map >= map_up, \n map >= map_down, map > params['thre1']))\n", (3543, 3640), True, 'import numpy as np\n'), ((6900, 6916), 'numpy.ones', 'np.ones', (['(0, 20)'], {}), '((0, 20))\n', (6907, 6916), True, 'import numpy as np\n'), ((10034, 10097), 'cv2.circle', 'cv2.circle', (['poseFrame', 'keypoints[i]', '(4)', 'colors[i]'], {'thickness': '(-1)'}), '(poseFrame, keypoints[i], 4, colors[i], thickness=-1)\n', (10044, 10097), False, 'import cv2\n'), ((14450, 14465), 'config_reader.config_reader', 'config_reader', ([], {}), '()\n', (14463, 14465), False, 'from config_reader import config_reader\n'), ((14729, 14771), 'numpy.concatenate', 'np.concatenate', (['[poseFrame, frame]'], {'axis': '(1)'}), '([poseFrame, frame], axis=1)\n', (14743, 14771), True, 'import numpy as np\n'), ((14815, 14900), 'cv2.imwrite', 'cv2.imwrite', (['"""../pytorch_pix2pix/datasets/pbug_full/test/curPose.jpg"""', 'cur_pairs'], {}), "('../pytorch_pix2pix/datasets/pbug_full/test/curPose.jpg', 
cur_pairs\n )\n", (14826, 14900), False, 'import cv2\n'), ((14973, 14984), 'time.time', 'time.time', ([], {}), '()\n', (14982, 14984), False, 'import time\n'), ((14993, 15019), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (15003, 15019), False, 'import cv2\n'), ((15028, 15069), 'cv2.imshow', 'cv2.imshow', (['"""poseFrame, scale"""', 'poseFrame'], {}), "('poseFrame, scale', poseFrame)\n", (15038, 15069), False, 'import cv2\n'), ((1855, 1906), 'numpy.float32', 'np.float32', (['imageToTest_padded[:, :, :, np.newaxis]'], {}), '(imageToTest_padded[:, :, :, np.newaxis])\n', (1865, 1906), True, 'import numpy as np\n'), ((6210, 6226), 'numpy.zeros', 'np.zeros', (['(0, 5)'], {}), '((0, 5))\n', (6218, 6226), True, 'import numpy as np\n'), ((10574, 10584), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (10581, 10584), True, 'import numpy as np\n'), ((10602, 10612), 'numpy.mean', 'np.mean', (['Y'], {}), '(Y)\n', (10609, 10612), True, 'import numpy as np\n'), ((10879, 10932), 'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['cur_poseFrame', 'polygon', 'colors[i]'], {}), '(cur_poseFrame, polygon, colors[i])\n', (10897, 10932), False, 'import cv2\n'), ((10957, 11011), 'cv2.addWeighted', 'cv2.addWeighted', (['poseFrame', '(0.4)', 'cur_poseFrame', '(0.6)', '(0)'], {}), '(poseFrame, 0.4, cur_poseFrame, 0.6, 0)\n', (10972, 11011), False, 'import cv2\n'), ((15151, 15162), 'time.time', 'time.time', ([], {}), '()\n', (15160, 15162), False, 'import time\n'), ((7183, 7203), 'numpy.array', 'np.array', (['limbSeq[k]'], {}), '(limbSeq[k])\n', (7191, 7203), True, 'import numpy as np\n'), ((10716, 10752), 'math.atan2', 'math.atan2', (['(X[0] - X[1])', '(Y[0] - Y[1])'], {}), '(X[0] - X[1], Y[0] - Y[1])\n', (10726, 10752), False, 'import math\n'), ((13673, 13684), 'time.time', 'time.time', ([], {}), '()\n', (13682, 13684), False, 'import time\n'), ((15307, 15321), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (15318, 15321), False, 'import cv2\n'), ((3674, 3698), 'numpy.nonzero', 'np.nonzero', (['peaks_binary'], {}), '(peaks_binary)\n', (3684, 3698), True, 'import numpy as np\n'), ((3703, 3727), 'numpy.nonzero', 'np.nonzero', (['peaks_binary'], {}), '(peaks_binary)\n', (3713, 3727), True, 'import numpy as np\n'), ((4559, 4598), 'numpy.subtract', 'np.subtract', (['candB[j][:2]', 'candA[i][:2]'], {}), '(candB[j][:2], candA[i][:2])\n', (4570, 4598), True, 'import numpy as np\n'), ((4626, 4670), 'math.sqrt', 'math.sqrt', (['(vec[0] * vec[0] + vec[1] * vec[1])'], {}), '(vec[0] * vec[0] + vec[1] * vec[1])\n', (4635, 4670), False, 'import math\n'), ((4826, 4846), 'numpy.divide', 'np.divide', (['vec', 'norm'], {}), '(vec, norm)\n', (4835, 4846), True, 'import numpy as np\n'), ((6448, 6508), 'numpy.vstack', 'np.vstack', (['[connection, [candA[i][3], candB[j][3], s, i, j]]'], {}), '([connection, [candA[i][3], candB[j][3], s, i, j]])\n', (6457, 6508), True, 'import numpy as np\n'), ((10239, 10259), 'numpy.array', 'np.array', (['limbSeq[i]'], {}), '(limbSeq[i])\n', (10247, 10259), True, 'import numpy as np\n'), ((5449, 5475), 'numpy.multiply', 'np.multiply', (['vec_x', 'vec[0]'], {}), '(vec_x, vec[0])\n', (5460, 5475), True, 'import numpy as np\n'), ((5478, 5504), 'numpy.multiply', 'np.multiply', (['vec_y', 'vec[1]'], {}), '(vec_y, vec[1])\n', (5489, 5504), True, 'import numpy as np\n'), ((4888, 4938), 'numpy.linspace', 'np.linspace', (['candA[i][0]', 'candB[j][0]'], {'num': 'mid_num'}), '(candA[i][0], candB[j][0], num=mid_num)\n', (4899, 4938), True, 'import numpy as np\n'), ((4977, 5027), 
'numpy.linspace', 'np.linspace', (['candA[i][1]', 'candB[j][1]'], {'num': 'mid_num'}), '(candA[i][1], candB[j][1], num=mid_num)\n', (4988, 5027), True, 'import numpy as np\n'), ((8419, 8443), 'numpy.delete', 'np.delete', (['subset', 'j2', '(0)'], {}), '(subset, j2, 0)\n', (8428, 8443), True, 'import numpy as np\n'), ((9145, 9169), 'numpy.vstack', 'np.vstack', (['[subset, row]'], {}), '([subset, row])\n', (9154, 9169), True, 'import numpy as np\n'), ((5692, 5734), 'numpy.nonzero', 'np.nonzero', (["(score_midpts > params['thre2'])"], {}), "(score_midpts > params['thre2'])\n", (5702, 5734), True, 'import numpy as np\n'), ((8840, 8851), 'numpy.ones', 'np.ones', (['(20)'], {}), '(20)\n', (8847, 8851), True, 'import numpy as np\n'), ((8149, 8176), 'numpy.nonzero', 'np.nonzero', (['(membership == 2)'], {}), '(membership == 2)\n', (8159, 8176), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 7 14:59:12 2018
@author: abrantesasf
"""
import numpy as np
array1 = np.array([1, 2, 3, 4, 5])
array1
array2 = np.array([2, 3, 4, 5, 6])
array2
# Element-wise multiplication of the arrays:
array1 * array2
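# expected: [2, 6, 12, 20, 30]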
# Dot product of the arrays
np.dot(array1, array2)
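# expected: 70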
array3 = np.array([1, 2])
array3
matriz1 = np.array([[2, 4, 6], [3, 5, 7]])
matriz1
# Multiplication of the array by the matrix
np.dot(array3, matriz1)
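# expected: [8, 14, 20]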
array4 = np.array([8, 9, 10])
array4
matriz2 = np.array([[2, 4, 6], [3, 5, 7]])
matriz2
np.dot(matriz2, array4)
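# expected: [112, 139]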
| [
"numpy.dot",
"numpy.array"
] | [((143, 168), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (151, 168), True, 'import numpy as np\n'), ((186, 211), 'numpy.array', 'np.array', (['[2, 3, 4, 5, 6]'], {}), '([2, 3, 4, 5, 6])\n', (194, 211), True, 'import numpy as np\n'), ((299, 321), 'numpy.dot', 'np.dot', (['array1', 'array2'], {}), '(array1, array2)\n', (305, 321), True, 'import numpy as np\n'), ((334, 350), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (342, 350), True, 'import numpy as np\n'), ((368, 400), 'numpy.array', 'np.array', (['[[2, 4, 6], [3, 5, 7]]'], {}), '([[2, 4, 6], [3, 5, 7]])\n', (376, 400), True, 'import numpy as np\n'), ((447, 470), 'numpy.dot', 'np.dot', (['array3', 'matriz1'], {}), '(array3, matriz1)\n', (453, 470), True, 'import numpy as np\n'), ((482, 502), 'numpy.array', 'np.array', (['[8, 9, 10]'], {}), '([8, 9, 10])\n', (490, 502), True, 'import numpy as np\n'), ((520, 552), 'numpy.array', 'np.array', (['[[2, 4, 6], [3, 5, 7]]'], {}), '([[2, 4, 6], [3, 5, 7]])\n', (528, 552), True, 'import numpy as np\n'), ((561, 584), 'numpy.dot', 'np.dot', (['matriz2', 'array4'], {}), '(matriz2, array4)\n', (567, 584), True, 'import numpy as np\n')] |
import random
import string
import re
import html
import cv2
import numpy as np
from PIL import ImageFont, ImageDraw, Image
import h5py
def generate(text, filepath, fontpath):
height = 100
width = 1050
blank_image = np.zeros((height, width, 3), np.uint8)
blank_image[:, :] = (255, 255, 255)
image = blank_image
# Convert the image to RGB (OpenCV uses BGR)
cv2_im_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Pass the image to PIL
pil_im = Image.fromarray(cv2_im_rgb)
draw = ImageDraw.Draw(pil_im)
# use a truetype font
font = ImageFont.truetype(fontpath, 100)
# Draw the text
draw.text((4, 0), text, font=font, fill=(0, 0, 0))
# Get back the image to OpenCV
cv2_im_processed = cv2.cvtColor(np.array(pil_im), cv2.COLOR_RGB2BGR)
# cv2.imshow(cv2_im_processed)
cv2.imwrite(filepath, cv2_im_processed)
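# Example call (hypothetical paths; point fontpath at a real .ttf file):
# generate("hello", "out/hello.png", "fonts/DejaVuSans.ttf")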
def truncateLabel(text, maxTextLen):
# ctc_loss can't compute loss if it cannot find a mapping between text label and input
# labels. Repeat letters cost double because of the blank symbol needing to be inserted.
# If a too-long label is provided, ctc_loss returns an infinite gradient
cost = 0
for i in range(len(text)):
if i != 0 and text[i] == text[i-1]:
cost += 2
else:
cost += 1
if cost > maxTextLen:
return text[:i]
return text
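# Quick sanity check (illustrative, made-up labels): a repeated letter costs
# double, so "aab" exceeds a budget of 2 and is cut before the repeat.
assert truncateLabel("aab", 2) == "a"
assert truncateLabel("abc", 5) == "abc"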
def getGeneratedData(font=None, Fake=True):
f = open('/home/kuadmin01/terng/SeniorProjectMaterial/Word/words_filter.txt')
datapath = []
label = []
fake_datapath = []
fake_datalabel = []
for line in f:
# ignore comment line
if not line or line[0] == '#':
continue
lineSplit = line.strip().split(' ')
fileNameSplit = lineSplit[0].split('-')
fileName = '/home/kuadmin01/terng/HTR_dataset_word/' + \
fileNameSplit[0] + '/' + fileNameSplit[0] + '-' + \
fileNameSplit[1] + '/' + lineSplit[0] + '.png'
gtText = truncateLabel(' '.join(lineSplit[8:]), 128)
# if len(gtText)>2:
datapath.append(fileName)
label.append(gtText)
if Fake:
generated_path = '/home/kuadmin01/terng/HTR_generated_dataset/' + \
font.split('.')[0]+'_' + lineSplit[0] + '.png'
fake_datapath.append(generated_path)
fake_datalabel.append(gtText)
if Fake:
return fake_datapath, fake_datalabel
else:
return datapath, label
"""
DeepSpell based text cleaning process.
Tal Weiss.
Deep Spelling.
Medium: https://machinelearnings.co/deep-spelling-9ffef96a24f6#.2c9pu8nlm
Github: https://github.com/MajorTal/DeepSpell
"""
RE_DASH_FILTER = re.compile(r'[\-\˗\֊\‐\‑\‒\–\—\⁻\₋\−\﹣\-]', re.UNICODE)
RE_APOSTROPHE_FILTER = re.compile("['ʼ՚‘’‛❛❜ߴߵ`‵´ˊˋ{}{}{}{}{}{}{}{}{}]".format(
chr(768), chr(769), chr(832), chr(833), chr(2387),
chr(5151), chr(5152), chr(65344), chr(8242)), re.UNICODE)
RE_RESERVED_CHAR_FILTER = re.compile(r'[¶¤«»]', re.UNICODE)
RE_LEFT_PARENTH_FILTER = re.compile(r'[\(\[\{\⁽\₍\❨\❪\﹙\(]', re.UNICODE)
RE_RIGHT_PARENTH_FILTER = re.compile(r'[\)\]\}\⁾\₎\❩\❫\﹚\)]', re.UNICODE)
RE_BASIC_CLEANER = re.compile(r'[^\w\s{}]'.format(
re.escape(string.punctuation)), re.UNICODE)
LEFT_PUNCTUATION_FILTER = """!%&),.:;<=>?@\\]^_`|}~"""
RIGHT_PUNCTUATION_FILTER = """"(/<=>@[\\^_`{|~"""
NORMALIZE_WHITESPACE_REGEX = re.compile(r'[^\S\n]+', re.UNICODE)
def text_standardize(text):
"""Organize/add spaces around punctuation marks"""
if text is None:
return ""
text = html.unescape(text).replace("\\n", "").replace("\\t", "")
text = RE_RESERVED_CHAR_FILTER.sub("", text)
text = RE_DASH_FILTER.sub("-", text)
text = RE_APOSTROPHE_FILTER.sub("'", text)
text = RE_LEFT_PARENTH_FILTER.sub("(", text)
text = RE_RIGHT_PARENTH_FILTER.sub(")", text)
text = RE_BASIC_CLEANER.sub("", text)
text = text.lstrip(LEFT_PUNCTUATION_FILTER)
text = text.rstrip(RIGHT_PUNCTUATION_FILTER)
text = text.translate(str.maketrans(
{c: f" {c} " for c in string.punctuation}))
text = NORMALIZE_WHITESPACE_REGEX.sub(" ", text.strip())
return text
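# Hedged example of the cleaning chain above (expected value derived from the
# regexes by hand, not part of the original pipeline): the en dash collapses
# to '-' and every punctuation mark gets padded with single spaces.
assert text_standardize("Don't – stop") == "Don ' t - stop"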
"""
Sauvola binarization based in,
<NAME>, <NAME>, <NAME>, <NAME>,
Adaptive Document Binarization, in IEEE Computer Society Washington, 1997.
"""
def sauvola(img, window, thresh, k):
"""Sauvola binarization"""
rows, cols = img.shape
pad = int(np.floor(window[0] / 2))
sum2, sqsum = cv2.integral2(
cv2.copyMakeBorder(img, pad, pad, pad, pad, cv2.BORDER_CONSTANT))
isum = sum2[window[0]:rows + window[0], window[1]:cols + window[1]] + \
sum2[0:rows, 0:cols] - \
sum2[window[0]:rows + window[0], 0:cols] - \
sum2[0:rows, window[1]:cols + window[1]]
isqsum = sqsum[window[0]:rows + window[0], window[1]:cols + window[1]] + \
sqsum[0:rows, 0:cols] - \
sqsum[window[0]:rows + window[0], 0:cols] - \
sqsum[0:rows, window[1]:cols + window[1]]
ksize = window[0] * window[1]
mean = isum / ksize
std = (((isqsum / ksize) - (mean**2) / ksize) / ksize) ** 0.5
threshold = (mean * (1 + k * (std / thresh - 1))) * (mean >= 100)
return np.asarray(255 * (img >= threshold), 'uint8')
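# Minimal sketch on synthetic data (hypothetical parameters): the threshold is
# a Sauvola-style rule T = m * (1 + k * (s / R - 1)) with local mean m, local
# std s and dynamic range R (the `thresh` argument), additionally zeroed
# wherever the local mean falls below 100.
_sauvola_patch = (np.random.rand(64, 64) * 255).astype(np.uint8)
_sauvola_bin = sauvola(_sauvola_patch, window=(15, 15), thresh=127, k=0.05)
assert _sauvola_bin.shape == (64, 64) and _sauvola_bin.dtype == np.uint8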
def remove_cursive_style(img):
"""Remove cursive writing style from image with deslanting algorithm"""
def calc_y_alpha(vec):
indices = np.where(vec > 0)[0]
h_alpha = len(indices)
if h_alpha > 0:
delta_y_alpha = indices[h_alpha - 1] - indices[0] + 1
if h_alpha == delta_y_alpha:
return h_alpha * h_alpha
return 0
alpha_vals = [-1.0, -0.75, -0.5, -0.25, 0.0, 0.25, 0.5, 0.75, 1.0]
rows, cols = img.shape
results = []
ret, otsu = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
binary = otsu if ret < 127 else sauvola(
img, (int(img.shape[0] / 2), int(img.shape[0] / 2)), 127, 1e-2)
for alpha in alpha_vals:
shift_x = max(-alpha * rows, 0.)
size = (cols + int(np.ceil(abs(alpha * rows))), rows)
transform = np.asarray(
            [[1, alpha, shift_x], [0, 1, 0]], dtype=np.float64)
        shear_img = cv2.warpAffine(binary, transform, size, flags=cv2.INTER_NEAREST)
sum_alpha = 0
sum_alpha += np.apply_along_axis(calc_y_alpha, 0, shear_img)
results.append([np.sum(sum_alpha), size, transform])
result = sorted(results, key=lambda x: x[0], reverse=True)[0]
warp = cv2.warpAffine(img, result[2], result[1], borderValue=255)
return cv2.resize(warp, dsize=(cols, rows))
def preproc(img, input_size):
"""Make the process with the `input_size` to the scale resize"""
img_src = img
if isinstance(img, str):
img = cv2.imread(img, cv2.IMREAD_GRAYSCALE)
if isinstance(img, tuple):
image, boundbox = img
img = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
for i in range(len(boundbox)):
if isinstance(boundbox[i], float):
total = len(img) if i < 2 else len(img[0])
boundbox[i] = int(total * boundbox[i])
img = np.asarray(img[boundbox[0]:boundbox[1],
boundbox[2]:boundbox[3]], dtype=np.uint8)
wt, ht, _ = input_size
try:
h, w = np.asarray(img).shape
except Exception as e:
print(f"ERROR: {img_src}")
return
f = max((w / wt), (h / ht))
new_size = (max(min(wt, int(w / f)), 1), max(min(ht, int(h / f)), 1))
img = cv2.resize(img, new_size)
_, binary = cv2.threshold(img, 254, 255, cv2.THRESH_BINARY)
if np.sum(img) * 0.8 > np.sum(binary):
img = illumination_compensation(img)
img = remove_cursive_style(img)
target = np.ones([ht, wt], dtype=np.uint8) * 255
target[0:new_size[1], 0:new_size[0]] = img
img = cv2.transpose(target)
return img
def normalization(imgs):
"""Normalize list of images"""
imgs = np.asarray(imgs).astype(np.float32)
_, h, w = imgs.shape
for i in range(len(imgs)):
m, s = cv2.meanStdDev(imgs[i])
imgs[i] = imgs[i] - m[0][0]
imgs[i] = imgs[i] / s[0][0] if s[0][0] > 0 else imgs[i]
return np.expand_dims(imgs, axis=-1)
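# Sanity check on a synthetic batch: every image is standardized per-image
# (zero mean, unit variance where the std is positive) and a trailing channel
# axis is appended.
_norm_demo = normalization([np.full((8, 8), 200, np.uint8),
                            np.zeros((8, 8), np.uint8)])
assert _norm_demo.shape == (2, 8, 8, 1)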
def augmentation(imgs,
rotation_range=0,
scale_range=0,
height_shift_range=0,
width_shift_range=0,
dilate_range=1,
erode_range=1):
"""Apply variations to a list of images (rotate, width and height shift, scale, erode, dilate)"""
imgs = imgs.astype(np.float32)
_, h, w = imgs.shape
dilate_kernel = np.ones(
(int(np.random.uniform(1, dilate_range)),), np.uint8)
erode_kernel = np.ones((int(np.random.uniform(1, erode_range)),), np.uint8)
height_shift = np.random.uniform(-height_shift_range, height_shift_range)
rotation = np.random.uniform(-rotation_range, rotation_range)
scale = np.random.uniform(1 - scale_range, 1)
width_shift = np.random.uniform(-width_shift_range, width_shift_range)
trans_map = np.float32([[1, 0, width_shift * w], [0, 1, height_shift * h]])
rot_map = cv2.getRotationMatrix2D((w // 2, h // 2), rotation, scale)
trans_map_aff = np.r_[trans_map, [[0, 0, 1]]]
rot_map_aff = np.r_[rot_map, [[0, 0, 1]]]
affine_mat = rot_map_aff.dot(trans_map_aff)[:2, :]
for i in range(len(imgs)):
imgs[i] = cv2.warpAffine(
imgs[i], affine_mat, (w, h), flags=cv2.INTER_NEAREST, borderValue=255)
imgs[i] = cv2.erode(imgs[i], erode_kernel, iterations=1)
imgs[i] = cv2.dilate(imgs[i], dilate_kernel, iterations=1)
return imgs
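# Hedged smoke test (synthetic batch, mild hypothetical ranges): the output
# keeps the (n, h, w) shape while pixels are shifted/rotated/eroded in place.
_aug_demo = augmentation(np.full((1, 32, 96), 255, np.uint8),
                         rotation_range=2, scale_range=0.05)
assert _aug_demo.shape == (1, 32, 96)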
def illumination_compensation(img):
"""Illumination compensation technique for text image"""
def scale(img):
s = np.max(img) - np.min(img)
res = img / s
res -= np.min(res)
res *= 255
return res
img = img.astype(np.float32)
height, width = img.shape
sqrt_hw = np.sqrt(height * width)
bins = np.arange(0, 300, 10)
bins[26] = 255
hp = np.histogram(img, bins)
    hr = 255  # fallback so `hr` is defined even if no bin exceeds sqrt(h * w)
    for i in range(len(hp[0])):
        if hp[0][i] > sqrt_hw:
            hr = i * 10
            break
np.seterr(divide='ignore', invalid='ignore')
cei = (img - (hr + 50 * 0.3)) * 2
cei[cei > 255] = 255
cei[cei < 0] = 0
m1 = np.array([-1, 0, 1, -2, 0, 2, -1, 0, 1]).reshape((3, 3))
m2 = np.array([-2, -1, 0, -1, 0, 1, 0, 1, 2]).reshape((3, 3))
m3 = np.array([-1, -2, -1, 0, 0, 0, 1, 2, 1]).reshape((3, 3))
m4 = np.array([0, 1, 2, -1, 0, 1, -2, -1, 0]).reshape((3, 3))
eg1 = np.abs(cv2.filter2D(img, -1, m1))
eg2 = np.abs(cv2.filter2D(img, -1, m2))
eg3 = np.abs(cv2.filter2D(img, -1, m3))
eg4 = np.abs(cv2.filter2D(img, -1, m4))
eg_avg = scale((eg1 + eg2 + eg3 + eg4) / 4)
h, w = eg_avg.shape
eg_bin = np.zeros((h, w))
eg_bin[eg_avg >= 30] = 255
h, w = cei.shape
cei_bin = np.zeros((h, w))
cei_bin[cei >= 60] = 255
h, w = eg_bin.shape
tli = 255 * np.ones((h, w))
tli[eg_bin == 255] = 0
tli[cei_bin == 255] = 0
kernel = np.ones((3, 3), np.uint8)
erosion = cv2.erode(tli, kernel, iterations=1)
int_img = np.array(cei)
estimate_light_distribution(width, height, erosion, cei, int_img)
mean_filter = 1 / 121 * np.ones((11, 11), np.uint8)
ldi = cv2.filter2D(scale(int_img), -1, mean_filter)
result = np.divide(cei, ldi) * 260
result[erosion != 0] *= 1.5
result[result < 0] = 0
result[result > 255] = 255
return np.array(result, dtype=np.uint8)
def estimate_light_distribution(width, height, erosion, cei, int_img):
"""Light distribution performed by numba (thanks @Sundrops)"""
for y in range(width):
for x in range(height):
if erosion[x][y] == 0:
i = x
while i < erosion.shape[0] and erosion[i][y] == 0:
i += 1
end = i - 1
n = end - x + 1
if n <= 30:
h, e = [], []
for k in range(5):
if x - k >= 0:
h.append(cei[x - k][y])
if end + k < cei.shape[0]:
e.append(cei[end + k][y])
mpv_h, mpv_e = max(h), max(e)
for m in range(n):
int_img[x + m][y] = mpv_h + \
(m + 1) * ((mpv_e - mpv_h) / n)
x = end
break
datapath = []
label = []
fake_datapath = []
fake_datalabel = []
#font_list = ['alphabetizedCassetteTapes.ttf','ASensibleArmadillo.ttf','ArdiniaDemo.ttf','Buttercake.ttf','Heatting.ttf','KGNoMatterWhat.ttf','Sweetness.ttf']
font_list = ['alphabetizedCassetteTapes.ttf',
'ASensibleArmadillo.ttf', 'ArdiniaDemo.ttf', 'Buttercake.ttf']
for f in font_list:
genPath, genLabel = getGeneratedData(f)
fake_datapath += genPath
fake_datalabel += genLabel
datapath, label = getGeneratedData(Fake=False)
input_size = (1024, 128, 1)
# fake_datalabel = fake_datalabel[:20000]
# fake_datapath = fake_datapath[:20000]
# datapath = datapath[:10000]
# label = label[:10000]
path_to_font = '/home/kuadmin01/terng/Fonts/'
print('-------Generating dataset-------')
for i in range(len(fake_datalabel)):
font_name = fake_datapath[i].split('/')[-1].split('_')[0] + '.ttf'
    if i % 10000 == 0:
print(i)
generate(fake_datalabel[i], fake_datapath[i], path_to_font+font_name)
#datapath += fake_datapath
#label += fake_datalabel
print('data length --> ' + str(len(datapath)))
number_list = np.arange(0, len(datapath))
print("Original list : ", number_list)
random.shuffle(number_list) # shuffle method
print("List after first shuffle : ", number_list)
# train, test, valid = 0.7, 0.2, 0.1
shuffled = number_list.tolist()
train_data_order = shuffled[:int(0.7*len(shuffled))]
test_data_order = shuffled[int(0.7*len(shuffled)):int(0.9*len(shuffled))]
valid_data_order = shuffled[int(0.9*len(shuffled)): int(len(shuffled))]
print('test : ' + str(len(test_data_order)))
print('train : ' + str(len(train_data_order)))
print('valid : ' + str(len(valid_data_order)))
partitions = ['train', 'test', 'valid']
dataset = dict()
data = [train_data_order, test_data_order, valid_data_order]
for i in range(len(partitions)):
dataset[partitions[i]] = {"dt": [], "gt": []}
for j in data[i]:
dataset[partitions[i]]['dt'].append(datapath[j])
dataset[partitions[i]]['gt'].append(label[j])
print('-----preprocessing----')
for i in range(len(partitions)):
gt = []
dt = []
for d, g in zip(dataset[partitions[i]]['dt'], dataset[partitions[i]]['gt']):
txt = text_standardize(g).encode()
        if len(txt) > 0:
gt.append(txt)
dt.append(preproc(d, input_size))
dataset[partitions[i]]['gt'] = gt
dataset[partitions[i]]['dt'] = dt
print('-----compressing-----')
for i in partitions:
with h5py.File('/home/kuadmin01/terng/Dataset/dataset_for_experiment.hdf5', "a") as hf:
hf.create_dataset(
f"{i}/dt", data=dataset[i]['dt'], compression="gzip", compression_opts=9)
hf.create_dataset(
f"{i}/gt", data=dataset[i]['gt'], compression="gzip", compression_opts=9)
print(f"[OK] {i} partition.")
print(f"Transformation finished.")
| [
"numpy.sum",
"random.shuffle",
"numpy.floor",
"numpy.ones",
"cv2.transpose",
"cv2.warpAffine",
"numpy.histogram",
"numpy.arange",
"cv2.erode",
"cv2.getRotationMatrix2D",
"cv2.filter2D",
"cv2.dilate",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.copyMakeBorder",
"re.escape",
"numpy.apply_along... | [((2794, 2861), 're.compile', 're.compile', (['"""[\\\\-\\\\˗\\\\֊\\\\‐\\\\‑\\\\‒\\\\–\\\\—\\\\⁻\\\\₋\\\\−\\\\﹣\\\\-]"""', 're.UNICODE'], {}), "('[\\\\-\\\\˗\\\\֊\\\\‐\\\\‑\\\\‒\\\\–\\\\—\\\\⁻\\\\₋\\\\−\\\\﹣\\\\-]', re.UNICODE)\n", (2804, 2861), False, 'import re\n'), ((3080, 3112), 're.compile', 're.compile', (['"""[¶¤«»]"""', 're.UNICODE'], {}), "('[¶¤«»]', re.UNICODE)\n", (3090, 3112), False, 'import re\n'), ((3139, 3194), 're.compile', 're.compile', (['"""[\\\\(\\\\[\\\\{\\\\⁽\\\\₍\\\\❨\\\\❪\\\\﹙\\\\(]"""', 're.UNICODE'], {}), "('[\\\\(\\\\[\\\\{\\\\⁽\\\\₍\\\\❨\\\\❪\\\\﹙\\\\(]', re.UNICODE)\n", (3149, 3194), False, 'import re\n'), ((3213, 3268), 're.compile', 're.compile', (['"""[\\\\)\\\\]\\\\}\\\\⁾\\\\₎\\\\❩\\\\❫\\\\﹚\\\\)]"""', 're.UNICODE'], {}), "('[\\\\)\\\\]\\\\}\\\\⁾\\\\₎\\\\❩\\\\❫\\\\﹚\\\\)]', re.UNICODE)\n", (3223, 3268), False, 'import re\n'), ((3495, 3531), 're.compile', 're.compile', (['"""[^\\\\S\\\\n]+"""', 're.UNICODE'], {}), "('[^\\\\S\\\\n]+', re.UNICODE)\n", (3505, 3531), False, 'import re\n'), ((13856, 13883), 'random.shuffle', 'random.shuffle', (['number_list'], {}), '(number_list)\n', (13870, 13883), False, 'import random\n'), ((294, 332), 'numpy.zeros', 'np.zeros', (['(height, width, 3)', 'np.uint8'], {}), '((height, width, 3), np.uint8)\n', (302, 332), True, 'import numpy as np\n'), ((463, 501), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (475, 501), False, 'import cv2\n'), ((544, 571), 'PIL.Image.fromarray', 'Image.fromarray', (['cv2_im_rgb'], {}), '(cv2_im_rgb)\n', (559, 571), False, 'from PIL import ImageFont, ImageDraw, Image\n'), ((584, 606), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['pil_im'], {}), '(pil_im)\n', (598, 606), False, 'from PIL import ImageFont, ImageDraw, Image\n'), ((644, 677), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['fontpath', '(100)'], {}), '(fontpath, 100)\n', (662, 677), False, 'from PIL import ImageFont, ImageDraw, Image\n'), ((902, 941), 'cv2.imwrite', 'cv2.imwrite', (['filepath', 'cv2_im_processed'], {}), '(filepath, cv2_im_processed)\n', (913, 941), False, 'import cv2\n'), ((5311, 5356), 'numpy.asarray', 'np.asarray', (['(255 * (img >= threshold))', '"""uint8"""'], {}), "(255 * (img >= threshold), 'uint8')\n", (5321, 5356), True, 'import numpy as np\n'), ((5888, 5951), 'cv2.threshold', 'cv2.threshold', (['img', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (5901, 5951), False, 'import cv2\n'), ((6606, 6664), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'result[2]', 'result[1]'], {'borderValue': '(255)'}), '(img, result[2], result[1], borderValue=255)\n', (6620, 6664), False, 'import cv2\n'), ((6677, 6713), 'cv2.resize', 'cv2.resize', (['warp'], {'dsize': '(cols, rows)'}), '(warp, dsize=(cols, rows))\n', (6687, 6713), False, 'import cv2\n'), ((7626, 7651), 'cv2.resize', 'cv2.resize', (['img', 'new_size'], {}), '(img, new_size)\n', (7636, 7651), False, 'import cv2\n'), ((7669, 7716), 'cv2.threshold', 'cv2.threshold', (['img', '(254)', '(255)', 'cv2.THRESH_BINARY'], {}), '(img, 254, 255, cv2.THRESH_BINARY)\n', (7682, 7716), False, 'import cv2\n'), ((7954, 7975), 'cv2.transpose', 'cv2.transpose', (['target'], {}), '(target)\n', (7967, 7975), False, 'import cv2\n'), ((8310, 8339), 'numpy.expand_dims', 'np.expand_dims', (['imgs'], {'axis': '(-1)'}), '(imgs, axis=-1)\n', (8324, 8339), True, 'import numpy as np\n'), ((8929, 8987), 'numpy.random.uniform', 'np.random.uniform', 
(['(-height_shift_range)', 'height_shift_range'], {}), '(-height_shift_range, height_shift_range)\n', (8946, 8987), True, 'import numpy as np\n'), ((9003, 9053), 'numpy.random.uniform', 'np.random.uniform', (['(-rotation_range)', 'rotation_range'], {}), '(-rotation_range, rotation_range)\n', (9020, 9053), True, 'import numpy as np\n'), ((9066, 9103), 'numpy.random.uniform', 'np.random.uniform', (['(1 - scale_range)', '(1)'], {}), '(1 - scale_range, 1)\n', (9083, 9103), True, 'import numpy as np\n'), ((9122, 9178), 'numpy.random.uniform', 'np.random.uniform', (['(-width_shift_range)', 'width_shift_range'], {}), '(-width_shift_range, width_shift_range)\n', (9139, 9178), True, 'import numpy as np\n'), ((9196, 9259), 'numpy.float32', 'np.float32', (['[[1, 0, width_shift * w], [0, 1, height_shift * h]]'], {}), '([[1, 0, width_shift * w], [0, 1, height_shift * h]])\n', (9206, 9259), True, 'import numpy as np\n'), ((9274, 9332), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(w // 2, h // 2)', 'rotation', 'scale'], {}), '((w // 2, h // 2), rotation, scale)\n', (9297, 9332), False, 'import cv2\n'), ((10106, 10129), 'numpy.sqrt', 'np.sqrt', (['(height * width)'], {}), '(height * width)\n', (10113, 10129), True, 'import numpy as np\n'), ((10142, 10163), 'numpy.arange', 'np.arange', (['(0)', '(300)', '(10)'], {}), '(0, 300, 10)\n', (10151, 10163), True, 'import numpy as np\n'), ((10192, 10215), 'numpy.histogram', 'np.histogram', (['img', 'bins'], {}), '(img, bins)\n', (10204, 10215), True, 'import numpy as np\n'), ((10326, 10370), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (10335, 10370), True, 'import numpy as np\n'), ((10984, 11000), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (10992, 11000), True, 'import numpy as np\n'), ((11068, 11084), 'numpy.zeros', 'np.zeros', (['(h, w)'], {}), '((h, w))\n', (11076, 11084), True, 'import numpy as np\n'), ((11240, 11265), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (11247, 11265), True, 'import numpy as np\n'), ((11280, 11316), 'cv2.erode', 'cv2.erode', (['tli', 'kernel'], {'iterations': '(1)'}), '(tli, kernel, iterations=1)\n', (11289, 11316), False, 'import cv2\n'), ((11331, 11344), 'numpy.array', 'np.array', (['cei'], {}), '(cei)\n', (11339, 11344), True, 'import numpy as np\n'), ((11671, 11703), 'numpy.array', 'np.array', (['result'], {'dtype': 'np.uint8'}), '(result, dtype=np.uint8)\n', (11679, 11703), True, 'import numpy as np\n'), ((826, 842), 'numpy.array', 'np.array', (['pil_im'], {}), '(pil_im)\n', (834, 842), True, 'import numpy as np\n'), ((3316, 3345), 're.escape', 're.escape', (['string.punctuation'], {}), '(string.punctuation)\n', (3325, 3345), False, 'import re\n'), ((4542, 4565), 'numpy.floor', 'np.floor', (['(window[0] / 2)'], {}), '(window[0] / 2)\n', (4550, 4565), True, 'import numpy as np\n'), ((4608, 4672), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['img', 'pad', 'pad', 'pad', 'pad', 'cv2.BORDER_CONSTANT'], {}), '(img, pad, pad, pad, pad, cv2.BORDER_CONSTANT)\n', (4626, 4672), False, 'import cv2\n'), ((6222, 6282), 'numpy.asarray', 'np.asarray', (['[[1, alpha, shift_x], [0, 1, 0]]'], {'dtype': 'np.float'}), '([[1, alpha, shift_x], [0, 1, 0]], dtype=np.float)\n', (6232, 6282), True, 'import numpy as np\n'), ((6317, 6375), 'cv2.warpAffine', 'cv2.warpAffine', (['binary', 'transform', 'size', 'cv2.INTER_NEAREST'], {}), '(binary, transform, size, cv2.INTER_NEAREST)\n', (6331, 6375), False, 
'import cv2\n'), ((6419, 6466), 'numpy.apply_along_axis', 'np.apply_along_axis', (['calc_y_alpha', '(0)', 'shear_img'], {}), '(calc_y_alpha, 0, shear_img)\n', (6438, 6466), True, 'import numpy as np\n'), ((6876, 6913), 'cv2.imread', 'cv2.imread', (['img', 'cv2.IMREAD_GRAYSCALE'], {}), '(img, cv2.IMREAD_GRAYSCALE)\n', (6886, 6913), False, 'import cv2\n'), ((6990, 7029), 'cv2.imread', 'cv2.imread', (['image', 'cv2.IMREAD_GRAYSCALE'], {}), '(image, cv2.IMREAD_GRAYSCALE)\n', (7000, 7029), False, 'import cv2\n'), ((7246, 7332), 'numpy.asarray', 'np.asarray', (['img[boundbox[0]:boundbox[1], boundbox[2]:boundbox[3]]'], {'dtype': 'np.uint8'}), '(img[boundbox[0]:boundbox[1], boundbox[2]:boundbox[3]], dtype=np.\n uint8)\n', (7256, 7332), True, 'import numpy as np\n'), ((7745, 7759), 'numpy.sum', 'np.sum', (['binary'], {}), '(binary)\n', (7751, 7759), True, 'import numpy as np\n'), ((7857, 7890), 'numpy.ones', 'np.ones', (['[ht, wt]'], {'dtype': 'np.uint8'}), '([ht, wt], dtype=np.uint8)\n', (7864, 7890), True, 'import numpy as np\n'), ((8174, 8197), 'cv2.meanStdDev', 'cv2.meanStdDev', (['imgs[i]'], {}), '(imgs[i])\n', (8188, 8197), False, 'import cv2\n'), ((9535, 9624), 'cv2.warpAffine', 'cv2.warpAffine', (['imgs[i]', 'affine_mat', '(w, h)'], {'flags': 'cv2.INTER_NEAREST', 'borderValue': '(255)'}), '(imgs[i], affine_mat, (w, h), flags=cv2.INTER_NEAREST,\n borderValue=255)\n', (9549, 9624), False, 'import cv2\n'), ((9652, 9698), 'cv2.erode', 'cv2.erode', (['imgs[i]', 'erode_kernel'], {'iterations': '(1)'}), '(imgs[i], erode_kernel, iterations=1)\n', (9661, 9698), False, 'import cv2\n'), ((9717, 9765), 'cv2.dilate', 'cv2.dilate', (['imgs[i]', 'dilate_kernel'], {'iterations': '(1)'}), '(imgs[i], dilate_kernel, iterations=1)\n', (9727, 9765), False, 'import cv2\n'), ((9978, 9989), 'numpy.min', 'np.min', (['res'], {}), '(res)\n', (9984, 9989), True, 'import numpy as np\n'), ((10738, 10763), 'cv2.filter2D', 'cv2.filter2D', (['img', '(-1)', 'm1'], {}), '(img, -1, m1)\n', (10750, 10763), False, 'import cv2\n'), ((10782, 10807), 'cv2.filter2D', 'cv2.filter2D', (['img', '(-1)', 'm2'], {}), '(img, -1, m2)\n', (10794, 10807), False, 'import cv2\n'), ((10826, 10851), 'cv2.filter2D', 'cv2.filter2D', (['img', '(-1)', 'm3'], {}), '(img, -1, m3)\n', (10838, 10851), False, 'import cv2\n'), ((10870, 10895), 'cv2.filter2D', 'cv2.filter2D', (['img', '(-1)', 'm4'], {}), '(img, -1, m4)\n', (10882, 10895), False, 'import cv2\n'), ((11155, 11170), 'numpy.ones', 'np.ones', (['(h, w)'], {}), '((h, w))\n', (11162, 11170), True, 'import numpy as np\n'), ((11445, 11472), 'numpy.ones', 'np.ones', (['(11, 11)', 'np.uint8'], {}), '((11, 11), np.uint8)\n', (11452, 11472), True, 'import numpy as np\n'), ((11543, 11562), 'numpy.divide', 'np.divide', (['cei', 'ldi'], {}), '(cei, ldi)\n', (11552, 11562), True, 'import numpy as np\n'), ((15156, 15231), 'h5py.File', 'h5py.File', (['"""/home/kuadmin01/terng/Dataset/dataset_for_experiment.hdf5"""', '"""a"""'], {}), "('/home/kuadmin01/terng/Dataset/dataset_for_experiment.hdf5', 'a')\n", (15165, 15231), False, 'import h5py\n'), ((5512, 5529), 'numpy.where', 'np.where', (['(vec > 0)'], {}), '(vec > 0)\n', (5520, 5529), True, 'import numpy as np\n'), ((7409, 7424), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (7419, 7424), True, 'import numpy as np\n'), ((7725, 7736), 'numpy.sum', 'np.sum', (['img'], {}), '(img)\n', (7731, 7736), True, 'import numpy as np\n'), ((8066, 8082), 'numpy.asarray', 'np.asarray', (['imgs'], {}), '(imgs)\n', (8076, 8082), True, 'import numpy as np\n'), 
((9915, 9926), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (9921, 9926), True, 'import numpy as np\n'), ((9929, 9940), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (9935, 9940), True, 'import numpy as np\n'), ((10465, 10505), 'numpy.array', 'np.array', (['[-1, 0, 1, -2, 0, 2, -1, 0, 1]'], {}), '([-1, 0, 1, -2, 0, 2, -1, 0, 1])\n', (10473, 10505), True, 'import numpy as np\n'), ((10531, 10571), 'numpy.array', 'np.array', (['[-2, -1, 0, -1, 0, 1, 0, 1, 2]'], {}), '([-2, -1, 0, -1, 0, 1, 0, 1, 2])\n', (10539, 10571), True, 'import numpy as np\n'), ((10597, 10637), 'numpy.array', 'np.array', (['[-1, -2, -1, 0, 0, 0, 1, 2, 1]'], {}), '([-1, -2, -1, 0, 0, 0, 1, 2, 1])\n', (10605, 10637), True, 'import numpy as np\n'), ((10663, 10703), 'numpy.array', 'np.array', (['[0, 1, 2, -1, 0, 1, -2, -1, 0]'], {}), '([0, 1, 2, -1, 0, 1, -2, -1, 0])\n', (10671, 10703), True, 'import numpy as np\n'), ((6491, 6508), 'numpy.sum', 'np.sum', (['sum_alpha'], {}), '(sum_alpha)\n', (6497, 6508), True, 'import numpy as np\n'), ((8781, 8815), 'numpy.random.uniform', 'np.random.uniform', (['(1)', 'dilate_range'], {}), '(1, dilate_range)\n', (8798, 8815), True, 'import numpy as np\n'), ((8862, 8895), 'numpy.random.uniform', 'np.random.uniform', (['(1)', 'erode_range'], {}), '(1, erode_range)\n', (8879, 8895), True, 'import numpy as np\n'), ((3668, 3687), 'html.unescape', 'html.unescape', (['text'], {}), '(text)\n', (3681, 3687), False, 'import html\n')] |
#cython: profile=True
#cython: wraparound=False
#cython: boundscheck=False
#cython: initializedcheck=False
import cython
"""
Module for creating boundary conditions. Imported in mprans.SpatialTools.py
"""
import sys
import numpy as np
from proteus import AuxiliaryVariables
from proteus.ctransportCoefficients import (smoothedHeaviside,
smoothedHeaviside_integral)
from proteus import WaveTools as wt
class BC_RANS(BC_Base):
"""
Class regrouping boundary conditions for two-phase flows
"""
def __init__(self, shape=None, name=None, b_or=None, b_i=0., nd=None):
super(BC_RANS, self).__init__(shape=shape, name=name, b_or=b_or, b_i=b_i, nd=nd)
# _dirichlet
self.p_dirichlet = BoundaryCondition() # pressure
self.u_dirichlet = BoundaryCondition() # velocity u
self.v_dirichlet = BoundaryCondition() # velocity v
self.w_dirichlet = BoundaryCondition() # velocity w
self.vof_dirichlet = BoundaryCondition() # VOF
self.k_dirichlet = BoundaryCondition() # kappa
self.dissipation_dirichlet = BoundaryCondition() # dissipation
# _advective
self.p_advective = BoundaryCondition()
self.u_advective = BoundaryCondition()
self.v_advective = BoundaryCondition()
self.w_advective = BoundaryCondition()
self.vof_advective = BoundaryCondition()
self.k_advective = BoundaryCondition()
self.dissipation_advective = BoundaryCondition()
# _diffusive
self.u_diffusive = BoundaryCondition()
self.v_diffusive = BoundaryCondition()
self.w_diffusive = BoundaryCondition()
self.k_diffusive = BoundaryCondition()
self.dissipation_diffusive = BoundaryCondition()
# moveMesh boundary conditions
self.hx_dirichlet = BoundaryCondition()
self.hy_dirichlet = BoundaryCondition()
self.hz_dirichlet = BoundaryCondition()
self.u_stress = BoundaryCondition()
self.v_stress = BoundaryCondition()
self.w_stress = BoundaryCondition()
self.u_stress.uOfXT = 0.
self.v_stress.uOfXT = 0.
self.w_stress.uOfXT = 0.
def reset(self):
"""
Resets all BoundaryCondtion functions to None, apart from the BCs
affecting: moving mesh
"""
# self.BC_type = 'None'
self.p_dirichlet.resetBC()
self.u_dirichlet.resetBC()
self.v_dirichlet.resetBC()
self.w_dirichlet.resetBC()
self.vof_dirichlet.resetBC()
self.k_dirichlet.resetBC()
self.dissipation_dirichlet.resetBC()
self.p_advective.resetBC()
self.u_advective.resetBC()
self.v_advective.resetBC()
self.w_advective.resetBC()
self.vof_advective.resetBC()
self.k_advective.resetBC()
self.dissipation_advective.resetBC()
self.u_diffusive.resetBC()
self.v_diffusive.resetBC()
self.w_diffusive.resetBC()
self.k_diffusive.resetBC()
self.dissipation_diffusive.resetBC()
def setNonMaterial(self):
"""
Sets non-material boundary conditions (diffusive flux and advective vof
to 0.).
"""
self.reset()
self.BC_type = 'NonMaterial'
self.vof_advective.setConstantBC(0.)
self.u_diffusive.setConstantBC(0.)
self.v_diffusive.setConstantBC(0.)
self.w_diffusive.setConstantBC(0.)
def setTank(self):
b_or = self._b_or
if b_or[0] == 1 or b_or[0] == -1:
self.hx_dirichlet.setConstantBC(0.)
self.u_stress.uOfXT = None
elif b_or[1] == 1 or b_or[1] == -1:
self.hy_dirichlet.setConstantBC(0.)
self.v_stress.uOfXT = None
elif len(b_or) > 2 and (b_or[2] == 1 or b_or[2] == -1):
self.hz_dirichlet.setConstantBC(0.)
self.w_stress.uOfXT = None
def setFixedNodes(self):
"""
For moving domains: fixes nodes/boundary
"""
self.hx_dirichlet.setConstantBC(0.)
self.hy_dirichlet.setConstantBC(0.)
self.hz_dirichlet.setConstantBC(0.)
self.u_stress.uOfXT = 0
self.v_stress.uOfXT = 0
self.w_stress.uOfXT = 0
def setNoSlip(self):
"""
Sets no slip conditions at the boundary
"""
self.reset()
self.BC_type = 'NoSlip'
self.u_dirichlet.setConstantBC(0.)
self.v_dirichlet.setConstantBC(0.)
self.w_dirichlet.setConstantBC(0.)
self.p_advective.setConstantBC(0.)
self.vof_advective.setConstantBC(0.)
self.k_dirichlet.setConstantBC(0.)
self.dissipation_diffusive.setConstantBC(0.)
def setFreeSlip(self):
"""
Sets free slip conditions at the boundary
"""
self.reset()
self.BC_type = 'FreeSlip'
self.p_advective.setConstantBC(0.)
self.u_advective.setConstantBC(0.)
self.v_advective.setConstantBC(0.)
self.w_advective.setConstantBC(0.)
self.vof_advective.setConstantBC(0.)
self.k_dirichlet.setConstantBC(0.)
self.u_diffusive.setConstantBC(0.)
self.v_diffusive.setConstantBC(0.)
self.w_diffusive.setConstantBC(0.)
self.dissipation_diffusive.setConstantBC(0.)
def setAtmosphere(self, orientation=None, vof_air=1.):
"""
Sets atmosphere boundary conditions (water can come out)
(!) pressure dirichlet set to 0 for this BC
Parameters
----------
orientation: Optional[array_like]
orientation of the boundary. Optional if orientation was already
passed when creating the BC_RANS class instance.
vof_air: Optional[float]
VOF value of air (default is 1.)
"""
self.BC_type = 'OpenAir'
if orientation is None and self._b_or is not None:
orientation = self._b_or
self.reset()
self.p_dirichlet.setConstantBC(0.)
if self._b_or[0] == 1. or self._b_or[0] == -1.:
self.u_dirichlet.setConstantBC(0.)
else:
self.u_dirichlet.resetBC()
if self._b_or[1] == 1. or self._b_or[1] == -1.:
self.v_dirichlet.setConstantBC(0.)
else:
self.v_dirichlet.resetBC()
if self._b_or[2] == 1. or self._b_or[2] == -1.:
self.w_dirichlet.setConstantBC(0.)
else:
self.w_dirichlet.resetBC()
self.vof_dirichlet.setConstantBC(vof_air) # air
self.u_diffusive.setConstantBC(0.)
self.v_diffusive.setConstantBC(0.)
self.w_diffusive.setConstantBC(0.)
self.k_diffusive.setConstantBC(0.)
self.dissipation_diffusive.setConstantBC(0.)
def setMoveMesh(self, last_pos, h=(0., 0., 0.), rot_matrix=None):
"""
Sets boundary conditions for moving the mesh with a rigid body
Parameters
----------
last_pos: array_like
            last position of rigid body
h: array_like
displacement of the body
rot_matrix:
rotation matrix describing displament due to rotation between last
position and new position (3x3 array)
(!) if set manually, the input arrays should be updated externally
        without losing their memory address
"""
if rot_matrix is None:
rot_matrix = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
self.body_python_rot_matrix = rot_matrix
self.body_python_last_pos = last_pos
self.body_python_h = h
self.hx_dirichlet.uOfXT = lambda x, t: self.__cpp_MoveMesh_hx(x, t)
self.hy_dirichlet.uOfXT = lambda x, t: self.__cpp_MoveMesh_hy(x, t)
self.hz_dirichlet.uOfXT = lambda x, t: self.__cpp_MoveMesh_hz(x, t)
def __cpp_MoveMesh_h(self, x, t):
x_0 = cython.declare(cython.double[3])
new_x_0 = cython.declare(cython.double[3])
hx = cython.declare(cython.double[3])
x_0[0] = x[0]-self.body_python_last_pos[0]
x_0[1] = x[1]-self.body_python_last_pos[1]
x_0[2] = x[2]-self.body_python_last_pos[2]
new_x_0 = np.dot(x_0, self.body_python_rot_matrix)
hx[0] = new_x_0[0]-x_0[0]+self.body_python_h[0]
hx[1] = new_x_0[1]-x_0[1]+self.body_python_h[1]
hx[2] = new_x_0[2]-x_0[2]+self.body_python_h[2]
return hx
def __cpp_MoveMesh_hx(self, x, t):
return self.__cpp_MoveMesh_h(x, t)[0]
def __cpp_MoveMesh_hy(self, x, t):
return self.__cpp_MoveMesh_h(x, t)[1]
def __cpp_MoveMesh_hz(self, x, t):
return self.__cpp_MoveMesh_h(x, t)[2]
def setUnsteadyTwoPhaseVelocityInlet(self, wave, vert_axis=None, wind_speed=None,
smoothing=0., vof_air=1., vof_water=0.):
"""
Imposes a velocity profile on the fluid with input wave and wind
conditions.
Parameters
----------
wave: proteus.WaveTools
class describing a wave (from proteus.WaveTools)
vert_axis: Optional[int]
index of vertical position vector (x:0, y:1, z:2), must always be
aligned with gravity. If not set, will be 1 in 2D (y), 2 in 3D (z).
wind_speed: Optional[array_like]
vof_air: Optional[float]
VOF value of air (default is 1.)
vof_water: Optional[float]
VOF value of water (default is 0.)
        Below the sea water level: fluid velocity set to wave speed.
Above the sea water level: fluid velocity set to wind speed.
(!) Boundary condition relies on specific variables defined in Context:
he (mesh element size) and ecH (number of elements for smoothing)
"""
self.reset()
if vert_axis is None:
vert_axis = self.nd-1
if wind_speed is None:
wind_speed = np.zeros(3)
self.waves = __cppClass_WavesCharacteristics(waves=wave, vert_axis=vert_axis, b_or=self._b_or, wind_speed=wind_speed, smoothing=smoothing, vof_water=vof_water, vof_air=vof_air)
self.u_dirichlet.uOfXT = lambda x, t: self.__cpp_UnsteadyTwoPhaseVelocityInlet_u_dirichlet(x, t)
self.v_dirichlet.uOfXT = lambda x, t: self.__cpp_UnsteadyTwoPhaseVelocityInlet_v_dirichlet(x, t)
self.w_dirichlet.uOfXT = lambda x, t: self.__cpp_UnsteadyTwoPhaseVelocityInlet_w_dirichlet(x, t)
self.vof_dirichlet.uOfXT = lambda x, t: self.__cpp_UnsteadyTwoPhaseVelocityInlet_vof_dirichlet(x, t)
self.p_advective.uOfXT = lambda x, t: self.__cpp_UnsteadyTwoPhaseVelocityInlet_p_advective(x, t)
def __cpp_UnsteadyTwoPhaseVelocityInlet_u_dirichlet(self, x, t):
cython.declare(xx=cython.double[3])
xx[0] = x[0]
xx[1] = x[1]
xx[2] = x[2]
return self.waves.__cpp_calculate_velocity(xx, t)[0]
def __cpp_UnsteadyTwoPhaseVelocityInlet_v_dirichlet(self, x, t):
cython.declare(xx=cython.double[3])
xx[0] = x[0]
xx[1] = x[1]
xx[2] = x[2]
return self.waves.__cpp_calculate_velocity(xx, t)[1]
def __cpp_UnsteadyTwoPhaseVelocityInlet_w_dirichlet(self, x, t):
cython.declare(xx=cython.double[3])
xx[0] = x[0]
xx[1] = x[1]
xx[2] = x[2]
return self.waves.__cpp_calculate_velocity(xx, t)[2]
def __cpp_UnsteadyTwoPhaseVelocityInlet_p_advective(self, x, t):
cython.declare(xx=cython.double[3])
xx[0] = x[0]
xx[1] = x[1]
xx[2] = x[2]
return self.waves.__cpp_calculate_pressure(xx, t)
def __cpp_UnsteadyTwoPhaseVelocityInlet_vof_dirichlet(self, x, t):
cython.declare(xx=cython.double[3])
xx[0] = x[0]
xx[1] = x[1]
xx[2] = x[2]
return self.waves.__cpp_calculate_vof(xx, t)
# FOLLOWING BOUNDARY CONDITION IS UNTESTED #
def setTwoPhaseVelocityInlet(self, U, waterLevel, vert_axis=None, air=1.,
water=0.):
"""
        Imposes a velocity profile below the sea level and an open
        boundary above the sea level.
:param U: Velocity vector at the global system.
:param waterLevel: water level at global coordinate system.
:param vert_axis: index of vertical in position vector, must always be
        aligned with gravity, by default set to 1.
:param air: Volume fraction for air (1.0 by default).
:param water: Volume fraction for water (0.0 by default).
Below the seawater level, the condition returns the _dirichlet and
p_advective condition according to the inflow velocity.
Above the sea water level, the condition returns the gravity as zero,
and sets _dirichlet condition to zero, only if there is a zero inflow
velocity component.
(!) This condition is best used for boundaries and gravity aligned with
one of the main axes.
"""
# self.reset()
# self.BC_type = 'TwoPhaseVelocityInlet'
# U = np.array(U)
# if vert_axis is None:
# vert_axis = self.Shape.Domain.nd - 1
# def get_inlet_ux_dirichlet_cython(i):
# def get_inlet_ux_dirichlet():
# def ux_dirichlet(x, t):
# if x[vert_axis] < waterLevel:
# return U[i]
# elif x[vert_axis] >= waterLevel and U[i] == 0:
# return 0.
# return ux_dirichlet
# return get_inlet_ux_dirichlet
# def inlet_vof_dirichlet_cython():
# def inlet_vof_dirichlet(x, t):
# if x[vert_axis] < waterLevel:
# return water
# elif x[vert_axis] >= waterLevel:
# return air
# return inlet_vof_dirichlet
# def inlet_p_advective_cython():
# def inlet_p_advective(x, t):
# b_or = self._b_or[self._b_i]
# u_p = np.sum(U * b_or)
# # This is the normal velocity, based on the inwards boundary
# # orientation -b_or
# u_p = -u_p
# if x[vert_axis] < waterLevel:
# return u_p
# elif x[vert_axis] >= waterLevel:
# return None
# return inlet_p_advective
# self.u_dirichlet.init_cython = get_inlet_ux_dirichlet_cython(0)
# self.v_dirichlet.init_cython = get_inlet_ux_dirichlet_cython(1)
# if len(U) == 3:
# self.w_dirichlet.init_cython = get_inlet_ux_dirichlet_cython(2)
# self.vof_dirichlet.init_cython = inlet_vof_dirichlet_cython
# self.p_advective.init_cython = inlet_p_advective_cython
# def setHydrostaticPressureOutlet(self, rho, g, refLevel, vof, pRef=0.0,
# vert_axis=-1):
# self.reset()
# a0 = pRef - rho*g[vert_axis]*refLevel
# a1 = rho*g[vert_axis]
# # This is the normal velocity, based on the boundary orientation
# def get_outlet_ux_dirichlet(i):
# def ux_dirichlet(x, t):
# b_or = self._b_or[self._b_i]
# if b_or[i] == 0:
# return 0.
# return ux_dirichlet
# self.u_dirichlet.uOfXT = get_outlet_ux_dirichlet(0)
# self.v_dirichlet.uOfXT = get_outlet_ux_dirichlet(1)
# if len(g) == 3:
# self.w_dirichlet.uOfXT = get_outlet_ux_dirichlet(2)
# self.p_dirichlet.setLinearBC(a0, a1, vert_axis)
# self.vof_dirichlet.setConstantBC(vof)
# self.u_diffusive.setConstantBC(0.)
# self.v_diffusive.setConstantBC(0.)
# self.w_diffusive.setConstantBC(0.)
pass
# FOLLOWING BOUNDARY CONDITION IS UNTESTED #
def setHydrostaticPressureOutletWithDepth(self, seaLevel, rhoUp, rhoDown, g,
refLevel, pRef=0.0, vert_axis=None,
air=1.0, water=0.0):
"""Imposes a hydrostatic pressure profile and open boundary conditions
        with a known outflow depth
:param rhoUp: Phase density of the upper part.
:param rhoDown: Phase density of the lower part.
:param g: Gravitational acceleration vector.
:param refLevel: Level at which pressure = pRef.
:param pRef: Reference value for the pressure at x[vert_axis]=refLevel,
be default set to 0.
:param vert_axis: index of vertical in position vector, must always be
aligned with gravity, by default set to 1.
:return: hydrostaticPressureOutlet except when the pressure and the
vof are defined. Then it returns the pressure and vof profile
based on the known depth.
If the boundary is aligned with one of the main axes, sets the
tangential velocity components to zero as well.
(!) This condition is best used for boundaries and gravity aligned with
one of the main axes.
"""
# self.reset()
# if vert_axis is None:
# vert_axis = self.Shape.Domain.nd - 1
# def hydrostaticPressureOutletWithDepth_p_dirichlet(x, t):
# if x[vert_axis] < seaLevel:
# a0 = pRef-rhoUp*g[vert_axis]*(refLevel-seaLevel)-rhoDown*g[vert_axis]*seaLevel
# a1 = rhoDown*g[vert_axis]
# return a0 + a1*x[vert_axis]
# def hydrostaticPressureOutletWithDepth_vof_dirichlet(x, t):
# if x[vert_axis] < seaLevel:
# return water
# self.setHydrostaticPressureOutlet(rhoUp, g, refLevel, pRef, vert_axis)
# self.p_dirichlet.uOfXT = hydrostaticPressureOutletWithDepth_p_dirichlet
# self.vof_dirichlet.uOfXT = hydrostaticPressureOutletWithDepth_vof_dirichlet
pass
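# Hypothetical usage sketch (assumes the surrounding proteus/mprans setup,
# where BC_Base and BoundaryCondition come from the compiled module):
#   bc = BC_RANS(shape=tank, name='y+', b_or=np.array([0., 1., 0.]), nd=2)
#   bc.setAtmosphere()  # open top: p = 0, vof = air, zero diffusive fluxes
#   bc.setNoSlip()      # or a solid wall: zero velocity and advective fluxes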
# for regions
class RelaxationZone:
"""
Holds information about a relaxation zone (wave generation/absorption
or porous zone)
Parameters
----------
zone_type: string
type of zone, can be set to 'absorption', 'generation', or 'porous'
center: array_like
coordinates of center of the zone
orientation: array_like
orientation for absorption/generation zones: from boundary to tank
epsFact_solid: float
half the zone length
waves: Optional[proteus.WaveTools]
class instance of a wave from proteus.WaveTools (must be set for
generation zone)
shape: Optional[proteus.SpatialTools.Shape]
shape class instance containing region
dragAlpha: Optional[float]
parameter for porous zones (default: 0.5/1.005e-6)
dragBeta: Optional[float]
parameter for porous zones (default: 0.)
porosity: Optional[float]
parameter for porous zone (default: 1.)
"""
def __cinit__(self, zone_type, center, orientation, epsFact_solid,
waves=None, shape=None, wind_speed=np.array([0.,0.,0.]),
dragAlpha=0.5/1.005e-6, dragBeta=0., porosity=1., vert_axis=None, smoothing=0.,
he=0., ecH=3., vof_water=0., vof_air=1.):
self.Shape = shape
self.nd = self.Shape.Domain.nd
self.zone_type = zone_type
self.center = center
self.orientation = orientation
if vert_axis is None:
vert_axis = self.Shape.Domain.nd-1
if waves is not None:
self.waves = __cppClass_WavesCharacteristics(waves=waves, wind_speed=wind_speed, vert_axis=vert_axis, smoothing=smoothing, vof_water=vof_water, vof_air=vof_air)
self.epsFact_solid = epsFact_solid
self.dragAlpha = dragAlpha
self.dragBeta = dragBeta
self.porosity = porosity
self.zero_vel = np.zeros(3)
def calculate_init(self):
if self.zone_type == 'generation':
#self.u = &self.waves.u
#self.eta = &self.waves.eta
self.uu = self.__cpp_calculate_vel_wave
self.phi = self.__cpp_calculate_phi_solid
elif self.zone_type == 'absorption':
self.uu = self.__cpp_calculate_vel_zero
self.phi = self.__cpp_calculate_phi_solid
elif self.zone_type == 'porous':
self.uu = self.__cpp_calculate_vel_zero
self.phi = self.__cpp_calculate_phi_solid_porous
def calculate_phi(self, x):
return self.phi(self, x)
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.initializedcheck(False)
def __cpp_calculate_phi_solid(self, x):
"""
Used for RelaxationZone only
"""
cython.declare(d=cython.double[3], o=cython.double[3])
d[0] = self.center[0]-x[0]
d[1] = self.center[1]-x[1]
d[2] = self.center[2]-x[2]
o[0] = self.orientation[0]
o[1] = self.orientation[1]
o[2] = self.orientation[2]
phi = o[0]*d[0]+o[1]*d[1]+o[2]*d[2]
return phi
def __cpp_calculate_phi_solid_porous(self, x):
return self.epsFact_solid
def calculate_vel(self, x, t):
return self.uu(self, x, t)
def __cpp_calculate_vel_zero(self, x, t):
return self.zero_vel
def __cpp_calculate_vel_wave(self, x, t):
return self.waves.__cpp_calculate_velocity(x, t)
class RelaxationZoneWaveGenerator():
"""
Prescribe a velocity penalty scaling in a material zone via a
Darcy-Forchheimer penalty
Parameters
----------
zones: dict
dictionary with key as the region flag and values as a RelaxationZone
class
nd: int
number of dimensions of domain
"""
def __init__(self, zones, nd):
self.zones = zones
self.nd = nd
def attachModel(self, model, ar):
self.model = model
self.ar = ar
return self
def attachAuxiliaryVariables(self,avDict):
pass
def calculate_init(self):
max_key = 0
for key, zone in self.zones.iteritems():
zone.calculate_init()
if key > max_key:
max_key = key
self.max_flag = max_key
self.zones_array = np.empty(self.max_flag+1, dtype=object)
for key, zone in self.zones.iteritems():
self.zones_array[key] = zone
def calculate(self):
self.__cpp_iterate()
def __cpp_iterate(self):
        nl = len(self.model.levelModelList)
        x = cython.declare(cython.double[3])  # buffer for quadrature point coordinates
for l in range(nl): # usually only 1
# initialisation of variables before costly loop
m = self.model.levelModelList[l]
nE = m.coefficients.q_phi.shape[0]
nk = m.coefficients.q_phi.shape[1]
t = m.timeIntegration.t
qx = m.q['x']
q_phi_solid = m.coefficients.q_phi_solid
q_velocity_solid = m.coefficients.q_velocity_solid
mTypes = m.mesh.elementMaterialTypes
# costly loop
for eN in range(nE):
mType = mTypes[eN]
                if mType <= self.max_flag:  # zones_array holds max_flag + 1 entries
zone = self.zones_array[mType]
if zone is not None:
for k in range(nk):
x[0] = qx[eN, k, 0]
x[1] = qx[eN, k, 1]
x[2] = qx[eN, k, 2]
#print qx.__array_interface__['data'] == m.q['x'].__array_interface__['data']
#print x.__array_interface__['data'] == m.q['x'][eN, k].__array_interface__['data']
phi = zone.calculate_phi(x)
q_phi_solid[eN, k] = phi
u = zone.calculate_vel(x, t)
q_velocity_solid[eN, k, 0] = u[0]
q_velocity_solid[eN, k, 1] = u[1]
if self.nd > 2:
q_velocity_solid[eN, k, 2] = u[2]
m.q['phi_solid'] = q_phi_solid
m.q['velocity_solid'] = q_velocity_solid
class __cppClass_WavesCharacteristics:
def __init__(self, waves, vert_axis, wind_speed=None, b_or=None, smoothing=0., vof_water=0., vof_air = 1.):
self.WT = waves # wavetools wave
self.vert_axis = vert_axis
self.zero_vel = np.zeros(3)
self._b_or = b_or
self.smoothing = smoothing
self.vof_air = vof_air
self.vof_water = vof_water
if wind_speed is None:
self.wind_speed = self.zero_vel
else:
self.wind_speed = wind_speed
def __cpp_calculate_velocity(self, x, t):
cython.declare(u=cython.double[3])
cython.declare(xx=cython.double[3])
cython.declare(x_max=cython.double[3])
xx[0] = x[0]
xx[1] = x[1]
xx[2] = x[2]
phi = self.__cpp_calculate_phi(x, t)
if phi <= 0.:
# no smoothing below mwl, or wave peak could get chopped off
H = 0
waterSpeed = self.WT.u(xx, t)
elif 0 < phi <= self.smoothing:
# smoothing on half the range of VOF (above wave crest)
H = smoothedHeaviside(self.smoothing/2.,phi-self.smoothing/2.)
# use max velocity of wave for water
x_max[0] = x[0]
x_max[1] = x[1]
x_max[2] = x[2]
x_max[self.vert_axis] = x[self.vert_axis]-phi
waterSpeed = self.WT.u(x_max, t)
else:
H = 1.
waterSpeed = self.zero_vel
u[0] = H*self.wind_speed[0] + (1-H)*waterSpeed[0]
u[1] = H*self.wind_speed[1] + (1-H)*waterSpeed[1]
u[2] = H*self.wind_speed[2] + (1-H)*waterSpeed[2]
return u
def __cpp_calculate_pressure(self, x, t):
# This is the normal velocity, based on the outwards boundary
# orientation b_or
# needs to be equal to -ux_dirichlet
ux = self.__cpp_calculate_velocity(x, t)
b0, b1, b2 = self._b_or[0], self._b_or[1], self._b_or[2]
u0, u1, u2 = ux[0], ux[1], ux[2]
return b0*u0+b1*u1+b2*u2
def __cpp_calculate_phi(self, x, t):
cython.declare(xx=cython.double[3])
xx[0] = x[0]
xx[1] = x[1]
xx[2] = x[2]
level = self.WT.mwl + self.WT.eta(xx,t)
return x[self.vert_axis]-level
def __cpp_calculate_vof(self, x, t):
phi = self.__cpp_calculate_phi(x, t)
H = self.__cpp_calculate_smoothing_H(phi)
return H
def __cpp_calculate_smoothing_H(self, phi):
if phi >= self.smoothing:
H = 1.
elif self.smoothing > 0 and -self.smoothing < phi < self.smoothing:
H = smoothedHeaviside(self.smoothing, phi)
elif phi <= -self.smoothing:
H = 0.
return H
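# Note on the blending: H ramps smoothly from 0 (water) to 1 (air), so the
# VOF value and the mix u = H * wind_speed + (1 - H) * wave_velocity vary
# continuously across the free surface; for the velocity the ramp is applied
# only above the instantaneous surface (phi > 0) so wave crests keep their
# full orbital velocity.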
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.initializedcheck(False)
def __x_to_cpp(x):
    cython.declare(xx=cython.double[3])
xx[0] = x[0]
xx[1] = x[1]
xx[2] = x[2]
return xx
| [
"cython.boundscheck",
"numpy.empty",
"numpy.zeros",
"cython.initializedcheck",
"proteus.ctransportCoefficients.smoothedHeaviside",
"numpy.array",
"cython.declare",
"numpy.dot",
"cython.wraparound"
] | [((26820, 26845), 'cython.boundscheck', 'cython.boundscheck', (['(False)'], {}), '(False)\n', (26838, 26845), False, 'import cython\n'), ((26847, 26871), 'cython.wraparound', 'cython.wraparound', (['(False)'], {}), '(False)\n', (26864, 26871), False, 'import cython\n'), ((26873, 26903), 'cython.initializedcheck', 'cython.initializedcheck', (['(False)'], {}), '(False)\n', (26896, 26903), False, 'import cython\n'), ((20541, 20566), 'cython.boundscheck', 'cython.boundscheck', (['(False)'], {}), '(False)\n', (20559, 20566), False, 'import cython\n'), ((20572, 20596), 'cython.wraparound', 'cython.wraparound', (['(False)'], {}), '(False)\n', (20589, 20596), False, 'import cython\n'), ((20602, 20632), 'cython.initializedcheck', 'cython.initializedcheck', (['(False)'], {}), '(False)\n', (20625, 20632), False, 'import cython\n'), ((26927, 26955), 'cython.declare', 'cython.declare', ([], {'xx': 'double[3]'}), '(xx=double[3])\n', (26941, 26955), False, 'import cython\n'), ((7957, 7989), 'cython.declare', 'cython.declare', (['cython.double[3]'], {}), '(cython.double[3])\n', (7971, 7989), False, 'import cython\n'), ((8009, 8041), 'cython.declare', 'cython.declare', (['cython.double[3]'], {}), '(cython.double[3])\n', (8023, 8041), False, 'import cython\n'), ((8056, 8088), 'cython.declare', 'cython.declare', (['cython.double[3]'], {}), '(cython.double[3])\n', (8070, 8088), False, 'import cython\n'), ((8261, 8301), 'numpy.dot', 'np.dot', (['x_0', 'self.body_python_rot_matrix'], {}), '(x_0, self.body_python_rot_matrix)\n', (8267, 8301), True, 'import numpy as np\n'), ((10794, 10829), 'cython.declare', 'cython.declare', ([], {'xx': 'cython.double[3]'}), '(xx=cython.double[3])\n', (10808, 10829), False, 'import cython\n'), ((11031, 11066), 'cython.declare', 'cython.declare', ([], {'xx': 'cython.double[3]'}), '(xx=cython.double[3])\n', (11045, 11066), False, 'import cython\n'), ((11268, 11303), 'cython.declare', 'cython.declare', ([], {'xx': 'cython.double[3]'}), '(xx=cython.double[3])\n', (11282, 11303), False, 'import cython\n'), ((11505, 11540), 'cython.declare', 'cython.declare', ([], {'xx': 'cython.double[3]'}), '(xx=cython.double[3])\n', (11519, 11540), False, 'import cython\n'), ((11741, 11776), 'cython.declare', 'cython.declare', ([], {'xx': 'cython.double[3]'}), '(xx=cython.double[3])\n', (11755, 11776), False, 'import cython\n'), ((19098, 19123), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (19106, 19123), True, 'import numpy as np\n'), ((19895, 19906), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (19903, 19906), True, 'import numpy as np\n'), ((20746, 20800), 'cython.declare', 'cython.declare', ([], {'d': 'cython.double[3]', 'o': 'cython.double[3]'}), '(d=cython.double[3], o=cython.double[3])\n', (20760, 20800), False, 'import cython\n'), ((22259, 22300), 'numpy.empty', 'np.empty', (['(self.max_flag + 1)'], {'dtype': 'object'}), '(self.max_flag + 1, dtype=object)\n', (22267, 22300), True, 'import numpy as np\n'), ((24348, 24359), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (24356, 24359), True, 'import numpy as np\n'), ((24673, 24707), 'cython.declare', 'cython.declare', ([], {'u': 'cython.double[3]'}), '(u=cython.double[3])\n', (24687, 24707), False, 'import cython\n'), ((24716, 24751), 'cython.declare', 'cython.declare', ([], {'xx': 'cython.double[3]'}), '(xx=cython.double[3])\n', (24730, 24751), False, 'import cython\n'), ((24760, 24798), 'cython.declare', 'cython.declare', ([], {'x_max': 'cython.double[3]'}), '(x_max=cython.double[3])\n', (24774, 
24798), False, 'import cython\n'), ((26171, 26206), 'cython.declare', 'cython.declare', ([], {'xx': 'cython.double[3]'}), '(xx=cython.double[3])\n', (26185, 26206), False, 'import cython\n'), ((7428, 7489), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (7436, 7489), True, 'import numpy as np\n'), ((9990, 10001), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (9998, 10001), True, 'import numpy as np\n'), ((25186, 25253), 'proteus.ctransportCoefficients.smoothedHeaviside', 'smoothedHeaviside', (['(self.smoothing / 2.0)', '(phi - self.smoothing / 2.0)'], {}), '(self.smoothing / 2.0, phi - self.smoothing / 2.0)\n', (25203, 25253), False, 'from proteus.ctransportCoefficients import smoothedHeaviside, smoothedHeaviside_integral\n'), ((26705, 26743), 'proteus.ctransportCoefficients.smoothedHeaviside', 'smoothedHeaviside', (['self.smoothing', 'phi'], {}), '(self.smoothing, phi)\n', (26722, 26743), False, 'from proteus.ctransportCoefficients import smoothedHeaviside, smoothedHeaviside_integral\n')] |
import os
import sys
import time
import torch
import random
import argparse
import numpy as np
from src.GAL import *
parser = argparse.ArgumentParser(description=' ')
parser.add_argument('--cuda', type=int, default=-1, help='Which GPU to run on (-1 for using CPU, 9 for not specifying which GPU to use.)')
parser.add_argument('--dataSet', type=str, default='weibo_s')
parser.add_argument('--file_paths', type=str, default='file_paths.json')
parser.add_argument('--config_dir', type=str, default='./configs')
parser.add_argument('--logs_dir', type=str, default='./logs')
parser.add_argument('--out_dir', default='./results')
parser.add_argument('--name', type=str, default='debug')
parser.add_argument('--seed', type=int, default=2019)
parser.add_argument('--epochs', type=int, default=5)
parser.add_argument('--b_sz', type=int, default=100)
parser.add_argument('--n_gnnlayer', type=int, default=2)
parser.add_argument('--out_emb_size', type=int, default=128)
parser.add_argument('--tvt_split', type=int, default=0, help='Which of the 5 presets of train-validation-test data splits to use. (0~4), used for bitcoin dataset.')
parser.add_argument('--C', type=float, default=20)
parser.add_argument('--n_block', type=int, default=-1)
parser.add_argument('--thresh', type=float, default=-1)
parser.add_argument('--a_loss_weight', type=float, default=4)
parser.add_argument('--max_vali_f1', type=float, default=0)
parser.add_argument('--agg_func', type=str, default='MEAN')
parser.add_argument('--simi_func', type=str, default='cos')
parser.add_argument('--learn_method', type=str, default='bigal')
parser.add_argument('--loss', type=str, default='1010')
parser.add_argument('--a_loss', type=str, default='none')
parser.add_argument('--unsup_loss', type=str, default='normal')
parser.add_argument('--over_sample', type=str, default='none')
parser.add_argument('--feature', type=str, default='all')
parser.add_argument('--biased_rw', action='store_true')
parser.add_argument('--cluster_aloss', action='store_true')
parser.add_argument('--best_rw', action='store_true')
parser.add_argument('--gcn', action='store_true')
parser.add_argument('--gat', action='store_true')
parser.add_argument('--no_save_embs', action='store_true')
parser.add_argument('--batch_output', action='store_true')
parser.add_argument('--nognn', action='store_true')
parser.add_argument('--noblock', action='store_true')
args = parser.parse_args()
args.argv = sys.argv
if torch.cuda.is_available():
if args.cuda == -1:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
else:
device_id = torch.cuda.current_device()
print('using device', device_id, torch.cuda.get_device_name(device_id))
args.device = torch.device(f"cuda:{args.cuda}" if args.cuda>=0 else "cpu")
if args.cuda == 9:
args.device = torch.device('cuda')
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
def main():
args.name = f'{args.name}_{args.dataSet}_{args.learn_method}_{args.feature}_loss{args.loss}_{args.n_gnnlayer}layers_simi-{args.simi_func}_{args.a_loss}_{time.strftime("%m-%d_%H-%M")}'
args.out_path = args.out_dir + '/' + args.name
if not os.path.isdir(args.out_path): os.mkdir(args.out_path)
args.biased_rw = True
args.best_rw = True
args.embedding_ready_methods = set(['feature', 'rand', 'rand2', 'svdgnn', 'lr'])
GAL(args)
if __name__ == '__main__':
main()
| [
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"os.path.isdir",
"torch.manual_seed",
"time.strftime",
"torch.cuda.manual_seed_all",
"random.seed",
"torch.cuda.is_available",
"torch.device",
"torch.cuda.current_device",
"torch.cuda.get_device_name"
] | [((138, 178), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""" """'}), "(description=' ')\n", (161, 178), False, 'import argparse\n'), ((2491, 2516), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2514, 2516), False, 'import torch\n'), ((2791, 2853), 'torch.device', 'torch.device', (["(f'cuda:{args.cuda}' if args.cuda >= 0 else 'cpu')"], {}), "(f'cuda:{args.cuda}' if args.cuda >= 0 else 'cpu')\n", (2803, 2853), False, 'import torch\n'), ((2915, 2937), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (2926, 2937), False, 'import random\n'), ((2939, 2964), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2953, 2964), True, 'import numpy as np\n'), ((2966, 2994), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2983, 2994), False, 'import torch\n'), ((2996, 3033), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (3022, 3033), False, 'import torch\n'), ((2891, 2911), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2903, 2911), False, 'import torch\n'), ((2665, 2692), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (2690, 2692), False, 'import torch\n'), ((3304, 3332), 'os.path.isdir', 'os.path.isdir', (['args.out_path'], {}), '(args.out_path)\n', (3317, 3332), False, 'import os\n'), ((3334, 3357), 'os.mkdir', 'os.mkdir', (['args.out_path'], {}), '(args.out_path)\n', (3342, 3357), False, 'import os\n'), ((2735, 2772), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['device_id'], {}), '(device_id)\n', (2761, 2772), False, 'import torch\n'), ((3207, 3235), 'time.strftime', 'time.strftime', (['"""%m-%d_%H-%M"""'], {}), "('%m-%d_%H-%M')\n", (3220, 3235), False, 'import time\n')] |
"""
Classes for mass-univariate tuning analyses
"""
from numpy import array, sum, inner, dot, angle, abs, exp, asarray
from thunder.rdds.series import Series
from thunder.utils.common import loadMatVar
class TuningModel(object):
"""
Base class for loading and fitting tuning models.
Parameters
----------
modelFile : str, or array
Array of input values or specification of a MAT file
containing a variable s with input values
var : str, default = 's'
Variable name if loading from a MAT file
Attributes
----------
s : array
Input values along which tuning will be estimated,
i.e. s if we are fitting a function y = f(s)
See also
--------
CircularTuningModel : circular tuning parameter estimation
GaussianTuningModel : gaussian tuning parameter estimation
"""
def __init__(self, modelFile, var='s'):
if isinstance(modelFile, basestring):
self.s = loadMatVar(modelFile, var)
else:
self.s = modelFile
@staticmethod
def load(modelFile, tuningMode):
from thunder.utils.common import checkParams
checkParams(tuningMode.lower(), TUNING_MODELS.keys())
return TUNING_MODELS[tuningMode.lower()](modelFile)
def get(self, y):
pass
def fit(self, data):
"""
Fit a mass univariate tuning model.
Parameters
----------
data : Series or a subclass (e.g. RowMatrix)
The data to fit tuning models to, a collection of
key-value pairs where the keys are identifiers and the values are
one-dimensional arrays
Returns
-------
params : RDD of (tuple, array) pairs
Fitted tuning parameters for each record
"""
if not (isinstance(data, Series)):
raise Exception('Input must be Series or a subclass (e.g. RowMatrix)')
return Series(data.rdd.mapValues(lambda x: self.get(x)), index=['center', 'spread']).__finalize__(data)
class CircularTuningModel(TuningModel):
""" Circular tuning model fitting. """
def get(self, y):
"""
Estimate the circular mean and variance ("kappa"),
identical to the max likelihood estimates of the
parameters of the best fitting von-mises function
"""
y = y - min(y)
if sum(y) == 0:
y += 1E-06
y = y / sum(y)
r = inner(y, exp(1j * self.s))
mu = angle(r)
v = abs(r) / sum(y)
if v < 0.53:
k = 2 * v + (v ** 3) + 5 * (v ** 5) / 6
elif (v >= 0.53) & (v < 0.85):
k = -.4 + 1.39 * v + 0.43 / (1 - v)
elif (v ** 3 - 4 * (v ** 2) + 3 * v) == 0:
k = array([0.0])
else:
k = 1 / (v ** 3 - 4 * (v ** 2) + 3 * v)
if k > 1E8:
k = array([0.0])
return asarray([mu, k])
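# Worked sketch of the estimator (synthetic tuning curve over a hypothetical
# stimulus grid; fit() would additionally need a Series RDD):
#   from numpy import linspace, pi
#   model = CircularTuningModel(linspace(-pi, pi, 16, endpoint=False))
#   mu, kappa = model.get(exp(-model.s ** 2))  # responses peaked at s = 0
#   # mu should be close to 0 and kappa should be positive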
class GaussianTuningModel(TuningModel):
""" Gaussian tuning model fitting. """
def get(self, y):
"""
Estimate the mean and variance,
similar to the max likelihood estimates of the
parameters of the best fitting gaussian
but non-infinite supports may bias estimates
"""
y[y < 0] = 0
if sum(y) == 0:
y += 1E-06
y = y / sum(y)
mu = dot(self.s, y)
sigma = dot((self.s - mu) ** 2, y)
return asarray([mu, sigma])
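# In other words, after normalizing y to a probability vector the estimates
# are the first two weighted moments:
#   mu = sum_i s_i * y_i    and    sigma = sum_i (s_i - mu)**2 * y_i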
TUNING_MODELS = {
'circular': CircularTuningModel,
'gaussian': GaussianTuningModel
}
| [
"numpy.sum",
"numpy.abs",
"numpy.angle",
"numpy.asarray",
"thunder.utils.common.loadMatVar",
"numpy.array",
"numpy.exp",
"numpy.dot"
] | [((2497, 2505), 'numpy.angle', 'angle', (['r'], {}), '(r)\n', (2502, 2505), False, 'from numpy import array, sum, inner, dot, angle, abs, exp, asarray\n'), ((2904, 2920), 'numpy.asarray', 'asarray', (['[mu, k]'], {}), '([mu, k])\n', (2911, 2920), False, 'from numpy import array, sum, inner, dot, angle, abs, exp, asarray\n'), ((3353, 3367), 'numpy.dot', 'dot', (['self.s', 'y'], {}), '(self.s, y)\n', (3356, 3367), False, 'from numpy import array, sum, inner, dot, angle, abs, exp, asarray\n'), ((3384, 3410), 'numpy.dot', 'dot', (['((self.s - mu) ** 2)', 'y'], {}), '((self.s - mu) ** 2, y)\n', (3387, 3410), False, 'from numpy import array, sum, inner, dot, angle, abs, exp, asarray\n'), ((3426, 3446), 'numpy.asarray', 'asarray', (['[mu, sigma]'], {}), '([mu, sigma])\n', (3433, 3446), False, 'from numpy import array, sum, inner, dot, angle, abs, exp, asarray\n'), ((975, 1001), 'thunder.utils.common.loadMatVar', 'loadMatVar', (['modelFile', 'var'], {}), '(modelFile, var)\n', (985, 1001), False, 'from thunder.utils.common import loadMatVar\n'), ((2386, 2392), 'numpy.sum', 'sum', (['y'], {}), '(y)\n', (2389, 2392), False, 'from numpy import array, sum, inner, dot, angle, abs, exp, asarray\n'), ((2438, 2444), 'numpy.sum', 'sum', (['y'], {}), '(y)\n', (2441, 2444), False, 'from numpy import array, sum, inner, dot, angle, abs, exp, asarray\n'), ((2466, 2484), 'numpy.exp', 'exp', (['(1.0j * self.s)'], {}), '(1.0j * self.s)\n', (2469, 2484), False, 'from numpy import array, sum, inner, dot, angle, abs, exp, asarray\n'), ((2518, 2524), 'numpy.abs', 'abs', (['r'], {}), '(r)\n', (2521, 2524), False, 'from numpy import array, sum, inner, dot, angle, abs, exp, asarray\n'), ((2527, 2533), 'numpy.sum', 'sum', (['y'], {}), '(y)\n', (2530, 2533), False, 'from numpy import array, sum, inner, dot, angle, abs, exp, asarray\n'), ((2876, 2888), 'numpy.array', 'array', (['[0.0]'], {}), '([0.0])\n', (2881, 2888), False, 'from numpy import array, sum, inner, dot, angle, abs, exp, asarray\n'), ((3281, 3287), 'numpy.sum', 'sum', (['y'], {}), '(y)\n', (3284, 3287), False, 'from numpy import array, sum, inner, dot, angle, abs, exp, asarray\n'), ((3333, 3339), 'numpy.sum', 'sum', (['y'], {}), '(y)\n', (3336, 3339), False, 'from numpy import array, sum, inner, dot, angle, abs, exp, asarray\n'), ((2761, 2773), 'numpy.array', 'array', (['[0.0]'], {}), '([0.0])\n', (2766, 2773), False, 'from numpy import array, sum, inner, dot, angle, abs, exp, asarray\n')] |
import numpy as np
def test_labeling_and_statistics():
from skimage.io import imread
image = imread("napari_pyclesperanto_assistant/data/blobs.tif")
from napari_pyclesperanto_assistant._napari_cle_functions import voronoi_otsu_labeling
labels = voronoi_otsu_labeling(image)
from napari_pyclesperanto_assistant._statistics_of_labeled_pixels import statistics_of_labeled_pixels
stats = statistics_of_labeled_pixels(image, labels)
assert len(stats) == 37
binary = labels >= 1
from napari_pyclesperanto_assistant._napari_cle_functions import label
cca = label(binary)
assert cca.max() == 59
def test_select_gpu():
from napari_pyclesperanto_assistant._gui._select_gpu import select_gpu, gpu_selector
gpu_selector("")
select_gpu()
select_gpu.device = 1
select_gpu()
def test_numpy_functions(make_napari_viewer):
from napari import Viewer
from napari.layers import Image, Labels, Layer
from napari_pyclesperanto_assistant._convert_to_numpy import convert_to_numpy, convert_image_to_labels, \
convert_labels_to_image, make_labels_editable, reset_brightness_contrast_selected_image_layers, \
reset_brightness_contrast, auto_brightness_contrast, auto_brightness_contrast_all_images, \
auto_brightness_contrast_selected_image_layers, split_stack, set_voxel_size, set_voxel_size_of_all_layers
image = np.asarray([[[0,1], [2, 3]], [[0,1], [2, 3]]])
viewer = make_napari_viewer()
image_layer = viewer.add_image(image)
labels_layer = viewer.add_labels(image)
convert_to_numpy(image_layer)
convert_image_to_labels(image_layer)
convert_labels_to_image(labels_layer)
make_labels_editable(labels_layer)
reset_brightness_contrast(image_layer)
auto_brightness_contrast(image_layer)
reset_brightness_contrast_selected_image_layers(viewer)
auto_brightness_contrast_selected_image_layers(viewer)
auto_brightness_contrast_all_images(viewer)
split_stack(image_layer, viewer)
set_voxel_size(image_layer)
set_voxel_size_of_all_layers(viewer)
from napari_pyclesperanto_assistant._statistics_of_labeled_pixels import statistics_of_labeled_pixels
statistics_of_labeled_pixels(image, labels_layer.data, napari_viewer=viewer)
def test_advanced_statistics(make_napari_viewer):
image = np.asarray([[[0, 1], [2, 3]], [[0, 1], [2, 3]]])
viewer = make_napari_viewer()
image_layer = viewer.add_image(image)
labels_layer = viewer.add_labels(image)
from napari_pyclesperanto_assistant._advanced_statistics import advanced_statistics
advanced_statistics(image, labels_layer.data, napari_viewer=viewer)
def test_plugin_interface():
from napari_pyclesperanto_assistant._napari_plugin import napari_experimental_provide_function, \
napari_provide_sample_data, napari_experimental_provide_dock_widget
napari_experimental_provide_function()
napari_provide_sample_data()
napari_experimental_provide_dock_widget()
def test_example_data():
from napari_pyclesperanto_assistant._napari_plugin import _load_perfect_tissue, _load_chaotic_tissue, \
_load_orderly_tissue
_load_orderly_tissue()
_load_perfect_tissue()
_load_chaotic_tissue()
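# --- Standalone sketch (illustrative; not collected by pytest) ---
# Exercises the same labeling helper as the tests above, but on a synthetic
# image so the bundled blobs.tif sample is not required.
def demo_voronoi_otsu_on_synthetic_image():
    from napari_pyclesperanto_assistant._napari_cle_functions import voronoi_otsu_labeling
    blank = np.zeros((64, 64), dtype=np.uint8)
    blank[8:24, 8:24] = 200       # two bright square "blobs"
    blank[40:56, 40:56] = 200
    labels = voronoi_otsu_labeling(blank)
    assert labels.max() >= 1      # at least one object should be segmented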
| [
"napari_pyclesperanto_assistant._convert_to_numpy.convert_labels_to_image",
"napari_pyclesperanto_assistant._convert_to_numpy.reset_brightness_contrast",
"napari_pyclesperanto_assistant._convert_to_numpy.set_voxel_size",
"napari_pyclesperanto_assistant._convert_to_numpy.auto_brightness_contrast_all_images",
... | [((102, 157), 'skimage.io.imread', 'imread', (['"""napari_pyclesperanto_assistant/data/blobs.tif"""'], {}), "('napari_pyclesperanto_assistant/data/blobs.tif')\n", (108, 157), False, 'from skimage.io import imread\n'), ((264, 292), 'napari_pyclesperanto_assistant._napari_cle_functions.voronoi_otsu_labeling', 'voronoi_otsu_labeling', (['image'], {}), '(image)\n', (285, 292), False, 'from napari_pyclesperanto_assistant._napari_cle_functions import voronoi_otsu_labeling\n'), ((412, 455), 'napari_pyclesperanto_assistant._statistics_of_labeled_pixels.statistics_of_labeled_pixels', 'statistics_of_labeled_pixels', (['image', 'labels'], {}), '(image, labels)\n', (440, 455), False, 'from napari_pyclesperanto_assistant._statistics_of_labeled_pixels import statistics_of_labeled_pixels\n'), ((597, 610), 'napari_pyclesperanto_assistant._napari_cle_functions.label', 'label', (['binary'], {}), '(binary)\n', (602, 610), False, 'from napari_pyclesperanto_assistant._napari_cle_functions import label\n'), ((757, 773), 'napari_pyclesperanto_assistant._gui._select_gpu.gpu_selector', 'gpu_selector', (['""""""'], {}), "('')\n", (769, 773), False, 'from napari_pyclesperanto_assistant._gui._select_gpu import select_gpu, gpu_selector\n'), ((778, 790), 'napari_pyclesperanto_assistant._gui._select_gpu.select_gpu', 'select_gpu', ([], {}), '()\n', (788, 790), False, 'from napari_pyclesperanto_assistant._gui._select_gpu import select_gpu, gpu_selector\n'), ((821, 833), 'napari_pyclesperanto_assistant._gui._select_gpu.select_gpu', 'select_gpu', ([], {}), '()\n', (831, 833), False, 'from napari_pyclesperanto_assistant._gui._select_gpu import select_gpu, gpu_selector\n'), ((1406, 1454), 'numpy.asarray', 'np.asarray', (['[[[0, 1], [2, 3]], [[0, 1], [2, 3]]]'], {}), '([[[0, 1], [2, 3]], [[0, 1], [2, 3]]])\n', (1416, 1454), True, 'import numpy as np\n'), ((1580, 1609), 'napari_pyclesperanto_assistant._convert_to_numpy.convert_to_numpy', 'convert_to_numpy', (['image_layer'], {}), '(image_layer)\n', (1596, 1609), False, 'from napari_pyclesperanto_assistant._convert_to_numpy import convert_to_numpy, convert_image_to_labels, convert_labels_to_image, make_labels_editable, reset_brightness_contrast_selected_image_layers, reset_brightness_contrast, auto_brightness_contrast, auto_brightness_contrast_all_images, auto_brightness_contrast_selected_image_layers, split_stack, set_voxel_size, set_voxel_size_of_all_layers\n'), ((1614, 1650), 'napari_pyclesperanto_assistant._convert_to_numpy.convert_image_to_labels', 'convert_image_to_labels', (['image_layer'], {}), '(image_layer)\n', (1637, 1650), False, 'from napari_pyclesperanto_assistant._convert_to_numpy import convert_to_numpy, convert_image_to_labels, convert_labels_to_image, make_labels_editable, reset_brightness_contrast_selected_image_layers, reset_brightness_contrast, auto_brightness_contrast, auto_brightness_contrast_all_images, auto_brightness_contrast_selected_image_layers, split_stack, set_voxel_size, set_voxel_size_of_all_layers\n'), ((1655, 1692), 'napari_pyclesperanto_assistant._convert_to_numpy.convert_labels_to_image', 'convert_labels_to_image', (['labels_layer'], {}), '(labels_layer)\n', (1678, 1692), False, 'from napari_pyclesperanto_assistant._convert_to_numpy import convert_to_numpy, convert_image_to_labels, convert_labels_to_image, make_labels_editable, reset_brightness_contrast_selected_image_layers, reset_brightness_contrast, auto_brightness_contrast, auto_brightness_contrast_all_images, auto_brightness_contrast_selected_image_layers, split_stack, 
set_voxel_size, set_voxel_size_of_all_layers\n'), ((1697, 1731), 'napari_pyclesperanto_assistant._convert_to_numpy.make_labels_editable', 'make_labels_editable', (['labels_layer'], {}), '(labels_layer)\n', (1717, 1731), False, 'from napari_pyclesperanto_assistant._convert_to_numpy import convert_to_numpy, convert_image_to_labels, convert_labels_to_image, make_labels_editable, reset_brightness_contrast_selected_image_layers, reset_brightness_contrast, auto_brightness_contrast, auto_brightness_contrast_all_images, auto_brightness_contrast_selected_image_layers, split_stack, set_voxel_size, set_voxel_size_of_all_layers\n'), ((1736, 1774), 'napari_pyclesperanto_assistant._convert_to_numpy.reset_brightness_contrast', 'reset_brightness_contrast', (['image_layer'], {}), '(image_layer)\n', (1761, 1774), False, 'from napari_pyclesperanto_assistant._convert_to_numpy import convert_to_numpy, convert_image_to_labels, convert_labels_to_image, make_labels_editable, reset_brightness_contrast_selected_image_layers, reset_brightness_contrast, auto_brightness_contrast, auto_brightness_contrast_all_images, auto_brightness_contrast_selected_image_layers, split_stack, set_voxel_size, set_voxel_size_of_all_layers\n'), ((1779, 1816), 'napari_pyclesperanto_assistant._convert_to_numpy.auto_brightness_contrast', 'auto_brightness_contrast', (['image_layer'], {}), '(image_layer)\n', (1803, 1816), False, 'from napari_pyclesperanto_assistant._convert_to_numpy import convert_to_numpy, convert_image_to_labels, convert_labels_to_image, make_labels_editable, reset_brightness_contrast_selected_image_layers, reset_brightness_contrast, auto_brightness_contrast, auto_brightness_contrast_all_images, auto_brightness_contrast_selected_image_layers, split_stack, set_voxel_size, set_voxel_size_of_all_layers\n'), ((1821, 1876), 'napari_pyclesperanto_assistant._convert_to_numpy.reset_brightness_contrast_selected_image_layers', 'reset_brightness_contrast_selected_image_layers', (['viewer'], {}), '(viewer)\n', (1868, 1876), False, 'from napari_pyclesperanto_assistant._convert_to_numpy import convert_to_numpy, convert_image_to_labels, convert_labels_to_image, make_labels_editable, reset_brightness_contrast_selected_image_layers, reset_brightness_contrast, auto_brightness_contrast, auto_brightness_contrast_all_images, auto_brightness_contrast_selected_image_layers, split_stack, set_voxel_size, set_voxel_size_of_all_layers\n'), ((1881, 1935), 'napari_pyclesperanto_assistant._convert_to_numpy.auto_brightness_contrast_selected_image_layers', 'auto_brightness_contrast_selected_image_layers', (['viewer'], {}), '(viewer)\n', (1927, 1935), False, 'from napari_pyclesperanto_assistant._convert_to_numpy import convert_to_numpy, convert_image_to_labels, convert_labels_to_image, make_labels_editable, reset_brightness_contrast_selected_image_layers, reset_brightness_contrast, auto_brightness_contrast, auto_brightness_contrast_all_images, auto_brightness_contrast_selected_image_layers, split_stack, set_voxel_size, set_voxel_size_of_all_layers\n'), ((1940, 1983), 'napari_pyclesperanto_assistant._convert_to_numpy.auto_brightness_contrast_all_images', 'auto_brightness_contrast_all_images', (['viewer'], {}), '(viewer)\n', (1975, 1983), False, 'from napari_pyclesperanto_assistant._convert_to_numpy import convert_to_numpy, convert_image_to_labels, convert_labels_to_image, make_labels_editable, reset_brightness_contrast_selected_image_layers, reset_brightness_contrast, auto_brightness_contrast, auto_brightness_contrast_all_images, 
auto_brightness_contrast_selected_image_layers, split_stack, set_voxel_size, set_voxel_size_of_all_layers\n'), ((1988, 2020), 'napari_pyclesperanto_assistant._convert_to_numpy.split_stack', 'split_stack', (['image_layer', 'viewer'], {}), '(image_layer, viewer)\n', (1999, 2020), False, 'from napari_pyclesperanto_assistant._convert_to_numpy import convert_to_numpy, convert_image_to_labels, convert_labels_to_image, make_labels_editable, reset_brightness_contrast_selected_image_layers, reset_brightness_contrast, auto_brightness_contrast, auto_brightness_contrast_all_images, auto_brightness_contrast_selected_image_layers, split_stack, set_voxel_size, set_voxel_size_of_all_layers\n'), ((2025, 2052), 'napari_pyclesperanto_assistant._convert_to_numpy.set_voxel_size', 'set_voxel_size', (['image_layer'], {}), '(image_layer)\n', (2039, 2052), False, 'from napari_pyclesperanto_assistant._convert_to_numpy import convert_to_numpy, convert_image_to_labels, convert_labels_to_image, make_labels_editable, reset_brightness_contrast_selected_image_layers, reset_brightness_contrast, auto_brightness_contrast, auto_brightness_contrast_all_images, auto_brightness_contrast_selected_image_layers, split_stack, set_voxel_size, set_voxel_size_of_all_layers\n'), ((2057, 2093), 'napari_pyclesperanto_assistant._convert_to_numpy.set_voxel_size_of_all_layers', 'set_voxel_size_of_all_layers', (['viewer'], {}), '(viewer)\n', (2085, 2093), False, 'from napari_pyclesperanto_assistant._convert_to_numpy import convert_to_numpy, convert_image_to_labels, convert_labels_to_image, make_labels_editable, reset_brightness_contrast_selected_image_layers, reset_brightness_contrast, auto_brightness_contrast, auto_brightness_contrast_all_images, auto_brightness_contrast_selected_image_layers, split_stack, set_voxel_size, set_voxel_size_of_all_layers\n'), ((2205, 2281), 'napari_pyclesperanto_assistant._statistics_of_labeled_pixels.statistics_of_labeled_pixels', 'statistics_of_labeled_pixels', (['image', 'labels_layer.data'], {'napari_viewer': 'viewer'}), '(image, labels_layer.data, napari_viewer=viewer)\n', (2233, 2281), False, 'from napari_pyclesperanto_assistant._statistics_of_labeled_pixels import statistics_of_labeled_pixels\n'), ((2345, 2393), 'numpy.asarray', 'np.asarray', (['[[[0, 1], [2, 3]], [[0, 1], [2, 3]]]'], {}), '([[[0, 1], [2, 3]], [[0, 1], [2, 3]]])\n', (2355, 2393), True, 'import numpy as np\n'), ((2609, 2676), 'napari_pyclesperanto_assistant._advanced_statistics.advanced_statistics', 'advanced_statistics', (['image', 'labels_layer.data'], {'napari_viewer': 'viewer'}), '(image, labels_layer.data, napari_viewer=viewer)\n', (2628, 2676), False, 'from napari_pyclesperanto_assistant._advanced_statistics import advanced_statistics\n'), ((2889, 2927), 'napari_pyclesperanto_assistant._napari_plugin.napari_experimental_provide_function', 'napari_experimental_provide_function', ([], {}), '()\n', (2925, 2927), False, 'from napari_pyclesperanto_assistant._napari_plugin import napari_experimental_provide_function, napari_provide_sample_data, napari_experimental_provide_dock_widget\n'), ((2932, 2960), 'napari_pyclesperanto_assistant._napari_plugin.napari_provide_sample_data', 'napari_provide_sample_data', ([], {}), '()\n', (2958, 2960), False, 'from napari_pyclesperanto_assistant._napari_plugin import napari_experimental_provide_function, napari_provide_sample_data, napari_experimental_provide_dock_widget\n'), ((2965, 3006), 'napari_pyclesperanto_assistant._napari_plugin.napari_experimental_provide_dock_widget', 
'napari_experimental_provide_dock_widget', ([], {}), '()\n', (3004, 3006), False, 'from napari_pyclesperanto_assistant._napari_plugin import napari_experimental_provide_function, napari_provide_sample_data, napari_experimental_provide_dock_widget\n'), ((3175, 3197), 'napari_pyclesperanto_assistant._napari_plugin._load_orderly_tissue', '_load_orderly_tissue', ([], {}), '()\n', (3195, 3197), False, 'from napari_pyclesperanto_assistant._napari_plugin import _load_perfect_tissue, _load_chaotic_tissue, _load_orderly_tissue\n'), ((3202, 3224), 'napari_pyclesperanto_assistant._napari_plugin._load_perfect_tissue', '_load_perfect_tissue', ([], {}), '()\n', (3222, 3224), False, 'from napari_pyclesperanto_assistant._napari_plugin import _load_perfect_tissue, _load_chaotic_tissue, _load_orderly_tissue\n'), ((3229, 3251), 'napari_pyclesperanto_assistant._napari_plugin._load_chaotic_tissue', '_load_chaotic_tissue', ([], {}), '()\n', (3249, 3251), False, 'from napari_pyclesperanto_assistant._napari_plugin import _load_perfect_tissue, _load_chaotic_tissue, _load_orderly_tissue\n')] |
from os.path import join
from multiprocessing import cpu_count
import pytest
from Tests import save_validation_path as save_path
from numpy import exp, sqrt, pi, meshgrid, zeros, real
from numpy.testing import assert_array_almost_equal
from pyleecan.Classes.Simu1 import Simu1
from pyleecan.Classes.InputCurrent import InputCurrent
from pyleecan.Classes.MagFEMM import MagFEMM
from pyleecan.Classes.ForceMT import ForceMT
from pyleecan.Classes.Output import Output
from pyleecan.Functions.load import load
from pyleecan.definitions import DATA_DIR
@pytest.mark.long
@pytest.mark.validation
@pytest.mark.FEMM
@pytest.mark.failed # reason: Arrays are not almost equal to 2 decimals 'test1_AGSF'
def test_FEMM_periodicity():
"""Validation of the implementaiton of periodic angle axis in Magnetic (MagFEMM) and Force (ForceMT) modules"""
IPMSM_A = load(join(DATA_DIR, "Machine", "IPMSM_A.json"))
assert IPMSM_A.comp_periodicity() == (4, True, 4, True)  # (per_a, is_antiper_a, per_t, is_antiper_t)
simu = Simu1(name="FEMM_periodicity", machine=IPMSM_A)
# Definition of the enforced output of the electrical module
I0_rms = 250 / sqrt(2)
Phi0 = 140 * pi / 180  # current angle for Maximum Torque Per Ampere (MTPA) operation
Id_ref = (I0_rms * exp(1j * Phi0)).real
Iq_ref = (I0_rms * exp(1j * Phi0)).imag
simu.input = InputCurrent(
Id_ref=Id_ref,
Iq_ref=Iq_ref,
Na_tot=252 * 8,
Nt_tot=2 * 8,
N0=1000,
)
# Definition of the magnetic simulation: with periodicity
simu.mag = MagFEMM(
type_BH_stator=1,
type_BH_rotor=1,
is_periodicity_a=True,
is_periodicity_t=True,
nb_worker=cpu_count(),
)
simu.force = ForceMT(is_periodicity_a=True, is_periodicity_t=True)
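# Note: with is_periodicity_a / is_periodicity_t enabled, FEMM meshes and
# solves only one spatial/temporal (anti-)period of the machine and pyleecan
# expands the result back to the full domain; the "Full" simulation below
# recomputes everything without this shortcut so the two can be compared.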
# Definition of the magnetic simulation: no periodicity
simu2 = simu.copy()
simu2.mag = MagFEMM(
type_BH_stator=1,
type_BH_rotor=1,
is_periodicity_a=False,
is_periodicity_t=False,
nb_worker=cpu_count(),
)
simu2.force = ForceMT(is_periodicity_a=False, is_periodicity_t=False)
# Run simulations
out = Output(simu=simu)
simu.run()
out2 = Output(simu=simu2)
simu2.run()
# Plot the result
out.plot_2D_Data(
"mag.B",
"time",
"angle[0]{°}",
data_list=[out2.mag.B],
legend_list=["Periodic", "Full"],
save_path=join(save_path, simu.name + "_B_time.png"),
is_show_fig=False,
)
out.plot_2D_Data(
"mag.B",
"angle",
"time[0]",
data_list=[out2.mag.B],
legend_list=["Periodic", "Full"],
save_path=join(save_path, simu.name + "_B_space.png"),
is_show_fig=False,
)
out.plot_2D_Data(
"mag.B",
"wavenumber=[0,100]",
data_list=[out2.mag.B],
legend_list=["Periodic", "Full"],
save_path=join(save_path, simu.name + "_B_space_fft.png"),
is_show_fig=False,
)
out.plot_2D_Data(
"force.AGSF",
"time",
"angle[0]{°}",
data_list=[out2.force.AGSF],
legend_list=["Periodic", "Full"],
save_path=join(save_path, simu.name + "_P_time.png"),
is_show_fig=False,
)
out.plot_2D_Data(
"force.AGSF",
"angle",
"time[0]",
data_list=[out2.force.AGSF],
legend_list=["Periodic", "Full"],
save_path=join(save_path, simu.name + "_P_space.png"),
is_show_fig=False,
)
out.plot_2D_Data(
"force.AGSF",
"wavenumber=[0,100]",
"time[0]",
data_list=[out2.force.AGSF],
legend_list=["Periodic", "Full"],
save_path=join(save_path, simu.name + "_P_space_fft.png"),
is_show_fig=False,
)
out.plot_2D_Data(
"mag.Tem",
"time",
data_list=[out2.mag.Tem],
legend_list=["Periodic", "Full"],
save_path=join(save_path, simu.name + "_Tem_time.png"),
is_show_fig=False,
)
out.plot_2D_Data(
"mag.Phi_wind_stator",
"time",
"phase",
data_list=[out2.mag.Phi_wind_stator],
legend_list=["Periodic", "Full"],
save_path=join(save_path, simu.name + "_Phi_wind_stator_time.png"),
is_show_fig=False,
)
# Check Flux spatio-temporal reconstruction sym
Bflux = out.mag.B
arg_list = ["time", "angle"]
result = Bflux.get_rphiz_along(*arg_list)
Brad = result["radial"]
time = result["time"]
angle = result["angle"]
Xangle, Xtime = meshgrid(angle, time)
arg_list = ["freqs", "wavenumber"]
result_freq = Bflux.get_rphiz_along(*arg_list)
Brad_wr = result_freq["radial"]
freqs = result_freq["freqs"]
wavenumber = result_freq["wavenumber"]
Nf = len(freqs)
Nr = len(wavenumber)
XB_rad = zeros(Brad.shape)
for ir in range(Nr):
r = wavenumber[ir]
for ifrq in range(Nf):
frq = freqs[ifrq]
XB_rad = XB_rad + real(
Brad_wr[ifrq, ir] * exp(1j * 2 * pi * frq * Xtime + 1j * r * Xangle)
)
test1 = abs(Brad - XB_rad) / abs(Brad).max()
assert_array_almost_equal(test1, 0, decimal=2)
assert_array_almost_equal(Brad, XB_rad, decimal=6)
# Check Flux spatio-temporal reconstruction full
Bflux2 = out2.mag.B
arg_list = ["time", "angle"]
result2 = Bflux2.get_rphiz_along(*arg_list)
Brad2 = result2["radial"]
time = result2["time"]
angle = result2["angle"]
Xangle, Xtime = meshgrid(angle, time)
arg_list = ["freqs", "wavenumber"]
result_freq2 = Bflux2.get_rphiz_along(*arg_list)
Brad_wr2 = result_freq2["radial"]
freqs = result_freq2["freqs"]
wavenumber = result_freq2["wavenumber"]
Nf = len(freqs)
Nr = len(wavenumber)
XB_rad2 = zeros(Brad2.shape)
for ir in range(Nr):
r = wavenumber[ir]
for ifrq in range(Nf):
frq = freqs[ifrq]
XB_rad2 = XB_rad2 + real(
Brad_wr2[ifrq, ir] * exp(1j * 2 * pi * frq * Xtime + 1j * r * Xangle)
)
test2 = abs(Brad2 - XB_rad2) / abs(Brad2).max()
assert_array_almost_equal(test2, 0, decimal=2)
assert_array_almost_equal(Brad2, XB_rad2, decimal=2)
# Compare both simu
test11 = abs(Brad - Brad2) / abs(Brad).max()
assert_array_almost_equal(test11, 0, decimal=1)
test22 = abs(XB_rad - XB_rad2) / abs(Brad).max()
assert_array_almost_equal(test22, 0, decimal=1)
# Check AGSF spatio-temporal reconstruction sym
AGSF = out.force.AGSF
arg_list = ["time", "angle"]
result_AGSF = AGSF.get_rphiz_along(*arg_list)
Prad = result_AGSF["radial"]
time = result_AGSF["time"]
angle = result_AGSF["angle"]
Xangle, Xtime = meshgrid(angle, time)
arg_list = ["freqs", "wavenumber"]
result_freq_agsf = AGSF.get_rphiz_along(*arg_list)
Prad_wr = result_freq_agsf["radial"]
freqs = result_freq_agsf["freqs"]
wavenumber = result_freq_agsf["wavenumber"]
Nf = len(freqs)
Nr = len(wavenumber)
XP_rad = zeros(Prad.shape)
for ir in range(Nr):
r = wavenumber[ir]
for ifrq in range(Nf):
frq = freqs[ifrq]
XP_rad = XP_rad + real(
Prad_wr[ifrq, ir] * exp(1j * 2 * pi * frq * Xtime + 1j * r * Xangle)
)
test1_AGSF = abs(Prad - XP_rad) / abs(Prad).max()
assert_array_almost_equal(test1_AGSF, 0, decimal=2)
# Check AGSF spatio-temporal reconstruction full
AGSF2 = out2.force.AGSF
arg_list = ["time", "angle"]
result_AGSF2 = AGSF2.get_rphiz_along(*arg_list)
Prad2 = result_AGSF2["radial"]
time = result_AGSF2["time"]
angle = result_AGSF2["angle"]
Xangle, Xtime = meshgrid(angle, time)
arg_list = ["freqs", "wavenumber"]
result_freq_agsf2 = AGSF2.get_rphiz_along(*arg_list)
Prad_wr2 = result_freq_agsf2["radial"]
freqs = result_freq_agsf2["freqs"]
wavenumber = result_freq_agsf2["wavenumber"]
Nf = len(freqs)
Nr = len(wavenumber)
XP_rad2 = zeros(Prad2.shape)
for ir in range(Nr):
r = wavenumber[ir]
for ifrq in range(Nf):
frq = freqs[ifrq]
XP_rad2 = XP_rad2 + real(
Prad_wr2[ifrq, ir] * exp(1j * 2 * pi * frq * Xtime + 1j * r * Xangle)
)
test2_AGSF = abs(Prad2 - XP_rad2) / abs(Prad2).mean()
assert_array_almost_equal(test2_AGSF, 0, decimal=2)
# Reconstruction results should be the same
test3_AGSF = abs(XP_rad - XP_rad2) / abs(XP_rad2).mean()
assert_array_almost_equal(test3_AGSF, 0, decimal=1)
return out, out2
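# --- Side note (self-contained illustration, independent of pyleecan) ---
# The reconstruction loops above implement
#     B(t, theta) = sum_f sum_r Re{ B_hat(f, r) * exp(1j*(2*pi*f*t + r*theta)) }
# A minimal check of that identity on a single synthetic rotating wave:
def _check_spectral_reconstruction():
    from numpy import linspace, cos, allclose
    t = linspace(0, 1, 16, endpoint=False)
    th = linspace(0, 2 * pi, 32, endpoint=False)
    Xth, Xt = meshgrid(th, t)
    wave = cos(2 * pi * 3 * Xt + 2 * Xth)      # f = 3, wavenumber r = 2
    rebuilt = real(1.0 * exp(1j * (2 * pi * 3 * Xt + 2 * Xth)))
    assert allclose(wave, rebuilt)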
# To run it without pytest
if __name__ == "__main__":
out, out2 = test_FEMM_periodicity()
| [
"pyleecan.Classes.ForceMT.ForceMT",
"numpy.meshgrid",
"pyleecan.Classes.InputCurrent.InputCurrent",
"numpy.zeros",
"multiprocessing.cpu_count",
"numpy.exp",
"pyleecan.Classes.Simu1.Simu1",
"pyleecan.Classes.Output.Output",
"numpy.testing.assert_array_almost_equal",
"os.path.join",
"numpy.sqrt"
] | [((984, 1031), 'pyleecan.Classes.Simu1.Simu1', 'Simu1', ([], {'name': '"""FEMM_periodicity"""', 'machine': 'IPMSM_A'}), "(name='FEMM_periodicity', machine=IPMSM_A)\n", (989, 1031), False, 'from pyleecan.Classes.Simu1 import Simu1\n'), ((1284, 1370), 'pyleecan.Classes.InputCurrent.InputCurrent', 'InputCurrent', ([], {'Id_ref': 'Id_ref', 'Iq_ref': 'Iq_ref', 'Na_tot': '(252 * 8)', 'Nt_tot': '(2 * 8)', 'N0': '(1000)'}), '(Id_ref=Id_ref, Iq_ref=Iq_ref, Na_tot=252 * 8, Nt_tot=2 * 8, N0\n =1000)\n', (1296, 1370), False, 'from pyleecan.Classes.InputCurrent import InputCurrent\n'), ((1667, 1720), 'pyleecan.Classes.ForceMT.ForceMT', 'ForceMT', ([], {'is_periodicity_a': '(True)', 'is_periodicity_t': '(True)'}), '(is_periodicity_a=True, is_periodicity_t=True)\n', (1674, 1720), False, 'from pyleecan.Classes.ForceMT import ForceMT\n'), ((2001, 2056), 'pyleecan.Classes.ForceMT.ForceMT', 'ForceMT', ([], {'is_periodicity_a': '(False)', 'is_periodicity_t': '(False)'}), '(is_periodicity_a=False, is_periodicity_t=False)\n', (2008, 2056), False, 'from pyleecan.Classes.ForceMT import ForceMT\n'), ((2090, 2107), 'pyleecan.Classes.Output.Output', 'Output', ([], {'simu': 'simu'}), '(simu=simu)\n', (2096, 2107), False, 'from pyleecan.Classes.Output import Output\n'), ((2135, 2153), 'pyleecan.Classes.Output.Output', 'Output', ([], {'simu': 'simu2'}), '(simu=simu2)\n', (2141, 2153), False, 'from pyleecan.Classes.Output import Output\n'), ((4488, 4509), 'numpy.meshgrid', 'meshgrid', (['angle', 'time'], {}), '(angle, time)\n', (4496, 4509), False, 'from numpy import exp, sqrt, pi, meshgrid, zeros, real\n'), ((4772, 4789), 'numpy.zeros', 'zeros', (['Brad.shape'], {}), '(Brad.shape)\n', (4777, 4789), False, 'from numpy import exp, sqrt, pi, meshgrid, zeros, real\n'), ((5093, 5139), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['test1', '(0)'], {'decimal': '(2)'}), '(test1, 0, decimal=2)\n', (5118, 5139), False, 'from numpy.testing import assert_array_almost_equal\n'), ((5144, 5194), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['Brad', 'XB_rad'], {'decimal': '(6)'}), '(Brad, XB_rad, decimal=6)\n', (5169, 5194), False, 'from numpy.testing import assert_array_almost_equal\n'), ((5460, 5481), 'numpy.meshgrid', 'meshgrid', (['angle', 'time'], {}), '(angle, time)\n', (5468, 5481), False, 'from numpy import exp, sqrt, pi, meshgrid, zeros, real\n'), ((5751, 5769), 'numpy.zeros', 'zeros', (['Brad2.shape'], {}), '(Brad2.shape)\n', (5756, 5769), False, 'from numpy import exp, sqrt, pi, meshgrid, zeros, real\n'), ((6079, 6125), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['test2', '(0)'], {'decimal': '(2)'}), '(test2, 0, decimal=2)\n', (6104, 6125), False, 'from numpy.testing import assert_array_almost_equal\n'), ((6130, 6182), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['Brad2', 'XB_rad2'], {'decimal': '(2)'}), '(Brad2, XB_rad2, decimal=2)\n', (6155, 6182), False, 'from numpy.testing import assert_array_almost_equal\n'), ((6261, 6308), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['test11', '(0)'], {'decimal': '(1)'}), '(test11, 0, decimal=1)\n', (6286, 6308), False, 'from numpy.testing import assert_array_almost_equal\n'), ((6367, 6414), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['test22', '(0)'], {'decimal': '(1)'}), '(test22, 0, decimal=1)\n', (6392, 6414), False, 'from numpy.testing import assert_array_almost_equal\n'), ((6694, 6715), 'numpy.meshgrid', 
'meshgrid', (['angle', 'time'], {}), '(angle, time)\n', (6702, 6715), False, 'from numpy import exp, sqrt, pi, meshgrid, zeros, real\n'), ((6997, 7014), 'numpy.zeros', 'zeros', (['Prad.shape'], {}), '(Prad.shape)\n', (7002, 7014), False, 'from numpy import exp, sqrt, pi, meshgrid, zeros, real\n'), ((7323, 7374), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['test1_AGSF', '(0)'], {'decimal': '(2)'}), '(test1_AGSF, 0, decimal=2)\n', (7348, 7374), False, 'from numpy.testing import assert_array_almost_equal\n'), ((7663, 7684), 'numpy.meshgrid', 'meshgrid', (['angle', 'time'], {}), '(angle, time)\n', (7671, 7684), False, 'from numpy import exp, sqrt, pi, meshgrid, zeros, real\n'), ((7973, 7991), 'numpy.zeros', 'zeros', (['Prad2.shape'], {}), '(Prad2.shape)\n', (7978, 7991), False, 'from numpy import exp, sqrt, pi, meshgrid, zeros, real\n'), ((8307, 8358), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['test2_AGSF', '(0)'], {'decimal': '(2)'}), '(test2_AGSF, 0, decimal=2)\n', (8332, 8358), False, 'from numpy.testing import assert_array_almost_equal\n'), ((8473, 8524), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['test3_AGSF', '(0)'], {'decimal': '(1)'}), '(test3_AGSF, 0, decimal=1)\n', (8498, 8524), False, 'from numpy.testing import assert_array_almost_equal\n'), ((868, 909), 'os.path.join', 'join', (['DATA_DIR', '"""Machine"""', '"""IPMSM_A.json"""'], {}), "(DATA_DIR, 'Machine', 'IPMSM_A.json')\n", (872, 909), False, 'from os.path import join\n'), ((1117, 1124), 'numpy.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (1121, 1124), False, 'from numpy import exp, sqrt, pi, meshgrid, zeros, real\n'), ((1201, 1217), 'numpy.exp', 'exp', (['(1.0j * Phi0)'], {}), '(1.0j * Phi0)\n', (1204, 1217), False, 'from numpy import exp, sqrt, pi, meshgrid, zeros, real\n'), ((1245, 1261), 'numpy.exp', 'exp', (['(1.0j * Phi0)'], {}), '(1.0j * Phi0)\n', (1248, 1261), False, 'from numpy import exp, sqrt, pi, meshgrid, zeros, real\n'), ((1631, 1642), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (1640, 1642), False, 'from multiprocessing import cpu_count\n'), ((1964, 1975), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (1973, 1975), False, 'from multiprocessing import cpu_count\n'), ((2363, 2405), 'os.path.join', 'join', (['save_path', "(simu.name + '_B_time.png')"], {}), "(save_path, simu.name + '_B_time.png')\n", (2367, 2405), False, 'from os.path import join\n'), ((2608, 2651), 'os.path.join', 'join', (['save_path', "(simu.name + '_B_space.png')"], {}), "(save_path, simu.name + '_B_space.png')\n", (2612, 2651), False, 'from os.path import join\n'), ((2848, 2895), 'os.path.join', 'join', (['save_path', "(simu.name + '_B_space_fft.png')"], {}), "(save_path, simu.name + '_B_space_fft.png')\n", (2852, 2895), False, 'from os.path import join\n'), ((3111, 3153), 'os.path.join', 'join', (['save_path', "(simu.name + '_P_time.png')"], {}), "(save_path, simu.name + '_P_time.png')\n", (3115, 3153), False, 'from os.path import join\n'), ((3366, 3409), 'os.path.join', 'join', (['save_path', "(simu.name + '_P_space.png')"], {}), "(save_path, simu.name + '_P_space.png')\n", (3370, 3409), False, 'from os.path import join\n'), ((3635, 3682), 'os.path.join', 'join', (['save_path', "(simu.name + '_P_space_fft.png')"], {}), "(save_path, simu.name + '_P_space_fft.png')\n", (3639, 3682), False, 'from os.path import join\n'), ((3869, 3913), 'os.path.join', 'join', (['save_path', "(simu.name + '_Tem_time.png')"], {}), "(save_path, 
simu.name + '_Tem_time.png')\n", (3873, 3913), False, 'from os.path import join\n'), ((4141, 4197), 'os.path.join', 'join', (['save_path', "(simu.name + '_Phi_wind_stator_time.png')"], {}), "(save_path, simu.name + '_Phi_wind_stator_time.png')\n", (4145, 4197), False, 'from os.path import join\n'), ((4976, 5028), 'numpy.exp', 'exp', (['(1.0j * 2 * pi * frq * Xtime + 1.0j * r * Xangle)'], {}), '(1.0j * 2 * pi * frq * Xtime + 1.0j * r * Xangle)\n', (4979, 5028), False, 'from numpy import exp, sqrt, pi, meshgrid, zeros, real\n'), ((5959, 6011), 'numpy.exp', 'exp', (['(1.0j * 2 * pi * frq * Xtime + 1.0j * r * Xangle)'], {}), '(1.0j * 2 * pi * frq * Xtime + 1.0j * r * Xangle)\n', (5962, 6011), False, 'from numpy import exp, sqrt, pi, meshgrid, zeros, real\n'), ((7201, 7253), 'numpy.exp', 'exp', (['(1.0j * 2 * pi * frq * Xtime + 1.0j * r * Xangle)'], {}), '(1.0j * 2 * pi * frq * Xtime + 1.0j * r * Xangle)\n', (7204, 7253), False, 'from numpy import exp, sqrt, pi, meshgrid, zeros, real\n'), ((8181, 8233), 'numpy.exp', 'exp', (['(1.0j * 2 * pi * frq * Xtime + 1.0j * r * Xangle)'], {}), '(1.0j * 2 * pi * frq * Xtime + 1.0j * r * Xangle)\n', (8184, 8233), False, 'from numpy import exp, sqrt, pi, meshgrid, zeros, real\n')] |
"""
Usage:
tpch-pyarrow-p.py <num>
Options:
-h --help Show this screen.
--version Show version.
"""
import io
import itertools
import os
from multiprocessing import Pool
from typing import Any, List
import numpy as np
import pyarrow as pa
from contexttimer import Timer
from docopt import docopt
from pyarrow import csv
from sqlalchemy import create_engine
def get_sqls(table: str, count: int) -> List[str]:
sqls = []
split = np.linspace(0, 60000000, num=count + 1, endpoint=True, dtype=int)
for i in range(len(split) - 1):
sqls.append(
f"""SELECT
l_orderkey,
l_partkey,
l_suppkey,
l_linenumber,
l_quantity::float8,
l_extendedprice::float8,
l_discount::float8,
l_tax::float8,
l_returnflag,
l_linestatus,
l_shipdate,
l_commitdate,
l_receiptdate,
l_shipinstruct,
l_shipmode,
l_comment
FROM {table}
WHERE l_orderkey > {split[i]} and l_orderkey <= {split[i+1]}"""
)
return sqls
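# Example (illustration): get_sqls("lineitem", 3) yields three disjoint
# slices, since np.linspace(0, 60000000, num=4, endpoint=True, dtype=int)
# is [0, 20000000, 40000000, 60000000]:
#   ... WHERE l_orderkey > 0        and l_orderkey <= 20000000
#   ... WHERE l_orderkey > 20000000 and l_orderkey <= 40000000
#   ... WHERE l_orderkey > 40000000 and l_orderkey <= 60000000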
def func(id: int, conn: str, query: str) -> Any:
engine = create_engine(conn)
conn = engine.connect()
cur = conn.connection.cursor()
store = io.BytesIO()
with Timer() as timer:
cur.copy_expert(f"COPY ({query}) TO STDOUT WITH CSV HEADER;", store)
print(f"[Copy {id}] {timer.elapsed:.2f}s")
store.seek(0)
with Timer() as timer:
df = csv.read_csv(store, read_options=csv.ReadOptions(use_threads=False))
print(f"[Read CSV {id}] {timer.elapsed:.2f}s")
return df
if __name__ == "__main__":
args = docopt(__doc__, version="1.0")
conn = os.environ["POSTGRES_URL"]
table = os.environ["POSTGRES_TABLE"]
queries = get_sqls(table, int(args["<num>"]))
print(f"number of threads: {len(queries)}\nsqls: {queries}")
with Timer() as timer, Pool(len(queries)) as pool:
dfs = pool.starmap(
func, zip(range(len(queries)), itertools.repeat(conn), queries)
)
print(f"[All Jobs] {timer.elapsed:.2f}s")
with Timer() as timer:
df = pa.concat_tables(dfs)
print(f"[Concat] {timer.elapsed:.2f}s")
with Timer() as timer:
df = df.to_pandas()
print(f"[To Pandas] {timer.elapsed:.2f}s")
print(df.head())
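# Hypothetical invocation (connection URL and table name are placeholders;
# <num> controls how many COPY partitions run in parallel):
#   POSTGRES_URL=postgresql://user:pass@localhost:5432/tpch \
#   POSTGRES_TABLE=lineitem \
#   python tpch-pyarrow-p.py 8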
| [
"io.BytesIO",
"docopt.docopt",
"pyarrow.csv.ReadOptions",
"pyarrow.concat_tables",
"numpy.linspace",
"sqlalchemy.create_engine",
"contexttimer.Timer",
"itertools.repeat"
] | [((452, 517), 'numpy.linspace', 'np.linspace', (['(0)', '(60000000)'], {'num': '(count + 1)', 'endpoint': '(True)', 'dtype': 'int'}), '(0, 60000000, num=count + 1, endpoint=True, dtype=int)\n', (463, 517), True, 'import numpy as np\n'), ((1370, 1389), 'sqlalchemy.create_engine', 'create_engine', (['conn'], {}), '(conn)\n', (1383, 1389), False, 'from sqlalchemy import create_engine\n'), ((1465, 1477), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1475, 1477), False, 'import io\n'), ((1864, 1894), 'docopt.docopt', 'docopt', (['__doc__'], {'version': '"""1.0"""'}), "(__doc__, version='1.0')\n", (1870, 1894), False, 'from docopt import docopt\n'), ((1488, 1495), 'contexttimer.Timer', 'Timer', ([], {}), '()\n', (1493, 1495), False, 'from contexttimer import Timer\n'), ((1658, 1665), 'contexttimer.Timer', 'Timer', ([], {}), '()\n', (1663, 1665), False, 'from contexttimer import Timer\n'), ((2101, 2108), 'contexttimer.Timer', 'Timer', ([], {}), '()\n', (2106, 2108), False, 'from contexttimer import Timer\n'), ((2318, 2325), 'contexttimer.Timer', 'Timer', ([], {}), '()\n', (2323, 2325), False, 'from contexttimer import Timer\n'), ((2349, 2370), 'pyarrow.concat_tables', 'pa.concat_tables', (['dfs'], {}), '(dfs)\n', (2365, 2370), True, 'import pyarrow as pa\n'), ((2425, 2432), 'contexttimer.Timer', 'Timer', ([], {}), '()\n', (2430, 2432), False, 'from contexttimer import Timer\n'), ((1722, 1756), 'pyarrow.csv.ReadOptions', 'csv.ReadOptions', ([], {'use_threads': '(False)'}), '(use_threads=False)\n', (1737, 1756), False, 'from pyarrow import csv\n'), ((2218, 2240), 'itertools.repeat', 'itertools.repeat', (['conn'], {}), '(conn)\n', (2234, 2240), False, 'import itertools\n')] |
import sqlite3
import numpy as np
def get_prof(prof_identifier):
"""
Returns all professors that match the identifier entered in the query
"""
# Fetch professors from the DB matching the query
conn = sqlite3.connect('./db.sqlite3')
cursor = conn.cursor()
cursor.execute("SELECT * FROM professor WHERE name LIKE ?;", (f'%{prof_identifier}%',))
return cursor.fetchall()
def get_course(course_identifier):
"""
Returns all courses that match the identifier entered in the query
"""
# Fetch courses from the DB matching the query (substring match)
conn = sqlite3.connect('./db.sqlite3')
cursor = conn.cursor()
cursor.execute("SELECT * FROM course WHERE course_name LIKE ?", (f'%{course_identifier}%',))
return cursor.fetchall()
def get_prof_course_comb(identifier):
"""
Returns all instructor and/or courses matching a certain query - made for the combo page
"""
# Query the DB for all instructor and/or course pairs matching the identifier
conn = sqlite3.connect('./db.sqlite3')
cursor = conn.cursor()
#Getting all the instructor + course pairs matching the query
cursor.execute("""SELECT professor.prof_id, professor.name, prof_sec.course_name, course.level
FROM prof_sec
INNER JOIN professor ON prof_sec.prof_id=professor.prof_id
LEFT JOIN course ON course.course_name = prof_sec.course_name
WHERE professor.name LIKE ? OR prof_sec.course_name LIKE ?;"""
, (f'%{identifier}%', f'%{identifier}%',))
return cursor.fetchall()
def get_ratings(prof_id, course_name):
"""
Returns all ratings for a particular instructor+course pair
"""
conn = sqlite3.connect('./db.sqlite3')
cursor = conn.cursor()
cursor.execute("""SELECT rating.workload, rating.learning, rating.grading
FROM review
INNER JOIN rating ON review.review_id = rating.review_id
WHERE review.prof_id = ? AND course_name = ?""", (prof_id, course_name))
return cursor.fetchall()
def get_cards(prof_course_list):
"""
Returns the card view given a filtered list of instructor+course tuples
"""
#for each pair - get the ratings if any
if len(prof_course_list) == 0:
return []
else:
comb_list = []
for row in prof_course_list:
#getting the ratings
rating_tuple = get_ratings(row[0], row[2])
if len(rating_tuple) != 0:
ratings = np.array(rating_tuple)
print(ratings)
cum_ratings = np.round(np.mean(ratings, axis = 0, dtype = np.float64), 2)
else:
cum_ratings = [-1]
print(cum_ratings)
comb_list.append({'prof_id': row[0], 'prof_name': row[1], 'course_name': row[2], 'level': row[3], 'ratings': cum_ratings})
return comb_list
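# Illustrative example (made-up names and values): for a single row
# (1, 'Ada Lovelace', 'CS101', 100) whose stored ratings are
# [(3, 4, 5), (4, 4, 4)], get_cards returns
#   [{'prof_id': 1, 'prof_name': 'Ada Lovelace', 'course_name': 'CS101',
#     'level': 100, 'ratings': array([3.5, 4. , 4.5])}]
# and 'ratings' is [-1] when the pair has no reviews yet.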
def filter_acc_level(level):
"""
Returns records filtered according to level ONLY
"""
conn = sqlite3.connect('./db.sqlite3')
cursor = conn.cursor()
cursor.execute("""SELECT professor.prof_id, professor.name, prof_sec.course_name, course.level
FROM prof_sec
INNER JOIN professor ON prof_sec.prof_id=professor.prof_id
LEFT JOIN course ON course.course_name = prof_sec.course_name
WHERE course.level = ?;""", (level,))
return cursor.fetchall()
def filter_acc_ratings(rating_list):
"""
Returns records filtered according to ratings ONLY
"""
conn = sqlite3.connect('./db.sqlite3')
cursor = conn.cursor()
cursor.execute("""SELECT professor.prof_id, professor.name, prof_sec.course_name, course.level
FROM prof_sec
INNER JOIN professor ON prof_sec.prof_id=professor.prof_id
LEFT JOIN course ON course.course_name = prof_sec.course_name
WHERE (professor.prof_id, prof_sec.course_name) IN (SELECT prof_id, course_name
FROM review
INNER JOIN rating ON review.review_id = rating.review_id
GROUP BY prof_id, course_name
HAVING (AVG(workload) BETWEEN ? AND ?) AND (AVG(learning) BETWEEN ? AND ?) AND (AVG(grading) BETWEEN ? AND ?));"""
, (float(rating_list[0]), float(rating_list[1]), float(rating_list[2]), float(rating_list[3]), float(rating_list[4]), float(rating_list[5]),))
return cursor.fetchall()
def filter_acc_level_ratings(level, rating_list):
"""
Returns records filtered according to ratings and level ONLY
"""
conn = sqlite3.connect('./db.sqlite3')
cursor = conn.cursor()
cursor.execute("""SELECT professor.prof_id, professor.name, prof_sec.course_name, course.level
FROM prof_sec
INNER JOIN professor ON prof_sec.prof_id=professor.prof_id
LEFT JOIN course ON course.course_name = prof_sec.course_name
WHERE course.level = ? AND (professor.prof_id, prof_sec.course_name) IN (SELECT prof_id, course_name
FROM review
INNER JOIN rating ON review.review_id = rating.review_id
GROUP BY prof_id, course_name
HAVING (AVG(workload) BETWEEN ? AND ?) AND (AVG(learning) BETWEEN ? AND ?) AND (AVG(grading) BETWEEN ? AND ?));"""
, (level, float(rating_list[0]), float(rating_list[1]), float(rating_list[2]), float(rating_list[3]), float(rating_list[4]), float(rating_list[5]),))
return cursor.fetchall()
def filter_acc_prof_course_ratings(identifier, rating_list):
"""
Returns records filtered according to prof/course and ratings ONLY
"""
conn = sqlite3.connect('./db.sqlite3')
cursor = conn.cursor()
cursor.execute("""SELECT professor.prof_id, professor.name, prof_sec.course_name, course.level
FROM prof_sec
INNER JOIN professor ON prof_sec.prof_id=professor.prof_id
LEFT JOIN course ON course.course_name = prof_sec.course_name
WHERE (professor.name LIKE ? OR prof_sec.course_name LIKE ?) AND (professor.prof_id, prof_sec.course_name) IN (SELECT prof_id, course_name
FROM review
INNER JOIN rating ON review.review_id = rating.review_id
GROUP BY prof_id, course_name
HAVING (AVG(workload) BETWEEN ? AND ?) AND (AVG(learning) BETWEEN ? AND ?) AND (AVG(grading) BETWEEN ? AND ?));"""
, (f'%{identifier}%', f'%{identifier}%', float(rating_list[0]), float(rating_list[1]), float(rating_list[2]), float(rating_list[3]), float(rating_list[4]), float(rating_list[5]),))
return cursor.fetchall()
def filter_acc_prof_course_level(identifier, level):
"""
Returns records filtered according to prof/course and level ONLY
"""
conn = sqlite3.connect('./db.sqlite3')
cursor = conn.cursor()
cursor.execute("""SELECT professor.prof_id, professor.name, prof_sec.course_name, course.level
FROM prof_sec
INNER JOIN professor ON prof_sec.prof_id=professor.prof_id
LEFT JOIN course ON course.course_name = prof_sec.course_name
WHERE (professor.name LIKE ? OR prof_sec.course_name LIKE ?) AND course.level = ?;"""
, (f'%{identifier}%', f'%{identifier}%', level))
return cursor.fetchall()
def filter_acc_all(identifier, level, rating_list):
"""
Returns records filtered according to prof/course, level and ratings
"""
conn = sqlite3.connect('./db.sqlite3')
cursor = conn.cursor()
cursor.execute("""SELECT professor.prof_id, professor.name, prof_sec.course_name, course.level
FROM prof_sec
INNER JOIN professor ON prof_sec.prof_id=professor.prof_id
LEFT JOIN course ON course.course_name = prof_sec.course_name
WHERE (professor.name LIKE ? OR prof_sec.course_name LIKE ?) AND course.level = ? AND (professor.prof_id, prof_sec.course_name) IN (SELECT prof_id, course_name
FROM review
INNER JOIN rating ON review.review_id = rating.review_id
GROUP BY prof_id, course_name
HAVING (AVG(workload) BETWEEN ? AND ?) AND (AVG(learning) BETWEEN ? AND ?) AND (AVG(grading) BETWEEN ? AND ?));"""
, (f'%{identifier}%', f'%{identifier}%', level, float(rating_list[0]), float(rating_list[1]), float(rating_list[2]), float(rating_list[3]), float(rating_list[4]), float(rating_list[5]),))
return cursor.fetchall() | [
"numpy.array",
"numpy.mean",
"sqlite3.connect"
] | [((212, 243), 'sqlite3.connect', 'sqlite3.connect', (['"""./db.sqlite3"""'], {}), "('./db.sqlite3')\n", (227, 243), False, 'import sqlite3\n'), ((598, 629), 'sqlite3.connect', 'sqlite3.connect', (['"""./db.sqlite3"""'], {}), "('./db.sqlite3')\n", (613, 629), False, 'import sqlite3\n'), ((984, 1015), 'sqlite3.connect', 'sqlite3.connect', (['"""./db.sqlite3"""'], {}), "('./db.sqlite3')\n", (999, 1015), False, 'import sqlite3\n'), ((1722, 1753), 'sqlite3.connect', 'sqlite3.connect', (['"""./db.sqlite3"""'], {}), "('./db.sqlite3')\n", (1737, 1753), False, 'import sqlite3\n'), ((3055, 3086), 'sqlite3.connect', 'sqlite3.connect', (['"""./db.sqlite3"""'], {}), "('./db.sqlite3')\n", (3070, 3086), False, 'import sqlite3\n'), ((3633, 3664), 'sqlite3.connect', 'sqlite3.connect', (['"""./db.sqlite3"""'], {}), "('./db.sqlite3')\n", (3648, 3664), False, 'import sqlite3\n'), ((5022, 5053), 'sqlite3.connect', 'sqlite3.connect', (['"""./db.sqlite3"""'], {}), "('./db.sqlite3')\n", (5037, 5053), False, 'import sqlite3\n'), ((6380, 6411), 'sqlite3.connect', 'sqlite3.connect', (['"""./db.sqlite3"""'], {}), "('./db.sqlite3')\n", (6395, 6411), False, 'import sqlite3\n'), ((7797, 7828), 'sqlite3.connect', 'sqlite3.connect', (['"""./db.sqlite3"""'], {}), "('./db.sqlite3')\n", (7812, 7828), False, 'import sqlite3\n'), ((8523, 8554), 'sqlite3.connect', 'sqlite3.connect', (['"""./db.sqlite3"""'], {}), "('./db.sqlite3')\n", (8538, 8554), False, 'import sqlite3\n'), ((2556, 2578), 'numpy.array', 'np.array', (['rating_tuple'], {}), '(rating_tuple)\n', (2564, 2578), True, 'import numpy as np\n'), ((2649, 2691), 'numpy.mean', 'np.mean', (['ratings'], {'axis': '(0)', 'dtype': 'np.float64'}), '(ratings, axis=0, dtype=np.float64)\n', (2656, 2691), True, 'import numpy as np\n')] |
# SAMPLE USAGE:
# python3 step2_featureSelction_regression.py
# before running, install all packages in requirement.txt
import sys
sys.path.append('../..')
import operator
import argparse
import numpy as np
import sklearn.linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.feature_selection import f_regression
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from statsmodels.stats.outliers_influence import variance_inflation_factor
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from scipy.stats import spearmanr, pearsonr
from sklearn.metrics import precision_recall_fscore_support, roc_curve, auc
import random
def selectByPCA(features,fnames,target_num=10):
pca = PCA(target_num,copy=True)
fit = pca.fit_transform(features)
return fit, None  # PCA components are linear mixtures of the inputs, so there are no per-feature names
def selectByVIF(features, fnames, target_num=10, threshold=5.):
variables = np.array([i for i in range(features.shape[1])])
cnt = 0
for i in np.arange(0, len(variables)):
cnt += 1
vif = [variance_inflation_factor(features[:,variables], ix) for ix in range(len(variables))]
#print('round {}: {}'.format(cnt,vif))
maxloc = vif.index(max(vif))
if len(variables) <= target_num or max(vif) < threshold:
break
else:
variables = np.delete(variables,maxloc)
#print('Remaining variables ({}):'.format(len(variables)))
#for nn in variables:
#print(fnames[nn])
return features[:,variables], fnames[variables]
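# Quick synthetic illustration of the VIF loop (names and values are made up):
# x2 is nearly collinear with x0, so the elimination should break up that
# pair first and stop once target_num columns remain.
def _demo_selectByVIF():
    rng = np.random.RandomState(0)
    x0, x1 = rng.randn(200), rng.randn(200)
    x2 = x0 + 0.01 * rng.randn(200)
    kept, kept_names = selectByVIF(np.column_stack([x0, x1, x2]),
                                   np.array(['x0', 'x1', 'x2']), target_num=2)
    return kept_names    # two names, with one of x0/x2 dropped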
def addRebuttalScoreFeature(features,names):
self_prevs = features[:,[nn=='all_max' for nn in names]]
added = [max(ss[0],4.) for ss in self_prevs]
return np.append(np.array(added).reshape(-1,1),features,axis=1), np.append('rebuttal_score',names)
def randomDownSample(labels, target_label='nc', left_ratio=0.3):
# keep every non-target row; keep each target_label ('nc') row with probability left_ratio
idx = []
for ii in range(len(labels)):
if labels[ii] != target_label:
idx.append(True)
else:
idx.append(random.random() <= left_ratio)
return np.array(idx)
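# e.g. keep all 'inc'/'dec' rows and roughly 30% of the 'nc' rows:
#   mask = randomDownSample(labels, left_ratio=0.3)
#   features, labels = features[mask], labels[mask]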
def myCrossValidation(clf,features,labels,cv=50,dev_ratio=0.2):
window_size = int(len(labels)/cv)
pointer = 0
class_names = ['inc','dec','nc','macroAvg']
metric_names = ['pre','rec','f1']
results = {}
for cn in class_names:
for mn in metric_names:
results['{}-{}'.format(cn,mn)] = []
for fold in range(cv):
avai_idx = np.array([not(ii>=pointer and ii<pointer+window_size) for ii in range(len(labels))])
test_idx = np.array([ii>=pointer and ii<pointer+window_size for ii in range(len(labels))])
avai_features = features[avai_idx]
avai_labels = labels[avai_idx]
#train_features = avai_features[range(0,int(len(avai_labels)*(1-dev_ratio)))]
#train_labels = avai_labels[range(0,int(len(avai_labels)*(1-dev_ratio)))]
#dev_features = avai_features[range(int(len(avai_labels)*(1-dev_ratio)), len(avai_labels))]
#dev_labels = avai_labels[range(0,int(len(avai_labels)*(1-dev_ratio)), len(avai_labels))]
test_features = features[test_idx]
test_labels = labels[test_idx]
### Down-sample the majority 'nc' class in the training split to roughly the size of 'inc' + 'dec'; the held-out test window is left untouched
lnames,lcounts = np.unique(avai_labels,return_counts=True)
lnames = list(lnames)
lcounts = list(lcounts)
nc_num = lcounts[lnames.index('nc')]
inc_dec_num = lcounts[lnames.index('inc')] + lcounts[lnames.index('dec')]
sampled_idx = randomDownSample(avai_labels,left_ratio=inc_dec_num*1./nc_num)
clf.fit(avai_features[sampled_idx],avai_labels[sampled_idx])
pre, rec, f1, _ = precision_recall_fscore_support(test_labels,clf.predict(test_features),labels=class_names[:-1])
for mi,metric in enumerate([pre,rec,f1]):
for ni,nn in enumerate(metric):
results['{}-{}'.format(class_names[ni],metric_names[mi])].append(nn)
results['macroAvg-pre'].append(np.mean(pre))
results['macroAvg-rec'].append(np.mean(rec))
results['macroAvg-f1'].append(np.mean(f1))
pointer += window_size
return results
def getLabels(data_path):
data = pd.read_csv(data_path,usecols=['self_prev','score'])
labels = []
prev_scores = np.array(data['self_prev'].tolist())
scores = np.array(data['score'].tolist())
delta = scores-prev_scores
for ii in range(len(scores)):
if delta[ii] > 0:
cn = 'inc'
elif delta[ii] < 0:
cn = 'dec'
else:
cn = 'nc'
labels.append(cn)
return np.array(labels)
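# Label semantics: 'inc' if the new score exceeds the previous one
# ('self_prev'), 'dec' if it dropped, 'nc' (no change) otherwise.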
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--feature_num',action="store",dest="fn",nargs='?',default=-1)
parser.add_argument("--classifier",action="store",dest="clf",default="log-reg",nargs='?',help="gp|log-reg|forest|tree|svc_lin|svc_poly|svc_rbf|svc_sigmoid")
parser.add_argument("--feature-selector",action="store",dest="fs",default="none",nargs='?',help='pca or vif')
parser.add_argument("--feature-set",action="store",dest="fst",
default="length-similarity-opinion-politeness-specificity-convincingness",nargs='?')
#default="opinion",nargs='?')
parser.add_argument("--data",action="store",dest="data",
default="./AggregateFeatures/borderline_score_cleanedPlt_cleanedSpc_cleanedCvc_sim_respLogLen.csv",nargs='?')
#parser.set_defaults(conll=False)
parsed_args = parser.parse_args(sys.argv[1:])
print(":::",parsed_args.fn,parsed_args.clf,parsed_args.fs,parsed_args.fst)
cv = 10
feature_num = int(parsed_args.fn)
clf_type = parsed_args.clf
feature_selector = parsed_args.fs
feature_set_name = parsed_args.fst
data_path = parsed_args.data
### read features and scores
length_features = [1]
opinion_features = list(range(2,28))
specificity_features = list(range(28,33))
politeness_features = list(range(33,38))
convincingness_features = list(range(38,43))
similarity_features = [43]
out_feature = [44]
h = {"opinion":opinion_features,"politeness":politeness_features,"specificity":specificity_features,"length":length_features,'convincingness':convincingness_features,'similarity':similarity_features,'length':length_features}
feature_set = []
for x in feature_set_name.split("-"):
feature_set += h[x]
if feature_num==-1:
feature_num = len(feature_set)
print(len(feature_set))
### get classification labels
labels = getLabels(data_path)
print('\n===Label Distribution===')
lnames, cnts = np.unique(labels,return_counts=True)
for idx in range(len(lnames)):
print('{} : {} ({}%)'.format(lnames[idx],cnts[idx],cnts[idx]*100./sum(cnts)))
data = pd.read_csv(data_path,usecols=feature_set+out_feature)
print("-->",data_path,data.columns.values)
### get target labels
class_names = ['inc','dec','nc']
feature_names= np.array(list(data.columns.values)[:-1])
print(feature_names)
matrix = np.array(data.values)
features = matrix[:,:-1]
### when using score alone, use other_mean-self and prev_self
### when not using score features, activate long_resp_length
### simple model
#wanted_features = ['other_mean-self','self-other_min',
#'rev_resp_embd_sim',
#'plt_max', #'plt_median','plt_min',
#'cvc_max',#'cvc_min','cvc_mean',
#'spec_median','spec_max',#'spec_min',
#'log_resp_length'
#] #all case
wanted_features = ['other_mean-self','self-other_min',
'rev_resp_embd_sim',
'plt_max', 'plt_median',
'cvc_min', 'cvc_max',#'cvc_mean',
'spec_median',#'spec_max','spec_min',
#'log_resp_length'
] #borderline case
features = features[:,np.array([feature_names[ii] in wanted_features for ii in range(len(feature_names))])]
feature_names = np.array(wanted_features)
### simple model
features = StandardScaler().fit_transform(features)
if feature_selector == 'vif' and features.shape[1] > 1:
features, feature_names = selectByVIF(features, feature_names, feature_num)
elif feature_selector == 'pca' and features.shape[1] > 1:
features, feature_names = selectByPCA(features,feature_names,feature_num)
'''
### majority baseline:
maj_labels = ['nc']*len(labels)
pre, rec, f1, _ = precision_recall_fscore_support(labels,maj_labels,labels=class_names)
print('\n===MAJORITY BAELINE===')
for ii,cn in enumerate(class_names):
print('---CLASS {}'.format(cn.upper()))
print('precision {}, recall {}, F1 {}'.format(pre[ii], rec[ii], f1[ii]))
print('---Macro Avg---')
print('precision {}, recall {}, F1 {}'.format(np.mean(pre), np.mean(rec), np.mean(f1)))
## random baseline:
f1_list = [[],[],[]]
p_list = [[],[],[]]
r_list = [[],[],[]]
for _ in range(500):
rnd_labels = [random.choice(['nc','inc','dec']) for ii in range(len(labels))]
pre, rec, f1, _ = precision_recall_fscore_support(labels,rnd_labels,labels=class_names)
for ii,cn in enumerate(class_names):
p_list[ii].append(pre[ii])
r_list[ii].append(rec[ii])
f1_list[ii].append(f1[ii])
print('\n===Random BAELINE===')
for ii,cn in enumerate(class_names):
print('---CLASS {}'.format(cn.upper()))
print('precision {}, recall {}, F1 {}'.format(np.mean(p_list[ii]), np.mean(r_list[ii]), np.mean(f1_list[ii])))
print('---Macro Avg---')
print('precision {}, recall {}, F1 {}'.format(np.mean(p_list[0]+p_list[1]+p_list[2]), np.mean(r_list[0]+r_list[1]+r_list[2]), np.mean(f1_list[0]+f1_list[1]+f1_list[2])))
exit(111)
'''
### cross-validation
if clf_type == 'gp':
#kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
#clf = GaussianProcessClassifier(kernel=kernel)
clf = GaussianProcessClassifier()
elif clf_type == 'svc_rbf':
clf = SVC(kernel='rbf')
elif clf_type == 'svc_lin':
clf = SVC(kernel='linear')
elif clf_type == 'svc_poly':
clf = SVC(kernel='poly')
elif clf_type == 'svc_sigmoid':
clf = SVC(kernel='sigmoid')
elif clf_type == 'forest':
clf = RandomForestClassifier()
elif clf_type == 'tree':
clf = DecisionTreeClassifier()
else:
clf = sklearn.linear_model.LogisticRegression(multi_class='multinomial',solver='newton-cg')
#clf = sklearn.linear_model.LogisticRegression()
all_results = None
repeat = 100
feat_imp_dic = {}
feat_weights_dic = {}
for ii in range(repeat):
if (ii+1)%500 == 0:
print(ii+1)
### shuffle the order of the features and training examples
indices = np.random.permutation(features.shape[0])
features = features[indices]
labels = labels[indices]
### shuffling the features
indices = np.random.permutation(features.shape[1])
if feature_selector != 'pca':
feature_names = feature_names[indices]
features = features[:,indices]
cv_results = myCrossValidation(clf,features,labels,cv)
if clf_type == 'forest':
ww = clf.feature_importances_
for ii in range(len(feature_names)):
feat_imp_dic[feature_names[ii]] = feat_imp_dic.get(feature_names[ii],0) + ww[ii]
elif clf_type == 'log-reg' and feature_selector == 'none':
classes = list(clf.classes_)
for i,cc in enumerate(classes):
if cc not in feat_weights_dic:
feat_weights_dic[cc] = {}
for j, ff in enumerate(feature_names):
feat_weights_dic[cc][ff] = feat_weights_dic[cc].get(ff,0) + clf.coef_[i][j]
if all_results is None:
all_results = cv_results.copy()
else:
for metric in cv_results:
all_results[metric].extend(cv_results[metric])
if clf_type == 'forest':
print('\n---Features Importance---')
sorted_dic = sorted(feat_imp_dic.items(), key=operator.itemgetter(1), reverse=True)
for entry in sorted_dic:
print('{}\t\t{}'.format(entry[0],entry[1]))
elif clf_type == 'log-reg' and feature_selector == 'none':
print('\n---Features Weights---')
for cc in feat_weights_dic:
print('\n--{}--'.format(cc))
for ff in feat_weights_dic[cc]:
print('{}: {}'.format(ff,feat_weights_dic[cc][ff]*1./repeat))
print('\n===Repeat {} times {} {}-Fold Cross Validation==='.format(repeat,clf_type,cv))
for metric in all_results:
print('{} : mean {}, std {}'.format(metric,np.mean(all_results[metric]),np.std(all_results[metric])))
| [
"sklearn.preprocessing.StandardScaler",
"argparse.ArgumentParser",
"pandas.read_csv",
"sklearn.tree.DecisionTreeClassifier",
"numpy.mean",
"sklearn.svm.SVC",
"numpy.unique",
"sys.path.append",
"statsmodels.stats.outliers_influence.variance_inflation_factor",
"numpy.std",
"sklearn.gaussian_proces... | [((133, 157), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (148, 157), False, 'import sys\n'), ((1195, 1221), 'sklearn.decomposition.PCA', 'PCA', (['target_num'], {'copy': '(True)'}), '(target_num, copy=True)\n', (1198, 1221), False, 'from sklearn.decomposition import PCA\n'), ((2505, 2518), 'numpy.array', 'np.array', (['idx'], {}), '(idx)\n', (2513, 2518), True, 'import numpy as np\n'), ((4598, 4652), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'usecols': "['self_prev', 'score']"}), "(data_path, usecols=['self_prev', 'score'])\n", (4609, 4652), True, 'import pandas as pd\n'), ((5006, 5022), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (5014, 5022), True, 'import numpy as np\n'), ((5065, 5090), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5088, 5090), False, 'import argparse\n'), ((7083, 7120), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (7092, 7120), True, 'import numpy as np\n'), ((7253, 7310), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'usecols': '(feature_set + out_feature)'}), '(data_path, usecols=feature_set + out_feature)\n', (7264, 7310), True, 'import pandas as pd\n'), ((7517, 7538), 'numpy.array', 'np.array', (['data.values'], {}), '(data.values)\n', (7525, 7538), True, 'import numpy as np\n'), ((8578, 8603), 'numpy.array', 'np.array', (['wanted_features'], {}), '(wanted_features)\n', (8586, 8603), True, 'import numpy as np\n'), ((2211, 2245), 'numpy.append', 'np.append', (['"""rebuttal_score"""', 'names'], {}), "('rebuttal_score', names)\n", (2220, 2245), True, 'import numpy as np\n'), ((3663, 3705), 'numpy.unique', 'np.unique', (['avai_labels'], {'return_counts': '(True)'}), '(avai_labels, return_counts=True)\n', (3672, 3705), True, 'import numpy as np\n'), ((10581, 10608), 'sklearn.gaussian_process.GaussianProcessClassifier', 'GaussianProcessClassifier', ([], {}), '()\n', (10606, 10608), False, 'from sklearn.gaussian_process import GaussianProcessClassifier\n'), ((11439, 11479), 'numpy.random.permutation', 'np.random.permutation', (['features.shape[0]'], {}), '(features.shape[0])\n', (11460, 11479), True, 'import numpy as np\n'), ((11604, 11644), 'numpy.random.permutation', 'np.random.permutation', (['features.shape[1]'], {}), '(features.shape[1])\n', (11625, 11644), True, 'import numpy as np\n'), ((1497, 1550), 'statsmodels.stats.outliers_influence.variance_inflation_factor', 'variance_inflation_factor', (['features[:, variables]', 'ix'], {}), '(features[:, variables], ix)\n', (1522, 1550), False, 'from statsmodels.stats.outliers_influence import variance_inflation_factor\n'), ((1788, 1816), 'numpy.delete', 'np.delete', (['variables', 'maxloc'], {}), '(variables, maxloc)\n', (1797, 1816), True, 'import numpy as np\n'), ((4389, 4401), 'numpy.mean', 'np.mean', (['pre'], {}), '(pre)\n', (4396, 4401), True, 'import numpy as np\n'), ((4442, 4454), 'numpy.mean', 'np.mean', (['rec'], {}), '(rec)\n', (4449, 4454), True, 'import numpy as np\n'), ((4494, 4505), 'numpy.mean', 'np.mean', (['f1'], {}), '(f1)\n', (4501, 4505), True, 'import numpy as np\n'), ((8640, 8656), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (8654, 8656), False, 'from sklearn.preprocessing import StandardScaler\n'), ((10655, 10672), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""'}), "(kernel='rbf')\n", (10658, 10672), False, 'from sklearn.svm import SVC\n'), ((10719, 10739), 'sklearn.svm.SVC', 'SVC', 
([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (10722, 10739), False, 'from sklearn.svm import SVC\n'), ((12775, 12797), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (12794, 12797), False, 'import operator\n'), ((13380, 13407), 'numpy.mean', 'np.mean', (['cv_results[metric]'], {}), '(cv_results[metric])\n', (13387, 13407), True, 'import numpy as np\n'), ((13408, 13434), 'numpy.std', 'np.std', (['cv_results[metric]'], {}), '(cv_results[metric])\n', (13414, 13434), True, 'import numpy as np\n'), ((2163, 2178), 'numpy.array', 'np.array', (['added'], {}), '(added)\n', (2171, 2178), True, 'import numpy as np\n'), ((2464, 2479), 'random.random', 'random.random', ([], {}), '()\n', (2477, 2479), False, 'import random\n'), ((10787, 10805), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""poly"""'}), "(kernel='poly')\n", (10790, 10805), False, 'from sklearn.svm import SVC\n'), ((10856, 10877), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""sigmoid"""'}), "(kernel='sigmoid')\n", (10859, 10877), False, 'from sklearn.svm import SVC\n'), ((10923, 10947), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (10945, 10947), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((10991, 11015), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (11013, 11015), False, 'from sklearn.tree import DecisionTreeClassifier\n')] |
import cv2
import numpy as np
def get_amplified_image(image_path, amplified_image_path, factor=2):
image = cv2.imread(image_path)
height, width, channels = image.shape
amplified_image = np.zeros((height,width,3), np.uint8)
for i in range(0, height):
for j in range(0,width):
            # cast to int first: multiplying the uint8 pixel directly would
            # wrap around at 256 before min() could clamp it
            amplified_image[i][j][0] = min(factor * int(image[i][j][0]), 255)
            amplified_image[i][j][1] = min(factor * int(image[i][j][1]), 255)
            amplified_image[i][j][2] = min(factor * int(image[i][j][2]), 255)
cv2.imwrite(amplified_image_path, amplified_image)
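# Editor's sketch (not from the original): the same clamp-and-scale can be done
# in one vectorized step, avoiding the per-pixel Python loops:
#     amplified = np.clip(image.astype(np.int32) * factor, 0, 255).astype(np.uint8)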
if __name__ == "__main__":
image_path = 'raw_images/2.jpg'
amplified_image_path = 'processed_images/2_amplified.jpg'
factor = 2
get_amplified_image(image_path, amplified_image_path, factor) | [
"cv2.imread",
"numpy.zeros",
"cv2.imwrite"
] | [((113, 135), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (123, 135), False, 'import cv2\n'), ((200, 238), 'numpy.zeros', 'np.zeros', (['(height, width, 3)', 'np.uint8'], {}), '((height, width, 3), np.uint8)\n', (208, 238), True, 'import numpy as np\n'), ((524, 574), 'cv2.imwrite', 'cv2.imwrite', (['amplified_image_path', 'amplified_image'], {}), '(amplified_image_path, amplified_image)\n', (535, 574), False, 'import cv2\n')] |
"""/app/routes.py
Description: Route definition for constellation generator
Project: Fauxstrology
Author: <NAME>
Date: 12/7/2019
"""
#=== Start imports ===#
# third party
from flask import current_app, jsonify, request
from textgenrnn import textgenrnn
from imageai.Prediction import ImagePrediction
import numpy as np
import cv2
import imutils
from PIL import Image
import requests
# std lib
from io import BytesIO
import logging
import datetime
import time
import re
import os
from random import randint
#=== End imports ===#
l = logging.getLogger(__name__)
def constellation():
try:
bd = request.args.get("bd")
rkey = "const_" + bd
if not current_app.config["REDIS_DB"].exists(rkey):
            dayOfYear = datetime.datetime.strptime(bd, "%d-%m-%Y").timetuple().tm_yday
#right ascension and declinaiton come from the birthday
ra = dayOfYear/365*360
dec = (dayOfYear/365*180)-90
#angular distance image will span in horizontal (width) and vertical (height)
#directions
width_deg = 0.25
height_deg = 0.25
#pixels along width and height of image
width = 400
height = width
#scale is arcseconds per pixel
scale = width_deg * 3600 / width
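            # worked example: 0.25 deg * 3600 arcsec/deg / 400 px = 2.25 arcsec/px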
#get picture from SDSS
url = "http://skyserver.sdss.org/dr15/SkyServerWS/ImgCutout/getjpeg?ra=" + \
str(ra) + "&dec=" + str(dec) + "&scale=" + str(scale) + \
"&width=" + str(width) + "&height=" + str(height)
r = requests.get(url = url)
pil_img = Image.open(BytesIO(r.content))
            img = np.array(pil_img)
img = img[:, :, ::-1].copy()
# Number of stars to find
numStars = 10
# Determine the center point
centerPoint = (img.shape[0]/2, img.shape[1]/2)
# Convert the image to grayscale, then blur and threshold
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5,5), 0)
thresh = cv2.threshold(blurred, 50, 255, cv2.THRESH_BINARY)[1]
# Find the contours in the thresholded image and pick out the brightest
conts = cv2.findContours(thresh.copy(),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
            conts = imutils.grab_contours(conts)
conts = sorted(conts, key=cv2.contourArea, reverse=True)[:numStars]
# Pull out the center of each contour
stars = np.array([[1,2,3]])
for c in conts:
M = cv2.moments(c)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
cv2.circle(img, (cX, cY), 5, (255, 255, 0), -1)
# For each star, add the center and the angle wrt x-axis of the ray pointing
# to the center
angle = np.arctan2(cY-centerPoint[1], cX-centerPoint[0])
stars = np.append(stars,[[cX,cY,angle]], axis=0)
# Sort the stars by the angle
stars = stars[1:,:]
stars = stars[stars[:,-1].argsort()]
stars = stars[:,:-1].astype(np.int32)
# Connect the dots
cv2.fillPoly(img, np.array([stars]), (255,0,0))
cv2.imwrite('prediction_image.jpg', img)
executionPath = os.getcwd()
prediction = ImagePrediction()
prediction.setModelTypeAsSqueezeNet()
prediction.setModelPath(os.path.join(executionPath,
"squeezenet_weights_tf_dim_ordering_tf_kernels.h5"))
prediction.loadModel()
predictions, probabilities = prediction.predictImage(os.path.join(executionPath,"prediction_image.jpg"), result_count=10)
imagePath = ""
predictionList = predictions
current_app.config["REDIS_DB"].hset(rkey, "imagePath", imagePath)
current_app.config["REDIS_DB"].hset(rkey, "predictionList", predictionList)
else:
imagePath = current_app.config["REDIS_DB"].hget(rkey, "imagePath").decode('utf-8')
predictionList = current_app.config["REDIS_DB"].hget(rkey,"predictionList")
data = {
"metadata": {
"gen_at": time.strftime("%d %m, %H:%M:%S"),
"input_bd": bd
},
"image_path": imagePath,
"predictionList": predictionList
}
l.info("successfully generated horoscope")
return jsonify(data=data), 200
except Exception as e:
        l.exception("An error occurred in constellation generation | {}".format(str(e)))
return jsonify(error=["Could not find your future in the stars"]), 500
| [
"cv2.GaussianBlur",
"numpy.arctan2",
"time.strftime",
"flask.jsonify",
"os.path.join",
"flask.request.args.get",
"cv2.cvtColor",
"cv2.imwrite",
"numpy.append",
"io.BytesIO",
"cv2.circle",
"datetime.strptime",
"imutils.grab_contours",
"os.getcwd",
"cv2.threshold",
"cv2.moments",
"nump... | [((524, 551), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (541, 551), False, 'import logging\n'), ((596, 618), 'flask.request.args.get', 'request.args.get', (['"""bd"""'], {}), "('bd')\n", (612, 618), False, 'from flask import current_app, jsonify, request\n'), ((1666, 1685), 'numpy.array', 'np.array', (['pil_image'], {}), '(pil_image)\n', (1674, 1685), True, 'import numpy as np\n'), ((1985, 2022), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1997, 2022), False, 'import cv2\n'), ((2045, 2078), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(5, 5)', '(0)'], {}), '(gray, (5, 5), 0)\n', (2061, 2078), False, 'import cv2\n'), ((2431, 2459), 'imutils.grab_contours', 'imutils.grab_contours', (['conts'], {}), '(conts)\n', (2452, 2459), False, 'import imutils\n'), ((2612, 2633), 'numpy.array', 'np.array', (['[[1, 2, 3]]'], {}), '([[1, 2, 3]])\n', (2620, 2633), True, 'import numpy as np\n'), ((3393, 3433), 'cv2.imwrite', 'cv2.imwrite', (['"""prediction_image.jpg"""', 'img'], {}), "('prediction_image.jpg', img)\n", (3404, 3433), False, 'import cv2\n'), ((3463, 3474), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3472, 3474), False, 'import os\n'), ((3501, 3518), 'imageai.Prediction.ImagePrediction', 'ImagePrediction', ([], {}), '()\n', (3516, 3518), False, 'from imageai.Prediction import ImagePrediction\n'), ((4617, 4635), 'flask.jsonify', 'jsonify', ([], {'data': 'data'}), '(data=data)\n', (4624, 4635), False, 'from flask import current_app, jsonify, request\n'), ((1628, 1646), 'io.BytesIO', 'BytesIO', (['r.content'], {}), '(r.content)\n', (1635, 1646), False, 'from io import BytesIO\n'), ((2099, 2149), 'cv2.threshold', 'cv2.threshold', (['blurred', '(50)', '(255)', 'cv2.THRESH_BINARY'], {}), '(blurred, 50, 255, cv2.THRESH_BINARY)\n', (2112, 2149), False, 'import cv2\n'), ((2680, 2694), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (2691, 2694), False, 'import cv2\n'), ((2803, 2850), 'cv2.circle', 'cv2.circle', (['img', '(cX, cY)', '(5)', '(255, 255, 0)', '(-1)'], {}), '(img, (cX, cY), 5, (255, 255, 0), -1)\n', (2813, 2850), False, 'import cv2\n'), ((3000, 3052), 'numpy.arctan2', 'np.arctan2', (['(cY - centerPoint[1])', '(cX - centerPoint[0])'], {}), '(cY - centerPoint[1], cX - centerPoint[0])\n', (3010, 3052), True, 'import numpy as np\n'), ((3073, 3116), 'numpy.append', 'np.append', (['stars', '[[cX, cY, angle]]'], {'axis': '(0)'}), '(stars, [[cX, cY, angle]], axis=0)\n', (3082, 3116), True, 'import numpy as np\n'), ((3350, 3367), 'numpy.array', 'np.array', (['[stars]'], {}), '([stars])\n', (3358, 3367), True, 'import numpy as np\n'), ((3605, 3684), 'os.path.join', 'os.path.join', (['executionPath', '"""squeezenet_weights_tf_dim_ordering_tf_kernels.h5"""'], {}), "(executionPath, 'squeezenet_weights_tf_dim_ordering_tf_kernels.h5')\n", (3617, 3684), False, 'import os\n'), ((3802, 3853), 'os.path.join', 'os.path.join', (['executionPath', '"""prediction_image.jpg"""'], {}), "(executionPath, 'prediction_image.jpg')\n", (3814, 3853), False, 'import os\n'), ((4375, 4407), 'time.strftime', 'time.strftime', (['"""%d %m, %H:%M:%S"""'], {}), "('%d %m, %H:%M:%S')\n", (4388, 4407), False, 'import time\n'), ((4773, 4831), 'flask.jsonify', 'jsonify', ([], {'error': "['Could not find your future in the stars']"}), "(error=['Could not find your future in the stars'])\n", (4780, 4831), False, 'from flask import current_app, jsonify, request\n'), ((732, 765), 'datetime.strptime', 'datetime.strptime', (['bd', 
'"""%D-%M-%Y"""'], {}), "(bd, '%D-%M-%Y')\n", (749, 765), False, 'import datetime\n')] |
'''
(c) University of Liverpool 2019
All rights reserved.
@author: neilswainston
'''
# pylint: disable=invalid-name
# pylint: disable=ungrouped-imports
import math
import random
import matplotlib
from numpy import dot
import matplotlib.pyplot as plt
def step_function(x):
'''step_function.'''
return 1 if x >= 0 else 0
def perceptron_output(weights, bias, x):
'''returns 1 if the perceptron 'fires', 0 if not'''
return step_function(dot(weights, x) + bias)
def sigmoid(t):
'''sigmoid.'''
return 1 / (1 + math.exp(-t))
def neuron_output(weights, inputs):
'''neuron_output.'''
return sigmoid(dot(weights, inputs))
def feed_forward(neural_network, input_vector):
'''takes in a neural network (represented as a list of lists of lists of
weights) and returns the output from forward-propagating the input'''
outputs = []
for layer in neural_network:
input_with_bias = input_vector + [1] # add a bias input
output = [neuron_output(neuron, input_with_bias) # compute the output
for neuron in layer] # for this layer
outputs.append(output) # and remember it
# the input to the next layer is the output of this one
input_vector = output
return outputs
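# Usage sketch (editor's illustration; the hand-built XOR weights below are an
# assumption, not part of the original):
#   xor_network = [[[20., 20., -30.],    # hidden neuron computing AND
#                   [20., 20., -10.]],   # hidden neuron computing OR
#                  [[-60., 60., -30.]]]  # output neuron: OR and not AND
#   feed_forward(xor_network, [1, 0])[-1]  # -> [~1.0], i.e. XOR fires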
def backpropagate(network, input_vector, target):
    '''backpropagate.'''
outputs = feed_forward(network, input_vector)
layer = len(network) - 1
# the output * (1 - output) is from the derivative of sigmoid
deltas = [output * (1 - output) * (output - target[i])
for i, output in enumerate(outputs[layer])]
# adjust weights for output layer (network[-1])
for i, neuron in enumerate(network[-1]):
for j, output in enumerate(outputs[layer - 1] + [1]):
neuron[j] -= deltas[i] * output
layer -= 1
# back-propagate errors to hidden layer
deltas = [output * (1 - output) * dot(deltas, [n[i] for n in network[-1]])
for i, output in enumerate(outputs[layer])]
# adjust weights for hidden layer (network[0])
for i, neuron in enumerate(network[0]):
for j, inpt in enumerate(input_vector + [1]):
neuron[j] -= deltas[i] * inpt
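# note: the updates above subtract the full delta * input, i.e. an implicit
# learning rate of 1; scaling deltas by a small factor is the usual refinement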
def patch(x, y, hatch, color):
'''return a matplotlib 'patch' object with the specified
location, crosshatch pattern, and color'''
return matplotlib.patches.Rectangle((x - 0.5, y - 0.5), 1, 1,
hatch=hatch, fill=False, color=color)
def show_weights(network, neuron_idx):
    '''show_weights.'''
weights = network[0][neuron_idx]
abs_weights = list(map(abs, weights))
grid = [abs_weights[row:(row + 5)] # turn the weights into a 5x5 grid
for row in range(0, 25, 5)] # [weights[0:5], ..., weights[20:25]]
ax = plt.gca() # to use hatching, we'll need the axis
ax.imshow(grid, # here same as plt.imshow
interpolation='none') # plot blocks as blocks
# cross-hatch the negative weights
for i in range(5): # row
for j in range(5): # column
if weights[5 * i + j] < 0: # row i, column j = weights[5*i + j]
# add black and white hatches, so visible whether dark or light
ax.add_patch(patch(j, i, '/', 'white'))
ax.add_patch(patch(j, i, '\\', 'black'))
plt.show()
def make_digit(raw_digit):
'''make_digit.'''
return [1 if c == '1' else 0
for row in raw_digit.split('\n')
for c in row.strip()]
def predict(network, inpt):
'''predict.'''
return feed_forward(network, inpt)[-1]
def main():
'''main method.'''
raw_digits = [
'''11111
1...1
1...1
1...1
11111''',
'''..1..
..1..
..1..
..1..
..1..''',
'''11111
....1
11111
1....
11111''',
'''11111
....1
11111
....1
11111''',
'''1...1
1...1
11111
....1
....1''',
'''11111
1....
11111
....1
11111''',
'''11111
1....
11111
1...1
11111''',
'''11111
....1
....1
....1
....1''',
'''11111
1...1
11111
1...1
11111''',
'''11111
1...1
11111
....1
11111''']
inputs = list(map(make_digit, raw_digits))
targets = [[1 if i == j else 0 for i in range(10)]
for j in range(10)]
num_hidden = 5
# the network starts out with random weights
network = [[[random.random() for __ in range(len(inputs[0]) + 1)]
for _ in range(num_hidden)],
[[random.random() for __ in range(num_hidden + 1)]
for _ in range(len(targets[0]))]]
for _ in range(10000):
for inpt, target in zip(inputs, targets):
backpropagate(network, inpt, target)
for i, inpt in enumerate(inputs):
outputs = predict(network, inpt)
print(i, [round(p, 2) for p in outputs])
print()
print([round(x, 2) for x in predict(network,
[0, 1, 1, 1, 0, # .@@@.
0, 0, 0, 1, 1, # ...@@
0, 0, 1, 1, 0, # ..@@.
0, 0, 0, 1, 1, # ...@@
0, 1, 1, 1, 0] # .@@@.
)])
print()
print([round(x, 2) for x in predict(network,
[0, 1, 1, 1, 0, # .@@@.
1, 0, 0, 1, 1, # @..@@
0, 1, 1, 1, 0, # .@@@.
1, 0, 0, 1, 1, # @..@@
0, 1, 1, 1, 0] # .@@@.
)])
if __name__ == '__main__':
main()
| [
"math.exp",
"matplotlib.pyplot.show",
"matplotlib.patches.Rectangle",
"random.random",
"matplotlib.pyplot.gca",
"numpy.dot"
] | [((2416, 2513), 'matplotlib.patches.Rectangle', 'matplotlib.patches.Rectangle', (['(x - 0.5, y - 0.5)', '(1)', '(1)'], {'hatch': 'hatch', 'fill': '(False)', 'color': 'color'}), '((x - 0.5, y - 0.5), 1, 1, hatch=hatch, fill=\n False, color=color)\n', (2444, 2513), False, 'import matplotlib\n'), ((2858, 2867), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2865, 2867), True, 'import matplotlib.pyplot as plt\n'), ((3398, 3408), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3406, 3408), True, 'import matplotlib.pyplot as plt\n'), ((634, 654), 'numpy.dot', 'dot', (['weights', 'inputs'], {}), '(weights, inputs)\n', (637, 654), False, 'from numpy import dot\n'), ((457, 472), 'numpy.dot', 'dot', (['weights', 'x'], {}), '(weights, x)\n', (460, 472), False, 'from numpy import dot\n'), ((538, 550), 'math.exp', 'math.exp', (['(-t)'], {}), '(-t)\n', (546, 550), False, 'import math\n'), ((1973, 2013), 'numpy.dot', 'dot', (['deltas', '[n[i] for n in network[-1]]'], {}), '(deltas, [n[i] for n in network[-1]])\n', (1976, 2013), False, 'from numpy import dot\n'), ((4925, 4940), 'random.random', 'random.random', ([], {}), '()\n', (4938, 4940), False, 'import random\n'), ((5040, 5055), 'random.random', 'random.random', ([], {}), '()\n', (5053, 5055), False, 'import random\n')] |
"""
Create Nested Pipelines in Neuraxle
================================================
You can create pipelines within pipelines using the composition design pattern.
This demonstrates how to create pipelines within pipelines, and how to access the steps and their
attributes in the nested pipelines.
For more info, see the `thread here <https://stackoverflow.com/questions/28822756/getting-model-attributes-from-scikit-learn-pipeline/58359509#58359509>`__.
..
Copyright 2019, Neuraxio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
..
Thanks to Umaneo Technologies Inc. for their contributions to this Machine Learning
project, visit https://www.umaneo.com/ for more information on Umaneo Technologies Inc.
"""
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from neuraxle.base import Identity
from neuraxle.pipeline import Pipeline
def main():
np.random.seed(42)
X = np.random.randint(5, size=(100, 5))
# Create and fit the pipeline:
pipeline = Pipeline([
StandardScaler(),
Identity(),
Pipeline([
Identity(),
Identity(), # Note: an Identity step is a step that does nothing.
Identity(), # We use it here for demonstration purposes.
Pipeline([
Identity(),
PCA(n_components=2)
])
])
])
pipeline, X_t = pipeline.fit_transform(X)
# Get the components:
pca_components = pipeline["Pipeline"]["Pipeline"][-1].get_wrapped_sklearn_predictor().components_
assert pca_components.shape == (2, 5)
# Discussion:
# https://stackoverflow.com/questions/28822756/getting-model-attributes-from-scikit-learn-pipeline/58359509#58359509
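    # Each nested step is addressed by its class-name key, so
    # pipeline["Pipeline"]["Pipeline"][-1] walks two levels of nesting and then
    # takes the last step: the wrapped PCA whose components_ are read above.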
if __name__ == "__main__":
main()
| [
"numpy.random.seed",
"sklearn.preprocessing.StandardScaler",
"neuraxle.base.Identity",
"numpy.random.randint",
"sklearn.decomposition.PCA"
] | [((1451, 1469), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1465, 1469), True, 'import numpy as np\n'), ((1478, 1513), 'numpy.random.randint', 'np.random.randint', (['(5)'], {'size': '(100, 5)'}), '(5, size=(100, 5))\n', (1495, 1513), True, 'import numpy as np\n'), ((1584, 1600), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1598, 1600), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1610, 1620), 'neuraxle.base.Identity', 'Identity', ([], {}), '()\n', (1618, 1620), False, 'from neuraxle.base import Identity\n'), ((1653, 1663), 'neuraxle.base.Identity', 'Identity', ([], {}), '()\n', (1661, 1663), False, 'from neuraxle.base import Identity\n'), ((1677, 1687), 'neuraxle.base.Identity', 'Identity', ([], {}), '()\n', (1685, 1687), False, 'from neuraxle.base import Identity\n'), ((1756, 1766), 'neuraxle.base.Identity', 'Identity', ([], {}), '()\n', (1764, 1766), False, 'from neuraxle.base import Identity\n'), ((1853, 1863), 'neuraxle.base.Identity', 'Identity', ([], {}), '()\n', (1861, 1863), False, 'from neuraxle.base import Identity\n'), ((1881, 1900), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (1884, 1900), False, 'from sklearn.decomposition import PCA\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 08 20:02:09 2018
@author: sarth
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
def order_disorder_separation(image, percentile, size):
"""
    Separates the input image into ordered and disordered regions
    using percentile_filter from scipy.ndimage.
    This function also provides summary parameters
    such as the order-disorder ratio, order percentage, disorder percentage
and total percent coverage from the separated images.
Args:
image: segmented image, numpy array
percentile: float
size: size of the separation filter, int
Returns:
filt_img: numpy array
I_ordered: float
I_disordered: float
order_disorder_ratio: float
percent_ordered: float
percent_disordered: float
percent_coverage: float
Raises:
        Error when an input has the wrong data type.
Error when percentile is out of range.
"""
# Checking the right data type for the input image
assert type(image) == np.ndarray, ('Wrong data type', 'image must be a numpy array')
# Checking the right data type for the percentile
assert type(percentile) == int, ('Wrong data type', 'percentile must be an integer')
assert 0 <= percentile <= 100, ('Out of range', 'Percentile value must be between 0 and 100')
# Checking the right data type for the size
assert type(size) == int, ('Wrong data type', 'size must be an integer')
# Using percentile filter to filter image into two labels - 0 and 1
filt_img = ndimage.percentile_filter(image, percentile, size, mode='reflect')
# creating a boolean array of all the labels - True = label == 1 and False = label == 0
q = filt_img == 1
# creating empty arrays similar to input image
I_ordered = np.zeros_like(image)
I_disordered = np.zeros_like(image)
# Assigning values for ordered and disordered regions
I_ordered[q] = image[q]
I_disordered[~q] = image[~q]
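    # sanity check: q partitions the pixels, so I_ordered + I_disordered
    # reconstructs the input image exactly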
# Plotting Original, Ordered and Disordered Image
figure, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(10, 2.5))
img1 = ax1.imshow(image, cmap='gray', interpolation='nearest')
ax1.set_title('Segmented image, before separation')
plt.colorbar(img1, ax=ax1, shrink=0.5)
img2 = ax2.imshow(I_ordered, cmap='gray', interpolation='nearest')
ax2.set_title('Ordered region')
plt.colorbar(img2, ax=ax2, shrink=0.5)
img3 = ax3.imshow(I_disordered, cmap='gray', interpolation='nearest')
    ax3.set_title('Disordered region')
plt.colorbar(img3, ax=ax3, shrink=0.5)
plt.show()
# Calculating order disorder ratio, percent coverage of ordered, disordered and overall
order_disorder_ratio = np.sum(np.sum(I_ordered)) / np.sum(np.sum(I_disordered))
percent_ordered =100 * (np.sum(np.sum(I_ordered)) / (image.shape[0] * image.shape[1]))
percent_disordered =100 * (np.sum(np.sum(I_disordered)) / (image.shape[0] * image.shape[1]))
percent_coverage = percent_ordered + percent_disordered
print ('--- Disorderness of Image ---')
print ('Order-Disorder ratio = %.5f' %(order_disorder_ratio))
print ('Order Percentage = %.5f' %(percent_ordered))
print ('Disorder Percentage = %.5f' %(percent_disordered))
print ('Coverage Percentage = %.5f' %(percent_coverage))
return filt_img, I_ordered, I_disordered, order_disorder_ratio, percent_ordered, percent_disordered, percent_coverage
| [
"numpy.zeros_like",
"matplotlib.pyplot.show",
"numpy.sum",
"scipy.ndimage.percentile_filter",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.subplots"
] | [((1641, 1707), 'scipy.ndimage.percentile_filter', 'ndimage.percentile_filter', (['image', 'percentile', 'size'], {'mode': '"""reflect"""'}), "(image, percentile, size, mode='reflect')\n", (1666, 1707), False, 'from scipy import ndimage\n'), ((1900, 1920), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (1913, 1920), True, 'import numpy as np\n'), ((1940, 1960), 'numpy.zeros_like', 'np.zeros_like', (['image'], {}), '(image)\n', (1953, 1960), True, 'import numpy as np\n'), ((2174, 2211), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(10, 2.5)'}), '(1, 3, figsize=(10, 2.5))\n', (2186, 2211), True, 'import matplotlib.pyplot as plt\n'), ((2339, 2377), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['img1'], {'ax': 'ax1', 'shrink': '(0.5)'}), '(img1, ax=ax1, shrink=0.5)\n', (2351, 2377), True, 'import matplotlib.pyplot as plt\n'), ((2494, 2532), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['img2'], {'ax': 'ax2', 'shrink': '(0.5)'}), '(img2, ax=ax2, shrink=0.5)\n', (2506, 2532), True, 'import matplotlib.pyplot as plt\n'), ((2650, 2688), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['img3'], {'ax': 'ax3', 'shrink': '(0.5)'}), '(img3, ax=ax3, shrink=0.5)\n', (2662, 2688), True, 'import matplotlib.pyplot as plt\n'), ((2694, 2704), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2702, 2704), True, 'import matplotlib.pyplot as plt\n'), ((2841, 2858), 'numpy.sum', 'np.sum', (['I_ordered'], {}), '(I_ordered)\n', (2847, 2858), True, 'import numpy as np\n'), ((2869, 2889), 'numpy.sum', 'np.sum', (['I_disordered'], {}), '(I_disordered)\n', (2875, 2889), True, 'import numpy as np\n'), ((2931, 2948), 'numpy.sum', 'np.sum', (['I_ordered'], {}), '(I_ordered)\n', (2937, 2948), True, 'import numpy as np\n'), ((3030, 3050), 'numpy.sum', 'np.sum', (['I_disordered'], {}), '(I_disordered)\n', (3036, 3050), True, 'import numpy as np\n')] |
from collections import defaultdict
from graphviz import Digraph
import numpy as np
class BayesNet:
def __init__(self, rvs):
self.k = len(rvs)
self.rvs = rvs
self.G = None
self.Grev = None
def add_edges(self, edges):
self.G = edges
Grev = defaultdict(list)
for vi, vfs in edges.items():
for vf in vfs:
Grev[vf].append(vi)
self.Grev = Grev
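        # e.g. edges {0: (1, 2), 1: (3,)} yields Grev {1: [0], 2: [0], 3: [1]}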
print(f"graph: {self.G}\nreverse graph: {self.Grev}")
def learn_params(self, data):
assert data.shape[-1] == self.k, f"#cols = {data.shape[-1]} differs from #rvs = {self.k}"
def show_graph(self):
dot = Digraph()
for v in self.rvs:
dot.node(str(v), str(v))
        # self.G maps each vertex to the tuple of its successors
        dot.edges(f"{vi}{vf}" for vi, vfs in self.G.items() for vf in vfs)
print(dot.source)
file_name = "myplot"
dot.render(file_name, view=True)
# generate binary RVs: 100 samples of 4 binary rv
data = np.random.randint(2, size=(100, 4))
print(data)
rvs = ['a', 'b', 'c', 'd']
# rvs = [1, 2]
# edges = {('a', 'b'), ('a', 'c'), ('b', 'd'), ('c', 'd')}
edges = {0: (1, 2), 1: (3,), 2: (3, )}
BN = BayesNet(rvs)
BN.add_edges(edges)
BN.learn_params(data)
# BN.show_graph() | [
"collections.defaultdict",
"numpy.random.randint",
"graphviz.Digraph"
] | [((1004, 1039), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(100, 4)'}), '(2, size=(100, 4))\n', (1021, 1039), True, 'import numpy as np\n'), ((300, 317), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (311, 317), False, 'from collections import defaultdict\n'), ((681, 690), 'graphviz.Digraph', 'Digraph', ([], {}), '()\n', (688, 690), False, 'from graphviz import Digraph\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import random
from typing import List
from .base_transform import BaseTransform
from ...builder import TRANSFORMS
@TRANSFORMS.register_module()
class GroupFlip(BaseTransform):
def __init__(self, flip_prob: float = 0.5):
self.flip_prob = flip_prob
def get_transform_param(self, data, *args, **kwargs):
flag = random.uniform(0.0, 1.0) < self.flip_prob
return dict(flag=flag)
def _apply_image(self,
data: List[np.ndarray],
transform_param: dict):
if transform_param['flag']:
data = [self._flip_image(d) for d in data]
transform_param['img_shape'] = data[0].shape
return data
def apply_boxes(self,
boxes: np.ndarray,
transform_param: dict):
assert isinstance(boxes, np.ndarray), f'unknown type {type(boxes)}'
if transform_param['flag']:
img_width = transform_param['img_shape'][1]
trans_boxes = boxes.copy()
trans_boxes[..., 0:4:2] = img_width - trans_boxes[..., [2, 0]]
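            # for (x1, y1, x2, y2) boxes: new_x1 = W - x2 and new_x2 = W - x1,
            # which keeps x1 <= x2 after the horizontal flip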
return trans_boxes
else:
return boxes
def apply_flow(self,
flows: List[np.ndarray],
transform_param: dict):
if transform_param['flag']:
transformed_flows = []
for i_flow in flows:
i_trans_flow = i_flow.copy()
i_trans_flow[..., 0] = -i_trans_flow[..., 0]
i_trans_flow = self._flip_image(i_trans_flow)
transformed_flows.append(i_trans_flow)
return transformed_flows
else:
return flows
@staticmethod
def _flip_image(img: np.ndarray):
img = np.ascontiguousarray(img[:, ::-1, :])
return img
| [
"numpy.ascontiguousarray",
"random.uniform"
] | [((1858, 1895), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img[:, ::-1, :]'], {}), '(img[:, ::-1, :])\n', (1878, 1895), True, 'import numpy as np\n'), ((450, 474), 'random.uniform', 'random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (464, 474), False, 'import random\n')] |
import h5py
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPRegressor
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler # doctest: +SKIP
from sklearn.decomposition import PCA
from sklearn.neural_network import MLPClassifier
import numpy as np
import scipy as sp
fname = "C:/Users/AlessioB/Desktop/REFTEP ANN/sub-1_band-mu_iplv.mat"
mat1 = h5py.File(fname)
fname = "C:/Users/AlessioB/Desktop/REFTEP ANN/sub-1_band-betalow_iplv.mat"
mat2 = h5py.File(fname)
fname = "C:/Users/AlessioB/Desktop/REFTEP ANN/sub-1_band-betahigh_iplv.mat"
mat3 = h5py.File(fname)
X = np.hstack((mat1['iPLV'].value[:,::20],
mat2['iPLV'].value[:,::20],
mat3['iPLV'].value[:,::20]))
Y = mat1['AmpsMclean'].value
Y=np.log(Y.T)
#Y=sp.stats.zscore(Y)
#plt.hist(Y)
Y=Y[:,0]
threshold=np.median(Y)
mask = Y >= threshold  # binarize against the median in one pass, so a
Y[~mask] = 0           # non-positive threshold cannot flip the zeros to 1
Y[mask] = 1
X=X[:,np.std(X,0)>0]
X=np.log(np.abs(X)/(1-np.abs(X)))
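# log(p / (1 - p)) is the logit transform: it maps |iPLV| values from (0, 1)
# onto the whole real line before standardization and classification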
#X=sp.stats.zscore(X)
#pca = PCA(n_components=2)
#pca.fit(X.T)
#Xred=pca.components_.T
#Xred=sp.stats.zscore(Xred)
#vectvox=np.random.randint(0,X.shape[1],100)
#vectvox=np.random.permutation(100)
#Xred=X[:,vectvox_app_fewer[vectvox[1:50]]]
Xred=X
NVox=Xred.shape[1]
SizeLayer=int(NVox/10)
res=np.zeros(100)
for iiter in range(100):
X_train, X_test, y_train, y_test = train_test_split(Xred, Y, train_size=0.75)
scaler = StandardScaler() # doctest: +SKIP
scaler.fit(X_train) # doctest: +SKIP
X_train = scaler.transform(X_train) # doctest: +SKIP
X_test = scaler.transform(X_test) # doctest: +SKIP
    clf = MLPClassifier(hidden_layer_sizes=(SizeLayer,), activation='relu', max_iter=500).fit(X_train, y_train)
res[iiter]=clf.score(X_test, y_test)
plt.hist(res)
plt.show() | [
"h5py.File",
"matplotlib.pyplot.show",
"numpy.log",
"matplotlib.pyplot.hist",
"sklearn.preprocessing.StandardScaler",
"numpy.median",
"sklearn.model_selection.train_test_split",
"numpy.abs",
"numpy.std",
"numpy.zeros",
"numpy.hstack",
"sklearn.neural_network.MLPClassifier"
] | [((512, 528), 'h5py.File', 'h5py.File', (['fname'], {}), '(fname)\n', (521, 528), False, 'import h5py\n'), ((611, 627), 'h5py.File', 'h5py.File', (['fname'], {}), '(fname)\n', (620, 627), False, 'import h5py\n'), ((711, 727), 'h5py.File', 'h5py.File', (['fname'], {}), '(fname)\n', (720, 727), False, 'import h5py\n'), ((734, 837), 'numpy.hstack', 'np.hstack', (["(mat1['iPLV'].value[:, ::20], mat2['iPLV'].value[:, ::20], mat3['iPLV'].\n value[:, ::20])"], {}), "((mat1['iPLV'].value[:, ::20], mat2['iPLV'].value[:, ::20], mat3[\n 'iPLV'].value[:, ::20]))\n", (743, 837), True, 'import numpy as np\n'), ((893, 904), 'numpy.log', 'np.log', (['Y.T'], {}), '(Y.T)\n', (899, 904), True, 'import numpy as np\n'), ((960, 972), 'numpy.median', 'np.median', (['Y'], {}), '(Y)\n', (969, 972), True, 'import numpy as np\n'), ((1365, 1378), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (1373, 1378), True, 'import numpy as np\n'), ((1844, 1857), 'matplotlib.pyplot.hist', 'plt.hist', (['res'], {}), '(res)\n', (1852, 1857), True, 'import matplotlib.pyplot as plt\n'), ((1858, 1868), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1866, 1868), True, 'import matplotlib.pyplot as plt\n'), ((1443, 1485), 'sklearn.model_selection.train_test_split', 'train_test_split', (['Xred', 'Y'], {'train_size': '(0.75)'}), '(Xred, Y, train_size=0.75)\n', (1459, 1485), False, 'from sklearn.model_selection import train_test_split\n'), ((1499, 1515), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1513, 1515), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1039, 1048), 'numpy.abs', 'np.abs', (['X'], {}), '(X)\n', (1045, 1048), True, 'import numpy as np\n'), ((1015, 1027), 'numpy.std', 'np.std', (['X', '(0)'], {}), '(X, 0)\n', (1021, 1027), True, 'import numpy as np\n'), ((1052, 1061), 'numpy.abs', 'np.abs', (['X'], {}), '(X)\n', (1058, 1061), True, 'import numpy as np\n'), ((1700, 1776), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'hidden_layer_sizes': 'SizeLayer', 'activation': '"""relu"""', 'max_iter': '(500)'}), "(hidden_layer_sizes=SizeLayer, activation='relu', max_iter=500)\n", (1713, 1776), False, 'from sklearn.neural_network import MLPClassifier\n')] |
from collections import defaultdict, namedtuple
from itertools import combinations
import numpy as np
from ._sitq import Sitq
class Mips:
def __init__(self, signature_size):
"""
Parameters
----------
signature_size: int
The number of bits of a signature.
"""
self._sitq = Sitq(signature_size=signature_size)
def fit(self, items, iteration=50):
"""
Learn lookup table and parameters for search.
Parameters
----------
items: array_like or dict
Training data. If it is array_like, shape of (n_items, n_features)
where n_items is the number of items and n_features is the number
of features. If it is dict, the key is the name of the item and the
value is array_like with shape of (n_features).
iteration: int, optional
The number of iteration for learning ITQ encoder.
Returns
-------
self: object
Returns the instance itself.
"""
Item = namedtuple('Item', ['name', 'vector'])
_items = []
if isinstance(items, dict):
for name, vector in items.items():
_items.append(Item(name, vector))
else:
for name, vector in enumerate(items):
_items.append(Item(name, vector))
item_vectors = np.array([item.vector for item in _items])
self._sitq.fit(item_vectors, iteration=iteration)
sigs = self._sitq.get_item_signatures(item_vectors)
self._vector_table = defaultdict(list)
self._name_table = defaultdict(list)
for sig, item in zip(sigs, _items):
_sig = tuple(sig)
self._vector_table[_sig].append(item.vector)
self._name_table[_sig].append(item.name)
self._vector_table = defaultdict(
lambda: np.empty((0, item_vectors.shape[1]), dtype=float),
{k: np.array(v) for k, v in self._vector_table.items()}
)
return self
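    # Usage sketch (editor's illustration, not from the original):
    #   mips = Mips(signature_size=8).fit(item_vectors)
    #   names, scores = mips.search(query, limit=10)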
def search(self, query, limit=None, distance=0, require_items=False, sort=True):
"""
Find items which are likely to maximize inner product against query.
Parameters
----------
query: array_like, shape(n_features)
Vector for query.
limit: int or None, optional
The maximum number of items to be returned. It must be positive.
distance: int, optional
The number of bits by which query and signature can differ.
Maximum number of different bits may be over this number if
`require_items` is set to true.
require_items: bool, optional
If true, then the number of returned items must be `limit`.
`distance` is ignored if the number of fetched items was
insufficient.
sort: bool, optional
If true, then the returned `item_names` are sorted in descending
order according to these `scores`.
Returns
-------
item_names: ndarray
Names of items. Indexes are used as names when array_like was used
for `fit()`.
scores: ndarray
            Inner products of items.
"""
query_sig = self._sitq.get_query_signatures([query])[0]
item_vectors = []
item_names = []
distance = np.clip(distance, 0, self._sitq._signature_size)
for i in range(self._sitq._signature_size + 1):
for mutation_indexes in combinations(range(self._sitq._signature_size), i):
mutated_sig = query_sig.copy()
for idx in mutation_indexes:
mutated_sig[idx] = not mutated_sig[idx]
_sig = tuple(mutated_sig)
item_vectors.append(self._vector_table[_sig])
item_names += self._name_table[_sig]
if (not require_items and i == distance) or \
(require_items and limit is not None and len(item_names) >= limit):
break
item_vectors = np.vstack(item_vectors)
scores = item_vectors.dot(query) if item_vectors.size > 0 else np.empty(0)
if limit is not None:
limit = np.clip(limit, 0, len(scores))
idxs = np.argpartition(scores, -1 * limit)[-1 * limit:]
else:
idxs = np.arange(len(item_vectors))
if sort:
idxs = idxs[np.argsort(scores[idxs])[::-1]]
scores = scores[idxs]
item_names = np.array([item_names[idx] for idx in idxs])
return item_names, scores
| [
"numpy.empty",
"numpy.clip",
"collections.defaultdict",
"numpy.argpartition",
"numpy.argsort",
"numpy.array",
"collections.namedtuple",
"numpy.vstack"
] | [((1072, 1110), 'collections.namedtuple', 'namedtuple', (['"""Item"""', "['name', 'vector']"], {}), "('Item', ['name', 'vector'])\n", (1082, 1110), False, 'from collections import defaultdict, namedtuple\n'), ((1403, 1445), 'numpy.array', 'np.array', (['[item.vector for item in _items]'], {}), '([item.vector for item in _items])\n', (1411, 1445), True, 'import numpy as np\n'), ((1594, 1611), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1605, 1611), False, 'from collections import defaultdict, namedtuple\n'), ((1639, 1656), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1650, 1656), False, 'from collections import defaultdict, namedtuple\n'), ((3408, 3456), 'numpy.clip', 'np.clip', (['distance', '(0)', 'self._sitq._signature_size'], {}), '(distance, 0, self._sitq._signature_size)\n', (3415, 3456), True, 'import numpy as np\n'), ((4101, 4124), 'numpy.vstack', 'np.vstack', (['item_vectors'], {}), '(item_vectors)\n', (4110, 4124), True, 'import numpy as np\n'), ((4546, 4589), 'numpy.array', 'np.array', (['[item_names[idx] for idx in idxs]'], {}), '([item_names[idx] for idx in idxs])\n', (4554, 4589), True, 'import numpy as np\n'), ((4197, 4208), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (4205, 4208), True, 'import numpy as np\n'), ((1903, 1952), 'numpy.empty', 'np.empty', (['(0, item_vectors.shape[1])'], {'dtype': 'float'}), '((0, item_vectors.shape[1]), dtype=float)\n', (1911, 1952), True, 'import numpy as np\n'), ((1970, 1981), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (1978, 1981), True, 'import numpy as np\n'), ((4309, 4344), 'numpy.argpartition', 'np.argpartition', (['scores', '(-1 * limit)'], {}), '(scores, -1 * limit)\n', (4324, 4344), True, 'import numpy as np\n'), ((4462, 4486), 'numpy.argsort', 'np.argsort', (['scores[idxs]'], {}), '(scores[idxs])\n', (4472, 4486), True, 'import numpy as np\n')] |
import os
import numpy as np
from static import *
import xml.etree.ElementTree as ET
from PIL import Image
def onboarding(all_images_, all_breeds_):
'''
Takes all images and breeds
makes them ready for modeling
args:
all_images_: list of all images
all_breeds_: list of all breeds
outputs:
        images_inputs: vectorized version of the images
        normalized_image_vectors: images_inputs scaled into [0, 1]
        breeds_names: numpy array of dog breed names
'''
breeds_names = np.array([], dtype='str')
images_inputs = np.zeros((len(all_images_), image_height, image_width, image_channels))
idxIn = 0
for breed in all_breeds_:
if breed == '.DS_Store':
continue
for image_name in os.listdir(breed_path+breed):
try:
img = Image.open(image_path+image_name+'.jpg')
except:
continue
# print('g')
tree = ET.parse(breed_path+breed+'/'+image_name)
# print(tree)
root = tree.getroot()
objects = root.findall('object')
o = objects[0]
bndbox = o.find('bndbox') # <bndbox>
xmin = int(bndbox.find('xmin').text)
ymin = int(bndbox.find('ymin').text)
xmax = int(bndbox.find('xmax').text)
ymax = int(bndbox.find('ymax').text)
w = np.min((xmax - xmin, ymax - ymin))
img2 = img.crop((xmin, ymin, xmin+w, ymin+w))
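            # the crop above is a w-by-w square anchored at the bounding
            # box's top-left corner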
img2 = img2.resize((image_height,image_width), Image.ANTIALIAS)
images_inputs[idxIn,:,:,:] = np.asarray(img2)
idxIn += 1
breeds_names = np.append(breeds_names, breed)
normalized_image_vectors = images_inputs/255.0
return images_inputs, np.asarray(normalized_image_vectors), breeds_names
if __name__=='__main__':
images_inputs, normalized_image_vectors, breeds_names = onboarding(all_images, all_breeds)
| [
"xml.etree.ElementTree.parse",
"numpy.asarray",
"PIL.Image.open",
"numpy.append",
"numpy.min",
"numpy.array"
] | [((488, 513), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""str"""'}), "([], dtype='str')\n", (496, 513), True, 'import numpy as np\n'), ((1794, 1830), 'numpy.asarray', 'np.asarray', (['normalized_image_vectors'], {}), '(normalized_image_vectors)\n', (1804, 1830), True, 'import numpy as np\n'), ((954, 1001), 'xml.etree.ElementTree.parse', 'ET.parse', (["(breed_path + breed + '/' + image_name)"], {}), "(breed_path + breed + '/' + image_name)\n", (962, 1001), True, 'import xml.etree.ElementTree as ET\n'), ((1395, 1429), 'numpy.min', 'np.min', (['(xmax - xmin, ymax - ymin)'], {}), '((xmax - xmin, ymax - ymin))\n', (1401, 1429), True, 'import numpy as np\n'), ((1605, 1621), 'numpy.asarray', 'np.asarray', (['img2'], {}), '(img2)\n', (1615, 1621), True, 'import numpy as np\n'), ((1672, 1702), 'numpy.append', 'np.append', (['breeds_names', 'breed'], {}), '(breeds_names, breed)\n', (1681, 1702), True, 'import numpy as np\n'), ((808, 852), 'PIL.Image.open', 'Image.open', (["(image_path + image_name + '.jpg')"], {}), "(image_path + image_name + '.jpg')\n", (818, 852), False, 'from PIL import Image\n')] |
import math
import numpy as np
import pandas as pd
# K-means clustering
class K_means:
def __init__(self,dataspath,k):
'''
        :param dataspath: path to the data file
        :param k: number of clusters
'''
self.category = k
self.model = {}
self.datas = self.loadDataSet(dataspath)
def loadDataSet(self,dataspath):
'''
        :param dataspath: path of the data to read, a csv file
        :return: numpy array
'''
data_set = pd.read_csv(dataspath)
return np.array(data_set.iloc[:,0:4])
def calcDiatance(self,o1,o2):
'''
        :param o1: point o1
        :param o2: point o2
        :return: Euclidean distance between o1 and o2; smaller ans means the points are more similar
'''
ans = 0
for item1,item2 in zip(o1,o2):
ans = ans + math.pow(item1-item2,2)
return math.sqrt(ans)
def iteration(self):
avg_vector = {}
        # initialize the k mean vectors
for i in range(self.category):
index = np.random.randint(0,len(self.datas))
avg_vector[i] = self.datas[index]
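            # Forgy-style initialization: each cluster mean starts at a random
            # sample (duplicates are possible since indices are drawn independently)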
flag = 1
while flag == 1:
for cate in range(self.category):
self.model[cate] = []
for data in self.datas:
best_cate = -1
min_value = 1000
for cate in range(self.category):
temp_value = self.calcDiatance(data,avg_vector[cate])
if temp_value < min_value:
min_value = temp_value
best_cate = cate
self.model[best_cate].append(list(data))
flag = -1
for i in range(self.category):
if self.calcDiatance(np.mean(self.model[i],axis=0),avg_vector[i]) != 0.0:
avg_vector[i] = np.mean(self.model[i],axis=0)
flag = 1
model = K_means("../database/iris_data.csv",3)
model.iteration()
print(model.model[0])
print(model.model[1])
print(model.model[2]) | [
"math.pow",
"math.sqrt",
"pandas.read_csv",
"numpy.mean",
"numpy.array"
] | [((433, 455), 'pandas.read_csv', 'pd.read_csv', (['dataspath'], {}), '(dataspath)\n', (444, 455), True, 'import pandas as pd\n'), ((471, 502), 'numpy.array', 'np.array', (['data_set.iloc[:, 0:4]'], {}), '(data_set.iloc[:, 0:4])\n', (479, 502), True, 'import numpy as np\n'), ((767, 781), 'math.sqrt', 'math.sqrt', (['ans'], {}), '(ans)\n', (776, 781), False, 'import math\n'), ((728, 754), 'math.pow', 'math.pow', (['(item1 - item2)', '(2)'], {}), '(item1 - item2, 2)\n', (736, 754), False, 'import math\n'), ((1724, 1754), 'numpy.mean', 'np.mean', (['self.model[i]'], {'axis': '(0)'}), '(self.model[i], axis=0)\n', (1731, 1754), True, 'import numpy as np\n'), ((1635, 1665), 'numpy.mean', 'np.mean', (['self.model[i]'], {'axis': '(0)'}), '(self.model[i], axis=0)\n', (1642, 1665), True, 'import numpy as np\n')] |
from learnml.linear_model import LinearRegression
import numpy as np
import numpy.testing
import unittest
class TestLinearRegression(unittest.TestCase):
def test_fit_predict(self):
X = np.array([[1], [2]])
y = np.array([1, 2])
lin_reg = LinearRegression()
lin_reg.fit(X, y)
numpy.testing.assert_almost_equal(np.array([1]), lin_reg.coef_)
numpy.testing.assert_almost_equal(np.array([0]), lin_reg.intercept_)
numpy.testing.assert_almost_equal(np.array([1, 2]), lin_reg.predict(X))
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"learnml.linear_model.LinearRegression",
"numpy.array"
] | [((576, 591), 'unittest.main', 'unittest.main', ([], {}), '()\n', (589, 591), False, 'import unittest\n'), ((199, 219), 'numpy.array', 'np.array', (['[[1], [2]]'], {}), '([[1], [2]])\n', (207, 219), True, 'import numpy as np\n'), ((232, 248), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (240, 248), True, 'import numpy as np\n'), ((268, 286), 'learnml.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (284, 286), False, 'from learnml.linear_model import LinearRegression\n'), ((356, 369), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (364, 369), True, 'import numpy as np\n'), ((428, 441), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (436, 441), True, 'import numpy as np\n'), ((505, 521), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (513, 521), True, 'import numpy as np\n')] |
"""Test schmidt_decomposition."""
import numpy as np
from toqito.state_ops import schmidt_decomposition
from toqito.states import basis, max_entangled
def test_schmidt_decomp_max_ent():
"""Schmidt decomposition of the 3-D maximally entangled state."""
singular_vals, u_mat, vt_mat = schmidt_decomposition(max_entangled(3))
expected_u_mat = np.identity(3)
expected_vt_mat = np.identity(3)
expected_singular_vals = 1 / np.sqrt(3) * np.array([[1], [1], [1]])
bool_mat = np.isclose(expected_u_mat, u_mat)
np.testing.assert_equal(np.all(bool_mat), True)
bool_mat = np.isclose(expected_vt_mat, vt_mat)
np.testing.assert_equal(np.all(bool_mat), True)
bool_mat = np.isclose(expected_singular_vals, singular_vals)
np.testing.assert_equal(np.all(bool_mat), True)
def test_schmidt_decomp_two_qubit_1():
"""
Schmidt decomposition of two-qubit state.
The Schmidt decomposition of | phi > = 1/2(|00> + |01> + |10> + |11>) is
the state |+>|+> where |+> = 1/sqrt(2) * (|0> + |1>).
"""
e_0, e_1 = basis(2, 0), basis(2, 1)
phi = 1 / 2 * (np.kron(e_0, e_0) + np.kron(e_0, e_1) + np.kron(e_1, e_0) + np.kron(e_1, e_1))
singular_vals, vt_mat, u_mat = schmidt_decomposition(phi)
expected_singular_vals = np.array([[1]])
bool_mat = np.isclose(expected_singular_vals, singular_vals)
np.testing.assert_equal(np.all(bool_mat), True)
expected_vt_mat = 1 / np.sqrt(2) * np.array([[-1], [-1]])
bool_mat = np.isclose(expected_vt_mat, vt_mat)
np.testing.assert_equal(np.all(bool_mat), True)
expected_u_mat = 1 / np.sqrt(2) * np.array([[-1], [-1]])
bool_mat = np.isclose(expected_u_mat, u_mat)
np.testing.assert_equal(np.all(bool_mat), True)
def test_schmidt_decomp_two_qubit_2():
"""
Schmidt decomposition of two-qubit state.
The Schmidt decomposition of | phi > = 1/2(|00> + |01> + |10> - |11>) is
the state 1/sqrt(2) * (|0>|+> + |1>|->).
"""
e_0, e_1 = basis(2, 0), basis(2, 1)
phi = 1 / 2 * (np.kron(e_0, e_0) + np.kron(e_0, e_1) + np.kron(e_1, e_0) - np.kron(e_1, e_1))
singular_vals, vt_mat, u_mat = schmidt_decomposition(phi)
expected_singular_vals = 1 / np.sqrt(2) * np.array([[1], [1]])
bool_mat = np.isclose(expected_singular_vals, singular_vals)
np.testing.assert_equal(np.all(bool_mat), True)
expected_vt_mat = np.array([[-1, 0], [0, -1]])
bool_mat = np.isclose(expected_vt_mat, vt_mat)
np.testing.assert_equal(np.all(bool_mat), True)
expected_u_mat = 1 / np.sqrt(2) * np.array([[-1, -1], [-1, 1]])
bool_mat = np.isclose(expected_u_mat, u_mat)
np.testing.assert_equal(np.all(bool_mat), True)
s_decomp = (
singular_vals[0] * np.atleast_2d(np.kron(vt_mat[:, 0], u_mat[:, 0])).T
+ singular_vals[1] * np.atleast_2d(np.kron(vt_mat[:, 1], u_mat[:, 1])).T
)
np.testing.assert_equal(np.isclose(np.linalg.norm(phi - s_decomp), 0), True)
def test_schmidt_decomp_two_qubit_3():
"""
Schmidt decomposition of two-qubit state.
The Schmidt decomposition of 1/2* (|00> + |11>) has Schmidt coefficients
equal to 1/2[1, 1]
"""
e_0, e_1 = basis(2, 0), basis(2, 1)
phi = 1 / 2 * (np.kron(e_0, e_0) + np.kron(e_1, e_1))
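    # note: this vector is deliberately unnormalized (norm 1/sqrt(2)), so the
    # squared Schmidt coefficients below sum to 1/2 rather than 1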
singular_vals, vt_mat, u_mat = schmidt_decomposition(phi)
expected_singular_vals = 1 / 2 * np.array([[1], [1]])
bool_mat = np.isclose(expected_singular_vals, singular_vals)
np.testing.assert_equal(np.all(bool_mat), True)
expected_vt_mat = np.array([[1, 0], [0, 1]])
bool_mat = np.isclose(expected_vt_mat, vt_mat)
np.testing.assert_equal(np.all(bool_mat), True)
expected_u_mat = np.array([[1, 0], [0, 1]])
bool_mat = np.isclose(expected_u_mat, u_mat)
np.testing.assert_equal(np.all(bool_mat), True)
s_decomp = (
singular_vals[0] * np.atleast_2d(np.kron(vt_mat[:, 0], u_mat[:, 0])).T
+ singular_vals[1] * np.atleast_2d(np.kron(vt_mat[:, 1], u_mat[:, 1])).T
)
np.testing.assert_equal(np.isclose(np.linalg.norm(phi - s_decomp), 0), True)
def test_schmidt_decomp_two_qubit_4():
"""
Schmidt decomposition of two-qubit state.
The Schmidt decomposition of 1/2 * (|00> - |01> + |10> + |11>) has Schmidt coefficients
equal to [1, 1]
"""
e_0, e_1 = basis(2, 0), basis(2, 1)
phi = 1 / 2 * (np.kron(e_0, e_0) - np.kron(e_0, e_1) + np.kron(e_1, e_0) + np.kron(e_1, e_1))
singular_vals, vt_mat, u_mat = schmidt_decomposition(phi)
expected_singular_vals = 1 / np.sqrt(2) * np.array([[1], [1]])
bool_mat = np.isclose(expected_singular_vals, singular_vals)
np.testing.assert_equal(np.all(bool_mat), True)
expected_vt_mat = np.array([[-1, 0], [0, 1]])
bool_mat = np.isclose(expected_vt_mat, vt_mat)
np.testing.assert_equal(np.all(bool_mat), True)
expected_u_mat = 1 / np.sqrt(2) * np.array([[-1, 1], [1, 1]])
bool_mat = np.isclose(expected_u_mat, u_mat)
np.testing.assert_equal(np.all(bool_mat), True)
def test_schmidt_decomp_dim_list():
"""Schmidt decomposition with list specifying dimension."""
singular_vals, u_mat, vt_mat = schmidt_decomposition(max_entangled(3), dim=[3, 3])
expected_u_mat = np.identity(3)
expected_vt_mat = np.identity(3)
expected_singular_vals = 1 / np.sqrt(3) * np.array([[1], [1], [1]])
bool_mat = np.isclose(expected_u_mat, u_mat)
np.testing.assert_equal(np.all(bool_mat), True)
bool_mat = np.isclose(expected_vt_mat, vt_mat)
np.testing.assert_equal(np.all(bool_mat), True)
bool_mat = np.isclose(expected_singular_vals, singular_vals)
np.testing.assert_equal(np.all(bool_mat), True)
def test_schmidt_decomp_dim_list_pure_state():
"""Schmidt decomposition of a pure state with a dimension list."""
pure_vec = -1 / np.sqrt(2) * np.array([[1], [0], [1], [0]])
# Test when dimension default and k_param is default (0):
singular_vals, vt_mat, u_mat = schmidt_decomposition(pure_vec)
expected_singular_vals = np.array([[1]])
bool_mat = np.isclose(expected_singular_vals, singular_vals)
np.testing.assert_equal(np.all(bool_mat), True)
expected_vt_mat = 1 / np.sqrt(2) * np.array([[-1], [-1]])
bool_mat = np.isclose(expected_vt_mat, vt_mat)
np.testing.assert_equal(np.all(bool_mat), True)
expected_u_mat = np.array([[1], [0]])
bool_mat = np.isclose(expected_u_mat, u_mat)
np.testing.assert_equal(np.all(bool_mat), True)
# Test when dimension [2, 2] and k_param is 1:
singular_vals, vt_mat, u_mat = schmidt_decomposition(pure_vec, [2, 2], 1)
expected_singular_vals = np.array([[1]])
bool_mat = np.isclose(expected_singular_vals, singular_vals)
np.testing.assert_equal(np.all(bool_mat), True)
expected_vt_mat = 1 / np.sqrt(2) * np.array([[-1], [-1]])
bool_mat = np.isclose(expected_vt_mat, vt_mat)
np.testing.assert_equal(np.all(bool_mat), True)
expected_u_mat = np.array([[1], [0]])
bool_mat = np.isclose(expected_u_mat, u_mat)
np.testing.assert_equal(np.all(bool_mat), True)
# Test when dimension [2, 2] and k_param is 2:
singular_vals, vt_mat, u_mat = schmidt_decomposition(pure_vec, [2, 2], 2)
expected_singular_vals = np.array([[1], [0]])
bool_mat = np.isclose(expected_singular_vals, singular_vals)
np.testing.assert_equal(np.all(bool_mat), True)
expected_vt_mat = 1 / np.sqrt(2) * np.array([[-1, -1], [-1, 1]])
bool_mat = np.isclose(expected_vt_mat, vt_mat)
np.testing.assert_equal(np.all(bool_mat), True)
expected_u_mat = np.identity(2)
bool_mat = np.isclose(expected_u_mat, u_mat)
np.testing.assert_equal(np.all(bool_mat), True)
def test_schmidt_decomp_standard_basis():
"""Test on standard basis vectors."""
e_1 = basis(2, 1)
singular_vals, vt_mat, u_mat = schmidt_decomposition(np.kron(e_1, e_1))
expected_singular_vals = np.array([[1]])
bool_mat = np.isclose(expected_singular_vals, singular_vals)
np.testing.assert_equal(np.all(bool_mat), True)
expected_vt_mat = np.array([[0], [1]])
bool_mat = np.isclose(expected_vt_mat, vt_mat)
np.testing.assert_equal(np.all(bool_mat), True)
expected_u_mat = np.array([[0], [1]])
bool_mat = np.isclose(expected_u_mat, u_mat)
np.testing.assert_equal(np.all(bool_mat), True)
def test_schmidt_decomp_example():
"""Test for example Schmidt decomposition."""
e_0, e_1 = basis(2, 0), basis(2, 1)
phi = (
(1 + np.sqrt(6)) / (2 * np.sqrt(6)) * np.kron(e_0, e_0)
+ (1 - np.sqrt(6)) / (2 * np.sqrt(6)) * np.kron(e_0, e_1)
+ (np.sqrt(2) - np.sqrt(3)) / (2 * np.sqrt(6)) * np.kron(e_1, e_0)
+ (np.sqrt(2) + np.sqrt(3)) / (2 * np.sqrt(6)) * np.kron(e_1, e_1)
)
singular_vals, vt_mat, u_mat = schmidt_decomposition(phi)
expected_singular_vals = np.array([[np.sqrt(3 / 4)], [np.sqrt(1 / 4)]])
bool_mat = np.isclose(expected_singular_vals, singular_vals)
np.testing.assert_equal(np.all(bool_mat), True)
expected_vt_mat = np.array([[-0.81649658, 0.57735027], [0.57735027, 0.81649658]])
bool_mat = np.isclose(expected_vt_mat, vt_mat)
np.testing.assert_equal(np.all(bool_mat), True)
expected_u_mat = 1 / np.sqrt(2) * np.array([[-1, 1], [1, 1]])
bool_mat = np.isclose(expected_u_mat, u_mat)
np.testing.assert_equal(np.all(bool_mat), True)
if __name__ == "__main__":
np.testing.run_module_suite()
| [
"toqito.states.basis",
"numpy.testing.run_module_suite",
"numpy.identity",
"toqito.state_ops.schmidt_decomposition",
"numpy.isclose",
"numpy.array",
"toqito.states.max_entangled",
"numpy.kron",
"numpy.linalg.norm",
"numpy.all",
"numpy.sqrt"
] | [((356, 370), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (367, 370), True, 'import numpy as np\n'), ((393, 407), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (404, 407), True, 'import numpy as np\n'), ((496, 529), 'numpy.isclose', 'np.isclose', (['expected_u_mat', 'u_mat'], {}), '(expected_u_mat, u_mat)\n', (506, 529), True, 'import numpy as np\n'), ((598, 633), 'numpy.isclose', 'np.isclose', (['expected_vt_mat', 'vt_mat'], {}), '(expected_vt_mat, vt_mat)\n', (608, 633), True, 'import numpy as np\n'), ((702, 751), 'numpy.isclose', 'np.isclose', (['expected_singular_vals', 'singular_vals'], {}), '(expected_singular_vals, singular_vals)\n', (712, 751), True, 'import numpy as np\n'), ((1217, 1243), 'toqito.state_ops.schmidt_decomposition', 'schmidt_decomposition', (['phi'], {}), '(phi)\n', (1238, 1243), False, 'from toqito.state_ops import schmidt_decomposition\n'), ((1274, 1289), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (1282, 1289), True, 'import numpy as np\n'), ((1305, 1354), 'numpy.isclose', 'np.isclose', (['expected_singular_vals', 'singular_vals'], {}), '(expected_singular_vals, singular_vals)\n', (1315, 1354), True, 'import numpy as np\n'), ((1485, 1520), 'numpy.isclose', 'np.isclose', (['expected_vt_mat', 'vt_mat'], {}), '(expected_vt_mat, vt_mat)\n', (1495, 1520), True, 'import numpy as np\n'), ((1650, 1683), 'numpy.isclose', 'np.isclose', (['expected_u_mat', 'u_mat'], {}), '(expected_u_mat, u_mat)\n', (1660, 1683), True, 'import numpy as np\n'), ((2136, 2162), 'toqito.state_ops.schmidt_decomposition', 'schmidt_decomposition', (['phi'], {}), '(phi)\n', (2157, 2162), False, 'from toqito.state_ops import schmidt_decomposition\n'), ((2246, 2295), 'numpy.isclose', 'np.isclose', (['expected_singular_vals', 'singular_vals'], {}), '(expected_singular_vals, singular_vals)\n', (2256, 2295), True, 'import numpy as np\n'), ((2371, 2399), 'numpy.array', 'np.array', (['[[-1, 0], [0, -1]]'], {}), '([[-1, 0], [0, -1]])\n', (2379, 2399), True, 'import numpy as np\n'), ((2415, 2450), 'numpy.isclose', 'np.isclose', (['expected_vt_mat', 'vt_mat'], {}), '(expected_vt_mat, vt_mat)\n', (2425, 2450), True, 'import numpy as np\n'), ((2587, 2620), 'numpy.isclose', 'np.isclose', (['expected_u_mat', 'u_mat'], {}), '(expected_u_mat, u_mat)\n', (2597, 2620), True, 'import numpy as np\n'), ((3277, 3303), 'toqito.state_ops.schmidt_decomposition', 'schmidt_decomposition', (['phi'], {}), '(phi)\n', (3298, 3303), False, 'from toqito.state_ops import schmidt_decomposition\n'), ((3378, 3427), 'numpy.isclose', 'np.isclose', (['expected_singular_vals', 'singular_vals'], {}), '(expected_singular_vals, singular_vals)\n', (3388, 3427), True, 'import numpy as np\n'), ((3503, 3529), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (3511, 3529), True, 'import numpy as np\n'), ((3545, 3580), 'numpy.isclose', 'np.isclose', (['expected_vt_mat', 'vt_mat'], {}), '(expected_vt_mat, vt_mat)\n', (3555, 3580), True, 'import numpy as np\n'), ((3655, 3681), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (3663, 3681), True, 'import numpy as np\n'), ((3697, 3730), 'numpy.isclose', 'np.isclose', (['expected_u_mat', 'u_mat'], {}), '(expected_u_mat, u_mat)\n', (3707, 3730), True, 'import numpy as np\n'), ((4439, 4465), 'toqito.state_ops.schmidt_decomposition', 'schmidt_decomposition', (['phi'], {}), '(phi)\n', (4460, 4465), False, 'from toqito.state_ops import schmidt_decomposition\n'), ((4549, 4598), 'numpy.isclose', 'np.isclose', 
(['expected_singular_vals', 'singular_vals'], {}), '(expected_singular_vals, singular_vals)\n', (4559, 4598), True, 'import numpy as np\n'), ((4674, 4701), 'numpy.array', 'np.array', (['[[-1, 0], [0, 1]]'], {}), '([[-1, 0], [0, 1]])\n', (4682, 4701), True, 'import numpy as np\n'), ((4717, 4752), 'numpy.isclose', 'np.isclose', (['expected_vt_mat', 'vt_mat'], {}), '(expected_vt_mat, vt_mat)\n', (4727, 4752), True, 'import numpy as np\n'), ((4887, 4920), 'numpy.isclose', 'np.isclose', (['expected_u_mat', 'u_mat'], {}), '(expected_u_mat, u_mat)\n', (4897, 4920), True, 'import numpy as np\n'), ((5184, 5198), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (5195, 5198), True, 'import numpy as np\n'), ((5221, 5235), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (5232, 5235), True, 'import numpy as np\n'), ((5324, 5357), 'numpy.isclose', 'np.isclose', (['expected_u_mat', 'u_mat'], {}), '(expected_u_mat, u_mat)\n', (5334, 5357), True, 'import numpy as np\n'), ((5426, 5461), 'numpy.isclose', 'np.isclose', (['expected_vt_mat', 'vt_mat'], {}), '(expected_vt_mat, vt_mat)\n', (5436, 5461), True, 'import numpy as np\n'), ((5530, 5579), 'numpy.isclose', 'np.isclose', (['expected_singular_vals', 'singular_vals'], {}), '(expected_singular_vals, singular_vals)\n', (5540, 5579), True, 'import numpy as np\n'), ((5914, 5945), 'toqito.state_ops.schmidt_decomposition', 'schmidt_decomposition', (['pure_vec'], {}), '(pure_vec)\n', (5935, 5945), False, 'from toqito.state_ops import schmidt_decomposition\n'), ((5976, 5991), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (5984, 5991), True, 'import numpy as np\n'), ((6007, 6056), 'numpy.isclose', 'np.isclose', (['expected_singular_vals', 'singular_vals'], {}), '(expected_singular_vals, singular_vals)\n', (6017, 6056), True, 'import numpy as np\n'), ((6187, 6222), 'numpy.isclose', 'np.isclose', (['expected_vt_mat', 'vt_mat'], {}), '(expected_vt_mat, vt_mat)\n', (6197, 6222), True, 'import numpy as np\n'), ((6297, 6317), 'numpy.array', 'np.array', (['[[1], [0]]'], {}), '([[1], [0]])\n', (6305, 6317), True, 'import numpy as np\n'), ((6333, 6366), 'numpy.isclose', 'np.isclose', (['expected_u_mat', 'u_mat'], {}), '(expected_u_mat, u_mat)\n', (6343, 6366), True, 'import numpy as np\n'), ((6506, 6548), 'toqito.state_ops.schmidt_decomposition', 'schmidt_decomposition', (['pure_vec', '[2, 2]', '(1)'], {}), '(pure_vec, [2, 2], 1)\n', (6527, 6548), False, 'from toqito.state_ops import schmidt_decomposition\n'), ((6579, 6594), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (6587, 6594), True, 'import numpy as np\n'), ((6610, 6659), 'numpy.isclose', 'np.isclose', (['expected_singular_vals', 'singular_vals'], {}), '(expected_singular_vals, singular_vals)\n', (6620, 6659), True, 'import numpy as np\n'), ((6790, 6825), 'numpy.isclose', 'np.isclose', (['expected_vt_mat', 'vt_mat'], {}), '(expected_vt_mat, vt_mat)\n', (6800, 6825), True, 'import numpy as np\n'), ((6900, 6920), 'numpy.array', 'np.array', (['[[1], [0]]'], {}), '([[1], [0]])\n', (6908, 6920), True, 'import numpy as np\n'), ((6936, 6969), 'numpy.isclose', 'np.isclose', (['expected_u_mat', 'u_mat'], {}), '(expected_u_mat, u_mat)\n', (6946, 6969), True, 'import numpy as np\n'), ((7109, 7151), 'toqito.state_ops.schmidt_decomposition', 'schmidt_decomposition', (['pure_vec', '[2, 2]', '(2)'], {}), '(pure_vec, [2, 2], 2)\n', (7130, 7151), False, 'from toqito.state_ops import schmidt_decomposition\n'), ((7182, 7202), 'numpy.array', 'np.array', (['[[1], [0]]'], {}), '([[1], [0]])\n', (7190, 
7202), True, 'import numpy as np\n'), ((7218, 7267), 'numpy.isclose', 'np.isclose', (['expected_singular_vals', 'singular_vals'], {}), '(expected_singular_vals, singular_vals)\n', (7228, 7267), True, 'import numpy as np\n'), ((7405, 7440), 'numpy.isclose', 'np.isclose', (['expected_vt_mat', 'vt_mat'], {}), '(expected_vt_mat, vt_mat)\n', (7415, 7440), True, 'import numpy as np\n'), ((7515, 7529), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (7526, 7529), True, 'import numpy as np\n'), ((7545, 7578), 'numpy.isclose', 'np.isclose', (['expected_u_mat', 'u_mat'], {}), '(expected_u_mat, u_mat)\n', (7555, 7578), True, 'import numpy as np\n'), ((7727, 7738), 'toqito.states.basis', 'basis', (['(2)', '(1)'], {}), '(2, 1)\n', (7732, 7738), False, 'from toqito.states import basis, max_entangled\n'), ((7845, 7860), 'numpy.array', 'np.array', (['[[1]]'], {}), '([[1]])\n', (7853, 7860), True, 'import numpy as np\n'), ((7876, 7925), 'numpy.isclose', 'np.isclose', (['expected_singular_vals', 'singular_vals'], {}), '(expected_singular_vals, singular_vals)\n', (7886, 7925), True, 'import numpy as np\n'), ((8001, 8021), 'numpy.array', 'np.array', (['[[0], [1]]'], {}), '([[0], [1]])\n', (8009, 8021), True, 'import numpy as np\n'), ((8037, 8072), 'numpy.isclose', 'np.isclose', (['expected_vt_mat', 'vt_mat'], {}), '(expected_vt_mat, vt_mat)\n', (8047, 8072), True, 'import numpy as np\n'), ((8147, 8167), 'numpy.array', 'np.array', (['[[0], [1]]'], {}), '([[0], [1]])\n', (8155, 8167), True, 'import numpy as np\n'), ((8183, 8216), 'numpy.isclose', 'np.isclose', (['expected_u_mat', 'u_mat'], {}), '(expected_u_mat, u_mat)\n', (8193, 8216), True, 'import numpy as np\n'), ((8730, 8756), 'toqito.state_ops.schmidt_decomposition', 'schmidt_decomposition', (['phi'], {}), '(phi)\n', (8751, 8756), False, 'from toqito.state_ops import schmidt_decomposition\n'), ((8849, 8898), 'numpy.isclose', 'np.isclose', (['expected_singular_vals', 'singular_vals'], {}), '(expected_singular_vals, singular_vals)\n', (8859, 8898), True, 'import numpy as np\n'), ((8974, 9037), 'numpy.array', 'np.array', (['[[-0.81649658, 0.57735027], [0.57735027, 0.81649658]]'], {}), '([[-0.81649658, 0.57735027], [0.57735027, 0.81649658]])\n', (8982, 9037), True, 'import numpy as np\n'), ((9053, 9088), 'numpy.isclose', 'np.isclose', (['expected_vt_mat', 'vt_mat'], {}), '(expected_vt_mat, vt_mat)\n', (9063, 9088), True, 'import numpy as np\n'), ((9223, 9256), 'numpy.isclose', 'np.isclose', (['expected_u_mat', 'u_mat'], {}), '(expected_u_mat, u_mat)\n', (9233, 9256), True, 'import numpy as np\n'), ((9342, 9371), 'numpy.testing.run_module_suite', 'np.testing.run_module_suite', ([], {}), '()\n', (9369, 9371), True, 'import numpy as np\n'), ((316, 332), 'toqito.states.max_entangled', 'max_entangled', (['(3)'], {}), '(3)\n', (329, 332), False, 'from toqito.states import basis, max_entangled\n'), ((454, 479), 'numpy.array', 'np.array', (['[[1], [1], [1]]'], {}), '([[1], [1], [1]])\n', (462, 479), True, 'import numpy as np\n'), ((558, 574), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (564, 574), True, 'import numpy as np\n'), ((662, 678), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (668, 678), True, 'import numpy as np\n'), ((780, 796), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (786, 796), True, 'import numpy as np\n'), ((1058, 1069), 'toqito.states.basis', 'basis', (['(2)', '(0)'], {}), '(2, 0)\n', (1063, 1069), False, 'from toqito.states import basis, max_entangled\n'), ((1071, 1082), 'toqito.states.basis', 
'basis', (['(2)', '(1)'], {}), '(2, 1)\n', (1076, 1082), False, 'from toqito.states import basis, max_entangled\n'), ((1383, 1399), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (1389, 1399), True, 'import numpy as np\n'), ((1447, 1469), 'numpy.array', 'np.array', (['[[-1], [-1]]'], {}), '([[-1], [-1]])\n', (1455, 1469), True, 'import numpy as np\n'), ((1549, 1565), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (1555, 1565), True, 'import numpy as np\n'), ((1612, 1634), 'numpy.array', 'np.array', (['[[-1], [-1]]'], {}), '([[-1], [-1]])\n', (1620, 1634), True, 'import numpy as np\n'), ((1712, 1728), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (1718, 1728), True, 'import numpy as np\n'), ((1977, 1988), 'toqito.states.basis', 'basis', (['(2)', '(0)'], {}), '(2, 0)\n', (1982, 1988), False, 'from toqito.states import basis, max_entangled\n'), ((1990, 2001), 'toqito.states.basis', 'basis', (['(2)', '(1)'], {}), '(2, 1)\n', (1995, 2001), False, 'from toqito.states import basis, max_entangled\n'), ((2210, 2230), 'numpy.array', 'np.array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (2218, 2230), True, 'import numpy as np\n'), ((2324, 2340), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (2330, 2340), True, 'import numpy as np\n'), ((2479, 2495), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (2485, 2495), True, 'import numpy as np\n'), ((2542, 2571), 'numpy.array', 'np.array', (['[[-1, -1], [-1, 1]]'], {}), '([[-1, -1], [-1, 1]])\n', (2550, 2571), True, 'import numpy as np\n'), ((2649, 2665), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (2655, 2665), True, 'import numpy as np\n'), ((3158, 3169), 'toqito.states.basis', 'basis', (['(2)', '(0)'], {}), '(2, 0)\n', (3163, 3169), False, 'from toqito.states import basis, max_entangled\n'), ((3171, 3182), 'toqito.states.basis', 'basis', (['(2)', '(1)'], {}), '(2, 1)\n', (3176, 3182), False, 'from toqito.states import basis, max_entangled\n'), ((3342, 3362), 'numpy.array', 'np.array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (3350, 3362), True, 'import numpy as np\n'), ((3456, 3472), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (3462, 3472), True, 'import numpy as np\n'), ((3609, 3625), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (3615, 3625), True, 'import numpy as np\n'), ((3759, 3775), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (3765, 3775), True, 'import numpy as np\n'), ((4280, 4291), 'toqito.states.basis', 'basis', (['(2)', '(0)'], {}), '(2, 0)\n', (4285, 4291), False, 'from toqito.states import basis, max_entangled\n'), ((4293, 4304), 'toqito.states.basis', 'basis', (['(2)', '(1)'], {}), '(2, 1)\n', (4298, 4304), False, 'from toqito.states import basis, max_entangled\n'), ((4513, 4533), 'numpy.array', 'np.array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (4521, 4533), True, 'import numpy as np\n'), ((4627, 4643), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (4633, 4643), True, 'import numpy as np\n'), ((4781, 4797), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (4787, 4797), True, 'import numpy as np\n'), ((4844, 4871), 'numpy.array', 'np.array', (['[[-1, 1], [1, 1]]'], {}), '([[-1, 1], [1, 1]])\n', (4852, 4871), True, 'import numpy as np\n'), ((4949, 4965), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (4955, 4965), True, 'import numpy as np\n'), ((5132, 5148), 'toqito.states.max_entangled', 'max_entangled', (['(3)'], {}), '(3)\n', (5145, 5148), False, 'from toqito.states import basis, 
max_entangled\n'), ((5282, 5307), 'numpy.array', 'np.array', (['[[1], [1], [1]]'], {}), '([[1], [1], [1]])\n', (5290, 5307), True, 'import numpy as np\n'), ((5386, 5402), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (5392, 5402), True, 'import numpy as np\n'), ((5490, 5506), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (5496, 5506), True, 'import numpy as np\n'), ((5608, 5624), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (5614, 5624), True, 'import numpy as np\n'), ((5785, 5815), 'numpy.array', 'np.array', (['[[1], [0], [1], [0]]'], {}), '([[1], [0], [1], [0]])\n', (5793, 5815), True, 'import numpy as np\n'), ((6085, 6101), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (6091, 6101), True, 'import numpy as np\n'), ((6149, 6171), 'numpy.array', 'np.array', (['[[-1], [-1]]'], {}), '([[-1], [-1]])\n', (6157, 6171), True, 'import numpy as np\n'), ((6251, 6267), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (6257, 6267), True, 'import numpy as np\n'), ((6395, 6411), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (6401, 6411), True, 'import numpy as np\n'), ((6688, 6704), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (6694, 6704), True, 'import numpy as np\n'), ((6752, 6774), 'numpy.array', 'np.array', (['[[-1], [-1]]'], {}), '([[-1], [-1]])\n', (6760, 6774), True, 'import numpy as np\n'), ((6854, 6870), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (6860, 6870), True, 'import numpy as np\n'), ((6998, 7014), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (7004, 7014), True, 'import numpy as np\n'), ((7296, 7312), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (7302, 7312), True, 'import numpy as np\n'), ((7360, 7389), 'numpy.array', 'np.array', (['[[-1, -1], [-1, 1]]'], {}), '([[-1, -1], [-1, 1]])\n', (7368, 7389), True, 'import numpy as np\n'), ((7469, 7485), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (7475, 7485), True, 'import numpy as np\n'), ((7607, 7623), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (7613, 7623), True, 'import numpy as np\n'), ((7796, 7813), 'numpy.kron', 'np.kron', (['e_1', 'e_1'], {}), '(e_1, e_1)\n', (7803, 7813), True, 'import numpy as np\n'), ((7954, 7970), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (7960, 7970), True, 'import numpy as np\n'), ((8101, 8117), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (8107, 8117), True, 'import numpy as np\n'), ((8245, 8261), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (8251, 8261), True, 'import numpy as np\n'), ((8371, 8382), 'toqito.states.basis', 'basis', (['(2)', '(0)'], {}), '(2, 0)\n', (8376, 8382), False, 'from toqito.states import basis, max_entangled\n'), ((8384, 8395), 'toqito.states.basis', 'basis', (['(2)', '(1)'], {}), '(2, 1)\n', (8389, 8395), False, 'from toqito.states import basis, max_entangled\n'), ((8927, 8943), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (8933, 8943), True, 'import numpy as np\n'), ((9117, 9133), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (9123, 9133), True, 'import numpy as np\n'), ((9180, 9207), 'numpy.array', 'np.array', (['[[-1, 1], [1, 1]]'], {}), '([[-1, 1], [1, 1]])\n', (9188, 9207), True, 'import numpy as np\n'), ((9285, 9301), 'numpy.all', 'np.all', (['bool_mat'], {}), '(bool_mat)\n', (9291, 9301), True, 'import numpy as np\n'), ((441, 451), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (448, 451), True, 'import numpy as np\n'), ((1163, 1180), 
'numpy.kron', 'np.kron', (['e_1', 'e_1'], {}), '(e_1, e_1)\n', (1170, 1180), True, 'import numpy as np\n'), ((1434, 1444), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1441, 1444), True, 'import numpy as np\n'), ((1599, 1609), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1606, 1609), True, 'import numpy as np\n'), ((2082, 2099), 'numpy.kron', 'np.kron', (['e_1', 'e_1'], {}), '(e_1, e_1)\n', (2089, 2099), True, 'import numpy as np\n'), ((2197, 2207), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2204, 2207), True, 'import numpy as np\n'), ((2529, 2539), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2536, 2539), True, 'import numpy as np\n'), ((2897, 2927), 'numpy.linalg.norm', 'np.linalg.norm', (['(phi - s_decomp)'], {}), '(phi - s_decomp)\n', (2911, 2927), True, 'import numpy as np\n'), ((3203, 3220), 'numpy.kron', 'np.kron', (['e_0', 'e_0'], {}), '(e_0, e_0)\n', (3210, 3220), True, 'import numpy as np\n'), ((3223, 3240), 'numpy.kron', 'np.kron', (['e_1', 'e_1'], {}), '(e_1, e_1)\n', (3230, 3240), True, 'import numpy as np\n'), ((4007, 4037), 'numpy.linalg.norm', 'np.linalg.norm', (['(phi - s_decomp)'], {}), '(phi - s_decomp)\n', (4021, 4037), True, 'import numpy as np\n'), ((4385, 4402), 'numpy.kron', 'np.kron', (['e_1', 'e_1'], {}), '(e_1, e_1)\n', (4392, 4402), True, 'import numpy as np\n'), ((4500, 4510), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4507, 4510), True, 'import numpy as np\n'), ((4831, 4841), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4838, 4841), True, 'import numpy as np\n'), ((5269, 5279), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (5276, 5279), True, 'import numpy as np\n'), ((5772, 5782), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5779, 5782), True, 'import numpy as np\n'), ((6136, 6146), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6143, 6146), True, 'import numpy as np\n'), ((6739, 6749), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6746, 6749), True, 'import numpy as np\n'), ((7347, 7357), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7354, 7357), True, 'import numpy as np\n'), ((8670, 8687), 'numpy.kron', 'np.kron', (['e_1', 'e_1'], {}), '(e_1, e_1)\n', (8677, 8687), True, 'import numpy as np\n'), ((9167, 9177), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9174, 9177), True, 'import numpy as np\n'), ((1143, 1160), 'numpy.kron', 'np.kron', (['e_1', 'e_0'], {}), '(e_1, e_0)\n', (1150, 1160), True, 'import numpy as np\n'), ((2062, 2079), 'numpy.kron', 'np.kron', (['e_1', 'e_0'], {}), '(e_1, e_0)\n', (2069, 2079), True, 'import numpy as np\n'), ((4365, 4382), 'numpy.kron', 'np.kron', (['e_1', 'e_0'], {}), '(e_1, e_0)\n', (4372, 4382), True, 'import numpy as np\n'), ((8595, 8612), 'numpy.kron', 'np.kron', (['e_1', 'e_0'], {}), '(e_1, e_0)\n', (8602, 8612), True, 'import numpy as np\n'), ((8798, 8812), 'numpy.sqrt', 'np.sqrt', (['(3 / 4)'], {}), '(3 / 4)\n', (8805, 8812), True, 'import numpy as np\n'), ((8816, 8830), 'numpy.sqrt', 'np.sqrt', (['(1 / 4)'], {}), '(1 / 4)\n', (8823, 8830), True, 'import numpy as np\n'), ((1103, 1120), 'numpy.kron', 'np.kron', (['e_0', 'e_0'], {}), '(e_0, e_0)\n', (1110, 1120), True, 'import numpy as np\n'), ((1123, 1140), 'numpy.kron', 'np.kron', (['e_0', 'e_1'], {}), '(e_0, e_1)\n', (1130, 1140), True, 'import numpy as np\n'), ((2022, 2039), 'numpy.kron', 'np.kron', (['e_0', 'e_0'], {}), '(e_0, e_0)\n', (2029, 2039), True, 'import numpy as np\n'), ((2042, 2059), 'numpy.kron', 'np.kron', (['e_0', 'e_1'], {}), '(e_0, e_1)\n', (2049, 2059), True, 'import numpy as np\n'), ((2732, 
2766), 'numpy.kron', 'np.kron', (['vt_mat[:, 0]', 'u_mat[:, 0]'], {}), '(vt_mat[:, 0], u_mat[:, 0])\n', (2739, 2766), True, 'import numpy as np\n'), ((2813, 2847), 'numpy.kron', 'np.kron', (['vt_mat[:, 1]', 'u_mat[:, 1]'], {}), '(vt_mat[:, 1], u_mat[:, 1])\n', (2820, 2847), True, 'import numpy as np\n'), ((3842, 3876), 'numpy.kron', 'np.kron', (['vt_mat[:, 0]', 'u_mat[:, 0]'], {}), '(vt_mat[:, 0], u_mat[:, 0])\n', (3849, 3876), True, 'import numpy as np\n'), ((3923, 3957), 'numpy.kron', 'np.kron', (['vt_mat[:, 1]', 'u_mat[:, 1]'], {}), '(vt_mat[:, 1], u_mat[:, 1])\n', (3930, 3957), True, 'import numpy as np\n'), ((4325, 4342), 'numpy.kron', 'np.kron', (['e_0', 'e_0'], {}), '(e_0, e_0)\n', (4332, 4342), True, 'import numpy as np\n'), ((4345, 4362), 'numpy.kron', 'np.kron', (['e_0', 'e_1'], {}), '(e_0, e_1)\n', (4352, 4362), True, 'import numpy as np\n'), ((8454, 8471), 'numpy.kron', 'np.kron', (['e_0', 'e_0'], {}), '(e_0, e_0)\n', (8461, 8471), True, 'import numpy as np\n'), ((8520, 8537), 'numpy.kron', 'np.kron', (['e_0', 'e_1'], {}), '(e_0, e_1)\n', (8527, 8537), True, 'import numpy as np\n'), ((8624, 8634), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (8631, 8634), True, 'import numpy as np\n'), ((8637, 8647), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (8644, 8647), True, 'import numpy as np\n'), ((8656, 8666), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (8663, 8666), True, 'import numpy as np\n'), ((8549, 8559), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (8556, 8559), True, 'import numpy as np\n'), ((8562, 8572), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (8569, 8572), True, 'import numpy as np\n'), ((8581, 8591), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (8588, 8591), True, 'import numpy as np\n'), ((8421, 8431), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (8428, 8431), True, 'import numpy as np\n'), ((8440, 8450), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (8447, 8450), True, 'import numpy as np\n'), ((8487, 8497), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (8494, 8497), True, 'import numpy as np\n'), ((8506, 8516), 'numpy.sqrt', 'np.sqrt', (['(6)'], {}), '(6)\n', (8513, 8516), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Here is a three-inputs runner. We make it for three inputs because we mostly
run a function at different basis function orders p, element layouts k and
crazy-mesh coefficients c.
<unittest> <unittests_P_Solvers> <test_No3_TIR>.
<NAME> (C)
Created on Mon Oct 29 15:38:46 2018
Aerodynamics, AE
TU Delft
"""
import types
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from time import localtime, strftime, time
from screws.decorators.accepts import accepts
from tools.deprecated.serial_runners._runner_ import Runner
from tools.deprecated.serial_runners.INSTANCES.COMPONENTS.tir_drw import TIR_DRW
from tools.deprecated.serial_runners.INSTANCES.COMPONENTS.m_tir_tabular import M_TIR_Tabulate
from root.config.main import sIze
assert sIze == 1, "Runners can only be run in single thread."
class TimeIteration:
""" We use this contextmanager to time an iteration. """
def __init__(self, m, num_iterations, total_cost_list):
"""
Parameters
----------
m : int
Currently, it is mth iteration.
num_iterations : int
Total amount of iterations.
total_cost_list : list
"""
self.m = m
self.num_iterations = num_iterations
        if not total_cost_list:
self.already_cost = 0
else:
self.already_cost = total_cost_list[-1]
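            # Entries of `total_cost_list` are '[HH:MM:SS]' strings; parse the
            # last one back into a number of seconds so it can be accumulated.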
# noinspection PyUnresolvedReferences
if self.already_cost[0] == '[' and self.already_cost[-1] == ']':
self.already_cost = self.already_cost[1:-1]
hh, mm, ss = self.already_cost.split(':')
hh = int(hh) * 3600
mm = int(mm) * 60
ss = int(ss)
self.already_cost = hh + mm + ss
else:
self.already_cost = 0
def __enter__(self):
""" do something before executing the context."""
self.t1 = time()
print("\n\n______________________________________________________________________")
print(">>> Do {}th of {} iterations......".format(self.m + 1, self.num_iterations))
print(" start at [" + strftime("%Y-%m-%d %H:%M:%S", localtime()) + ']')
return self
def __exit__(self, exc_type, exc_val, exc_tb):
""" Do some tear down action; execute after each time the contents are run."""
self.t2 = time()
# mth iteration costs?_________________________________________________
t = self.t2 - self.t1
if t < 10:
print("\n ~> {}th of {} iterations costs: [{:.2f} seconds]".format(
self.m + 1, self.num_iterations, t))
else:
minutes, seconds = divmod(t, 60)
hours, minutes = divmod(minutes, 60)
print("\n ~> {}th of {} iterations costs: [%02d:%02d:%02d (hh:mm:ss)]".format(
self.m + 1, self.num_iterations) % (hours, minutes, seconds))
self.mth_iteration_cost = t
minutes, seconds = divmod(self.mth_iteration_cost, 60)
hours, minutes = divmod(minutes, 60)
if hours > 99:
hours, minutes, seconds = 99, 59, 59
self.mth_iteration_cost_HMS = '[%02d:%02d:%02d]' % (hours, minutes, seconds)
# m iterations cost?___________________________________________________
minutes, seconds = divmod(t + self.already_cost, 60)
hours, minutes = divmod(minutes, 60)
print(" ~> {} of {} iterations cost: [%02d:%02d:%02d (hh:mm:ss)]".format(
self.m + 1, self.num_iterations) % (hours, minutes, seconds))
if hours > 99:
hours, minutes, seconds = 99, 59, 59
self.total_cost = '[%02d:%02d:%02d]' % (hours, minutes, seconds)
# ERT?_________________________________________________________________
minutes, seconds = divmod((t + self.already_cost) * (self.num_iterations / (self.m + 1)) -
(t + self.already_cost), 60)
hours, minutes = divmod(minutes, 60)
print(" ~> Estimated remaining time: [%02d:%02d:%02d (hh:mm:ss)]\n"
% (hours, minutes, seconds))
if hours > 99:
hours, minutes, seconds = 99, 59, 59
self.ERT = '[%02d:%02d:%02d]' % (hours, minutes, seconds)
return
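# A minimal usage sketch of TimeIteration (illustrative only; `run_one_case`
# is a hypothetical placeholder for the per-iteration work):
#
#     total_cost = []
#     for m in range(num_iterations):
#         with TimeIteration(m, num_iterations, total_cost) as TI:
#             run_one_case(m)
#         total_cost.append(TI.total_cost)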
class ThreeInputsRunner(Runner):
"""
    We use this class to run functions of three inputs. Normally, the three
    inputs are the basis function degree, the mesh density and a domain
    coefficient (like the deformation coefficient of the crazy mesh). We
    therefore normally have many values for input[0] or input[1] but very few
    for input[2]. This is not ideal, since we meshgrid the values of input[0]
    and input[1], which may make the case (input[0][-1], input[1][-1]) very
    expensive to compute. The runner is nevertheless useful in many
    situations, and the `skips` option of `iterate` provides a remedy.
    Consequently, the matplot method can only plot against input[0] or
    input[1]; input[2] then helps determine how many lines are plotted.
<test_unittests> <Test_solvers> <test_No3_TIR>.
"""
def __init__(self, solver=None, ___file___=None, task_name=None):
"""
Parameters
----------
        solver :
            The solver. If solver is set to None, then we probably only use
            the runner to read data from somewhere, like a '.txt' file, and
            then plot the data.
"""
super().__init__(solver=solver, ___file___=___file___, task_name=task_name)
if solver is not None:
# noinspection PyTypeChecker
if isinstance(solver, types.FunctionType):
# noinspection PyUnresolvedReferences
assert solver.__code__.co_argcount >= 3, \
" <ThreeInputsRunner> : function solver needs to at least have 3 inputs."
elif isinstance(solver, types.MethodType):
# noinspection PyUnresolvedReferences
assert solver.__code__.co_argcount >= 4, \
" <ThreeInputsRunner> : method needs to at least have 3 inputs (besides `self`)."
elif solver.__class__.__name__ == 'CPUDispatcher':
pass
else:
raise NotImplementedError()
assert len(self._input_names_) == 3, " <ThreeInputsRunner> : we need 3 input names."
self._I0seq_, self._I1seq_, self._I2seq_ = None, None, None
self._S0seq_, self._S1seq_, self._S2seq_ = None, None, None
self._TIR_DRW_ = TIR_DRW(self)
self._tabular_ = M_TIR_Tabulate(self)
self._freeze_self_()
@classmethod
def ___file_name_extension___(cls):
return '.3ir'
@property
def drw(self):
"""Data reader and writer."""
return self._TIR_DRW_
@property
def input_shape(self):
return len(self._I0seq_), len(self._I1seq_), len(self._I2seq_)
@accepts('self', (list, tuple), (list, tuple), (list, tuple))
def iterate(self, I0seq, I1seq, I2seq, writeto=None, saveto=None,
skips=None, **kwargs):
"""
Parameters
----------
I0seq:
The sequence for the first input of `solver`.
I1seq:
The sequence for the second input of `solver`.
I2seq:
The sequence for the third input of `solver`.
writeto : None or str, optional
If `writeto` is not None, we write the result after each iteration
to the file named `writeto`. Notice that when there is already a
file named `writeto`.txt, this means we already computed some
results and they are put in this .txt file. So we first read data
from this .txt file (of course, we first check if the file is
correct), and then we only compute the remaining iterations and
append all these newly computed results to `writeto`.txt.
        saveto : None or str, optional
            If `saveto` is not None, we save self to `saveto` after all
            iterations.
        skips : list, tuple, optional
            The computing sequences to be skipped. The corresponding outputs
            are stored as NaN in the results.
            Note that this `skips` option also results in a `meshgrid`
            structure, so a whole block of the inputs matrix is skipped.
"""
if skips is None:
skips = [None, None, None]
self._I0seq_, self._I1seq_, self._I2seq_ = I0seq, I1seq, I2seq
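        # Normalise each skips entry: a scalar becomes a one-element list so
        # that the membership tests below work uniformly.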
# noinspection PyTypeChecker
self._S0seq_ = [skips[0],] if np.shape(skips[0])==() else skips[0]
# noinspection PyTypeChecker
self._S1seq_ = [skips[1],] if np.shape(skips[1])==() else skips[1]
# noinspection PyTypeChecker
self._S2seq_ = [skips[2],] if np.shape(skips[2])==() else skips[2]
I, J, K = len(I0seq), len(I1seq), len(I2seq)
print("-<ThreeInputsRunner>-<I0>: {}".format(I0seq))
print("-<ThreeInputsRunner>-<I1>: {}".format(I1seq))
print("-<ThreeInputsRunner>-<I2>: {}".format(I2seq))
print("-<ThreeInputsRunner>-<kwargs>: {}".format(kwargs))
num_iterations = I * J * K
print("-<ThreeInputsRunner>-<total iterations>: {}.".format(num_iterations))
self.___kwargs___ = kwargs
self.___init_results___()
if writeto is not None: self.drw.read(writeto)
print("\n\n------------------------- > TIR Iterations < -------------------------")
for k in range(K):
for j in range(J):
for i in range(I):
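                    # Flatten (i, j, k) into a single iteration index m:
                    # input[0] varies fastest, input[2] slowest.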
m = i + j*I + k*I*J
if m not in self.drw._computed_m_:
if self._I0seq_[i] in self._S0seq_ and \
self._I1seq_[j] in self._S1seq_ and \
self._I2seq_[k] in self._S2seq_:
                            outputs = [np.nan for _ in range(len(self._output_names_))]  # np.NaN was removed in NumPy 2
if m == 0:
ITC = TTC = ERT = '[00:00:00]'
else:
ITC = '[00:00:00]'
TTC = self._results_['total_cost'][-1]
ERT = self._results_['ERT'][-1]
self.___update_results___(self._I0seq_[i], self._I1seq_[j], self._I2seq_[k],
outputs, ITC, TTC, ERT)
else:
with TimeIteration(m, num_iterations, self._results_['total_cost']) as TIcontextmanager:
print('\t> input[0]: {}'.format(self._I0seq_[i]))
print('\t> input[1]: {}'.format(self._I1seq_[j]))
print('\t> input[2]: {}'.format(self._I2seq_[k]))
print('-------------------------------------------------\n')
outputs = self._solver_(self._I0seq_[i],
self._I1seq_[j],
self._I2seq_[k], **kwargs)
assert len(outputs) == len(self._output_names_), " <ThreeInputsRunner> "
self.___update_results___(self._I0seq_[i], self._I1seq_[j], self._I2seq_[k]
,outputs
,TIcontextmanager.mth_iteration_cost_HMS
,TIcontextmanager.total_cost
,TIcontextmanager.ERT)
self.drw.write_iteration(m)
self.___deal_with_saveto___(writeto, saveto)
self.___send_an_completion_reminder_email_to_me___(writeto, saveto)
print("_______________________ > TIR IterationsDone < _______________________\n\n")
def ___init_results___(self):
""" """
self._results_ = dict()
self._results_['I0'] = []
self._results_['I1'] = []
self._results_['I2'] = []
for inn in self._input_names_: self._results_[inn] = []
for on in self._output_names_: self._results_[on] = []
self._results_['solver_time_cost'] = []
self._results_['total_cost'] = []
self._results_['ERT'] = []
def ___update_results___(self, I0, I1, I2, outputs, time_cost, tc, ERT):
""" """
self._results_['I0'].append(I0)
self._results_['I1'].append(I1)
self._results_['I2'].append(I2)
self._results_[self._input_names_[0]].append(I0)
self._results_[self._input_names_[1]].append(I1)
self._results_[self._input_names_[2]].append(I2)
for m, on in enumerate(self.output_names):
self._results_[on].append(outputs[m])
self._results_['solver_time_cost'].append(time_cost)
self._results_['total_cost'].append(tc)
self._results_['ERT'].append(ERT)
# noinspection PyStatementEffect
self.results
@property
def _inputs_index_dict_(self):
"""
Returns
-------
_icd_ : dict
            A dict whose key: value pairs map the iteration index i to the
            inputs tuple (input[0], input[1], input[2]).
"""
_icd_ = {}
try:
for i in range(len(self.results['I0'])):
_icd_[i] = (self.results['I0'][i], self.results['I1'][i], self.results['I2'][i])
except KeyError:
pass
return _icd_
@property
def tabular(self):
""" """
return self._tabular_
@classmethod
def readfile(cls, readfilename):
"""
        An override of `readfile` from the parent 'Runner'. We override it
        because this class is not standard; it has a special data structure
        for writing.
"""
assert '.txt' in readfilename, " <ThreeInputsRunner> : I only read .txt files."
return cls.readtxt(readfilename)
@classmethod
def readtxt(cls, filename):
"""
        We use this method to read a '.txt' file so that self is capable of
        plotting the results stored in that '.txt' file.
"""
with open(filename, 'r') as f:
fstr = f.readlines()
total_lines = len(fstr)
assert fstr[0][:-1] == '<ThreeInputsRunner>', \
" <TIR_DRW> : I need a <ThreeInputsRunner> file."
TIRinstance = ThreeInputsRunner()
i = fstr.index('<inputs>:\n')
input_0, I0sequence = fstr[i+1].split(' sequence: ')
input_1, I1sequence = fstr[i+2].split(' sequence: ')
input_2, I2sequence = fstr[i+3].split(' sequence: ')
TIRinstance._input_names_ = (input_0, input_1, input_2)
TIRinstance._I0seq_ = list(eval(I0sequence))
TIRinstance._I1seq_ = list(eval(I1sequence))
TIRinstance._I2seq_ = list(eval(I2sequence))
i = fstr.index('<kwargs>:\n')
TIRinstance.___kwargs___ = fstr[i+1][:-1]
i = fstr.index('<results>:\n')
i += 1
stored = fstr[i].split()
num_stored = len(stored)
j = stored.index('|')
outputs = stored[4:j]
TIRinstance._output_names_ = tuple(outputs)
TIRinstance.___init_results___()
while i < total_lines:
try:
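                # Data rows start with a digit; any other line raises
                # ValueError below and is skipped.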
int(fstr[i][0])
fstr_i_split = list(fstr[i].split())
if len(fstr_i_split) == num_stored:
                    # A full set of values is stored on this line, so we can
                    # append it to the results.
for k in range(j):
fstr_i_split[k] = float(fstr_i_split[k])
TIRinstance.___update_results___(*fstr_i_split[1:4],
fstr_i_split[4:j],
*fstr_i_split[j+1:])
else:
break
except ValueError:
pass
i += 1
return TIRinstance
def writetxt(self, filename):
"""We write self to the file named `filename.txt`."""
self.drw.readwritefilename = filename
self.drw.initialize_writing()
for m in range(len(self.results['I0'])):
self.drw.write_iteration(m)
@property
def I0seq(self):
return self._I0seq_
@property
def I1seq(self):
return self._I1seq_
@property
def I2seq(self):
return self._I2seq_
@property
def rdf(self):
""" The `results` in `DataFrame` format."""
if self.results is None:
return None
else:
self._rdf_ = pd.DataFrame(self.results)
self._rdf_ = self._rdf_.drop(columns=['I0', 'I1', 'I2'])
self._rdf_.columns = (*self.input_names, *self.output_names, 'ITC', 'TTC', 'ERT')
return self._rdf_
@classmethod
def merge(cls, *args):
"""
        We try to merge several 'ThreeInputsRunner' instances into a single
        one. We only merge instances that have the same results keys. In
        principle, we should also require the same solver and so on, but we
        skip that here to leave the users more freedom.
"""
for i, arg in enumerate(args):
assert arg.__class__.__name__ == 'ThreeInputsRunner', \
" <ThreeInputsRunner> : {}th arg is not ThreeInputsRunner.".format(i)
_input_names_ = args[0]._input_names_
_output_names_ = args[0]._output_names_
___kwargs___ = args[0].___kwargs___
for arg in args:
assert arg._input_names_ == _input_names_, " <ThreeInputsRunner> "
assert arg._output_names_ == _output_names_, " <ThreeInputsRunner> "
assert arg.___kwargs___ == ___kwargs___, " <ThreeInputsRunner> "
TIRinstance = ThreeInputsRunner()
TIRinstance._input_names_ = _input_names_
TIRinstance._output_names_ = _output_names_
TIRinstance.___kwargs___ = ___kwargs___
I0seq, I1seq, I2seq = set(), set(), set()
for i, tir in enumerate(args):
I0seq.update(set(tir.I0seq))
I1seq.update(set(tir.I1seq))
I2seq.update(set(tir.I2seq))
TIRinstance._I0seq_ = list(I0seq)
TIRinstance._I1seq_ = list(I1seq)
TIRinstance._I2seq_ = list(I2seq)
_inputs_dict_ = {}
for i, tiri in enumerate(args):
for inputs in tiri._inputs_index_dict_:
_inputs_dict_[str(i) + '-' + str(inputs)] = tiri._inputs_index_dict_[inputs]
result_keys = args[0].results.keys()
TIRinstance._results_ = {}
for key in result_keys:
TIRinstance._results_[key] = []
I, J, K = len(I0seq), len(I1seq), len(I2seq)
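        # Re-assemble the merged results in the canonical order used by
        # `iterate`: input[0] varies fastest, input[2] slowest.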
for k in range(K):
for j in range(J):
for i in range(I):
_inputs_ = (TIRinstance._I0seq_[i], TIRinstance._I1seq_[j], TIRinstance._I2seq_[k])
for thekey, value in _inputs_dict_.items():
if _inputs_ == value:
break
# noinspection PyUnboundLocalVariable
assert _inputs_dict_[thekey] == _inputs_, \
" <ThreeInputsRunner> : no data found for inputs: {}.".format(_inputs_)
ith_arg, jth_result = thekey.split('-')
ith_arg = int(ith_arg)
jth_result = int(jth_result)
for key in result_keys:
TIRinstance._results_[key].append(args[ith_arg].results[key][jth_result])
return TIRinstance
def matplot(self, res2plot, against, plot_type='loglog',
hcp=None, show_order=True, # h-convergence plot related
title=None, left=0.15, bottom=0.15,
ylabel=None, yticks=None,
xlabel=None, xticks=None,
labels=None, linewidth=None, style=None, color=None,
line_styles=None, line_colors=None, legend_local='best',
tick_size=15, label_size=15, legend_size=15, title_size=15,
minor_tick_length=6, major_tick_length=12, tick_pad=8, legend_frame=False,
figsize=(8, 6),
usetex=False, saveto=None):
"""
        We use this method to do the plotting.
        Parameters
        ----------
        res2plot : str or sequence of str
        hcp : None or float, optional
            When `hcp` is not None, we are doing an h-convergence plot and
            the x-data becomes hcp/x-data.
        show_order : bool
            If True, show the convergence order in the line labels.
"""
assert self.results is not None, \
" <ThreeInputsRunner> : SR_iterative is empty, no p-convergence plot."
if isinstance(res2plot, str):
res2plot = (res2plot,)
for r2p in res2plot:
assert r2p in self.results, " <ThreeInputsRunner> : res2plot={} is wrong.".format(r2p)
num_r2p = len(res2plot)
assert against in (0, 1), " <ThreeInputsRunner> : against={} is wrong.".format(against)
assert len(self.results['I0']) == len(self.I0seq) * len(self.I1seq) * len(self.I2seq), \
" <ThreeInputsRunner> : results are not complete."
plt.rc('text', usetex=usetex)
if usetex:
            plt.rcParams['text.latex.preamble'] = r"\usepackage{amsmath}"  # a plain str since Matplotlib 3.3
if line_styles is None:
line_styles = ('-o', '-v', '-s', '-<', '-H', '-8', '-^', '-p',
'-*', '-h', '->', '-D', '-d', '-P', '-X')
if line_colors is None:
line_colors = [(0.5, 0, 0, 0.7),
(0, 0, 0.5, 0.7),
(0, 0.5, 0, 0.7),
(1, 0, 0, 0.7),
(0.5, 0.5, 0, 0.7),
(0.5, 0, 0.5, 0.7),
(0, 0.5, 0.5, 0.7)]
I, J, K = len(self._I0seq_), len(self._I1seq_), len(self._I2seq_)
if against == 0:
num_lines = num_r2p * J * K
points_per_line = I
elif against == 1:
num_lines = num_r2p * I * K
points_per_line = J
else:
raise Exception
labels_real_time = []
line_style_real_time = []
line_color_real_time = []
y_data = np.zeros((num_lines, points_per_line))
for s in range(num_r2p):
if against == 0:
for k in range(K):
for j in range(J):
m = j + k * J
n = j + k * J + s * J * K
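                        # Results are stored with input[0] varying fastest,
                        # so each plotted line is a contiguous slice of length I.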
x_data = np.array(self.I0seq)
y_data[n, :] = self.results[res2plot[s]][m * I:(m + 1) * I]
line_style_real_time += [line_styles[j]]
line_color_real_time += [line_colors[k]]
if labels is None:
if len(res2plot) == 1:
labels_real_time.append(r'${}={}$, ${}={:.2f}$'.format(
self.input_names[1], self._I1seq_[j],
self.input_names[2], self._I2seq_[k]))
else:
labels_real_time.append(r'{}: ${}={}$, ${}={:.2f}$'.format(
res2plot[s].replace('_', '-'),
self.input_names[1], self._I1seq_[j],
self.input_names[2], self._I2seq_[k]))
elif against == 1:
for k in range(K):
for i in range(I):
m = i + k * I + s * I * K
x_data = np.array(self.I1seq)
y_data[m, :] = self.results[res2plot[s]][k * I * J + i:(k + 1) * I * J:I]
line_style_real_time += [line_styles[i]]
line_color_real_time += [line_colors[k]]
if labels is None:
if len(res2plot) == 1:
labels_real_time.append(r'${}={}$, ${}={:.2f}$'.format(
self.input_names[0], self._I0seq_[i],
self.input_names[2], self._I2seq_[k]))
else:
labels_real_time.append(r'{}: ${}={}$, ${}={:.2f}$'.format(
res2plot[s].replace('_', '-'),
self.input_names[0], self._I0seq_[i],
self.input_names[2], self._I2seq_[k]))
else:
raise Exception
if hcp is not None:
# noinspection PyUnboundLocalVariable
x_data = hcp / x_data
plt.figure(figsize=figsize)
plt.gcf().subplots_adjust(left=left)
plt.gcf().subplots_adjust(bottom=bottom)
linewidth = 1 if linewidth is None else linewidth
style = line_style_real_time if style is None else style
color = line_color_real_time if color is None else color
labels = labels_real_time if labels is None else labels
if show_order:
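            # Estimate the convergence order as the slope between the last
            # two points of the curve on the log-log plot.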
for m in range(num_lines):
# noinspection PyUnboundLocalVariable
order = (np.log10(y_data[m, -1]) - np.log10(y_data[m, -2])) / \
(np.log10(x_data[-1]) - np.log10(x_data[-2]))
                labels[m] = labels[m] + r', $\mathrm{order}=%.2f$' % order
ploter = getattr(plt, plot_type)
for i in range(num_lines):
# ploter(x_data , y_data[i,:])
ploter(x_data, y_data[i, :], style[i],
color=color[i], label=labels[i], linewidth=linewidth)
# plot title?__________________________________________________________
if title is None:
plt.title(r'' + str(res2plot).replace('_', '-'), fontsize=title_size)
elif title is False:
pass
else:
plt.title(r'' + title, fontsize=title_size)
if xlabel is not None: plt.xlabel(xlabel, fontsize=label_size)
if ylabel is not None: plt.ylabel(ylabel, fontsize=label_size)
if xticks is not None: plt.xticks(xticks)
if yticks is not None: plt.yticks(yticks)
plt.tick_params(which='both', labeltop=False, labelright=False, top=True, right=True)
plt.tick_params(axis='both', which='minor', direction='in', length=minor_tick_length)
plt.tick_params(axis='both', which='major', direction='in', length=major_tick_length)
plt.tick_params(axis='both', which='both', labelsize=tick_size)
plt.tick_params(axis='x', which='both', pad=tick_pad)
plt.legend(fontsize=legend_size, loc=legend_local, frameon=legend_frame)
plt.tight_layout()
if saveto is not None and saveto != '':
plt.savefig(saveto, bbox_inches='tight')
plt.show()
def solver(K, O, T):
"""
Parameters
----------
K :
O :
T :
Returns
-------
d :
e :
"""
d = K + 5 * O - T
e = K + T
return d, e
if __name__ == '__main__':
from numpy import pi
# from CONFORMING.DIII.__PROGRAMS__.P2_Poisson.S1_IGAA2018.v2101_DB_solver import solver
K = [1,2,3]
O = [1,2]
T = [0, pi/4]
runner1 = ThreeInputsRunner(solver, __file__)
# runner1.iterate(K, O, T, writeto='test_write1.txt', skips=((1,2),(1,2),(0,)))
runner1.iterate(K, O, T, writeto='test_write1.txt')
K = [1,2,3]
O = [3,]
T = [0, pi/4]
runner2 = ThreeInputsRunner(solver, __file__)
runner2.iterate(K, O, T)
runner1.writetxt('test_write1')
runner2.writetxt('test_write2')
runner1 = ThreeInputsRunner.readtxt('test_write1')
runner2 = ThreeInputsRunner.readtxt('test_write2')
rm = ThreeInputsRunner.merge(runner1, runner2)
rm.matplot('d', 1, 'loglog', usetex=True)
# rm.matplot('e', 1, 'loglog', usetex=True)
runner1.saveto('save_1')
runner2.saveto('save_2')
runner1 = ThreeInputsRunner.readfrom('save_1.3ir')
runner2 = ThreeInputsRunner.readfrom('save_2.3ir')
import os
os.remove('save_1.3ir')
os.remove('save_2.3ir')
os.remove('test_write1.txt')
os.remove('test_write1')
os.remove('test_write2')
| [
"matplotlib.pyplot.title",
"os.remove",
"tools.deprecated.serial_runners.INSTANCES.COMPONENTS.m_tir_tabular.M_TIR_Tabulate",
"numpy.shape",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"matplotlib.pyplot.yticks",
"tools.deprecat... | [((7034, 7094), 'screws.decorators.accepts.accepts', 'accepts', (['"""self"""', '(list, tuple)', '(list, tuple)', '(list, tuple)'], {}), "('self', (list, tuple), (list, tuple), (list, tuple))\n", (7041, 7094), False, 'from screws.decorators.accepts import accepts\n'), ((28490, 28513), 'os.remove', 'os.remove', (['"""save_1.3ir"""'], {}), "('save_1.3ir')\n", (28499, 28513), False, 'import os\n'), ((28518, 28541), 'os.remove', 'os.remove', (['"""save_2.3ir"""'], {}), "('save_2.3ir')\n", (28527, 28541), False, 'import os\n'), ((28546, 28574), 'os.remove', 'os.remove', (['"""test_write1.txt"""'], {}), "('test_write1.txt')\n", (28555, 28574), False, 'import os\n'), ((28579, 28603), 'os.remove', 'os.remove', (['"""test_write1"""'], {}), "('test_write1')\n", (28588, 28603), False, 'import os\n'), ((28608, 28632), 'os.remove', 'os.remove', (['"""test_write2"""'], {}), "('test_write2')\n", (28617, 28632), False, 'import os\n'), ((1951, 1957), 'time.time', 'time', ([], {}), '()\n', (1955, 1957), False, 'from time import localtime, strftime, time\n'), ((2402, 2408), 'time.time', 'time', ([], {}), '()\n', (2406, 2408), False, 'from time import localtime, strftime, time\n'), ((6636, 6649), 'tools.deprecated.serial_runners.INSTANCES.COMPONENTS.tir_drw.TIR_DRW', 'TIR_DRW', (['self'], {}), '(self)\n', (6643, 6649), False, 'from tools.deprecated.serial_runners.INSTANCES.COMPONENTS.tir_drw import TIR_DRW\n'), ((6675, 6695), 'tools.deprecated.serial_runners.INSTANCES.COMPONENTS.m_tir_tabular.M_TIR_Tabulate', 'M_TIR_Tabulate', (['self'], {}), '(self)\n', (6689, 6695), False, 'from tools.deprecated.serial_runners.INSTANCES.COMPONENTS.m_tir_tabular import M_TIR_Tabulate\n'), ((21547, 21576), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': 'usetex'}), "('text', usetex=usetex)\n", (21553, 21576), True, 'import matplotlib.pyplot as plt\n'), ((22626, 22664), 'numpy.zeros', 'np.zeros', (['(num_lines, points_per_line)'], {}), '((num_lines, points_per_line))\n', (22634, 22664), True, 'import numpy as np\n'), ((25110, 25137), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (25120, 25137), True, 'import matplotlib.pyplot as plt\n'), ((26636, 26725), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'which': '"""both"""', 'labeltop': '(False)', 'labelright': '(False)', 'top': '(True)', 'right': '(True)'}), "(which='both', labeltop=False, labelright=False, top=True,\n right=True)\n", (26651, 26725), True, 'import matplotlib.pyplot as plt\n'), ((26730, 26820), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""minor"""', 'direction': '"""in"""', 'length': 'minor_tick_length'}), "(axis='both', which='minor', direction='in', length=\n minor_tick_length)\n", (26745, 26820), True, 'import matplotlib.pyplot as plt\n'), ((26824, 26914), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""major"""', 'direction': '"""in"""', 'length': 'major_tick_length'}), "(axis='both', which='major', direction='in', length=\n major_tick_length)\n", (26839, 26914), True, 'import matplotlib.pyplot as plt\n'), ((26918, 26981), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""both"""', 'which': '"""both"""', 'labelsize': 'tick_size'}), "(axis='both', which='both', labelsize=tick_size)\n", (26933, 26981), True, 'import matplotlib.pyplot as plt\n'), ((26990, 27043), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""x"""', 'which': 
'"""both"""', 'pad': 'tick_pad'}), "(axis='x', which='both', pad=tick_pad)\n", (27005, 27043), True, 'import matplotlib.pyplot as plt\n'), ((27052, 27124), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'legend_size', 'loc': 'legend_local', 'frameon': 'legend_frame'}), '(fontsize=legend_size, loc=legend_local, frameon=legend_frame)\n', (27062, 27124), True, 'import matplotlib.pyplot as plt\n'), ((27135, 27153), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (27151, 27153), True, 'import matplotlib.pyplot as plt\n'), ((27263, 27273), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27271, 27273), True, 'import matplotlib.pyplot as plt\n'), ((16969, 16995), 'pandas.DataFrame', 'pd.DataFrame', (['self.results'], {}), '(self.results)\n', (16981, 16995), True, 'import pandas as pd\n'), ((26415, 26454), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {'fontsize': 'label_size'}), '(xlabel, fontsize=label_size)\n', (26425, 26454), True, 'import matplotlib.pyplot as plt\n'), ((26486, 26525), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {'fontsize': 'label_size'}), '(ylabel, fontsize=label_size)\n', (26496, 26525), True, 'import matplotlib.pyplot as plt\n'), ((26557, 26575), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xticks'], {}), '(xticks)\n', (26567, 26575), True, 'import matplotlib.pyplot as plt\n'), ((26607, 26625), 'matplotlib.pyplot.yticks', 'plt.yticks', (['yticks'], {}), '(yticks)\n', (26617, 26625), True, 'import matplotlib.pyplot as plt\n'), ((27214, 27254), 'matplotlib.pyplot.savefig', 'plt.savefig', (['saveto'], {'bbox_inches': '"""tight"""'}), "(saveto, bbox_inches='tight')\n", (27225, 27254), True, 'import matplotlib.pyplot as plt\n'), ((8726, 8744), 'numpy.shape', 'np.shape', (['skips[0]'], {}), '(skips[0])\n', (8734, 8744), True, 'import numpy as np\n'), ((8838, 8856), 'numpy.shape', 'np.shape', (['skips[1]'], {}), '(skips[1])\n', (8846, 8856), True, 'import numpy as np\n'), ((8950, 8968), 'numpy.shape', 'np.shape', (['skips[2]'], {}), '(skips[2])\n', (8958, 8968), True, 'import numpy as np\n'), ((25146, 25155), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (25153, 25155), True, 'import matplotlib.pyplot as plt\n'), ((25191, 25200), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (25198, 25200), True, 'import matplotlib.pyplot as plt\n'), ((26340, 26382), 'matplotlib.pyplot.title', 'plt.title', (["('' + title)"], {'fontsize': 'title_size'}), "('' + title, fontsize=title_size)\n", (26349, 26382), True, 'import matplotlib.pyplot as plt\n'), ((2205, 2216), 'time.localtime', 'localtime', ([], {}), '()\n', (2214, 2216), False, 'from time import localtime, strftime, time\n'), ((22922, 22942), 'numpy.array', 'np.array', (['self.I0seq'], {}), '(self.I0seq)\n', (22930, 22942), True, 'import numpy as np\n'), ((25626, 25649), 'numpy.log10', 'np.log10', (['y_data[m, -1]'], {}), '(y_data[m, -1])\n', (25634, 25649), True, 'import numpy as np\n'), ((25652, 25675), 'numpy.log10', 'np.log10', (['y_data[m, -2]'], {}), '(y_data[m, -2])\n', (25660, 25675), True, 'import numpy as np\n'), ((25706, 25726), 'numpy.log10', 'np.log10', (['x_data[-1]'], {}), '(x_data[-1])\n', (25714, 25726), True, 'import numpy as np\n'), ((25729, 25749), 'numpy.log10', 'np.log10', (['x_data[-2]'], {}), '(x_data[-2])\n', (25737, 25749), True, 'import numpy as np\n'), ((24018, 24038), 'numpy.array', 'np.array', (['self.I1seq'], {}), '(self.I1seq)\n', (24026, 24038), True, 'import numpy as np\n')] |
#gen_matrix.py
#Created by ImKe on 2020/3/1
#Copyright © 2020 ImKe. All rights reserved.
import numpy as np
import random
import scipy.sparse as ss
#generate a random matrix with shape n1*n2 and rank r to evaluate the algorithm
def gen_matrix(n1, n2, r):
np.random.seed(999)
H = np.ones((n1,n2))
M = np.random.random((n1,r)).dot(np.random.random((r,n2)))
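    # df is the number of degrees of freedom of an n1-by-n2 rank-r matrix;
    # the number of observed entries m is capped at 99% of all entries.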
    df = r*(n1 + n2 - r)
    m = min(1*df, round(.99*n1*n2))
ind = random.sample(range(n1*n2),m)
#set sample space Omega
Omega = np.unravel_index(ind, (n1,n2))
data_H = H[Omega]
#to ndarray type
H_1 = ss.csr_matrix((data_H,Omega),shape = (n1,n2)).A
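    # H_1 is therefore the 0/1 observation mask: 1 on the sampled index set
    # Omega and 0 everywhere else.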
data_M = M[Omega]
#M_1 = M*H_1(Hadamard product)
M_1 = ss.csr_matrix((data_M, Omega),shape = (n1,n2)).A
return M, H_1 | [
"numpy.random.seed",
"numpy.ones",
"numpy.unravel_index",
"numpy.random.random",
"scipy.sparse.csr_matrix"
] | [((262, 281), 'numpy.random.seed', 'np.random.seed', (['(999)'], {}), '(999)\n', (276, 281), True, 'import numpy as np\n'), ((290, 307), 'numpy.ones', 'np.ones', (['(n1, n2)'], {}), '((n1, n2))\n', (297, 307), True, 'import numpy as np\n'), ((509, 540), 'numpy.unravel_index', 'np.unravel_index', (['ind', '(n1, n2)'], {}), '(ind, (n1, n2))\n', (525, 540), True, 'import numpy as np\n'), ((344, 369), 'numpy.random.random', 'np.random.random', (['(r, n2)'], {}), '((r, n2))\n', (360, 369), True, 'import numpy as np\n'), ((594, 640), 'scipy.sparse.csr_matrix', 'ss.csr_matrix', (['(data_H, Omega)'], {'shape': '(n1, n2)'}), '((data_H, Omega), shape=(n1, n2))\n', (607, 640), True, 'import scipy.sparse as ss\n'), ((714, 760), 'scipy.sparse.csr_matrix', 'ss.csr_matrix', (['(data_M, Omega)'], {'shape': '(n1, n2)'}), '((data_M, Omega), shape=(n1, n2))\n', (727, 760), True, 'import scipy.sparse as ss\n'), ((315, 340), 'numpy.random.random', 'np.random.random', (['(n1, r)'], {}), '((n1, r))\n', (331, 340), True, 'import numpy as np\n')] |
# Common library routines for the BCycle analysis
import pandas as pd
import numpy as np
INPUT_DIR = '../input'
def load_bikes(file=INPUT_DIR + '/bikes.csv'):
'''
Load the bikes CSV file, converting column types
    INPUT: Filename to read (defaults to `../input/bikes.csv`)
RETURNS: Pandas dataframe containing bikes information
'''
try:
bikes_df = pd.read_csv(file,
dtype={'station_id' : np.int8,
'bikes' : np.int8,
'docks' : np.int8}
)
bikes_df['datetime'] = pd.to_datetime(bikes_df['datetime'], format='%Y-%m-%d %H:%M:%S')
return bikes_df
except OSError as e:
print('Error opening {0}. Do you need to unzip {0}.zip?'.format(file))
return None
def load_stations(file=INPUT_DIR + '/stations.csv'):
'''
Load the stations CSV file, converting column types
    INPUT: Filename to read (defaults to `../input/stations.csv`)
RETURNS: Pandas dataframe containing stations information
'''
try:
stations_df = pd.read_csv(file,
dtype={'station_id' : np.int8,
'lat' : np.float32,
'lon' : np.float32}
)
stations_df['datetime'] = pd.to_datetime(stations_df['datetime'], format='%Y-%m-%d %H:%M:%S')
return stations_df
except OSError as e:
print('Error opening {0}. Do you need to unzip {0}.zip?'.format(file))
return None
def load_weather(file=INPUT_DIR + '/weather.csv'):
'''Loads the weather CSV and converts types'''
try:
df = pd.read_csv(file)
except OSError as e:
print('Error opening {0}. Do you need to unzip {0}.zip?'.format(file))
return None
# Remove whitespace and keep min/max values
df.columns = [col.strip() for col in df.columns]
df = df[['CDT','Max TemperatureF','Min TemperatureF',
'Max Humidity', 'Min Humidity',
'Max Sea Level PressureIn', 'Min Sea Level PressureIn',
'Max Wind SpeedMPH', 'Mean Wind SpeedMPH', 'Max Gust SpeedMPH',
'PrecipitationIn', 'CloudCover', 'Events']]
# Clean up column names, drop means as they're a linear combination of max/min
df.columns = ['date', 'max_temp', 'min_temp', 'max_humidity', 'min_humidity',
'max_pressure', 'min_pressure', 'max_wind', 'min_wind', 'max_gust',
'precipitation', 'cloud_cover', 'events']
# Convert column types appropriately
df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d')
df.index = df['date']
df = df.drop('date', axis=1)
df[['max_temp', 'min_temp']] = df[['max_temp', 'min_temp']].astype(np.uint8)
df[['max_humidity', 'min_humidity']] = df[['max_humidity', 'min_humidity']].astype(np.uint8)
df[['max_wind', 'min_wind', 'max_gust']] = df[['max_wind', 'min_wind', 'max_gust']].astype(np.uint8)
# Cloud cover is a fraction of 8 -
# http://help.wunderground.com/knowledgebase/articles/129043-how-can-i-translate-the-cloud-cover-data-on-your
df['cloud_pct'] = (df['cloud_cover'].astype(np.float32) / 8.0) * 100
df = df.drop('cloud_cover', axis=1)
# Precipitation sometimes has 'T' for trace amounts of rain. Replace this with small value
# and convert to a float
# http://help.wunderground.com/knowledgebase/articles/656875-what-does-t-stand-for-on-the-rain-precipitation
df['precipitation'] = df['precipitation'].replace('T', 0.01)
df['precipitation'] = df['precipitation'].astype(np.float32)
    # Events are tricky: they're separated by hyphens, and a row can contain multiple values, not always in the same order!
events = set()
df['events'] = df['events'].replace(np.nan, 'None')
for row in df['events']:
if row is not np.nan:
line = row.split('-')
[events.add(word.lower()) for word in line]
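    # One-hot encode each event type found above into its own 0/1 column.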
for event in events:
df[event] = df['events'].apply(str.lower).str.contains(event).astype(np.uint8)
df = df.drop(['events', 'none'], axis=1)
return df
def haversine_dist(lat1, lon1, lat2, lon2, R=3961):
'''
Calculates the distance between two points in miles using the haversine formula
INPUT: lat1/lon1 and lat2/lon2 are position values
           R is an optional planet radius (defaults to 3961, roughly the Earth's radius in miles)
RETURNS: Distance between the points in miles
'''
dlon = np.radians(lon2 - lon1)
dlat = np.radians(lat2 - lat1)
lat1 = np.radians(lat1)
lat2 = np.radians(lat2)
a = (np.sin(dlat/2.0))**2 + np.cos(lat1) * np.cos(lat2) * (np.sin(dlon/2.0))**2
c = 2 * np.arctan2( np.sqrt(a), np.sqrt(1-a) )
d = R * c
return d
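# A minimal sanity check for haversine_dist (illustrative, not from the
# original source): the distance from a point to itself is zero, and the
# function is symmetric in its endpoints, e.g.
#   haversine_dist(30.0, -97.0, 30.0, -97.0) == 0.0
#   haversine_dist(30.0, -97.0, 30.5, -97.5) == haversine_dist(30.5, -97.5, 30.0, -97.0)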
def load_bike_trips():
# Sort the bikes_df dataframe by station_id first, and then datetime so we
# can use a diff() and get the changes by time for each station
bikes_df = load_bikes()
bikes_df = bikes_df.sort_values(['station_id', 'datetime']).copy()
stations = bikes_df['station_id'].unique()
# Our dataframe is grouped by station_id first now, so grab each station in
# turn and do a diff() on bikes and docks for each station individually
diff_list = list()
for station in stations:
station_diff_df = bikes_df[bikes_df['station_id'] == station].copy()
station_diff_df['bikes_diff'] = station_diff_df['bikes'].diff()
station_diff_df['docks_diff'] = station_diff_df['docks'].diff()
diff_list.append(station_diff_df)
# Concatenate the station dataframes back together into a single one.
# Make sure we didn't lose any rows in the process (!)
bikes_diff_df = pd.concat(diff_list)
# The first row of each station-wise diff is filled with NaNs, store a 0 in these fields
# then we can convert the data type from floats to int8s
bikes_diff_df.fillna(0, inplace=True)
bikes_diff_df[['bikes_diff', 'docks_diff']] = bikes_diff_df[['bikes_diff', 'docks_diff']].astype(np.int8)
bikes_diff_df.index = bikes_diff_df['datetime']
bikes_diff_df.drop('datetime', axis=1, inplace=True)
assert(bikes_df.shape[0] == bikes_diff_df.shape[0])
bike_trips_df = bikes_diff_df
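    # Checkouts are negative `bikes_diff` values (bikes leaving a station);
    # clamp positive diffs to zero, then take absolute values.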
bike_trips_df['checkouts'] = bike_trips_df['bikes_diff']
bike_trips_df.loc[bike_trips_df['checkouts'] > 0, 'checkouts'] = 0
bike_trips_df['checkouts'] = bike_trips_df['checkouts'].abs()
# Conversely, checkins are positive `bikes_diff` values
bike_trips_df['checkins'] = bike_trips_df['bikes_diff']
bike_trips_df.loc[bike_trips_df['checkins'] < 0, 'checkins'] = 0
bike_trips_df['checkins'] = bike_trips_df['checkins'].abs()
    # Might want to use the sum of checkouts and checkins to find the "busiest" stations
bike_trips_df['totals'] = bike_trips_df['checkouts'] + bike_trips_df['checkins']
return bike_trips_df
def load_daily_rentals(all_stations=False):
bike_trips_df = load_bike_trips()
daily_bikes_df = bike_trips_df.copy()
if not all_stations:
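        # Rows with station_id >= 49 are excluded by default; the cutoff is
        # not documented in this file (presumably special or test stations).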
daily_bikes_df = daily_bikes_df[daily_bikes_df['station_id'] < 49]
daily_bikes_df = daily_bikes_df.reset_index()
daily_bikes_df = daily_bikes_df[['datetime', 'checkouts']]
daily_bikes_df.columns = ['date', 'rentals']
daily_bikes_df = daily_bikes_df.groupby('date').sum()
daily_bikes_df = daily_bikes_df.resample('1D').sum()
return daily_bikes_df
| [
"numpy.radians",
"pandas.read_csv",
"numpy.sin",
"pandas.to_datetime",
"numpy.cos",
"pandas.concat",
"numpy.sqrt"
] | [((2713, 2758), 'pandas.to_datetime', 'pd.to_datetime', (["df['date']"], {'format': '"""%Y-%m-%d"""'}), "(df['date'], format='%Y-%m-%d')\n", (2727, 2758), True, 'import pandas as pd\n'), ((4587, 4610), 'numpy.radians', 'np.radians', (['(lon2 - lon1)'], {}), '(lon2 - lon1)\n', (4597, 4610), True, 'import numpy as np\n'), ((4622, 4645), 'numpy.radians', 'np.radians', (['(lat2 - lat1)'], {}), '(lat2 - lat1)\n', (4632, 4645), True, 'import numpy as np\n'), ((4657, 4673), 'numpy.radians', 'np.radians', (['lat1'], {}), '(lat1)\n', (4667, 4673), True, 'import numpy as np\n'), ((4685, 4701), 'numpy.radians', 'np.radians', (['lat2'], {}), '(lat2)\n', (4695, 4701), True, 'import numpy as np\n'), ((5812, 5832), 'pandas.concat', 'pd.concat', (['diff_list'], {}), '(diff_list)\n', (5821, 5832), True, 'import pandas as pd\n'), ((380, 468), 'pandas.read_csv', 'pd.read_csv', (['file'], {'dtype': "{'station_id': np.int8, 'bikes': np.int8, 'docks': np.int8}"}), "(file, dtype={'station_id': np.int8, 'bikes': np.int8, 'docks':\n np.int8})\n", (391, 468), True, 'import pandas as pd\n'), ((635, 699), 'pandas.to_datetime', 'pd.to_datetime', (["bikes_df['datetime']"], {'format': '"""%Y-%m-%d %H:%M:%S"""'}), "(bikes_df['datetime'], format='%Y-%m-%d %H:%M:%S')\n", (649, 699), True, 'import pandas as pd\n'), ((1132, 1222), 'pandas.read_csv', 'pd.read_csv', (['file'], {'dtype': "{'station_id': np.int8, 'lat': np.float32, 'lon': np.float32}"}), "(file, dtype={'station_id': np.int8, 'lat': np.float32, 'lon':\n np.float32})\n", (1143, 1222), True, 'import pandas as pd\n'), ((1399, 1466), 'pandas.to_datetime', 'pd.to_datetime', (["stations_df['datetime']"], {'format': '"""%Y-%m-%d %H:%M:%S"""'}), "(stations_df['datetime'], format='%Y-%m-%d %H:%M:%S')\n", (1413, 1466), True, 'import pandas as pd\n'), ((1775, 1792), 'pandas.read_csv', 'pd.read_csv', (['file'], {}), '(file)\n', (1786, 1792), True, 'import pandas as pd\n'), ((4711, 4729), 'numpy.sin', 'np.sin', (['(dlat / 2.0)'], {}), '(dlat / 2.0)\n', (4717, 4729), True, 'import numpy as np\n'), ((4810, 4820), 'numpy.sqrt', 'np.sqrt', (['a'], {}), '(a)\n', (4817, 4820), True, 'import numpy as np\n'), ((4822, 4836), 'numpy.sqrt', 'np.sqrt', (['(1 - a)'], {}), '(1 - a)\n', (4829, 4836), True, 'import numpy as np\n'), ((4734, 4746), 'numpy.cos', 'np.cos', (['lat1'], {}), '(lat1)\n', (4740, 4746), True, 'import numpy as np\n'), ((4749, 4761), 'numpy.cos', 'np.cos', (['lat2'], {}), '(lat2)\n', (4755, 4761), True, 'import numpy as np\n'), ((4765, 4783), 'numpy.sin', 'np.sin', (['(dlon / 2.0)'], {}), '(dlon / 2.0)\n', (4771, 4783), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 26 10:03:34 2016
@author: <NAME>
"""
import numpy as np
import os
from copy import deepcopy
import re
import json
from typing import List
class StatsParams:
"""A class that implements the automated statistics of parameter files
in text file format. A parameter file consists of a file with the parameter
names followed by spaces (or none), then a = then spaces (or none) and then
the value, which can be a float, or a string (not implemented for now).
column being the number of the iteration, the second column the value of
the observable and the third column being the error on the observable if
applicable. There is a parameter, 'ignore_col' that is used if a column
is futile, such as the iteration column. If the error column does not
exist, it is ignored. By default the program will abort if there is more
than three columns."""
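    # Example of the text format this class parses (hypothetical values):
    #   beta = 10.0
    #   mu = -1.5
    #   U = 8.0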
params_files_default=["params"]
# One list per element in params_files
params_names_l_default=[["beta", "mu", "tp", "U", "n", "S", "delta", "weightR",
"weightI", "EGreen", "EHyb", "EObs", "theta"]]
def __init__(self, params_files=params_files_default, params_names_l=params_names_l_default,
ext="", iter_start=1, in_dir=os.getcwd(),
warning_only=True):
"""Initialize the StatsParams object.
Args:
Keywords Args:
            params_files (list||str): the parameter files in a list or a single string for one file
            params_names_l (list): one list of parameter names per file in params_files
            ext: the extension of the parameter files
            iter_start: the iteration from which to start the statistics
            in_dir (str): the dir in which the parameter files are found.
            warning_only: the boolean controlling if only warnings are issued if
                the files are not found
Returns:
Raises:
"""
# check the existence of work dir
self.in_dir = os.path.abspath(in_dir)
assert(os.path.exists(self.in_dir)), "Ayaya, in_dir does not exist."
        # create a list of the params_files and of the params_names_l
self.params_files = params_files if isinstance(params_files, list) else [params_files]
self.params_names_l = params_names_l if isinstance(params_names_l[0], list) else [params_names_l]
self.ext = ext
self.iter_start = iter_start
self.warning_only = warning_only # make the program continue if files don't
# exist, but give warning message
files = [file + str(self.iter_start) + self.ext for file in self.params_files]
self.check_sanity(files, params_names_l, self.warning_only)
self.means = None # set by mean()
self.stds = None # set by std()
self.means_dict = None
self.stds_dict = None
@staticmethod
def find_value(param, fin_s):
""" """
pattern = re.compile("""^(?:\s*)(?:""" + param + """
\s*) # match from zero to any number of whitespaces
= # match a =
(?:\s*)? # match from zero to any number of whitespaces
([-+]?(?:\d+)?(?:\.\d*)?(?:[eE][-+]?\d+)?)
""", re.VERBOSE | re.MULTILINE)
try:
value: float = float(re.search(pattern, fin_s).groups()[0])
except AttributeError as ae:
print("\n Ayayya ", param, "is not a float or not found")
print("Attribute error: {0}".format(ae))
raise
return value
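    # e.g. StatsParams.find_value("beta", "beta = 10.0\nmu = -1.5\n") returns 10.0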
def read_file(self, file_name, params_names, delimiter="="):
"""reads a files and their contents in a numpy array
Args:
Keywords Args:
file_name: the to be read into a numpy array
Returns:
(data, file_exists):
"""
file_exists = os.path.isfile(file_name)
if file_exists:
with open (file_name) as fin:
fin_s = fin.read()
data: List[float] = []
for param in params_names:
value: float = self.find_value(param, fin_s)
data.append(value)
else:
data = None
return (np.array(data), file_exists)
def check_sanity(self, files, params_names_l, warning_only):
"""Check if the constructing attributes are sain"""
assert(len(files) == len(params_names_l)), "ayays, params_files and params_names_l do not have same length"
        files_tmp = []
        params_files_tmp = []
        params_names_l_kept = []
        for (i, file) in enumerate(files):
            file_path = os.path.join(self.in_dir, file)
            file_path_exists = os.path.isfile(file_path)
            if file_path_exists:
                files_tmp.append(file)
                params_files_tmp.append(self.params_files[i])
                params_names_l_kept.append(params_names_l[i])
            elif warning_only:
                print("\n Warning, file ", file, " does not exist")
                print("")
            else:
                assert(file_path_exists), "Ayaya, file does not exist"
        # rebuild the kept lists instead of deleting by index while iterating,
        # which would shift positions when more than one file is missing
        files = files_tmp
        self.params_files = params_files_tmp
        params_names_l = params_names_l_kept
params_names_l_tmp = []
for (params_names, file) in zip(params_names_l, files):
params_names_tmp= []
with open(os.path.join(self.in_dir, file)) as fin:
file_s = fin.read()
for param in params_names:
if re.search(param, file_s):
params_names_tmp.append(param)
elif warning_only:
continue
else:
raise IOError("parameter " + param + " not found in file", file,". Put warning only if you dont care....")
params_names_l_tmp.append(params_names_tmp)
self.params_names_l = params_names_l_tmp
def mean(self):
"""Computes the means of the params_files."""
#The means of the parameters
means = [] # a list of lists (each element of the list consists of the mean of the params_names for each param_file)
means_dict = [] # a list of dictionaries (the first dictionray is for the first params_file and its associated params_names, etc)
for (params_names, params_file) in zip(self.params_names_l, self.params_files):
cpt = 0
file_name = os.path.join(self.in_dir, params_file + str(self.iter_start) + self.ext)
# print(file_name)
(data, file_exists) = self.read_file(file_name, params_names)
if file_exists:
mean = np.zeros(data.shape)
while(file_exists):
mean += data
cpt += 1
# print("cpt ", cpt)
# print(array_file + middle_file + str(self.iter_start + cpt) + self.ext)
file_name = os.path.join(self.in_dir, params_file + str(self.iter_start + cpt) + self.ext)
(data, file_exists) = self.read_file(file_name, params_names)
mean /= cpt
mean_dict = {param_name:mean_value for (param_name, mean_value) in zip(params_names, mean)}
# print("MEAN ", mean)
means_dict.append(mean_dict)
means.append(mean)
self.means = means
self.means_dict = means_dict
#print(means)
def std(self):
"""Compute the std errors of the observables and their error if applicable"""
if self.means is None:
self.mean()
stds = [] # a list of lists
stds_dict = [] # a list of dictionaries
for (i, param_file, params_names) in zip(range(len(self.params_files)), self.params_files, self.params_names_l):
#for (j, params_names) in enumerate(self.params_names_l[i]):
cpt = 0
file_name = os.path.join(self.in_dir, param_file + str(self.iter_start) + self.ext)
(data, file_exists) = self.read_file(file_name, params_names)
if file_exists:
std = np.zeros(data.shape)
while(file_exists):
mean = self.means[i]
#print("mean ", mean)
std += np.power(data - mean, 2.0)
#print("data ", data)
#print("std " , std)
cpt += 1
file_name = os.path.join(self.in_dir, param_file + str(self.iter_start + cpt) + self.ext)
#print("file_name ", file_name)
(data, file_exists) = self.read_file(file_name, params_names)
#print("std final ", std)
std /= cpt
std = np.sqrt(std)
stds.append(std)
#print("stds list ", stds)
std_dict = {param_name:std_value for (param_name, std_value) in zip (params_names, std)}
stds_dict.append(std_dict)
#print("stds before exit ", stds)
#print("self.stds ", stds)
self.stds = stds
self.stds_dict = stds_dict
def write_results(self, out_dir="Results", file_out="test_json_params.json"):
""" """
# First concatenate the mean and the std of each parameter in the same dictionary
cpt = 0
#print("IN write results\n")
#print(self.stds_dict)
for (mean_dict, std_dict) in zip(self.means_dict, self.stds_dict):
out_dict = dict()
for key in mean_dict:
out_dict[key] = [mean_dict[key], std_dict[key]]
file_result = file_out.split(".")[0] + str(cpt) + "." + file_out.split(".")[1]
with open(os.path.join(out_dir, file_result), 'w') as fout:
json.dump(out_dict, fout, indent=4)
cpt += 1
| [
"json.dump",
"os.path.abspath",
"os.getcwd",
"numpy.power",
"os.path.exists",
"numpy.zeros",
"os.path.isfile",
"numpy.array",
"re.search",
"numpy.sqrt",
"os.path.join",
"re.compile"
] | [((1366, 1377), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1375, 1377), False, 'import os\n'), ((2123, 2146), 'os.path.abspath', 'os.path.abspath', (['in_dir'], {}), '(in_dir)\n', (2138, 2146), False, 'import os\n'), ((2162, 2189), 'os.path.exists', 'os.path.exists', (['self.in_dir'], {}), '(self.in_dir)\n', (2176, 2189), False, 'import os\n'), ((3124, 3471), 're.compile', 're.compile', (['(\'^(?:\\\\s*)(?:\' + param +\n """\n \\\\s*) # match from zero to any number of whitespaces\n = # match a =\n (?:\\\\s*)? # match from zero to any number of whitespaces\n ([-+]?(?:\\\\d+)?(?:\\\\.\\\\d*)?(?:[eE][-+]?\\\\d+)?)\n """\n )', '(re.VERBOSE | re.MULTILINE)'], {}), '(\'^(?:\\\\s*)(?:\' + param +\n """\n \\\\s*) # match from zero to any number of whitespaces\n = # match a =\n (?:\\\\s*)? # match from zero to any number of whitespaces\n ([-+]?(?:\\\\d+)?(?:\\\\.\\\\d*)?(?:[eE][-+]?\\\\d+)?)\n """\n , re.VERBOSE | re.MULTILINE)\n', (3134, 3471), False, 'import re\n'), ((4139, 4164), 'os.path.isfile', 'os.path.isfile', (['file_name'], {}), '(file_name)\n', (4153, 4164), False, 'import os\n'), ((4549, 4563), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (4557, 4563), True, 'import numpy as np\n'), ((4929, 4960), 'os.path.join', 'os.path.join', (['self.in_dir', 'file'], {}), '(self.in_dir, file)\n', (4941, 4960), False, 'import os\n'), ((4992, 5017), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (5006, 5017), False, 'import os\n'), ((5710, 5734), 're.search', 're.search', (['param', 'file_s'], {}), '(param, file_s)\n', (5719, 5734), False, 'import re\n'), ((6882, 6902), 'numpy.zeros', 'np.zeros', (['data.shape'], {}), '(data.shape)\n', (6890, 6902), True, 'import numpy as np\n'), ((8415, 8435), 'numpy.zeros', 'np.zeros', (['data.shape'], {}), '(data.shape)\n', (8423, 8435), True, 'import numpy as np\n'), ((9088, 9100), 'numpy.sqrt', 'np.sqrt', (['std'], {}), '(std)\n', (9095, 9100), True, 'import numpy as np\n'), ((10145, 10180), 'json.dump', 'json.dump', (['out_dict', 'fout'], {'indent': '(4)'}), '(out_dict, fout, indent=4)\n', (10154, 10180), False, 'import json\n'), ((5575, 5606), 'os.path.join', 'os.path.join', (['self.in_dir', 'file'], {}), '(self.in_dir, file)\n', (5587, 5606), False, 'import os\n'), ((8584, 8610), 'numpy.power', 'np.power', (['(data - mean)', '(2.0)'], {}), '(data - mean, 2.0)\n', (8592, 8610), True, 'import numpy as np\n'), ((10079, 10113), 'os.path.join', 'os.path.join', (['out_dir', 'file_result'], {}), '(out_dir, file_result)\n', (10091, 10113), False, 'import os\n'), ((3531, 3556), 're.search', 're.search', (['pattern', 'fin_s'], {}), '(pattern, fin_s)\n', (3540, 3556), False, 'import re\n')] |
# d2y/dt2 + 2*m*o*dy/dt + o^2*y(t) = k*o^2*u(t),  y(t=0) = 0 & dy(t=0)/dt = 0
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.signal import step
from scipy.signal import TransferFunction as tf
from scipy.signal import StateSpace as ss
#########################
# SIMULATION'S SETTINGS #
#########################
k, m, o = 1, 1, 1.5  # DC gain, damping ratio, natural frequency (rad/s)
############################
# ODE: NUMERIC INTEGRATION #
############################
# d2y/dt2 + 2*m*o*dy/dt + o^2*y(t) = k*o^2*u(t)  <===>  d2y/dt2 = -2*m*o*dy/dt - o^2*y(t) + k*o^2*u(t)
def mySys(x, t):
u = 1
dotx = [x[1], -o**2*x[0]-2*m*o*x[1] + k*o**2*u]
return dotx
tspan = np.linspace(0.0, 10.0, 100)
vect = odeint(mySys, [0, 0], tspan)
y1 = vect[:, 0]
######
# TF #
######
hTF = tf([k*o**2], [1, 2*m*o, o**2])
_, y2 = step(hTF, T=tspan)
######
# SS #
######
A, B, C, D = np.array([[0, 1], [-o**2, -2*m*o]]), np.array([[0], [k*o**2]]), np.array([[1, 0]]), np.array([[0]])
hSS = ss(A, B, C, D)
_, y3 = step(hSS, T=tspan)
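# Sanity check: the ODE, TF and SS forms describe the same plant, so their
# step responses should agree up to numerical tolerance
assert np.allclose(y1, y2, atol=1e-2) and np.allclose(y1, y3, atol=1e-2)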
plt.plot(tspan, y1, 'r--', linewidth=2, label='ODE')
plt.plot(tspan, y2, 'g--', linewidth=2, label='TF')
plt.plot(tspan, y3, 'b--', linewidth=2, label='SS')
plt.title('Step response of 2nd order LTI plant')
plt.xlabel('t (sec)')
plt.ylabel('y(t)')
plt.grid()
plt.legend(loc='best')
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"scipy.signal.step",
"matplotlib.pyplot.plot",
"scipy.integrate.odeint",
"matplotlib.pyplot.legend",
"scipy.signal.StateSpace",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"scipy.signal.TransferFu... | [((663, 690), 'numpy.linspace', 'np.linspace', (['(0.0)', '(10.0)', '(100)'], {}), '(0.0, 10.0, 100)\n', (674, 690), True, 'import numpy as np\n'), ((698, 726), 'scipy.integrate.odeint', 'odeint', (['mySys', '[0, 0]', 'tspan'], {}), '(mySys, [0, 0], tspan)\n', (704, 726), False, 'from scipy.integrate import odeint\n'), ((772, 812), 'scipy.signal.TransferFunction', 'tf', (['[k * o ** 2]', '[1, 2 * m * o, o ** 2]'], {}), '([k * o ** 2], [1, 2 * m * o, o ** 2])\n', (774, 812), True, 'from scipy.signal import TransferFunction as tf\n'), ((811, 829), 'scipy.signal.step', 'step', (['hTF'], {'T': 'tspan'}), '(hTF, T=tspan)\n', (815, 829), False, 'from scipy.signal import step\n'), ((971, 985), 'scipy.signal.StateSpace', 'ss', (['A', 'B', 'C', 'D'], {}), '(A, B, C, D)\n', (973, 985), True, 'from scipy.signal import StateSpace as ss\n'), ((994, 1012), 'scipy.signal.step', 'step', (['hSS'], {'T': 'tspan'}), '(hSS, T=tspan)\n', (998, 1012), False, 'from scipy.signal import step\n'), ((1014, 1066), 'matplotlib.pyplot.plot', 'plt.plot', (['tspan', 'y1', '"""r--"""'], {'linewidth': '(2)', 'label': '"""ODE"""'}), "(tspan, y1, 'r--', linewidth=2, label='ODE')\n", (1022, 1066), True, 'import matplotlib.pyplot as plt\n'), ((1067, 1118), 'matplotlib.pyplot.plot', 'plt.plot', (['tspan', 'y2', '"""g--"""'], {'linewidth': '(2)', 'label': '"""TF"""'}), "(tspan, y2, 'g--', linewidth=2, label='TF')\n", (1075, 1118), True, 'import matplotlib.pyplot as plt\n'), ((1119, 1170), 'matplotlib.pyplot.plot', 'plt.plot', (['tspan', 'y3', '"""b--"""'], {'linewidth': '(2)', 'label': '"""SS"""'}), "(tspan, y3, 'b--', linewidth=2, label='SS')\n", (1127, 1170), True, 'import matplotlib.pyplot as plt\n'), ((1172, 1221), 'matplotlib.pyplot.title', 'plt.title', (['"""Step response of 2nd order LTI plant"""'], {}), "('Step response of 2nd order LTI plant')\n", (1181, 1221), True, 'import matplotlib.pyplot as plt\n'), ((1222, 1243), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t (sec)"""'], {}), "('t (sec)')\n", (1232, 1243), True, 'import matplotlib.pyplot as plt\n'), ((1244, 1262), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y(t)"""'], {}), "('y(t)')\n", (1254, 1262), True, 'import matplotlib.pyplot as plt\n'), ((1263, 1273), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1271, 1273), True, 'import matplotlib.pyplot as plt\n'), ((1274, 1296), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1284, 1296), True, 'import matplotlib.pyplot as plt\n'), ((1297, 1307), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1305, 1307), True, 'import matplotlib.pyplot as plt\n'), ((865, 906), 'numpy.array', 'np.array', (['[[0, 1], [-o ** 2, -2 * m * o]]'], {}), '([[0, 1], [-o ** 2, -2 * m * o]])\n', (873, 906), True, 'import numpy as np\n'), ((902, 931), 'numpy.array', 'np.array', (['[[0], [k * o ** 2]]'], {}), '([[0], [k * o ** 2]])\n', (910, 931), True, 'import numpy as np\n'), ((929, 947), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (937, 947), True, 'import numpy as np\n'), ((949, 964), 'numpy.array', 'np.array', (['[[0]]'], {}), '([[0]])\n', (957, 964), True, 'import numpy as np\n')] |
from typing import List, Tuple, Dict
import numpy as np
import tensorflow as tf
class String2Tensor:
_instance: 'String2Tensor' = None
def __init__(self, node_label_max_chars: int, alphabet_string: str):
self._node_label_max_chars = node_label_max_chars
# "0" is PAD, "1" is UNK
self._alphabet_dict: Dict[str, int] = {char: idx + 2 for (idx, char) in enumerate(alphabet_string)}
self._alphabet_dict["PAD"] = 0
self._alphabet_dict["UNK"] = 1
@staticmethod
def configure_default(node_label_max_chars: int, alphabet_string: str):
String2Tensor._instance = String2Tensor(node_label_max_chars, alphabet_string)
@staticmethod
def get_default() -> 'String2Tensor':
if String2Tensor._instance is None:
raise RuntimeError("Default instance not configured")
return String2Tensor._instance
@staticmethod
def char_tensors_to_unique_indexed(string_tensor: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
node_label_chars_unique, node_label_chars_indices = np.unique(string_tensor, axis=0, return_inverse=True)
return (
tf.convert_to_tensor(node_label_chars_unique, dtype=tf.uint8),
tf.convert_to_tensor(node_label_chars_indices, dtype=tf.int32)
)
def strings_to_tensor(self, strings: List[Tuple[int, str]]) -> tf.Tensor:
node_label_chars = np.zeros(shape=(len(strings), self._node_label_max_chars), dtype=np.uint8)
for node, label in strings:
for (char_idx, label_char) in enumerate(label[:self._node_label_max_chars].lower()):
node_label_chars[int(node), char_idx] = self._alphabet_dict.get(label_char, self._alphabet_dict["UNK"])
return tf.convert_to_tensor(node_label_chars)
def get_node_label_max_chars(self) -> int:
return self._node_label_max_chars
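# Minimal usage sketch (the alphabet and labels below are hypothetical):
#   String2Tensor.configure_default(16, "abcdefghijklmnopqrstuvwxyz0123456789_")
#   s2t = String2Tensor.get_default()
#   t = s2t.strings_to_tensor([(0, "getValue"), (1, "set_flag")])  # shape (2, 16), dtype uint8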
| [
"tensorflow.convert_to_tensor",
"numpy.unique"
] | [((1091, 1144), 'numpy.unique', 'np.unique', (['string_tensor'], {'axis': '(0)', 'return_inverse': '(True)'}), '(string_tensor, axis=0, return_inverse=True)\n', (1100, 1144), True, 'import numpy as np\n'), ((1786, 1824), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['node_label_chars'], {}), '(node_label_chars)\n', (1806, 1824), True, 'import tensorflow as tf\n'), ((1176, 1237), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['node_label_chars_unique'], {'dtype': 'tf.uint8'}), '(node_label_chars_unique, dtype=tf.uint8)\n', (1196, 1237), True, 'import tensorflow as tf\n'), ((1252, 1314), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['node_label_chars_indices'], {'dtype': 'tf.int32'}), '(node_label_chars_indices, dtype=tf.int32)\n', (1272, 1314), True, 'import tensorflow as tf\n')] |
"""
create a function that returns True if vertex i and vertex j
are connected in the graph represented by the input adjacency matrix A
"""
import numpy as np
def isConnected(A: np.ndarray, i: int, j: int) -> bool:
paths = A # initialize the paths matrix to adjacency matrix A
number_vertices = A.shape[0] # find the number of vertices in the graph
number_edges = np.sum(A) / 2 # find the number of edges in the graph
# if node vi and vj are adjacent, return True
if paths[i-1][j-1] > 0:
print(f'Vertex {i} and vertex {j} are adjacent.')
return True
else:
# run the loop until we find a path
for pathLength in range(2, number_vertices):
            paths = np.dot(paths, A)  # paths is now A**pathLength; its (i-1, j-1) entry counts walks of that length
if paths[i - 1][j - 1] > 0:
print(f'There is a path with {pathLength} edges from vertex {i} to vertex {j}.')
return True
        # the loop found no path of any length up to number_vertices - 1,
        # so the vertices are not connected
        print(f'There are no paths from vertex {i} to vertex {j}.')
        return False
def main():
A1 = np.array([[0, 1, 1, 0, 1, 0], [1, 0, 1, 1, 0, 1],
[1, 1, 0, 1, 1, 0], [0, 1, 1, 0, 1, 0],
[1, 0, 1, 1, 0, 0], [0, 1, 0, 0, 0, 0]])
print(isConnected(A1, 1, 4))
print(isConnected(A1, 2, 3))
print(isConnected(A1, 5, 6))
A2 = np.array([[0, 1, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1],
[0, 0, 0, 1, 1, 0], [0, 0, 1, 0, 1, 0],
[0, 0, 1, 1, 0, 0], [0, 1, 0, 0, 0, 0]])
print(isConnected(A2, 1, 6))
print(isConnected(A2, 2, 5))
print(isConnected(A2, 1, 4))
"""main() # test the code"""
| [
"numpy.dot",
"numpy.array",
"numpy.sum"
] | [((1227, 1361), 'numpy.array', 'np.array', (['[[0, 1, 1, 0, 1, 0], [1, 0, 1, 1, 0, 1], [1, 1, 0, 1, 1, 0], [0, 1, 1, 0, 1,\n 0], [1, 0, 1, 1, 0, 0], [0, 1, 0, 0, 0, 0]]'], {}), '([[0, 1, 1, 0, 1, 0], [1, 0, 1, 1, 0, 1], [1, 1, 0, 1, 1, 0], [0, 1,\n 1, 0, 1, 0], [1, 0, 1, 1, 0, 0], [0, 1, 0, 0, 0, 0]])\n', (1235, 1361), True, 'import numpy as np\n'), ((1512, 1646), 'numpy.array', 'np.array', (['[[0, 1, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0, 1, 0, 1,\n 0], [0, 0, 1, 1, 0, 0], [0, 1, 0, 0, 0, 0]]'], {}), '([[0, 1, 0, 0, 0, 0], [1, 0, 0, 0, 0, 1], [0, 0, 0, 1, 1, 0], [0, 0,\n 1, 0, 1, 0], [0, 0, 1, 1, 0, 0], [0, 1, 0, 0, 0, 0]])\n', (1520, 1646), True, 'import numpy as np\n'), ((407, 416), 'numpy.sum', 'np.sum', (['A'], {}), '(A)\n', (413, 416), True, 'import numpy as np\n'), ((759, 775), 'numpy.dot', 'np.dot', (['paths', 'A'], {}), '(paths, A)\n', (765, 775), True, 'import numpy as np\n')] |
# -*- coding: UTF-8 -*-
import re
import numpy as np
import pickle
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.metrics import f1_score, accuracy_score
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
import os
from sklearn import svm
from nltk.tokenize import TweetTokenizer
import string
from nltk.stem.porter import PorterStemmer
import nltk
import spacy
import emoji
from sklearn.model_selection import cross_val_score, KFold
stemmer = PorterStemmer()
stopset = set(list(string.punctuation))
class DataProcessor(object):
n_features = 1000
n_lsi = 100
random_state = 42
n_iter = 100
n_train = 3834
n_list = [80, 100, 120]
def __init__(self):
self.ROOT_DIR = os.path.dirname(__file__)
self.ROOT_DIR = os.path.abspath(os.path.join(self.ROOT_DIR, os.pardir)) + "/"
self.embedding_model = None
self.normalisation_dict = dict()
self.positive_set = set()
self.negative_set = set()
self.tokenizer = TweetTokenizer()
self.clusters = {}
self.cluster_word_count = {}
self.n_clusters = {}
for n in self.n_list:
self.load_brown_clusters(n)
def load_brown_clusters(self, n_cluster):
self.clusters[n_cluster] = {}
self.cluster_word_count[n_cluster] = {}
ids = set()
id_map = {}
file_name = self.ROOT_DIR + "data/brownclusters/processed_data-c" + str(n_cluster) + "-p1.out/paths"
file = open(file_name)
for line in file:
elements = line.strip().split("\t")
if len(elements) == 3:
cluster_id = int(elements[0], 2)
word = elements[1]
freq = int(elements[2])
before = len(ids)
ids.add(cluster_id)
after = len(ids)
if after > before:
id_map[cluster_id] = len(ids) - 1
cluster_id = id_map[cluster_id]
if word not in self.clusters[n_cluster]:
self.clusters[n_cluster][word] = {}
self.clusters[n_cluster][word][cluster_id] = freq
if cluster_id not in self.cluster_word_count[n_cluster]:
self.cluster_word_count[n_cluster][cluster_id] = 0
self.cluster_word_count[n_cluster][cluster_id] += freq
self.n_clusters[n_cluster] = len(ids)
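    # get_brown_cluster_vector builds a histogram over Brown clusters for the
    # words of a tweet, normalised by each cluster's total word count.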
def get_brown_cluster_vector(self, tweet, n_cluster):
tweet = re.split("\\s+", tweet)
output = np.zeros(self.n_clusters[n_cluster])
for word in tweet:
if word in self.clusters[n_cluster]:
cluster_ids = self.clusters[n_cluster][word]
for id in cluster_ids:
output[id] += cluster_ids[id]
for id in range(len(output)):
output[id] = output[id] * 1.0/self.cluster_word_count[n_cluster][id]
return output
@staticmethod
def remove_tweet_tags(tweet_str):
tweet_str = tweet_str.replace("taggeduser", "").replace("url", "").replace("number", "")
tweet_str = re.sub("#", " ", tweet_str)
return re.sub("\\s+", " ", tweet_str).strip()
@staticmethod
def normalise_hashtag(hashtag):
word = ""
output= []
for char in hashtag:
if char.isupper() or char == "#":
if len(word) > 0:
output.append(word)
word = ""
if char != "#":
word += char
output.append(word)
return " ".join(output)
def extract_pos_tags(self, tweet_str):
tweet_str = self.remove_tweet_tags(tweet_str)
tokenizing_text = self.tokenizer.tokenize(tweet_str)
pos_tags = nltk.pos_tag(tokenizing_text)
output = [tuple[1] for tuple in pos_tags]
return np.array(output)
def process_data(self, train_file, test_file, load_saved_data=True):
if not train_file.startswith(self.ROOT_DIR):
train_file = self.ROOT_DIR + train_file
if not test_file.startswith(self.ROOT_DIR):
test_file = self.ROOT_DIR + test_file
saved_training_data_path = self.ROOT_DIR + "data/saved/training_data.pkl"
saved_test_data_path = self.ROOT_DIR + "data/saved/test_data.pkl"
if load_saved_data:
if os.path.exists(saved_training_data_path) and \
os.path.exists(saved_test_data_path):
train_data = self.load_dict(saved_training_data_path)
test_data = self.load_dict(saved_test_data_path)
if train_data is not None and test_data is not None:
return train_data, test_data
# for training word brown clusters
processed_data_file = open(self.ROOT_DIR + "data/processed_data.txt", "w")
self.load_normalisation_dict()
self.load_sentiment_words()
self.embedding_model = spacy.load("en_core_web_md")
features = []
labels = []
text_data = []
pos_tags = []
n_train = 0
train_reader = open(train_file, 'r')
for line in train_reader:
elements = line.split("\t")
elements[2] = self.normalise_tweet(elements[2])
processed_data_file.write(elements[2].encode("utf8") + "\n")
if len(elements) == 3 and 'Label' not in elements[1]:
n_train += 1
features.append(self.process_a_tweet(elements[2]))
labels.append(int(elements[1]))
text_data.append(elements[2])
pos_tags.append(' '.join(self.extract_pos_tags(elements[2])))
train_reader.close()
test_reader = open(test_file, 'r')
for line in test_reader:
elements = line.split("\t")
elements[1] = self.normalise_tweet(elements[1])
processed_data_file.write(elements[1].encode("utf8") + "\n")
if len(elements) == 2 and 'tweet index' not in elements[1]:
features.append(self.process_a_tweet(elements[1]))
text_data.append(elements[1])
pos_tags.append(' '.join(self.extract_pos_tags(elements[1])))
test_reader.close()
processed_data_file.close()
# n-gram features for POS tags
postag_tfidf_vectorizer = TfidfVectorizer(ngram_range=(1, 1), \
max_features=self.n_features, norm='l2')
postag_tfidfs = postag_tfidf_vectorizer.fit_transform(pos_tags)
postag_tfidfs_features = postag_tfidfs.toarray()
features = np.append(features, postag_tfidfs_features, 1)
print(len(postag_tfidf_vectorizer.get_feature_names()))
del postag_tfidfs_features
# n-gram features for tweets
# character-based n-grams
ngram_vectorizer = TfidfVectorizer(analyzer='char_wb', ngram_range=(1, 3), \
max_features=self.n_features, norm='l2')
counts = ngram_vectorizer.fit_transform(text_data)
n_grams_features = counts.toarray()
features = np.append(features, n_grams_features, 1)
del n_grams_features
print(len(features[0]))
# word-based n-grams
tfidf_vectorizer = TfidfVectorizer(ngram_range=(1, 3), lowercase=False, \
max_features=self.n_features, norm='l2')
tfidfs = tfidf_vectorizer.fit_transform(text_data)
tfidfs_features = tfidfs.toarray()
features = np.append(features, tfidfs_features, 1)
print(len(features[0]))
del tfidfs_features
# LSI features
if self.n_lsi > 0:
print("Training LSI!")
svd_model = TruncatedSVD(n_components=self.n_lsi, \
algorithm='arpack', \
n_iter=self.n_iter, random_state=self.random_state)
svd_matrix = svd_model.fit_transform(tfidfs)
features = np.append(features, svd_matrix, 1)
print(len(features[0]))
del tfidfs
del svd_matrix
print("Got LSI!")
if not os.path.exists(self.ROOT_DIR + 'data/saved'):
os.makedirs(self.ROOT_DIR + 'data/saved')
train_data = {"feature": features[0:n_train], "raw_data": text_data[0:n_train], "label": labels}
test_data = {"feature": features[n_train:], "raw_data": text_data[n_train:]}
self.save_dict(train_data, saved_training_data_path)
self.save_dict(test_data, saved_test_data_path)
print("Saved data!")
return train_data, test_data
def load_normalisation_dict(self):
reader = open(self.ROOT_DIR + "data/normalisation/emnlp_dict.txt")
for line in reader:
elements = re.split("\\s+", line)
self.normalisation_dict[elements[0].strip()] = elements[1].strip()
reader.close()
reader = open(self.ROOT_DIR + "data/normalisation/Test_Set_3802_Pairs.txt")
for line in reader:
elements = line.split("\t")[1].split(" | ")
self.normalisation_dict[elements[0].strip()] = elements[1].strip()
reader.close()
def load_sentiment_words(self):
reader = open(self.ROOT_DIR + "data/sentiment/negative-words.txt")
for line in reader:
if len(line.strip()) > 0:
self.negative_set.add(line.strip())
reader.close()
reader = open(self.ROOT_DIR + "data/sentiment/positive-words.txt")
for line in reader:
if len(line.strip()) > 0:
self.positive_set.add(line.strip())
reader.close()
@staticmethod
def save_dict(data, filename):
with open(filename, 'wb') as f:
pickle.dump(data, f)
f.close()
@staticmethod
def load_dict(filename):
with open(filename, 'rb') as f:
ret_dict = pickle.load(f)
f.close()
return ret_dict
def normalise_tweet(self, tweet_str):
tweet_str = (emoji.demojize(tweet_str.decode("utf8")))
tweet_str = re.sub("\\s+", " ", re.sub("http.*?\\s", "url", tweet_str)
.replace(":", " ").replace("#", " #").replace("@", " @"))
tweet = self.tokenizer.tokenize(tweet_str)
normalised_tweet = ""
for token_str in tweet:
normalised_token_str = self.normalise_str(token_str.lower())
if "haha" in normalised_token_str:
token_str = "lol"
if token_str.startswith("@"):
normalised_tweet += "taggeduser "
elif token_str.lower().startswith("http"):
normalised_tweet += "url "
elif self.is_number(token_str):
normalised_tweet += "number "
elif token_str.startswith("#"):
normalised_tweet += token_str + " "
normalised_tweet += self.normalise_hashtag(token_str) + " "
elif normalised_token_str in self.normalisation_dict:
normalised_tweet += self.normalisation_dict[normalised_token_str] + " "
else: normalised_tweet += token_str + " "
return normalised_tweet.strip().lower()
@staticmethod
def normalise_str(str_in):
normalised_str = ""
count = 0
pre_char = None
for i in range(len(str_in)):
if i > 0:
if str_in[i] == pre_char:
count += 1
else:
count = 0
if count <= 2:
normalised_str += str_in[i]
pre_char = str_in[i]
return normalised_str
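    # e.g. normalise_str("coooool") -> "coool" (character runs are capped at three)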
@staticmethod
def is_number(s):
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
def process_a_tweet(self, tweet_str):
tweet_vector = []
if type(tweet_str) != str:
tweet_str = unicode(tweet_str).encode('utf8')
tweet_str = tweet_str.decode('utf-8')
n_token = len(re.split("\\s+", tweet_str.lower()))
embedding_vector = self.embedding_model(unicode(tweet_str))
tweet_vector.extend(embedding_vector.vector)
tweet_vector.append(self.has_irony_hashtag(tweet_str))
tweet_vector.append(self.get_hash_tag_rate(tweet_str, n_token))
tweet_vector.append(self.get_tagged_user_rate(tweet_str, n_token))
tweet_vector.append(self.get_uppercase_rate(tweet_str))
tweet_vector.extend(self.get_sentiment_word_rate(tweet_str))
for n in self.n_list:
tweet_vector.extend(self.get_brown_cluster_vector(tweet_str, n))
return tweet_vector
@staticmethod
# Return the rate of uppercase characters in a tweet
def get_uppercase_rate(tweet_str):
count = 0
for char in tweet_str:
if char.isupper():
count = count + 1
return count * 1.0/len(tweet_str)
# Return the rates of sentiment words in a tweet
def get_sentiment_word_rate(self, tweet_str):
positive_icons = ["grinning face", "beaming face with smiling eyes", "face with tears of joy",
"rolling on the floor laughing", "grinning face with big eyes",
"grinning face with smiling eyes", "grinning face with sweat", "grinning squinting face",
"winking face", "smiling face with smiling eyes", "face savoring food",
"smiling face with sunglasses", "smiling face with heart-eyes",
"smiling face with heart-shaped eyes", "face blowing a kiss", "kissing face",
"kissing face with smiling eyes", "kissing face with closed eyes", "smiling face",
"slightly smiling face", "hugging face", "star-struck", ":)", ";)", ":-)", "lol"]
negative_icons = ["frowning face", "slightly frowning face", "confounded face", "disappointed face",
"worried face", "face with steam from nose", "crying face", "loudly crying face",
"frowning face with open mouth", "anguished face", "fearful face", "weary face",
"exploding head", "grimacing face", "anxious face with sweat", "face screaming in fear",
"flushed face", "zany face", "dizzy face", "pouting face",
"angry face", "face with symbols on mouth" ,":(", ";(", ":-(", "-.-"]
sick_icons = ["face with medical mask", "face with thermometer", "face with head-bandage", "nauseated face",
"face vomiting", "sneezing face"]
tweet = re.split("\\s+", tweet_str.lower())
n_positive_words = 0.0
n_negative_words = 0.0
n_not_words = 0.0
n_pos_icon = 0.0
n_neg_icon = 0.0
n_sick_icon = 0.0
n_icon = 0.0
for token in tweet:
if token in self.positive_set:
n_positive_words += 1
elif token in self.negative_set:
n_negative_words += 1
elif "not" in token or "n't" in token:
n_not_words += 1
elif "_" in token:
icon = token.replace(":", "").replace("_", " ")
if "smiling" in icon or icon in positive_icons:
n_pos_icon += 1
elif icon in negative_icons:
n_neg_icon += 1
elif icon in sick_icons:
n_sick_icon += 1
else:
n_icon += 1
return n_positive_words/len(tweet), n_negative_words/len(tweet), \
n_not_words/len(tweet), n_pos_icon/len(tweet),\
n_neg_icon/len(tweet), n_sick_icon, n_icon/len(tweet)
def get_tagged_user_rate(self, tweet_str, n_token):
results = re.findall("@", tweet_str)
return len(results)*1.0/n_token
def get_hash_tag_rate(self, tweet_str, n_token):
results = re.findall("#", tweet_str)
return len(results) * 1.0 / n_token
@staticmethod
def has_irony_hashtag(str):
regex = re.compile("#not[\W]*$|#not\\s+?#|#not\\s*?\\bhttp\\b|#irony|#sarcasm|#fake|#naah")
if regex.search(str.lower()):
return 1.0
return 0.0
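    # e.g. returns 1.0 for "had a great time #not" or "lovely weather #irony", 0.0 otherwise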
@staticmethod
def add_tweet_text(predicted_file, input_file):
tweet_file = open(input_file, "r")
tweet_map = {}
for line in tweet_file:
if "tweet index" not in line:
elements = line.split("\t")
tweet_map[int(elements[0])] = elements[1].strip()
tweet_file.close()
label_file = open(predicted_file, "r")
out_file = open(predicted_file + ".processed", "w")
for line in label_file:
elements = line.split("\t")
id = int(elements[0])
out_file.write(line.strip() + "\t" + tweet_map[id] + "\n")
out_file.close()
label_file.close()
@staticmethod
def lg_predict(train_data, valid_data, test_data, task_name="A"):
train_features = train_data["feature"]
train_labels = train_data["label"]
valid_features = valid_data["feature"]
valid_labels = valid_data["label"]
test_features = test_data["feature"]
lr = LogisticRegression(max_iter=100)
lr.fit(train_features, train_labels)
pred_valid_labels = lr.predict(valid_features)
if task_name == "A":
f1_valid = f1_score(valid_labels, pred_valid_labels, pos_label=1)
else: f1_valid = f1_score(valid_labels, pred_valid_labels, average="macro")
print("F1 on valid : %f" % f1_valid)
return lr.predict(train_features), lr.predict(valid_features), lr.predict(test_features), f1_valid
@staticmethod
def rg_predict(train_data, valid_data, test_data, task_name="A"):
train_features = train_data["feature"]
train_labels = train_data["label"]
valid_features = valid_data["feature"]
valid_labels = valid_data["label"]
test_features = test_data["feature"]
rg = RidgeClassifier(max_iter=100)
rg.fit(train_features, train_labels)
pred_valid_labels = rg.predict(valid_features)
if task_name == "A":
f1_valid = f1_score(valid_labels, pred_valid_labels, pos_label=1)
else: f1_valid = f1_score(valid_labels, pred_valid_labels, average="macro")
print("F1 on valid : %f" % f1_valid)
return rg.predict(train_features), rg.predict(valid_features), rg.predict(test_features), f1_valid
@staticmethod
def svm_predict(train_data, valid_data, test_data, task_name="A"):
train_features = train_data["feature"]
train_labels = train_data["label"]
valid_features = valid_data["feature"]
valid_labels = valid_data["label"]
test_features = test_data["feature"]
clf = svm.LinearSVC()
clf.fit(train_features, train_labels)
pred_valid_labels = clf.predict(valid_features)
if task_name == "A":
f1_valid = f1_score(valid_labels, pred_valid_labels, pos_label=1)
else: f1_valid = f1_score(valid_labels, pred_valid_labels, average="macro")
print("F1 on valid : %f" % f1_valid)
return clf.predict(train_features), clf.predict(valid_features), clf.predict(test_features), f1_valid
@staticmethod
def split_kfolds(data, n_fold):
kf = KFold(n_splits=n_fold)
train_data =[]
valid_data = []
features = np.array(data["feature"])
labels = np.array(data["label"])
raw_data = np.array(data["raw_data"])
for train_index, valid_index in kf.split(features):
train = {"feature": features[train_index], "label": labels[train_index],
"raw_data": raw_data[train_index]}
valid = {"feature": features[valid_index], "label": labels[valid_index],
"raw_data": raw_data[valid_index]}
train_data.append(train)
valid_data.append(valid)
return train_data, valid_data
| [
"pickle.dump",
"sklearn.feature_extraction.text.TfidfVectorizer",
"unicodedata.numeric",
"sklearn.metrics.f1_score",
"pickle.load",
"os.path.join",
"sklearn.decomposition.TruncatedSVD",
"os.path.dirname",
"os.path.exists",
"spacy.load",
"numpy.append",
"re.findall",
"sklearn.svm.LinearSVC",
... | [((536, 551), 'nltk.stem.porter.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (549, 551), False, 'from nltk.stem.porter import PorterStemmer\n'), ((796, 821), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (811, 821), False, 'import os\n'), ((1078, 1094), 'nltk.tokenize.TweetTokenizer', 'TweetTokenizer', ([], {}), '()\n', (1092, 1094), False, 'from nltk.tokenize import TweetTokenizer\n'), ((2559, 2582), 're.split', 're.split', (['"""\\\\s+"""', 'tweet'], {}), "('\\\\s+', tweet)\n", (2567, 2582), False, 'import re\n'), ((2600, 2636), 'numpy.zeros', 'np.zeros', (['self.n_clusters[n_cluster]'], {}), '(self.n_clusters[n_cluster])\n', (2608, 2636), True, 'import numpy as np\n'), ((3178, 3205), 're.sub', 're.sub', (['"""#"""', '""" """', 'tweet_str'], {}), "('#', ' ', tweet_str)\n", (3184, 3205), False, 'import re\n'), ((3828, 3857), 'nltk.pos_tag', 'nltk.pos_tag', (['tokenizing_text'], {}), '(tokenizing_text)\n', (3840, 3857), False, 'import nltk\n'), ((3923, 3939), 'numpy.array', 'np.array', (['output'], {}), '(output)\n', (3931, 3939), True, 'import numpy as np\n'), ((5017, 5045), 'spacy.load', 'spacy.load', (['"""en_core_web_md"""'], {}), "('en_core_web_md')\n", (5027, 5045), False, 'import spacy\n'), ((6422, 6498), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'ngram_range': '(1, 1)', 'max_features': 'self.n_features', 'norm': '"""l2"""'}), "(ngram_range=(1, 1), max_features=self.n_features, norm='l2')\n", (6437, 6498), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((6699, 6745), 'numpy.append', 'np.append', (['features', 'postag_tfidfs_features', '(1)'], {}), '(features, postag_tfidfs_features, 1)\n', (6708, 6745), True, 'import numpy as np\n'), ((6944, 7045), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'analyzer': '"""char_wb"""', 'ngram_range': '(1, 3)', 'max_features': 'self.n_features', 'norm': '"""l2"""'}), "(analyzer='char_wb', ngram_range=(1, 3), max_features=self.\n n_features, norm='l2')\n", (6959, 7045), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((7208, 7248), 'numpy.append', 'np.append', (['features', 'n_grams_features', '(1)'], {}), '(features, n_grams_features, 1)\n', (7217, 7248), True, 'import numpy as np\n'), ((7367, 7465), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'ngram_range': '(1, 3)', 'lowercase': '(False)', 'max_features': 'self.n_features', 'norm': '"""l2"""'}), "(ngram_range=(1, 3), lowercase=False, max_features=self.\n n_features, norm='l2')\n", (7382, 7465), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((7627, 7666), 'numpy.append', 'np.append', (['features', 'tfidfs_features', '(1)'], {}), '(features, tfidfs_features, 1)\n', (7636, 7666), True, 'import numpy as np\n'), ((16166, 16192), 're.findall', 're.findall', (['"""@"""', 'tweet_str'], {}), "('@', tweet_str)\n", (16176, 16192), False, 'import re\n'), ((16305, 16331), 're.findall', 're.findall', (['"""#"""', 'tweet_str'], {}), "('#', tweet_str)\n", (16315, 16331), False, 'import re\n'), ((16443, 16532), 're.compile', 're.compile', (['"""#not[\\\\W]*$|#not\\\\s+?#|#not\\\\s*?\\\\bhttp\\\\b|#irony|#sarcasm|#fake|#naah"""'], {}), "(\n '#not[\\\\W]*$|#not\\\\s+?#|#not\\\\s*?\\\\bhttp\\\\b|#irony|#sarcasm|#fake|#naah')\n", (16453, 16532), False, 'import re\n'), ((17620, 17652), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': '(100)'}), '(max_iter=100)\n', (17638, 
17652), False, 'from sklearn.linear_model import LogisticRegression, RidgeClassifier\n'), ((18426, 18455), 'sklearn.linear_model.RidgeClassifier', 'RidgeClassifier', ([], {'max_iter': '(100)'}), '(max_iter=100)\n', (18441, 18455), False, 'from sklearn.linear_model import LogisticRegression, RidgeClassifier\n'), ((19230, 19245), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {}), '()\n', (19243, 19245), False, 'from sklearn import svm\n'), ((19765, 19787), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'n_fold'}), '(n_splits=n_fold)\n', (19770, 19787), False, 'from sklearn.model_selection import cross_val_score, KFold\n'), ((19854, 19879), 'numpy.array', 'np.array', (["data['feature']"], {}), "(data['feature'])\n", (19862, 19879), True, 'import numpy as np\n'), ((19897, 19920), 'numpy.array', 'np.array', (["data['label']"], {}), "(data['label'])\n", (19905, 19920), True, 'import numpy as np\n'), ((19940, 19966), 'numpy.array', 'np.array', (["data['raw_data']"], {}), "(data['raw_data'])\n", (19948, 19966), True, 'import numpy as np\n'), ((7837, 7951), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': 'self.n_lsi', 'algorithm': '"""arpack"""', 'n_iter': 'self.n_iter', 'random_state': 'self.random_state'}), "(n_components=self.n_lsi, algorithm='arpack', n_iter=self.\n n_iter, random_state=self.random_state)\n", (7849, 7951), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((8105, 8139), 'numpy.append', 'np.append', (['features', 'svd_matrix', '(1)'], {}), '(features, svd_matrix, 1)\n', (8114, 8139), True, 'import numpy as np\n'), ((8272, 8316), 'os.path.exists', 'os.path.exists', (["(self.ROOT_DIR + 'data/saved')"], {}), "(self.ROOT_DIR + 'data/saved')\n", (8286, 8316), False, 'import os\n'), ((8330, 8371), 'os.makedirs', 'os.makedirs', (["(self.ROOT_DIR + 'data/saved')"], {}), "(self.ROOT_DIR + 'data/saved')\n", (8341, 8371), False, 'import os\n'), ((8911, 8933), 're.split', 're.split', (['"""\\\\s+"""', 'line'], {}), "('\\\\s+', line)\n", (8919, 8933), False, 'import re\n'), ((9883, 9903), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (9894, 9903), False, 'import pickle\n'), ((10037, 10051), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10048, 10051), False, 'import pickle\n'), ((11983, 12005), 'unicodedata.numeric', 'unicodedata.numeric', (['s'], {}), '(s)\n', (12002, 12005), False, 'import unicodedata\n'), ((17806, 17860), 'sklearn.metrics.f1_score', 'f1_score', (['valid_labels', 'pred_valid_labels'], {'pos_label': '(1)'}), '(valid_labels, pred_valid_labels, pos_label=1)\n', (17814, 17860), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((17887, 17945), 'sklearn.metrics.f1_score', 'f1_score', (['valid_labels', 'pred_valid_labels'], {'average': '"""macro"""'}), "(valid_labels, pred_valid_labels, average='macro')\n", (17895, 17945), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((18608, 18662), 'sklearn.metrics.f1_score', 'f1_score', (['valid_labels', 'pred_valid_labels'], {'pos_label': '(1)'}), '(valid_labels, pred_valid_labels, pos_label=1)\n', (18616, 18662), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((18689, 18747), 'sklearn.metrics.f1_score', 'f1_score', (['valid_labels', 'pred_valid_labels'], {'average': '"""macro"""'}), "(valid_labels, pred_valid_labels, average='macro')\n", (18697, 18747), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((19401, 19455), 'sklearn.metrics.f1_score', 'f1_score', (['valid_labels', 
'pred_valid_labels'], {'pos_label': '(1)'}), '(valid_labels, pred_valid_labels, pos_label=1)\n', (19409, 19455), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((19482, 19540), 'sklearn.metrics.f1_score', 'f1_score', (['valid_labels', 'pred_valid_labels'], {'average': '"""macro"""'}), "(valid_labels, pred_valid_labels, average='macro')\n", (19490, 19540), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((862, 900), 'os.path.join', 'os.path.join', (['self.ROOT_DIR', 'os.pardir'], {}), '(self.ROOT_DIR, os.pardir)\n', (874, 900), False, 'import os\n'), ((3221, 3251), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'tweet_str'], {}), "('\\\\s+', ' ', tweet_str)\n", (3227, 3251), False, 'import re\n'), ((4423, 4463), 'os.path.exists', 'os.path.exists', (['saved_training_data_path'], {}), '(saved_training_data_path)\n', (4437, 4463), False, 'import os\n'), ((4490, 4526), 'os.path.exists', 'os.path.exists', (['saved_test_data_path'], {}), '(saved_test_data_path)\n', (4504, 4526), False, 'import os\n'), ((10245, 10283), 're.sub', 're.sub', (['"""http.*?\\\\s"""', '"""url"""', 'tweet_str'], {}), "('http.*?\\\\s', 'url', tweet_str)\n", (10251, 10283), False, 'import re\n')] |
import os
import numpy as np
import torch
import torchvision.transforms as transforms
import torch.utils.data as data
import matplotlib.pyplot as plt
from functions import *
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.metrics import accuracy_score
import pandas as pd
import pickle
# set path
data_path = r"/home/zhonsheng/LSTM_video/video-classification/UCF101-HHTSENG/jpegs_256/" # define UCF-101 RGB data path
action_name_path = r'/home/zhonsheng/LSTM_video/video-classification/CRNN/UCF101actions.pkl'
save_model_path = r"/home/zhonsheng/LSTM_video/video-classification/CRNN/CRNN_ckpt_batch25_step20_epoch150_150hour_stride2/"
# use same encoder CNN saved!
CNN_fc_hidden1, CNN_fc_hidden2 = 1024, 768
CNN_embed_dim = 512 # latent dim extracted by 2D CNN
img_x, img_y = 256, 342 # resize video 2d frame size
dropout_p = 0.0 # dropout probability
# use same decoder RNN saved!
RNN_hidden_layers = 3
RNN_hidden_nodes = 512
RNN_FC_dim = 256
# training parameters
k = 101 # number of target category
batch_size = 25
# Select which frame to begin & end in videos
begin_frame, end_frame, skip_frame = 1, 3, 1
with open(action_name_path, 'rb') as f:
action_names = pickle.load(f) # load UCF101 actions names
# convert labels -> category
le = LabelEncoder()
le.fit(action_names)
# show how many classes there are
list(le.classes_)
# convert category -> 1-hot
action_category = le.transform(action_names).reshape(-1, 1)
enc = OneHotEncoder()
enc.fit(action_category)
# # example
# y = ['HorseRace', 'YoYo', 'WalkingWithDog']
# y_onehot = labels2onehot(enc, le, y)
# y2 = onehot2labels(le, y_onehot)
actions = []
fnames = os.listdir(data_path)
all_names = []
for f in fnames:
loc1 = f.find('v_')
loc2 = f.find('_g')
actions.append(f[(loc1 + 2): loc2])
all_names.append(f)
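# e.g. a folder named "v_ApplyEyeMakeup_g08_c01" yields the action "ApplyEyeMakeup"
# ("v_" marks the start of the action name, "_g" the group suffix)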
# list all data files
all_X_list = all_names # all video file names
all_y_list = labels2cat(le, actions) # all video labels
# data loading parameters
use_cuda = torch.cuda.is_available() # check if GPU exists
device = torch.device("cuda" if use_cuda else "cpu") # use CPU or GPU
params = {'batch_size': batch_size, 'shuffle': True, 'num_workers': 4, 'pin_memory': True} if use_cuda else {}
transform = transforms.Compose([transforms.Resize([img_x, img_y]),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
selected_frames = np.arange(begin_frame, end_frame, skip_frame).tolist()
# reset data loader
all_data_params = {'batch_size': batch_size, 'shuffle': False, 'num_workers': 4, 'pin_memory': True} if use_cuda else {}
all_data_loader = data.DataLoader(Dataset_CRNN(data_path, all_X_list, all_y_list, selected_frames, transform=transform), **all_data_params)
# reload CRNN model
cnn_encoder = EncoderCNN(img_x=img_x, img_y=img_y, fc_hidden1=CNN_fc_hidden1, fc_hidden2=CNN_fc_hidden2,
drop_p=dropout_p, CNN_embed_dim=CNN_embed_dim).to(device)
rnn_decoder = DecoderRNN(CNN_embed_dim=CNN_embed_dim, h_RNN_layers=RNN_hidden_layers, h_RNN=RNN_hidden_nodes,
h_FC_dim=RNN_FC_dim, drop_p=dropout_p, num_classes=k).to(device)
cnn_encoder.load_state_dict(torch.load(os.path.join(save_model_path, 'cnn_encoder_epoch150.pth')))
rnn_decoder.load_state_dict(torch.load(os.path.join(save_model_path, 'rnn_decoder_epoch150.pth')))
print('CRNN model reloaded!')
# make all video predictions by reloaded model
print('Predicting all {} videos:'.format(len(all_data_loader.dataset)))
all_y_pred = CRNN_final_prediction([cnn_encoder, rnn_decoder], device, all_data_loader)
# write in pandas dataframe
df = pd.DataFrame(data={'filename': fnames, 'y': cat2labels(le, all_y_list), 'y_pred': cat2labels(le, all_y_pred)})
df.to_pickle(os.path.join(save_model_path, "UCF101_videos_prediction.pkl")) # save pandas dataframe
#pd.read_pickle("./all_videos_prediction.pkl")
print('video prediction finished!')
| [
"sklearn.preprocessing.OneHotEncoder",
"sklearn.preprocessing.LabelEncoder",
"torchvision.transforms.ToTensor",
"pickle.load",
"torch.cuda.is_available",
"numpy.arange",
"torch.device",
"torchvision.transforms.Normalize",
"os.path.join",
"os.listdir",
"torchvision.transforms.Resize"
] | [((1305, 1319), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1317, 1319), False, 'from sklearn.preprocessing import OneHotEncoder, LabelEncoder\n'), ((1489, 1504), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (1502, 1504), False, 'from sklearn.preprocessing import OneHotEncoder, LabelEncoder\n'), ((1686, 1707), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (1696, 1707), False, 'import os\n'), ((2034, 2059), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2057, 2059), False, 'import torch\n'), ((2109, 2152), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (2121, 2152), False, 'import torch\n'), ((1225, 1239), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1236, 1239), False, 'import pickle\n'), ((3880, 3941), 'os.path.join', 'os.path.join', (['save_model_path', '"""UCF101_videos_prediction.pkl"""'], {}), "(save_model_path, 'UCF101_videos_prediction.pkl')\n", (3892, 3941), False, 'import os\n'), ((2316, 2349), 'torchvision.transforms.Resize', 'transforms.Resize', (['[img_x, img_y]'], {}), '([img_x, img_y])\n', (2333, 2349), True, 'import torchvision.transforms as transforms\n'), ((2383, 2404), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2402, 2404), True, 'import torchvision.transforms as transforms\n'), ((2438, 2513), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (2458, 2513), True, 'import torchvision.transforms as transforms\n'), ((2535, 2580), 'numpy.arange', 'np.arange', (['begin_frame', 'end_frame', 'skip_frame'], {}), '(begin_frame, end_frame, skip_frame)\n', (2544, 2580), True, 'import numpy as np\n'), ((3323, 3380), 'os.path.join', 'os.path.join', (['save_model_path', '"""cnn_encoder_epoch150.pth"""'], {}), "(save_model_path, 'cnn_encoder_epoch150.pth')\n", (3335, 3380), False, 'import os\n'), ((3422, 3479), 'os.path.join', 'os.path.join', (['save_model_path', '"""rnn_decoder_epoch150.pth"""'], {}), "(save_model_path, 'rnn_decoder_epoch150.pth')\n", (3434, 3479), False, 'import os\n')] |
import argparse
import numpy as np
from sklearn.metrics import accuracy_score
import scipy.sparse as sps
import torch as th
import torch.optim as optim
import torch.sparse as ths
import torch.nn.functional as F
import data
import operators
import sbm
import utils
parser = argparse.ArgumentParser()
parser.add_argument('--bs', type=int)
parser.add_argument('--c-in', type=float)
parser.add_argument('--c-out', type=float)
parser.add_argument('--eps', type=float, default=1e-5)
parser.add_argument('--gpu', type=int)
parser.add_argument('--log-every', type=int)
parser.add_argument('--n-iterations', type=int)
parser.add_argument('--op', type=str)
parser.add_argument('--ds', type=str)
for x in ['inseparable_gaussian']:
parser.add_argument('--%s-args' % x.replace('_', '-'), action=__import__(x).Parse)
parser.add_argument('--network', type=str)
for x in ['gcn', 'mlp', 'sgc']:
parser.add_argument('--%s-args' % x, action=__import__(x).Parse)
parser.add_argument('--optim', type=str)
for x in ['SGD', 'Adam']:
parser.add_argument('--%s-args' % x.lower(), action=getattr(utils, 'Parse%sArgs' % x))
args = parser.parse_args()
device = th.device('cpu') if args.gpu < 0 else th.device('cuda:%d' % args.gpu)
ds = __import__(args.ds).load_dataset(**vars(getattr(args, args.ds + '_args')))
x_train, y_train, x_val, y_val, x_test, y_test = ds
mean = np.mean(x_train, 0, keepdims=True)
x_train = x_train - mean
std = np.sqrt(np.mean(np.square(x_train), 0, keepdims=True)) + args.eps
x_train = x_train / std
x_val = (x_val - mean) / std
x_test = (x_test - mean) / std
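# Standardise with training-set statistics only (args.eps guards against
# zero variance), so no information leaks from the validation/test splits.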
x = th.from_numpy(np.vstack([x_train, x_val, x_test])).float().to(device)
y = th.from_numpy(np.hstack([y_train, y_val, y_test])).long().to(device)
idx_train = th.arange(len(x_train)).to(device).to(device)
idx_val = th.arange(len(x_train), len(x_train) + len(x_val)).to(device)
idx_test = th.arange(len(x_train) + len(x_val), len(x)).to(device)
k = 2
n = len(x)
p = [th.sum(y == 0), th.sum(y == 1)] # TODO
c_in = args.c_in
c_out = args.c_out
q = np.ones([k, k]) * c_out / n
q[range(k), range(k)] *= c_in / c_out
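# q is the SBM connection-probability matrix: c_out / n off the diagonal
# (between communities) and c_in / n on it (within a community), so the
# graph is denser within communities whenever c_in > c_out.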
A, _ = sbm.generate(n, p, q)
op = sps.coo_matrix(getattr(operators, args.op)(A))
idx = th.from_numpy(np.vstack([op.row, op.col])).long()
dat = th.from_numpy(op.data).float()
a = ths.FloatTensor(idx, dat, [n, n]).to(device)
network_args = getattr(args, args.network + '_args')
if hasattr(network_args, 'n_feats'):
if network_args.n_feats is None:
network_args.n_feats = [x.shape[1], k]
else:
network_args.n_feats = [x.shape[1]] + network_args.n_feats + [k]
else:
network_args.in_feats = x.shape[1]
network_args.out_feats = k
network = __import__(args.network).Network(**vars(network_args)).to(device)
optim_args = getattr(args, args.optim.lower() + '_args')
optimizer = getattr(optim, args.optim)(network.parameters(), **vars(optim_args))
for i in range(args.n_iterations):
z = network(a, x)
idx_batch = th.randperm(len(x_train), device=device)[:args.bs]
idx = idx_train[idx_batch]
ce = F.cross_entropy(z[idx], y[idx])
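    # The forward pass above runs on the full graph; the loss is estimated on a
    # random minibatch of training nodes only.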
if (i + 1) % args.log_every == 0:
y_bar = th.argmax(z, 1)
val_acc = accuracy_score(y[idx_val].tolist(), y_bar[idx_val].tolist())
test_acc = accuracy_score(y[idx_test].tolist(), y_bar[idx_test].tolist())
        placeholder = '0' * (len(str(args.n_iterations)) - len(str(i + 1)))
        caption = '[iteration %s%d]' % (placeholder, i + 1)
        print('%s train: %.3f | val: %.3f | test: %.3f' % (caption, ce, val_acc, test_acc))
optimizer.zero_grad()
ce.backward()
optimizer.step()
| [
"argparse.ArgumentParser",
"torch.argmax",
"numpy.square",
"torch.nn.functional.cross_entropy",
"numpy.ones",
"numpy.hstack",
"sbm.generate",
"numpy.mean",
"torch.device",
"torch.sparse.FloatTensor",
"torch.sum",
"numpy.vstack",
"torch.from_numpy"
] | [((276, 301), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (299, 301), False, 'import argparse\n'), ((1364, 1398), 'numpy.mean', 'np.mean', (['x_train', '(0)'], {'keepdims': '(True)'}), '(x_train, 0, keepdims=True)\n', (1371, 1398), True, 'import numpy as np\n'), ((2102, 2123), 'sbm.generate', 'sbm.generate', (['n', 'p', 'q'], {}), '(n, p, q)\n', (2114, 2123), False, 'import sbm\n'), ((1153, 1169), 'torch.device', 'th.device', (['"""cpu"""'], {}), "('cpu')\n", (1162, 1169), True, 'import torch as th\n'), ((1191, 1222), 'torch.device', 'th.device', (["('cuda:%d' % args.gpu)"], {}), "('cuda:%d' % args.gpu)\n", (1200, 1222), True, 'import torch as th\n'), ((1948, 1962), 'torch.sum', 'th.sum', (['(y == 0)'], {}), '(y == 0)\n', (1954, 1962), True, 'import torch as th\n'), ((1964, 1978), 'torch.sum', 'th.sum', (['(y == 1)'], {}), '(y == 1)\n', (1970, 1978), True, 'import torch as th\n'), ((3031, 3062), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['z[idx]', 'y[idx]'], {}), '(z[idx], y[idx])\n', (3046, 3062), True, 'import torch.nn.functional as F\n'), ((2028, 2043), 'numpy.ones', 'np.ones', (['[k, k]'], {}), '([k, k])\n', (2035, 2043), True, 'import numpy as np\n'), ((2238, 2260), 'torch.from_numpy', 'th.from_numpy', (['op.data'], {}), '(op.data)\n', (2251, 2260), True, 'import torch as th\n'), ((2273, 2306), 'torch.sparse.FloatTensor', 'ths.FloatTensor', (['idx', 'dat', '[n, n]'], {}), '(idx, dat, [n, n])\n', (2288, 2306), True, 'import torch.sparse as ths\n'), ((3118, 3133), 'torch.argmax', 'th.argmax', (['z', '(1)'], {}), '(z, 1)\n', (3127, 3133), True, 'import torch as th\n'), ((1446, 1464), 'numpy.square', 'np.square', (['x_train'], {}), '(x_train)\n', (1455, 1464), True, 'import numpy as np\n'), ((2196, 2223), 'numpy.vstack', 'np.vstack', (['[op.row, op.col]'], {}), '([op.row, op.col])\n', (2205, 2223), True, 'import numpy as np\n'), ((1599, 1634), 'numpy.vstack', 'np.vstack', (['[x_train, x_val, x_test]'], {}), '([x_train, x_val, x_test])\n', (1608, 1634), True, 'import numpy as np\n'), ((1673, 1708), 'numpy.hstack', 'np.hstack', (['[y_train, y_val, y_test]'], {}), '([y_train, y_val, y_test])\n', (1682, 1708), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
'''
Copyright (c) 2021, MIT Interactive Robotics Group, PI <NAME>.
Authors: <NAME>, <NAME>, <NAME>, <NAME>
All rights reserved.
'''
# Adapted from https://github.com/befelix/safe-exploration/blob/master/safe_exploration/environments.py
import numpy as np
from numpy.matlib import repmat
from scipy.integrate import ode, odeint
from IPython import embed
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from casadi import reshape as cas_reshape
from matplotlib import cm
import matplotlib.patches as mpatches
import os
import yaml
from scipy import interpolate
import sdeint
import hr_planning
from hr_planning.visualization.utils_visualization import plot_ellipsoid_2D
from hr_planning.env_gridworld_human.hmdp import HumanMdp
from hr_planning.env_gridworld_human.hmpc import HumanRefTracker
from hr_planning.visualization.utils_visualization import print_FAIL, print_OK
from hr_planning.utils_interp import waypts_2_zeroOrderHold
X_LIM_RATIO_VIS = 0.1
Y_LIM_RATIO_VIS = 0.1
class HREnv(object):
def __init__(self, config_path_hr_env, config_path_hmdp,
cache_dir, pH_mode, value_iteration=True, pR_0_arg=None):
"""
Class for the human robot environment,
including the human and robot simulators.
Human dynamics = simulated via sde or ode (depending on self.use_sde).
Human controls <= human MPC (Hmpc) <= human MDP (Hmdp).
Robot dynamics = simulated via ode.
        Robot controls <= robot MPC (Rmpc).
# E.g., in a 2D grid world:
pH = human position.
vH = human velocity.
pR = robot position.
vR = robot velocity.
u = aR = robot control.
State s = pHx, pHy, vHx, vHy, pRx, pRy, vRx, vRy
sH = pHx, pHy, vHx, vHy
sR = pRx, pRy, vRx, vRy
n_s = 8
n_pR = 2
n_vR = 2
n_sR = 4
n_pH = 2
n_vH = 2
n_sH = 4
u = aRx, aRy
n_u = 2
Parameters
----------
config_path_hr_env: str, path for a config file for this class - HREnv.
config_path_hmdp: str, path for a config file for an Hmdp.
cache_dir: directory to save Hmdp's computed transition, reward, and policy.
pH_mode: str
pH_indep_pR: H-Indep-R condition in the paper.
pH_avoid_pR: H-Away-R condition in the paper.
pH_move_to_pR: H-To-R condition in the paper.
value_iteration: bool, whether to run value iteration
or load the policy from files.
pR_0_arg: (n_pR,) numpy vector, initial robot position
If None, then will use the pR_0 from config_path_hr_env.
If not None, then will use pR_0_arg.
"""
assert pH_mode in ["pH_indep_pR", "pH_avoid_pR", "pH_move_to_pR"]
self.pH_mode = pH_mode
self.iteration = 0
self.cur_pR = None
self.cur_vR = None
self.cur_pH = None
self.cur_vH = None
# Used for sdeint
self.cur_uHs_interp = None
self.end_time_us_mpc = None
with open(config_path_hr_env) as f:
self.config_hr_env = yaml.load(f, Loader=yaml.FullLoader)
self.mR = self.config_hr_env["mR"]
self.mH = self.config_hr_env["mH"]
# dt used for env ode sim
self.dt_env = self.config_hr_env["dt_env"]
        # dt used for rolling out the human MDP policy as a ref traj for the human MPC
self.dt_Hmdp = self.config_hr_env["dt_Hmdp"]
# dt used for robot mpc
self.dt_Rmpc = self.config_hr_env["dt_Rmpc"]
# dt used for higher resolution collision checking
self.dt_pH_pR_safety_checking\
= self.config_hr_env["dt_pH_pR_safety_checking"]
# How often do we update the env, i.e., the amount of time that the
# function step() will move forward into the future.
self.step_time = self.config_hr_env["step_time"]
self.H_sde_noise = self.config_hr_env["H_sde_noise"]
self.use_sde = self.config_hr_env["use_sde"]
# Override if needed
if pR_0_arg is None:
if "pR_0" in self.config_hr_env:
self.pR_0 = np.array(self.config_hr_env["pR_0"])
elif "pR_0_coll_avoid" in self.config_hr_env:
self.pR_0 = np.array(self.config_hr_env["pR_0_coll_avoid"])
elif "pR_0_handover" in self.config_hr_env:
self.pR_0 = np.array(self.config_hr_env["pR_0_handover"])
else:
raise RuntimeError()
else:
self.pR_0 = np.array(pR_0_arg)
self.vR_0 = np.array(self.config_hr_env["vR_0"])
self.n_pR = self.pR_0.shape[0]
self.n_vR = self.n_pR
self.n_uR = self.n_pR
self.pR_0 = self.pR_0.reshape((self.n_pR, 1))
self.vR_0 = self.vR_0.reshape((self.n_vR, 1))
self.pR_min = np.array(self.config_hr_env["pR_min"])
self.pR_min = self.pR_min.reshape((self.n_pR, 1))
self.pR_max = np.array(self.config_hr_env["pR_max"])
self.pR_max = self.pR_max.reshape((self.n_pR, 1))
self.vR_min = np.array(self.config_hr_env["vR_min"])
self.vR_min = self.vR_min.reshape((self.n_vR, 1))
self.vR_max = np.array(self.config_hr_env["vR_max"])
self.vR_max = self.vR_max.reshape((self.n_vR, 1))
self.uR_min = np.array(self.config_hr_env["uR_min"])
self.uR_min = self.uR_min.reshape((self.n_uR, 1))
self.uR_max = np.array(self.config_hr_env["uR_max"])
self.uR_max = self.uR_max.reshape((self.n_uR, 1))
self.pR_goal = np.array(self.config_hr_env["pR_goal"])
self.pR_goal = self.pR_goal.reshape((self.n_pR, 1))
self.pR_goal_tol = self.config_hr_env["pR_goal_tol"]
assert (self.pR_0 < 1e-5 + self.pR_max).all()
assert (self.pR_0 > -1e-5 + self.pR_min).all()
assert (self.vR_0 < 1e-5 + self.vR_max).all()
assert (self.vR_0 > -1e-5 + self.vR_min).all()
assert (self.pR_goal < 1e-5 + self.pR_max).all()
assert (self.pR_goal > -1e-5 + self.pR_min).all()
self.Hmdp = HumanMdp(config_path=config_path_hmdp,
cache_dir=cache_dir)
if value_iteration:
self.Hmdp.computeTransitionAndRewardArrays()
self.Hmdp.value_iteration(discount=1, epsilon=1e-5, max_iter=10000)
self.Hmdp.printPolicy()
self.Hmdp.loadTransitionAndRewardArrays()
self.Hmdp.loadPolicyVI()
self.n_pH = self.Hmdp.ss.n_dofs
self.n_vH = self.n_pH
self.n_uH = self.n_pH
self.pH_0 = np.array(self.config_hr_env["pH_0"])
self.pH_0 = self.pH_0.reshape((self.n_pH, 1))
self.vH_0 = np.array(self.config_hr_env["vH_0"])
self.vH_0 = self.vH_0.reshape((self.n_vH, 1))
# For the dressing task
if "pH_shoulder" in self.config_hr_env:
self.pH_shoulder = np.array(self.config_hr_env["pH_shoulder"])
self.pH_shoulder = self.pH_shoulder.reshape((self.n_pH, 1))
self.n_interp_pH_pH_shoulder = int(
self.config_hr_env["n_interp_pH_pH_shoulder"])
self.max_dist_bw_pR_arm = self.config_hr_env["max_dist_bw_pR_arm"]
self.pH_min = np.zeros((self.n_pH, 1))
self.pH_max = np.zeros((self.n_pH, 1))
for i in range(self.n_pH):
cs = self.Hmdp.ss.ind_2_center_by_dof[i]
r = self.Hmdp.ss.ind_2_radii_by_dof[i]
assert abs(cs[1] - cs[0] - r * 2) < 1e-5
self.pH_min[i] = cs[0] - r
self.pH_max[i] = cs[-1] + r
self.pH_min = self.pH_min.reshape((self.n_pH, 1))
        self.pH_max = self.pH_max.reshape((self.n_pH, 1))
self.vH_min = np.array(self.config_hr_env["Hmpc"]["vH_min"])
self.vH_min = self.vH_min.reshape((self.n_vH, 1))
self.vH_max = np.array(self.config_hr_env["Hmpc"]["vH_max"])
self.vH_max = self.vH_max.reshape((self.n_vH, 1))
self.uH_min = np.array(self.config_hr_env["Hmpc"]["uH_min"])
self.uH_min = self.uH_min.reshape((self.n_uH, 1))
self.uH_max = np.array(self.config_hr_env["Hmpc"]["uH_max"])
self.uH_max = self.uH_max.reshape((self.n_uH, 1))
assert (self.pH_0 < 1e-5 + np.squeeze(self.pH_max)).all()
assert (self.pH_0 > -1e-5 + np.squeeze(self.pH_min)).all()
assert (self.vH_0 < 1e-5 + np.squeeze(self.vH_max)).all()
assert (self.vH_0 > -1e-5 + np.squeeze(self.vH_min)).all()
# For workspace constraint
if "pH_min_workspace" not in self.config_hr_env:
self.pH_min_workspace = np.copy(self.pH_min)
self.pH_max_workspace = np.copy(self.pH_max)
else:
self.pH_min_workspace = np.array(self.config_hr_env["pH_min_workspace"])
self.pH_max_workspace = np.array(self.config_hr_env["pH_max_workspace"])
self.pH_min_workspace = self.pH_min_workspace.reshape((self.n_pH, 1))
self.pH_max_workspace = self.pH_max_workspace.reshape((self.n_pH, 1))
self.w_ref = np.eye(self.n_pH)*self.config_hr_env["Hmpc"]["w_ref"]
self.w_u = np.eye(self.n_uH)*self.config_hr_env["Hmpc"]["w_u"]
self.w_move_to_pR = np.eye(self.n_pH)*self.config_hr_env["Hmpc"]["w_move_to_pR"]
self.w_avoid_pR = np.eye(self.n_pH)*self.config_hr_env["Hmpc"]["w_avoid_pR"]
# dt used for human mpc
self.dt_Hmpc = self.config_hr_env["Hmpc"]["dt_Hmpc"]
# This is from robot MPC (Rmpc)'s view, how far to avoid pH
self.pH_pR_min_sep_dist = float(
self.config_hr_env["pH_pR_min_sep_dist"])
# This is from human MPC (Hmpc)'s view, how far to avoid pR_0
pH_view_pH_pR_min_sep_dist = float(
self.config_hr_env["Hmpc"]["pH_view_pH_pR_min_sep_dist"])
self.Hmpc = HumanRefTracker(
pH_min=np.squeeze(self.pH_min),
pH_max=np.squeeze(self.pH_max),
vH_min=np.squeeze(self.vH_min),
vH_max=np.squeeze(self.vH_max),
uH_min=np.squeeze(self.uH_min),
uH_max=np.squeeze(self.uH_max),
w_ref=self.w_ref, w_u=self.w_u,
w_move_to_pR=self.w_move_to_pR,
w_avoid_pR=self.w_avoid_pR,
dt=self.dt_Hmpc, mass=self.mH,
pH_view_pH_pR_min_sep_dist=pH_view_pH_pR_min_sep_dist)
self.color_robot = "r"
self.color_human = "g"
self.color_hmdp_grid = "k"
# https://matplotlib.org/3.3.3/api/markers_api.html
self.marker_hmdp_obstacle = "x"
self.marker_hmdp_grid = "."
self.marker_robot_init_state = "X"
self.marker_human_init_state = "X"
self.marker_robot_traj = "s"
self.marker_human_traj = "s"
self.marker_human_data = "o"
self.alpha_hmdp_grid = 0.1
self.alpha_hmdp_obstacle = 0.1
self.markersize_state = 10
# https://matplotlib.org/3.1.0/tutorials/colors/colormaps.html
self.ellipsoid_cmap = cm.get_cmap('Paired')
# ---------------------
# Collision checking
# We approximate the robot collision volume as a set of pts
# to ease the collision checking wrt human as ellipsoids.
# In MPC, we will add the offsets to pR and check whether
# that point is inside or not the ellipsoids.
self.R_col_volume_offsets = np.array([[0, 0]]) # only check collision at pR without any offsets.
assert self.R_col_volume_offsets.shape[1] == self.n_pR
# Safe impact potential
assert self.n_vR == self.n_vH
# Worst case = 1, based on <Quanti...> paper
self.coeff_restit = self.config_hr_env["coeff_restit"]
self.F_HR_max_safe_impact = self.config_hr_env["F_HR_max_safe_impact"]
# Convert it to inscribed hyperrectangle, along x,y
# nvRx0
self.F_HR_max = np.zeros(self.n_vR,)
self.F_HR_max.fill(self.F_HR_max_safe_impact / np.sqrt(self.n_vR))
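        # For example (hypothetical values): with F_HR_max_safe_impact = 10 N
        # in 2D, each axis is bounded by 10 / sqrt(2) ~= 7.07 N, the largest
        # axis-aligned box inscribed in the Euclidean ball of radius 10.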
# Eq6 in paper:
# 1x1
tmp = abs((self.coeff_restit + 1.) / (1./self.mR + 1./self.mH))
assert tmp > 1e-5
# \rho in App.D(A):
# nvRx0
self.safe_pot_offset = self.F_HR_max / tmp
# Eq.11 in paper: Cst: h_mat x vH <= [-vR, vR]^T + h
# nvRx1
tmp2 = np.reshape(self.safe_pot_offset, (self.n_vR, 1))
# (2*nvR)x1
self.h_safe_imp_pot = np.vstack((tmp2, tmp2))
# (2*nvR)xnvR
self.h_mat_safe_imp_pot = np.vstack(
(-np.eye(self.n_vR), np.eye(self.n_vR)))
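        # With h_mat = [-I; I] and h = [rho; rho], the constraint
        # h_mat @ vH <= [-vR; vR] + h expands to vR - rho <= vH <= vR + rho,
        # i.e. |vH - vR| <= rho componentwise.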
def noise_H_sde(self, x, t):
"""Definition of the noise for human sde"""
# https://pypi.org/project/sdeint/
# diagonal, so independent driving Wiener processes
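        # sdeint integrates dy = f(y, t) dt + G(y, t) dW; returning a diagonal
        # G with zeros on the velocity rows injects Wiener noise into the
        # positions only.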
pH_noise = [self.H_sde_noise] * self.n_pH
vH_noise = [0.] * self.n_vH
return np.diag(pH_noise + vH_noise)
def dynamics_H_sde(self, y, t):
"""Definition of the dynamics for human sde.
sdeint does not allow passing in args"""
return self.dynamics(
y=y, t=t, uts=self.cur_uHs_interp,
end_time=self.end_time_us_mpc, m=self.mH)
def dynamics(self, y, t, uts, end_time, m):
"""Definition of the double integrator dynamics
(used for human ode and robot ode).
https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.odeint.html
Computes the derivative of y at t.
Parameters
----------
y: (np+nv)x0 = state s = [px, py, vx, vy].
t: float, the current time.
uts: scipy.interpolate.interpolate.interp1d = controls across time.
end_time: the end time of uts.
m: mass.
Returns
-------
dy: (np+nv)x1.
"""
ut = np.zeros((uts.y.shape[1],))
if t <= end_time:
ut = uts(t)
n_x = y.shape[0]
n_p = int(n_x / 2.)
dy = np.zeros((n_x,))
assert n_x / 2 == 2
dy[:n_p] = y[2:]
dy[n_p:] = ut / m
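        # For example (hypothetical values): y = [1, 2, 0.5, -0.5],
        # ut = [2, 0], m = 2 gives dy = [0.5, -0.5, 1, 0]: positions integrate
        # the velocities, and velocities integrate the acceleration ut / m.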
return dy
def sample_init_sR(self, mean, std, n_samples=1):
"""Sample an initial robot state, sR.
mean: nsx0.
std: float.
n_samples: int.
"""
n_s = mean.shape[0]
samples = (repmat(std, n_samples, 1)
* np.random.randn(n_samples, n_s)
+ repmat(mean, n_samples, 1))
return (samples.T.squeeze()).T
def random_uR(self):
"""Sample a random control, uR."""
return np.random.rand(self.n_uR)\
* (self.uR_max - self.uR_min) + self.uR_min
def reset(self, pR_0=None, vR_0=None, pH_0=None, vH_0=None):
"""
Reset the env to:
pR_0: initial robot position.
vR_0: initial robot velocity.
pH_0: initial human position.
vH_0: initial human velocity.
"""
self.iteration = 0
if pR_0 is None:
pR_0 = self.pR_0
if vR_0 is None:
vR_0 = self.vR_0
if pH_0 is None:
pH_0 = self.pH_0
if vH_0 is None:
vH_0 = self.vH_0
assert (pR_0 < 1e-5 + np.squeeze(self.pR_max)).all()
assert (pR_0 > -1e-5 + np.squeeze(self.pR_min)).all()
assert (vR_0 < 1e-5 + np.squeeze(self.vR_max)).all()
assert (vR_0 > -1e-5 + np.squeeze(self.vR_min)).all()
assert (pH_0 < 1e-5 + np.squeeze(self.pH_max)).all()
assert (pH_0 > -1e-5 + np.squeeze(self.pH_min)).all()
assert (vH_0 < 1e-5 + np.squeeze(self.vH_max)).all()
assert (vH_0 > -1e-5 + np.squeeze(self.vH_min)).all()
self.cur_pR = pR_0
self.cur_vR = vR_0
self.cur_pH = pH_0
self.cur_vH = vH_0
return self.cur_pR, self.cur_vR, self.cur_pH, self.cur_vH
def sim_ode(self, cur_p, cur_v, us_1_T, dt_us, human_or_robot, plot=False):
"""
Simulate human/robot ode, and human sde for 1 step.
Parameters
----------
cur_p: current position.
cur_v: current velocity.
us_1_T: controls from time index 1 to T.
dt_us: the dt associated for us_1_T.
human_or_robot: str, "R" OR "H".
If "H": this is called for simulating the human ode+sde.
If "R": this is called for simulating the robot ode.
plot: bool, plot for debugging.
Returns
-------
next_p: next position from the ode.
next_v: next velocity from the ode.
        next_p_sde: next position from the sde.
next_v_sde: next velocity from the sde.
ax: Axes object, containing the plot.
"""
assert human_or_robot in ["R", "H"]
cur_p = np.squeeze(cur_p)
cur_v = np.squeeze(cur_v)
n_s = cur_p.shape[0]
# us_1_T = [1:T], so its length = horizon
horizon_us_mpc = us_1_T.shape[0]
# dts_us_mpc = [0:T-dt_us]
dts_us_mpc, step_uHs_mpc = np.linspace(
start=0.0, stop=horizon_us_mpc*dt_us - dt_us,
num=horizon_us_mpc, endpoint=True, retstep=True)
u_interp = waypts_2_zeroOrderHold(wp_traj=us_1_T, dt=dt_us,
axis=0, plot=False)
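        # Zero-order hold: each control waypoint is held constant for dt_us
        # seconds, so u_interp(t) is piecewise-constant over the MPC horizon.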
end_time_us_mpc = horizon_us_mpc*dt_us - dt_us
# Integrate 1 step = self.step_time
# dts_ode = 0:self.step_time
horizon_ode = int(np.ceil(self.step_time / self.dt_env)) + 1
dts_ode, step_ode = np.linspace(
start=0., stop=self.step_time,
num=horizon_ode, endpoint=True, retstep=True)
assert step_ode <= self.dt_env
# Rollout the traj by Hmpc in ode
mass = None
if human_or_robot == "H":
mass = self.mH
else:
mass = self.mR
y0 = np.squeeze(np.hstack((cur_p, cur_v)))
sol = odeint(func=self.dynamics, y0=y0, t=dts_ode,
args=(u_interp, end_time_us_mpc, mass))
if human_or_robot == "H":
self.cur_uHs_interp = u_interp
self.end_time_us_mpc = end_time_us_mpc
# https://pypi.org/project/sdeint/
# https://stackoverflow.com/questions/54532246/how-to-implement-a-system-of-stochastic-odes-sdes-in-python
# https://ipython-books.github.io/134-simulating-a-stochastic-differential-equation/
sol_sde = sdeint.itoint(f=self.dynamics_H_sde,
G=self.noise_H_sde,
y0=y0, tspan=dts_ode)
next_p = sol[-1, :n_s].reshape((n_s, 1))
next_v = sol[-1, n_s:].reshape((n_s, 1))
next_p_sde = None
next_v_sde = None
if human_or_robot == "H":
next_p_sde = sol_sde[-1, :n_s].reshape((n_s, 1))
next_v_sde = sol_sde[-1, n_s:].reshape((n_s, 1))
ax = None
if plot:
# Plot beyond self.step_time
end_time = dts_us_mpc[-1] + dt_us
horizon_full_ode = int(np.ceil(end_time / self.dt_env)) + 1
dts_full_ode, step_full_ode = np.linspace(
start=0., stop=end_time, num=horizon_full_ode,
endpoint=True, retstep=True)
assert step_full_ode <= self.dt_env
full_sol = odeint(func=self.dynamics, y0=y0, t=dts_full_ode,
args=(u_interp, end_time_us_mpc, mass))
if human_or_robot == "H":
full_sol_sde = sdeint.itoint(f=self.dynamics_H_sde,
G=self.noise_H_sde,
y0=y0, tspan=dts_full_ode)
fig, ax = plt.subplots()
ax.plot(dts_full_ode, full_sol[:, :n_s], label='ode')
ax.plot(dts_ode, sol[:, :n_s], label='partial ode',
linewidth=7, alpha=0.4)
if human_or_robot == "H":
ax.plot(dts_full_ode, full_sol_sde[:, :n_s], label='sde')
ax.plot(dts_ode, sol_sde[:, :n_s], label='partial sde',
linewidth=7, alpha=0.4)
# ax.legend(loc='lower right', ncol=2)
# plt.show()
return next_p, next_v, next_p_sde, next_v_sde, ax
def step_human(self, cur_pR, cur_vR, cur_pH, cur_vH, plot=False):
"""
Simulate the human Hmpc (Hmdp) and ode/sde for 1 step,
by calling Hmpc (Hmdp) and then sim_ode().
Parameters
----------
cur_pR: current robot position.
cur_vR: current robot velocity.
cur_pH: current human position.
cur_vH: current human velocity.
plot: bool, plot for debugging.
Returns
-------
next_pH: next human position from the ode.
next_vH: next human velocity from the ode.
        next_pH_sde: next human position from the sde.
next_vH_sde: next human velocity from the sde.
"""
cur_pH = np.reshape(cur_pH, (self.n_pH,))
assert (cur_pH < np.squeeze(self.pH_max)).all()
assert (cur_pH > np.squeeze(self.pH_min)).all()
# Rollout till human reaches goal
ind0 = self.Hmdp.ss.positions2Ind(tuple(cur_pH))
# Ensure that ind_traj is longer than self.step_time.
        # i.e., we want a traj with length >= min_horizon+1,
# which represents the time index = 0:min_horizon
min_horizon = int(np.ceil(self.step_time / self.dt_Hmdp))
# If len of ref traj < 4, cubic splines will fail
min_horizon = max(min_horizon, 3)
ind_traj = self.Hmdp.rollout_ind_traj(
ind0=ind0, min_horizon=min_horizon)
assert (len(ind_traj) >= min_horizon + 1)
assert ind0 in ind_traj
pHs_0_T_ref = np.zeros((len(ind_traj), self.n_pH))
for t, indt in enumerate(ind_traj):
pHs_0_T_ref[t, :] = self.Hmdp.ss.ind2Positions(indt)
# pHs_0_T_ref[0] is the center of the grid at t=0.
# However, at t=0, human is at cur_pH.
# So need to adjust that:
pHs_0_T_ref[0, :] = cur_pH
assert (pHs_0_T_ref[0] == cur_pH).all()
for t in range(pHs_0_T_ref.shape[0]):
assert (pHs_0_T_ref[t, :] < np.squeeze(self.pH_max)).all()
assert (pHs_0_T_ref[t, :] > np.squeeze(self.pH_min)).all()
# pHs_0_T_ref = 0:T, so its length = horizon + 1
horizon_ref_hmdp = pHs_0_T_ref.shape[0] - 1
dt_ref_hmdp = self.dt_Hmdp
dts_ref_hmdp, step_ref_hmdp = np.linspace(
start=0.0, stop=horizon_ref_hmdp*dt_ref_hmdp,
num=horizon_ref_hmdp + 1, endpoint=True, retstep=True)
assert dts_ref_hmdp[0] == 0.
assert dts_ref_hmdp[-1] >= self.step_time
# MPC
# If uHs_1_T_opt.shape[0] <= 1, cubic splines will fail
assert pHs_0_T_ref.shape[0] >= 4
pHs_1_T_opt, vHs_1_T_opt, uHs_1_T_opt = self.Hmpc.solve_mpc(
pHs_0_T_ref=pHs_0_T_ref, dt_ref=self.dt_Hmdp,
vH_0=cur_vH, pR_0=cur_pR,
pH_mode=self.pH_mode, plot=False)
'''
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(pHs_0_T_ref[:, 0], pHs_0_T_ref[:, 1], 'g', label='ref')
ax.plot(pHs_1_T_opt[:, 0], pHs_1_T_opt[:, 1], 'r', label='opt')
ax.plot(cur_pR[0, 0], cur_pR[1, 0], 'bx', label='cur_pR')
ax.plot(cur_pH[0], cur_pH[1], 'rx', label='cur_pH')
ax.legend(loc='upper right', ncol=2)
plt.show()
plt.cla()
plt.clf()
plt.close()
for i in range(pHs_1_T_opt.shape[0]):
dist = np.linalg.norm(pHs_1_T_opt[i, :] - cur_pH, ord=2)
print(dist)
'''
# If uHs_1_T_opt.shape[0] <= 1, splines will fail
assert uHs_1_T_opt.shape[0] > 1
next_pH, next_vH, next_pH_sde, next_vH_sde, ax = self.sim_ode(
cur_p=cur_pH, cur_v=cur_vH, us_1_T=uHs_1_T_opt,
dt_us=self.dt_Hmpc, human_or_robot="H", plot=plot)
if plot:
ax.plot(dts_ref_hmdp, pHs_0_T_ref, 'o',
markersize=10, label='Hmdp')
horizon_us_mpc = uHs_1_T_opt.shape[0]
dt_us = self.dt_Hmpc
dts_us_mpc, step_uHs_mpc = np.linspace(
start=0.0, stop=horizon_us_mpc*dt_us - dt_us,
num=horizon_us_mpc, endpoint=True, retstep=True)
dts_mpc = np.hstack((dts_us_mpc, dts_ref_hmdp[-1]))
pHs_0_T_opt = np.vstack((cur_pH.T, pHs_1_T_opt))
ax.plot(dts_mpc, pHs_0_T_opt, label='Hmpc')
ax.legend(loc='lower right', ncol=2)
plt.show()
return next_pH, next_vH, next_pH_sde, next_vH_sde
def step_robot(self, cur_pR, cur_vR, cur_pH, cur_vH, uRs_1_T, plot=False):
"""
Simulate the robot ode for 1 step, by calling sim_ode().
Parameters
----------
cur_pR: current robot position.
cur_vR: current robot velocity.
cur_pH: current human position.
cur_vH: current human velocity.
uRs_1_T: robot controls from time step 1 to T.
plot: bool, plot for debugging.
Returns
-------
next_pR: next robot position from the ode.
next_vR: next robot velocity from the ode.
"""
assert uRs_1_T.shape[1] == self.n_uR
# Ensure that uRs_1_T is longer than self.step_time.
        # i.e., we want uRs_1_T with length >= min_horizon,
# which represents the time index = 0+dt_Rmpc : min_horizon
min_horizon = int(np.ceil(self.step_time / self.dt_Rmpc))
tmp = np.zeros((min_horizon, self.n_uR), dtype=np.float32)
for i in range(min(min_horizon, uRs_1_T.shape[0])):
tmp[i, :] = uRs_1_T[i, :]
uRs_1_T = tmp
assert (uRs_1_T.shape[0] == min_horizon)
# If uRs_1_T.shape[0] <= 1, splines will fail
if uRs_1_T.shape[0] <= 1:
uRs_1_T = np.vstack((uRs_1_T, np.zeros(self.n_uR,)))
assert uRs_1_T.shape[0] > 1
next_pR, next_vR, _, _, ax = self.sim_ode(
cur_p=cur_pR, cur_v=cur_vR, us_1_T=uRs_1_T,
dt_us=self.dt_Rmpc, human_or_robot="R", plot=plot)
if plot:
ax.legend(loc='lower right', ncol=2)
plt.show()
return next_pR, next_vR
def step(self, uRs_1_T, cur_pR=None, cur_vR=None, cur_pH=None,
cur_vH=None, set_cur_state=True):
"""
1. Simulate the human Hmpc (Hmdp) and ode/sde for 1 step,
by calling Hmpc (Hmdp) and then sim_ode().
2. Simulate the robot ode for 1 step, by calling sim_ode().
3. Check safety metrics between human and robot.
Parameters
----------
uRs_1_T: robot controls from time step 1 to T.
cur_pR: current robot position.
cur_vR: current robot velocity.
cur_pH: current human position.
cur_vH: current human velocity.
set_cur_state: bool, whether to set the class member variables
regarding the current state.
Returns
-------
next_pH: next human position from the ode/sde.
next_vH: next human velocity from the ode/sde.
next_pR: next robot position from the ode.
next_vR: next robot velocity from the ode.
collision: bool, whether human and robot are in collision in this step.
safe_impact: bool, whether human and robot have safe impact in this step.
HR_min_dist: float, min separation distance between human and robot.
HR_max_vel_diff: float, max velocity difference between human and robot.
"""
if cur_pR is None:
cur_pR = self.cur_pR
if cur_vR is None:
cur_vR = self.cur_vR
if cur_pH is None:
cur_pH = self.cur_pH
if cur_vH is None:
cur_vH = self.cur_vH
assert (cur_pR < 1e-5 + np.squeeze(self.pR_max)).all()
assert (cur_pR > -1e-5 + np.squeeze(self.pR_min)).all()
assert (cur_vR < 1e-5 + np.squeeze(self.vR_max)).all()
assert (cur_vR > -1e-5 + np.squeeze(self.vR_min)).all()
assert (cur_pH < 1e-5 + np.squeeze(self.pH_max)).all()
assert (cur_pH > -1e-5 + np.squeeze(self.pH_min)).all()
assert (cur_vH < 1e-5 + np.squeeze(self.vH_max)).all()
assert (cur_vH > -1e-5 + np.squeeze(self.vH_min)).all()
# Step human
next_pH, next_vH, _next_pH_sde, _next_vH_sde = self.step_human(
cur_pR=cur_pR, cur_vR=cur_vR,
cur_pH=cur_pH, cur_vH=cur_vH, plot=False)
# Step robot
next_pR, next_vR = self.step_robot(
cur_pR=cur_pR, cur_vR=cur_vR, cur_pH=cur_pH,
cur_vH=cur_vH, uRs_1_T=uRs_1_T, plot=False)
if self.use_sde:
next_pH = _next_pH_sde
next_vH = _next_vH_sde
        # Need clipping since the ode solution might go slightly out of bounds.
tol = 1e-5
next_pH = np.clip(next_pH, self.pH_min+tol, self.pH_max-tol)
next_vH = np.clip(next_vH, self.vH_min+tol, self.vH_max-tol)
next_pR = np.clip(next_pR, self.pR_min+tol, self.pR_max-tol)
next_vR = np.clip(next_vR, self.vR_min+tol, self.vR_max-tol)
print("BEFORE: pR={}".format(np.squeeze(cur_pR)))
print("AFTER: pR={}".format(np.squeeze(next_pR)))
print("BEFORE: vR={}".format(np.squeeze(cur_vR)))
print("AFTER: vR={}".format(np.squeeze(next_vR)))
print("BEFORE: pH={}".format(np.squeeze(cur_pH)))
print("AFTER: pH={}".format(np.squeeze(next_pH)))
print("BEFORE: vH={}".format(np.squeeze(cur_vH)))
print("AFTER: vH={}".format(np.squeeze(next_vH)))
collision, safe_impact, HR_min_dist, HR_max_vel_diff\
= self.check_safety_interp(
cur_pR=cur_pR, cur_vR=cur_vR,
cur_pH=cur_pH, cur_vH=cur_vH,
next_pR=next_pR, next_vR=next_vR,
next_pH=next_pH, next_vH=next_vH)
if collision:
if safe_impact:
print_FAIL("Collision && safe impact")
print_OK("HR_min_dist={}".format(HR_min_dist))
print_OK("HR_max_vel_diff={}".format(HR_max_vel_diff))
else:
print_FAIL("Collision && UNsafe impact")
print_FAIL("HR_min_dist={}".format(HR_min_dist))
print_FAIL("HR_max_vel_diff={}".format(HR_max_vel_diff))
else:
print_FAIL("SAFE")
print("HR_min_dist={}".format(HR_min_dist))
print("HR_max_vel_diff={}".format(HR_max_vel_diff))
print("safe_pot_offset={}".format(
np.squeeze(self.safe_pot_offset)))
if set_cur_state:
self.iteration += 1
self.cur_pR = next_pR
self.cur_vR = next_vR
self.cur_pH = next_pH
self.cur_vH = next_vH
return next_pH, next_vH, next_pR, next_vR,\
collision, safe_impact, HR_min_dist, HR_max_vel_diff
def plot_safety_bounds(self, ax=None, plot_human_grid=False,
plot_world_rectangle=True):
"""
Given a 2D axes object, plot the safety bounds on it.
Parameters
----------
ax: Axes object,
The input axes object to plot on
Returns
-------
ax: Axes object
The same Axes object as the input ax but now contains the rectangle
"""
new_ax = False
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
new_ax = True
plt.sca(ax)
# Create a Rectangle patch
rect_length1 = (self.pR_max - self.pR_min)[0]
rect_length2 = (self.pR_max - self.pR_min)[1]
rect = patches.Rectangle(
(tuple(self.pR_min)), rect_length1, rect_length2,
linewidth=1, edgecolor=self.color_robot, facecolor='none')
if plot_world_rectangle:
ax.add_patch(rect)
rect_length3 = (self.pH_max - self.pH_min)[0]
rect_length4 = (self.pH_max - self.pH_min)[1]
rect2 = patches.Rectangle(
(tuple(self.pH_min)), rect_length3, rect_length4,
linewidth=1, edgecolor=self.color_human, facecolor='none')
if plot_world_rectangle:
ax.add_patch(rect2)
if plot_human_grid:
assert self.n_pH == 2
xs = []
ys = []
xs_obs = []
ys_obs = []
for s in range(self.Hmdp.ss.n_states):
positions = self.Hmdp.ss.ind2Positions(s)
if s not in self.Hmdp.inds_obstacle:
xs.append(positions[0])
ys.append(positions[1])
else:
xs_obs.append(positions[0])
ys_obs.append(positions[1])
plt.scatter(xs, ys, alpha=self.alpha_hmdp_grid,
c=self.color_hmdp_grid,
marker=self.marker_hmdp_grid)
plt.scatter(xs_obs, ys_obs, alpha=self.alpha_hmdp_obstacle,
c=self.color_hmdp_grid,
marker=self.marker_hmdp_obstacle)
if new_ax:
x_padding = max(rect_length1, rect_length3) * X_LIM_RATIO_VIS
y_padding = max(rect_length2, rect_length4) * Y_LIM_RATIO_VIS
min_x_lim = min(self.pH_min[0], self.pR_min[0]) - x_padding
max_x_lim = max(self.pH_max[0], self.pR_max[0]) + x_padding
min_y_lim = min(self.pH_min[1], self.pR_min[1]) - y_padding
max_y_lim = max(self.pH_max[1], self.pR_max[1]) + y_padding
ax.set_xlim(min_x_lim, max_x_lim)
ax.set_ylim(min_y_lim, max_y_lim)
return fig, ax
return ax
def plot_state(self, ax, x, color="b", label="", alpha=1.0, annotate=True,
marker="o", markersize_state=1.):
"""
Plot a given state vector
Parameters:
-----------
ax: Axes Object
The axes to plot the state on
        x: (n_s,) array_like[float]
A state vector of the dynamics
Returns
-------
ax: Axes Object
The axes with the state plotted
"""
n_s = x.shape[0]
assert x.shape == (n_s,)
plt.sca(ax)
handles = ax.plot(x[0], x[1], color=color, marker=marker,
markersize=markersize_state, alpha=alpha)
if annotate:
ax.annotate(label, (x[0], x[1]))
return ax, handles
def plot_traj(self, traj, human_or_robot, ax=None, annotate=True):
"""
Plot a trajectory of states, either of human or robot
Parameters
----------
traj: trajectory of states.
ax: Axes Object
The axes to plot the state on
Returns
-------
ax: Axes Object
The axes with the trajectory plotted
"""
assert human_or_robot in ["H", "R"]
if human_or_robot == "H":
n_s = self.n_pH
color = self.color_human
marker_init_state = self.marker_human_init_state
marker_traj = self.marker_human_traj
txt = "H"
else:
n_s = self.n_pR
color = self.color_robot
marker_init_state = self.marker_robot_init_state
marker_traj = self.marker_robot_traj
txt = "R"
assert traj.shape[1] == n_s
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
plt.sca(ax)
horizon = traj.shape[0]
handles = [None] * horizon
for i in range(horizon):
pt = traj[i, :]
if i == 0:
ax, handles[i] = self.plot_state(
ax=ax, x=pt, color=color, label=txt+str(i),
annotate=annotate,
marker=marker_init_state, alpha=1.,
markersize_state=self.markersize_state)
else:
ax, handles[i] = self.plot_state(
ax=ax, x=pt, color=color,
label=txt+str(i), alpha=i/(horizon-1)*4/5+0.2,
annotate=annotate,
marker=marker_traj,
markersize_state=self.markersize_state)
xlim_old = ax.get_xbound()
ylim_old = ax.get_ybound()
max_x = max(np.max(traj[:, 0]), xlim_old[1])
min_x = min(np.min(traj[:, 0]), xlim_old[0])
max_y = max(np.max(traj[:, 1]), ylim_old[1])
min_y = min(np.min(traj[:, 1]), ylim_old[0])
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_y, max_y)
return ax, handles
def plot_ellipsoid_traj(self, pHs_1_T, qHs_1_T, ax=None, plot_lines=False):
"""
Plot a trajectory of ellipsoids for the human reachability.
Parameters
----------
pHs_1_T: centers of ellipsoids from time step 1 to T.
qHs_1_T: shape matrices of ellipsoids from time step 1 to T.
ax: Axes Object
The axes to plot the state on
Returns
-------
ax: Axes Object
The axes with the trajectory plotted
"""
# new_ax = False
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
# new_ax = True
plt.sca(ax)
horizon = pHs_1_T.shape[0]
assert horizon == qHs_1_T.shape[0]
assert pHs_1_T.shape[1] == self.n_pH
assert qHs_1_T.shape[1] == self.n_pH**2
handles = []
# https://stackoverflow.com/a/25408562
color_range = np.linspace(0., 1., num=horizon, endpoint=True)
for i in range(horizon):
color = self.ellipsoid_cmap(color_range[i])
p_i = cas_reshape(pHs_1_T[i, :], (self.n_pH, 1))
q_i = cas_reshape(qHs_1_T[i, :], (self.n_pH, self.n_pH))
ax_, handle_ = plot_ellipsoid_2D(
centroid=p_i.toarray(), Q=q_i.toarray(),
ax=ax, color=color)
            # plot_ellipsoid_2D returns (None, None) if the ellipsoid
            # computation encounters nan
if ax_ is None:
assert handle_ is None
else:
assert handle_ is not None
handles.append(handle_)
ax = ax_
if plot_lines:
p_is = []
for i in range(horizon):
p_i = cas_reshape(pHs_1_T[i, :], (self.n_pH, 1))
p_is.append(p_i.toarray())
for i in range(horizon-1):
color = self.ellipsoid_cmap(color_range[i])
handle = ax.plot(
[p_is[i][0], p_is[i+1][0]],
[p_is[i][1], p_is[i+1][1]], color=color)
handles.extend([handle])
# Create legend
legend_handles = []
for i in range(horizon):
color = self.ellipsoid_cmap(color_range[i])
patch = mpatches.Patch(color=color, label=str(i))
legend_handles.append(patch)
plt.legend(handles=legend_handles)
handles.extend(legend_handles)
return ax, handles
def check_safety_interp(self, cur_pR, cur_vR, cur_pH, cur_vH,
next_pR, next_vR, next_pH, next_vH):
"""
Check safety by conducting cubic interpolation
between cur_xx and next_xx, and checking safety at every dt.
Parameters
----------
cur_pR: current robot position.
cur_vR: current robot velocity.
cur_pH: current human position.
cur_vH: current human velocity.
next_pR: next robot position.
next_vR: next robot velocity.
next_pH: next human position.
next_vH: next human velocity.
Returns
-------
collision: bool, whether human and robot are in collision
within this interpolated trajectory.
safe_impact: bool, whether human and robot have safe impact
within this interpolated trajectory.
min_HR_min_dists: float, min separation distance between human
and robot within this interpolated trajectory.
max_HR_vel_diff: float, max velocity difference between human
and robot within this interpolated trajectory.
"""
assert self.n_pH == self.n_pR
dts_low_res = [0.0, self.step_time]
horizon = int(np.ceil(
self.step_time / self.dt_pH_pR_safety_checking)) + 1
dts_high_res, step = np.linspace(
start=0., stop=self.step_time,
num=horizon, endpoint=True, retstep=True)
assert step <= self.dt_pH_pR_safety_checking
# pR, vR
y = np.vstack((cur_pR.squeeze(), next_pR.squeeze()))
dydt = np.vstack((cur_vR.squeeze(), next_vR.squeeze()))
pR_spline = interpolate.CubicHermiteSpline(
x=dts_low_res, y=y, dydx=dydt, extrapolate=False)
vR_spline = pR_spline.derivative()
# pH, vH
y = np.vstack((cur_pH.squeeze(), next_pH.squeeze()))
dydt = np.vstack((cur_vH.squeeze(), next_vH.squeeze()))
pH_spline = interpolate.CubicHermiteSpline(
x=dts_low_res, y=y, dydx=dydt, extrapolate=False)
vH_spline = pH_spline.derivative()
pR_interp = pR_spline(dts_high_res)
vR_interp = vR_spline(dts_high_res)
pH_interp = pH_spline(dts_high_res)
vH_interp = vH_spline(dts_high_res)
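        # The cubic Hermite interpolant matches both positions and velocities
        # at the two endpoints, so sampling it at dt_pH_pR_safety_checking
        # resolution catches near-misses the coarse step_time grid would skip.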
collision = False
safe_impact = True
HR_min_dists = []
HR_vel_diffs = []
for i in range(len(dts_high_res)):
pR = pR_interp[i, :].reshape((self.n_pR, 1))
vR = vR_interp[i, :].reshape((self.n_vR, 1))
pH = pH_interp[i, :].reshape((self.n_pH, 1))
vH = vH_interp[i, :].reshape((self.n_vH, 1))
collisions_, collision_dists_, vel_diff_, safe_impact_\
= self.check_safety_single_state(pR=pR, vR=vR, pH=pH, vH=vH)
if True in collisions_:
collision = True
if not safe_impact_:
safe_impact = False
HR_min_dists.append(min(collision_dists_))
HR_vel_diffs.append(vel_diff_)
min_HR_min_dists = float(min(HR_min_dists))
max_HR_vel_diff = float(max(HR_vel_diffs))
if collision:
assert min_HR_min_dists < self.pH_pR_min_sep_dist
return collision, safe_impact, min_HR_min_dists, max_HR_vel_diff
def check_safety_single_state(self, pR, pH, vR=None, vH=None):
"""
Check safety for a single state.
Parameters
----------
pR: current robot position.
pH: current human position.
vR: current robot velocity.
vH: current human velocity.
Returns
-------
        collisions: list of bools, collision or not for each point to check.
        collision_dists: list of floats, separation distance for each point to check.
norm_v_diff: float, velocity difference between human and robot.
safe_impact: bool, whether human and robot have safe impact.
"""
assert pR.shape == (self.n_pR, 1)
if vR is not None:
assert vR.shape == (self.n_vR, 1)
assert pH.shape == (self.n_pH, 1)
if vH is not None:
assert vH.shape == (self.n_vH, 1)
# Collision avoidance
n_pts = self.R_col_volume_offsets.shape[0]
collisions = []
collision_dists = []
for j in range(n_pts):
offset = self.R_col_volume_offsets[j, :]
offset = np.reshape(offset, (self.n_pR, 1))
assert pR.shape == offset.shape
coll_pt = pR + offset
d = np.linalg.norm(coll_pt - pH, ord=2)
collision_dists.append(d)
# Since we have already used R_col_volume_offsets,
# we don't need to add R volume here.
collisions.append(d < self.pH_pR_min_sep_dist)
# Safe impact
# Cst: h_mat x vH <= [-vR, vR]^T + h
# Here we don't use the over-approximated cst:
# h_mat x vH <= [-vR, vR]^T + h.
# Instead, we check safe impact exactly:
# l*||v1j-v2j||_2 <= F_max
norm_v_diff = np.linalg.norm(vR - vH, ord=2)
tmp = abs((self.coeff_restit + 1.) / (1./self.mR + 1./self.mH))
        LHS = norm_v_diff * tmp
        RHS = self.F_HR_max_safe_impact
        safe_impact = LHS <= RHS
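        # For example (hypothetical values): coeff_restit = 1 and mR = mH = 2
        # give tmp = 2 / (0.5 + 0.5) = 2, so the impact is deemed safe iff
        # 2 * ||vR - vH||_2 <= F_HR_max_safe_impact.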
return collisions, collision_dists, norm_v_diff, safe_impact
if __name__ == "__main__":
# Test
np.random.seed(0)
# https://stackoverflow.com/a/2891805
np.set_printoptions(precision=3, suppress=True)
config_dir_name = "config_2d_simple"
path = os.path.abspath(hr_planning.__file__)
module_dir = os.path.split(path)[0]
config_dir = os.path.join(
module_dir,
"env_gridworld_human/" + config_dir_name)
config_path_hr_env = os.path.join(config_dir, "hr_env.yaml")
config_path_hmdp = os.path.join(config_dir, "hmdp.yaml")
cache_dir = os.path.join(module_dir, "env_gridworld_human/cache")
# for pH_mode in ["pH_indep_pR", "pH_avoid_pR", "pH_move_to_pR"]:
# for pH_mode in ["pH_indep_pR", "pH_avoid_pR"]:
for pH_mode in ["pH_avoid_pR"]:
env = HREnv(config_path_hr_env=config_path_hr_env,
config_path_hmdp=config_path_hmdp,
cache_dir=cache_dir,
pH_mode=pH_mode,
value_iteration=True)
_, ax = env.plot_safety_bounds(plot_human_grid=True, ax=None)
# plt.show()
mean = np.array([1, 2, 3, 4])
std = 0.1
n_samples = 2
sampled_sR = env.sample_init_sR(mean=mean, std=std, n_samples=n_samples)
assert sampled_sR.shape == (n_samples, mean.shape[0])
pR_0 = np.array([0.3, 0.0]).reshape((env.n_pR, 1))
vR_0 = np.array([0.0, 0.0]).reshape((env.n_vR, 1))
pH_0 = np.array([0.1, 0.0]).reshape((env.n_pH, 1))
vH_0 = np.array([0.0, 0.0]).reshape((env.n_vH, 1))
cur_pR, cur_vR, cur_pH, cur_vH = env.reset(
pR_0=pR_0, vR_0=vR_0, pH_0=pH_0, vH_0=vH_0)
horizon = 10
pRs_0_T = np.zeros((horizon+1, env.n_pR), dtype=np.float32)
vRs_0_T = np.zeros((horizon+1, env.n_vR), dtype=np.float32)
pHs_0_T = np.zeros((horizon+1, env.n_pH), dtype=np.float32)
vHs_0_T = np.zeros((horizon+1, env.n_vH), dtype=np.float32)
pRs_0_T[0, :] = np.squeeze(env.cur_pR)
vRs_0_T[0, :] = np.squeeze(env.cur_vR)
pHs_0_T[0, :] = np.squeeze(env.cur_pH)
vHs_0_T[0, :] = np.squeeze(env.cur_vH)
# We can verify using vt=v0+a*t, xt=x0+v0*t+0.5*a*t^2
for t in range(1, horizon + 1):
print_OK("----\nItr={}".format(t))
# uR = env.random_uR()
# uRs_1_T = np.array([[0.1, 0.1]])
uRs_1_T = np.array([[0.0, 0.0]])
# uRs_1_T = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
assert uRs_1_T.shape[1] == env.n_uR
next_pH, next_vH, next_pR, next_vR,\
collision, safe_impact, HR_min_dist, HR_max_vel_diff\
= env.step(uRs_1_T=uRs_1_T, set_cur_state=True)
pRs_0_T[t, :] = np.squeeze(env.cur_pR)
vRs_0_T[t, :] = np.squeeze(env.cur_vR)
pHs_0_T[t, :] = np.squeeze(env.cur_pH)
vHs_0_T[t, :] = np.squeeze(env.cur_vH)
print("pRs_0_T={}".format(pRs_0_T))
print("pHs_0_T={}".format(pHs_0_T))
ax, handles = env.plot_traj(traj=pHs_0_T, human_or_robot="H", ax=ax)
ax, handles = env.plot_traj(traj=pRs_0_T, human_or_robot="R", ax=ax)
plt.show()
plt.cla()
plt.clf()
plt.close()
print("Done")
| [
"yaml.load",
"numpy.random.seed",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.clf",
"numpy.clip",
"matplotlib.pyplot.figure",
"numpy.linalg.norm",
"numpy.diag",
"os.path.join",
"scipy.interpolate.CubicHermiteSpline",
"os.path.abspath",
"numpy.set_printoptions",
"numpy.copy",
"numpy.random... | [((44994, 45011), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (45008, 45011), True, 'import numpy as np\n'), ((45058, 45105), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)', 'suppress': '(True)'}), '(precision=3, suppress=True)\n', (45077, 45105), True, 'import numpy as np\n'), ((45160, 45197), 'os.path.abspath', 'os.path.abspath', (['hr_planning.__file__'], {}), '(hr_planning.__file__)\n', (45175, 45197), False, 'import os\n'), ((45255, 45321), 'os.path.join', 'os.path.join', (['module_dir', "('env_gridworld_human/' + config_dir_name)"], {}), "(module_dir, 'env_gridworld_human/' + config_dir_name)\n", (45267, 45321), False, 'import os\n'), ((45372, 45411), 'os.path.join', 'os.path.join', (['config_dir', '"""hr_env.yaml"""'], {}), "(config_dir, 'hr_env.yaml')\n", (45384, 45411), False, 'import os\n'), ((45435, 45472), 'os.path.join', 'os.path.join', (['config_dir', '"""hmdp.yaml"""'], {}), "(config_dir, 'hmdp.yaml')\n", (45447, 45472), False, 'import os\n'), ((45489, 45542), 'os.path.join', 'os.path.join', (['module_dir', '"""env_gridworld_human/cache"""'], {}), "(module_dir, 'env_gridworld_human/cache')\n", (45501, 45542), False, 'import os\n'), ((4663, 4699), 'numpy.array', 'np.array', (["self.config_hr_env['vR_0']"], {}), "(self.config_hr_env['vR_0'])\n", (4671, 4699), True, 'import numpy as np\n'), ((4932, 4970), 'numpy.array', 'np.array', (["self.config_hr_env['pR_min']"], {}), "(self.config_hr_env['pR_min'])\n", (4940, 4970), True, 'import numpy as np\n'), ((5051, 5089), 'numpy.array', 'np.array', (["self.config_hr_env['pR_max']"], {}), "(self.config_hr_env['pR_max'])\n", (5059, 5089), True, 'import numpy as np\n'), ((5171, 5209), 'numpy.array', 'np.array', (["self.config_hr_env['vR_min']"], {}), "(self.config_hr_env['vR_min'])\n", (5179, 5209), True, 'import numpy as np\n'), ((5290, 5328), 'numpy.array', 'np.array', (["self.config_hr_env['vR_max']"], {}), "(self.config_hr_env['vR_max'])\n", (5298, 5328), True, 'import numpy as np\n'), ((5410, 5448), 'numpy.array', 'np.array', (["self.config_hr_env['uR_min']"], {}), "(self.config_hr_env['uR_min'])\n", (5418, 5448), True, 'import numpy as np\n'), ((5529, 5567), 'numpy.array', 'np.array', (["self.config_hr_env['uR_max']"], {}), "(self.config_hr_env['uR_max'])\n", (5537, 5567), True, 'import numpy as np\n'), ((5650, 5689), 'numpy.array', 'np.array', (["self.config_hr_env['pR_goal']"], {}), "(self.config_hr_env['pR_goal'])\n", (5658, 5689), True, 'import numpy as np\n'), ((6166, 6225), 'hr_planning.env_gridworld_human.hmdp.HumanMdp', 'HumanMdp', ([], {'config_path': 'config_path_hmdp', 'cache_dir': 'cache_dir'}), '(config_path=config_path_hmdp, cache_dir=cache_dir)\n', (6174, 6225), False, 'from hr_planning.env_gridworld_human.hmdp import HumanMdp\n'), ((6661, 6697), 'numpy.array', 'np.array', (["self.config_hr_env['pH_0']"], {}), "(self.config_hr_env['pH_0'])\n", (6669, 6697), True, 'import numpy as np\n'), ((6772, 6808), 'numpy.array', 'np.array', (["self.config_hr_env['vH_0']"], {}), "(self.config_hr_env['vH_0'])\n", (6780, 6808), True, 'import numpy as np\n'), ((7308, 7332), 'numpy.zeros', 'np.zeros', (['(self.n_pH, 1)'], {}), '((self.n_pH, 1))\n', (7316, 7332), True, 'import numpy as np\n'), ((7355, 7379), 'numpy.zeros', 'np.zeros', (['(self.n_pH, 1)'], {}), '((self.n_pH, 1))\n', (7363, 7379), True, 'import numpy as np\n'), ((7790, 7836), 'numpy.array', 'np.array', (["self.config_hr_env['Hmpc']['vH_min']"], {}), "(self.config_hr_env['Hmpc']['vH_min'])\n", (7798, 7836), 
True, 'import numpy as np\n'), ((7917, 7963), 'numpy.array', 'np.array', (["self.config_hr_env['Hmpc']['vH_max']"], {}), "(self.config_hr_env['Hmpc']['vH_max'])\n", (7925, 7963), True, 'import numpy as np\n'), ((8044, 8090), 'numpy.array', 'np.array', (["self.config_hr_env['Hmpc']['uH_min']"], {}), "(self.config_hr_env['Hmpc']['uH_min'])\n", (8052, 8090), True, 'import numpy as np\n'), ((8171, 8217), 'numpy.array', 'np.array', (["self.config_hr_env['Hmpc']['uH_max']"], {}), "(self.config_hr_env['Hmpc']['uH_max'])\n", (8179, 8217), True, 'import numpy as np\n'), ((11104, 11125), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""Paired"""'], {}), "('Paired')\n", (11115, 11125), False, 'from matplotlib import cm\n'), ((11478, 11496), 'numpy.array', 'np.array', (['[[0, 0]]'], {}), '([[0, 0]])\n', (11486, 11496), True, 'import numpy as np\n'), ((11976, 11995), 'numpy.zeros', 'np.zeros', (['self.n_vR'], {}), '(self.n_vR)\n', (11984, 11995), True, 'import numpy as np\n'), ((12397, 12445), 'numpy.reshape', 'np.reshape', (['self.safe_pot_offset', '(self.n_vR, 1)'], {}), '(self.safe_pot_offset, (self.n_vR, 1))\n', (12407, 12445), True, 'import numpy as np\n'), ((12496, 12519), 'numpy.vstack', 'np.vstack', (['(tmp2, tmp2)'], {}), '((tmp2, tmp2))\n', (12505, 12519), True, 'import numpy as np\n'), ((12934, 12962), 'numpy.diag', 'np.diag', (['(pH_noise + vH_noise)'], {}), '(pH_noise + vH_noise)\n', (12941, 12962), True, 'import numpy as np\n'), ((13862, 13889), 'numpy.zeros', 'np.zeros', (['(uts.y.shape[1],)'], {}), '((uts.y.shape[1],))\n', (13870, 13889), True, 'import numpy as np\n'), ((14007, 14023), 'numpy.zeros', 'np.zeros', (['(n_x,)'], {}), '((n_x,))\n', (14015, 14023), True, 'import numpy as np\n'), ((16786, 16803), 'numpy.squeeze', 'np.squeeze', (['cur_p'], {}), '(cur_p)\n', (16796, 16803), True, 'import numpy as np\n'), ((16820, 16837), 'numpy.squeeze', 'np.squeeze', (['cur_v'], {}), '(cur_v)\n', (16830, 16837), True, 'import numpy as np\n'), ((17029, 17142), 'numpy.linspace', 'np.linspace', ([], {'start': '(0.0)', 'stop': '(horizon_us_mpc * dt_us - dt_us)', 'num': 'horizon_us_mpc', 'endpoint': '(True)', 'retstep': '(True)'}), '(start=0.0, stop=horizon_us_mpc * dt_us - dt_us, num=\n horizon_us_mpc, endpoint=True, retstep=True)\n', (17040, 17142), True, 'import numpy as np\n'), ((17188, 17256), 'hr_planning.utils_interp.waypts_2_zeroOrderHold', 'waypts_2_zeroOrderHold', ([], {'wp_traj': 'us_1_T', 'dt': 'dt_us', 'axis': '(0)', 'plot': '(False)'}), '(wp_traj=us_1_T, dt=dt_us, axis=0, plot=False)\n', (17210, 17256), False, 'from hr_planning.utils_interp import waypts_2_zeroOrderHold\n'), ((17532, 17625), 'numpy.linspace', 'np.linspace', ([], {'start': '(0.0)', 'stop': 'self.step_time', 'num': 'horizon_ode', 'endpoint': '(True)', 'retstep': '(True)'}), '(start=0.0, stop=self.step_time, num=horizon_ode, endpoint=True,\n retstep=True)\n', (17543, 17625), True, 'import numpy as np\n'), ((17791, 17884), 'numpy.linspace', 'np.linspace', ([], {'start': '(0.0)', 'stop': 'self.step_time', 'num': 'horizon_ode', 'endpoint': '(True)', 'retstep': '(True)'}), '(start=0.0, stop=self.step_time, num=horizon_ode, endpoint=True,\n retstep=True)\n', (17802, 17884), True, 'import numpy as np\n'), ((18181, 18269), 'scipy.integrate.odeint', 'odeint', ([], {'func': 'self.dynamics', 'y0': 'y0', 't': 'dts_ode', 'args': '(u_interp, end_time_us_mpc, mass)'}), '(func=self.dynamics, y0=y0, t=dts_ode, args=(u_interp,\n end_time_us_mpc, mass))\n', (18187, 18269), False, 'from scipy.integrate import ode, odeint\n'), ((21247, 21279), 
'numpy.reshape', 'np.reshape', (['cur_pH', '(self.n_pH,)'], {}), '(cur_pH, (self.n_pH,))\n', (21257, 21279), True, 'import numpy as np\n'), ((22779, 22898), 'numpy.linspace', 'np.linspace', ([], {'start': '(0.0)', 'stop': '(horizon_ref_hmdp * dt_ref_hmdp)', 'num': '(horizon_ref_hmdp + 1)', 'endpoint': '(True)', 'retstep': '(True)'}), '(start=0.0, stop=horizon_ref_hmdp * dt_ref_hmdp, num=\n horizon_ref_hmdp + 1, endpoint=True, retstep=True)\n', (22790, 22898), True, 'import numpy as np\n'), ((25885, 25937), 'numpy.zeros', 'np.zeros', (['(min_horizon, self.n_uR)'], {'dtype': 'np.float32'}), '((min_horizon, self.n_uR), dtype=np.float32)\n', (25893, 25937), True, 'import numpy as np\n'), ((29246, 29300), 'numpy.clip', 'np.clip', (['next_pH', '(self.pH_min + tol)', '(self.pH_max - tol)'], {}), '(next_pH, self.pH_min + tol, self.pH_max - tol)\n', (29253, 29300), True, 'import numpy as np\n'), ((29315, 29369), 'numpy.clip', 'np.clip', (['next_vH', '(self.vH_min + tol)', '(self.vH_max - tol)'], {}), '(next_vH, self.vH_min + tol, self.vH_max - tol)\n', (29322, 29369), True, 'import numpy as np\n'), ((29384, 29438), 'numpy.clip', 'np.clip', (['next_pR', '(self.pR_min + tol)', '(self.pR_max - tol)'], {}), '(next_pR, self.pR_min + tol, self.pR_max - tol)\n', (29391, 29438), True, 'import numpy as np\n'), ((29453, 29507), 'numpy.clip', 'np.clip', (['next_vR', '(self.vR_min + tol)', '(self.vR_max - tol)'], {}), '(next_vR, self.vR_min + tol, self.vR_max - tol)\n', (29460, 29507), True, 'import numpy as np\n'), ((31910, 31921), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (31917, 31921), True, 'import matplotlib.pyplot as plt\n'), ((34656, 34667), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (34663, 34667), True, 'import matplotlib.pyplot as plt\n'), ((35922, 35933), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (35929, 35933), True, 'import matplotlib.pyplot as plt\n'), ((37756, 37767), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (37763, 37767), True, 'import matplotlib.pyplot as plt\n'), ((38031, 38080), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': 'horizon', 'endpoint': '(True)'}), '(0.0, 1.0, num=horizon, endpoint=True)\n', (38042, 38080), True, 'import numpy as np\n'), ((39420, 39454), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'legend_handles'}), '(handles=legend_handles)\n', (39430, 39454), True, 'import matplotlib.pyplot as plt\n'), ((40929, 41018), 'numpy.linspace', 'np.linspace', ([], {'start': '(0.0)', 'stop': 'self.step_time', 'num': 'horizon', 'endpoint': '(True)', 'retstep': '(True)'}), '(start=0.0, stop=self.step_time, num=horizon, endpoint=True,\n retstep=True)\n', (40940, 41018), True, 'import numpy as np\n'), ((41263, 41348), 'scipy.interpolate.CubicHermiteSpline', 'interpolate.CubicHermiteSpline', ([], {'x': 'dts_low_res', 'y': 'y', 'dydx': 'dydt', 'extrapolate': '(False)'}), '(x=dts_low_res, y=y, dydx=dydt, extrapolate=False\n )\n', (41293, 41348), False, 'from scipy import interpolate\n'), ((41567, 41652), 'scipy.interpolate.CubicHermiteSpline', 'interpolate.CubicHermiteSpline', ([], {'x': 'dts_low_res', 'y': 'y', 'dydx': 'dydt', 'extrapolate': '(False)'}), '(x=dts_low_res, y=y, dydx=dydt, extrapolate=False\n )\n', (41597, 41652), False, 'from scipy import interpolate\n'), ((44672, 44702), 'numpy.linalg.norm', 'np.linalg.norm', (['(vR - vH)'], {'ord': '(2)'}), '(vR - vH, ord=2)\n', (44686, 44702), True, 'import numpy as np\n'), ((45215, 45234), 'os.path.split', 'os.path.split', (['path'], {}), 
'(path)\n', (45228, 45234), False, 'import os\n'), ((46044, 46066), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (46052, 46066), True, 'import numpy as np\n'), ((46639, 46690), 'numpy.zeros', 'np.zeros', (['(horizon + 1, env.n_pR)'], {'dtype': 'np.float32'}), '((horizon + 1, env.n_pR), dtype=np.float32)\n', (46647, 46690), True, 'import numpy as np\n'), ((46707, 46758), 'numpy.zeros', 'np.zeros', (['(horizon + 1, env.n_vR)'], {'dtype': 'np.float32'}), '((horizon + 1, env.n_vR), dtype=np.float32)\n', (46715, 46758), True, 'import numpy as np\n'), ((46775, 46826), 'numpy.zeros', 'np.zeros', (['(horizon + 1, env.n_pH)'], {'dtype': 'np.float32'}), '((horizon + 1, env.n_pH), dtype=np.float32)\n', (46783, 46826), True, 'import numpy as np\n'), ((46843, 46894), 'numpy.zeros', 'np.zeros', (['(horizon + 1, env.n_vH)'], {'dtype': 'np.float32'}), '((horizon + 1, env.n_vH), dtype=np.float32)\n', (46851, 46894), True, 'import numpy as np\n'), ((46917, 46939), 'numpy.squeeze', 'np.squeeze', (['env.cur_pR'], {}), '(env.cur_pR)\n', (46927, 46939), True, 'import numpy as np\n'), ((46964, 46986), 'numpy.squeeze', 'np.squeeze', (['env.cur_vR'], {}), '(env.cur_vR)\n', (46974, 46986), True, 'import numpy as np\n'), ((47011, 47033), 'numpy.squeeze', 'np.squeeze', (['env.cur_pH'], {}), '(env.cur_pH)\n', (47021, 47033), True, 'import numpy as np\n'), ((47058, 47080), 'numpy.squeeze', 'np.squeeze', (['env.cur_vH'], {}), '(env.cur_vH)\n', (47068, 47080), True, 'import numpy as np\n'), ((48115, 48125), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (48123, 48125), True, 'import matplotlib.pyplot as plt\n'), ((48134, 48143), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (48141, 48143), True, 'import matplotlib.pyplot as plt\n'), ((48152, 48161), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (48159, 48161), True, 'import matplotlib.pyplot as plt\n'), ((48170, 48181), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (48179, 48181), True, 'import matplotlib.pyplot as plt\n'), ((3207, 3243), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'yaml.FullLoader'}), '(f, Loader=yaml.FullLoader)\n', (3216, 3243), False, 'import yaml\n'), ((4624, 4642), 'numpy.array', 'np.array', (['pR_0_arg'], {}), '(pR_0_arg)\n', (4632, 4642), True, 'import numpy as np\n'), ((6975, 7018), 'numpy.array', 'np.array', (["self.config_hr_env['pH_shoulder']"], {}), "(self.config_hr_env['pH_shoulder'])\n", (6983, 7018), True, 'import numpy as np\n'), ((8672, 8692), 'numpy.copy', 'np.copy', (['self.pH_min'], {}), '(self.pH_min)\n', (8679, 8692), True, 'import numpy as np\n'), ((8729, 8749), 'numpy.copy', 'np.copy', (['self.pH_max'], {}), '(self.pH_max)\n', (8736, 8749), True, 'import numpy as np\n'), ((8800, 8848), 'numpy.array', 'np.array', (["self.config_hr_env['pH_min_workspace']"], {}), "(self.config_hr_env['pH_min_workspace'])\n", (8808, 8848), True, 'import numpy as np\n'), ((8885, 8933), 'numpy.array', 'np.array', (["self.config_hr_env['pH_max_workspace']"], {}), "(self.config_hr_env['pH_max_workspace'])\n", (8893, 8933), True, 'import numpy as np\n'), ((9120, 9137), 'numpy.eye', 'np.eye', (['self.n_pH'], {}), '(self.n_pH)\n', (9126, 9137), True, 'import numpy as np\n'), ((9193, 9210), 'numpy.eye', 'np.eye', (['self.n_uH'], {}), '(self.n_uH)\n', (9199, 9210), True, 'import numpy as np\n'), ((9273, 9290), 'numpy.eye', 'np.eye', (['self.n_pH'], {}), '(self.n_pH)\n', (9279, 9290), True, 'import numpy as np\n'), ((9360, 9377), 'numpy.eye', 'np.eye', (['self.n_pH'], {}), '(self.n_pH)\n', (9366, 
9377), True, 'import numpy as np\n'), ((14446, 14472), 'numpy.matlib.repmat', 'repmat', (['mean', 'n_samples', '(1)'], {}), '(mean, n_samples, 1)\n', (14452, 14472), False, 'from numpy.matlib import repmat\n'), ((18140, 18165), 'numpy.hstack', 'np.hstack', (['(cur_p, cur_v)'], {}), '((cur_p, cur_v))\n', (18149, 18165), True, 'import numpy as np\n'), ((18700, 18778), 'sdeint.itoint', 'sdeint.itoint', ([], {'f': 'self.dynamics_H_sde', 'G': 'self.noise_H_sde', 'y0': 'y0', 'tspan': 'dts_ode'}), '(f=self.dynamics_H_sde, G=self.noise_H_sde, y0=y0, tspan=dts_ode)\n', (18713, 18778), False, 'import sdeint\n'), ((19394, 19486), 'numpy.linspace', 'np.linspace', ([], {'start': '(0.0)', 'stop': 'end_time', 'num': 'horizon_full_ode', 'endpoint': '(True)', 'retstep': '(True)'}), '(start=0.0, stop=end_time, num=horizon_full_ode, endpoint=True,\n retstep=True)\n', (19405, 19486), True, 'import numpy as np\n'), ((19595, 19688), 'scipy.integrate.odeint', 'odeint', ([], {'func': 'self.dynamics', 'y0': 'y0', 't': 'dts_full_ode', 'args': '(u_interp, end_time_us_mpc, mass)'}), '(func=self.dynamics, y0=y0, t=dts_full_ode, args=(u_interp,\n end_time_us_mpc, mass))\n', (19601, 19688), False, 'from scipy.integrate import ode, odeint\n'), ((19980, 19994), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (19992, 19994), True, 'import matplotlib.pyplot as plt\n'), ((21696, 21734), 'numpy.ceil', 'np.ceil', (['(self.step_time / self.dt_Hmdp)'], {}), '(self.step_time / self.dt_Hmdp)\n', (21703, 21734), True, 'import numpy as np\n'), ((24512, 24625), 'numpy.linspace', 'np.linspace', ([], {'start': '(0.0)', 'stop': '(horizon_us_mpc * dt_us - dt_us)', 'num': 'horizon_us_mpc', 'endpoint': '(True)', 'retstep': '(True)'}), '(start=0.0, stop=horizon_us_mpc * dt_us - dt_us, num=\n horizon_us_mpc, endpoint=True, retstep=True)\n', (24523, 24625), True, 'import numpy as np\n'), ((24682, 24723), 'numpy.hstack', 'np.hstack', (['(dts_us_mpc, dts_ref_hmdp[-1])'], {}), '((dts_us_mpc, dts_ref_hmdp[-1]))\n', (24691, 24723), True, 'import numpy as np\n'), ((24750, 24784), 'numpy.vstack', 'np.vstack', (['(cur_pH.T, pHs_1_T_opt)'], {}), '((cur_pH.T, pHs_1_T_opt))\n', (24759, 24784), True, 'import numpy as np\n'), ((24902, 24912), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24910, 24912), True, 'import matplotlib.pyplot as plt\n'), ((25831, 25869), 'numpy.ceil', 'np.ceil', (['(self.step_time / self.dt_Rmpc)'], {}), '(self.step_time / self.dt_Rmpc)\n', (25838, 25869), True, 'import numpy as np\n'), ((26553, 26563), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26561, 26563), True, 'import matplotlib.pyplot as plt\n'), ((30758, 30776), 'hr_planning.visualization.utils_visualization.print_FAIL', 'print_FAIL', (['"""SAFE"""'], {}), "('SAFE')\n", (30768, 30776), False, 'from hr_planning.visualization.utils_visualization import print_FAIL, print_OK\n'), ((31825, 31837), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (31835, 31837), True, 'import matplotlib.pyplot as plt\n'), ((33186, 33291), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xs', 'ys'], {'alpha': 'self.alpha_hmdp_grid', 'c': 'self.color_hmdp_grid', 'marker': 'self.marker_hmdp_grid'}), '(xs, ys, alpha=self.alpha_hmdp_grid, c=self.color_hmdp_grid,\n marker=self.marker_hmdp_grid)\n', (33197, 33291), True, 'import matplotlib.pyplot as plt\n'), ((33348, 33470), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xs_obs', 'ys_obs'], {'alpha': 'self.alpha_hmdp_obstacle', 'c': 'self.color_hmdp_grid', 'marker': 'self.marker_hmdp_obstacle'}), 
'(xs_obs, ys_obs, alpha=self.alpha_hmdp_obstacle, c=self.\n color_hmdp_grid, marker=self.marker_hmdp_obstacle)\n', (33359, 33470), True, 'import matplotlib.pyplot as plt\n'), ((35863, 35875), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (35873, 35875), True, 'import matplotlib.pyplot as plt\n'), ((36802, 36820), 'numpy.max', 'np.max', (['traj[:, 0]'], {}), '(traj[:, 0])\n', (36808, 36820), True, 'import numpy as np\n'), ((36855, 36873), 'numpy.min', 'np.min', (['traj[:, 0]'], {}), '(traj[:, 0])\n', (36861, 36873), True, 'import numpy as np\n'), ((36908, 36926), 'numpy.max', 'np.max', (['traj[:, 1]'], {}), '(traj[:, 1])\n', (36914, 36926), True, 'import numpy as np\n'), ((36961, 36979), 'numpy.min', 'np.min', (['traj[:, 1]'], {}), '(traj[:, 1])\n', (36967, 36979), True, 'import numpy as np\n'), ((37669, 37681), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (37679, 37681), True, 'import matplotlib.pyplot as plt\n'), ((38187, 38229), 'casadi.reshape', 'cas_reshape', (['pHs_1_T[i, :]', '(self.n_pH, 1)'], {}), '(pHs_1_T[i, :], (self.n_pH, 1))\n', (38198, 38229), True, 'from casadi import reshape as cas_reshape\n'), ((38248, 38298), 'casadi.reshape', 'cas_reshape', (['qHs_1_T[i, :]', '(self.n_pH, self.n_pH)'], {}), '(qHs_1_T[i, :], (self.n_pH, self.n_pH))\n', (38259, 38298), True, 'from casadi import reshape as cas_reshape\n'), ((44027, 44061), 'numpy.reshape', 'np.reshape', (['offset', '(self.n_pR, 1)'], {}), '(offset, (self.n_pR, 1))\n', (44037, 44061), True, 'import numpy as np\n'), ((44156, 44191), 'numpy.linalg.norm', 'np.linalg.norm', (['(coll_pt - pH)'], {'ord': '(2)'}), '(coll_pt - pH, ord=2)\n', (44170, 44191), True, 'import numpy as np\n'), ((47335, 47357), 'numpy.array', 'np.array', (['[[0.0, 0.0]]'], {}), '([[0.0, 0.0]])\n', (47343, 47357), True, 'import numpy as np\n'), ((47688, 47710), 'numpy.squeeze', 'np.squeeze', (['env.cur_pR'], {}), '(env.cur_pR)\n', (47698, 47710), True, 'import numpy as np\n'), ((47739, 47761), 'numpy.squeeze', 'np.squeeze', (['env.cur_vR'], {}), '(env.cur_vR)\n', (47749, 47761), True, 'import numpy as np\n'), ((47790, 47812), 'numpy.squeeze', 'np.squeeze', (['env.cur_pH'], {}), '(env.cur_pH)\n', (47800, 47812), True, 'import numpy as np\n'), ((47841, 47863), 'numpy.squeeze', 'np.squeeze', (['env.cur_vH'], {}), '(env.cur_vH)\n', (47851, 47863), True, 'import numpy as np\n'), ((4230, 4266), 'numpy.array', 'np.array', (["self.config_hr_env['pR_0']"], {}), "(self.config_hr_env['pR_0'])\n", (4238, 4266), True, 'import numpy as np\n'), ((9930, 9953), 'numpy.squeeze', 'np.squeeze', (['self.pH_min'], {}), '(self.pH_min)\n', (9940, 9953), True, 'import numpy as np\n'), ((9979, 10002), 'numpy.squeeze', 'np.squeeze', (['self.pH_max'], {}), '(self.pH_max)\n', (9989, 10002), True, 'import numpy as np\n'), ((10028, 10051), 'numpy.squeeze', 'np.squeeze', (['self.vH_min'], {}), '(self.vH_min)\n', (10038, 10051), True, 'import numpy as np\n'), ((10077, 10100), 'numpy.squeeze', 'np.squeeze', (['self.vH_max'], {}), '(self.vH_max)\n', (10087, 10100), True, 'import numpy as np\n'), ((10126, 10149), 'numpy.squeeze', 'np.squeeze', (['self.uH_min'], {}), '(self.uH_min)\n', (10136, 10149), True, 'import numpy as np\n'), ((10175, 10198), 'numpy.squeeze', 'np.squeeze', (['self.uH_max'], {}), '(self.uH_max)\n', (10185, 10198), True, 'import numpy as np\n'), ((12052, 12070), 'numpy.sqrt', 'np.sqrt', (['self.n_vR'], {}), '(self.n_vR)\n', (12059, 12070), True, 'import numpy as np\n'), ((12624, 12641), 'numpy.eye', 'np.eye', (['self.n_vR'], {}), 
'(self.n_vR)\n', (12630, 12641), True, 'import numpy as np\n'), ((14346, 14371), 'numpy.matlib.repmat', 'repmat', (['std', 'n_samples', '(1)'], {}), '(std, n_samples, 1)\n', (14352, 14371), False, 'from numpy.matlib import repmat\n'), ((14393, 14424), 'numpy.random.randn', 'np.random.randn', (['n_samples', 'n_s'], {}), '(n_samples, n_s)\n', (14408, 14424), True, 'import numpy as np\n'), ((14597, 14622), 'numpy.random.rand', 'np.random.rand', (['self.n_uR'], {}), '(self.n_uR)\n', (14611, 14622), True, 'import numpy as np\n'), ((17461, 17498), 'numpy.ceil', 'np.ceil', (['(self.step_time / self.dt_env)'], {}), '(self.step_time / self.dt_env)\n', (17468, 17498), True, 'import numpy as np\n'), ((17720, 17757), 'numpy.ceil', 'np.ceil', (['(self.step_time / self.dt_env)'], {}), '(self.step_time / self.dt_env)\n', (17727, 17757), True, 'import numpy as np\n'), ((19784, 19872), 'sdeint.itoint', 'sdeint.itoint', ([], {'f': 'self.dynamics_H_sde', 'G': 'self.noise_H_sde', 'y0': 'y0', 'tspan': 'dts_full_ode'}), '(f=self.dynamics_H_sde, G=self.noise_H_sde, y0=y0, tspan=\n dts_full_ode)\n', (19797, 19872), False, 'import sdeint\n'), ((29542, 29560), 'numpy.squeeze', 'np.squeeze', (['cur_pR'], {}), '(cur_pR)\n', (29552, 29560), True, 'import numpy as np\n'), ((29599, 29618), 'numpy.squeeze', 'np.squeeze', (['next_pR'], {}), '(next_pR)\n', (29609, 29618), True, 'import numpy as np\n'), ((29658, 29676), 'numpy.squeeze', 'np.squeeze', (['cur_vR'], {}), '(cur_vR)\n', (29668, 29676), True, 'import numpy as np\n'), ((29715, 29734), 'numpy.squeeze', 'np.squeeze', (['next_vR'], {}), '(next_vR)\n', (29725, 29734), True, 'import numpy as np\n'), ((29774, 29792), 'numpy.squeeze', 'np.squeeze', (['cur_pH'], {}), '(cur_pH)\n', (29784, 29792), True, 'import numpy as np\n'), ((29831, 29850), 'numpy.squeeze', 'np.squeeze', (['next_pH'], {}), '(next_pH)\n', (29841, 29850), True, 'import numpy as np\n'), ((29890, 29908), 'numpy.squeeze', 'np.squeeze', (['cur_vH'], {}), '(cur_vH)\n', (29900, 29908), True, 'import numpy as np\n'), ((29947, 29966), 'numpy.squeeze', 'np.squeeze', (['next_vH'], {}), '(next_vH)\n', (29957, 29966), True, 'import numpy as np\n'), ((30346, 30384), 'hr_planning.visualization.utils_visualization.print_FAIL', 'print_FAIL', (['"""Collision && safe impact"""'], {}), "('Collision && safe impact')\n", (30356, 30384), False, 'from hr_planning.visualization.utils_visualization import print_FAIL, print_OK\n'), ((30553, 30593), 'hr_planning.visualization.utils_visualization.print_FAIL', 'print_FAIL', (['"""Collision && UNsafe impact"""'], {}), "('Collision && UNsafe impact')\n", (30563, 30593), False, 'from hr_planning.visualization.utils_visualization import print_FAIL, print_OK\n'), ((38794, 38836), 'casadi.reshape', 'cas_reshape', (['pHs_1_T[i, :]', '(self.n_pH, 1)'], {}), '(pHs_1_T[i, :], (self.n_pH, 1))\n', (38805, 38836), True, 'from casadi import reshape as cas_reshape\n'), ((40826, 40881), 'numpy.ceil', 'np.ceil', (['(self.step_time / self.dt_pH_pR_safety_checking)'], {}), '(self.step_time / self.dt_pH_pR_safety_checking)\n', (40833, 40881), True, 'import numpy as np\n'), ((46266, 46286), 'numpy.array', 'np.array', (['[0.3, 0.0]'], {}), '([0.3, 0.0])\n', (46274, 46286), True, 'import numpy as np\n'), ((46325, 46345), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (46333, 46345), True, 'import numpy as np\n'), ((46384, 46404), 'numpy.array', 'np.array', (['[0.1, 0.0]'], {}), '([0.1, 0.0])\n', (46392, 46404), True, 'import numpy as np\n'), ((46443, 46463), 'numpy.array', 'np.array', 
(['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (46451, 46463), True, 'import numpy as np\n'), ((4353, 4400), 'numpy.array', 'np.array', (["self.config_hr_env['pR_0_coll_avoid']"], {}), "(self.config_hr_env['pR_0_coll_avoid'])\n", (4361, 4400), True, 'import numpy as np\n'), ((12605, 12622), 'numpy.eye', 'np.eye', (['self.n_vR'], {}), '(self.n_vR)\n', (12611, 12622), True, 'import numpy as np\n'), ((19315, 19346), 'numpy.ceil', 'np.ceil', (['(end_time / self.dt_env)'], {}), '(end_time / self.dt_env)\n', (19322, 19346), True, 'import numpy as np\n'), ((21305, 21328), 'numpy.squeeze', 'np.squeeze', (['self.pH_max'], {}), '(self.pH_max)\n', (21315, 21328), True, 'import numpy as np\n'), ((21361, 21384), 'numpy.squeeze', 'np.squeeze', (['self.pH_min'], {}), '(self.pH_min)\n', (21371, 21384), True, 'import numpy as np\n'), ((26237, 26256), 'numpy.zeros', 'np.zeros', (['self.n_uR'], {}), '(self.n_uR)\n', (26245, 26256), True, 'import numpy as np\n'), ((30960, 30992), 'numpy.squeeze', 'np.squeeze', (['self.safe_pot_offset'], {}), '(self.safe_pot_offset)\n', (30970, 30992), True, 'import numpy as np\n'), ((4485, 4530), 'numpy.array', 'np.array', (["self.config_hr_env['pR_0_handover']"], {}), "(self.config_hr_env['pR_0_handover'])\n", (4493, 4530), True, 'import numpy as np\n'), ((8312, 8335), 'numpy.squeeze', 'np.squeeze', (['self.pH_max'], {}), '(self.pH_max)\n', (8322, 8335), True, 'import numpy as np\n'), ((8379, 8402), 'numpy.squeeze', 'np.squeeze', (['self.pH_min'], {}), '(self.pH_min)\n', (8389, 8402), True, 'import numpy as np\n'), ((8445, 8468), 'numpy.squeeze', 'np.squeeze', (['self.vH_max'], {}), '(self.vH_max)\n', (8455, 8468), True, 'import numpy as np\n'), ((8512, 8535), 'numpy.squeeze', 'np.squeeze', (['self.vH_min'], {}), '(self.vH_min)\n', (8522, 8535), True, 'import numpy as np\n'), ((15221, 15244), 'numpy.squeeze', 'np.squeeze', (['self.pR_max'], {}), '(self.pR_max)\n', (15231, 15244), True, 'import numpy as np\n'), ((15283, 15306), 'numpy.squeeze', 'np.squeeze', (['self.pR_min'], {}), '(self.pR_min)\n', (15293, 15306), True, 'import numpy as np\n'), ((15344, 15367), 'numpy.squeeze', 'np.squeeze', (['self.vR_max'], {}), '(self.vR_max)\n', (15354, 15367), True, 'import numpy as np\n'), ((15406, 15429), 'numpy.squeeze', 'np.squeeze', (['self.vR_min'], {}), '(self.vR_min)\n', (15416, 15429), True, 'import numpy as np\n'), ((15467, 15490), 'numpy.squeeze', 'np.squeeze', (['self.pH_max'], {}), '(self.pH_max)\n', (15477, 15490), True, 'import numpy as np\n'), ((15529, 15552), 'numpy.squeeze', 'np.squeeze', (['self.pH_min'], {}), '(self.pH_min)\n', (15539, 15552), True, 'import numpy as np\n'), ((15590, 15613), 'numpy.squeeze', 'np.squeeze', (['self.vH_max'], {}), '(self.vH_max)\n', (15600, 15613), True, 'import numpy as np\n'), ((15652, 15675), 'numpy.squeeze', 'np.squeeze', (['self.vH_min'], {}), '(self.vH_min)\n', (15662, 15675), True, 'import numpy as np\n'), ((22495, 22518), 'numpy.squeeze', 'np.squeeze', (['self.pH_max'], {}), '(self.pH_max)\n', (22505, 22518), True, 'import numpy as np\n'), ((22566, 22589), 'numpy.squeeze', 'np.squeeze', (['self.pH_min'], {}), '(self.pH_min)\n', (22576, 22589), True, 'import numpy as np\n'), ((28197, 28220), 'numpy.squeeze', 'np.squeeze', (['self.pR_max'], {}), '(self.pR_max)\n', (28207, 28220), True, 'import numpy as np\n'), ((28261, 28284), 'numpy.squeeze', 'np.squeeze', (['self.pR_min'], {}), '(self.pR_min)\n', (28271, 28284), True, 'import numpy as np\n'), ((28324, 28347), 'numpy.squeeze', 'np.squeeze', (['self.vR_max'], {}), '(self.vR_max)\n', 
(28334, 28347), True, 'import numpy as np\n'), ((28388, 28411), 'numpy.squeeze', 'np.squeeze', (['self.vR_min'], {}), '(self.vR_min)\n', (28398, 28411), True, 'import numpy as np\n'), ((28451, 28474), 'numpy.squeeze', 'np.squeeze', (['self.pH_max'], {}), '(self.pH_max)\n', (28461, 28474), True, 'import numpy as np\n'), ((28515, 28538), 'numpy.squeeze', 'np.squeeze', (['self.pH_min'], {}), '(self.pH_min)\n', (28525, 28538), True, 'import numpy as np\n'), ((28578, 28601), 'numpy.squeeze', 'np.squeeze', (['self.vH_max'], {}), '(self.vH_max)\n', (28588, 28601), True, 'import numpy as np\n'), ((28642, 28665), 'numpy.squeeze', 'np.squeeze', (['self.vH_min'], {}), '(self.vH_min)\n', (28652, 28665), True, 'import numpy as np\n')] |
# -*- coding: utf8 -*-
# Copyright 2019 JSALT2019 Distant Supervision Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import six
import numpy as np
import torch
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
def to_device(m, x):
"""Function to send tensor into corresponding device
:param torch.nn.Module m: torch module
:param torch.Tensor x: torch tensor
:return: torch tensor located in the same place as torch module
:rtype: torch.Tensor
"""
assert isinstance(m, torch.nn.Module)
device = next(m.parameters()).device
return x.to(device)
# TODO: use the already existing code
def make_pad_mask(lengths, xs=None, length_dim=-1):
"""Function to make mask tensor containing indices of padded part
e.g.: lengths = [5, 3, 2]
mask = [[0, 0, 0, 0 ,0],
[0, 0, 0, 1, 1],
[0, 0, 1, 1, 1]]
:param list lengths: list of lengths (B)
:param torch.Tensor xs: Make the shape to be like.
:param int length_dim:
:return: mask tensor containing indices of padded part (B, Tmax)
:rtype: torch.Tensor
"""
if length_dim == 0:
raise ValueError('length_dim cannot be 0: {}'.format(length_dim))
if not isinstance(lengths, list):
lengths = lengths.tolist()
bs = int(len(lengths))
if xs is None:
maxlen = int(max(lengths))
else:
maxlen = xs.size(length_dim)
seq_range = torch.arange(0, maxlen, dtype=torch.int64)
seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)
seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)
mask = seq_range_expand >= seq_length_expand
if xs is not None:
assert xs.size(0) == bs, (xs.size(0), bs)
if length_dim < 0:
length_dim = xs.dim() + length_dim
        # ind = (:, None, ..., None, :, None, ..., None)
ind = tuple(slice(None) if i in (0, length_dim) else None
for i in range(xs.dim()))
mask = mask[ind].expand_as(xs).to(xs.device)
return mask
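# Minimal usage sketch for make_pad_mask (illustrative only, not part of the
# original module): lengths [5, 3, 2] yield a (3, 5) boolean mask whose True
# entries mark the padded positions, matching the docstring example above.
def _demo_make_pad_mask():
    mask = make_pad_mask([5, 3, 2])
    assert mask.shape == (3, 5)
    assert bool(mask[1, 3]) and not bool(mask[0, 4])
    return mask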
def get_vgg2l_odim(idim, in_channel=3, out_channel=128):
idim = idim / in_channel
idim = np.ceil(np.array(idim, dtype=np.float32) / 2) # 1st max pooling
idim = np.ceil(np.array(idim, dtype=np.float32) / 2) # 2nd max pooling
    return int(idim) * out_channel # number of channels
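# Worked example (matches the __main__ block at the bottom of this file):
# idim=81 with in_channel=1 gives ceil(81/2)=41 after the first pool and
# ceil(41/2)=21 after the second, so the output dimension is 21 * 128 = 2688.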
class RNNP(torch.nn.Module):
"""RNN with projection layer module
:param int idim: dimension of inputs
:param int elayers: number of encoder layers
    :param int cdim: number of rnn units (results in cdim * 2 units if bidirectional)
:param int hdim: number of projection units
:param np.ndarray subsample: list of subsampling numbers
:param float dropout: dropout rate
:param str typ: The RNN type
"""
def __init__(self, idim, elayers, cdim, hdim, subsample, dropout, typ="blstm"):
super(RNNP, self).__init__()
bidir = typ[0] == "b"
for i in six.moves.range(elayers):
if i == 0:
inputdim = idim
else:
inputdim = hdim
rnn = torch.nn.LSTM(inputdim, cdim, dropout=dropout, num_layers=1, bidirectional=bidir,
batch_first=True) if "lstm" in typ \
else torch.nn.GRU(inputdim, cdim, dropout=dropout, num_layers=1, bidirectional=bidir, batch_first=True)
setattr(self, "%s%d" % ("birnn" if bidir else "rnn", i), rnn)
# bottleneck layer to merge
if bidir:
setattr(self, "bt%d" % i, torch.nn.Linear(2 * cdim, hdim))
else:
setattr(self, "bt%d" % i, torch.nn.Linear(cdim, hdim))
self.elayers = elayers
self.cdim = cdim
self.subsample = subsample
self.typ = typ
self.bidir = bidir
def forward(self, xs_pad, ilens, prev_state=None):
"""RNNP forward
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:param torch.Tensor prev_state: batch of previous RNN states
:return: batch of hidden state sequences (B, Tmax, hdim)
:rtype: torch.Tensor
"""
# logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))
elayer_states = []
for layer in six.moves.range(self.elayers):
xs_pack = pack_padded_sequence(xs_pad, ilens, batch_first=True)
rnn = getattr(self, ("birnn" if self.bidir else "rnn") + str(layer))
rnn.flatten_parameters()
if prev_state is not None and rnn.bidirectional:
prev_state = reset_backward_rnn_state(prev_state)
ys, states = rnn(xs_pack, hx=None if prev_state is None else prev_state[layer])
elayer_states.append(states)
# ys: utt list of frame x cdim x 2 (2: means bidirectional)
ys_pad, ilens = pad_packed_sequence(ys, batch_first=True)
sub = self.subsample[layer + 1]
if sub > 1:
ys_pad = ys_pad[:, ::sub]
ilens = [int(i + 1) // sub for i in ilens]
# (sum _utt frame_utt) x dim
projected = getattr(self, 'bt' + str(layer)
)(ys_pad.contiguous().view(-1, ys_pad.size(2)))
xs_pad = torch.tanh(projected.view(ys_pad.size(0), ys_pad.size(1), -1))
return xs_pad, ilens, elayer_states # x: utt list of frame x dim
class RNN(torch.nn.Module):
"""RNN module
:param int idim: dimension of inputs
:param int elayers: number of encoder layers
    :param int cdim: number of rnn units (results in cdim * 2 units if bidirectional)
:param int hdim: number of final projection units
:param float dropout: dropout rate
:param str typ: The RNN type
"""
def __init__(self, idim, elayers, cdim, hdim, dropout, typ="blstm"):
super(RNN, self).__init__()
bidir = typ[0] == "b"
self.nbrnn = torch.nn.LSTM(idim, cdim, elayers, batch_first=True,
dropout=dropout, bidirectional=bidir) if "lstm" in typ \
else torch.nn.GRU(idim, cdim, elayers, batch_first=True, dropout=dropout,
bidirectional=bidir)
if bidir:
self.l_last = torch.nn.Linear(cdim * 2, hdim)
else:
self.l_last = torch.nn.Linear(cdim, hdim)
self.typ = typ
def forward(self, xs_pad, ilens, prev_state=None):
"""RNN forward
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, D)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:param torch.Tensor prev_state: batch of previous RNN states
:return: batch of hidden state sequences (B, Tmax, eprojs)
:rtype: torch.Tensor
"""
logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))
xs_pack = pack_padded_sequence(xs_pad, ilens, batch_first=True)
self.nbrnn.flatten_parameters()
if prev_state is not None and self.nbrnn.bidirectional:
# We assume that when previous state is passed, it means that we're streaming the input
# and therefore cannot propagate backward BRNN state (otherwise it goes in the wrong direction)
prev_state = reset_backward_rnn_state(prev_state)
ys, states = self.nbrnn(xs_pack, hx=prev_state)
# ys: utt list of frame x cdim x 2 (2: means bidirectional)
ys_pad, ilens = pad_packed_sequence(ys, batch_first=True)
# (sum _utt frame_utt) x dim
projected = torch.tanh(self.l_last(
ys_pad.contiguous().view(-1, ys_pad.size(2))))
xs_pad = projected.view(ys_pad.size(0), ys_pad.size(1), -1)
return xs_pad, ilens, states # x: utt list of frame x dim
def reset_backward_rnn_state(states):
"""Sets backward BRNN states to zeroes - useful in processing of sliding windows over the inputs"""
if isinstance(states, (list, tuple)):
for state in states:
state[1::2] = 0.
else:
states[1::2] = 0.
return states
class VGG2L(torch.nn.Module):
"""VGG-like module
:param int in_channel: number of input channels
"""
def __init__(self, in_channel=1):
super(VGG2L, self).__init__()
# CNN layer (VGG motivated)
self.conv1_1 = torch.nn.Conv2d(in_channel, 64, 3, stride=1, padding=1)
self.conv1_2 = torch.nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.conv2_1 = torch.nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.conv2_2 = torch.nn.Conv2d(128, 128, 3, stride=1, padding=1)
self.in_channel = in_channel
def forward(self, xs_pad, ilens, **kwargs):
"""VGG2L forward
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, D)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:return: batch of padded hidden state sequences (B, Tmax // 4, 128 * D // 4)
:rtype: torch.Tensor
"""
logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))
# x: utt x frame x dim
# xs_pad = F.pad_sequence(xs_pad)
# x: utt x 1 (input channel num) x frame x dim
xs_pad = xs_pad.view(xs_pad.size(0), xs_pad.size(1), self.in_channel,
xs_pad.size(2) // self.in_channel).transpose(1, 2)
# NOTE: max_pool1d ?
xs_pad = F.relu(self.conv1_1(xs_pad))
xs_pad = F.relu(self.conv1_2(xs_pad))
xs_pad = F.max_pool2d(xs_pad, 2, stride=2, ceil_mode=True)
xs_pad = F.relu(self.conv2_1(xs_pad))
xs_pad = F.relu(self.conv2_2(xs_pad))
xs_pad = F.max_pool2d(xs_pad, 2, stride=2, ceil_mode=True)
if torch.is_tensor(ilens):
ilens = ilens.cpu().numpy()
else:
ilens = np.array(ilens, dtype=np.float32)
        ilens = np.array(np.ceil(ilens / 2), dtype=np.int64)  # first max-pool halves T
        ilens = np.array(
            np.ceil(np.array(ilens, dtype=np.float32) / 2), dtype=np.int64).tolist()  # second max-pool
# x: utt_list of frame (remove zeropaded frames) x (input channel num x dim)
xs_pad = xs_pad.transpose(1, 2)
xs_pad = xs_pad.contiguous().view(
xs_pad.size(0), xs_pad.size(1), xs_pad.size(2) * xs_pad.size(3))
return xs_pad, ilens, None # no state in this layer
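# Shape sketch for VGG2L (illustrative, using the __main__ example below):
# (B=2, Tmax=20, D=81) is reshaped to (2, 1, 20, 81), the two conv/pool stages
# halve the time and feature axes (ceil mode) to (2, 128, 5, 21), and the
# flatten step yields (2, 5, 128 * 21) = (2, 5, 2688), with ilens halved twice.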
class Encoder(torch.nn.Module):
"""Encoder module
:param str etype: type of encoder network
:param int idim: number of dimensions of encoder network
:param int elayers: number of layers of encoder network
:param int eunits: number of lstm units of encoder network
:param int eprojs: number of projection units of encoder network
:param np.ndarray subsample: list of subsampling numbers
:param float dropout: dropout rate
:param int in_channel: number of input channels
"""
def __init__(self, etype, idim, elayers, eunits, eprojs, subsample, dropout, in_channel=1):
super(Encoder, self).__init__()
typ = etype.lstrip("vgg").rstrip("p")
if typ not in ['lstm', 'gru', 'blstm', 'bgru']:
logging.error("Error: need to specify an appropriate encoder architecture")
if etype.startswith("vgg"):
if etype[-1] == "p":
self.enc = torch.nn.ModuleList([VGG2L(in_channel),
RNNP(get_vgg2l_odim(idim, in_channel=in_channel), elayers, eunits,
eprojs,
subsample, dropout, typ=typ)])
logging.info('Use CNN-VGG + ' + typ.upper() + 'P for encoder')
else:
self.enc = torch.nn.ModuleList([VGG2L(in_channel),
RNN(get_vgg2l_odim(idim, in_channel=in_channel), elayers, eunits,
eprojs,
dropout, typ=typ)])
logging.info('Use CNN-VGG + ' + typ.upper() + ' for encoder')
else:
if etype[-1] == "p":
self.enc = torch.nn.ModuleList(
[RNNP(idim, elayers, eunits, eprojs, subsample, dropout, typ=typ)])
logging.info(typ.upper() + ' with every-layer projection for encoder')
else:
self.enc = torch.nn.ModuleList([RNN(idim, elayers, eunits, eprojs, dropout, typ=typ)])
logging.info(typ.upper() + ' without projection for encoder')
def forward(self, xs_pad, ilens, prev_states=None):
"""Encoder forward
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, D)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:param torch.Tensor prev_state: batch of previous encoder hidden states (?, ...)
:return: batch of hidden state sequences (B, Tmax, eprojs)
:rtype: torch.Tensor
"""
if prev_states is None:
prev_states = [None] * len(self.enc)
assert len(prev_states) == len(self.enc)
current_states = []
for module, prev_state in zip(self.enc, prev_states):
xs_pad, ilens, states = module(xs_pad, ilens, prev_state=prev_state)
current_states.append(states)
# make mask to remove bias value in padded part
mask = to_device(self, make_pad_mask(ilens).unsqueeze(-1))
return xs_pad.masked_fill(mask, 0.0), ilens, current_states
def encoder_for(args, idim, subsample):
return Encoder(args.etype, idim, args.elayers, args.eunits, args.eprojs, subsample, args.dropout_rate)
if __name__ == "__main__":
x = torch.randn(2, 20, 81)
ilens = torch.tensor([20., 10.]).long()
enc = Encoder(etype="vgglstmp",
idim=81,
elayers=3,
subsample=[1, 1, 1, 1],
eunits=256,
eprojs=128,
dropout=0.1)
out, olens, _ = enc(x, ilens)
print(out.shape, olens)
| [
"torch.nn.GRU",
"logging.error",
"numpy.ceil",
"six.moves.range",
"torch.nn.Conv2d",
"torch.randn",
"torch.nn.LSTM",
"torch.nn.Linear",
"numpy.array",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.arange",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.functional.max_pool2d",
"tor... | [((2050, 2092), 'torch.arange', 'torch.arange', (['(0)', 'maxlen'], {'dtype': 'torch.int64'}), '(0, maxlen, dtype=torch.int64)\n', (2062, 2092), False, 'import torch\n'), ((14381, 14403), 'torch.randn', 'torch.randn', (['(2)', '(20)', '(81)'], {}), '(2, 20, 81)\n', (14392, 14403), False, 'import torch\n'), ((3561, 3585), 'six.moves.range', 'six.moves.range', (['elayers'], {}), '(elayers)\n', (3576, 3585), False, 'import six\n'), ((4968, 4997), 'six.moves.range', 'six.moves.range', (['self.elayers'], {}), '(self.elayers)\n', (4983, 4997), False, 'import six\n'), ((7574, 7627), 'torch.nn.utils.rnn.pack_padded_sequence', 'pack_padded_sequence', (['xs_pad', 'ilens'], {'batch_first': '(True)'}), '(xs_pad, ilens, batch_first=True)\n', (7594, 7627), False, 'from torch.nn.utils.rnn import pack_padded_sequence\n'), ((8150, 8191), 'torch.nn.utils.rnn.pad_packed_sequence', 'pad_packed_sequence', (['ys'], {'batch_first': '(True)'}), '(ys, batch_first=True)\n', (8169, 8191), False, 'from torch.nn.utils.rnn import pad_packed_sequence\n'), ((9017, 9072), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['in_channel', '(64)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(in_channel, 64, 3, stride=1, padding=1)\n', (9032, 9072), False, 'import torch\n'), ((9096, 9143), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(64)', '(64)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(64, 64, 3, stride=1, padding=1)\n', (9111, 9143), False, 'import torch\n'), ((9167, 9215), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(64)', '(128)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(64, 128, 3, stride=1, padding=1)\n', (9182, 9215), False, 'import torch\n'), ((9239, 9288), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(128)', '(128)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(128, 128, 3, stride=1, padding=1)\n', (9254, 9288), False, 'import torch\n'), ((10191, 10240), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['xs_pad', '(2)'], {'stride': '(2)', 'ceil_mode': '(True)'}), '(xs_pad, 2, stride=2, ceil_mode=True)\n', (10203, 10240), True, 'import torch.nn.functional as F\n'), ((10351, 10400), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['xs_pad', '(2)'], {'stride': '(2)', 'ceil_mode': '(True)'}), '(xs_pad, 2, stride=2, ceil_mode=True)\n', (10363, 10400), True, 'import torch.nn.functional as F\n'), ((10412, 10434), 'torch.is_tensor', 'torch.is_tensor', (['ilens'], {}), '(ilens)\n', (10427, 10434), False, 'import torch\n'), ((2771, 2803), 'numpy.array', 'np.array', (['idim'], {'dtype': 'np.float32'}), '(idim, dtype=np.float32)\n', (2779, 2803), True, 'import numpy as np\n'), ((2847, 2879), 'numpy.array', 'np.array', (['idim'], {'dtype': 'np.float32'}), '(idim, dtype=np.float32)\n', (2855, 2879), True, 'import numpy as np\n'), ((5021, 5074), 'torch.nn.utils.rnn.pack_padded_sequence', 'pack_padded_sequence', (['xs_pad', 'ilens'], {'batch_first': '(True)'}), '(xs_pad, ilens, batch_first=True)\n', (5041, 5074), False, 'from torch.nn.utils.rnn import pack_padded_sequence\n'), ((5553, 5594), 'torch.nn.utils.rnn.pad_packed_sequence', 'pad_packed_sequence', (['ys'], {'batch_first': '(True)'}), '(ys, batch_first=True)\n', (5572, 5594), False, 'from torch.nn.utils.rnn import pad_packed_sequence\n'), ((6614, 6708), 'torch.nn.LSTM', 'torch.nn.LSTM', (['idim', 'cdim', 'elayers'], {'batch_first': '(True)', 'dropout': 'dropout', 'bidirectional': 'bidir'}), '(idim, cdim, elayers, batch_first=True, dropout=dropout,\n bidirectional=bidir)\n', (6627, 6708), False, 'import torch\n'), ((6776, 6869), 'torch.nn.GRU', 'torch.nn.GRU', 
(['idim', 'cdim', 'elayers'], {'batch_first': '(True)', 'dropout': 'dropout', 'bidirectional': 'bidir'}), '(idim, cdim, elayers, batch_first=True, dropout=dropout,\n bidirectional=bidir)\n', (6788, 6869), False, 'import torch\n'), ((6940, 6971), 'torch.nn.Linear', 'torch.nn.Linear', (['(cdim * 2)', 'hdim'], {}), '(cdim * 2, hdim)\n', (6955, 6971), False, 'import torch\n'), ((7012, 7039), 'torch.nn.Linear', 'torch.nn.Linear', (['cdim', 'hdim'], {}), '(cdim, hdim)\n', (7027, 7039), False, 'import torch\n'), ((10510, 10543), 'numpy.array', 'np.array', (['ilens'], {'dtype': 'np.float32'}), '(ilens, dtype=np.float32)\n', (10518, 10543), True, 'import numpy as np\n'), ((10569, 10587), 'numpy.ceil', 'np.ceil', (['(ilens / 2)'], {}), '(ilens / 2)\n', (10576, 10587), True, 'import numpy as np\n'), ((11790, 11865), 'logging.error', 'logging.error', (['"""Error: need to specify an appropriate encoder architecture"""'], {}), "('Error: need to specify an appropriate encoder architecture')\n", (11803, 11865), False, 'import logging\n'), ((14416, 14442), 'torch.tensor', 'torch.tensor', (['[20.0, 10.0]'], {}), '([20.0, 10.0])\n', (14428, 14442), False, 'import torch\n'), ((3710, 3814), 'torch.nn.LSTM', 'torch.nn.LSTM', (['inputdim', 'cdim'], {'dropout': 'dropout', 'num_layers': '(1)', 'bidirectional': 'bidir', 'batch_first': '(True)'}), '(inputdim, cdim, dropout=dropout, num_layers=1, bidirectional=\n bidir, batch_first=True)\n', (3723, 3814), False, 'import torch\n'), ((3882, 3985), 'torch.nn.GRU', 'torch.nn.GRU', (['inputdim', 'cdim'], {'dropout': 'dropout', 'num_layers': '(1)', 'bidirectional': 'bidir', 'batch_first': '(True)'}), '(inputdim, cdim, dropout=dropout, num_layers=1, bidirectional=\n bidir, batch_first=True)\n', (3894, 3985), False, 'import torch\n'), ((4159, 4190), 'torch.nn.Linear', 'torch.nn.Linear', (['(2 * cdim)', 'hdim'], {}), '(2 * cdim, hdim)\n', (4174, 4190), False, 'import torch\n'), ((4252, 4279), 'torch.nn.Linear', 'torch.nn.Linear', (['cdim', 'hdim'], {}), '(cdim, hdim)\n', (4267, 4279), False, 'import torch\n'), ((10651, 10684), 'numpy.array', 'np.array', (['ilens'], {'dtype': 'np.float32'}), '(ilens, dtype=np.float32)\n', (10659, 10684), True, 'import numpy as np\n')] |
import numpy as np
"""
Assumed covariance matrix for cap-diameter, stem-height and stem-width to get a more realistic
simulation of mushrooms (mushrooms with larger caps -> mushrooms with higher stems).
The values are chosen arbitrarily and may be changed.
"""
cov_mat = [[1, 0.5, 0.5],
[0.5, 1, 0.7],
[0.5, 0.7, 1]]
from scipy.stats import norm
def get_correlated_normals_in_interval(size, intervals, std):
"""
Parameters
----------
size: int
number of random generated normal values per distribution
intervals: list of lists of floats
an min max interval for each generated normal distribution
std: float
        standard deviation of the normal distributions
Return
------------
list of lists of floats
each element is a list of size values representing a normal distribution in one interval
Example
------------
size = 353, intervals = [[10.0, 20.0], [15.0, 20.0], [15.0, 20.0]], std = 3
-> return [[353 random normal values between 10.0 and 20.0], [353 random normal values between 15.0 and 20.0],
[353 random normal values between 15.0 and 20.0]]
"""
corr_normal_values = get_correlated_normal_distributions(len(intervals), size, std)
resized_normal_values = []
for i in range(0, len(intervals)):
resized_normal_values.append(resize_normal_zero_mean(corr_normal_values[i],
intervals[i][0], intervals[i][1]))
return resized_normal_values
def get_correlated_normal_distributions(number, size, std):
"""
Helper function of get_correlated_normals_in_interval()
Parameters
----------
number: int
number of random generated normal distributions
size: int
number of random generated normal values per distribution
std: float
        standard deviation of the normal distributions
Return
------------
list of lists of floats
        each element is a list of size values representing a zero-mean normal distribution with std,
        correlated with the others using the global covariance matrix cov_mat
"""
norm_values = np.zeros(shape=(number, size))
for i in range(0, number):
        norm_values[i] = norm.rvs(0, 1 / std, size=size)  # note: the scale passed to rvs is 1/std
return np.dot(get_matrix_for_correlating_values("cholesky"), norm_values)
from scipy.linalg import eigh, cholesky
def get_matrix_for_correlating_values(method):
"""
Helper function of get_correlated_normal_distributions()
Parameters
------------
method: str
either "eigenvalues" or "cholesky" determining the used method
Return
------------
numpy.ndarray
returns a matrix c from the matrix decomposition c*c^T = cov_mat
    Reference: https://scipy-cookbook.readthedocs.io/items/CorrelatedRandomSamples.html
"""
    if method == "cholesky":
        return cholesky(cov_mat, lower=True)
    if method == "eigenvalues":
        # Compute the eigenvalues and eigenvectors.
        evals, evecs = eigh(cov_mat)
        return np.dot(evecs, np.diag(np.sqrt(evals)))
    raise ValueError("method must be 'cholesky' or 'eigenvalues'")
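# Quick self-check sketch (illustrative only, not part of the original module):
# both decompositions satisfy c @ c.T == cov_mat up to floating-point error,
# which is the property that lets np.dot(c, z) impose cov_mat on the
# uncorrelated samples z.
def _check_decomposition(method="cholesky", tol=1e-10):
    c = get_matrix_for_correlating_values(method)
    return np.allclose(np.dot(c, c.T), cov_mat, atol=tol)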
def resize_normal_zero_mean(norm_values, low, high):
    """
    Helper function of get_correlated_normals_in_interval()
    Parameters
    ----------
    norm_values: list of floats
        represents a zero mean normal distribution
    low: float
        lower interval border
    high: float
        upper interval border
    Return
    ------------
    list of floats
        the zero mean normal distribution resized to fall symmetrically within the interval borders
    """
    resized = []
    for val in norm_values:
        val = (val + 1) / 2  # map the bulk of the zero-mean samples into [0, 1]
        resized.append(val * (high - low) + low)
    return np.array(resized)
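# Worked example: a zero-mean sample of -1, 0, or 1 maps to low, the interval
# midpoint, or high respectively; with the interval [10, 20] the value 0
# becomes (0 + 1) / 2 * (20 - 10) + 10 = 15.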
if __name__ == "__main__":
"""
Running this module results in an example run of creating normal sampled values for the metrical attributes
cap-diameter, stem-height and stem-width using the values of the mushroom species 'Fly Agaric'.
The three resulting normal distributions are then visualized with two plots:
1) scatter plots showing the correlations between the attributes
2) bar plot showing that the distribution is normal
"""
size = 353
norm_values_corr = get_correlated_normal_distributions(3, size, 3)
intervals = [[10, 20], [15, 20], [15, 20]]
for i in range(0, 3):
norm_values_corr[i] = resize_normal_zero_mean(norm_values_corr[i],
intervals[i][0], intervals[i][1])
print(norm_values_corr)
corr_in_interv = get_correlated_normals_in_interval(size, [[10, 20], [15, 20], [15, 20]], 3)
    # scatter plots of the correlated random samples
from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist
import matplotlib.pyplot as plt
subplot(1, 3, 1)
plot(corr_in_interv[0], corr_in_interv[1], 'b.', c='grey')
xlabel('cap diameter')
ylabel('stem height')
axis('equal')
grid(True)
subplot(1, 3, 2)
plot(norm_values_corr[0], norm_values_corr[2], 'b.', c='grey')
xlabel('cap diameter')
ylabel('stem width')
axis('equal')
grid(True)
subplot(1, 3, 3)
plot(norm_values_corr[1], norm_values_corr[2], 'b.', c='grey')
xlabel('stem height')
ylabel('stem width')
axis('equal')
plt.tight_layout()
grid(True)
# improve spacing
fig, ax = plt.subplots()
plt.tight_layout()
show()
for i in range(0, 3):
subplot(1, 3, i + 1)
hist(norm_values_corr[i], color='grey')
if i == 0:
xlabel('cap diameter [10, 20]')
if i == 1:
xlabel('stem height [15, 20]')
if i == 2:
xlabel('stem width [15, 20]')
grid(True)
fig, ax = plt.subplots()
plt.tight_layout()
show()
| [
"pylab.hist",
"pylab.show",
"pylab.axis",
"scipy.stats.norm.rvs",
"scipy.linalg.cholesky",
"pylab.ylabel",
"numpy.zeros",
"pylab.grid",
"matplotlib.pyplot.subplots",
"pylab.subplot",
"numpy.array",
"scipy.linalg.eigh",
"pylab.xlabel",
"matplotlib.pyplot.tight_layout",
"pylab.plot",
"nu... | [((2157, 2187), 'numpy.zeros', 'np.zeros', ([], {'shape': '(number, size)'}), '(shape=(number, size))\n', (2165, 2187), True, 'import numpy as np\n'), ((2934, 2947), 'scipy.linalg.eigh', 'eigh', (['cov_mat'], {}), '(cov_mat)\n', (2938, 2947), False, 'from scipy.linalg import eigh, cholesky\n'), ((3723, 3734), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (3731, 3734), True, 'import numpy as np\n'), ((4805, 4821), 'pylab.subplot', 'subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (4812, 4821), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((4827, 4885), 'pylab.plot', 'plot', (['corr_in_interv[0]', 'corr_in_interv[1]', '"""b."""'], {'c': '"""grey"""'}), "(corr_in_interv[0], corr_in_interv[1], 'b.', c='grey')\n", (4831, 4885), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((4891, 4913), 'pylab.xlabel', 'xlabel', (['"""cap diameter"""'], {}), "('cap diameter')\n", (4897, 4913), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((4919, 4940), 'pylab.ylabel', 'ylabel', (['"""stem height"""'], {}), "('stem height')\n", (4925, 4940), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((4946, 4959), 'pylab.axis', 'axis', (['"""equal"""'], {}), "('equal')\n", (4950, 4959), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((4965, 4975), 'pylab.grid', 'grid', (['(True)'], {}), '(True)\n', (4969, 4975), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((4983, 4999), 'pylab.subplot', 'subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (4990, 4999), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((5005, 5067), 'pylab.plot', 'plot', (['norm_values_corr[0]', 'norm_values_corr[2]', '"""b."""'], {'c': '"""grey"""'}), "(norm_values_corr[0], norm_values_corr[2], 'b.', c='grey')\n", (5009, 5067), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((5073, 5095), 'pylab.xlabel', 'xlabel', (['"""cap diameter"""'], {}), "('cap diameter')\n", (5079, 5095), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((5101, 5121), 'pylab.ylabel', 'ylabel', (['"""stem width"""'], {}), "('stem width')\n", (5107, 5121), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((5127, 5140), 'pylab.axis', 'axis', (['"""equal"""'], {}), "('equal')\n", (5131, 5140), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((5146, 5156), 'pylab.grid', 'grid', (['(True)'], {}), '(True)\n', (5150, 5156), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((5164, 5180), 'pylab.subplot', 'subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (5171, 5180), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((5186, 5248), 'pylab.plot', 'plot', (['norm_values_corr[1]', 'norm_values_corr[2]', '"""b."""'], {'c': '"""grey"""'}), "(norm_values_corr[1], norm_values_corr[2], 'b.', c='grey')\n", (5190, 5248), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((5254, 5275), 'pylab.xlabel', 'xlabel', (['"""stem height"""'], {}), "('stem height')\n", (5260, 5275), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((5281, 5301), 'pylab.ylabel', 'ylabel', (['"""stem width"""'], {}), "('stem width')\n", (5287, 5301), False, 
'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((5307, 5320), 'pylab.axis', 'axis', (['"""equal"""'], {}), "('equal')\n", (5311, 5320), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((5326, 5344), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5342, 5344), True, 'import matplotlib.pyplot as plt\n'), ((5350, 5360), 'pylab.grid', 'grid', (['(True)'], {}), '(True)\n', (5354, 5360), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((5401, 5415), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5413, 5415), True, 'import matplotlib.pyplot as plt\n'), ((5421, 5439), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5437, 5439), True, 'import matplotlib.pyplot as plt\n'), ((5447, 5453), 'pylab.show', 'show', ([], {}), '()\n', (5451, 5453), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((5791, 5805), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5803, 5805), True, 'import matplotlib.pyplot as plt\n'), ((5811, 5829), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5827, 5829), True, 'import matplotlib.pyplot as plt\n'), ((5837, 5843), 'pylab.show', 'show', ([], {}), '()\n', (5841, 5843), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((2246, 2277), 'scipy.stats.norm.rvs', 'norm.rvs', (['(0)', '(1 / std)'], {'size': 'size'}), '(0, 1 / std, size=size)\n', (2254, 2277), False, 'from scipy.stats import norm\n'), ((2994, 3023), 'scipy.linalg.cholesky', 'cholesky', (['cov_mat'], {'lower': '(True)'}), '(cov_mat, lower=True)\n', (3002, 3023), False, 'from scipy.linalg import eigh, cholesky\n'), ((5492, 5512), 'pylab.subplot', 'subplot', (['(1)', '(3)', '(i + 1)'], {}), '(1, 3, i + 1)\n', (5499, 5512), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((5522, 5561), 'pylab.hist', 'hist', (['norm_values_corr[i]'], {'color': '"""grey"""'}), "(norm_values_corr[i], color='grey')\n", (5526, 5561), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((5763, 5773), 'pylab.grid', 'grid', (['(True)'], {}), '(True)\n', (5767, 5773), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((5595, 5626), 'pylab.xlabel', 'xlabel', (['"""cap diameter [10, 20]"""'], {}), "('cap diameter [10, 20]')\n", (5601, 5626), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((5660, 5690), 'pylab.xlabel', 'xlabel', (['"""stem height [15, 20]"""'], {}), "('stem height [15, 20]')\n", (5666, 5690), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((5724, 5753), 'pylab.xlabel', 'xlabel', (['"""stem width [15, 20]"""'], {}), "('stem width [15, 20]')\n", (5730, 5753), False, 'from pylab import plot, show, axis, subplot, xlabel, ylabel, grid, hist\n'), ((3095, 3109), 'numpy.sqrt', 'np.sqrt', (['evals'], {}), '(evals)\n', (3102, 3109), True, 'import numpy as np\n')] |
"""
Noether (+matplotlib): easy graphing
"""
import argparse
from collections import namedtuple
import sys
import numpy as np
import matplotlib # noqa: F401
from matplotlib import animation, pyplot as plt
from .matrix import Matrix, Vector # noqa: F401
import noether
__all__ = """\
np matplotlib plt Vector Matrix \
plot""".split()
def limit_heuristic(a):
"""Heuristic to determine if an array has a plottable limit."""
return np.std(a[1:] - a[:-1]) < np.std(a)
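# Intuition (illustrative): for smooth bounded data such as np.sin over a few
# periods, step-to-step differences vary far less than the values themselves,
# so the heuristic reports a plottable limit; data with abrupt jumps (e.g.
# np.tan sampled across its poles) fails the test and is treated as unbounded.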
_gr = namedtuple("GraphResult", "data label hasLimits isTimeFunc")
def GraphResult(data, domain, hasInputSpace):
label = getattr(data, "__name__", None)
if label == '<lambda>':
label = None
isTimeFunc = False
if callable(data):
try:
data(1)
data = data(domain)
except TypeError:
data(1, 2) # Functions should be one or two arguments
isTimeFunc = True
elif not hasInputSpace:
raise ValueError("Cannot plot function without input domain")
hasLimits = not isTimeFunc and limit_heuristic(data)
return _gr(data, label, hasLimits, isTimeFunc)
class Animation:
line_args = [
dict(lw=2),
]
def animate(self, fig=None, axes=None, frames=10_000, blit=False, repeat=True):
if fig is None and axes is None:
fig, axes = plt.subplots()
self.frames = frames
self.fig = fig
self.axes = axes
self.lines = tuple(axes.plot([], [], **kw)[0] for kw in self.line_args)
self.anim = animation.FuncAnimation(
fig, frames=self.data,
func=(lambda data: self.onFrame(data) or self.lines),
init_func=(lambda: self.onStart() or self.lines),
blit=blit, repeat=repeat, interval=1
)
return self
lines = tuple()
axes = None
fig = None
anim = None
def onStart(self):
pass
def data(self, t=0):
for frame in range(self.frames):
t += 1
yield t
def onFrame(self, data):
return self.lines
def plot(
*funcs, axis="x", jmin=None, jmax=None,
axisLines=True, title=None,
dt=0.01, frames=10_000
):
"""Plot a variable amount of functions or data in Cartesian space.
If the first value provided is an array, it will be consumed as the
domain.
Provide startj and endj to manually control the limit of the output axis."""
hasInputSpace = funcs and isinstance(funcs[0], np.ndarray)
if hasInputSpace:
x, *funcs = funcs
else:
x = np.linspace(-6, 6, 2000)
dynamics, bounded, unbounded = [], [], []
for f in funcs:
g = GraphResult(f, x, hasInputSpace)
l = dynamics if g.isTimeFunc else bounded if g.hasLimits else unbounded
l.append(g)
fig, axes = plt.subplots()
if title:
axes.set_title(title)
if len(funcs) > 1:
plt.legend()
elif not title and hasattr(funcs[0], '__name__'):
axes.set_title(funcs[0].__name__)
# Determine range if not provided
xlim = min(x), max(x)
if not bounded:
jmin = jmin or -6
jmax = jmax or 6
elif not (jmin and jmax):
mins, maxs = zip(*[
(min(k.data), max(k.data)) for k in bounded if k.hasLimits
])
jmin, jmax = min(mins), max(maxs)
reach = 1.1 if unbounded else 1.5
c, d = (jmax + jmin) / 2, (jmax - jmin) / 2
jmin = c - (d * reach)
jmax = c + (d * reach)
ylim = jmin, jmax
if axis == "y":
xlim, ylim = ylim, xlim
axes.set_xlim(*xlim)
axes.set_ylim(*ylim)
if axisLines:
inf = 1e17
axes.plot([-inf, inf], [0, 0], "gray", lw=0.5)
axes.plot([0, 0], [-inf, inf], "gray", lw=0.5)
for k in bounded + unbounded:
y = k.data
if axis == "y":
x, y = y, x
axes.plot(x, y, label=k.label)
if dynamics:
class newAnim(Animation):
line_args = [{}] * len(dynamics)
def data(self, t=0):
for i in range(frames):
t += dt
yield t
def onFrame(self, t):
for line, k in zip(self.lines, dynamics):
f_t = np.vectorize(lambda x: k.data(x, t))
if axis == 'x':
line.set_data(x, f_t(x))
else:
line.set_data(f_t(x), x)
        anim = newAnim().animate(fig, axes, frames=frames, blit=True, repeat=True)
else:
anim = None
plt.show()
return fig, axes, anim
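# Usage sketch (illustrative, commented out to avoid side effects on import):
# plot(np.linspace(0, 10, 500), np.sin, lambda x: x / 10, title="demo")
# The leading array is consumed as the domain; one-argument functions are
# evaluated over it, and two-argument functions f(x, t) are animated over t.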
# TODO: polar
def main(ns):
import astley
if ns.axis is not None:
axis = ns.axis
else:
axis = "y" if any("y" in e for e in ns.function) else "x"
signature = astley.arguments([astley.arg(axis)])
funcs = []
for expression in ns.function:
node = astley.parse(expression, mode='eval').body
func = astley.Lambda(signature, node).eval(noether.__dict__)
func.__name__ = expression.replace('**', '^').replace('*', '·')
funcs.append(func)
title = 'noether.graphing ' + ' '.join(
repr(i) if ' ' in i else i for i in sys.argv[1:]
)
plot(
np.linspace(ns.start, ns.end, ns.count), *funcs,
axis=axis, jmin=ns.min, jmax=ns.max, axisLines=ns.axisLines,
title=title
)
parser = argparse.ArgumentParser(
"noether.graphing", description="Noether quick f(x)/f(y) grapher"
)
parser.add_argument(
"function", nargs="+", help="expression of x or y"
)
parser.add_argument(
"--axis", type=str, default=None, help="input axis of functions"
)
parser.add_argument("--min", '-m', type=float, default=None)
parser.add_argument("--max", '-M', type=float, default=None)
parser.add_argument("--start", '-s', type=float, default=-6)
parser.add_argument("--end", '-e', type=float, default=6)
parser.add_argument("--count", '-n', type=float, default=2000)
parser.add_argument(
"--noAxisLines", dest="axisLines", action="store_false")
if __name__ == '__main__':
main(parser.parse_args())
| [
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"astley.Lambda",
"numpy.std",
"matplotlib.pyplot.legend",
"astley.parse",
"collections.namedtuple",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"astley.arg"
] | [((487, 547), 'collections.namedtuple', 'namedtuple', (['"""GraphResult"""', '"""data label hasLimits isTimeFunc"""'], {}), "('GraphResult', 'data label hasLimits isTimeFunc')\n", (497, 547), False, 'from collections import namedtuple\n'), ((5400, 5495), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""noether.graphing"""'], {'description': '"""Noether quick f(x)/f(y) grapher"""'}), "('noether.graphing', description=\n 'Noether quick f(x)/f(y) grapher')\n", (5423, 5495), False, 'import argparse\n'), ((2819, 2833), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2831, 2833), True, 'from matplotlib import animation, pyplot as plt\n'), ((4573, 4583), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4581, 4583), True, 'from matplotlib import animation, pyplot as plt\n'), ((444, 466), 'numpy.std', 'np.std', (['(a[1:] - a[:-1])'], {}), '(a[1:] - a[:-1])\n', (450, 466), True, 'import numpy as np\n'), ((469, 478), 'numpy.std', 'np.std', (['a'], {}), '(a)\n', (475, 478), True, 'import numpy as np\n'), ((2565, 2589), 'numpy.linspace', 'np.linspace', (['(-6)', '(6)', '(2000)'], {}), '(-6, 6, 2000)\n', (2576, 2589), True, 'import numpy as np\n'), ((2911, 2923), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2921, 2923), True, 'from matplotlib import animation, pyplot as plt\n'), ((5245, 5284), 'numpy.linspace', 'np.linspace', (['ns.start', 'ns.end', 'ns.count'], {}), '(ns.start, ns.end, ns.count)\n', (5256, 5284), True, 'import numpy as np\n'), ((1345, 1359), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1357, 1359), True, 'from matplotlib import animation, pyplot as plt\n'), ((4822, 4838), 'astley.arg', 'astley.arg', (['axis'], {}), '(axis)\n', (4832, 4838), False, 'import astley\n'), ((4907, 4944), 'astley.parse', 'astley.parse', (['expression'], {'mode': '"""eval"""'}), "(expression, mode='eval')\n", (4919, 4944), False, 'import astley\n'), ((4965, 4995), 'astley.Lambda', 'astley.Lambda', (['signature', 'node'], {}), '(signature, node)\n', (4978, 4995), False, 'import astley\n')] |
from energyOptimal.powerModel import powerModel
from energyOptimal.performanceModel import performanceModel
from energyOptimal.energyModel import energyModel
from energyOptimal.monitor import monitorProcess
from energyOptimal.dvfsModel import dvfsModel
import _pickle as pickle
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import os
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
# plt.style.use('seaborn')
arg_dict= {"black":1, "canneal":4, "dedup":6,
"ferret":0, "fluid":1, "freq":1,
"rtview":7, "swap":3, "vips":1,
"x264":23, "xhpl":1, "openmc":0,
"body":2}
def createPowerModels(profile_path= "data/power_model/", output_path="data/models/power_model/", appname=None):
for p in os.listdir(profile_path):
print(p)
pw_model= powerModel()
pw_model.loadData(filename=profile_path+p,verbose=1,freqs_filter=np.arange(1.2,2.3,0.1))
pw_model.fit()
pickle.dump(pw_model, open(output_path+p,"wb"))
error= pw_model.error()
print("Power model constants, ", pw_model.power_model_c)
print("Error, ", error)
def createPerformanceModels(profile_path= "data/performance_model/", output_path="data/models/performance_model/", appname=None):
for p in os.listdir(profile_path): #zip(parsecapps,parsecapps_argnum):
if not p.endswith("pkl"): continue
# if "freq" not in p: continue
# if "canneal" not in p: continue
if p in os.listdir(output_path): continue
print(p)
idx= -1
for k,v in arg_dict.items():
if k in p:
idx= v
break
if idx == -1:
raise("Program arg not found")
        if appname and appname not in p:
continue
perf_model= performanceModel()
df= perf_model.loadData(filename=profile_path+p, arg_num=idx, verbose=1, method='constTime')
print("Inputs: {} Freqs: {} Thrs: {}".format(len(df["in"].unique()), len(df["freq"].unique()), len(df["thr"].unique())))
print("Total ", len(df))
# print("Inputs : ", df["in"].unique())
# print("Threads : ", df["thr"].unique())
# print("Frequencies : ", df["freq"].unique())
# print("")
# continue
if 'fluid' in p:
perf_model.dataFrame= perf_model.dataFrame[perf_model.dataFrame['thr'].isin([1,2,4,8,16,32])]
if 'x264' in p:
perf_model.dataFrame['in_cat']= 6-perf_model.dataFrame['in_cat']
        if df['in_cat'].nunique() > 5:  # limit to 5 input classes
cats= perf_model.dataFrame['in_cat'].unique()[-5:]
perf_model.dataFrame= perf_model.dataFrame[perf_model.dataFrame['in_cat'].isin(cats)]
perf_model.fit(C_=10e3,gamma_=0.1)
# scores= perf_model.crossValidate(method='mpe')
pickle.dump(perf_model, open(output_path+p,"wb"))
# print("Program", p)
# print(df.head(5))
print("MPE ", perf_model.error(method='mpe')*100)
print("MAE ", perf_model.error(method='mae'))
# print("CrossValidation ", np.mean(scores)*100, scores)
def figures(appname=None, energy= True, in_cmp=3):
from energyOptimal import plotData
for app, title in zip(parsec_models,titles):
        if (appname and appname not in app) or (not app):
continue
pw_model= pickle.load(open("data/models/power_model/ipmi_2-32_cpuload.pkl","rb"))
perf_model= pickle.load(open("data/models/performance_model/"+app,"rb"))
en_model= energyModel(pw_model,perf_model,freq_range_=np.arange(1.2e6,2.3e6,0.1e6)/1e6)
plotData.setProps(xlabel='Frequencies (GHz)', ylabel='Active threads',
zlabel='Energy (kJ)' if energy else 'Time (s)', title=title)
df_= perf_model.dataFrame[perf_model.dataFrame['in_cat']==in_cmp].sort_values(['freq','thr'])
df_pred_= en_model.dataFrame[en_model.dataFrame['in_cat']==in_cmp].sort_values(['freq','thr'])
# df_pred_= df_pred_[df_pred_['thr'].isin(list(range(8,33,2)))]
# df_= df_[df_['thr'].isin(list(range(8,33,2)))]
plotData.plot3D(x=df_['freq'].unique(),y=df_['thr'].unique(),
z=df_['energy'].values/1e3 if energy else df_['time'].values,
points=True,legend='Measurements')
plotData.plot3D(x=df_pred_['freq'].unique(),y=df_pred_['thr'].unique(),
z=df_pred_['energy_model'].values/1e3 if energy else df_pred_['time'].values,
points=False,legend='Model')
plotData.ax.view_init(30,60)
if 'HPL' in app:
plotData.ax.set_zlim(0,15)
aux= 'energy' if energy else 'time'
plotData.savePlot('fotos/{}/{}.png'.format(aux, app),showLegend=True)
def createReducedPerformanceModel(path, arg_num, title_='', save_df='', save_svr=''):
perf_model= performanceModel()
perf_model.loadData(filename=path, arg_num=int(arg_num))
cats= perf_model.dataFrame['in_cat'].unique()[-5:]
perf_model.dataFrame= perf_model.dataFrame[perf_model.dataFrame['in_cat'].isin(cats)]
perf_model.dataFrame= perf_model.dataFrame[perf_model.dataFrame['freq']!=2.3]
if 'fluid' in path:
perf_model.dataFrame= perf_model.dataFrame[perf_model.dataFrame['thr'].isin([1,2,4,8,16,32])]
if 'x264' in path:
perf_model.dataFrame['in_cat']= 6-perf_model.dataFrame['in_cat']
df_ori= perf_model.dataFrame.sort_values(['freq','thr','in_cat']).copy()
x=[]
y_time=[]
y_en=[]
less_5= 0
for train_sz in range(0, perf_model.dataFrame.shape[0], 100)[1:]:
# print("Program", path)
aux= perf_model.fit(C_=10e3,gamma_=0.1,train_size_=train_sz,dataframe=True)
aux= pd.merge(aux[['freq','thr','in_cat']],df_ori)
perf_model.estimate(df_ori[['freq','thr','in_cat']],dataframe=True).sort_values(['freq','thr','in_cat'])
x.append(train_sz)
y_time.append(perf_model.error()*100)
y_en.append( aux['energy'].sum()/1e6 )
# print(y_en[-1])
# print( x[-1], y_time[-1] )
if y_time[-1] <= 6 and less_5 == 0:
less_5= y_time[-1]
print('%s_%i.pkl'%(title_,train_sz))
pickle.dump(perf_model, open("data/model/performance_model/%s_%i.pkl"%(title_,train_sz),"wb"))
break
# scores= perf_model.crossValidate(method='mpe')
# print("CrossValidation ", np.mean(scores)*100, scores)
fig, ax1 = plt.subplots()
ax1.plot(x,y_time)
# ax1.plot([min(x),max(x)],[less_5, less_5],'-')
ax1.set_ylabel('Mean error (%)')
ax2 = ax1.twinx()
ax2.plot(x,y_en)
ax2.set_ylabel('Energy (KJ)')
plt.xlabel('Train size')
plt.title(title_)
plt.savefig('fotos/over/%s.png'%title_)
# plt.show()
def mean_df():
def avg_ondemand(onds, arg):
ond= dvfsModel()
ond.loadData(filename= 'data/dvfs/ondemand/'+onds[0], arg_num= arg, method='constTime')
df= ond.dataFrame
for f in onds[1:]:
            ond.loadData(filename= 'data/dvfs/ondemand/'+f, arg_num= arg, method='constTime')
df['energy']+= ond.dataFrame['energy']
df['energy']/=len(onds)
return df
ondemand= avg_ondemand(['ferret_completo_2.pkl','ferret_completo_3.pkl'],6)
pw_model= pickle.load(open("data/models/power_model/ipmi_2-32_cpuload.pkl","rb"))
perf_model= pickle.load(open("data/models/performance_model/completo_ferret_3.pkl","rb"))
en_model= energyModel(pw_model,perf_model)
ond= ondemand[['in','thr','time','energy']].sort_values(['in','thr'])
nthreads= ond['thr'].unique().shape[0]
ond= pd.crosstab(ond['in'], ond['thr'], ond['energy'],aggfunc=min)
df= en_model.realMinimalEnergy().sort_values('in_cat')['energy']
df= pd.concat([df]*nthreads,axis=1)
ond= pd.DataFrame(ond.values/df.values,columns=ond.columns)
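    # Each cell is now ondemand energy divided by the model's minimal energy,
    # so bars above the dashed line at 1 mean the proposed setting saves energy.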
ond.plot.bar()
    plt.plot([-1,6],[1,1], '--',color='k',label='proposed')
plt.title('Ferret')
plt.tight_layout()
plt.savefig('fotos/comp2/ferret.png')
def createReducedPerformanceModel2(path, arg_num, title_='', save_df='', save_svr=''):
perf_model= performanceModel()
perf_model.loadData(filename=path, arg_num=int(arg_num))
cats= perf_model.dataFrame['in_cat'].unique()[-5:]
perf_model.dataFrame= perf_model.dataFrame[perf_model.dataFrame['in_cat'].isin(cats)]
perf_model.dataFrame= perf_model.dataFrame[perf_model.dataFrame['freq']<2.3]
if 'fluid' in path:
perf_model.dataFrame= perf_model.dataFrame[perf_model.dataFrame['thr'].isin([1,2,4,8,16,32])]
if 'x264' in path:
perf_model.dataFrame['in_cat']= 6-perf_model.dataFrame['in_cat']
df_ori= perf_model.dataFrame.sort_values(['freq','thr','in_cat']).copy()
ori= perf_model.dataFrame.copy()
freqs= perf_model.dataFrame['freq'].unique()
x= []
y_time= []
y_en= []
for f in range(1,len(freqs),1):
use_freq= list(freqs[:int(f)])+list(freqs[-int(f):])
perf_model.dataFrame= perf_model.dataFrame[perf_model.dataFrame['freq'].isin(use_freq)]
# print(perf_model.dataFrame['freq'].unique())
aux= perf_model.fit(C_=10e3,gamma_=0.1,train_size_=0.9,dataframe=True)
aux= pd.merge(aux[['freq','thr','in_cat']],df_ori)
perf_model.dataFrame= ori.copy()
df_est= perf_model.estimate(df_ori[['freq','thr','in_cat']],dataframe=True).sort_values(['freq','thr','in_cat'])
error= sum( (abs(df_est['time']-df_ori['time']))/df_ori['time'] )/df_ori.shape[0]*100
x.append(aux.shape[0])
y_time.append(error)
y_en.append(aux['energy'].sum()/1e6)
# scores= perf_model.crossValidate(method='mpe')
print('%s_%i.pkl'%(title_,f), aux.shape, aux['energy'].sum()/1e6, error, perf_model.error()*100)
print(use_freq)
pickle.dump(perf_model, open("data/model/performance_model/%s_%i.pkl"%(title_,f),"wb"))
fig, ax1 = plt.subplots()
ax1.plot(x,y_time)
ax1.set_ylabel('Mean error (%)')
ax2 = ax1.twinx()
ax2.plot(x,y_en)
ax2.set_ylabel('Energy (KJ)')
plt.xlabel('Train size')
plt.title(title_)
plt.savefig('fotos/over/%s.png'%title_)
# plt.show()
def comparison(appname=None, proposed_bar=False, relative=True, thrs_filter=()):
row=[]
for title, dvfs, model, arg in zip(titles,parsec_dvfs,parsec_models,parsecapps_argnum):
if 'freq' in model or not model:
continue
if appname and not appname in dvfs:
continue
ondemand= dvfsModel()
ondemand.loadData(filename= 'data/dvfs/ondemand/'+dvfs, arg_num= arg, method='constTime')
pw_model= pickle.load(open("data/models/power_model/ipmi_2-32_cpuload.pkl","rb"))
perf_model= pickle.load(open("data/models/performance_model/"+model,"rb"))
en_model= energyModel(pw_model,perf_model)
#TODO verify if arguments match
ond= ondemand.dataFrame[['in','thr','time','energy']]
ond= pd.merge(ond,perf_model.dataFrame[['in','in_cat']]).drop_duplicates().sort_values(['in_cat','thr'])
if thrs_filter:
ond= ond[ond['thr'].isin(thrs_filter)]
ond_en= pd.crosstab(ond['in_cat'], ond['thr'], ond['energy'],aggfunc=min)
ond_time= pd.crosstab(ond['in_cat'], ond['thr'], ond['time'],aggfunc=min)
min_df= en_model.realMinimalEnergy().sort_values('in_cat')
if proposed_bar:
ond_en['proposed']= min_df['energy'].values
ond_time['proposed']= min_df['time'].values
if relative:
aux= pd.concat([min_df['energy']]*(len(ond_en.columns)),axis=1)
saving= pd.DataFrame(ond_en.values-aux.values,columns=ond_en.columns)
ond_en= pd.DataFrame(ond_en.values/aux.values,columns=ond_en.columns)
aux= pd.concat([min_df['time']]*(len(ond_en.columns)),axis=1)
ond_time= pd.DataFrame(ond_time.values/aux.values,columns=ond_time.columns)
            row.append([dvfs, ond_en.max(axis=1).mean(), ond_en.mean(axis=1).mean(), ond_en[32].mean(), ond_en.min(axis=1).mean(),
                        ond_time.max(axis=1).mean(), ond_time.mean(axis=1).mean(), ond_time[32].mean(), ond_time.min(axis=1).mean(),
                        saving.max(axis=1).sum(), saving.median(axis=1).sum(), saving[32].sum(), saving.min(axis=1).sum(),
                        perf_model.dataFrame.energy.sum()])
ond_en.plot.bar(figsize=(12,7))
if relative:
plt.plot([-1,6],[1,1], '--',color='k',label='proposed')
plt.plot([-1,6],[ond_en.mean().mean(),ond_en.mean().mean()], ':',color='k',label='average gain')
plt.xlabel('Inputs',fontsize=18)
        plt.ylabel('Energy relative' if relative else 'Energy (J)',fontsize=18)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.title(title,fontsize=22)
plt.legend(loc='center right',bbox_to_anchor=(1.2,0.55),fontsize=12)
plt.tight_layout()
plt.savefig('fotos/relative/%s.png'%dvfs)
# plt.show()
if relative and not proposed_bar:
df= pd.DataFrame(row,columns=['app','max_en','mean_en','32_en','min_en',
'max_time','mean_time','32_time','min_time',
'max_save','median_save','32_save','min_save', 'train_energy'])
df[['max_en','mean_en','32_en','min_en',
'max_time','mean_time','32_time','min_time']]-=1
df[['max_en','mean_en','32_en','min_en',
'max_time','mean_time','32_time','min_time']]*=100
df['max_run']= df['train_energy']/df['max_save']
df['median_run']= df['train_energy']/df['median_save']
df['32_run']= df['train_energy']/df['32_save']
df['min_run']= df['train_energy']/df['min_save']
df= df.sort_values('32_en',ascending=False)
df= pd.concat( (df,pd.DataFrame([['mean']+list(df[df>0].mean().values)],columns=df.columns)) )
df.to_csv('tables/relative.csv')
print(df)
titles=['Blackscholes','Canneal','Dedup',
'Ferret','Fluidanimate','Freqmine',
'Raytrace','Swaptions','Vips',
'x264','HPL','Openmc',
'Bodytrack']
# parsecapps=['completo_black_3.pkl','completo_canneal_2.pkl','completo_dedup_3.pkl',
# 'completo_ferret_3.pkl','completo_fluid_2.pkl','completo_freq.pkl',
# 'completo_rtview_2.pkl','completo_swaptions_1.pkl','completo_vips_3.pkl',
# 'completo_x264_3.pkl','completo_xhpl.pkl','completo_openmc_kernel_novo.pkl']
# parsec_models=['completo_black_5.pkl','completo_canneal_2.pkl','completo_dedup_3.pkl',
# 'completo_ferret_3.pkl','completo_fluid_3.pkl','completo_freq.pkl',
# 'completo_rtview_3.pkl','completo_swaptions_2.pkl','completo_vips_3.pkl',
# 'completo_x264_3.pkl','completo_xhpl.pkl','completo_openmc_kernel_novo.pkl',
# 'bodytrack_completo.pkl']
parsecapps=['completo_black_5.pkl','completo_canneal_2.pkl','completo_dedup_3.pkl',
'completo_ferret_3.pkl','completo_fluid_3.pkl','completo_freqmine_1.pkl',
'completo_rtview_3.pkl','completo_swaptions_2.pkl','completo_vips_3.pkl',
'completo_x264_3.pkl','completo_xhpl.pkl','completo_openmc_kernel_novo.pkl',
'completo_bodytrack_1.pkl']
parsec_models=['Blackscholes_2.pkl', 'Canneal_2.pkl', 'Dedup_2.pkl',
'Ferret_2.pkl', 'Fluidanimate_2.pkl', 'Freqmine_2.pkl',
'Raytrace_2.pkl', 'Swaptions_2.pkl', 'Vips_2.pkl',
'x264_2.pkl', 'HPL_2.pkl', 'Openmc_3.pkl',
'Bodytrack_2.pkl']
# parsec_models= parsecapps
parsec_dvfs=['blackscholes_completo_3.pkl', 'canneal_completo_3.pkl', 'dedup_completo_3.pkl',
'ferret_completo_3.pkl', 'fluidanimate_completo_3.pkl', 'freqmine_completo_5.pkl',
'rtview_completo_3.pkl', 'swaptions_completo_3.pkl', 'vips_completo_3.pkl',
'x264_completo_3.pkl', 'xhpl_completo_3.pkl', 'openmc_completo_2.pkl',
'bodytrack_completo_5.pkl']
parsecapps_argnum= [1, 4, 6,
0, 1, 0,
7, 3, 1,
23, 1, 0,
2]
# createPowerModels()
createPerformanceModels()
# createPerformanceModels("new_data/","new_data/ready/","ferret")
# energy_figures(parsecapps[0])
# comparison(appname='dedup',proposed_bar=False,relative=True,thrs_filter=[])
# figures(energy=False, in_cmp=3)
# comparison(appname="", proposed_bar=False,relative=True)
# for app,arg,title in zip(parsecapps,parsecapps_argnum,titles):
# if not 'freq' in app:
# continue
# createReducedPerformanceModel2('data/performance_model/%s'%app,arg,title_=title) | [
"matplotlib.pyplot.title",
"energyOptimal.energyModel.energyModel",
"energyOptimal.performanceModel.performanceModel",
"energyOptimal.powerModel.powerModel",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"energyOptimal.plotData.ax.view_init",
"pandas.merge",
"matplotlib.pyp... | [((382, 439), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (405, 439), False, 'import warnings\n'), ((818, 842), 'os.listdir', 'os.listdir', (['profile_path'], {}), '(profile_path)\n', (828, 842), False, 'import os\n'), ((1341, 1365), 'os.listdir', 'os.listdir', (['profile_path'], {}), '(profile_path)\n', (1351, 1365), False, 'import os\n'), ((4951, 4969), 'energyOptimal.performanceModel.performanceModel', 'performanceModel', ([], {}), '()\n', (4967, 4969), False, 'from energyOptimal.performanceModel import performanceModel\n'), ((6552, 6566), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6564, 6566), True, 'from matplotlib import pyplot as plt\n'), ((6763, 6787), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Train size"""'], {}), "('Train size')\n", (6773, 6787), True, 'from matplotlib import pyplot as plt\n'), ((6792, 6809), 'matplotlib.pyplot.title', 'plt.title', (['title_'], {}), '(title_)\n', (6801, 6809), True, 'from matplotlib import pyplot as plt\n'), ((6814, 6855), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('fotos/over/%s.png' % title_)"], {}), "('fotos/over/%s.png' % title_)\n", (6825, 6855), True, 'from matplotlib import pyplot as plt\n'), ((7572, 7605), 'energyOptimal.energyModel.energyModel', 'energyModel', (['pw_model', 'perf_model'], {}), '(pw_model, perf_model)\n', (7583, 7605), False, 'from energyOptimal.energyModel import energyModel\n'), ((7732, 7794), 'pandas.crosstab', 'pd.crosstab', (["ond['in']", "ond['thr']", "ond['energy']"], {'aggfunc': 'min'}), "(ond['in'], ond['thr'], ond['energy'], aggfunc=min)\n", (7743, 7794), True, 'import pandas as pd\n'), ((7871, 7905), 'pandas.concat', 'pd.concat', (['([df] * nthreads)'], {'axis': '(1)'}), '([df] * nthreads, axis=1)\n', (7880, 7905), True, 'import pandas as pd\n'), ((7912, 7969), 'pandas.DataFrame', 'pd.DataFrame', (['(ond.values / df.values)'], {'columns': 'ond.columns'}), '(ond.values / df.values, columns=ond.columns)\n', (7924, 7969), True, 'import pandas as pd\n'), ((7991, 8051), 'matplotlib.pyplot.plot', 'plt.plot', (['[-1, 6]', '[1, 1]', '"""--"""'], {'color': '"""k"""', 'label': '"""proposto"""'}), "([-1, 6], [1, 1], '--', color='k', label='proposto')\n", (7999, 8051), True, 'from matplotlib import pyplot as plt\n'), ((8051, 8070), 'matplotlib.pyplot.title', 'plt.title', (['"""Ferret"""'], {}), "('Ferret')\n", (8060, 8070), True, 'from matplotlib import pyplot as plt\n'), ((8075, 8093), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8091, 8093), True, 'from matplotlib import pyplot as plt\n'), ((8098, 8135), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fotos/comp2/ferret.png"""'], {}), "('fotos/comp2/ferret.png')\n", (8109, 8135), True, 'from matplotlib import pyplot as plt\n'), ((8241, 8259), 'energyOptimal.performanceModel.performanceModel', 'performanceModel', ([], {}), '()\n', (8257, 8259), False, 'from energyOptimal.performanceModel import performanceModel\n'), ((10032, 10046), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10044, 10046), True, 'from matplotlib import pyplot as plt\n'), ((10190, 10214), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Train size"""'], {}), "('Train size')\n", (10200, 10214), True, 'from matplotlib import pyplot as plt\n'), ((10219, 10236), 'matplotlib.pyplot.title', 'plt.title', (['title_'], {}), '(title_)\n', (10228, 10236), True, 'from matplotlib import pyplot as plt\n'), ((10241, 
10282), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('fotos/over/%s.png' % title_)"], {}), "('fotos/over/%s.png' % title_)\n", (10252, 10282), True, 'from matplotlib import pyplot as plt\n'), ((879, 891), 'energyOptimal.powerModel.powerModel', 'powerModel', ([], {}), '()\n', (889, 891), False, 'from energyOptimal.powerModel import powerModel\n'), ((1864, 1882), 'energyOptimal.performanceModel.performanceModel', 'performanceModel', ([], {}), '()\n', (1880, 1882), False, 'from energyOptimal.performanceModel import performanceModel\n'), ((3677, 3812), 'energyOptimal.plotData.setProps', 'plotData.setProps', ([], {'xlabel': '"""Frequencies (GHz)"""', 'ylabel': '"""Active threads"""', 'zlabel': "('Energy (kJ)' if energy else 'Time (s)')", 'title': 'title'}), "(xlabel='Frequencies (GHz)', ylabel='Active threads',\n zlabel='Energy (kJ)' if energy else 'Time (s)', title=title)\n", (3694, 3812), False, 'from energyOptimal import plotData\n'), ((4631, 4660), 'energyOptimal.plotData.ax.view_init', 'plotData.ax.view_init', (['(30)', '(60)'], {}), '(30, 60)\n', (4652, 4660), False, 'from energyOptimal import plotData\n'), ((5818, 5866), 'pandas.merge', 'pd.merge', (["aux[['freq', 'thr', 'in_cat']]", 'df_ori'], {}), "(aux[['freq', 'thr', 'in_cat']], df_ori)\n", (5826, 5866), True, 'import pandas as pd\n'), ((6934, 6945), 'energyOptimal.dvfsModel.dvfsModel', 'dvfsModel', ([], {}), '()\n', (6943, 6945), False, 'from energyOptimal.dvfsModel import dvfsModel\n'), ((9322, 9370), 'pandas.merge', 'pd.merge', (["aux[['freq', 'thr', 'in_cat']]", 'df_ori'], {}), "(aux[['freq', 'thr', 'in_cat']], df_ori)\n", (9330, 9370), True, 'import pandas as pd\n'), ((10631, 10642), 'energyOptimal.dvfsModel.dvfsModel', 'dvfsModel', ([], {}), '()\n', (10640, 10642), False, 'from energyOptimal.dvfsModel import dvfsModel\n'), ((10933, 10966), 'energyOptimal.energyModel.energyModel', 'energyModel', (['pw_model', 'perf_model'], {}), '(pw_model, perf_model)\n', (10944, 10966), False, 'from energyOptimal.energyModel import energyModel\n'), ((11283, 11349), 'pandas.crosstab', 'pd.crosstab', (["ond['in_cat']", "ond['thr']", "ond['energy']"], {'aggfunc': 'min'}), "(ond['in_cat'], ond['thr'], ond['energy'], aggfunc=min)\n", (11294, 11349), True, 'import pandas as pd\n'), ((11367, 11431), 'pandas.crosstab', 'pd.crosstab', (["ond['in_cat']", "ond['thr']", "ond['time']"], {'aggfunc': 'min'}), "(ond['in_cat'], ond['thr'], ond['time'], aggfunc=min)\n", (11378, 11431), True, 'import pandas as pd\n'), ((12760, 12793), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Inputs"""'], {'fontsize': '(18)'}), "('Inputs', fontsize=18)\n", (12770, 12793), True, 'from matplotlib import pyplot as plt\n'), ((12801, 12874), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["('Energy relative' if relative else 'Eenergy (J)')"], {'fontsize': '(18)'}), "('Energy relative' if relative else 'Eenergy (J)', fontsize=18)\n", (12811, 12874), True, 'from matplotlib import pyplot as plt\n'), ((12882, 12905), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(18)'}), '(fontsize=18)\n', (12892, 12905), True, 'from matplotlib import pyplot as plt\n'), ((12914, 12937), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(18)'}), '(fontsize=18)\n', (12924, 12937), True, 'from matplotlib import pyplot as plt\n'), ((12946, 12975), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(22)'}), '(title, fontsize=22)\n', (12955, 12975), True, 'from matplotlib import pyplot as plt\n'), ((12983, 13054), 'matplotlib.pyplot.legend', 'plt.legend', ([], 
{'loc': '"""center right"""', 'bbox_to_anchor': '(1.2, 0.55)', 'fontsize': '(12)'}), "(loc='center right', bbox_to_anchor=(1.2, 0.55), fontsize=12)\n", (12993, 13054), True, 'from matplotlib import pyplot as plt\n'), ((13060, 13078), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13076, 13078), True, 'from matplotlib import pyplot as plt\n'), ((13087, 13130), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('fotos/relative/%s.png' % dvfs)"], {}), "('fotos/relative/%s.png' % dvfs)\n", (13098, 13130), True, 'from matplotlib import pyplot as plt\n'), ((13201, 13397), 'pandas.DataFrame', 'pd.DataFrame', (['row'], {'columns': "['app', 'max_en', 'mean_en', '32_en', 'min_en', 'max_time', 'mean_time',\n '32_time', 'min_time', 'max_save', 'median_save', '32_save', 'min_save',\n 'train_energy']"}), "(row, columns=['app', 'max_en', 'mean_en', '32_en', 'min_en',\n 'max_time', 'mean_time', '32_time', 'min_time', 'max_save',\n 'median_save', '32_save', 'min_save', 'train_energy'])\n", (13213, 13397), True, 'import pandas as pd\n'), ((1543, 1566), 'os.listdir', 'os.listdir', (['output_path'], {}), '(output_path)\n', (1553, 1566), False, 'import os\n'), ((4697, 4724), 'energyOptimal.plotData.ax.set_zlim', 'plotData.ax.set_zlim', (['(0)', '(15)'], {}), '(0, 15)\n', (4717, 4724), False, 'from energyOptimal import plotData\n'), ((11761, 11825), 'pandas.DataFrame', 'pd.DataFrame', (['(ond_en.values - aux.values)'], {'columns': 'ond_en.columns'}), '(ond_en.values - aux.values, columns=ond_en.columns)\n', (11773, 11825), True, 'import pandas as pd\n'), ((11843, 11907), 'pandas.DataFrame', 'pd.DataFrame', (['(ond_en.values / aux.values)'], {'columns': 'ond_en.columns'}), '(ond_en.values / aux.values, columns=ond_en.columns)\n', (11855, 11907), True, 'import pandas as pd\n'), ((12001, 12069), 'pandas.DataFrame', 'pd.DataFrame', (['(ond_time.values / aux.values)'], {'columns': 'ond_time.columns'}), '(ond_time.values / aux.values, columns=ond_time.columns)\n', (12013, 12069), True, 'import pandas as pd\n'), ((12586, 12646), 'matplotlib.pyplot.plot', 'plt.plot', (['[-1, 6]', '[1, 1]', '"""--"""'], {'color': '"""k"""', 'label': '"""proposed"""'}), "([-1, 6], [1, 1], '--', color='k', label='proposed')\n", (12594, 12646), True, 'from matplotlib import pyplot as plt\n'), ((965, 989), 'numpy.arange', 'np.arange', (['(1.2)', '(2.3)', '(0.1)'], {}), '(1.2, 2.3, 0.1)\n', (974, 989), True, 'import numpy as np\n'), ((3634, 3675), 'numpy.arange', 'np.arange', (['(1200000.0)', '(2300000.0)', '(100000.0)'], {}), '(1200000.0, 2300000.0, 100000.0)\n', (3643, 3675), True, 'import numpy as np\n'), ((11091, 11144), 'pandas.merge', 'pd.merge', (['ond', "perf_model.dataFrame[['in', 'in_cat']]"], {}), "(ond, perf_model.dataFrame[['in', 'in_cat']])\n", (11099, 11144), True, 'import pandas as pd\n')] |
import math
import numpy as np
import scipy
from python_reference import sparsemax
from python_reference import sparsemax_loss
class SparsemaxRegression:
transform_type = 'sparsemax'
def __init__(self, input_size, output_size, observations=None,
                 regularizer=1e-1, learning_rate=1e-2,
random_state=None):
self.name = 'Numpy'
self.random_state = random_state
        # initialize weights
self.input_size = input_size
self.output_size = output_size
self.reset()
# set hyper parameters
        self.regularizer = regularizer
self.learning_rate = learning_rate
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def reset(self):
self.W = scipy.stats.truncnorm.rvs(
-2, 2, size=(self.input_size, self.output_size),
random_state=self.random_state
)
self.W *= math.sqrt(2 / (self.input_size + self.output_size))
self.b = np.zeros((1, self.output_size))
def gradient(self, x, t):
n = x.shape[0]
z = np.dot(x, self.W) + self.b
loss_grad = sparsemax_loss.grad(z, t)
return (
np.dot(x.T, loss_grad) / n,
np.sum(loss_grad, axis=0) / n
)
def update(self, x, t, epochs=1):
for _ in range(epochs):
(dW, db) = self.gradient(x, t)
            self.W -= self.learning_rate * (self.regularizer * self.W + dW)
            self.b -= self.learning_rate * (self.regularizer * self.b + db)
def loss(self, x, t):
l2 = np.linalg.norm(self.W, 'fro')**2 + np.linalg.norm(self.b, 2)**2
        return 0.5 * self.regularizer * l2 + \
np.mean(sparsemax_loss.forward(np.dot(x, self.W) + self.b, t))
def predict(self, x):
return sparsemax.forward(np.dot(x, self.W) + self.b)
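# Minimal usage sketch (hypothetical shapes; assumes one-hot targets t):
#   reg = SparsemaxRegression(input_size=4, output_size=3, random_state=0)
#   reg.update(x_train, t_train, epochs=100)  # x_train: (n, 4), t_train: (n, 3)
#   y_hat = reg.predict(x_test)               # rows are sparse probability vectors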
| [
"numpy.sum",
"python_reference.sparsemax_loss.grad",
"math.sqrt",
"numpy.zeros",
"numpy.linalg.norm",
"scipy.stats.truncnorm.rvs",
"numpy.dot"
] | [((812, 922), 'scipy.stats.truncnorm.rvs', 'scipy.stats.truncnorm.rvs', (['(-2)', '(2)'], {'size': '(self.input_size, self.output_size)', 'random_state': 'self.random_state'}), '(-2, 2, size=(self.input_size, self.output_size),\n random_state=self.random_state)\n', (837, 922), False, 'import scipy\n'), ((971, 1022), 'math.sqrt', 'math.sqrt', (['(2 / (self.input_size + self.output_size))'], {}), '(2 / (self.input_size + self.output_size))\n', (980, 1022), False, 'import math\n'), ((1040, 1071), 'numpy.zeros', 'np.zeros', (['(1, self.output_size)'], {}), '((1, self.output_size))\n', (1048, 1071), True, 'import numpy as np\n'), ((1185, 1210), 'python_reference.sparsemax_loss.grad', 'sparsemax_loss.grad', (['z', 't'], {}), '(z, t)\n', (1204, 1210), False, 'from python_reference import sparsemax_loss\n'), ((1138, 1155), 'numpy.dot', 'np.dot', (['x', 'self.W'], {}), '(x, self.W)\n', (1144, 1155), True, 'import numpy as np\n'), ((1241, 1263), 'numpy.dot', 'np.dot', (['x.T', 'loss_grad'], {}), '(x.T, loss_grad)\n', (1247, 1263), True, 'import numpy as np\n'), ((1281, 1306), 'numpy.sum', 'np.sum', (['loss_grad'], {'axis': '(0)'}), '(loss_grad, axis=0)\n', (1287, 1306), True, 'import numpy as np\n'), ((1630, 1659), 'numpy.linalg.norm', 'np.linalg.norm', (['self.W', '"""fro"""'], {}), "(self.W, 'fro')\n", (1644, 1659), True, 'import numpy as np\n'), ((1665, 1690), 'numpy.linalg.norm', 'np.linalg.norm', (['self.b', '(2)'], {}), '(self.b, 2)\n', (1679, 1690), True, 'import numpy as np\n'), ((1876, 1893), 'numpy.dot', 'np.dot', (['x', 'self.W'], {}), '(x, self.W)\n', (1882, 1893), True, 'import numpy as np\n'), ((1784, 1801), 'numpy.dot', 'np.dot', (['x', 'self.W'], {}), '(x, self.W)\n', (1790, 1801), True, 'import numpy as np\n')] |
import abc
import numpy as np
from math import log
from functools import lru_cache
from copy import copy
EXIT_POSITIONS = {
"red": ((3, -3), (3, -2), (3, -1), (3, 0)),
"green": ((-3, 3), (-2, 3), (-1, 3), (0, 3)),
"blue": ((0, -3), (-1, -2), (-2, -1), (-3, 0))
}
EXIT_CORNER = {
"red": ((3, -3), (3, 0)),
"green": ((-3, 3), (0, 3)),
"blue": ((0, -3), (-3, 0))
}
EXIT_EDGE = {
    "red": ((3, -2), (3, -1)),
    "green": ((-2, 3), (-1, 3)),
    "blue": ((-1, -2), (-2, -1))
}
NEEDED = 4
@lru_cache(maxsize=10)
def grid_dist(pos1, pos2):
"""
Get the grid distance between two different grid locations
:param pos1: first position (tuple)
:param pos2: second position (tuple)
:return: The `manhattan` distance between those two positions
"""
x1, y1 = pos1
x2, y2 = pos2
dy = y2 - y1
dx = x2 - x1
# If different sign, take the max of difference in position
if dy * dx < 0:
return max([abs(dy), abs(dx)])
# Same sign or zero just take sum
else:
return abs(dy + dx)
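# Worked examples on these axial coordinates: for (0, 0) -> (1, -2), dy = -2 and
# dx = 1 have opposite signs, so the distance is max(2, 1) = 2; for (0, 0) -> (2, 1),
# the signs match and the distance is abs(1 + 2) = 3.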
@lru_cache(maxsize=10)
def exit_distance(piece, state, player):
pos_to_piece = state.pos_to_piece
available_positions = \
list(filter(lambda x: x not in pos_to_piece or x == piece,
EXIT_POSITIONS[player]))
if available_positions:
return min([grid_dist(i, piece) for i in available_positions])
return 7
# @lru_cache(maxsize=10)
# def exit_distance(piece, state, player):
#
#
# if player == 'red':
# return 3 - piece[0]
# if player == 'green':
# return 3 - piece[1]
# if player == 'blue':
# return 3 + piece[0]
@lru_cache(maxsize=10)
def exit_corner_distance(piece, state, players):
"""
    Compute the distance from a piece to the nearest exit corner of the given players.
"""
dists = []
pos_to_piece = state.pos_to_piece
piece_to_pos = state.piece_to_pos
# For all players you want to block
for player in players:
if len(piece_to_pos[player]) == 0:
continue
for corner in EXIT_CORNER[player]:
            # If the corner square is free (and is not this piece's own square)
            if corner not in pos_to_piece and piece != corner:
dists.append(grid_dist(corner, piece))
if dists:
return min(dists)
# Didn't find any place
else:
return 0
@lru_cache(maxsize=10)
def sum_exit_distance(state, player):
pieces = state.piece_to_pos[player]
distances = {}
for piece in pieces:
distances[piece] = exit_distance(piece, state, player)
return sum(distances.values())
@lru_cache(maxsize=10)
def num_exited_piece(state, player):
completed = state.completed
n_exited_pieces = completed[player]
return n_exited_pieces
@lru_cache(maxsize=10)
def num_board_piece(state, player):
piece_to_pos = state.piece_to_pos
n_pieces = len(piece_to_pos[player])
return n_pieces
@lru_cache(maxsize=10)
def sum_number_pieces(state, player):
return num_board_piece(state, player) + num_exited_piece(state, player)
@lru_cache(maxsize=10)
def sum_completed_piece(state, player):
n_complete = state.completed[player]
return n_complete
@lru_cache(maxsize=10)
def other_player_piece_worth(state, player):
numbers = []
for other_player in state.code_map:
if other_player == player:
continue
numbers.append(sum_number_pieces(state, other_player))
difference = max(numbers) - min(numbers)
return sum(numbers) + difference
@lru_cache(maxsize=10)
def leading_opponent_and_neg_distance(state, player):
numbers = {}
for other_player in state.code_map:
if other_player == player:
continue
numbers[other_player] = \
modified_negative_sum_distance(state, other_player)
return max(numbers.items(), key=lambda x: x[1])
@lru_cache(maxsize=10)
def leading_opponent_negative_distance(state, player):
opponent, opponent_neg_dist = \
leading_opponent_and_neg_distance(state, player)
return opponent_neg_dist
@lru_cache(maxsize=10)
def modified_negative_sum_distance(state, player):
pieces = state.piece_to_pos[player]
# print(len(pieces), pieces, player, state.piece_to_pos)
n_completed = state.completed[player]
if n_completed >= 4:
return 10000000
distances = {}
for piece in pieces:
distances[piece] = exit_distance(piece, state, player)
if len(pieces) + n_completed < NEEDED:
# try to compete for more pieces
return -28
sorted_distance = sorted(distances.values())
# Only consider the top 4 when have more
return -sum(sorted_distance[:NEEDED - n_completed])
@lru_cache(maxsize=10)
def excess_piece_negative_sum_distance(state, player, offset=7):
pieces = state.piece_to_pos[player]
n_completed = state.completed[player]
# No spare pieces
if NEEDED >= len(pieces) + n_completed:
return 0
opponents = tuple(i for i in state.code_map if i != player)
sorted_opponents = \
tuple(sorted(opponents,
key=lambda x: modified_negative_sum_distance(state, x)))
distances = {}
for piece in pieces:
distances[piece] = exit_distance(piece, state, player)
sorted_distance = sorted(distances.items(), key=lambda x: x[1])
excess_pieces = sorted_distance[NEEDED-n_completed:]
distances = {}
for piece, _ in excess_pieces:
distances[piece] = exit_corner_distance(piece, state, sorted_opponents)
# Only consider the top 4 when have more
return -sum(distances.values()) + offset * len(distances)
@lru_cache(maxsize=10)
def regular_neg_corner_distance(state, player):
pieces = copy(state.piece_to_pos[player])
if not len(pieces):
return 0
opponents = tuple(i for i in state.code_map if i != player)
sorted_opponents = \
tuple(sorted(opponents,
key=lambda x: modified_negative_sum_distance(state, x),
reverse=True))
total_distance = 0
for opponent in sorted_opponents:
for corner in EXIT_CORNER[opponent]:
corner_distances = {}
for piece in pieces:
corner_distances[piece] = grid_dist(piece, corner)
if corner_distances:
min_pair = min(corner_distances.items(), key=lambda x: x[1])
pieces.remove(min_pair[0])
total_distance += min_pair[1]
return total_distance
def occupied_enemy_corner_weights(state, player):
piece_to_pos = state.piece_to_pos
opponents_neg_distances = \
tuple((modified_negative_sum_distance(state, i), i) for i in
state.code_map if i != player)
total_pieces = 0
for i, (neg_dist, opponent) in enumerate(opponents_neg_distances):
if not len(piece_to_pos[opponent]):
continue
for piece in piece_to_pos[player]:
if piece in EXIT_CORNER[opponent]:
total_pieces += 1
return total_pieces
def occupied_enemy_edge_weights(state, player):
piece_to_pos = state.piece_to_pos
opponents_neg_distances = \
tuple((modified_negative_sum_distance(state, i), i) for i in
state.code_map if i != player)
total_pieces = 0
for i, (neg_dist, opponent) in enumerate(opponents_neg_distances):
if not len(piece_to_pos[opponent]):
continue
for piece in piece_to_pos[player]:
            if piece in EXIT_POSITIONS[opponent] and piece not in EXIT_CORNER[opponent]:
total_pieces += 1
return total_pieces
@lru_cache(maxsize=10)
def sum_number_needed_pieces(state, player):
return min(num_board_piece(state, player) +
num_exited_piece(state, player), 4)
@lru_cache(maxsize=10)
def excess_pieces(state, player):
return sum_number_pieces(state, player) - 4
@lru_cache(maxsize=10)
def utility_completed_piece(state, player):
# if there not enough pieces to exit to win
# then player is punished if choose to exit
exited = num_exited_piece(state, player)
n = exited + num_board_piece(state, player)
if n < NEEDED:
return -exited
else:
return exited
class EvaluatorGenerator(abc.ABC):
"""
    Wraps an evaluation function, regardless of its internals, and provides a
    function interface to the player class for evaluating the player's
    situation
"""
@abc.abstractmethod
def __init__(self, *args, **kwargs):
"""
Initialise the evaluator
"""
pass
@abc.abstractmethod
def __call__(self, state, player, *args, **kwargs):
"""
Compute the value of a state based on the input.
This will always compute wrt the perspective of a red player
:param state: The state to evaluate on
:return: int, The value of that specific state
"""
pass
class DummyEvaluator(EvaluatorGenerator):
"""
    An evaluator that only considers the number of exited pieces
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
pass
def __call__(self, state, player, *args, **kwargs):
return 0
class WinLossEvaluator(EvaluatorGenerator):
"""
Return 1 if win, -1 if lost, else 0
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, state, player, *args, **kwargs):
if state.completed[player] == 4:
return 1
for p in state.completed:
if state.completed[p] == 4:
return -1
return 0
class FunctionalEvaluator:
def __init__(self, state, weights, funcs):
self._state = state
self._weights = weights
self._funcs = funcs
self._value = dict()
def __call__(self, player):
if player in self._value:
return self._value[player]
X = np.array([fn(self._state, player) if self._weights[i] != 0 else
0 for i, fn in enumerate(self._funcs)])
X = X.T
value = np.dot(X, self._weights)
self._value[player] = value
return value
class FunctionalEvaluatorGenerator(EvaluatorGenerator):
"""
Evaluate a state based on set of features computed by functions and
return single scalar indicating the value of the state.
The value is computed by feeding an arbitrary function to the state
"""
def __init__(self, weights, functions, *args, **kwargs):
self._functions = functions
self._weights = weights
super().__init__(*args, **kwargs)
def __call__(self, state, *args, **kwargs):
return FunctionalEvaluator(state, self._weights, self._functions)
class NaiveEvaluatorGenerator(EvaluatorGenerator):
"""
* weights are defined beforehand
An evaluator that only considers
1. Number of your pieces on the board
2. Number of your exited pieces
    3. Negative of the summed grid distances from each piece to its nearest exit
"""
@staticmethod
def negative_distance(state, player):
return -sum_exit_distance(state, player)
@staticmethod
def utility_exit_pieces(state, player):
exited = num_exited_piece(state, player)
if sum_number_pieces(state, player) < 4:
return -exited
return exited
# weights in the format of [pieces, exited, distance]
def __init__(self, weights, *args, **kwargs):
self._weights = weights
func = [self.utility_exit_pieces, sum_number_pieces,
self.negative_distance]
self._eval = FunctionalEvaluatorGenerator(self._weights, func)
super().__init__(*args, **kwargs)
# returns an evaluator for that state
def __call__(self, state, *args, **kwargs):
return self._eval(state)
class AdvanceEG(EvaluatorGenerator):
"""
* weights are defined beforehand
An evaluator that only considers
1. Number of your pieces
2. Distance to exit
3. Number of exited pieces
"""
@staticmethod
    def utility_distance_piece(piece, state, player):
MAX_DISTANCE = 6
distance = exit_distance(piece, state, player)
if distance > MAX_DISTANCE:
return 0
s = MAX_DISTANCE - distance
return -1 / 24 * s ** 2 + 4 * s
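    # Worked values for the quadratic above: a piece on an exit cell
    # (distance 0, so s = 6) scores -36/24 + 24 = 22.5, while a piece at the
    # MAX_DISTANCE cutoff (s = 0) scores 0.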
@staticmethod
def utility_distance(state, player):
pieces = state.piece_to_pos[player]
        return sum([AdvanceEG.utility_distance_piece(piece, state, player) for piece in pieces])
@staticmethod
def utility_completed_piece(state, player):
        # return 0 if the player doesn't have enough pieces to win
NEEDED = 4
exited = num_exited_piece(state, player)
n = exited + num_board_piece(state, player)
if n < NEEDED:
return 0
else:
return exited
@staticmethod
def utility_pieces(state, player):
n = num_exited_piece(state, player) + num_board_piece(state, player)
        # log(n) diverges to -inf as n -> 0, so penalize having no pieces heavily
if n == 0:
return -1000000
return 10*log(n)+3*n
# weights in the format of [utility_distance, utility_pieces]
def __init__(self, weights, *args, **kwargs):
self._weights = weights
func = [self.utility_pieces, self.utility_completed_piece,
self.utility_distance]
self._eval = FunctionalEvaluatorGenerator(self._weights, func)
super().__init__(*args, **kwargs)
# returns an evaluator for that state
def __call__(self, state, *args, **kwargs):
return self._eval(state)
class MinimaxEvaluator(EvaluatorGenerator):
"""
* weights are defined beforehand
An evaluator that considers
1. (max) number of needed player piece
2. (max neg) leading player's distance from winning
3. (max) offset negative distance of excess piece to opponent exit
try to block leading player from exiting
4. (max neg) networth of other players' pieces
5. (max) negative sum distance to goal
6. (max) number of completed piece
7. (max) excess pieces
8. (max) negative corner distance
9. (max) pieces in corner
10. (min) piece in edge of enemy exit
"""
def __init__(self, weights, *args, **kwargs):
func = [
sum_number_needed_pieces,
leading_opponent_negative_distance,
excess_piece_negative_sum_distance,
other_player_piece_worth,
modified_negative_sum_distance,
sum_completed_piece,
excess_pieces,
regular_neg_corner_distance,
occupied_enemy_corner_weights,
occupied_enemy_edge_weights
]
# print('the weights are:', weights)
        assert len(weights) == len(func)
self._weights = weights
self._eval = FunctionalEvaluatorGenerator(self._weights, func)
super().__init__(*args, **kwargs)
# returns an evaluator for that state
def __call__(self, state, *args, **kwargs):
return self._eval(state)
| [
"math.log",
"numpy.dot",
"functools.lru_cache",
"copy.copy"
] | [((513, 534), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (522, 534), False, 'from functools import lru_cache\n'), ((1062, 1083), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (1071, 1083), False, 'from functools import lru_cache\n'), ((1663, 1684), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (1672, 1684), False, 'from functools import lru_cache\n'), ((2480, 2501), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (2489, 2501), False, 'from functools import lru_cache\n'), ((2725, 2746), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (2734, 2746), False, 'from functools import lru_cache\n'), ((2886, 2907), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (2895, 2907), False, 'from functools import lru_cache\n'), ((3046, 3067), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (3055, 3067), False, 'from functools import lru_cache\n'), ((3185, 3206), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (3194, 3206), False, 'from functools import lru_cache\n'), ((3313, 3334), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (3322, 3334), False, 'from functools import lru_cache\n'), ((3641, 3662), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (3650, 3662), False, 'from functools import lru_cache\n'), ((3983, 4004), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (3992, 4004), False, 'from functools import lru_cache\n'), ((4185, 4206), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (4194, 4206), False, 'from functools import lru_cache\n'), ((4813, 4834), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (4822, 4834), False, 'from functools import lru_cache\n'), ((5740, 5761), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (5749, 5761), False, 'from functools import lru_cache\n'), ((7711, 7732), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (7720, 7732), False, 'from functools import lru_cache\n'), ((7880, 7901), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (7889, 7901), False, 'from functools import lru_cache\n'), ((7987, 8008), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(10)'}), '(maxsize=10)\n', (7996, 8008), False, 'from functools import lru_cache\n'), ((5823, 5855), 'copy.copy', 'copy', (['state.piece_to_pos[player]'], {}), '(state.piece_to_pos[player])\n', (5827, 5855), False, 'from copy import copy\n'), ((10196, 10220), 'numpy.dot', 'np.dot', (['X', 'self._weights'], {}), '(X, self._weights)\n', (10202, 10220), True, 'import numpy as np\n'), ((13203, 13209), 'math.log', 'log', (['n'], {}), '(n)\n', (13206, 13209), False, 'from math import log\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 3 15:09:44 2018
@author: SilverDoe
"""
'''
The SciPy ndimage submodule is dedicated to image processing.
1. Input/Output, displaying images
2. Basic manipulations − Cropping, flipping, rotating, etc.
3. Image filtering − De-noising, sharpening, etc.
4. Image segmentation − Labeling pixels corresponding to different objects
5. Classification
6. Feature extraction
7. Registration
'''
from scipy import misc
f = misc.face()
misc.imsave('E:\\Documents\\PythonProjects\\1_Basics\\DataForFiles\\array.jpg', f) # uses the Image module (PIL)
import matplotlib.pyplot as plt
plt.imshow(f)
plt.show()
face = misc.face(gray = False)
face.mean()
face.max()
face.min()
width, height, t = face.shape
# Cropping
crop_face = face[300: - 300, 400: - 400]
import matplotlib.pyplot as plt
plt.imshow(crop_face)
plt.show()
# up <-> down flip
import numpy as np
from scipy import misc
face = misc.face()
flip_ud_face = np.flipud(face)
import matplotlib.pyplot as plt
plt.imshow(flip_ud_face)
plt.show()
# rotation
from scipy import misc,ndimage
face = misc.face()
rotate_face = ndimage.rotate(face, 30)
import matplotlib.pyplot as plt
plt.imshow(rotate_face)
plt.show()
'''==================== Filtering ============================================='''
# blurring
'''
Blurring is widely used to reduce the noise in the image
'''
from scipy import misc,ndimage
face = misc.face()
blurred_face = ndimage.gaussian_filter(face, sigma=1)
import matplotlib.pyplot as plt
plt.imshow(blurred_face)
plt.show()
# edge detection
'''
Edge detection is an image processing technique for finding the boundaries of objects
within images. It works by detecting discontinuities in brightness. Edge detection is
used for image segmentation and data extraction in areas such as Image Processing,
Computer Vision and Machine Vision.
'''
import scipy.ndimage as nd
import numpy as np
im = np.zeros((256, 256))
im[64:-64, 64:-64] = 1
im[90:-90,90:-90] = 2
im = ndimage.gaussian_filter(im, 8)
import matplotlib.pyplot as plt
plt.imshow(im)
plt.show()
'''
The image looks like a square block of colors. Now, we will detect the edges of those
colored blocks. Here, ndimage provides a function called Sobel to carry out this operation.
NumPy's hypot function then combines the two gradient matrices into one.
'''
import scipy.ndimage as nd
import matplotlib.pyplot as plt
im = np.zeros((256, 256))
im[64:-64, 64:-64] = 1
im[90:-90,90:-90] = 2
im = nd.gaussian_filter(im, 8)
sx = nd.sobel(im, axis = 0, mode = 'constant')
sy = nd.sobel(im, axis = 1, mode = 'constant')
sob = np.hypot(sx, sy)
plt.imshow(sob)
plt.show()
'''
The most commonly used edge detection algorithms include
Sobel
Canny
Prewitt
Roberts
Fuzzy Logic methods
'''
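# A minimal Prewitt variant of the Sobel example above, as a sketch; it assumes
# scipy.ndimage's prewitt filter, which shares sobel's axis/mode signature.
px = nd.prewitt(im, axis = 0, mode = 'constant')
py = nd.prewitt(im, axis = 1, mode = 'constant')
prew = np.hypot(px, py)
plt.imshow(prew)
plt.show()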
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"scipy.ndimage.gaussian_filter",
"numpy.zeros",
"numpy.flipud",
"scipy.ndimage.sobel",
"numpy.hypot",
"scipy.misc.imsave",
"scipy.misc.face",
"scipy.ndimage.rotate"
] | [((469, 480), 'scipy.misc.face', 'misc.face', ([], {}), '()\n', (478, 480), False, 'from scipy import misc, ndimage\n'), ((481, 567), 'scipy.misc.imsave', 'misc.imsave', (['"""E:\\\\Documents\\\\PythonProjects\\\\1_Basics\\\\DataForFiles\\\\array.jpg"""', 'f'], {}), "('E:\\\\Documents\\\\PythonProjects\\\\1_Basics\\\\DataForFiles\\\\array.jpg',\n f)\n", (492, 567), False, 'from scipy import misc, ndimage\n'), ((627, 640), 'matplotlib.pyplot.imshow', 'plt.imshow', (['f'], {}), '(f)\n', (637, 640), True, 'import matplotlib.pyplot as plt\n'), ((641, 651), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (649, 651), True, 'import matplotlib.pyplot as plt\n'), ((660, 681), 'scipy.misc.face', 'misc.face', ([], {'gray': '(False)'}), '(gray=False)\n', (669, 681), False, 'from scipy import misc, ndimage\n'), ((834, 855), 'matplotlib.pyplot.imshow', 'plt.imshow', (['crop_face'], {}), '(crop_face)\n', (844, 855), True, 'import matplotlib.pyplot as plt\n'), ((856, 866), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (864, 866), True, 'import matplotlib.pyplot as plt\n'), ((938, 949), 'scipy.misc.face', 'misc.face', ([], {}), '()\n', (947, 949), False, 'from scipy import misc, ndimage\n'), ((965, 980), 'numpy.flipud', 'np.flipud', (['face'], {}), '(face)\n', (974, 980), True, 'import numpy as np\n'), ((1014, 1038), 'matplotlib.pyplot.imshow', 'plt.imshow', (['flip_ud_face'], {}), '(flip_ud_face)\n', (1024, 1038), True, 'import matplotlib.pyplot as plt\n'), ((1039, 1049), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1047, 1049), True, 'import matplotlib.pyplot as plt\n'), ((1101, 1112), 'scipy.misc.face', 'misc.face', ([], {}), '()\n', (1110, 1112), False, 'from scipy import misc, ndimage\n'), ((1127, 1151), 'scipy.ndimage.rotate', 'ndimage.rotate', (['face', '(30)'], {}), '(face, 30)\n', (1141, 1151), False, 'from scipy import misc, ndimage\n'), ((1185, 1208), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rotate_face'], {}), '(rotate_face)\n', (1195, 1208), True, 'import matplotlib.pyplot as plt\n'), ((1209, 1219), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1217, 1219), True, 'import matplotlib.pyplot as plt\n'), ((1422, 1433), 'scipy.misc.face', 'misc.face', ([], {}), '()\n', (1431, 1433), False, 'from scipy import misc, ndimage\n'), ((1449, 1487), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['face'], {'sigma': '(1)'}), '(face, sigma=1)\n', (1472, 1487), False, 'from scipy import misc, ndimage\n'), ((1520, 1544), 'matplotlib.pyplot.imshow', 'plt.imshow', (['blurred_face'], {}), '(blurred_face)\n', (1530, 1544), True, 'import matplotlib.pyplot as plt\n'), ((1545, 1555), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1553, 1555), True, 'import matplotlib.pyplot as plt\n'), ((1931, 1951), 'numpy.zeros', 'np.zeros', (['(256, 256)'], {}), '((256, 256))\n', (1939, 1951), True, 'import numpy as np\n'), ((2002, 2032), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['im', '(8)'], {}), '(im, 8)\n', (2025, 2032), False, 'from scipy import misc, ndimage\n'), ((2066, 2080), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (2076, 2080), True, 'import matplotlib.pyplot as plt\n'), ((2081, 2091), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2089, 2091), True, 'import matplotlib.pyplot as plt\n'), ((2435, 2455), 'numpy.zeros', 'np.zeros', (['(256, 256)'], {}), '((256, 256))\n', (2443, 2455), True, 'import numpy as np\n'), ((2506, 2531), 'scipy.ndimage.gaussian_filter', 'nd.gaussian_filter', (['im', 
'(8)'], {}), '(im, 8)\n', (2524, 2531), True, 'import scipy.ndimage as nd\n'), ((2538, 2575), 'scipy.ndimage.sobel', 'nd.sobel', (['im'], {'axis': '(0)', 'mode': '"""constant"""'}), "(im, axis=0, mode='constant')\n", (2546, 2575), True, 'import scipy.ndimage as nd\n'), ((2585, 2622), 'scipy.ndimage.sobel', 'nd.sobel', (['im'], {'axis': '(1)', 'mode': '"""constant"""'}), "(im, axis=1, mode='constant')\n", (2593, 2622), True, 'import scipy.ndimage as nd\n'), ((2633, 2649), 'numpy.hypot', 'np.hypot', (['sx', 'sy'], {}), '(sx, sy)\n', (2641, 2649), True, 'import numpy as np\n'), ((2651, 2666), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sob'], {}), '(sob)\n', (2661, 2666), True, 'import matplotlib.pyplot as plt\n'), ((2667, 2677), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2675, 2677), True, 'import matplotlib.pyplot as plt\n')] |
import os
from pathlib import Path
import numpy as np
import time
target_word_en = ['zui_da_liang_du', 'zui_xiao_liang_du']
MAX_LOOP = 2 # expand each .wav MAX_LOOP times
MIN_STRETCH = 0.6
MAX_STRETCH = 1.1
src_path = '\\big_scale\\downsample\\'
dst_path = '\\big_scale\\time_stretch\\'
if not Path(src_path).exists():
    os.mkdir(src_path)
target_list = os.listdir(src_path)
if not Path(dst_path).exists():
    os.mkdir(dst_path)
rubberband_path = 'E:\\rubberband\\command_line\\rubberband-1.8.2-gpl-executable-windows\\'
for target_en in target_word_en:
target_scr_path = src_path + target_en + '\\'
target_dst_path = dst_path + target_en + '\\'
    if not Path(target_scr_path).exists():
        os.mkdir(target_scr_path)
    if not Path(target_dst_path).exists():
        os.mkdir(target_dst_path)
file_list = os.listdir(target_scr_path)
#count = 1
for i in range(MAX_LOOP):
for wave_file in file_list:
rand_time_stretch = np.random.randint(MIN_STRETCH*100, MAX_STRETCH*100)/100.0
new_wave_file = wave_file[:wave_file.find('_')+3] + 'ratio_%.2f.wav'%(rand_time_stretch)
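            # rubberband's '-t <ratio>' stretches the clip to <ratio> times its
            # original duration; the ratio is baked into the output filename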
cmd = rubberband_path + 'rubberband -c 6 -q --realtime -t ' + str(rand_time_stretch) + ' ' + target_scr_path + wave_file + ' ' + target_dst_path + new_wave_file
res = os.popen(cmd)
time.sleep(0.04) # wait
#count += 1
#count = 1
| [
"os.mkdir",
"os.popen",
"time.sleep",
"pathlib.Path",
"numpy.random.randint",
"os.listdir"
] | [((364, 384), 'os.listdir', 'os.listdir', (['src_path'], {}), '(src_path)\n', (374, 384), False, 'import os\n'), ((331, 349), 'os.mkdir', 'os.mkdir', (['src_path'], {}), '(src_path)\n', (339, 349), False, 'import os\n'), ((426, 444), 'os.mkdir', 'os.mkdir', (['dst_path'], {}), '(dst_path)\n', (434, 444), False, 'import os\n'), ((851, 878), 'os.listdir', 'os.listdir', (['target_scr_path'], {}), '(target_scr_path)\n', (861, 878), False, 'import os\n'), ((727, 752), 'os.mkdir', 'os.mkdir', (['target_scr_path'], {}), '(target_scr_path)\n', (735, 752), False, 'import os\n'), ((809, 834), 'os.mkdir', 'os.mkdir', (['target_dst_path'], {}), '(target_dst_path)\n', (817, 834), False, 'import os\n'), ((294, 308), 'pathlib.Path', 'Path', (['src_path'], {}), '(src_path)\n', (298, 308), False, 'from pathlib import Path\n'), ((389, 403), 'pathlib.Path', 'Path', (['dst_path'], {}), '(dst_path)\n', (393, 403), False, 'from pathlib import Path\n'), ((1342, 1355), 'os.popen', 'os.popen', (['cmd'], {}), '(cmd)\n', (1350, 1355), False, 'import os\n'), ((1368, 1384), 'time.sleep', 'time.sleep', (['(0.04)'], {}), '(0.04)\n', (1378, 1384), False, 'import time\n'), ((679, 700), 'pathlib.Path', 'Path', (['target_scr_path'], {}), '(target_scr_path)\n', (683, 700), False, 'from pathlib import Path\n'), ((761, 782), 'pathlib.Path', 'Path', (['target_dst_path'], {}), '(target_dst_path)\n', (765, 782), False, 'from pathlib import Path\n'), ((992, 1047), 'numpy.random.randint', 'np.random.randint', (['(MIN_STRETCH * 100)', '(MAX_STRETCH * 100)'], {}), '(MIN_STRETCH * 100, MAX_STRETCH * 100)\n', (1009, 1047), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
# polytope python module
import pycapacity.pycapacity as capacity_solver
# URDF parsing an kinematics
from urdf_parser_py.urdf import URDF
from pykdl_utils.kdl_kinematics import KDLKinematics
import hrl_geom.transformations as trans
class RobotSolver:
base_link = ""
tip_link = ""
robot_urdf = ""
kdl_kin = ""
# constructor - reading urdf and constructing the robots kinematic chain
def __init__(self, base, tip):
self.base_link = base
self.tip_link = tip
# loading the root urdf from robot_description parameter
self.robot_urdf = URDF.from_parameter_server()
self.kdl_kin = KDLKinematics(self.robot_urdf , self.base_link, self.tip_link)
# https://frankaemika.github.io/docs/control_parameters.html
# maximal torques
self.t_max = np.array([self.kdl_kin.joint_limits_effort]).T
self.t_min = -self.t_max
# maximal joint velocities
self.dq_max = np.array([self.kdl_kin.joint_limits_velocity]).T
self.dq_min = -self.dq_max
# maximal joint angles
self.q_max = np.array([self.kdl_kin.joint_limits_upper]).T
self.q_min = np.array([self.kdl_kin.joint_limits_lower]).T
# direct kinematics functions for 7dof
def forward(self, q):
return self.kdl_kin.forward(q)
def dk_position(self, q):
return self.forward(q)[:3,3]
def dk_orientation_matrix(self, q):
return self.forward(q)[0:3,0:3]
def jacobian(self, q):
Jac = self.kdl_kin.jacobian(q)
return Jac
def jacobian_position(self, q):
return self.jacobian(q)[:3, :]
def jacobian_pseudo_inv(self, q):
return np.linalg.pinv(self.jacobian(q))
    # inverse kinematics via KDL (the iterations argument is currently unused)
def ik_solve(self, x_d, q_0, iterations):
return self.kdl_kin.inverse(x_d, q_0)
def gravity_torque(self, q):
return np.array([self.kdl_kin.gravity(q)]).T
# velocity manipulability calculation
def manipulability_velocity(self, q, direction = None):
# avoid 0 angles
joint_pos = np.array(q)
joint_pos[joint_pos==0] = 10**-7
# jacobian calculation
if direction is None:
Jac = self.jacobian_position(q)
else:
Jac = np.array(direction).dot(self.jacobian_position(q))
# use the capacity module
return capacity_solver.manipulability_velocity(Jac,self.dq_max)
# force manipulability calculation
def manipulability_force(self, q, direction = None):
# avoid 0 angles
joint_pos = np.array(q)
joint_pos[joint_pos==0] = 10**-7
# jacobian calculation
if direction is None:
Jac = self.jacobian_position(q)
else:
Jac = np.array(direction).dot(self.jacobian_position(q))
# use the capacity module
return capacity_solver.manipulability_force(Jac, self.t_max)
# maximal end effector force
def force_polytope_intersection(self, q1,q2):
# jacobian calculation
Jac1 = self.jacobian_position(q1)
Jac2 = self.jacobian_position(q2)
return capacity_solver.force_polytope_intersection_ordered(Jac1,Jac2,self.t_max,self.t_min,self.t_max,self.t_min, self.gravity_torque(q1), self.gravity_torque(q2))
# maximal end effector force
def force_polytope_sum(self, q1,q2):
# jacobian calculation
Jac1 = self.jacobian_position(q1)
Jac2 = self.jacobian_position(q2)
return capacity_solver.force_polytope_sum_ordered(Jac1,Jac2,self.t_max,self.t_min,self.t_max,self.t_min, self.gravity_torque(q1), self.gravity_torque(q2))
# maximal end effector force
def force_polytope(self, q, direction = None, force = None):
# jacobian calculation
Jac_full = self.jacobian_position(q)
if direction is not None:
Jac = np.array(direction).dot(Jac_full)
else:
Jac = Jac_full
return capacity_solver.force_polytope_ordered(Jac, self.t_max, self.t_min, self.gravity_torque(q))
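# Minimal usage sketch (hypothetical link names; assumes a ROS parameter server
# already holding the robot_description URDF):
#   solver = RobotSolver('panda_link0', 'panda_link8')
#   w = solver.manipulability_force(q)  # q: joint angles matching the chain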
| [
"pycapacity.pycapacity.manipulability_force",
"numpy.array",
"pykdl_utils.kdl_kinematics.KDLKinematics",
"urdf_parser_py.urdf.URDF.from_parameter_server",
"pycapacity.pycapacity.manipulability_velocity"
] | [((644, 672), 'urdf_parser_py.urdf.URDF.from_parameter_server', 'URDF.from_parameter_server', ([], {}), '()\n', (670, 672), False, 'from urdf_parser_py.urdf import URDF\n'), ((697, 758), 'pykdl_utils.kdl_kinematics.KDLKinematics', 'KDLKinematics', (['self.robot_urdf', 'self.base_link', 'self.tip_link'], {}), '(self.robot_urdf, self.base_link, self.tip_link)\n', (710, 758), False, 'from pykdl_utils.kdl_kinematics import KDLKinematics\n'), ((2152, 2163), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (2160, 2163), True, 'import numpy as np\n'), ((2443, 2500), 'pycapacity.pycapacity.manipulability_velocity', 'capacity_solver.manipulability_velocity', (['Jac', 'self.dq_max'], {}), '(Jac, self.dq_max)\n', (2482, 2500), True, 'import pycapacity.pycapacity as capacity_solver\n'), ((2642, 2653), 'numpy.array', 'np.array', (['q'], {}), '(q)\n', (2650, 2653), True, 'import numpy as np\n'), ((2933, 2986), 'pycapacity.pycapacity.manipulability_force', 'capacity_solver.manipulability_force', (['Jac', 'self.t_max'], {}), '(Jac, self.t_max)\n', (2969, 2986), True, 'import pycapacity.pycapacity as capacity_solver\n'), ((879, 923), 'numpy.array', 'np.array', (['[self.kdl_kin.joint_limits_effort]'], {}), '([self.kdl_kin.joint_limits_effort])\n', (887, 923), True, 'import numpy as np\n'), ((1017, 1063), 'numpy.array', 'np.array', (['[self.kdl_kin.joint_limits_velocity]'], {}), '([self.kdl_kin.joint_limits_velocity])\n', (1025, 1063), True, 'import numpy as np\n'), ((1153, 1196), 'numpy.array', 'np.array', (['[self.kdl_kin.joint_limits_upper]'], {}), '([self.kdl_kin.joint_limits_upper])\n', (1161, 1196), True, 'import numpy as np\n'), ((1220, 1263), 'numpy.array', 'np.array', (['[self.kdl_kin.joint_limits_lower]'], {}), '([self.kdl_kin.joint_limits_lower])\n', (1228, 1263), True, 'import numpy as np\n'), ((2343, 2362), 'numpy.array', 'np.array', (['direction'], {}), '(direction)\n', (2351, 2362), True, 'import numpy as np\n'), ((2833, 2852), 'numpy.array', 'np.array', (['direction'], {}), '(direction)\n', (2841, 2852), True, 'import numpy as np\n'), ((3942, 3961), 'numpy.array', 'np.array', (['direction'], {}), '(direction)\n', (3950, 3961), True, 'import numpy as np\n')] |
import numpy as np
import healpy as hp
#res = "2048"
#res = "512"
res = "4096"
input_data_prefix = "/resource/data/MICE/maps/"
output_data_prefix = "/arxiv/projects/MICEDataAnalysis/ForEuclidMeetingLausanne/spice_pcl_analysis/"
ninv_file_name = input_data_prefix + res + "/mice_v2_0_shear_G_ninv.fits"
# read the n_inv data
m0 = hp.read_map(ninv_file_name,field=0)
m1 = hp.read_map(ninv_file_name,field=1)
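# the maps store per-pixel inverse noise variance (ninv), so the shape-noise
# sigma per shear component is the mean of sqrt(1/ninv) over covered pixels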
sig0 = np.mean(np.sqrt(1./m0[np.where(m0>0)]))
sig1 = np.mean(np.sqrt(1./m1[np.where(m1>0)]))
output_file_name = output_data_prefix + res + "/mice_v2_0_shear_shape_noise.dat"
strsig = str(sig0) + "\t" + str(sig1)
with open(output_file_name, 'w') as f:
f.write(strsig)
| [
"healpy.read_map",
"numpy.where"
] | [((333, 369), 'healpy.read_map', 'hp.read_map', (['ninv_file_name'], {'field': '(0)'}), '(ninv_file_name, field=0)\n', (344, 369), True, 'import healpy as hp\n'), ((374, 410), 'healpy.read_map', 'hp.read_map', (['ninv_file_name'], {'field': '(1)'}), '(ninv_file_name, field=1)\n', (385, 410), True, 'import healpy as hp\n'), ((440, 456), 'numpy.where', 'np.where', (['(m0 > 0)'], {}), '(m0 > 0)\n', (448, 456), True, 'import numpy as np\n'), ((487, 503), 'numpy.where', 'np.where', (['(m1 > 0)'], {}), '(m1 > 0)\n', (495, 503), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.colors as colors
import numpy as np
import pandas as pd
def plot_pca_contribution(x, y, variance, num_components):
"""
    Plots the cumulative explained-variance ratio against the number of components
:return:
"""
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
plt.plot(x, y, color='blue')
plt.axhline(variance, linestyle='--', color='red')
plt.axvline(num_components, linestyle='--', color='red')
plt.xticks(np.append(np.append(np.arange(0, 11, 5), 11), np.arange(15, 50, 5)),
np.append(np.append(np.append(np.arange(0, 10, 5), ' '), 11), np.arange(15, 50, 5)))
plt.yticks(np.arange(0, 1.1, 0.1))
ax.yaxis.set_major_formatter(ticker.PercentFormatter(1.0))
plt.text(15, variance, "Variance: {0:}%".format(variance * 100))
plt.xlabel('Number of Components')
plt.ylabel('Cumulative Explained Variance')
plt.title('Cumulative Explained Variance by Principal Components')
plt.grid(axis='both', linestyle='--')
plt.savefig('pca_contribution.png')
plt.show()
def plot_pca_heatmap(components, columns):
"""
    Plots each principal component's loadings on the original features
:return:
"""
map_ = pd.DataFrame(components, columns=columns)
    fig, ax = plt.subplots(figsize=(12, 6))
    norm = colors.BoundaryNorm(boundaries=np.arange(-1, 1.2, 0.2).tolist(), ncolors=256)
    im = ax.imshow(map_, cmap='RdYlGn_r', norm=norm)
    cbar = ax.figure.colorbar(im, ax=ax, orientation='horizontal')
cbar.set_ticks(np.arange(-1, 1.2, 0.2))
cbar.ax.set_ylabel("Correlation")
ax.set_xticks(np.arange(map_.shape[1]))
ax.set_yticks(np.arange(map_.shape[0]))
ax.set_xticklabels(columns)
ax.set_yticklabels(np.arange(1, map_.shape[1] + 1, 1))
ax.set_ylabel("Component")
ax.set_xlabel("Feature")
ax.tick_params(top=True, bottom=False,
labeltop=True, labelbottom=False, labelsize=7)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=90, ha="left",
rotation_mode="anchor")
plt.savefig('pca_contribution-heatmap.png')
plt.show()
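# Minimal usage sketch (hypothetical data; assumes a fitted scikit-learn PCA):
#   from sklearn.decomposition import PCA
#   pca = PCA().fit(X)
#   cum = np.cumsum(pca.explained_variance_ratio_)
#   plot_pca_contribution(np.arange(1, len(cum) + 1), cum, 0.95, 11)
#   plot_pca_heatmap(pca.components_, feature_names)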
| [
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.title",
"pandas.DataFrame",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.ticker.PercentFormatter",
"matplotlib.pyplot.ylabel",... | [((309, 335), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (319, 335), True, 'import matplotlib.pyplot as plt\n'), ((370, 398), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""blue"""'}), "(x, y, color='blue')\n", (378, 398), True, 'import matplotlib.pyplot as plt\n'), ((403, 453), 'matplotlib.pyplot.axhline', 'plt.axhline', (['variance'], {'linestyle': '"""--"""', 'color': '"""red"""'}), "(variance, linestyle='--', color='red')\n", (414, 453), True, 'import matplotlib.pyplot as plt\n'), ((458, 514), 'matplotlib.pyplot.axvline', 'plt.axvline', (['num_components'], {'linestyle': '"""--"""', 'color': '"""red"""'}), "(num_components, linestyle='--', color='red')\n", (469, 514), True, 'import matplotlib.pyplot as plt\n'), ((874, 908), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Components"""'], {}), "('Number of Components')\n", (884, 908), True, 'import matplotlib.pyplot as plt\n'), ((913, 956), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative Explained Variance"""'], {}), "('Cumulative Explained Variance')\n", (923, 956), True, 'import matplotlib.pyplot as plt\n'), ((961, 1027), 'matplotlib.pyplot.title', 'plt.title', (['"""Cumulative Explained Variance by Principal Components"""'], {}), "('Cumulative Explained Variance by Principal Components')\n", (970, 1027), True, 'import matplotlib.pyplot as plt\n'), ((1032, 1069), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""both"""', 'linestyle': '"""--"""'}), "(axis='both', linestyle='--')\n", (1040, 1069), True, 'import matplotlib.pyplot as plt\n'), ((1074, 1109), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pca_contribution.png"""'], {}), "('pca_contribution.png')\n", (1085, 1109), True, 'import matplotlib.pyplot as plt\n'), ((1114, 1124), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1122, 1124), True, 'import matplotlib.pyplot as plt\n'), ((1278, 1319), 'pandas.DataFrame', 'pd.DataFrame', (['components'], {'columns': 'columns'}), '(components, columns=columns)\n', (1290, 1319), True, 'import pandas as pd\n'), ((1324, 1351), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (1334, 1351), True, 'import matplotlib.pyplot as plt\n'), ((1366, 1380), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1378, 1380), True, 'import matplotlib.pyplot as plt\n'), ((2174, 2217), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""pca_contribution-heatmap.png"""'], {}), "('pca_contribution-heatmap.png')\n", (2185, 2217), True, 'import matplotlib.pyplot as plt\n'), ((2222, 2232), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2230, 2232), True, 'import matplotlib.pyplot as plt\n'), ((714, 736), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.1)'], {}), '(0, 1.1, 0.1)\n', (723, 736), True, 'import numpy as np\n'), ((771, 799), 'matplotlib.ticker.PercentFormatter', 'ticker.PercentFormatter', (['(1.0)'], {}), '(1.0)\n', (794, 799), True, 'import matplotlib.ticker as ticker\n'), ((1609, 1632), 'numpy.arange', 'np.arange', (['(-1)', '(1.2)', '(0.2)'], {}), '(-1, 1.2, 0.2)\n', (1618, 1632), True, 'import numpy as np\n'), ((1690, 1714), 'numpy.arange', 'np.arange', (['map_.shape[1]'], {}), '(map_.shape[1])\n', (1699, 1714), True, 'import numpy as np\n'), ((1734, 1758), 'numpy.arange', 'np.arange', (['map_.shape[0]'], {}), '(map_.shape[0])\n', (1743, 1758), True, 'import numpy as np\n'), ((1815, 1849), 'numpy.arange', 'np.arange', (['(1)', '(map_.shape[1] 
+ 1)', '(1)'], {}), '(1, map_.shape[1] + 1, 1)\n', (1824, 1849), True, 'import numpy as np\n'), ((576, 596), 'numpy.arange', 'np.arange', (['(15)', '(50)', '(5)'], {}), '(15, 50, 5)\n', (585, 596), True, 'import numpy as np\n'), ((676, 696), 'numpy.arange', 'np.arange', (['(15)', '(50)', '(5)'], {}), '(15, 50, 5)\n', (685, 696), True, 'import numpy as np\n'), ((550, 569), 'numpy.arange', 'np.arange', (['(0)', '(11)', '(5)'], {}), '(0, 11, 5)\n', (559, 569), True, 'import numpy as np\n'), ((644, 663), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(5)'], {}), '(0, 10, 5)\n', (653, 663), True, 'import numpy as np\n'), ((1448, 1471), 'numpy.arange', 'np.arange', (['(-1)', '(1.2)', '(0.2)'], {}), '(-1, 1.2, 0.2)\n', (1457, 1471), True, 'import numpy as np\n')] |
import os, sys, numpy as np
import config
os.chdir('src/') # fix for data.init_dataset()
np.random.seed(config.seed)
import data, tfidf, models, sentimentanalysis
from utils import utils, io
# info = pandas.read_csv(config.dataset_dir + 'final_data.csv')
dataset = data.init_dataset()
# load model
m = config.dataset_dir + 'models/default_model.json'
w = config.dataset_dir + 'models/default_model_w.h5'
model = models.load_model(m, w)
if __name__ == '__main__':
args = sys.argv
if len(args) > 1:
filename = '../' + args[1]
else:
filename = config.dataset_dir + '1118.txt'
print('\n filename:', filename)
tokens, lines = io.read_book3(filename)
# build feature vector
v1 = data.tokenlist_to_vector(tokens, dataset.sentiment_dataset)
v2 = np.array(sentimentanalysis.per_book(lines))
x = np.append(v1, v2)
x_test = np.stack([x])
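    # np.stack([x]) adds a leading batch axis, giving shape (1, len(x)) --
    # the single-sample batch that a Keras-style model.predict expects.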
# predict y
y_test = model.predict(x_test)[0]
results, best = data.decode_y(dataset, y_test, 5)
print("-----------------------------\n\n")
print("-- Results --")
# print all values
ls = list(results.items())
ls.sort(key=lambda x: x[1], reverse=True)
th = ['Genre', 'Score']
rows = []
for k, v in ls:
rows.append([k, utils.format_score(v)])
print(utils.gen_table(th, rows))
print('\n Top 5 genres:')
th = ['#', 'Genre']
rows = []
for i, v in enumerate(best):
rows.append([str(i + 1), v])
print(utils.gen_table(th, rows))
print('\n - \n')
v = utils.format_score(results[best[0]])
print('Predicted genre: "%s" with a score of %s%s \n\n' % (best[0], v,
'%'))
# for x in range(3):
# # import os, sys, numpy as np
# # import config
# # np.random.seed(config.seed)
# # print("NP - - -", np.random.random(2))
# # import data, tfidf, models, sentimentanalysis
# # from utils import utils, io
# # dataset = data.init_dataset()
# # tokens, lines = io.read_book3(filename)
# v1_ = data.tokenlist_to_vector(tokens, dataset.sentiment_dataset)
# print("V - :::::", v1_[:20])
# for i, val in enumerate(v1):
# if not val == v1_[i]:
# print('not eq', val, v1_[i])
# v1 = data.tokenlist_to_vector(tokens, dataset.sentiment_dataset)
# v2 = np.array(sentimentanalysis.per_book(lines))
# x = np.append(v1, v2)
# x_test = np.stack([x])
# model = models.load_model(m, w)
# y_test = model.predict(x_test)[0]
# results, best = data.decode_y(dataset, y_test, 6)
# print(best)
| [
"utils.io.read_book3",
"numpy.stack",
"numpy.random.seed",
"utils.utils.format_score",
"data.tokenlist_to_vector",
"utils.utils.gen_table",
"models.load_model",
"numpy.append",
"data.init_dataset",
"data.decode_y",
"sentimentanalysis.per_book",
"os.chdir"
] | [((42, 58), 'os.chdir', 'os.chdir', (['"""src/"""'], {}), "('src/')\n", (50, 58), False, 'import os, sys, numpy as np\n'), ((90, 117), 'numpy.random.seed', 'np.random.seed', (['config.seed'], {}), '(config.seed)\n', (104, 117), True, 'import os, sys, numpy as np\n'), ((268, 287), 'data.init_dataset', 'data.init_dataset', ([], {}), '()\n', (285, 287), False, 'import data, tfidf, models, sentimentanalysis\n'), ((416, 439), 'models.load_model', 'models.load_model', (['m', 'w'], {}), '(m, w)\n', (433, 439), False, 'import data, tfidf, models, sentimentanalysis\n'), ((663, 686), 'utils.io.read_book3', 'io.read_book3', (['filename'], {}), '(filename)\n', (676, 686), False, 'from utils import utils, io\n'), ((724, 783), 'data.tokenlist_to_vector', 'data.tokenlist_to_vector', (['tokens', 'dataset.sentiment_dataset'], {}), '(tokens, dataset.sentiment_dataset)\n', (748, 783), False, 'import data, tfidf, models, sentimentanalysis\n'), ((846, 863), 'numpy.append', 'np.append', (['v1', 'v2'], {}), '(v1, v2)\n', (855, 863), True, 'import os, sys, numpy as np\n'), ((877, 890), 'numpy.stack', 'np.stack', (['[x]'], {}), '([x])\n', (885, 890), True, 'import os, sys, numpy as np\n'), ((966, 999), 'data.decode_y', 'data.decode_y', (['dataset', 'y_test', '(5)'], {}), '(dataset, y_test, 5)\n', (979, 999), False, 'import data, tfidf, models, sentimentanalysis\n'), ((1529, 1565), 'utils.utils.format_score', 'utils.format_score', (['results[best[0]]'], {}), '(results[best[0]])\n', (1547, 1565), False, 'from utils import utils, io\n'), ((802, 835), 'sentimentanalysis.per_book', 'sentimentanalysis.per_book', (['lines'], {}), '(lines)\n', (828, 835), False, 'import data, tfidf, models, sentimentanalysis\n'), ((1296, 1321), 'utils.utils.gen_table', 'utils.gen_table', (['th', 'rows'], {}), '(th, rows)\n', (1311, 1321), False, 'from utils import utils, io\n'), ((1472, 1497), 'utils.utils.gen_table', 'utils.gen_table', (['th', 'rows'], {}), '(th, rows)\n', (1487, 1497), False, 'from utils import utils, io\n'), ((1262, 1283), 'utils.utils.format_score', 'utils.format_score', (['v'], {}), '(v)\n', (1280, 1283), False, 'from utils import utils, io\n')] |
#! /usr/bin/env python3
import argparse
import json
import logging
import logging.config
import os
import sys
import time
from concurrent import futures
from datetime import datetime
import numpy as np
from sklearn.linear_model import LinearRegression
from datetime import datetime
import ServerSideExtension_pb2 as SSE
import grpc
from SSEData_linearRegression import FunctionType, \
get_func_type
from ScriptEval_linearRegression import ScriptEval
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
##logger = os.getcwd().replace('\\','/') + '/' + 'linearRegressionLogger.txt'
class ExtensionService(SSE.ConnectorServicer):
"""
    A simple SSE-plugin implementing a linear regression function.
"""
def __init__(self, funcdef_file):
"""
Class initializer.
:param funcdef_file: a function definition JSON file
"""
self._function_definitions = funcdef_file
self.scriptEval = ScriptEval()
if not os.path.exists('logs'):
os.mkdir('logs')
logging.config.fileConfig('logger.config')
logging.info('Logging enabled')
@property
def function_definitions(self):
"""
:return: json file with function definitions
"""
return self._function_definitions
@property
def functions(self):
"""
:return: Mapping of function id and implementation
"""
return {
0: '_linearRegression'
}
@staticmethod
def _get_function_id(context):
"""
Retrieve function id from header.
:param context: context
:return: function id
"""
metadata = dict(context.invocation_metadata())
header = SSE.FunctionRequestHeader()
header.ParseFromString(metadata['qlik-functionrequestheader-bin'])
return header.functionId
"""
Implementation of added functions.
"""
@staticmethod
def _linearRegression(request, context):
        # clear the log for linear regression details
## f = open(logger,'w')
## f.write('New function call\n')
# instantiate a list for measure data
dataList = []
for request_rows in request:
# iterate over each request row (contains rows, duals, numData)
## f.write('Request Rows: ' + str(request_rows) + '\n')
# pull duals from each row, and the numData from duals
for row in request_rows.rows:
# the first numData contains the measure data
data = [d.numData for d in row.duals][0]
# try to convert number to float
try:
float(data)
except ValueError:
data = 0
# append each data point to a list and log it
dataList.append(data)
## f.write('Row: ' + str(data) + '\n')
# grab the length of the data list and convert
X_len = len(dataList)
X = np.asarray(range(X_len))
# convert the data into an array
Y = np.asarray(dataList)
# fit linear regression model
mdl = LinearRegression().fit(X.reshape(-1, 1),Y)
# grab m and b from y = mx + b
m = mdl.coef_[0]
b = mdl.intercept_
# calculate regression line points
regressionResults = []
        for i in range(X_len):
y = m * i + b
regressionResults.append(y)
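        # The loop above evaluates the fitted line y_hat = m * x + b at each
        # integer index 0..X_len-1; a vectorized equivalent (an illustrative
        # sketch, deliberately not swapped in above) would be:
        #   regressionResults = (m * np.arange(X_len) + b).tolist()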
# Create an iterable of dual with the result
duals = iter([[SSE.Dual(numData=d)] for d in regressionResults])
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=[SSE.Row(duals=d) for d in duals])
"""
Implementation of rpc functions.
"""
def GetCapabilities(self, request, context):
"""
Get capabilities.
Note that either request or context is used in the implementation of this method, but still added as
parameters. The reason is that gRPC always sends both when making a function call and therefore we must include
them to avoid error messages regarding too many parameters provided from the client.
:param request: the request, not used in this method.
:param context: the context, not used in this method.
:return: the capabilities.
"""
logging.info('GetCapabilities')
# Create an instance of the Capabilities grpc message
# Enable(or disable) script evaluation
# Set values for pluginIdentifier and pluginVersion
capabilities = SSE.Capabilities(allowScript=True,
pluginIdentifier='Hello World - Qlik',
pluginVersion='v1.0.0-beta1')
# If user defined functions supported, add the definitions to the message
with open(self.function_definitions) as json_file:
# Iterate over each function definition and add data to the capabilities grpc message
for definition in json.load(json_file)['Functions']:
function = capabilities.functions.add()
function.name = definition['Name']
function.functionId = definition['Id']
function.functionType = definition['Type']
function.returnType = definition['ReturnType']
# Retrieve name and type of each parameter
for param_name, param_type in sorted(definition['Params'].items()):
function.params.add(name=param_name, dataType=param_type)
logging.info('Adding to capabilities: {}({})'.format(function.name,
[p.name for p in function.params]))
return capabilities
def ExecuteFunction(self, request_iterator, context):
"""
Execute function call.
:param request_iterator: an iterable sequence of Row.
:param context: the context.
:return: an iterable sequence of Row.
"""
# Retrieve function id
func_id = self._get_function_id(context)
# Call corresponding function
logging.info('ExecuteFunction (functionId: {})'.format(func_id))
return getattr(self, self.functions[func_id])(request_iterator, context)
def EvaluateScript(self, request, context):
"""
        This plugin supports script evaluation only for aggregation and tensor function types.
:param request:
:param context:
:return:
"""
# Parse header for script request
metadata = dict(context.invocation_metadata())
header = SSE.ScriptRequestHeader()
header.ParseFromString(metadata['qlik-scriptrequestheader-bin'])
# Retrieve function type
func_type = get_func_type(header)
# Verify function type
if (func_type == FunctionType.Aggregation) or (func_type == FunctionType.Tensor):
return self.scriptEval.EvaluateScript(header, request, func_type)
else:
# This plugin does not support other function types than aggregation and tensor.
raise grpc.RpcError(grpc.StatusCode.UNIMPLEMENTED,
'Function type {} is not supported in this plugin.'.format(func_type.name))
"""
Implementation of the Server connecting to gRPC.
"""
def Serve(self, port, pem_dir):
"""
        Sets up the gRPC server on the given port, using a secure connection when certificates are provided
:param port: port to listen on.
:param pem_dir: Directory including certificates
:return: None
"""
# Create gRPC server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
SSE.add_ConnectorServicer_to_server(self, server)
if pem_dir:
# Secure connection
with open(os.path.join(pem_dir, 'sse_server_key.pem'), 'rb') as f:
private_key = f.read()
with open(os.path.join(pem_dir, 'sse_server_cert.pem'), 'rb') as f:
cert_chain = f.read()
with open(os.path.join(pem_dir, 'root_cert.pem'), 'rb') as f:
root_cert = f.read()
credentials = grpc.ssl_server_credentials([(private_key, cert_chain)], root_cert, True)
server.add_secure_port('[::]:{}'.format(port), credentials)
logging.info('*** Running server in secure mode on port: {} ***'.format(port))
else:
# Insecure connection
server.add_insecure_port('[::]:{}'.format(port))
logging.info('*** Running server in insecure mode on port: {} ***'.format(port))
# Start gRPC server
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--port', nargs='?', default='50059')
parser.add_argument('--pem_dir', nargs='?')
parser.add_argument('--definition-file', nargs='?', default='FuncDefs_linearRegression.json')
args = parser.parse_args()
# need to locate the file when script is called from outside it's location dir.
def_file = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), args.definition_file)
calc = ExtensionService(def_file)
calc.Serve(args.port, args.pem_dir)
| [
"os.mkdir",
"argparse.ArgumentParser",
"ServerSideExtension_pb2.FunctionRequestHeader",
"os.path.join",
"grpc.ssl_server_credentials",
"ServerSideExtension_pb2.Dual",
"os.path.exists",
"concurrent.futures.ThreadPoolExecutor",
"ServerSideExtension_pb2.Capabilities",
"numpy.asarray",
"os.path.real... | [((8973, 8998), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8996, 8998), False, 'import argparse\n'), ((943, 955), 'ScriptEval_linearRegression.ScriptEval', 'ScriptEval', ([], {}), '()\n', (953, 955), False, 'from ScriptEval_linearRegression import ScriptEval\n'), ((1032, 1074), 'logging.config.fileConfig', 'logging.config.fileConfig', (['"""logger.config"""'], {}), "('logger.config')\n", (1057, 1074), False, 'import logging\n'), ((1083, 1114), 'logging.info', 'logging.info', (['"""Logging enabled"""'], {}), "('Logging enabled')\n", (1095, 1114), False, 'import logging\n'), ((1723, 1750), 'ServerSideExtension_pb2.FunctionRequestHeader', 'SSE.FunctionRequestHeader', ([], {}), '()\n', (1748, 1750), True, 'import ServerSideExtension_pb2 as SSE\n'), ((3098, 3118), 'numpy.asarray', 'np.asarray', (['dataList'], {}), '(dataList)\n', (3108, 3118), True, 'import numpy as np\n'), ((4407, 4438), 'logging.info', 'logging.info', (['"""GetCapabilities"""'], {}), "('GetCapabilities')\n", (4419, 4438), False, 'import logging\n'), ((4631, 4738), 'ServerSideExtension_pb2.Capabilities', 'SSE.Capabilities', ([], {'allowScript': '(True)', 'pluginIdentifier': '"""Hello World - Qlik"""', 'pluginVersion': '"""v1.0.0-beta1"""'}), "(allowScript=True, pluginIdentifier='Hello World - Qlik',\n pluginVersion='v1.0.0-beta1')\n", (4647, 4738), True, 'import ServerSideExtension_pb2 as SSE\n'), ((6739, 6764), 'ServerSideExtension_pb2.ScriptRequestHeader', 'SSE.ScriptRequestHeader', ([], {}), '()\n', (6762, 6764), True, 'import ServerSideExtension_pb2 as SSE\n'), ((6892, 6913), 'SSEData_linearRegression.get_func_type', 'get_func_type', (['header'], {}), '(header)\n', (6905, 6913), False, 'from SSEData_linearRegression import FunctionType, get_func_type\n'), ((7818, 7867), 'ServerSideExtension_pb2.add_ConnectorServicer_to_server', 'SSE.add_ConnectorServicer_to_server', (['self', 'server'], {}), '(self, server)\n', (7853, 7867), True, 'import ServerSideExtension_pb2 as SSE\n'), ((971, 993), 'os.path.exists', 'os.path.exists', (['"""logs"""'], {}), "('logs')\n", (985, 993), False, 'import os\n'), ((1007, 1023), 'os.mkdir', 'os.mkdir', (['"""logs"""'], {}), "('logs')\n", (1015, 1023), False, 'import os\n'), ((7766, 7808), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': '(10)'}), '(max_workers=10)\n', (7792, 7808), False, 'from concurrent import futures\n'), ((8294, 8367), 'grpc.ssl_server_credentials', 'grpc.ssl_server_credentials', (['[(private_key, cert_chain)]', 'root_cert', '(True)'], {}), '([(private_key, cert_chain)], root_cert, True)\n', (8321, 8367), False, 'import grpc\n'), ((9367, 9396), 'os.path.realpath', 'os.path.realpath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (9383, 9396), False, 'import os\n'), ((3172, 3190), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3188, 3190), False, 'from sklearn.linear_model import LinearRegression\n'), ((5085, 5105), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (5094, 5105), False, 'import json\n'), ((8838, 8869), 'time.sleep', 'time.sleep', (['_ONE_DAY_IN_SECONDS'], {}), '(_ONE_DAY_IN_SECONDS)\n', (8848, 8869), False, 'import time\n'), ((3597, 3616), 'ServerSideExtension_pb2.Dual', 'SSE.Dual', ([], {'numData': 'd'}), '(numData=d)\n', (3605, 3616), True, 'import ServerSideExtension_pb2 as SSE\n'), ((7943, 7986), 'os.path.join', 'os.path.join', (['pem_dir', '"""sse_server_key.pem"""'], {}), "(pem_dir, 'sse_server_key.pem')\n", (7955, 7986), 
False, 'import os\n'), ((8061, 8105), 'os.path.join', 'os.path.join', (['pem_dir', '"""sse_server_cert.pem"""'], {}), "(pem_dir, 'sse_server_cert.pem')\n", (8073, 8105), False, 'import os\n'), ((8179, 8217), 'os.path.join', 'os.path.join', (['pem_dir', '"""root_cert.pem"""'], {}), "(pem_dir, 'root_cert.pem')\n", (8191, 8217), False, 'import os\n'), ((3729, 3745), 'ServerSideExtension_pb2.Row', 'SSE.Row', ([], {'duals': 'd'}), '(duals=d)\n', (3736, 3745), True, 'import ServerSideExtension_pb2 as SSE\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
def f(a,b,c):
return a ** b - c
x = np.ogrid[0:1:24j, 0:1:12j, 0:1:6j]
values = f(x[0],x[1],x[2])
value = np.mean(values)
print(value)
exact = np.log(2) - 0.5
print(exact)
differential = np.abs(exact - value)
print(differential)
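# Why exact = log(2) - 0.5: the mean of f over the unit cube approximates
#   \int_0^1 \int_0^1 \int_0^1 (a**b - c) da db dc
#     = \int_0^1 1/(b + 1) db - \int_0^1 c dc = log(2) - 1/2,
# so `differential` measures the error of the 24x12x6 grid approximation.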
| [
"numpy.mean",
"numpy.abs",
"numpy.log"
] | [((180, 195), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (187, 195), True, 'import numpy as np\n'), ((263, 284), 'numpy.abs', 'np.abs', (['(exact - value)'], {}), '(exact - value)\n', (269, 284), True, 'import numpy as np\n'), ((218, 227), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (224, 227), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__email__ = "<EMAIL>"
from ddf_library.utils import generate_info, read_stage_file
from pycompss.api.task import task
from pycompss.functions.reduce import merge_reduce
from pycompss.api.api import compss_wait_on, compss_delete_object
from pycompss.api.parameter import FILE_IN
import pandas as pd
import numpy as np
import time
def drop_nan_rows(data, settings):
thresh = settings['thresh']
how = settings['how']
frag = settings['id_frag']
subset = settings['attributes']
if subset is None or len(subset) == 0:
subset = data.columns.tolist()
else:
subset = [att for att in subset if att in data.columns]
data.dropna(how=how, subset=subset, thresh=thresh, inplace=True)
data.reset_index(drop=True, inplace=True)
info = generate_info(data, frag)
return data, info
def fill_by_value(data, settings):
value = settings['value']
subset = settings['attributes']
frag = settings['id_frag']
if isinstance(value, dict):
data.fillna(value=value, inplace=True)
else:
if not subset:
subset = data.columns.tolist()
values = {key: value for key in subset}
data.fillna(value=values, inplace=True)
data.reset_index(drop=True, inplace=True)
info = generate_info(data, frag)
return data, info
def drop_nan_columns(data, settings):
"""
    :param data: A list of pandas DataFrames.
    :param settings: A dictionary with:
        * attributes: optional list of column names to consider;
        * thresh: int, default None. If specified, drop columns that contain
          more than thresh null values. This overwrites the how parameter;
        * how: ‘any’ or ‘all’. If ‘any’, drop a column if it contains any
          nulls. If ‘all’, drop a column only if all its values are null.
    :return: Returns a list of pandas DataFrames.
"""
data, settings = drop_nan_columns_stage_1(data, settings)
nfrag = len(data)
result = [[] for _ in range(nfrag)]
info = result[:]
for f in range(nfrag):
settings['id_frag'] = f
result[f], info[f] = drop_nan_columns_stage_2(data[f], settings.copy())
output = {'key_data': ['data'], 'key_info': ['schema'],
'data': result, 'schema': info}
return output
def drop_nan_columns_stage_1(data, settings):
return clean_missing_preprocessing(data, settings)
def drop_nan_columns_stage_2(data, settings):
return _clean_missing(data, settings)
def fill_nan(data, settings):
"""
    :param data: A list of pandas DataFrames.
    :param settings: A dictionary with:
        * attributes: optional list of column names to consider;
        * cleaning_mode: one of "MEAN", "MODE" or "MEDIAN", the statistic
          used to fill the missing values of each selected column.
    :return: Returns a list of pandas DataFrames.
"""
data, settings = fill_nan_stage_1(data, settings)
nfrag = len(data)
result = [[] for _ in range(nfrag)]
info = result[:]
for f in range(nfrag):
settings['id_frag'] = f
result[f], info[f] = fill_nan_stage_2(data[f], settings.copy())
output = {'key_data': ['data'], 'key_info': ['schema'],
'data': result, 'schema': info}
return output
def fill_nan_stage_1(data, settings):
return clean_missing_preprocessing(data, settings)
def fill_nan_stage_2(data, settings):
return _clean_missing(data, settings)
def clean_missing_preprocessing(data, settings):
    if settings['cleaning_mode'] != 'MEDIAN':
# we need to generate mean value
params = [_clean_missing_pre(df, settings) for df in data]
settings = merge_reduce(merge_clean_options, params)
settings = compss_wait_on(settings)
else:
# noinspection SpellCheckingInspection
"""
Based on :
FUJIWARA, Akihiro; INOUE, Michiko; MASUZAWA, Toshimitsu. Parallel
selection algorithms for CGM and BSP models with application to
sorting. IPSJ Journal, v. 41, p. 1500-1508, 2000.
"""
# 1- On each processor, find the median of all elements on the
# processor.
stage1 = [_median_stage1(df, settings) for df in data]
stage1 = merge_reduce(_median_stage1_merge, stage1)
        # 2- Select the median of the medians. Let MM be the median of the
# medians.
nfrag = len(data)
info = [[] for _ in range(nfrag)]
result = [[] for _ in range(nfrag)]
for f in range(nfrag):
info[f], result[f] = _median_stage2(data[f], stage1)
info_stage2 = merge_reduce(_median_stage2_merge, info)
compss_delete_object(info)
        # 3- Broadcast MM to all processors.
# 4- Split the elements on each processor into two subsets, L and U.
# The subset L contains elements that are smaller than MM, and
# the subset U contains elements that are larger than MM .
# 5 - Compute SUM L
info = [[] for _ in range(nfrag)]
for f in range(nfrag):
info[f] = _median_stage3(result[f], info_stage2)
medians_info = merge_reduce(_median_stage3_merge, info)
compss_delete_object(info)
medians_info = compss_wait_on(medians_info)
settings['values'] = _median_define(medians_info)
settings['intermediate_result'] = False
return data, settings
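# An illustrative, purely sequential sketch (added here for clarity; it is not
# part of the original pipeline) of steps 1-5 of the distributed median scheme
# described above: each fragment reports its local median, the median of those
# medians MM splits every fragment into L (values <= MM) and U (values >= MM),
# and SUM L / SUM U locate the side on which the global median lies.
def _median_split_sketch(fragments):
    medians = [np.median(f) for f in fragments if len(f) > 0]
    mm = np.median(medians)
    sum_l = sum(sum(1 for v in f if v <= mm) for f in fragments)
    sum_u = sum(sum(1 for v in f if v >= mm) for f in fragments)
    return mm, sum_l, sum_u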
@task(returns=1, data_input=FILE_IN)
def _median_stage1(data_input, params):
subset = params['attributes']
df = read_stage_file(data_input, subset)
dict_median = {}
for att in subset:
        x = [v for v in df[att].values if not np.isnan(v)]
size = len(x)
if size > 0:
median = [np.median(x)]
else:
median = []
dict_median[att] = [size, median]
return dict_median
@task(returns=1)
def _median_stage1_merge(dict_median1, dict_median2):
for att in dict_median2:
if att not in dict_median1:
dict_median1[att] = dict_median2[att]
else:
s1, m1 = dict_median1[att]
s2, m2 = dict_median2[att]
dict_median1[att] = [s1+s2, m1+m2]
return dict_median1
@task(returns=2, data_input=FILE_IN)
def _median_stage2(data_input, dict_median):
df = read_stage_file(data_input)
u_l_list = {}
info = {}
for att in dict_median:
size, medians = dict_median[att]
median_of_medians = np.median(medians)
        x = [v for v in df[att].values if not np.isnan(v)]
low_list = [low for low in x if low <= median_of_medians]
upper_list = [high for high in x if high >= median_of_medians]
sum_l = len(low_list)
sum_u = len(upper_list)
u_l_list[att] = [low_list, upper_list]
info[att] = [median_of_medians, size, sum_l, sum_u]
return info, u_l_list
@task(returns=1)
def _median_stage2_merge(info1, info2):
for att in info2:
if att not in info1:
info1[att] = info2[att]
else:
median_of_medians1, size1, sum_l1, sum_u1 = info1[att]
_, _, sum_l2, sum_u2 = info2[att]
info1[att] = [median_of_medians1, size1,
sum_l1 + sum_l2, sum_u1 + sum_u2]
return info1
@task(returns=1)
def _median_stage3(u_l_list, info):
for att in info:
median_of_medians, size, sum_l, sum_u = info[att]
cond = 0
ith = float(size) / 2
if size % 2 == 0:
last = 2
ith = ith + 0.5
else:
last = 1
low, high = u_l_list[att]
if ith < sum_l:
low = sorted(low)[-last:]
high = sorted(high)[:1]
info[att] = [last, low, 0, high, cond]
elif ith == sum_l:
info[att] = [1, [median_of_medians], 1, [], cond]
else:
if ith >= sum_u:
cond = 1
high = sorted(high)[:last]
low = sorted(low)[-1:]
info[att] = [last, high, 2, low, cond]
return info
@task(returns=1)
def _median_stage3_merge(info1, info2):
for att in info2:
if att not in info1:
info1[att] = info2[att]
else:
last, num_p2, op, num_o2, cond = info2[att]
_, num_p1, _, num_o1, _ = info1[att]
if op == 2:
nums1 = sorted(num_p1 + num_p2)[:last]
nums2 = sorted(num_o1 + num_o2)
if len(nums2) == 0:
nums2 = []
else:
nums2 = [nums2[-1]]
else:
nums1 = sorted(num_p1 + num_p2)[-last:]
nums2 = sorted(num_o1 + num_o2)
if len(nums2) == 0:
nums2 = []
else:
nums2 = [nums2[0]]
info1[att] = [last, nums1, op, nums2, cond]
return info1
def _median_define(info):
for att in info:
last, nums1, op, nums2, cond = info[att]
if last == 2:
if cond:
nums = nums2[0] + nums1[0]
else:
nums = sum(nums1)
else:
nums = sum(nums1)
info[att] = nums / last
return info
@task(returns=1, data_input=FILE_IN)
def _clean_missing_pre(data_input, params):
"""REMOVE_COLUMN, MEAN, MODE and MEDIAN needs pre-computation."""
t_start = time.time()
subset = params['attributes']
cleaning_mode = params['cleaning_mode']
thresh = params.get('thresh', None)
how = params.get('how', None)
data = read_stage_file(data_input, subset)
if cleaning_mode == "REMOVE_COLUMN":
# list of columns of the current fragment
# that contains a null value
if thresh:
null_fields = data[subset].isnull().sum()
elif how == 'any':
null_fields = \
data[subset].columns[data[subset].isnull().any()].tolist()
else:
null_fields = \
data[subset].columns[data[subset].isnull().all()].tolist()
params['columns_drop'] = null_fields
elif cleaning_mode == "MEAN":
# generate a partial mean of each subset column
params['values'] = [len(data),
data[subset].sum(numeric_only=True,
skipna=True).values]
    elif cleaning_mode == "MODE":
# generate a frequency list of each subset column
dict_mode = {}
for att in subset:
dict_mode[att] = data[att].value_counts()
params['dict_mode'] = dict_mode
t_end = time.time()
print("[INFO] - Time to process task '{}': {:.0f} seconds"
.format('_clean_missing_pre', t_end - t_start))
return params
@task(returns=1)
def merge_clean_options(params1, params2):
"""Merge pre-computations."""
cleaning_mode = params1['cleaning_mode']
thresh = params1.get('thresh', None)
how = params1.get('how', None)
if cleaning_mode == "REMOVE_COLUMN":
drops1 = params1['columns_drop']
drops2 = params2['columns_drop']
if thresh:
params1['columns_drop'] = drops1 + drops2
elif how == 'any':
params1['columns_drop'] = list(set(drops1 + drops2))
else:
params1['columns_drop'] = list(set(drops1).intersection(drops2))
    elif cleaning_mode == "MEAN":
size1, sums1 = params1['values']
size2, sums2 = params2['values']
params1['values'] = [size1+size2, sums1 + sums2]
    elif cleaning_mode == "MODE":
dict_mode1 = params1['dict_mode']
dict_mode2 = params2['dict_mode']
dict_mode = {}
for att in dict_mode1:
dict_mode[att] = \
pd.concat([dict_mode1[att], dict_mode2[att]], axis=0).\
fillna(0).sum(level=0)
params1['dict_mode'] = dict_mode
return params1
def _clean_missing(data, params):
"""Perform REMOVE_ROW, REMOVE_COLUMN, VALUE, MEAN, MODE and MEDIAN."""
attributes = params['attributes']
cleaning_mode = params['cleaning_mode']
frag = params['id_frag']
if cleaning_mode == "REMOVE_COLUMN":
thresh = params['thresh']
if thresh:
subset = []
cols = params['columns_drop']
for c in cols.index:
if cols.loc[c] > thresh:
subset.append(c)
else:
subset = params['columns_drop']
data = data.drop(subset, axis=1)
elif cleaning_mode == "MEAN":
sizes, sums = params['values']
values = np.divide(sums, sizes)
for v, a in zip(values, attributes):
data[a] = data[a].fillna(value=v)
elif cleaning_mode == "MODE":
dict_mode = params['dict_mode']
for att in dict_mode:
mode = dict_mode[att].idxmax()
data[att] = data[att].fillna(value=mode)
elif cleaning_mode == 'MEDIAN':
medians = params['values']
for att in medians:
data[att] = data[att].fillna(value=medians[att])
data.reset_index(drop=True, inplace=True)
info = generate_info(data, frag)
return data, info
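if __name__ == '__main__':
    # Minimal local demo (an illustration added here, not part of the original
    # module; it assumes the pycompss/ddf_library stack is importable): fill
    # the missing values of column 'a' with a constant via fill_by_value.
    _df = pd.DataFrame({'a': [1.0, None, 3.0], 'b': [None, 5.0, 6.0]})
    _out, _info = fill_by_value(_df, {'value': 0.0, 'attributes': ['a'],
                                        'id_frag': 0})
    print(_out)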
| [
"numpy.divide",
"ddf_library.utils.read_stage_file",
"ddf_library.utils.generate_info",
"numpy.median",
"pycompss.api.task.task",
"numpy.isnan",
"time.time",
"pycompss.api.api.compss_delete_object",
"pycompss.api.api.compss_wait_on",
"pandas.concat",
"pycompss.functions.reduce.merge_reduce"
] | [((5578, 5613), 'pycompss.api.task.task', 'task', ([], {'returns': '(1)', 'data_input': 'FILE_IN'}), '(returns=1, data_input=FILE_IN)\n', (5582, 5613), False, 'from pycompss.api.task import task\n'), ((6019, 6034), 'pycompss.api.task.task', 'task', ([], {'returns': '(1)'}), '(returns=1)\n', (6023, 6034), False, 'from pycompss.api.task import task\n'), ((6371, 6406), 'pycompss.api.task.task', 'task', ([], {'returns': '(2)', 'data_input': 'FILE_IN'}), '(returns=2, data_input=FILE_IN)\n', (6375, 6406), False, 'from pycompss.api.task import task\n'), ((7030, 7045), 'pycompss.api.task.task', 'task', ([], {'returns': '(1)'}), '(returns=1)\n', (7034, 7045), False, 'from pycompss.api.task import task\n'), ((7435, 7450), 'pycompss.api.task.task', 'task', ([], {'returns': '(1)'}), '(returns=1)\n', (7439, 7450), False, 'from pycompss.api.task import task\n'), ((8215, 8230), 'pycompss.api.task.task', 'task', ([], {'returns': '(1)'}), '(returns=1)\n', (8219, 8230), False, 'from pycompss.api.task import task\n'), ((9393, 9428), 'pycompss.api.task.task', 'task', ([], {'returns': '(1)', 'data_input': 'FILE_IN'}), '(returns=1, data_input=FILE_IN)\n', (9397, 9428), False, 'from pycompss.api.task import task\n'), ((10927, 10942), 'pycompss.api.task.task', 'task', ([], {'returns': '(1)'}), '(returns=1)\n', (10931, 10942), False, 'from pycompss.api.task import task\n'), ((849, 874), 'ddf_library.utils.generate_info', 'generate_info', (['data', 'frag'], {}), '(data, frag)\n', (862, 874), False, 'from ddf_library.utils import generate_info, read_stage_file\n'), ((1341, 1366), 'ddf_library.utils.generate_info', 'generate_info', (['data', 'frag'], {}), '(data, frag)\n', (1354, 1366), False, 'from ddf_library.utils import generate_info, read_stage_file\n'), ((5697, 5732), 'ddf_library.utils.read_stage_file', 'read_stage_file', (['data_input', 'subset'], {}), '(data_input, subset)\n', (5712, 5732), False, 'from ddf_library.utils import generate_info, read_stage_file\n'), ((6461, 6488), 'ddf_library.utils.read_stage_file', 'read_stage_file', (['data_input'], {}), '(data_input)\n', (6476, 6488), False, 'from ddf_library.utils import generate_info, read_stage_file\n'), ((9557, 9568), 'time.time', 'time.time', ([], {}), '()\n', (9566, 9568), False, 'import time\n'), ((9732, 9767), 'ddf_library.utils.read_stage_file', 'read_stage_file', (['data_input', 'subset'], {}), '(data_input, subset)\n', (9747, 9767), False, 'from ddf_library.utils import generate_info, read_stage_file\n'), ((10773, 10784), 'time.time', 'time.time', ([], {}), '()\n', (10782, 10784), False, 'import time\n'), ((13292, 13317), 'ddf_library.utils.generate_info', 'generate_info', (['data', 'frag'], {}), '(data, frag)\n', (13305, 13317), False, 'from ddf_library.utils import generate_info, read_stage_file\n'), ((3860, 3901), 'pycompss.functions.reduce.merge_reduce', 'merge_reduce', (['merge_clean_options', 'params'], {}), '(merge_clean_options, params)\n', (3872, 3901), False, 'from pycompss.functions.reduce import merge_reduce\n'), ((3921, 3945), 'pycompss.api.api.compss_wait_on', 'compss_wait_on', (['settings'], {}), '(settings)\n', (3935, 3945), False, 'from pycompss.api.api import compss_wait_on, compss_delete_object\n'), ((4426, 4468), 'pycompss.functions.reduce.merge_reduce', 'merge_reduce', (['_median_stage1_merge', 'stage1'], {}), '(_median_stage1_merge, stage1)\n', (4438, 4468), False, 'from pycompss.functions.reduce import merge_reduce\n'), ((4795, 4835), 'pycompss.functions.reduce.merge_reduce', 'merge_reduce', (['_median_stage2_merge', 
'info'], {}), '(_median_stage2_merge, info)\n', (4807, 4835), False, 'from pycompss.functions.reduce import merge_reduce\n'), ((4844, 4870), 'pycompss.api.api.compss_delete_object', 'compss_delete_object', (['info'], {}), '(info)\n', (4864, 4870), False, 'from pycompss.api.api import compss_wait_on, compss_delete_object\n'), ((5316, 5356), 'pycompss.functions.reduce.merge_reduce', 'merge_reduce', (['_median_stage3_merge', 'info'], {}), '(_median_stage3_merge, info)\n', (5328, 5356), False, 'from pycompss.functions.reduce import merge_reduce\n'), ((5365, 5391), 'pycompss.api.api.compss_delete_object', 'compss_delete_object', (['info'], {}), '(info)\n', (5385, 5391), False, 'from pycompss.api.api import compss_wait_on, compss_delete_object\n'), ((5416, 5444), 'pycompss.api.api.compss_wait_on', 'compss_wait_on', (['medians_info'], {}), '(medians_info)\n', (5430, 5444), False, 'from pycompss.api.api import compss_wait_on, compss_delete_object\n'), ((6618, 6636), 'numpy.median', 'np.median', (['medians'], {}), '(medians)\n', (6627, 6636), True, 'import numpy as np\n'), ((12757, 12779), 'numpy.divide', 'np.divide', (['sums', 'sizes'], {}), '(sums, sizes)\n', (12766, 12779), True, 'import numpy as np\n'), ((5898, 5910), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (5907, 5910), True, 'import numpy as np\n'), ((5820, 5831), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (5828, 5831), True, 'import numpy as np\n'), ((6680, 6691), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (6688, 6691), True, 'import numpy as np\n'), ((11915, 11968), 'pandas.concat', 'pd.concat', (['[dict_mode1[att], dict_mode2[att]]'], {'axis': '(0)'}), '([dict_mode1[att], dict_mode2[att]], axis=0)\n', (11924, 11968), True, 'import pandas as pd\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from koheron import connect
from spectrum import Spectrum
import os
import numpy as np
import math
import matplotlib
matplotlib.use('TKAgg')
from matplotlib import pyplot as plt
host = os.getenv('HOST','192.168.1.100')
client = connect(host, name='spectrum')
spectrum = Spectrum(client)
spectrum.reset_acquisition()
wfm_size = 4096
mhz = 1e6
sampling_rate = 125e6
spectrum.reset_acquisition()
decimation_factor = 1
index_low = 0
index_high = wfm_size // 2
decimated_data = spectrum.get_decimated_data(decimation_factor, index_low, index_high)
freq_min = 0
freq_max = sampling_rate / mhz / 2
freq_range = np.linspace(freq_min, freq_max, wfm_size // 2)
plt.plot(freq_range, 10 * np.log10(decimated_data), 'b')
plt.show()
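# Note: 10 * log10(x) converts a power spectrum to decibels; if the decimated
# buffer held amplitudes rather than power, 20 * log10(x) would be the usual
# scale instead.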
| [
"matplotlib.pyplot.show",
"matplotlib.use",
"spectrum.Spectrum",
"numpy.linspace",
"koheron.connect",
"numpy.log10",
"os.getenv"
] | [((166, 189), 'matplotlib.use', 'matplotlib.use', (['"""TKAgg"""'], {}), "('TKAgg')\n", (180, 189), False, 'import matplotlib\n'), ((235, 269), 'os.getenv', 'os.getenv', (['"""HOST"""', '"""192.168.1.100"""'], {}), "('HOST', '192.168.1.100')\n", (244, 269), False, 'import os\n'), ((278, 308), 'koheron.connect', 'connect', (['host'], {'name': '"""spectrum"""'}), "(host, name='spectrum')\n", (285, 308), False, 'from koheron import connect\n'), ((321, 337), 'spectrum.Spectrum', 'Spectrum', (['client'], {}), '(client)\n', (329, 337), False, 'from spectrum import Spectrum\n'), ((660, 705), 'numpy.linspace', 'np.linspace', (['freq_min', 'freq_max', '(wfm_size / 2)'], {}), '(freq_min, freq_max, wfm_size / 2)\n', (671, 705), True, 'import numpy as np\n'), ((766, 776), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (774, 776), True, 'from matplotlib import pyplot as plt\n'), ((735, 759), 'numpy.log10', 'np.log10', (['decimated_data'], {}), '(decimated_data)\n', (743, 759), True, 'import numpy as np\n')] |
import codecademylib
import numpy as np
calorie_stats = np.genfromtxt('cereal.csv',delimiter=',')
average_calories = np.mean(calorie_stats)
print('maximum calories '+str(np.max(calorie_stats)))
print('Minimum calories ' + str(np.min(calorie_stats)))
print('average calories ' +str(average_calories))
calorie_stats_sorted = np.sort(calorie_stats)
print(calorie_stats_sorted)
median_calories = np.median(calorie_stats)
print('median of calories is '+str(median_calories))
nth_percentile = np.percentile(calorie_stats,62)
print(nth_percentile)
more_calories = np.mean(calorie_stats > 60) * 100
print('percentage is '+ str(more_calories))
calorie_std = np.std(calorie_stats)
print('standard deviation is ' + str(calorie_std))
 | [
"numpy.std",
"numpy.genfromtxt",
"numpy.percentile",
"numpy.sort",
"numpy.max",
"numpy.mean",
"numpy.min"
] | [((58, 100), 'numpy.genfromtxt', 'np.genfromtxt', (['"""cereal.csv"""'], {'delimiter': '""","""'}), "('cereal.csv', delimiter=',')\n", (71, 100), True, 'import numpy as np\n'), ((120, 142), 'numpy.mean', 'np.mean', (['calorie_stats'], {}), '(calorie_stats)\n', (127, 142), True, 'import numpy as np\n'), ((327, 349), 'numpy.sort', 'np.sort', (['calorie_stats'], {}), '(calorie_stats)\n', (334, 349), True, 'import numpy as np\n'), ((397, 419), 'numpy.mean', 'np.mean', (['calorie_stats'], {}), '(calorie_stats)\n', (404, 419), True, 'import numpy as np\n'), ((491, 523), 'numpy.percentile', 'np.percentile', (['calorie_stats', '(62)'], {}), '(calorie_stats, 62)\n', (504, 523), True, 'import numpy as np\n'), ((562, 589), 'numpy.mean', 'np.mean', (['(calorie_stats > 60)'], {}), '(calorie_stats > 60)\n', (569, 589), True, 'import numpy as np\n'), ((649, 670), 'numpy.std', 'np.std', (['calorie_stats'], {}), '(calorie_stats)\n', (655, 670), True, 'import numpy as np\n'), ((173, 194), 'numpy.max', 'np.max', (['calorie_stats'], {}), '(calorie_stats)\n', (179, 194), True, 'import numpy as np\n'), ((229, 250), 'numpy.min', 'np.min', (['calorie_stats'], {}), '(calorie_stats)\n', (235, 250), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
"""
Activation functions
"""
def slog_f(x): return (-1.)**(x < 0)*np.log(np.fabs(x)+1.)
def slog_df(x): return 1./(np.fabs(x)+1.)
def slog_af(y): return (-1.)**(y < 0)*(np.exp(np.fabs(y))-1.)
def tanh_df(x): return 1. - np.tanh(x)**2.
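# Quick sanity sketch (illustrative): slog_af is the inverse of slog_f, so a
# round trip should recover the input up to floating point error, e.g.:
#   xs = np.linspace(-3., 3., 7)
#   assert np.allclose(slog_af(slog_f(xs)), xs)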
"""
Activity pattern manipulations
"""
def random_patterns(N, num_patterns):
return np.sign(np.random.randn(N,num_patterns))
def int_to_pattern(N, i):
return (-1)**(1 & (i >> np.arange(N)[:,np.newaxis]))
def patterns_to_ints(P):
N = P.shape[0]
ints = (2**np.arange(N)).dot(P < 1).flatten()
ints[np.isnan(ints)] = -1
return ints
def hash_pattern(p):
return tuple(p.flatten())
def unhash_pattern(p):
return np.array((p,)).T
"""
Passive tick generating functions
"""
def constant_tick_fun(num_ticks):
return lambda random_state: num_ticks
def uniform_tick_fun(min_num_ticks, max_num_ticks):
# inclusive, exclusive
return lambda random_state: random_state.randint(min_num_ticks, max_num_ticks)
"""
Backprop helpers
Following Xie, Seung 2002
"""
def forward_pass(x_0, W, f):
# x_0: input layer activity pattern
# W[k]: weights from layer k-1 to k
# f[k]: activation function at layer k
# returns dict x[k]: activity at layer k
x = {0: x_0}
for k in range(1,len(W)+1):
x[k] = f[k]( W[k].dot(x[k-1]) )
return x
def backward_pass(x, e, W, df):
# x[k]: k^th layer activity pattern from forward pass
# e: error vector at last layer, after differentiating loss function (e.g., x[L]-target for squared loss)
# W[k]: weights from layer k-1 to k
# df[k]: derivative of activation function at layer k
# returns dict y[k]:
y = {}
for k in range(len(x)-1, 0, -1):
y[k] = df[k]( W[k].dot(x[k-1]) ) * e
e = W[k].T.dot(y[k])
return y
def error_gradient(x, y):
# x,y from forward/backward pass
# returns G[k]: error gradient wrt W[k]
return {k: y[k].dot(x[k-1].T) for k in range(1,len(x))}
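# A finite-difference sanity check for the analytic gradient above (a sketch
# added for illustration; it assumes the squared loss 0.5 * ||x[L] - target||^2,
# consistent with e = x[L] - target used in the training loop below).
def numeric_gradient_check(x_0, target, W, f, df, k=1, i=0, j=0, eps=1e-6):
    L = len(W)
    x = forward_pass(x_0, W, f)
    y = backward_pass(x, x[L] - target, W, df)
    analytic = error_gradient(x, y)[k][i, j]
    W[k][i, j] += eps
    loss_plus = 0.5 * ((forward_pass(x_0, W, f)[L] - target) ** 2).sum()
    W[k][i, j] -= 2 * eps
    loss_minus = 0.5 * ((forward_pass(x_0, W, f)[L] - target) ** 2).sum()
    W[k][i, j] += eps  # restore the original weight
    return analytic, (loss_plus - loss_minus) / (2 * eps)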
def init_randn_W(N):
return {k: 0.01*np.random.randn(N[k],N[k-1])/(N[k]*N[k-1]) for k in range(1,len(N))}
if __name__ == "__main__":
N = [3]*3
L = len(N)-1
f = {k: np.tanh for k in range(1,L+1)}
df = {k: tanh_df for k in range(1,L+1)}
W = init_randn_W(N)
# W = {k: np.eye(N[k],N[k-1]) for k in range(1,L+1)}
print('W')
for k in range(1,L+1):
print(W[k])
x_0 = np.array([[1,1,1]]).T
z_L = 0.5*np.array([[-1,1,-1]]).T
lcurve = []
gcurve = []
for epoch in range(2000):
x = forward_pass(x_0, W, f)
# print('x %d'%len(x))
# for k in range(L+1):
# print(x[k].T)
# print('z')
# print(z_L.T)
e = x[L] - z_L
y = backward_pass(x, e, W, df)
# print('y %d'%len(y))
# for k in range(1,L+1):
# print(y[k].T)
G = error_gradient(x, y)
# print('G %d'%len(G))
# for k in range(1,L+1):
# print(G[k])
for k in range(1,L+1):
W[k] += -0.001*G[k] #/np.sqrt((G[k]**2).sum())
x_new = forward_pass(x_0, W, f)
e_new = x_new[L] - z_L
# print('error: old %f vs new %f'%(e.T.dot(e), e_new.T.dot(e_new)))
# print(e.T)
# print(e_new.T)
lcurve.append((e**2).sum())
gcurve.append(sum([(G[k]**2).sum() for k in range(1,L+1)]))
print('W %d'%len(W))
for k in range(1,L+1):
print(W[k])
plt.plot(np.arange(len(lcurve)),np.array(lcurve))
plt.plot(np.arange(len(lcurve)),np.array(gcurve))
plt.legend(['l','g'])
plt.show()
# raw_input('..')
| [
"matplotlib.pyplot.show",
"numpy.tanh",
"numpy.random.randn",
"matplotlib.pyplot.legend",
"numpy.isnan",
"numpy.fabs",
"numpy.array",
"numpy.arange"
] | [((3606, 3628), 'matplotlib.pyplot.legend', 'plt.legend', (["['l', 'g']"], {}), "(['l', 'g'])\n", (3616, 3628), True, 'import matplotlib.pyplot as plt\n'), ((3632, 3642), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3640, 3642), True, 'import matplotlib.pyplot as plt\n'), ((384, 416), 'numpy.random.randn', 'np.random.randn', (['N', 'num_patterns'], {}), '(N, num_patterns)\n', (399, 416), True, 'import numpy as np\n'), ((603, 617), 'numpy.isnan', 'np.isnan', (['ints'], {}), '(ints)\n', (611, 617), True, 'import numpy as np\n'), ((725, 739), 'numpy.array', 'np.array', (['(p,)'], {}), '((p,))\n', (733, 739), True, 'import numpy as np\n'), ((2425, 2446), 'numpy.array', 'np.array', (['[[1, 1, 1]]'], {}), '([[1, 1, 1]])\n', (2433, 2446), True, 'import numpy as np\n'), ((3530, 3546), 'numpy.array', 'np.array', (['lcurve'], {}), '(lcurve)\n', (3538, 3546), True, 'import numpy as np\n'), ((3584, 3600), 'numpy.array', 'np.array', (['gcurve'], {}), '(gcurve)\n', (3592, 3600), True, 'import numpy as np\n'), ((167, 177), 'numpy.fabs', 'np.fabs', (['x'], {}), '(x)\n', (174, 177), True, 'import numpy as np\n'), ((272, 282), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (279, 282), True, 'import numpy as np\n'), ((2461, 2484), 'numpy.array', 'np.array', (['[[-1, 1, -1]]'], {}), '([[-1, 1, -1]])\n', (2469, 2484), True, 'import numpy as np\n'), ((125, 135), 'numpy.fabs', 'np.fabs', (['x'], {}), '(x)\n', (132, 135), True, 'import numpy as np\n'), ((228, 238), 'numpy.fabs', 'np.fabs', (['y'], {}), '(y)\n', (235, 238), True, 'import numpy as np\n'), ((2047, 2078), 'numpy.random.randn', 'np.random.randn', (['N[k]', 'N[k - 1]'], {}), '(N[k], N[k - 1])\n', (2062, 2078), True, 'import numpy as np\n'), ((471, 483), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (480, 483), True, 'import numpy as np\n'), ((559, 571), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (568, 571), True, 'import numpy as np\n')] |
"""
This file defines the functions used for camera calibration.
"""
import numpy as np
import cv2
import matplotlib.image as mpimg
import glob
def calibrate_camera(dir_path):
"""
    This function uses the images in the dir_path directory to calibrate the
    camera and returns the calibration parameters.
    :param dir_path string: glob pattern matching the calibration images
    :return: mtx and dist
"""
images = glob.glob(dir_path)
objp = np.zeros((9*6,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
objpoints = [] # 3D points in real world space
imgpoints = [] # 2D points in image plane
for f in images:
        img = mpimg.imread(f)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret , corners = cv2.findChessboardCorners(gray, (9,6), None)
if ret:
imgpoints.append(corners)
objpoints.append(objp)
    img_shape = mpimg.imread(images[0]).shape
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
img_shape[1::-1], None, None)
return mtx, dist
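# Typical usage sketch (paths are illustrative placeholders):
#   mtx, dist = calibrate_camera('camera_cal/calibration*.jpg')
#   undistorted = cv2.undistort(mpimg.imread('test.jpg'), mtx, dist, None, mtx)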
| [
"matplotlib.image.imread",
"cv2.findChessboardCorners",
"cv2.cvtColor",
"numpy.zeros",
"cv2.calibrateCamera",
"glob.glob"
] | [((432, 451), 'glob.glob', 'glob.glob', (['dir_path'], {}), '(dir_path)\n', (441, 451), False, 'import glob\n'), ((464, 496), 'numpy.zeros', 'np.zeros', (['(9 * 6, 3)', 'np.float32'], {}), '((9 * 6, 3), np.float32)\n', (472, 496), True, 'import numpy as np\n'), ((990, 1061), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objpoints', 'imgpoints', 'img_shape[1::-1]', 'None', 'None'], {}), '(objpoints, imgpoints, img_shape[1::-1], None, None)\n', (1009, 1061), False, 'import cv2\n'), ((679, 694), 'matplotlib.image.imread', 'mpimg.imread', (['f'], {}), '(f)\n', (691, 694), True, 'import matplotlib.image as mpimg\n'), ((711, 748), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (723, 748), False, 'import cv2\n'), ((773, 818), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', '(9, 6)', 'None'], {}), '(gray, (9, 6), None)\n', (798, 818), False, 'import cv2\n'), ((924, 947), 'matplotlib.image.imread', 'mpimg.imread', (['images[0]'], {}), '(images[0])\n', (936, 947), True, 'import matplotlib.image as mpimg\n')] |
import json
import csv
import numpy as np
# import matplotlib.pyplot as plt
# import matplotlib.colors as mcolors
from scipy import stats
from calibration import *
import pandas as pd
from eval_metrics import *
from tqdm import tqdm
from scipy.stats import norm
from scipy.stats import pearsonr
import argparse
import itertools
from os import listdir
from os.path import isfile, join
from normalisation import compute_z_norm, compute_fixed_std
import copy
import ast
def get_df(comet_dir, da_dir, nruns=100, docs=False):
SETUP_PATH = comet_dir
if args.score_type.lower() == 'da' or args.score_type.lower() == 'mqm':
if args.ensemble:
df_all = pd.read_csv(SETUP_PATH)
df_all.dp_runs_scores = df_all.dp_runs_scores.apply(lambda x: [float(i) for i in x.split('|')])
else:
files = [f for f in listdir(SETUP_PATH) if isfile(join(SETUP_PATH, f))]
sys_files = [f for f in files if (f.split('_')[0] == 'system') and ('Human' not in f)]
da_scores = pd.read_csv(da_dir)
da_scores.system = da_scores.system.apply(lambda x: x.split('.')[0])
if args.score_type.lower() == 'mqm':
da_scores = da_scores.rename(columns={'source': 'src', 'reference': 'ref', 'score': 'human_score'})
da_scores.loc[:,'human_score'] = da_scores['human_score'].apply(lambda x:100-x)
dfs = []
for s in sys_files:
f = open(join(SETUP_PATH,s), 'r')
data = json.loads(f.read())
f.close()
system_name = '_'.join(s.split('.')[0].split('_')[1:])
lines = [[i['src'], i['mt'], i['ref'], i['dp_runs_scores']] for i in data if 'dp_runs_scores' in i.keys()]
df_ = pd.DataFrame(data=np.array(lines), columns=['src','mt', 'ref', 'dp_runs_scores'])
da_scores_ = da_scores[da_scores.system == system_name]
df = df_.merge(da_scores_, how='inner', on=['src', 'mt'])
# df['dp_runs_scores'] = df['dp_runs_scores'].apply(lambda x: x[:nruns])
df.drop(['ref_x', 'ref_y'], axis=1, inplace=True)
dfs.append(df)
df_all = pd.concat(dfs)
df_all.reset_index(inplace=True)
elif args.score_type.lower() == 'hter':
df_all = pd.read_csv(SETUP_PATH)
if args.ensemble:
df_all.dp_runs_scores = df_all.dp_runs_scores.apply(lambda x: [float(i) for i in x.split('|')])
else:
# df_all.dp_runs_scores = df_all.dp_runs_scores.apply(lambda x: [float(i) for i in x.split('[')[1].split(']')[0].split(', ')])
df_all.dp_runs_scores = df_all.dp_runs_scores.apply(lambda x: ast.literal_eval(x))
df_all.doc_id = df_all.doc_id.apply(lambda x: int(x))
df_all.drop(['ref', 'src', 'mt', 'pe'], axis=1, inplace=True)
if docs:
try:
#print('doc')
sys_doc_ids = df_all.sys_doc_id.unique().tolist()
doc_dp_runs_scores = [np.mean(df_all[df_all.sys_doc_id == i].dp_runs_scores.tolist(), axis=0) for i in sys_doc_ids]
doc_z_score = [np.mean(df_all[df_all.sys_doc_id == i].human_score.tolist()) for i in sys_doc_ids]
doc_sys = [df_all[df_all.sys_doc_id == i].system.unique()[0] for i in sys_doc_ids]
df_doc = pd.DataFrame(data=np.array([doc_sys, sys_doc_ids, doc_dp_runs_scores, doc_z_score]).T, columns=['system', 'doc_id','dp_runs_scores', 'human_score'])
df_doc.reset_index(inplace=True)
return df_doc
        except Exception:
return df_all
return df_all
# def map_psqm(psqm_file, df):
# # file format: Human-B.0 independent.281139 1 1 rater2 <NAME> wore tape on his nose to get front pages, former bodyguard claims Ehemaliger Bodyguard behauptet, <NAME> trug Pflaster auf der Nase, um in die Presse zu kommen 4
# # file format: System doc_name system_id? ? annot# src mt score
# scores = [-1.0]*len(df)
# print(df.head())
# df['psqm_score'] = scores
# with open(psqm_file, 'r') as psqmf:
# for line in tqdm(psqmf):
# fields = line.split('\t')
# system = fields[0].split('.')[0]
# src = fields[5]
# score = fields[7]
# idx_to_change = df.index[(df['system'] == system) & (df['src'] == src)]
# df.loc[idx_to_change,'psqm_score'] = float(score)
# #print(len(df))
# new_dataframe = df[df['psqm_score'] >=0]
# #print(len(new_dataframe))f
# return new_dataframe
# def load_mqm_scores_from_df(mqm_df):
# df = mqm_df
# print(df.head())
# systems_comet_scores = {}
# systems_mqm_scores = {}
# systems_ext = []
# for i, row in df.iterrows():
# if row['psqm_score']>=0.0:
# sent_id = int(row['index'])
# system = row['system']
# system_ext = 'system_'+system+'.json'
# if not system_ext in systems_ext:
# systems_ext.append(system_ext)
# if not system_ext in systems_comet_scores:
# systems_comet_scores[system_ext]={}
# if not system_ext in systems_mqm_scores:
# systems_mqm_scores[system_ext]={}
# scores = row['dp_runs_scores']
# systems_comet_scores[system_ext][sent_id] = scores
# systems_mqm_scores[system_ext][sent_id] = row['psqm_score']
# return(systems_comet_scores, systems_mqm_scores, systems_ext)
def load_da_scores_from_df(df):
systems_comet_scores = {}
systems_human_scores = {}
systems_ext = {}
for i, row in df.iterrows():
sent_id = int(row['index'])
system = row['system']
system_ext = 'system_'+system+'.json'
doc_id = row['doc_id']
if not system_ext in systems_ext:
#systems_ext.append(system_ext)
systems_ext[system_ext]=[]
systems_ext[system_ext].append(doc_id)
if not system_ext in systems_comet_scores:
systems_comet_scores[system_ext]={}
if not doc_id in systems_comet_scores[system_ext]:
systems_comet_scores[system_ext][doc_id]=[]
if not system_ext in systems_human_scores:
systems_human_scores[system_ext]={}
if not doc_id in systems_human_scores[system_ext]:
systems_human_scores[system_ext][doc_id]=[]
scores = row['dp_runs_scores']
systems_comet_scores[system_ext][doc_id].append(scores)
systems_human_scores[system_ext][doc_id].append(row['human_score'])
return(systems_comet_scores, systems_human_scores, systems_ext)
def split_k_fold(comet_scores, scores, systems_list, k=5):
final_folds = []
for system in systems_list:
comet_sys_scores = comet_scores[system]
sys_scores = scores[system]
assert(len(comet_sys_scores) == len(sys_scores))
# split based on the doc level
zipped = [i for i in zip(comet_sys_scores, sys_scores)]
folds = np.array_split(zipped, k)
for i, fold in enumerate(folds):
comet_scores_fold = {}
human_scores_fold = {}
if system not in comet_scores_fold:
comet_scores_fold[system] = {}
human_scores_fold[system] = {}
for comet_doc_id, score_doc_id in fold:
assert(len(comet_sys_scores[comet_doc_id]) == len(sys_scores[score_doc_id]))
comet_scores_fold[system][comet_doc_id] = comet_sys_scores[comet_doc_id]
human_scores_fold[system][score_doc_id] = sys_scores[score_doc_id]
if len(final_folds) <= i:
final_folds.append({})
final_folds[i]['human'] = {}
final_folds[i]['comet'] = {}
final_folds[i]['human'][system] = human_scores_fold[system]
final_folds[i]['comet'][system] = comet_scores_fold[system]
# levels of nested dicts :
# 5 folds -> each fold has 2 dicts 'human' and "comet" ->
# -> each 'human' and "comet" has N keys == systems -> docs -> segments
return final_folds
def merge_folds(list_of_folds):
dev_fold = {}
for i, fold in enumerate(list_of_folds):
if i == 0:
dev_fold = copy.deepcopy(fold)
else:
for human_sys, comet_sys in zip(fold['human'].keys(), fold['comet'].keys()):
dev_fold['human'][human_sys].update(fold['human'][human_sys])
dev_fold['comet'][comet_sys].update(fold['comet'][comet_sys])
return dev_fold
def batch_data(all_da, all_comet, all_comet_avg, batch_size=1):
n = len(all_da) - (len(all_da) % batch_size)
batch_da = all_da[:n].reshape(n//batch_size, batch_size).mean(axis=1)
#print(all_comet[:n, :].reshape(n//batch_size, batch_size, -1).shape)
batch_comet_scores = [i.mean(axis=0) for i in all_comet[:n, :].reshape(n//batch_size, batch_size, -1)]
batch_comet_avg = all_comet_avg[:n].reshape(
n//batch_size, batch_size).mean(axis=1)
batch_comet_std = all_comet[:n, :].reshape(
n//batch_size, batch_size, -1).mean(axis=1).std(axis=-1)
return batch_da, batch_comet_scores, batch_comet_avg, batch_comet_std
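# Worked example of the batching above (illustrative): with batch_size=2 and
# segment scores [s1, s2, s3, s4, s5], n = 4, so the tail s5 is dropped and the
# batched human scores become [(s1 + s2) / 2, (s3 + s4) / 2]; the COMET dropout
# runs are averaged over the same pairs before the per-batch std is taken
# across runs.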
def standardize(scores_test, scores_dev, norm):
norm_mean = 0.0
norm_std = 1.0
if norm:
norm_mean, norm_std = compute_z_norm(scores_dev)
#all_scores = np.array([val for _,sys in scores_test.items() for _,doc in sys.items() for _,val in doc.items() ])
all_scores = np.array([val for _,sys in scores_test.items() for _,doc in sys.items() for val in doc ])
#print(all_scores.shape)
all_scores -= norm_mean
all_scores /= norm_std
#print(all_scores.shape)
#np.squeeze(all_scores,axis=1)
return all_scores
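# `compute_z_norm` comes from the local `normalisation` module; from the usage
# above it presumably (an assumption, not confirmed by this file) flattens the
# nested {system: {doc: [scores]}} dict and returns its mean and std, roughly:
#   flat = np.array([v for sys_ in scores.values()
#                    for doc in sys_.values() for v in doc])
#   return flat.mean(), flat.std()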
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process comet outputs')
parser.add_argument('--comet-setup-file', type=str,
help='path to comet setup to test on')
parser.add_argument('--scores-file', type=str,
help='path to scores for testing on')
parser.add_argument('--norm', type=bool, default=True,
help='set to true to normalise the std on the ECE')
parser.add_argument('--score-type', type=str, default='da',
help='Choose type of scores between da | mqm | hter')
parser.add_argument('--docs', default=False, action='store_true',
help= 'select segment or document level eval')
parser.add_argument('--nruns', type=int, default=100,
help= 'select how many drpout runs to evaluate')
parser.add_argument('--baseline', default=False, action='store_true',
help= 'select to evaluate the baseline only')
parser.add_argument('--ensemble', default=False, action='store_true',
help= 'specify that you are evaluating ensemble merged data')
args = parser.parse_args()
test_year='2020'
if '2019' in args.comet_setup_file:
test_year='2019'
#if args.score_type.lower()=='da':
combined_df = get_df(args.comet_setup_file, args.scores_file, args.nruns, args.docs)
print(list(combined_df.columns))
systems_comet_scores, systems_human_scores, systems_ext = load_da_scores_from_df(combined_df)
k_folds = split_k_fold(systems_comet_scores, systems_human_scores, systems_ext)
cal_avgll_folds = []
calibration_error_folds = []
sharpness_cal_folds = []
epiw_cal_folds = []
pearson_acc_folds = []
pearson_d2_cal_folds = []
np_calibration_error_folds = []
np_sharpness_cal_folds = []
np_pearson_folds = []
for i, fold in enumerate(k_folds):
keys = np.arange(len(k_folds)) # 0,1,2,3,4
dev = keys[keys != i]
merged_dev = merge_folds([k_folds[k] for k in dev])
# systems_comet_scores_test, systems_scores_test = k_folds[i]['comet'], k_folds[i]['human']
systems_comet_scores_test, systems_scores_test = fold['comet'], fold['human']
systems_comet_scores_dev, systems_scores_dev = merged_dev['comet'], merged_dev['human']
print()
print('- - - making dev/test split - - -')
print('processing as test fold #', i)
print('processing as dev folds #', dev)
print()
print(len(systems_comet_scores_test))
print(len(systems_scores_test))
norm_human_test = standardize(systems_scores_test, systems_scores_dev, args.norm)
norm_comet_test = standardize(systems_comet_scores_test, systems_comet_scores_dev, args.norm)
norm_comet_avg_test = norm_comet_test.mean(axis=1)
# we need to repeat on the dev set to optimise the calibration parameters!
norm_human_dev = standardize(systems_scores_dev, systems_scores_dev, args.norm)
norm_comet_dev = standardize(systems_comet_scores_dev, systems_comet_scores_dev, args.norm)
norm_comet_avg_dev = norm_comet_dev.mean(axis=1)
# if args.docs:
# batch_range = [1]
# else:
batch_range = [1]
for batch_size in batch_range:
batch_human_test, batch_comet_scores_test, batch_comet_avg_test, batch_comet_std_test = batch_data(
norm_human_test, norm_comet_test, norm_comet_avg_test, batch_size=batch_size)
batch_human_dev, batch_comet_scores_dev, batch_comet_avg_dev, batch_comet_std_dev = batch_data(
norm_human_dev, norm_comet_dev, norm_comet_avg_dev, batch_size=batch_size)
# Compute fixed std to use as a baseline model
if args.baseline:
fixed_std = compute_fixed_std(batch_comet_avg_dev, batch_human_dev)
batch_baseline_stds_test = np.full_like(batch_comet_std_test, fixed_std)
batch_baseline_stds_dev = np.full_like(batch_comet_std_dev, fixed_std)
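# (every test segment then shares a single dev-fitted uncertainty; e.g. an
# illustrative fixed_std of 0.8 fills both arrays with 0.8 regardless of
# the per-segment MC-dropout stds)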
# Compute Pearson correlation between average COMET and DA.
pearson_acc = stats.pearsonr(batch_comet_avg_test, batch_human_test)[0]
base_calibration_error, gammas, base_matches = compute_calibration_error(
batch_human_test, batch_comet_avg_test, batch_baseline_stds_test, std_sum=0, std_scale=1)
mcpe_base, gammas, mcpe_matches_base = compute_mcpe(
batch_human_test, batch_comet_avg_test, batch_baseline_stds_test, std_sum=0, std_scale=1)
sharpness_base = compute_sharpness(batch_baseline_stds_test, std_sum=0, std_scale=1)
epiw_base, gammas, epiw_matches_base = compute_epiw(
batch_comet_avg_test, batch_baseline_stds_test, std_sum=0, std_scale=1)
# mpiw_base, gammas, mpiw_matches_base = compute_mpiw(
# batch_comet_avg_test, batch_baseline_stds_test, std_sum=0, std_scale=1)
# ence_base, ence_gammas, ence_matches_base = compute_ence(
# batch_human_test, batch_comet_avg_test, batch_baseline_stds_test, std_sum=0, std_scale=1)
# ence_base_rn, ence_gammas_rn, ence_matches_base_rn = compute_ence_rn(
# batch_human_test, batch_comet_avg_test, batch_baseline_stds_test, std_sum=0, std_scale=1)
# ence_base_nn, ence_gammas_nn, ence_matches_base_nn = compute_ence_nn(
# batch_human_test, batch_comet_avg_test, batch_baseline_stds_test, std_sum=0, std_scale=1)
# print("Baseline ECE = %f (baseline std = %f)" % (base_calibration_error, fixed_std))
# print("Baseline MCE = %f (baseline std = %f)" % (mcpe_base, fixed_std))
# print("Baseline sharpness = %f (baseline std = %f)" % (sharpness_base, fixed_std))
# print("Baseline epiw sharpness = %f (baseline std = %f)" % (epiw_base, fixed_std))
# print("Baseline mpiw sharpness = %f (baseline std = %f)" % (mpiw_base, fixed_std))
# print("Baseline ENCE = %f (baseline std = %f)" % (ence_base, fixed_std))
# print("Baseline ENCE_RN = %f (baseline std = %f)" % (ence_base_rn, fixed_std))
# print("Baseline ENCE_NN = %f (baseline std = %f)" % (ence_base_nn, fixed_std))
# print("Baseline Parametric CE = %f (baseline std = %f)" % (base_calibration_error, fixed_std))
# Compute Baseline ALL and NLL
base_avgll, base_negll = compute_avgll(batch_human_test, batch_comet_avg_test, batch_baseline_stds_test)
print("Baseline ALL = %f" % base_avgll)
print("Baseline NLL = %f" % base_negll)
print()
medians = [np.median(i) for i in batch_comet_scores_test]
np_pearson = stats.pearsonr(medians, batch_human_test)[0]
np_s_vals = np.linspace(0, 5, 500)
_, best_s = optimize_calibration_error_non_parametric_base(
batch_human_dev, batch_comet_avg_dev, s_vals=np_s_vals)
np_calibration_error, np_gammas, np_matches = compute_calibration_error_non_parametric_base(
batch_human_test, batch_comet_avg_test, best_s)
print("Non-parametric CE baseline = %f (best_s = %f)" % (np_calibration_error, best_s))
np_sharpness_cal, gammas, matches = compute_epiw_np_base(batch_comet_avg_test, best_s)
print("Non-parametric Sharpness = %f " % np_sharpness_cal)
cal_avgll_folds.append(base_avgll)
calibration_error_folds.append(base_calibration_error)
sharpness_cal_folds.append(sharpness_base)
epiw_cal_folds.append(epiw_base)
pearson_acc_folds.append(pearson_acc)
# np_calibration_error_folds.append(np_calibration_error)
# np_sharpness_cal_folds.append(np_sharpness_cal)
np_pearson_folds.append(np_pearson)
############# LATEX #################
print('----------LATEX OUTPUTS----------')
print('& average NLL & ECE & Sharpness & EPIW \\\\')
print('& %f & %f & %f & %f \\\\' % (base_avgll, base_calibration_error, sharpness_base, epiw_base))
print('& r(human, pred) & r(|pred-human|,std) \\\\')
print('& %f & %f \\\\' % (pearson_acc, 0))
# print('& ECE_np & Sharpness_np \\\\')
# print('& %f & %f \\\\' % (np_calibration_error, np_sharpness_cal))
print('& np_pearson \\\\')
print('& %f \\\\' % np_pearson)
else:
# Parametric CE
# It assumes a parametric Gaussian distribution for the COMET scores.
calibration_error, gammas, matches = compute_calibration_error(
batch_human_test, batch_comet_avg_test, batch_comet_std_test, std_sum=0, std_scale=1)
# mcpe, gammas, mcpe_matches = compute_mcpe(
# batch_human_test, batch_comet_avg_test, batch_comet_std_test, std_sum=0, std_scale=1)
# sharpness = compute_sharpness(batch_comet_std_test, std_sum=0, std_scale=1)
# epiw, gammas, epiw_matches = compute_epiw(
# batch_comet_avg_test, batch_comet_std_test, std_sum=0, std_scale=1)
# mpiw, gammas, mpiw_matches = compute_mpiw(
# batch_comet_avg_test, batch_comet_std_test, std_sum=0, std_scale=1)
# ence, ence_gammas, ence_matches = compute_ence(
# batch_human_test, batch_comet_avg_test, batch_comet_std_test, std_sum=0, std_scale=1)
# ence_rn, ence_gammas_rn, ence_matches_rn = compute_ence_rn(
# batch_human_test, batch_comet_avg_test, batch_comet_std_test, std_sum=0, std_scale=1)
# ence_nn, ence_gammas_nn, ence_matches_nn = compute_ence_nn(
# batch_human_test, batch_comet_avg_test, batch_comet_std_test, std_sum=0, std_scale=1)
# print("ECE = %f" % calibration_error)
# print("MCE = %f" % mcpe)
# print("Sharpness = %f" % sharpness)
# print("EPIW = %f" % epiw)
# print("MPIW = %f" % mpiw)
# print("ENCE = %f" % ence)
# print("ENCE_RN = %f" % ence_rn)
# print("ENCE_NN = %f" % ence_nn)
# print("Parametric CE = %f" % calibration_error)
# Seek the best post-calibration to minimize calibration error.
# The correction is std_transformed**2 = std_sum**2 + (std_scale*std)**2,
# where std_sum and std_scale are correction parameters.
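# Illustrative numbers (not from the source): std_sum=0.5 and std_scale=2
# turn a raw std of 0.3 into sqrt(0.5**2 + (2*0.3)**2) = sqrt(0.61) ~= 0.78.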
std_sums = np.linspace(0, 2, 100)
std_scales = np.linspace(1, 10, 100)
_, std_sum, std_scale, = optimize_calibration_error(
batch_human_dev, batch_comet_avg_dev, batch_comet_std_dev,
std_sums=std_sums, std_scales=std_scales)
# Compute Pearson correlation between average COMET and DA.
pearson_acc = stats.pearsonr(batch_comet_avg_test, batch_human_test)[0]
# print("Pearson (COMET, MQM) batch size =%d - r= %f" % (
# batch_size, pearson_acc))
# Compute Pearson correlation between |COMET - DA| and COMET_std
abs_diff = [abs(da - cs) for da, cs in zip(batch_human_test, batch_comet_avg_test)]
pearson_d1 = stats.pearsonr(abs_diff, batch_comet_std_test)[0]
# print("Pearson (|COMET-MQM|, COMET_std) batch size =%d - r= %f" % (
# batch_size, pearson_d1))
# Compute Pearson correlation between |COMET - DA| and COMET_std
abs_diff_sq = [(da - cs)**2 for da, cs in zip(batch_human_test, batch_comet_avg_test)]
batch_comet_std_test_sq = [ x**2 for x in batch_comet_std_test]
pearson_d2 = stats.pearsonr(abs_diff_sq, batch_comet_std_test_sq)[0]
# print("Pearson ((COMET-MQM)**2, COMET_std**2) batch size =%d - r= %f" % (
# batch_size, pearson_d2))
## Calibrated pearsons
batch_comet_std_test_transformed = np.sqrt(std_sum**2 + (std_scale*batch_comet_std_test)**2)
# Compute Pearson correlation between |COMET - DA| and COMET_std
abs_diff = [abs(da - cs) for da, cs in zip(batch_human_test, batch_comet_avg_test)]
pearson_d1_cal = stats.pearsonr(abs_diff, batch_comet_std_test_transformed)[0]
# print("Calibrated Pearson (|COMET-MQM|, COMET_std) batch size =%d - r= %f" % (
# batch_size, pearson_d1_cal))
# Compute Pearson correlation between |COMET - DA| and COMET_std
abs_diff_sq = [(da - cs)**2 for da, cs in zip(batch_human_test, batch_comet_avg_test)]
batch_comet_std_test_transformed_sq = [ x**2 for x in batch_comet_std_test_transformed]
pearson_d2_cal = stats.pearsonr(abs_diff_sq, batch_comet_std_test_transformed_sq)[0]
# print("Calibrated Pearson ((COMET-MQM)**2, COMET_std**2) batch size =%d - r= %f" % (
# batch_size, pearson_d2_cal ))
calibration_error, gammas, matches_cal = compute_calibration_error(
batch_human_test, batch_comet_avg_test, batch_comet_std_test,
std_sum=std_sum, std_scale=std_scale)
# mcpe_cal, gammas, mcpe_matches_cal = compute_mcpe(
# batch_human_test, batch_comet_avg_test, batch_comet_std_test, std_sum, std_scale)
sharpness_cal = compute_sharpness(batch_comet_std_test, std_sum, std_scale)
epiw_cal, gammas, epiw_matches_cal = compute_epiw(
batch_comet_avg_test, batch_comet_std_test, std_sum, std_scale)
# mpiw_cal, gammas, mpiw_matches_cal = compute_mpiw(
# batch_comet_avg_test, batch_comet_std_test, std_sum, std_scale)
# ence_cal, ence_gammas, ence_matches_cal = compute_ence(
# batch_human_test, batch_comet_avg_test, batch_comet_std_test, std_sum, std_scale)
# ence_cal_rn, ence_gammas_rn, ence_matches_cal_rn = compute_ence_rn(
# batch_human_test, batch_comet_avg_test, batch_comet_std_test, std_sum, std_scale)
# ence_cal_nn, ence_gammas_nn, ence_matches_cal_nn = compute_ence_nn(
# batch_human_test, batch_comet_avg_test, batch_comet_std_test, std_sum, std_scale)
# print("Calibrated ECE = %f (calibrated std_sum=%f, std_scale=%f)" % (calibration_error, std_sum, std_scale))
# print("Calibrated MCE = %f (calibrated std_sum=%f, std_scale=%f)" % (mcpe_cal, std_sum, std_scale))
# print("Calibrated sharpness = %f (calibrated std_sum=%f, std_scale=%f)" % (sharpness_cal, std_sum, std_scale))
# print("Calibrated epiw sharpness = %f (calibrated std_sum=%f, std_scale=%f)" % (epiw_cal, std_sum, std_scale))
# print("Calibrated mpiw sharpness = %f (calibrated std_sum=%f, std_scale=%f)" % (mpiw_cal, std_sum, std_scale))
# print("Calibrated ENCE = %f (calibrated std_sum=%f, std_scale=%f)" % (ence_cal, std_sum, std_scale))
# print("Calibrated ENCE_RN = %f (calibrated std_sum=%f, std_scale=%f)" % (ence_cal_rn, std_sum, std_scale))
# print("Calibrated ENCE_NN = %f (calibrated std_sum=%f, std_scale=%f)" % (ence_cal_nn, std_sum, std_scale))
# print("Calibrated Parametric CE = %f (calibrated std_sum=%f, std_scale=%f)" %
# (calibration_error, std_sum, std_scale))
# Compute ALL and NLL
avgll, negll = compute_avgll(batch_human_test, batch_comet_avg_test, batch_comet_std_test)
# print("ALL = %f" % avgll)
# print("NLL = %f" % negll)
# Compute ALL and NLL
cal_avgll, cal_negll = compute_avgll(batch_human_test, batch_comet_avg_test, batch_comet_std_test, std_sum, std_scale)
print("Calibrated ALL = %f" % cal_avgll)
print("Calibrated NLL = %f" % cal_negll)
################### NON PRARAMETRIC ##################
# Compute calibration error by binning different confidence intervals.
# Non-parametric CE
np_calibration_error, np_gammas, np_matches = compute_calibration_error_non_parametric(
batch_human_test, batch_comet_scores_test)
print("Non-parametric CE = %f" % np_calibration_error)
# Best non-parametric CE
medians = [np.median(i) for i in batch_comet_scores_test]
np_pearson = stats.pearsonr(medians, batch_human_test)[0]
# np_scaling_vals = np.linspace(0.05, 3, 20)
# np_scaling_sums = np.linspace(-1, 1, 11)
# _, best_scale_val, best_scale_sum = optimize_calibration_error_non_parametric(
# batch_human_dev, batch_comet_scores_dev, scaling_vals=np_scaling_vals, scaling_sums=np_scaling_sums)
# np_calibration_error, np_gammas, np_matches = compute_calibration_error_non_parametric(
# batch_human_test, batch_comet_scores_test, scaling_val=best_scale_val, scaling_sum=best_scale_sum)
# print("Non-parametric CE = %f (calibrated, best_scaling_val=%f, best_scaling_sum=%f)" %
# (np_calibration_error, best_scale_val, best_scale_sum))
# np_sharpness_cal, gammas, matches = compute_epiw_np(batch_comet_scores_test, std_scale=best_scale_val, std_sum=best_scale_sum)
# print("Non-parametric Sharpness = %f (calibrated, best_scaling_val=%f, best_scaling_sum=%f)" %
# (np_sharpness_cal, best_scale_val, best_scale_sum))
############# LATEX #################
print()
print('----------LATEX OUTPUTS----------')
print('& average NLL & ECE & Sharpness & EPIW \\\\')
print('& %f & %f & %f & %f \\\\' % (cal_avgll, calibration_error, sharpness_cal, epiw_cal))
print('& r(human, pred) & r(|pred-human|,std) \\\\')
print('& %f & %f \\\\' % (pearson_acc, pearson_d2_cal))
# print('& ECE_np & EPIW_np \\\\')
# print('& %f & %f \\\\' % (np_calibration_error, np_sharpness_cal))
# print('& np_pearson \\\\')
# print('& %f \\\\' % np_pearson)
cal_avgll_folds.append(cal_avgll)
calibration_error_folds.append(calibration_error)
sharpness_cal_folds.append(sharpness_cal)
epiw_cal_folds.append(epiw_cal)
pearson_acc_folds.append(pearson_acc)
pearson_d2_cal_folds.append(pearson_d2_cal)
# np_calibration_error_folds.append(np_calibration_error)
# np_sharpness_cal_folds.append(np_sharpness_cal)
np_pearson_folds.append(np_pearson)
if args.baseline:
print()
print('------AVERAGED OVER k FOLDS------')
print('----------LATEX BASELINE OUTPUTS----------')
print('& r(human, pred) & r(|pred-human|,std) & average NLL & ECE & Sharpness & EPIW\\\\')
print('& %.3f & %.3f & %.3f & %.3f & %.3f & %.3f \\\\' % (round(np.mean(pearson_acc_folds),3), 0, round(np.mean(cal_avgll_folds),3), round(np.mean(calibration_error_folds),3), round(np.mean(sharpness_cal_folds),3), round(np.mean(epiw_cal_folds),3)))
# print('& ECE_np & EPIW_np \\\\')
# print('& %.3f & %.3f \\\\' % (round(np.mean(np_calibration_error_folds),3), round(np.mean(np_sharpness_cal_folds),3)))
print('& np_pearson \\\\')
print('& %.3f \\\\' % round(np.mean(np_pearson_folds),3))
else:
print()
print('------AVERAGED OVER k FOLDS------')
print('----------LATEX OUTPUTS----------')
print('& r(human, pred) & r(|pred-human|,std) & average NLL & ECE & Sharpness & EPIW \\\\')
print('& %.3f & %.3f & %.3f & %.3f & %.3f & %.3f \\\\' % (round(np.mean(pearson_acc_folds),3), round(np.mean(pearson_d2_cal_folds),3), round(np.mean(cal_avgll_folds),3), round(np.mean(calibration_error_folds),3), round(np.mean(sharpness_cal_folds),3), round(np.mean(epiw_cal_folds),3)))
# print('& ECE_np & EPIW_np \\\\')
# print('& %.3f & %.3f \\\\' % (round(np.mean(np_calibration_error_folds),3), round(np.mean(np_sharpness_cal_folds),3)))
# print('& np_pearson \\\\')
# print('& %.3f \\\\' % round(np.mean(np_pearson_folds),3))
################## FIGURES #################
# matplotlib.rc('xtick', labelsize=15)
# matplotlib.rc('ytick', labelsize=15)
# plt.figure(figsize=(6.2,2.2))
# plt.xlabel('Confidence level $\gamma$', fontsize=15)
# plt.ylabel('ECE', fontsize=15)
# # plt.title(args.score_type.upper() + ': 1719 on '+test_year+' - Batch size = %d' % batch_size)
# plt.plot(gammas, matches, 'royalblue', label="Original ECE")
# plt.plot(gammas, matches_cal, 'orangered', linestyle='dotted', label="Calibrated ECE", linewidth=3)
# # plt.plot(gammas, base_matches, 'g', label="Baseline ECE")
# plt.plot([0, 1], [0, 1], 'k', linewidth=0.9)
# plt.legend()
# plt.legend(prop={'size': 14})
# plt.show()
# plt.savefig('/media/hdd1/glushkovato/comet/COMET_uncertainty/ue_eval_scripts/figures/ECE_bs_final_squeeze_'+'.png', bbox_inches = "tight")
# plt.close()
# sample = [-0.7036617994308472, -0.39346814155578613, -0.4354693293571472, -0.48200106620788574,
# -0.6583844423294067, -0.6528894305229187, -0.4381236135959625, -0.11167386919260025,
# -0.28474313020706177, -0.40106481313705444, -0.2071761190891266, -0.3169260025024414,
# -0.42727798223495483, -0.2133534699678421, -0.37169918417930603, 0.02465342916548252,
# -0.4433746635913849, -0.2109990417957306, -0.3115224242210388, -0.12913624942302704,
# -0.3110971450805664, -0.2711679935455322, -0.2629014551639557, -0.161701962351799,
# -0.31409913301467896, -0.28766417503356934, -0.4218456745147705, -0.4927760362625122,
# -0.4791868329048157, -0.5151439309120178, -0.4783304035663605, -0.2807827591896057,
# -0.4361232817173004, -0.796786367893219, -0.2349693924188614, -0.2692130208015442,
# -0.5983560681343079, -0.3687020540237427, -0.3561617434024811, -0.35035240650177,
# -0.34771427512168884, -0.24625299870967865, -0.36683908104896545, -0.33239245414733887,
# -0.4329518973827362, -0.3675892949104309, -0.6854426860809326, -0.21822558343410492,
# -0.23549531400203705, -0.32744812965393066, -0.37420371174812317, -0.35194385051727295,
# -0.2507886588573456, -0.6340183615684509, -0.40667828917503357, -0.20614176988601685,
# -0.24489286541938782, -0.4341568648815155, -0.37508007884025574, -0.5427935719490051,
# -0.46071887016296387, -0.3867534101009369, -0.30441683530807495, -0.15482938289642334,
# -0.3157658874988556, -0.2350553274154663, -0.5219535231590271, -0.7520877718925476,
# -0.39036089181900024, -0.39128726720809937, -0.09702187776565552, -0.3885476291179657,
# -0.35855793952941895, -0.10762306302785873, -0.32352709770202637, -0.3512462377548218,
# -0.32870057225227356, -0.4129355549812317, -0.38273707032203674, -0.5623825788497925,
# 0.0948031097650528, -0.3213968575000763, -0.23260238766670227, -0.47009772062301636,
# -0.5744777321815491, -0.509739100933075, -0.15552622079849243, -0.29284384846687317,
# -0.19066350162029266, -0.44607532024383545, -0.5014781951904297, -0.4129786491394043,
# -0.40598946809768677, -0.4015771150588989, -0.29395225644111633, -0.4239853620529175,
# -0.4720333516597748, 0.004249433055520058, -0.540823757648468, -0.21973752975463867]
sample = [0.5979697704315186,
0.5243543982505798,
0.6262829899787903,
0.5946624875068665,
0.5717122554779053,
0.6173045635223389,
0.6162536144256592,
0.5854749083518982,
0.5606750845909119,
0.6813172101974487,
0.7215273380279541,
0.7598370909690857,
0.5823168754577637,
0.6067937612533569,
0.6279692649841309,
0.5281223058700562,
0.5969744324684143,
0.5853649377822876,
0.48922640085220337,
0.5766693949699402,
0.5745446085929871,
0.5487832427024841,
0.647320032119751,
0.6826099157333374,
0.6288926005363464,
0.8613891005516052,
0.5903519988059998,
0.5309380292892456,
0.6349964141845703,
0.4533690810203552,
0.6135360598564148,
0.8157045841217041,
0.41160082817077637,
0.5631874799728394,
0.5206363797187805,
0.6249307990074158,
0.6297017931938171,
0.6902846097946167,
0.6883143782615662,
0.705655038356781,
0.5418302416801453,
0.6571133136749268,
0.7079156041145325,
0.6279858946800232,
0.6430858373641968,
0.5736710429191589,
0.6936737298965454,
0.634878396987915,
0.6792322397232056,
0.32287514209747314,
0.6860127449035645,
0.6514933109283447,
0.5734164714813232,
0.6523839235305786,
0.67072993516922,
0.7287837266921997,
0.6147819757461548,
0.701930582523346,
0.5400246381759644,
0.5519304275512695,
0.7217748165130615,
0.6027462482452393,
0.6484041810035706,
0.6087967753410339,
0.5354230403900146,
0.6050034165382385,
0.5663189888000488,
0.516261875629425,
0.6997227668762207,
0.6717677712440491,
0.5033883452415466,
0.5382351875305176,
0.6828071475028992,
0.6036621928215027,
0.5766533017158508,
0.5570096969604492,
0.5567960143089294,
0.47815167903900146,
0.7115316390991211,
0.6047992706298828,
0.6455367207527161,
0.648800253868103,
0.7322787642478943,
0.6291396021842957,
0.6151097416877747,
0.6088402271270752,
0.6159900426864624,
0.6520301699638367,
0.5357393622398376,
0.552027702331543,
0.47821640968322754,
0.6975051760673523,
0.6235313415527344,
0.5580254793167114,
0.5078786611557007,
0.625751256942749,
0.6019373536109924,
0.6296380758285522,
0.6481614708900452,
0.6562914848327637]
plt.figure(figsize=(7,4))
mu = np.mean(sample)
sigma = np.std(sample)
n, bins, patches = plt.hist(sample, bins=20, color='royalblue', density=1)
y = ((1 / (np.sqrt(2 * np.pi) * sigma)) *
np.exp(-0.5 * (1 / sigma * (bins - mu))**2))
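# (y is the probability density of N(mu, sigma) evaluated at the bin
# edges, for overlaying on the density-normalised histogram above)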
plt.plot(bins, y, '--', color='orangered')
# plt.hist(sample, bins=20, color='royalblue', density=1, alpha=0.7)
plt.xlabel("Predicted values", size=15)
plt.ylabel("Counts", size=15)
plt.show()
plt.savefig('/media/hdd1/glushkovato/comet/COMET_uncertainty/ue_eval_scripts/figures/sample_distr11.png', bbox_inches = "tight")
plt.close()
# matplotlib.rc('xtick', labelsize=15)
# matplotlib.rc('ytick', labelsize=15)
# plt.xlabel('N', fontsize=15)
# plt.ylabel('Recall@N', fontsize=15)
# plt.plot(recall, rec_comet_unc_prism_1, 'darkgreen',linestyle='dotted', label="UA-COMET")
# plt.plot(recall, rec_comet_mean_prism_1, 'darkorange',linestyle='dashed', label="MCD COMET mean")
# plt.plot(recall, rec_comet_original_prism_1, 'royalblue', label="COMET original")
# plt.legend()
# plt.legend(prop={'size': 15})
# plt.show()
# plt.savefig('figures/NNEW_combined_MQM_Recall@N_relevant_'+str(1)+'_perc.png')
# plt.close()
# plt.xlabel('Confidence level $\gamma$')
# plt.ylabel('Sharpness')
# plt.title(args.score_type.upper() + ': 1719 on '+test_year+' - Batch size = %d' % batch_size)
# plt.plot(gammas, epiw_matches, 'b', label="Original sharpness")
# plt.plot(gammas, epiw_matches_cal, 'r', label="Calibrated sharpness")
# plt.plot(gammas, epiw_matches_base, 'g', label="Baseline sharpness")
# plt.plot(gammas, mpiw_matches, 'b:', label="Original max sharpness")
# plt.plot(gammas, mpiw_matches_cal, 'r:', label="Calibrated max sharpness")
# #plt.plot([0, 1], [0, 1], 'k--')
# plt.legend()
# plt.show()
# plt.savefig('figures/'+args.score_type.upper()+'_1719-'+test_year+'-SHARP_bs_'+str(batch_size)+'.png')
# plt.close()
# plt.xlabel('Bins (ascending std values) $')
# plt.ylabel('ENCE')
# plt.title(args.score_type.upper() + ': 1719 on '+test_year+' - Batch size = %d' % batch_size)
# plt.plot(ence_gammas, ence_matches, 'b', label="Original ENCE")
# plt.plot(ence_gammas, ence_matches_cal, 'r', label="Calibrated ENCE")
# plt.plot(ence_gammas, ence_matches_base, 'g', label="Baseline ENCE")
# #plt.plot([0, 1], [0, 1], 'k--')
# plt.legend()
# plt.show()
# plt.savefig('figures/'+args.score_type.upper()+'_1719-'+test_year+'-ENCE_bs_'+str(batch_size)+'.png')
# plt.close()
# plt.xlabel('Bins (ascending std values) $')
# plt.ylabel('ENCE RN')
# plt.title(args.score_type.upper() + ': 1719 on '+test_year+' - Batch size = %d' % batch_size)
# plt.plot(ence_gammas_rn, ence_matches_rn, 'b', label="Original ENCE_RN")
# plt.plot(ence_gammas_rn, ence_matches_cal_rn, 'r', label="Calibrated ENCE_RN")
# plt.plot(ence_gammas_rn, ence_matches_base_rn, 'g', label="Baseline ENCE_RN")
# #plt.plot([0, 1], [0, 1], 'k--')
# plt.legend()
# plt.show()
# plt.savefig('figures/'+args.score_type.upper()+'_1719-'+test_year+'-ENCE_RN_bs_'+str(batch_size)+'.png')
# plt.close()
# plt.xlabel('Bins (ascending std values) $')
# plt.ylabel('ENCE NN')
# plt.title(args.score_type.upper() + ': 1719 on '+test_year+' - Batch size = %d' % batch_size)
# plt.plot(ence_gammas_nn, ence_matches_nn, 'b', label="Original ENCE_NN")
# plt.plot(ence_gammas_nn, ence_matches_cal_nn, 'r', label="Calibrated ENCE_NN")
# plt.plot(ence_gammas_nn, ence_matches_base_nn, 'g', label="Baseline ENCE_NN")
# #plt.plot([0, 1], [0, 1], 'k--')
# plt.legend()
# plt.show()
# plt.savefig('figures/'+args.score_type.upper()+'_1719-'+test_year+'-ENCE_NN_bs_'+str(batch_size)+'.png')
# plt.close()
| [
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.mean",
"numpy.exp",
"normalisation.compute_z_norm",
"numpy.array_split",
"os.path.join",
"numpy.full_like",
"numpy.std",
"numpy.linspace",
"pandas.concat",
"copy.deepcopy",
"normalisation.compute_fixed_std",
"numpy.median",
"scipy.stat... | [((9862, 9922), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process comet outputs"""'}), "(description='Process comet outputs')\n", (9885, 9922), False, 'import argparse\n'), ((7057, 7082), 'numpy.array_split', 'np.array_split', (['zipped', 'k'], {}), '(zipped, k)\n', (7071, 7082), True, 'import numpy as np\n'), ((9397, 9423), 'normalisation.compute_z_norm', 'compute_z_norm', (['scores_dev'], {}), '(scores_dev)\n', (9411, 9423), False, 'from normalisation import compute_z_norm, compute_fixed_std\n'), ((38619, 38634), 'numpy.mean', 'np.mean', (['sample'], {}), '(sample)\n', (38626, 38634), True, 'import numpy as np\n'), ((38651, 38665), 'numpy.std', 'np.std', (['sample'], {}), '(sample)\n', (38657, 38665), True, 'import numpy as np\n'), ((673, 696), 'pandas.read_csv', 'pd.read_csv', (['SETUP_PATH'], {}), '(SETUP_PATH)\n', (684, 696), True, 'import pandas as pd\n'), ((1026, 1045), 'pandas.read_csv', 'pd.read_csv', (['da_dir'], {}), '(da_dir)\n', (1037, 1045), True, 'import pandas as pd\n'), ((2214, 2228), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (2223, 2228), True, 'import pandas as pd\n'), ((2336, 2359), 'pandas.read_csv', 'pd.read_csv', (['SETUP_PATH'], {}), '(SETUP_PATH)\n', (2347, 2359), True, 'import pandas as pd\n'), ((8304, 8323), 'copy.deepcopy', 'copy.deepcopy', (['fold'], {}), '(fold)\n', (8317, 8323), False, 'import copy\n'), ((38811, 38856), 'numpy.exp', 'np.exp', (['(-0.5 * (1 / sigma * (bins - mu)) ** 2)'], {}), '(-0.5 * (1 / sigma * (bins - mu)) ** 2)\n', (38817, 38856), True, 'import numpy as np\n'), ((13744, 13799), 'normalisation.compute_fixed_std', 'compute_fixed_std', (['batch_comet_avg_dev', 'batch_human_dev'], {}), '(batch_comet_avg_dev, batch_human_dev)\n', (13761, 13799), False, 'from normalisation import compute_z_norm, compute_fixed_std\n'), ((13843, 13888), 'numpy.full_like', 'np.full_like', (['batch_comet_std_test', 'fixed_std'], {}), '(batch_comet_std_test, fixed_std)\n', (13855, 13888), True, 'import numpy as np\n'), ((13931, 13975), 'numpy.full_like', 'np.full_like', (['batch_comet_std_dev', 'fixed_std'], {}), '(batch_comet_std_dev, fixed_std)\n', (13943, 13975), True, 'import numpy as np\n'), ((16926, 16948), 'numpy.linspace', 'np.linspace', (['(0)', '(5)', '(500)'], {}), '(0, 5, 500)\n', (16937, 16948), True, 'import numpy as np\n'), ((20896, 20918), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(100)'], {}), '(0, 2, 100)\n', (20907, 20918), True, 'import numpy as np\n'), ((20948, 20971), 'numpy.linspace', 'np.linspace', (['(1)', '(10)', '(100)'], {}), '(1, 10, 100)\n', (20959, 20971), True, 'import numpy as np\n'), ((22475, 22538), 'numpy.sqrt', 'np.sqrt', (['(std_sum ** 2 + (std_scale * batch_comet_std_test) ** 2)'], {}), '(std_sum ** 2 + (std_scale * batch_comet_std_test) ** 2)\n', (22482, 22538), True, 'import numpy as np\n'), ((851, 870), 'os.listdir', 'listdir', (['SETUP_PATH'], {}), '(SETUP_PATH)\n', (858, 870), False, 'from os import listdir\n'), ((1466, 1485), 'os.path.join', 'join', (['SETUP_PATH', 's'], {}), '(SETUP_PATH, s)\n', (1470, 1485), False, 'from os.path import isfile, join\n'), ((14083, 14137), 'scipy.stats.pearsonr', 'stats.pearsonr', (['batch_comet_avg_test', 'batch_human_test'], {}), '(batch_comet_avg_test, batch_human_test)\n', (14097, 14137), False, 'from scipy import stats\n'), ((16776, 16788), 'numpy.median', 'np.median', (['i'], {}), '(i)\n', (16785, 16788), True, 'import numpy as np\n'), ((16852, 16893), 'scipy.stats.pearsonr', 'stats.pearsonr', (['medians', 
'batch_human_test'], {}), '(medians, batch_human_test)\n', (16866, 16893), False, 'from scipy import stats\n'), ((21305, 21359), 'scipy.stats.pearsonr', 'stats.pearsonr', (['batch_comet_avg_test', 'batch_human_test'], {}), '(batch_comet_avg_test, batch_human_test)\n', (21319, 21359), False, 'from scipy import stats\n'), ((21696, 21742), 'scipy.stats.pearsonr', 'stats.pearsonr', (['abs_diff', 'batch_comet_std_test'], {}), '(abs_diff, batch_comet_std_test)\n', (21710, 21742), False, 'from scipy import stats\n'), ((22189, 22241), 'scipy.stats.pearsonr', 'stats.pearsonr', (['abs_diff_sq', 'batch_comet_std_test_sq'], {}), '(abs_diff_sq, batch_comet_std_test_sq)\n', (22203, 22241), False, 'from scipy import stats\n'), ((22747, 22805), 'scipy.stats.pearsonr', 'stats.pearsonr', (['abs_diff', 'batch_comet_std_test_transformed'], {}), '(abs_diff, batch_comet_std_test_transformed)\n', (22761, 22805), False, 'from scipy import stats\n'), ((23295, 23359), 'scipy.stats.pearsonr', 'stats.pearsonr', (['abs_diff_sq', 'batch_comet_std_test_transformed_sq'], {}), '(abs_diff_sq, batch_comet_std_test_transformed_sq)\n', (23309, 23359), False, 'from scipy import stats\n'), ((27119, 27131), 'numpy.median', 'np.median', (['i'], {}), '(i)\n', (27128, 27131), True, 'import numpy as np\n'), ((27195, 27236), 'scipy.stats.pearsonr', 'stats.pearsonr', (['medians', 'batch_human_test'], {}), '(medians, batch_human_test)\n', (27209, 27236), False, 'from scipy import stats\n'), ((30328, 30353), 'numpy.mean', 'np.mean', (['np_pearson_folds'], {}), '(np_pearson_folds)\n', (30335, 30353), True, 'import numpy as np\n'), ((38768, 38786), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (38775, 38786), True, 'import numpy as np\n'), ((881, 900), 'os.path.join', 'join', (['SETUP_PATH', 'f'], {}), '(SETUP_PATH, f)\n', (885, 900), False, 'from os.path import isfile, join\n'), ((1797, 1812), 'numpy.array', 'np.array', (['lines'], {}), '(lines)\n', (1805, 1812), True, 'import numpy as np\n'), ((2721, 2740), 'ast.literal_eval', 'ast.literal_eval', (['x'], {}), '(x)\n', (2737, 2740), False, 'import ast\n'), ((3369, 3434), 'numpy.array', 'np.array', (['[doc_sys, sys_doc_ids, doc_dp_runs_scores, doc_z_score]'], {}), '([doc_sys, sys_doc_ids, doc_dp_runs_scores, doc_z_score])\n', (3377, 3434), True, 'import numpy as np\n'), ((29895, 29921), 'numpy.mean', 'np.mean', (['pearson_acc_folds'], {}), '(pearson_acc_folds)\n', (29902, 29921), True, 'import numpy as np\n'), ((29935, 29959), 'numpy.mean', 'np.mean', (['cal_avgll_folds'], {}), '(cal_avgll_folds)\n', (29942, 29959), True, 'import numpy as np\n'), ((29970, 30002), 'numpy.mean', 'np.mean', (['calibration_error_folds'], {}), '(calibration_error_folds)\n', (29977, 30002), True, 'import numpy as np\n'), ((30013, 30041), 'numpy.mean', 'np.mean', (['sharpness_cal_folds'], {}), '(sharpness_cal_folds)\n', (30020, 30041), True, 'import numpy as np\n'), ((30052, 30075), 'numpy.mean', 'np.mean', (['epiw_cal_folds'], {}), '(epiw_cal_folds)\n', (30059, 30075), True, 'import numpy as np\n'), ((30658, 30684), 'numpy.mean', 'np.mean', (['pearson_acc_folds'], {}), '(pearson_acc_folds)\n', (30665, 30684), True, 'import numpy as np\n'), ((30695, 30724), 'numpy.mean', 'np.mean', (['pearson_d2_cal_folds'], {}), '(pearson_d2_cal_folds)\n', (30702, 30724), True, 'import numpy as np\n'), ((30735, 30759), 'numpy.mean', 'np.mean', (['cal_avgll_folds'], {}), '(cal_avgll_folds)\n', (30742, 30759), True, 'import numpy as np\n'), ((30770, 30802), 'numpy.mean', 'np.mean', (['calibration_error_folds'], 
{}), '(calibration_error_folds)\n', (30777, 30802), True, 'import numpy as np\n'), ((30813, 30841), 'numpy.mean', 'np.mean', (['sharpness_cal_folds'], {}), '(sharpness_cal_folds)\n', (30820, 30841), True, 'import numpy as np\n'), ((30852, 30875), 'numpy.mean', 'np.mean', (['epiw_cal_folds'], {}), '(epiw_cal_folds)\n', (30859, 30875), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""A set of tools for calculating various material values
Includes a couple of figures of merit as well as tools for checking and updating units
.. moduleauthor:: dhancock
"""
def thermal_stress_fom(material, temperature=20, verbose=False):
r"""Calculates the Thermal Stress Figure of Merit (**M**) for a :class:`Material`
using the equation:
.. math::
M = \frac{\sigma_{UTS} k_{th}(1-\upsilon)}{\alpha_{th} E}
:math:`\sigma_{UTS}=` Ultimate Tensile Stress
:math:`k_{th}=` Thermal Conductivity
:math:`\upsilon=` Poisson's Ratio
:math:`\alpha_{th}=` Coefficient of Thermal Expansion
:math:`E=` Young's Modulus
Parameters
----------
material:
:class:`materialtools.Material`
temperature:
:class:`int`
verbose:
:class:`bool`
Returns
-------
M:
:class:`float`
"""
## this is the list of properties needed to calculate the fom
properties = (["Ultimate Tensile Strength"]*2,
["Thermal Conductivity"]*2,
["Coefficient of Thermal Expansion"]*2,
["Elasticity","Poisson's Ratio"],
["Elasticity","Young's Modulus"])
## check that the material has the properties needed
for p in properties:
assert p[0] in material, "{} not found in {} [{}]".format(p[0],
material.name,
material.source)
## check each property has the parameters needed
assert p[1] in material[p[0]], \
"{} not found in {} for {} [{}]".format(p[1],
p[0],
material.name,
material.source)
## try to calculate the variables
variables = [temperature]
for p in properties:
try:
variables.append(material.get_value(p[0],
temperature,
'Temperature',
p[1]))
except Exception:
if verbose is True:
print("Could not get {} at {}".format(p[1], temperature))
raise  # the unreachable `return None` that followed this raise was dropped
## extract the variables
t,uts,k_th,a_th,nu,E = variables
if None in variables:
#return 0
pass
## calculate M
M = (uts * k_th * (1 - nu)) / (a_th * E)
return M
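# Illustrative check with assumed round numbers (not from any material
# database): uts=500e6 Pa, k_th=150 W/(m.K), nu=0.3, a_th=5e-6 1/K and
# E=400e9 Pa give M = (500e6 * 150 * (1 - 0.3)) / (5e-6 * 400e9)
# = 2.625e4 W/m.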
def thermal_missmatch_stress(material1,
material2,
thickness_1,
thickness_2,
heat_flux,
htc,
T_coolant,
T_ref = 293):
r""" Calculates Thermal mismatch stress
Parameters
----------
material1, material2:
:class:`materialtools.Material` objects for the armour and
substructure respectively
thickness_1, thickness_2:
layer thicknesses in :math:`m` of armour and substructure
heat_flux:
incident heat flux in :math:`W.m^{-2}`
htc:
convective heat transfer coefficient in :math:`W.m^{-2}.K^{-1}`
T_coolant:
bulk temperature of the coolant in :math:`K`
T_ref:
reference temperature in :math:`K`
Returns
-------
mismatch_stress:
thermal mismatch stress in :math:`Pa`
Calculation
-----------
.. math::
\sigma_{mm} & = \frac{\alpha_{2}(T_{2,mean}-T_{ref})
- \alpha_{1}(T_{1,mean} - T_{ref})}
{\frac{(1-\upsilon_{2})t_{1}}{t_{2}E_{2}}
+ \frac{1-\upsilon_{1}}{E_{1}}}\\
T_{1,mean} & = T_{coolant} + \frac{q}{h} + \frac{q.t_1}{2.k_1}\\
T_{2,mean} & = T_{coolant} + \frac{q}{h} + \frac{q.t_1}{k_1} + \frac{q.t_2}{2.k_2}\\
where
:math:`\sigma_{mm}` is the mismatch stress,
:math:`T_{ref}` is the reference starting temperature of the component,
:math:`T_{coolant}` is the bulk coolant temperature,
:math:`q` is the incident heat flux,
:math:`t_1 and t_2` are the thicknesses of structure and armour respectively,
:math:`T_{1,mean}` and :math:`T_{2,mean}`
are the mean temperatures in each material,
:math:`\upsilon_1` and :math:`\upsilon_2` are Poisson's ratios,
:math:`\alpha_1` and :math:`\alpha_2` are thermal expansion coefficients,
and
:math:`E_1` and :math:`E_2` are Young's moduli
"""
q = heat_flux
h = htc
T1,T2 = T_coolant, T_coolant
method = "linear"
tolerance = 200
for iteration in range(3):
a1, a2 = (material.get_value('Coefficient of Thermal Expansion',
T,
method=method,
tolerance=tolerance)
for material,T in [(material1, T1), (material2,T2)])
nu1,nu2 = (material.get_value('Elasticity',
T,
'Temperature',
"Poisson's Ratio",
method=method,
tolerance=tolerance)
for material,T in [(material1, T1), (material2,T2)])
E1, E2 = (material.get_value('Elasticity',
T,
'Temperature',
"Young's Modulus",
method=method,
tolerance=tolerance)
for material,T in [(material1, T1), (material2,T2)])
k1, k2 = (material.get_value('Thermal Conductivity',
T,
method=method,
tolerance=tolerance)
for material,T in [(material1, T1), (material2,T2)])
t1,t2 = thickness_1, thickness_2
T1mean = T_coolant + q/h + (q*t1)/(2*k1)
T2mean = T_coolant + q/h + (q*t1)/k1 + (q*t2)/(2*k2)
T1,T2 = T1mean,T2mean
mismatch_stress = (a2*(T2mean-T_ref) - a1*(T1mean-T_ref)) / \
(((1-nu2)*t1)/(t2*E2) + (1-nu1)/E1)
#print("T_{:},mean = {:3.0f}, T_{:},mean = {:3.0f}".format(material1.name,T1mean-273,material2.name,T2mean-273))
return mismatch_stress
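# Illustrative check of the temperature chain with assumed values (not
# from the source): q=1e7 W/m^2, h=1e5 W/(m^2.K), T_coolant=423 K,
# t1=5e-3 m and k1=150 W/(m.K) give, on the first iteration,
# T1mean = 423 + 100 + 1e7*5e-3/(2*150) ~= 689.7 K.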
def check_units(parameters):
""" Checks that units are consistent
"""
from materialtools import MaterialParameter
assert all(type(p)==MaterialParameter for p in parameters), \
"list must contain only MaterialParameter objects"
allunits = [unit for p in parameters for unit in p["Units"]]
#print(allunits)
unitsmatch = allunits[1:] == allunits[:-1]
return unitsmatch
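# e.g. parameters whose "Units" lists all read ['W', 'm^-1', 'K^-1'] give
# True; mixing ['MPa'] with ['GPa'] gives False (illustrative values only).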
def fix_units(parameter):
""" TO DO - will include some conversion factors
"""
return
if __name__ == '__main__':
import materialtools
from matplotlib import pyplot as plt
import numpy as np
materialdata = materialtools.MaterialData()
out = materialdata.import_file('/python/data/materialtools/xml/HHF_materials.xml')
for m in materialdata:
material = materialdata[m]
material.set_value('Ultimate Tensile Strength',[20,500,1000],[500e6,200e6,100e6])
Ms = []
temps = np.linspace(0,500,100)
for temp in temps:
try:
Ms.append(thermal_stress_fom(material,
temperature = temp))
except Exception:
Ms.append(0)
plt.plot(temps,Ms,'-',label=material.name)
plt.xlabel('temperature')
plt.ylabel('M')
plt.legend(fontsize='x-small')
'''
data.sort(key=lambda x: x[1],reverse=True)
print('_'*80)
print('{:^40}:{:^20}'.format('material','M'))
print('='*80)
a = [print('{:^40}:{:^20.2e}'.format(x[0],x[1])) for x in data]
''' | [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"materialtools.MaterialData",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((7704, 7732), 'materialtools.MaterialData', 'materialtools.MaterialData', ([], {}), '()\n', (7730, 7732), False, 'import materialtools\n'), ((8334, 8359), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""temperature"""'], {}), "('temperature')\n", (8344, 8359), True, 'from matplotlib import pyplot as plt\n'), ((8364, 8379), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""M"""'], {}), "('M')\n", (8374, 8379), True, 'from matplotlib import pyplot as plt\n'), ((8384, 8414), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '"""x-small"""'}), "(fontsize='x-small')\n", (8394, 8414), True, 'from matplotlib import pyplot as plt\n'), ((8020, 8044), 'numpy.linspace', 'np.linspace', (['(0)', '(500)', '(100)'], {}), '(0, 500, 100)\n', (8031, 8044), True, 'import numpy as np\n'), ((8287, 8332), 'matplotlib.pyplot.plot', 'plt.plot', (['temps', 'Ms', '"""-"""'], {'label': 'material.name'}), "(temps, Ms, '-', label=material.name)\n", (8295, 8332), True, 'from matplotlib import pyplot as plt\n')] |
#!/usr/bin/env python
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import os
import math
import csv
import json
from collections import OrderedDict
import numpy as np
from simtk import unit as u
# =============================================================================
# CONSTANTS
# =============================================================================
T = 298 * u.kelvin
R = u.MOLAR_GAS_CONSTANT_R
RELATIVE_TITRANT_CONC_ERROR = 0.03
CB8_GUESTS_SMILES_PATH = '../../Isaacs_SAMPL6_guests.smi'
OA_GUESTS_SMILES_PATH = '../../Gibb_SAMPL6_guests.smi'
# Experimental results as provided by the Gibb and Isaacs groups.
# The error is relative. None means that the error is <1%.
EXPERIMENTAL_DATA = OrderedDict([
('OA-G0', OrderedDict([
('Ka', 1.47e+4 / u.molar), ('dKa', 0.02),
('DH', -4.84 * u.kilocalories_per_mole), ('dDH', None),
('TDS', 0.84 * u.kilocalories_per_mole), ('dTDS', 0.04),
('n', 1), ('DG', -5.68 * u.kilocalories_per_mole)
])),
('OA-G1', OrderedDict([
('Ka', 2.57e+3 / u.molar), ('dKa', 0.01),
('DH', -5.52 * u.kilocalories_per_mole), ('dDH', 0.01),
('TDS', -0.86 * u.kilocalories_per_mole), ('dTDS', 0.1),
('n', 1), ('DG', -4.65 * u.kilocalories_per_mole)
])),
('OA-G2', OrderedDict([
('Ka', 1.4e+6 / u.molar), ('dKa', 0.01),
('DH', -12.07 * u.kilocalories_per_mole), ('dDH', None),
('TDS', -3.69 * u.kilocalories_per_mole), ('dTDS', None),
('n', 1), ('DG', -8.38 * u.kilocalories_per_mole)
])),
('OA-G3', OrderedDict([
('Ka', 6.21e+3 / u.molar), ('dKa', None),
('DH', -7.53 * u.kilocalories_per_mole), ('dDH', None),
('TDS', -2.35 * u.kilocalories_per_mole), ('dTDS', 0.01),
('n', 1), ('DG', -5.18 * u.kilocalories_per_mole)
])),
('OA-G4', OrderedDict([
('Ka', 1.64e+5 / u.molar), ('dKa', 0.01),
('DH', -6.92 * u.kilocalories_per_mole), ('dDH', None),
('TDS', 0.19 * u.kilocalories_per_mole), ('dTDS', 0.13),
('n', 1), ('DG', -7.11 * u.kilocalories_per_mole)
])),
('OA-G5', OrderedDict([
('Ka', 2.33e+3 / u.molar), ('dKa', 0.01),
('DH', -5.31 * u.kilocalories_per_mole), ('dDH', None),
('TDS', -0.71 * u.kilocalories_per_mole), ('dTDS', 0.02),
('n', 1), ('DG', -4.59 * u.kilocalories_per_mole)
])),
('OA-G6', OrderedDict([
('Ka', 4.37e+3 / u.molar), ('dKa', None),
('DH', -5.29 * u.kilocalories_per_mole), ('dDH', None),
('TDS', -0.33 * u.kilocalories_per_mole), ('dTDS', 0.11),
('n', 1), ('DG', -4.97 * u.kilocalories_per_mole)
])),
('OA-G7', OrderedDict([
('Ka', 3.6e+4 / u.molar), ('dKa', None),
('DH', -7.44 * u.kilocalories_per_mole), ('dDH', None),
('TDS', -1.23 * u.kilocalories_per_mole), ('dTDS', 0.02),
('n', 1), ('DG', -6.22 * u.kilocalories_per_mole)
])),
('TEMOA-G0', OrderedDict([
('Ka', 2.81e+4 / u.molar), ('dKa', None),
('DH', -7.85 * u.kilocalories_per_mole), ('dDH', 0.02),
('TDS', -1.77 * u.kilocalories_per_mole), ('dTDS', 0.08),
('n', 1), ('DG', -6.06 * u.kilocalories_per_mole)
])),
('TEMOA-G1', OrderedDict([
('Ka', 2.4e+4 / u.molar), ('dKa', 0.04),
('DH', -8.25 * u.kilocalories_per_mole), ('dDH', 0.04),
('TDS', -2.27 * u.kilocalories_per_mole), ('dTDS', 0.14),
('n', 1), ('DG', -5.97 * u.kilocalories_per_mole)
])),
('TEMOA-G2', OrderedDict([
('Ka', 9.82e+4 / u.molar), ('dKa', None),
('DH', -9.27 * u.kilocalories_per_mole), ('dDH', None),
('TDS', -2.46 * u.kilocalories_per_mole), ('dTDS', None),
('n', 1), ('DG', -6.81 * u.kilocalories_per_mole)
])),
('TEMOA-G3', OrderedDict([
('Ka', 1.28e+4 / u.molar), ('dKa', 0.04),
('DH', -8.86 * u.kilocalories_per_mole), ('dDH', 0.02),
('TDS', -3.25 * u.kilocalories_per_mole), ('dTDS', 0.04),
('n', 1), ('DG', -5.6 * u.kilocalories_per_mole)
])),
('TEMOA-G4', OrderedDict([
('Ka', 5.12e+5 / u.molar), ('dKa', None),
('DH', -8.87 * u.kilocalories_per_mole), ('dDH', None),
('TDS', -1.08 * u.kilocalories_per_mole), ('dTDS', None),
('n', 1), ('DG', -7.79 * u.kilocalories_per_mole)
])),
('TEMOA-G5', OrderedDict([
('Ka', 1.13e+3 / u.molar), ('dKa', None),
('DH', -7.96 * u.kilocalories_per_mole), ('dDH', 0.01),
('TDS', -3.8 * u.kilocalories_per_mole), ('dTDS', 0.03),
('n', 1), ('DG', -4.16 * u.kilocalories_per_mole)
])),
('TEMOA-G6', OrderedDict([
('Ka', 9.12e+3 / u.molar), ('dKa', 0.02),
('DH', -6.19 * u.kilocalories_per_mole), ('dDH', None),
('TDS', -0.79 * u.kilocalories_per_mole), ('dTDS', 0.07),
('n', 1), ('DG', -5.4 * u.kilocalories_per_mole)
])),
('TEMOA-G7', OrderedDict([
('Ka', 1.07e+3 / u.molar), ('dKa', None),
('DH', -8.33 * u.kilocalories_per_mole), ('dDH', None),
('TDS', -4.2 * u.kilocalories_per_mole), ('dTDS', None),
('n', 1), ('DG', -4.13 * u.kilocalories_per_mole)
])),
('CB8-G0', OrderedDict([
('Ka', 8.06e+4 / u.molar), ('dKa', 0.05),
('DH', -4.22 * u.kilocalories_per_mole), ('dDH', 0.02),
('TDS', 2.48 * u.kilocalories_per_mole), ('dTDS', None),
('n', 1), ('DG', -6.69 * u.kilocalories_per_mole)
])),
('CB8-G1', OrderedDict([
('Ka', 4.03e+5 / u.molar**2), ('dKa', 0.04),
('DH', -5.05 * u.kilocalories_per_mole), ('dDH', None),
('TDS', 2.6 * u.kilocalories_per_mole), ('dTDS', None),
('n', 0.5), ('DG', -7.65 * u.kilocalories_per_mole)
])),
('CB8-G2', OrderedDict([
('Ka', 4.08e+5 / u.molar), ('dKa', 0.06),
('DH', -6.5 * u.kilocalories_per_mole), ('dDH', 0.01),
('TDS', 1.16 * u.kilocalories_per_mole), ('dTDS', None),
('n', 1), ('DG', -7.66 * u.kilocalories_per_mole)
])),
('CB8-G3', OrderedDict([
('Ka', 5.34e+4 / u.molar), ('dKa', 0.07),
('DH', -2.46 * u.kilocalories_per_mole), ('dDH', 0.03),
('TDS', 3.99 * u.kilocalories_per_mole), ('dTDS', None),
('n', 1), ('DG', -6.45 * u.kilocalories_per_mole)
])),
('CB8-G4', OrderedDict([
('Ka', 5.13e+5 / u.molar**3), ('dKa', 0.04),
('DH', -9.83 * u.kilocalories_per_mole), ('dDH', None),
('TDS', -2.03 * u.kilocalories_per_mole), ('dTDS', None),
('n', 0.33), ('DG', -7.8 * u.kilocalories_per_mole)
])),
('CB8-G5', OrderedDict([
('Ka', 9.9e+5 / u.molar), ('dKa', 0.06),
('DH', -3.18 * u.kilocalories_per_mole), ('dDH', None),
('TDS', 5.0 * u.kilocalories_per_mole), ('dTDS', None),
('n', 1), ('DG', -8.18 * u.kilocalories_per_mole)
])),
('CB8-G6', OrderedDict([
('Ka', 1.3e+6 / u.molar), ('dKa', 0.06),
('DH', -5.69 * u.kilocalories_per_mole), ('dDH', None),
('TDS', 2.65 * u.kilocalories_per_mole), ('dTDS', None),
('n', 1), ('DG', -8.34 * u.kilocalories_per_mole)
])),
('CB8-G7', OrderedDict([
('Ka', 2.08e+7 / u.molar), ('dKa', 0.14),
('DH', -6.48 * u.kilocalories_per_mole), ('dDH', None),
('TDS', 3.5 * u.kilocalories_per_mole), ('dTDS', None),
('n', 1), ('DG', -9.98 * u.kilocalories_per_mole)
])),
('CB8-G8', OrderedDict([
('Ka', 8.26e+9 / u.molar), ('dKa', 0.04),
('DH', -14.4 * u.kilocalories_per_mole), ('dDH', None),
('TDS', -0.88 * u.kilocalories_per_mole), ('dTDS', None),
('n', 1), ('DG', -13.5 * u.kilocalories_per_mole)
])),
('CB8-G9', OrderedDict([
('Ka', 2.29e+6 / u.molar), ('dKa', 0.1),
('DH', -4.63 * u.kilocalories_per_mole), ('dDH', None),
('TDS', 4.05 * u.kilocalories_per_mole), ('dTDS', None),
('n', 1), ('DG', -8.68 * u.kilocalories_per_mole)
])),
('CB8-G10', OrderedDict([
('Ka', 1.05e+6 / u.molar), ('dKa', 0.09),
('DH', -2.0 * u.kilocalories_per_mole), ('dDH', 0.01),
('TDS', 6.22 * u.kilocalories_per_mole), ('dTDS', None),
('n', 1), ('DG', -8.22 * u.kilocalories_per_mole)
])),
('CB8-G11', OrderedDict([
('Ka', 4.98e+5 / u.molar), ('dKa', 0.06),
('DH', -2.11 * u.kilocalories_per_mole), ('dDH', None),
('TDS', 5.67 * u.kilocalories_per_mole), ('dTDS', None),
('n', 1), ('DG', -7.77 * u.kilocalories_per_mole)
])),
('CB8-G12a', OrderedDict([
('Ka', 1.67e+7 / u.molar), ('dKa', 0.025),
('DH', -9.16 * u.kilocalories_per_mole), ('dDH', None),
('TDS', 0.697 * u.kilocalories_per_mole), ('dTDS', None),
('n', 1), ('DG', -9.86 * u.kilocalories_per_mole)
])),
('CB8-G12b', OrderedDict([
('Ka', 1.46e+5 / u.molar**2), ('dKa', 0.01),
('DH', -4.83 * u.kilocalories_per_mole), ('dDH', None),
('TDS', 2.23 * u.kilocalories_per_mole), ('dTDS', None),
('n', 2), ('DG', -7.05 * u.kilocalories_per_mole)
])),
('CB8-G13', OrderedDict([
('Ka', 1.61e+5 / u.molar), ('dKa', 0.02),
('DH', -6.8 * u.kilocalories_per_mole), ('dDH', 0.01),
('TDS', 0.31 * u.kilocalories_per_mole), ('dTDS', None),
('n', 1), ('DG', -7.11 * u.kilocalories_per_mole)
])),
])
# =============================================================================
# UTILITY FUNCTIONS
# =============================================================================
def load_smiles(file_path):
"""Return the list of guests names and SMILES."""
guests = []
with open(file_path, 'r') as f:
for line in f:
line = line.strip()
smiles, name = line.split(' ', 1)
guests.append([smiles, name])
return guests
def compute_DG(Ka, dKa):
"""Compute the free energy from the association constant.
Parameters
----------
Ka : simtk.Quantity
Association constant.
dKa : simtk.Quantity
Association constant uncertainty.
Returns
-------
DG : simtk.Quantity
Binding free energy.
dDG : simtk.Quantity
Binding free energy uncertainty.
"""
concentration_unit = 1 / Ka.unit
DG = -R * T * np.log(Ka*concentration_unit)
# Propagate error.
if dKa is None:
dDG = None
else:
dDGdKa = -R * T / Ka # Derivative dDG(Ka)/dKa.
dDG = np.sqrt(dDGdKa**2 * dKa**2)
return DG, dDG
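# Worked example using the table above: Ka = 1.47e4 / M at T = 298 K gives
# DG = -R*T*ln(1.47e4) ~= -5.68 kcal/mol, matching the OA-G0 entry.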
def compute_TDS(DG, dDG, DH, dDH):
"""Compute the entropy from free energy and enthalpy.
Parameters
----------
DG : simtk.Quantity
Free energy.
dDG : simtk.Quantity
Free energy uncertainty.
DH : simtk.Quantity
Enthalpy.
dDH : simtk.Quantity
Enthalpy uncertainty.
Returns
-------
TDS : simtk.Quantity
Entropy.
dTDS : simtk.Quantity
Entropy uncertainty.
"""
TDS = DH - DG
dTDS = np.sqrt(dDH**2 + dDG**2)
return TDS, dTDS
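# Consistency check against the table above: OA-G0 has DH = -4.84 and
# DG = -5.68 kcal/mol, so TDS = DH - DG = -4.84 + 5.68 = 0.84 kcal/mol.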
def strip_units(quantities):
for k, v in quantities.items():
if isinstance(v, u.Quantity):
# We only have energies and association constants.
if 'Ka' in k:
quantities[k] = v.value_in_unit(v.unit)
else:
quantities[k] = v.value_in_unit(u.kilocalories_per_mole)
def reduce_to_first_significant_digit(quantity, uncertainty):
first_significant_digit = math.floor(math.log10(abs(uncertainty)))
quantity = round(quantity, -first_significant_digit)
uncertainty = round(uncertainty, -first_significant_digit)
return quantity, uncertainty
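# e.g. quantity=-5.683 with uncertainty=0.0312: floor(log10(0.0312)) = -2,
# so both are rounded to two decimals, giving (-5.68, 0.03).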
# =============================================================================
# MAIN
# =============================================================================
if __name__ == '__main__':
# Load names and SMILES of guests.
molecule_names = {
'CB8': load_smiles(CB8_GUESTS_SMILES_PATH),
'OA' : load_smiles(OA_GUESTS_SMILES_PATH),
'TEMOA' : load_smiles(OA_GUESTS_SMILES_PATH),
}
output_dict = OrderedDict()
upper_bound_molecules = dict(Ka=set(), DH=set(), TDS=set())
for system_name, system_data in EXPERIMENTAL_DATA.items():
host_name, guest_name = system_name.split('-')
guest_idx = int(guest_name[1:3])
# Load SMILES and common name of the molecule.
molecule_smiles, molecule_name = molecule_names[host_name][guest_idx]
# Create entry in the output dictionary.
output_dict[system_name] = OrderedDict([
('name', molecule_name),
('SMILES', molecule_smiles),
])
output_dict[system_name].update(system_data)
system_data = output_dict[system_name] # Shortcut.
# Incorporate the relative concentration uncertainties into quantities.
for k in ['Ka', 'DH']:
quantity = system_data[k]
relative_uncertainty = system_data['d' + k]
# Use upper-bound of 1% if <1% is reported. Keep track of these molecules.
if relative_uncertainty is None:
upper_bound_molecules[k].add(system_name)
relative_uncertainty = 0.01
# Incorporate the relative concentration uncertainties into quantities.
relative_uncertainty += RELATIVE_TITRANT_CONC_ERROR
# Convert relative to absolute errors.
system_data['d' + k] = abs(quantity * relative_uncertainty)
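# e.g. for OA-G0, dKa = (0.02 + 0.03) * 1.47e4 = 735 (in the same
# units as Ka), before units are stripped below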
# Propagate Ka and DH error into DG and TDS.
DG, dDG = compute_DG(system_data['Ka'], system_data['dKa'])
system_data['dDG'] = dDG
TDS, dTDS = compute_TDS(system_data['DG'], system_data['dDG'],
system_data['DH'], system_data['dDH'])
system_data['dTDS'] = dTDS
# Strip units.
strip_units(system_data)
# Consistency checks.
computed_DG = DG.value_in_unit(u.kilocalories_per_mole)
computed_TDS = TDS.value_in_unit(u.kilocalories_per_mole)
assert np.isclose(system_data['DG'], system_data['DH'] - system_data['TDS'], atol=0.020000000000001, rtol=0.0)
assert np.isclose(np.around(computed_TDS, decimals=2), system_data['TDS'], atol=0.0200000000000001, rtol=0.0)
assert np.isclose(np.around(computed_DG, decimals=2), system_data['DG'], atol=0.0200000000000001, rtol=0.0)
# Report only error most significant digit.
for k in ['Ka', 'DH', 'TDS', 'DG']:
quantity, uncertainty = system_data[k], system_data['d' + k]
if uncertainty is not None:
system_data[k], system_data['d' + k] = reduce_to_first_significant_digit(quantity, uncertainty)
# Create output JSON file.
with open('experimental_measurements.json', 'w') as f:
json.dump(output_dict, f)
# Create output CSV file.
# Convert single dict to list of dicts.
csv_dicts = []
for system_id, system_data in output_dict.items():
csv_dict = OrderedDict([('ID', system_id)])
csv_dict.update(system_data)
csv_dicts.append(csv_dict)
with open('experimental_measurements.csv', 'w') as f:
writer = csv.DictWriter(f, csv_dicts[0].keys(), delimiter=';')
writer.writeheader()
writer.writerows(csv_dicts)
# Create a LaTex table.
os.makedirs('PDFTable', exist_ok=True)
old_host = ''
with open('PDFTable/experimental_measurements.tex', 'w', encoding='utf-8') as f:
f.write('\\documentclass{article}\n'
'\\usepackage[a4paper,margin=0.4in,tmargin=0.5in,landscape]{geometry}\n'
'\\usepackage{tabu}\n'
'\\pagenumbering{gobble}\n'
'\\begin{document}\n'
'\\begin{center}\n'
'\\begin{tabu}')
# Cell alignment.
field_names = ['ID', 'name', '$K_a$ (M$^{-1}$)', '$\\Delta G$ (kcal/mol) $^{(a)}$', '$\\Delta H$ (kcal/mol)', '$T\\Delta S$ (kcal/mol) $^{(b)}$', '$n$']
f.write('{| ' + ' | '.join(['c' for _ in range(len(field_names))]) + ' |}\n')
# Table header.
f.write('\\hline\n')
f.write('\\rowfont{\\bfseries} ' + ' & '.join(field_names) + ' \\\\\n')
f.write('\\hline\n')
# Print lines.
for csv_dict in csv_dicts:
# Separate hosts with a double horizontal line.
host_name = csv_dict['ID'].split('-')[0]
if host_name != old_host:
f.write('\\hline\n')
old_host = host_name
row = '{ID} & {name}'
for k in ['Ka', 'DG', 'DH', 'TDS']:
row += ' & '
# Report Ka in scientific notation.
if k == 'Ka':
first_significant_digit = math.floor(math.log10(abs(csv_dict['d' + k])))
csv_dict['d' + k] /= 10**first_significant_digit
csv_dict[k] /= 10**first_significant_digit
row += '('
row += '{' + k + '} +- {d' + k + '}'
if k == 'Ka':
row += ') $\\times$ 10'
if first_significant_digit != 1:
row += '$^{{{{{}}}}}$'.format(first_significant_digit)
# Check if we used the upperbound.
superscript = ''
# if k != 'DG' and csv_dict['ID'] in upper_bound_molecules[k]:
# superscript += 'a'
if k == 'Ka':
if csv_dict['n'] == 0.33:
superscript += 'd'
elif csv_dict['n'] == 0.5 or csv_dict['n'] == 2:
superscript += 'c'
if superscript != '':
row += ' $^{{(' + superscript + ')}}$'
row += (' & {n} \\\\\n'
'\\hline\n')
f.write(row.format(**csv_dict))
f.write('\\end{tabu}\\end{center}\\vspace{5mm}\n'
'All quantities are reported as point estimate +- statistical error from the ITC data fitting procedure. '
'The upper bound ($1\\%$) was used for errors reported to be $<1\\%$. We also included a 3\\% relative '
'uncertainty in the titrant concentration assuming the stoichiometry coefficient to be fitted to the ITC '
'data [1]. This is exact only for the OA/TEMOA sets (with the exception of OA-G5, TEMOA-G5, and TEMOA G7). '
'For the other guests, we may expand the error analysis to include also the effect of the uncertainties '
'in titrand concentration and cell volume. \\\\\n'
'($^a$) Statistical errors were propagated from the $K_a$ measurements. \\\\\n'
'($^b$) All experiments were performed at 298 K. \\\\\n'
'($^c$) Units of M$^{-2}$. \\\\\n'
'($^d$) Units of M$^{-3}$.\n'
'\\end{document}\n')
| [
"json.dump",
"numpy.log",
"os.makedirs",
"numpy.around",
"numpy.isclose",
"collections.OrderedDict",
"numpy.sqrt"
] | [((11063, 11091), 'numpy.sqrt', 'np.sqrt', (['(dDH ** 2 + dDG ** 2)'], {}), '(dDH ** 2 + dDG ** 2)\n', (11070, 11091), True, 'import numpy as np\n'), ((12179, 12192), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12190, 12192), False, 'from collections import OrderedDict\n'), ((15413, 15451), 'os.makedirs', 'os.makedirs', (['"""PDFTable"""'], {'exist_ok': '(True)'}), "('PDFTable', exist_ok=True)\n", (15424, 15451), False, 'import os\n'), ((10347, 10378), 'numpy.log', 'np.log', (['(Ka * concentration_unit)'], {}), '(Ka * concentration_unit)\n', (10353, 10378), True, 'import numpy as np\n'), ((10519, 10550), 'numpy.sqrt', 'np.sqrt', (['(dDGdKa ** 2 * dKa ** 2)'], {}), '(dDGdKa ** 2 * dKa ** 2)\n', (10526, 10550), True, 'import numpy as np\n'), ((12636, 12703), 'collections.OrderedDict', 'OrderedDict', (["[('name', molecule_name), ('SMILES', molecule_smiles)]"], {}), "([('name', molecule_name), ('SMILES', molecule_smiles)])\n", (12647, 12703), False, 'from collections import OrderedDict\n'), ((14128, 14236), 'numpy.isclose', 'np.isclose', (["system_data['DG']", "(system_data['DH'] - system_data['TDS'])"], {'atol': '(0.020000000000001)', 'rtol': '(0.0)'}), "(system_data['DG'], system_data['DH'] - system_data['TDS'], atol=\n 0.020000000000001, rtol=0.0)\n", (14138, 14236), True, 'import numpy as np\n'), ((14887, 14912), 'json.dump', 'json.dump', (['output_dict', 'f'], {}), '(output_dict, f)\n', (14896, 14912), False, 'import json\n'), ((15081, 15113), 'collections.OrderedDict', 'OrderedDict', (["[('ID', system_id)]"], {}), "([('ID', system_id)])\n", (15092, 15113), False, 'from collections import OrderedDict\n'), ((874, 1108), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 14700.0 / u.molar), ('dKa', 0.02), ('DH', -4.84 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', 0.84 * u.\n kilocalories_per_mole), ('dTDS', 0.04), ('n', 1), ('DG', -5.68 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 14700.0 / u.molar), ('dKa', 0.02), ('DH', -4.84 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', 0.84 * u.\n kilocalories_per_mole), ('dTDS', 0.04), ('n', 1), ('DG', -5.68 * u.\n kilocalories_per_mole)])\n", (885, 1108), False, 'from collections import OrderedDict\n'), ((1148, 1381), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 2570.0 / u.molar), ('dKa', 0.01), ('DH', -5.52 * u.\n kilocalories_per_mole), ('dDH', 0.01), ('TDS', -0.86 * u.\n kilocalories_per_mole), ('dTDS', 0.1), ('n', 1), ('DG', -4.65 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 2570.0 / u.molar), ('dKa', 0.01), ('DH', -5.52 * u.\n kilocalories_per_mole), ('dDH', 0.01), ('TDS', -0.86 * u.\n kilocalories_per_mole), ('dTDS', 0.1), ('n', 1), ('DG', -4.65 * u.\n kilocalories_per_mole)])\n", (1159, 1381), False, 'from collections import OrderedDict\n'), ((1422, 1660), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 1400000.0 / u.molar), ('dKa', 0.01), ('DH', -12.07 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -3.69 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -8.38 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 1400000.0 / u.molar), ('dKa', 0.01), ('DH', -12.07 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -3.69 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -8.38 * u.\n kilocalories_per_mole)])\n", (1433, 1660), False, 'from collections import OrderedDict\n'), ((1697, 1931), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 6210.0 / u.molar), ('dKa', None), ('DH', -7.53 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -2.35 * u.\n kilocalories_per_mole), ('dTDS', 0.01), ('n', 1), ('DG', -5.18 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 6210.0 / u.molar), ('dKa', None), ('DH', -7.53 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -2.35 * u.\n kilocalories_per_mole), ('dTDS', 0.01), ('n', 1), ('DG', -5.18 * u.\n kilocalories_per_mole)])\n", (1708, 1931), False, 'from collections import OrderedDict\n'), ((1972, 2207), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 164000.0 / u.molar), ('dKa', 0.01), ('DH', -6.92 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', 0.19 * u.\n kilocalories_per_mole), ('dTDS', 0.13), ('n', 1), ('DG', -7.11 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 164000.0 / u.molar), ('dKa', 0.01), ('DH', -6.92 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', 0.19 * u.\n kilocalories_per_mole), ('dTDS', 0.13), ('n', 1), ('DG', -7.11 * u.\n kilocalories_per_mole)])\n", (1983, 2207), False, 'from collections import OrderedDict\n'), ((2246, 2480), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 2330.0 / u.molar), ('dKa', 0.01), ('DH', -5.31 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -0.71 * u.\n kilocalories_per_mole), ('dTDS', 0.02), ('n', 1), ('DG', -4.59 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 2330.0 / u.molar), ('dKa', 0.01), ('DH', -5.31 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -0.71 * u.\n kilocalories_per_mole), ('dTDS', 0.02), ('n', 1), ('DG', -4.59 * u.\n kilocalories_per_mole)])\n", (2257, 2480), False, 'from collections import OrderedDict\n'), ((2521, 2755), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 4370.0 / u.molar), ('dKa', None), ('DH', -5.29 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -0.33 * u.\n kilocalories_per_mole), ('dTDS', 0.11), ('n', 1), ('DG', -4.97 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 4370.0 / u.molar), ('dKa', None), ('DH', -5.29 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -0.33 * u.\n kilocalories_per_mole), ('dTDS', 0.11), ('n', 1), ('DG', -4.97 * u.\n kilocalories_per_mole)])\n", (2532, 2755), False, 'from collections import OrderedDict\n'), ((2796, 3031), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 36000.0 / u.molar), ('dKa', None), ('DH', -7.44 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -1.23 * u.\n kilocalories_per_mole), ('dTDS', 0.02), ('n', 1), ('DG', -6.22 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 36000.0 / u.molar), ('dKa', None), ('DH', -7.44 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -1.23 * u.\n kilocalories_per_mole), ('dTDS', 0.02), ('n', 1), ('DG', -6.22 * u.\n kilocalories_per_mole)])\n", (2807, 3031), False, 'from collections import OrderedDict\n'), ((3074, 3309), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 28100.0 / u.molar), ('dKa', None), ('DH', -7.85 * u.\n kilocalories_per_mole), ('dDH', 0.02), ('TDS', -1.77 * u.\n kilocalories_per_mole), ('dTDS', 0.08), ('n', 1), ('DG', -6.06 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 28100.0 / u.molar), ('dKa', None), ('DH', -7.85 * u.\n kilocalories_per_mole), ('dDH', 0.02), ('TDS', -1.77 * u.\n kilocalories_per_mole), ('dTDS', 0.08), ('n', 1), ('DG', -6.06 * u.\n kilocalories_per_mole)])\n", (3085, 3309), False, 'from collections import OrderedDict\n'), ((3352, 3587), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 24000.0 / u.molar), ('dKa', 0.04), ('DH', -8.25 * u.\n kilocalories_per_mole), ('dDH', 0.04), ('TDS', -2.27 * u.\n kilocalories_per_mole), ('dTDS', 0.14), ('n', 1), ('DG', -5.97 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 24000.0 / u.molar), ('dKa', 0.04), ('DH', -8.25 * u.\n kilocalories_per_mole), ('dDH', 0.04), ('TDS', -2.27 * u.\n kilocalories_per_mole), ('dTDS', 0.14), ('n', 1), ('DG', -5.97 * u.\n kilocalories_per_mole)])\n", (3363, 3587), False, 'from collections import OrderedDict\n'), ((3629, 3864), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 98200.0 / u.molar), ('dKa', None), ('DH', -9.27 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -2.46 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -6.81 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 98200.0 / u.molar), ('dKa', None), ('DH', -9.27 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -2.46 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -6.81 * u.\n kilocalories_per_mole)])\n", (3640, 3864), False, 'from collections import OrderedDict\n'), ((3907, 4141), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 12800.0 / u.molar), ('dKa', 0.04), ('DH', -8.86 * u.\n kilocalories_per_mole), ('dDH', 0.02), ('TDS', -3.25 * u.\n kilocalories_per_mole), ('dTDS', 0.04), ('n', 1), ('DG', -5.6 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 12800.0 / u.molar), ('dKa', 0.04), ('DH', -8.86 * u.\n kilocalories_per_mole), ('dDH', 0.02), ('TDS', -3.25 * u.\n kilocalories_per_mole), ('dTDS', 0.04), ('n', 1), ('DG', -5.6 * u.\n kilocalories_per_mole)])\n", (3918, 4141), False, 'from collections import OrderedDict\n'), ((4184, 4420), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 512000.0 / u.molar), ('dKa', None), ('DH', -8.87 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -1.08 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -7.79 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 512000.0 / u.molar), ('dKa', None), ('DH', -8.87 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -1.08 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -7.79 * u.\n kilocalories_per_mole)])\n", (4195, 4420), False, 'from collections import OrderedDict\n'), ((4462, 4695), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 1130.0 / u.molar), ('dKa', None), ('DH', -7.96 * u.\n kilocalories_per_mole), ('dDH', 0.01), ('TDS', -3.8 * u.\n kilocalories_per_mole), ('dTDS', 0.03), ('n', 1), ('DG', -4.16 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 1130.0 / u.molar), ('dKa', None), ('DH', -7.96 * u.\n kilocalories_per_mole), ('dDH', 0.01), ('TDS', -3.8 * u.\n kilocalories_per_mole), ('dTDS', 0.03), ('n', 1), ('DG', -4.16 * u.\n kilocalories_per_mole)])\n", (4473, 4695), False, 'from collections import OrderedDict\n'), ((4739, 4972), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 9120.0 / u.molar), ('dKa', 0.02), ('DH', -6.19 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -0.79 * u.\n kilocalories_per_mole), ('dTDS', 0.07), ('n', 1), ('DG', -5.4 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 9120.0 / u.molar), ('dKa', 0.02), ('DH', -6.19 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -0.79 * u.\n kilocalories_per_mole), ('dTDS', 0.07), ('n', 1), ('DG', -5.4 * u.\n kilocalories_per_mole)])\n", (4750, 4972), False, 'from collections import OrderedDict\n'), ((5016, 5249), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 1070.0 / u.molar), ('dKa', None), ('DH', -8.33 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -4.2 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -4.13 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 1070.0 / u.molar), ('dKa', None), ('DH', -8.33 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -4.2 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -4.13 * u.\n kilocalories_per_mole)])\n", (5027, 5249), False, 'from collections import OrderedDict\n'), ((5292, 5526), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 80600.0 / u.molar), ('dKa', 0.05), ('DH', -4.22 * u.\n kilocalories_per_mole), ('dDH', 0.02), ('TDS', 2.48 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -6.69 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 80600.0 / u.molar), ('dKa', 0.05), ('DH', -4.22 * u.\n kilocalories_per_mole), ('dDH', 0.02), ('TDS', 2.48 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -6.69 * u.\n kilocalories_per_mole)])\n", (5303, 5526), False, 'from collections import OrderedDict\n'), ((5567, 5807), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 403000.0 / u.molar ** 2), ('dKa', 0.04), ('DH', -5.05 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', 2.6 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 0.5), ('DG', -7.65 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 403000.0 / u.molar ** 2), ('dKa', 0.04), ('DH', -5.05 *\n u.kilocalories_per_mole), ('dDH', None), ('TDS', 2.6 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 0.5), ('DG', -7.65 * u.\n kilocalories_per_mole)])\n", (5578, 5807), False, 'from collections import OrderedDict\n'), ((5846, 6080), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 408000.0 / u.molar), ('dKa', 0.06), ('DH', -6.5 * u.\n kilocalories_per_mole), ('dDH', 0.01), ('TDS', 1.16 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -7.66 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 408000.0 / u.molar), ('dKa', 0.06), ('DH', -6.5 * u.\n kilocalories_per_mole), ('dDH', 0.01), ('TDS', 1.16 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -7.66 * u.\n kilocalories_per_mole)])\n", (5857, 6080), False, 'from collections import OrderedDict\n'), ((6120, 6354), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 53400.0 / u.molar), ('dKa', 0.07), ('DH', -2.46 * u.\n kilocalories_per_mole), ('dDH', 0.03), ('TDS', 3.99 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -6.45 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 53400.0 / u.molar), ('dKa', 0.07), ('DH', -2.46 * u.\n kilocalories_per_mole), ('dDH', 0.03), ('TDS', 3.99 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -6.45 * u.\n kilocalories_per_mole)])\n", (6131, 6354), False, 'from collections import OrderedDict\n'), ((6395, 6637), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 513000.0 / u.molar ** 3), ('dKa', 0.04), ('DH', -9.83 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -2.03 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 0.33), ('DG', -7.8 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 513000.0 / u.molar ** 3), ('dKa', 0.04), ('DH', -9.83 *\n u.kilocalories_per_mole), ('dDH', None), ('TDS', -2.03 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 0.33), ('DG', -7.8 * u.\n kilocalories_per_mole)])\n", (6406, 6637), False, 'from collections import OrderedDict\n'), ((6676, 6910), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 990000.0 / u.molar), ('dKa', 0.06), ('DH', -3.18 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', 5.0 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -8.18 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 990000.0 / u.molar), ('dKa', 0.06), ('DH', -3.18 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', 5.0 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -8.18 * u.\n kilocalories_per_mole)])\n", (6687, 6910), False, 'from collections import OrderedDict\n'), ((6949, 7185), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 1300000.0 / u.molar), ('dKa', 0.06), ('DH', -5.69 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', 2.65 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -8.34 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 1300000.0 / u.molar), ('dKa', 0.06), ('DH', -5.69 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', 2.65 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -8.34 * u.\n kilocalories_per_mole)])\n", (6960, 7185), False, 'from collections import OrderedDict\n'), ((7223, 7459), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 20800000.0 / u.molar), ('dKa', 0.14), ('DH', -6.48 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', 3.5 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -9.98 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 20800000.0 / u.molar), ('dKa', 0.14), ('DH', -6.48 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', 3.5 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -9.98 * u.\n kilocalories_per_mole)])\n", (7234, 7459), False, 'from collections import OrderedDict\n'), ((7497, 7736), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 8260000000.0 / u.molar), ('dKa', 0.04), ('DH', -14.4 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', -0.88 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -13.5 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 8260000000.0 / u.molar), ('dKa', 0.04), ('DH', -14.4 *\n u.kilocalories_per_mole), ('dDH', None), ('TDS', -0.88 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -13.5 * u.\n kilocalories_per_mole)])\n", (7508, 7736), False, 'from collections import OrderedDict\n'), ((7773, 8008), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 2290000.0 / u.molar), ('dKa', 0.1), ('DH', -4.63 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', 4.05 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -8.68 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 2290000.0 / u.molar), ('dKa', 0.1), ('DH', -4.63 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', 4.05 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -8.68 * u.\n kilocalories_per_mole)])\n", (7784, 8008), False, 'from collections import OrderedDict\n'), ((8048, 8283), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 1050000.0 / u.molar), ('dKa', 0.09), ('DH', -2.0 * u.\n kilocalories_per_mole), ('dDH', 0.01), ('TDS', 6.22 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -8.22 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 1050000.0 / u.molar), ('dKa', 0.09), ('DH', -2.0 * u.\n kilocalories_per_mole), ('dDH', 0.01), ('TDS', 6.22 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -8.22 * u.\n kilocalories_per_mole)])\n", (8059, 8283), False, 'from collections import OrderedDict\n'), ((8323, 8558), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 498000.0 / u.molar), ('dKa', 0.06), ('DH', -2.11 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', 5.67 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -7.77 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 498000.0 / u.molar), ('dKa', 0.06), ('DH', -2.11 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', 5.67 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -7.77 * u.\n kilocalories_per_mole)])\n", (8334, 8558), False, 'from collections import OrderedDict\n'), ((8600, 8839), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 16700000.0 / u.molar), ('dKa', 0.025), ('DH', -9.16 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', 0.697 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -9.86 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 16700000.0 / u.molar), ('dKa', 0.025), ('DH', -9.16 * u\n .kilocalories_per_mole), ('dDH', None), ('TDS', 0.697 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -9.86 * u.\n kilocalories_per_mole)])\n", (8611, 8839), False, 'from collections import OrderedDict\n'), ((8879, 9118), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 146000.0 / u.molar ** 2), ('dKa', 0.01), ('DH', -4.83 * u.\n kilocalories_per_mole), ('dDH', None), ('TDS', 2.23 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 2), ('DG', -7.05 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 146000.0 / u.molar ** 2), ('dKa', 0.01), ('DH', -4.83 *\n u.kilocalories_per_mole), ('dDH', None), ('TDS', 2.23 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 2), ('DG', -7.05 * u.\n kilocalories_per_mole)])\n", (8890, 9118), False, 'from collections import OrderedDict\n'), ((9158, 9392), 'collections.OrderedDict', 'OrderedDict', (["[('Ka', 161000.0 / u.molar), ('dKa', 0.02), ('DH', -6.8 * u.\n kilocalories_per_mole), ('dDH', 0.01), ('TDS', 0.31 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -7.11 * u.\n kilocalories_per_mole)]"], {}), "([('Ka', 161000.0 / u.molar), ('dKa', 0.02), ('DH', -6.8 * u.\n kilocalories_per_mole), ('dDH', 0.01), ('TDS', 0.31 * u.\n kilocalories_per_mole), ('dTDS', None), ('n', 1), ('DG', -7.11 * u.\n kilocalories_per_mole)])\n", (9169, 9392), False, 'from collections import OrderedDict\n'), ((14258, 14293), 'numpy.around', 'np.around', (['computed_TDS'], {'decimals': '(2)'}), '(computed_TDS, decimals=2)\n', (14267, 14293), True, 'import numpy as np\n'), ((14376, 14410), 'numpy.around', 'np.around', (['computed_DG'], {'decimals': '(2)'}), '(computed_DG, decimals=2)\n', (14385, 14410), True, 'import numpy as np\n')] |
# coding: utf-8
import numpy as np
from ..utils import handleKeyError
from ..utils import flatten_dual
from ..utils import ItemsetTreeDOTexporter
from ..utils import DOTexporterHandler
ITEM_MINING_METHODS = ["all", "closed"]
def create_one_hot(data):
"""
Create the one-hot binary matrix.
    @params data : (list) Each element of data (data[i]) is a variable-length list of items.
@return one_hot : shape=(num_data,num_unique)
one_hot[n][i]=1 means data[n] contains idx2data[i]
@return idx2data : Dictionary from index to original data.
"""
unique_data = sorted(list(set(flatten_dual(data))))
num_unique = len(unique_data)
num_data = len(data)
data2idx = dict(zip(unique_data, range(num_unique)))
one_hot = np.zeros(shape=(num_data,num_unique), dtype=np.int32)
for i,row in enumerate(data):
one_hot[i, np.asarray([data2idx[e] for e in row], dtype=int)]=1
idx2data = dict(zip(range(num_unique), unique_data))
return one_hot, idx2data
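# Illustrative usage (hypothetical data, added for clarity):
#   one_hot, idx2data = create_one_hot([["a", "b"], ["b"], ["b", "c"]])
#   one_hot  -> [[1, 1, 0], [0, 1, 0], [0, 1, 1]]
#   idx2data -> {0: "a", 1: "b", 2: "c"}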
class Node():
""" Node for Tree structure.
@params database :
@params itemset :
@params num_items :
@params freq :
@params tail :
@params threshold :
"""
def __init__(self, itemset, freq, tail):
self.itemset = itemset
self.freq = freq # (=len(database))
self.tail = tail
self.children = []
def _recurse_all(self, database, threshold, num_items):
""" Find ALL closed Itemsets. """
for i in range(self.tail+1, num_items):
next_itemset = self.itemset + [i]
next_data = database[database[:,i]==1,:]
freq = len(next_data)
if freq >= threshold:
child = Node(itemset=next_itemset, freq=freq, tail=i)
child._recurse_all(next_data, threshold, num_items)
self.children.append(child)
def _recurse_closed(self, database, threshold, num_items):
""" Find ONLY closed Itemsets. """
for i in range(self.tail+1, num_items):
next_data = database[database[:,i]==1,:]
freq = len(next_data)
if freq >= threshold:
add_itemset = i+np.where(np.all(next_data[:,i:], axis=0))[0]
next_itemset = self.itemset + add_itemset.tolist()
child = Node(itemset=next_itemset, freq=freq, tail=max(add_itemset))
child._recurse_closed(next_data, threshold, num_items)
self.children.append(child)
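    # Note (added): np.where(np.all(next_data[:, i:], axis=0)) above collects
    # every column >= i that equals 1 in ALL supporting rows, so each node stores
    # the closure of its itemset (the maximal itemset with the same support).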
class FrequentSet():
def __init__(self, threshold):
self.root = None
self.threshold = threshold
self.freq_sets = []
def fit(self, database, method="closed"):
"""
@param database: Binary Matrix. shape=(num_transactions, num_items)
"""
method = method.lower()
handleKeyError(lst=ITEM_MINING_METHODS, method=method)
num_transactions, num_items = database.shape
self.root = Node(itemset=[], freq=num_transactions, tail=-1)
self.root.__getattribute__({
"all" : "_recurse_all",
"closed" : "_recurse_closed",
}[method]).__call__(database, self.threshold, num_items)
self.num_items = num_items
self.all = self.get_itemsets(self.root)
def get_itemsets(self, node):
freq_sets = [node.itemset]
for child in node.children:
freq_sets.extend(self.get_itemsets(node=child))
return freq_sets
def export_graphviz(self, out_file=None, feature_names=None,
class_names=None, cmap="jet", filled=True,
rounded=True, precision=3):
if class_names is None:
class_names = np.arange(self.num_items)
exporter = ItemsetTreeDOTexporter(
cmap=cmap, class_names=class_names,
filled=filled, rounded=rounded, precision=precision
)
return DOTexporterHandler(exporter, root=self.root, out_file=out_file)
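# Minimal end-to-end sketch (added; the threshold and transactions are illustrative):
#   one_hot, idx2data = create_one_hot([["a", "b"], ["a", "b"], ["b", "c"]])
#   fs = FrequentSet(threshold=2)
#   fs.fit(one_hot, method="closed")
#   fs.all -> [[], [0, 1], [1]]   # [] is the root; [0, 1] = {a, b}; [1] = {b}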
| [
"numpy.asarray",
"numpy.zeros",
"numpy.arange",
"numpy.all"
] | [((756, 810), 'numpy.zeros', 'np.zeros', ([], {'shape': '(num_data, num_unique)', 'dtype': 'np.int32'}), '(shape=(num_data, num_unique), dtype=np.int32)\n', (764, 810), True, 'import numpy as np\n'), ((3703, 3728), 'numpy.arange', 'np.arange', (['self.num_items'], {}), '(self.num_items)\n', (3712, 3728), True, 'import numpy as np\n'), ((863, 912), 'numpy.asarray', 'np.asarray', (['[data2idx[e] for e in row]'], {'dtype': 'int'}), '([data2idx[e] for e in row], dtype=int)\n', (873, 912), True, 'import numpy as np\n'), ((2191, 2223), 'numpy.all', 'np.all', (['next_data[:, i:]'], {'axis': '(0)'}), '(next_data[:, i:], axis=0)\n', (2197, 2223), True, 'import numpy as np\n')] |
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file (the variable `path` is assumed to be pre-defined by the execution environment)
path
data=pd.read_csv(path).rename(columns={'Total':'Total_Medals'})
data.head(10)
#Code starts here
# --------------
#Code starts here
data['Better_Event']=np.where(data['Total_Summer'] == data['Total_Winter'] , 'Both' , (np.where(data['Total_Summer'] > data['Total_Winter'] , 'Summer','Winter')))
better_event= data['Better_Event'].value_counts().idxmax()
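#the nested np.where above acts as a vectorized if/elif/else, e.g. (illustrative):
#np.where([True, False], 'Both', np.where([False, True], 'Summer', 'Winter'))
#returns array(['Both', 'Summer'], dtype='<U6')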
# --------------
#Code starts here
top_countries = data[['Country_Name','Total_Summer', 'Total_Winter','Total_Medals']]
top_countries=top_countries[:-1]
def top_ten(data,col):
country_list=[]
country_list=list((data.nlargest(10,col)['Country_Name']))
return country_list
top_10_summer= top_ten(top_countries,'Total_Summer')
print(top_10_summer)
top_10_winter= top_ten(top_countries,'Total_Winter')
print(top_10_winter)
top_10= top_ten(top_countries,'Total_Medals')
print(top_10)
common=list(set(top_10_summer) & set(top_10_winter) & set(top_10))
print(common)
# --------------
#Code starts here
summer_df=data[data['Country_Name'].isin(top_10_summer)]
winter_df =data[data['Country_Name'].isin(top_10_winter)]
top_df=data[data['Country_Name'].isin(top_10)]
# --------------
#Code starts here
summer_df['Golden_Ratio']= summer_df['Gold_Summer']/summer_df['Total_Summer']
summer_max_ratio = (summer_df['Golden_Ratio']).max()
summer_country_gold=summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name']
winter_df['Golden_Ratio']= winter_df['Gold_Winter']/winter_df['Total_Winter']
winter_max_ratio = (winter_df['Golden_Ratio']).max()
winter_country_gold=winter_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name']
top_df['Golden_Ratio']= top_df['Gold_Total']/top_df['Total_Medals']
top_max_ratio = (top_df['Golden_Ratio']).max()
top_country_gold=top_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name']
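#the pattern df.loc[df[col].idxmax(), 'Country_Name'] used above selects the
#country on the row where col is largest; e.g. pd.Series([0.3, 0.9, 0.5]).idxmax()
#returns 1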
# --------------
#Code starts here
data_1= data[:-1]
data_1['Total_Points']= data_1['Gold_Total']*3 +data_1['Silver_Total']*2 +data_1['Bronze_Total']
most_points=max(data_1['Total_Points'])
best_country=data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']
# --------------
#Code starts here
best=data[data['Country_Name']==best_country]
best=best[['Gold_Total','Silver_Total','Bronze_Total']]
best.plot.bar()
plt.xlabel('United States')
plt.ylabel('Medals Tally')
plt.xticks(rotation=45)
| [
"pandas.read_csv",
"numpy.where",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((2468, 2495), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""United States"""'], {}), "('United States')\n", (2478, 2495), True, 'import matplotlib.pyplot as plt\n'), ((2497, 2523), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Medals Tally"""'], {}), "('Medals Tally')\n", (2507, 2523), True, 'import matplotlib.pyplot as plt\n'), ((2525, 2548), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (2535, 2548), True, 'import matplotlib.pyplot as plt\n'), ((380, 453), 'numpy.where', 'np.where', (["(data['Total_Summer'] > data['Total_Winter'])", '"""Summer"""', '"""Winter"""'], {}), "(data['Total_Summer'] > data['Total_Winter'], 'Summer', 'Winter')\n", (388, 453), True, 'import numpy as np\n'), ((148, 165), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (159, 165), True, 'import pandas as pd\n')] |
import numpy as np
import pandas as pd
import networkx as nx
import numpy.linalg as la
class DataSimulation:
def __init__(self,p,n_days,t=None,road_props=None, noise_scale=1,t_switch=0,test_frac=0.8):
self.p=p
self.n_days=n_days
if not t is None:
self.t=t
else :
self.t = np.arange(14.75,20,0.25)
if not road_props is None:
self.road_props=road_props
else :
self.road_props= dict(zip([30, 50, 80, 130],np.random.multinomial(self.p,[0,0.25,0.5,0.25])))
self.noise_scale=noise_scale
self.t_switch=t_switch
self.tau = len(self.t)
self.test_frac = test_frac
def rearange_data(data,p):
data=data.swapaxes(0,2)
data=data.swapaxes(1,2)
data=data.reshape(p,-1)
return data
def generate_date_index(n_days):
start_date = pd.datetime(2020,1,1,15,0,0)
dinx=pd.date_range('2020-1-1 15:00:00+01:00',periods=4*24*n_days,freq='15min')
return dinx[(dinx.time>= pd.datetime(2020,1,1,15,0,0).time()) & (dinx.time< pd.datetime(2020,1,1,20,0,0).time()) ]
def gen_one_instant_speed(max_speed,normal_center=0.9,size=1):
normal_values = np.random.normal(0,(max_speed-normal_center*max_speed)/2,size)
return normal_center*max_speed+normal_values
def run_generation_formula(A,b,w,tau,p,A_R=None,t_switch=None,noise_scale=1):
data=[]
cur_A=A
for i in range(tau-1):
if t_switch is not None and i>t_switch :
cur_A=A_R
x= w-b[i][:]
noise = np.random.normal(0,noise_scale,size=p)
w= b[i+1][:] + cur_A.dot(x) + noise
data.append(w)
return np.array(data)
def generate_intercept(t,road_props,tau):
b_t= (2.5**2-(t-17.5)**2)
b_t=np.reshape(b_t,(1,-1))
b_t_difference = b_t[0][1:]-b_t[0][0:-1]
b_p = np.concatenate([DataSimulation.gen_one_instant_speed(max_speed,normal_center=0.9,size=prop) for max_speed,prop in road_props.items()])
b_p=np.reshape(b_p,(-1,1))
b_p=b_p.repeat(tau,axis=1)
b=b_p.T-b_t.T
return b
def generate_graph(p):
g = nx.gnm_random_graph(p, 8*p,directed=True)
return g
def generate_A_matrix(g,p):
A=np.random.uniform(-1,1,size=(p,p))*(np.array([[1 if i in g.adj[j] else 0 for i in range(p)] for j in g.adj])+np.diag([1]*p))
A=(A.T/la.norm(A,axis=1)).T
return A
def generate_data(self):
self.g = DataSimulation.generate_graph(self.p)
self.b = DataSimulation.generate_intercept(self.t,self.road_props,self.tau)
self.A_L = DataSimulation.generate_A_matrix(self.g,self.p)
self.A_R = DataSimulation.generate_A_matrix(self.g,self.p)
full_days_data = []
for i in range(self.n_days):
w0 = np.concatenate([DataSimulation.gen_one_instant_speed(max_speed,normal_center=0.9,size=prop) for max_speed,prop in self.road_props.items()])
data= DataSimulation.run_generation_formula(self.A_L,self.b,w0,self.tau,self.p,A_R = self.A_R,t_switch= self.t_switch+1)
full_days_data.append(data)
self.full_days_data=np.array(full_days_data)
return self.full_days_data
def split_center_data(self):
full_days_data_train=self.full_days_data[:int(self.test_frac*self.n_days)]
full_days_data_test=self.full_days_data[int(self.test_frac*self.n_days):]
full_days_data_train = DataSimulation.rearange_data(full_days_data_train,self.p)
full_days_data_test = DataSimulation.rearange_data(full_days_data_test,self.p)
sim_train_df = pd.DataFrame(data= full_days_data_train,columns=DataSimulation.generate_date_index(self.n_days)[:int(self.test_frac*self.n_days*(self.tau-1))])
sim_test_df = pd.DataFrame(data= full_days_data_test,columns=DataSimulation.generate_date_index(self.n_days)[int(self.test_frac*self.n_days*(self.tau-1)):])
intercept = pd.concat([sim_train_df.groupby(pd.to_datetime(sim_train_df.columns).time,axis=1).mean()
]*self.n_days,axis=1)
intercept.columns=DataSimulation.generate_date_index(self.n_days)
sim_train_intercept = intercept[intercept.columns[:int(self.test_frac*self.n_days)*(self.tau-1)]]
sim_test_intercept = intercept[intercept.columns[int(self.test_frac*self.n_days)*(self.tau-1):]]
sim_train_df=sim_train_df-sim_train_intercept
sim_test_df=sim_test_df-sim_test_intercept
return sim_train_df,sim_train_intercept,sim_test_df,sim_test_intercept
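# Minimal usage sketch (added; parameter values are illustrative only):
#   sim = DataSimulation(p=8, n_days=5, noise_scale=1, t_switch=10)
#   speeds = sim.generate_data()   # shape: (n_days, tau - 1, p)
#   train_df, train_b, test_df, test_b = sim.split_center_data()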
| [
"numpy.random.uniform",
"pandas.date_range",
"pandas.datetime",
"numpy.random.multinomial",
"networkx.nx.gnm_random_graph",
"numpy.array",
"numpy.reshape",
"numpy.random.normal",
"numpy.arange",
"numpy.linalg.norm",
"numpy.diag",
"pandas.to_datetime"
] | [((905, 938), 'pandas.datetime', 'pd.datetime', (['(2020)', '(1)', '(1)', '(15)', '(0)', '(0)'], {}), '(2020, 1, 1, 15, 0, 0)\n', (916, 938), True, 'import pandas as pd\n'), ((947, 1026), 'pandas.date_range', 'pd.date_range', (['"""2020-1-1 15:00:00+01:00"""'], {'periods': '(4 * 24 * n_days)', 'freq': '"""15min"""'}), "('2020-1-1 15:00:00+01:00', periods=4 * 24 * n_days, freq='15min')\n", (960, 1026), True, 'import pandas as pd\n'), ((1236, 1306), 'numpy.random.normal', 'np.random.normal', (['(0)', '((max_speed - normal_center * max_speed) / 2)', 'size'], {}), '(0, (max_speed - normal_center * max_speed) / 2, size)\n', (1252, 1306), True, 'import numpy as np\n'), ((1759, 1773), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1767, 1773), True, 'import numpy as np\n'), ((1873, 1897), 'numpy.reshape', 'np.reshape', (['b_t', '(1, -1)'], {}), '(b_t, (1, -1))\n', (1883, 1897), True, 'import numpy as np\n'), ((2106, 2130), 'numpy.reshape', 'np.reshape', (['b_p', '(-1, 1)'], {}), '(b_p, (-1, 1))\n', (2116, 2130), True, 'import numpy as np\n'), ((2243, 2287), 'networkx.nx.gnm_random_graph', 'nx.gnm_random_graph', (['p', '(8 * p)'], {'directed': '(True)'}), '(p, 8 * p, directed=True)\n', (2262, 2287), False, 'from networkx import nx\n'), ((3249, 3273), 'numpy.array', 'np.array', (['full_days_data'], {}), '(full_days_data)\n', (3257, 3273), True, 'import numpy as np\n'), ((337, 363), 'numpy.arange', 'np.arange', (['(14.75)', '(20)', '(0.25)'], {}), '(14.75, 20, 0.25)\n', (346, 363), True, 'import numpy as np\n'), ((1622, 1662), 'numpy.random.normal', 'np.random.normal', (['(0)', 'noise_scale'], {'size': 'p'}), '(0, noise_scale, size=p)\n', (1638, 1662), True, 'import numpy as np\n'), ((2344, 2381), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': '(p, p)'}), '(-1, 1, size=(p, p))\n', (2361, 2381), True, 'import numpy as np\n'), ((2454, 2470), 'numpy.diag', 'np.diag', (['([1] * p)'], {}), '([1] * p)\n', (2461, 2470), True, 'import numpy as np\n'), ((2485, 2503), 'numpy.linalg.norm', 'la.norm', (['A'], {'axis': '(1)'}), '(A, axis=1)\n', (2492, 2503), True, 'import numpy.linalg as la\n'), ((508, 559), 'numpy.random.multinomial', 'np.random.multinomial', (['self.p', '[0, 0.25, 0.5, 0.25]'], {}), '(self.p, [0, 0.25, 0.5, 0.25])\n', (529, 559), True, 'import numpy as np\n'), ((1054, 1087), 'pandas.datetime', 'pd.datetime', (['(2020)', '(1)', '(1)', '(15)', '(0)', '(0)'], {}), '(2020, 1, 1, 15, 0, 0)\n', (1065, 1087), True, 'import pandas as pd\n'), ((1105, 1138), 'pandas.datetime', 'pd.datetime', (['(2020)', '(1)', '(1)', '(20)', '(0)', '(0)'], {}), '(2020, 1, 1, 20, 0, 0)\n', (1116, 1138), True, 'import pandas as pd\n'), ((4074, 4110), 'pandas.to_datetime', 'pd.to_datetime', (['sim_train_df.columns'], {}), '(sim_train_df.columns)\n', (4088, 4110), True, 'import pandas as pd\n')] |
import re,os,sys,warnings,numpy,scipy,math,itertools;
from scipy import stats;
from numpy import *;
from multiprocessing import Pool;
from scipy.optimize import fmin_cobyla
from scipy.optimize import fmin_l_bfgs_b
from math import log;
numpy.random.seed(1231);
warnings.filterwarnings('ignore');
#obsolete variables: #ReadLength
read_length=1;
#JunctionLength
junction_length=1;
#splicing difference cutoff
cutoff=0.1;
#MultiProcessor
MultiProcessor=1;
if len(sys.argv)>=5:
MultiProcessor=int(sys.argv[4]);
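#expected invocation (inferred from the sys.argv usage in this script):
#  python <this_script>.py <exon_count_file> <survival_file> <output_file> [n_processors]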
#survival time and events
time=[];event=[];Lambda=[];risk=[];
ifile=open(sys.argv[2]);
ifile.readline();ilines=ifile.readlines();
for i in ilines:
element=re.findall('[^\t\n]+',i);
if element[1]=='NA':
time.append(-1);
event.append(0);
else:
time.append(float(element[1]));
event.append(float(element[2]));
for i in range(len(time)):
d=sum((array(time)==time[i])*array(event));
n=sum(array(time)>=time[i]);
risk.append(float(d)/n);
#unique time point?
for i in range(len(time)):
Lambda.append(sum((array(time)<=time[i])*array(risk)));
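#note (added): risk[i] above is the Nelson-Aalen style hazard increment d/n at
#time[i], and Lambda[i] accumulates those increments over all times <= time[i],
#an estimate of the cumulative baseline hazard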
#update the baseline hazard
def update_hazard(beta_cov,beta_psi,cov,psi,time,event,I,S,inc_length,skp_length):
risk=[];Lambda=[];risk_unique=[0]*len(time);time_rec={};next_time_rec=[];
cut=stats.mstats.mquantiles(time,array(range(11))/10.0);
#print('cut');print(cut);print('beta_psi');print(beta_psi);
for i in range(len(time)):
adj=min(1,(sum(I[i])+sum(S[i]))*pow(10,-2));
if sum(I[i])+sum(S[i])<=10:
adj=adj*0.1;
adj=1;
temp=list(abs(time[i]-cut));index=temp.index(min(temp));
if time[i]>cut[index]:
this_time=cut[index];this_time_next=cut[index+1];
else:
this_time=cut[index-1];this_time_next=cut[index];
next_time_rec.append(this_time_next);
if this_time in time_rec:
risk.append(time_rec[this_time]);continue;
d=sum((array(time)>=this_time)*(array(time)<this_time_next)*array(event));
risk_set=(array(time)>=this_time);
#j is the index of the risk_set
risk_set_beta=0;
for j in range(len(risk_set)):
if risk_set[j]==True:
linear=exp(dot(array(beta_cov),array(cov[j]))+dot(array(beta_psi),array(veclogit(psi[j]))));
weight=0;
for e in range(len(I[j])):
weight=weight+pow(beta_psi[e],2)/F(I[j][e],S[j][e],psi[j][e],inc_length,skp_length);
weight=max(1,1/(1-weight))*adj;
#weight=1-weight;
#print('weight');print(weight);
risk_set_beta=risk_set_beta+weight*linear;
risk.append(float(d)/risk_set_beta);
time_rec[this_time]=float(d)/risk_set_beta;
risk_unique[i]=float(d)/risk_set_beta;
#print('time_rec');print(time_rec);
for i in range(len(time)):
Lambda.append(sum((array(time)<=next_time_rec[i])*array(risk_unique)));
return([risk,Lambda]);
#disabled in this version
#clinical covariates
# if len(sys.argv)>=9:
# ifile=open(sys.argv[8]);
# ifile.readline();ilines=ifile.readlines();
# cov=[];
# for i in ilines:
# element=re.findall('[^\t\n]+',i);
# temp=[];
# for j in element[1:]:
# temp.append(float(j)+1);
# cov.append(temp);
# else:
cov=[[]]*len(time);
#print('test_cov_input');print(cov);
#binomial MLE optimization functions
def logit(x):
if x<0.001:
x=0.001;
if x>0.999:
x=0.999;
return(log(x/(1-x)));
def veclogit(x):
res=[];
for i in x:
res.append(logit(i));
return(res);
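#example (illustrative): logit(0.5) == 0.0; inputs are first clipped to
#[0.001, 0.999], so logit(0) == logit(0.001) == log(0.001/0.999) ~= -6.91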
def F(I,S,psi,inc_length,skp_length):
res=(2*psi-1-pow(psi,2))/pow(psi,2)/pow(1-psi,2);
#res=0;
res+=-1*I*skp_length*((2*inc_length+skp_length)*psi+skp_length*(1-psi))/pow(psi,2)/pow(inc_length*psi+skp_length*(1-psi),2);
res+=-1*S*inc_length*((2*skp_length+inc_length)*(1-psi)+inc_length*psi)/pow(1-psi,2)/pow(inc_length*psi+skp_length*(1-psi),2);
return(res);
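#note (added): F() serves as a per-exon curvature/information term; both
#update_hazard and myfunc_surv divide beta_psi[e]**2 by it when weighting the
#risk set and when computing the determinant correction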
#function to optimize the vector psi_k1, psi_k2 for all the exons of replicate k
def myfunc_l(x, *args):
I=args[0];S=args[1];cov=args[2];psi=args[3];risk=args[4];Lambda=args[5];event=args[6];
effective_inclusion_length=args[7];effective_skipping_length=args[8];
beta1_length=args[9];beta2_length=args[10];
beta1=args[11][:beta1_length];beta2=args[11][beta1_length:(beta1_length+beta2_length)];
psi_k=args[12];
if sum(I[psi_k])+sum(S[psi_k])<=10:
adj=1;
else:
adj=1;
psi_replace=x;
linear=dot(array(beta1),array(cov[psi_k]))+dot(array(beta2),array(psi_replace));
#log-likelihood
if risk[psi_k]==0:
l1=event[psi_k]*(-1000)+event[psi_k]*linear-Lambda[psi_k]*exp(linear);
else:
l1=event[psi_k]*log(risk[psi_k])+event[psi_k]*linear-Lambda[psi_k]*exp(linear);
l2=0;
for e in range(len(beta2)):
new_psi=effective_inclusion_length*psi_replace[e]/(effective_inclusion_length*psi_replace[e]+effective_skipping_length*(1-psi_replace[e]));
l2+=I[psi_k][e]*log(new_psi)+S[psi_k][e]*log(1-new_psi);
res1=l1+l2*adj;
return(-1*res1);
#function to optimize the vector psi_k1, psi_k2 for all the exons of replicate k
def myfunc_l_der(x, *args):
I=args[0];S=args[1];cov=args[2];psi=args[3];risk=args[4];Lambda=args[5];event=args[6];
effective_inclusion_length=args[7];effective_skipping_length=args[8];
beta1_length=args[9];beta2_length=args[10];
beta1=args[11][:beta1_length];beta2=args[11][beta1_length:(beta1_length+beta2_length)];
psi_k=args[12];
if sum(I[psi_k])+sum(S[psi_k])<=10:
adj=1;
else:
adj=1;
psi_replace=x;
res1=[];
linear=dot(array(beta1),array(cov[psi_k]))+dot(array(beta2),array(psi_replace));
for e in range(len(beta2)):
new_psi=effective_inclusion_length*psi_replace[e]/(effective_inclusion_length*psi_replace[e]+effective_skipping_length*(1-psi_replace[e]));
new_psi_der=effective_inclusion_length*effective_skipping_length/pow(effective_inclusion_length*psi_replace[e]+effective_skipping_length*(1-psi_replace[e]),2);
temp=event[psi_k]*beta2[e]-Lambda[psi_k]*beta2[e]*exp(linear);
temp+=(I[psi_k][e]/new_psi*new_psi_der-S[psi_k][e]/(1-new_psi)*new_psi_der)*adj;
res1.append(float(temp));
return(-1*array(res1));
def myfunc_surv(x, *args):
I=args[0];S=args[1];cov=args[2];psi=args[3];risk=args[4];Lambda=args[5];event=args[6];
effective_inclusion_length=args[7];effective_skipping_length=args[8];
beta1_length=args[9];beta2_length=args[10];output=args[11];
beta1=x[:beta1_length];beta2=x[beta1_length:(beta1_length+beta2_length)];
res1=0;res2=0;res1_l1=0;res1_l2=0;
for i in range(len(event)):
adj=min(1,(sum(I[i])+sum(S[i]))*pow(10,-2));
if sum(I[i])+sum(S[i])<=10:
adj=adj*0.1;
adj=1;
#print('test_myfunc_surv');print(beta1);print(cov[i]);print(beta2);print(psi[i]);
linear=dot(array(beta1),array(cov[i]))+dot(array(beta2),array(veclogit(psi[i])));
#log-likelihood
if risk[i]==0:
l1=event[i]*(-1000)+event[i]*linear-Lambda[i]*exp(linear);
else:
l1=event[i]*log(risk[i])+event[i]*linear-Lambda[i]*exp(linear);
l2=0;
for e in range(len(psi[i])):
new_psi=effective_inclusion_length*psi[i][e]/(effective_inclusion_length*psi[i][e]+effective_skipping_length*(1-psi[i][e]));
l2+=I[i][e]*log(new_psi)+S[i][e]*log(1-new_psi);
#print('test_l2');print(l2);print(psi[i]);print(I[i][e]);print(S[i][e]);
res1+=(l1+l2)*adj;
#res1+=(l1+l2);
res1_l1+=l1;res1_l2+=l2;
#determinant
F_prod=1;F_sum=0;
for e in range(len(psi[i])):
this_F=F(I[i][e],S[i][e],psi[i][e],effective_inclusion_length,effective_skipping_length);
F_prod=F_prod*this_F;
F_sum=F_sum+pow(beta2[e],2)/this_F;
res2+=(-0.5*log(1-F_sum*Lambda[i]*exp(linear)))*adj;
#res2+=(-0.5*log(1-F_sum*Lambda[i]*exp(linear)));
#print('test_F_prod');print(F_prod);
#if output==1:
# print('test_res');print(res1);print(res2);print(res1_l1);print(res1_l2);
res1=float(res1);res2=float(res2);
return(-1*(res2+res1));
def myfunc_surv_der(x, *args):
I=args[0];S=args[1];cov=args[2];psi=args[3];risk=args[4];Lambda=args[5];event=args[6];
effective_inclusion_length=args[7];effective_skipping_length=args[8];
beta1_length=args[9];beta2_length=args[10];output=args[11];
beta1=x[:beta1_length];beta2=x[beta1_length:(beta1_length+beta2_length)];
#res1_deter: derivative of beta1 in the determinant; #res2_deter: derivative of beta2 in the determinant
res1_deter=[];res2_deter=[];
#res1: derivative of beta1 in the log-likelihood; #res2
res1=[];res2=[];
for j in beta1:
res1_deter.append(0);res1.append(0);
for j in beta2:
res2_deter.append(0);res2.append(0);
#print('test_res');print(res1_deter);print(res2_deter);
for i in range(len(event)):
adj=min(1,(sum(I[i])+sum(S[i]))*pow(10,-2));
if sum(I[i])+sum(S[i])<=10:
adj=adj*0.1;
adj=1;
#res1_deter: derivative of beta1 in the determinant; #res2_deter: derivative of beta2 in the determinant
this_res1_deter=[];this_res2_deter=[];
linear=dot(array(beta1),array(cov[i]))+dot(array(beta2),array(veclogit(psi[i])));
F_prod=1;F_array=[];
for e in range(len(psi[i])):
F_prod=F_prod*F(I[i][e],S[i][e],psi[i][e],effective_inclusion_length,effective_skipping_length);
F_array.append(F(I[i][e],S[i][e],psi[i][e],effective_inclusion_length,effective_skipping_length));
#determnant
deter=(1-sum(pow(array(beta2),2)/array(F_array))*Lambda[i]*exp(linear));
#beta2_der
for e in range(len(beta2)):
this_res2_deter.append((-1*Lambda[i])*exp(linear)*(2*beta2[e]/F_array[e]+sum(pow(array(beta2),2)/array(F_array))*logit(psi[i][e])));
res2[e]+=(event[i]*logit(psi[i][e])-Lambda[i]*logit(psi[i][e])*exp(linear))*adj;
#beta1_der
for c in range(len(beta1)):
this_res1_deter.append(sum(pow(array(beta2),2)/array(F_array))*(-1*Lambda[i])*exp(linear)*(cov[i][c]));
res1[c]+=(event[i]*cov[i][c]-Lambda[i]*cov[i][c]*exp(linear))*adj;
res1_deter=array(res1_deter)+(-0.5)*array(this_res1_deter)/deter*adj;
res2_deter=array(res2_deter)+(-0.5)*array(this_res2_deter)/deter*adj;
res=array(list(array(res1_deter)+array(res1))+list(array(res2_deter)+array(res2)))
#print('test_res');print(res);print(res1_deter);print(res2_deter);
return(-1*res);
def myfunc_surv_beta0(x, *args):
I=args[0];S=args[1];cov=args[2];psi=args[3];risk=args[4];Lambda=args[5];event=args[6];
effective_inclusion_length=args[7];effective_skipping_length=args[8];
beta1_length=args[9];beta2_length=args[10];
beta1=x[:beta1_length];beta2=array(x[beta1_length:(beta1_length+beta2_length)]);
beta2[:]=0;
res=myfunc_surv(list(beta1)+list(beta2),I,S,cov,psi,risk,Lambda,event,effective_inclusion_length,effective_skipping_length,beta1_length,beta2_length);
return(res);
def myfunc_surv_beta0_der(x, *args):
I=args[0];S=args[1];cov=args[2];psi=args[3];risk=args[4];Lambda=args[5];event=args[6];
effective_inclusion_length=args[7];effective_skipping_length=args[8];
beta1_length=args[9];beta2_length=args[10];
beta1=x[:beta1_length];beta2=array(x[beta1_length:(beta1_length+beta2_length)]);
beta2[:]=0;
res=myfunc_surv_der(list(beta1)+list(beta2),I,S,cov,psi,risk,Lambda,event,effective_inclusion_length,effective_skipping_length,beta1_length,beta2_length);
return(res[:beta1_length]);
def MLE_iteration(I,S,cov,time,risk,Lambda,event,effective_inclusion_length,effective_skipping_length,beta_initial):
psi=vec2psi(I,S,effective_inclusion_length,effective_skipping_length);
beta_psi=[beta_initial]*len(I[0]);beta_cov=[0]*len(cov[0]);
iter_cutoff=1;iter_maxrun=100;count=0;previous_sum=0;min_sum=pow(10,10);min_sum_beta_psi=0;
#while(((abs(iter_cutoff)>0.001)&(abs(iter_cutoff)/(previous_sum+1)>pow(10,-6)))&(count<=iter_maxrun)):
while (abs(iter_cutoff)>0.01)&(count<=iter_maxrun):
if ((min_sum-previous_sum)<-2)&(count>10):
break;
#update baseline hazard
res=update_hazard(beta_cov,beta_psi,cov,psi,time,event,I,S,effective_inclusion_length,effective_skipping_length);
risk=res[0];Lambda=res[1];
#print('test_risk');print(risk);print(sorted(Lambda));
#update beta
xopt=fmin_l_bfgs_b(myfunc_surv,list(beta_cov)+list(beta_psi),myfunc_surv_der,args=[I,S,cov,psi,risk,Lambda,event,effective_inclusion_length,effective_skipping_length,len(cov[0]),len(psi[0]),0],iprint=-1);
#print('unconstrain_MLE_xopt');print(xopt);
beta_cov=xopt[0][:len(cov[0])];beta_psi=xopt[0][len(cov[0]):];
myfunc_surv(list(beta_cov)+list(beta_psi),I,S,cov,psi,risk,Lambda,event,effective_inclusion_length,effective_skipping_length,len(cov[0]),len(psi[0]),1);
current_sum=xopt[1];
if previous_sum!=0:
iter_cutoff=current_sum-previous_sum;
previous_sum=current_sum;count+=1;
if min_sum>current_sum:
min_sum=current_sum;
min_sum_beta_psi=beta_psi;
if sum(abs(array(beta_psi))>=20)>0:
break;
#update psi
#if (iter_cutoff>0.1)&(count<=2):
# for i in range(len(event)):
# psi_init=[];psi_bound=[];
# for e in range(len(beta_psi)):
# psi_init.append(psi[i][e]);
# psi_bound.append([0.001,0.999]);
# xopt=fmin_l_bfgs_b(myfunc_l,psi_init,myfunc_l_der,args=[I,S,cov,psi,risk,Lambda,event,effective_inclusion_length,effective_skipping_length,len(cov[0]),len(psi[0]),list(beta_cov)+list(beta_psi),i],bounds=psi_bound,iprint=-1);
# psi[i]=list(xopt[0]);
# print('test_xopt');print(xopt);
#print('test_psi');print(list(psi));
#return([current_sum,[beta_cov,beta_psi,psi]]);
return([min_sum,[beta_cov,min_sum_beta_psi,psi]]);
def MLE_iteration_constrain(I,S,cov,time,risk,Lambda,event,effective_inclusion_length,effective_skipping_length,psi):
psi=vec2psi(I,S,effective_inclusion_length,effective_skipping_length);
beta_psi=[0]*len(I[0]);beta_cov=[0]*len(cov[0]);
iter_cutoff=1;iter_maxrun=100;count=0;previous_sum=0;min_sum=pow(10,10);
while (abs(iter_cutoff)>0.01)&(count<=iter_maxrun):
#update baseline hazard
res=update_hazard(beta_cov,beta_psi,cov,psi,time,event,I,S,effective_inclusion_length,effective_skipping_length);
risk=res[0];Lambda=res[1];
#print('test_risk');print(risk);print(sorted(Lambda));
#update beta
if len(cov[0])==0:
xopt=myfunc_surv(beta_cov+beta_psi,I,S,cov,psi,risk,Lambda,event,effective_inclusion_length,effective_skipping_length,len(cov[0]),len(I[0]),1);
current_sum=xopt;
else:
temp=[[-10,10]*(len(cov[0]))];
xopt=fmin_l_bfgs_b(myfunc_surv_beta0,list(beta_cov),myfunc_surv_beta0_der,args=[I,S,cov,psi,risk,Lambda,event,effective_inclusion_length,effective_skipping_length,len(cov[0]),len(psi[0])],bounds=temp,iprint=-1);
beta_cov=xopt[0][:len(cov[0])];
current_sum=xopt[1];
#print('constrain_MLE_xopt');print(xopt);
if previous_sum!=0:
iter_cutoff=current_sum-previous_sum;
previous_sum=current_sum;count+=1;
if min_sum>current_sum:
min_sum=current_sum;
#update psi
#if (iter_cutoff>0.1)&(count<=2):
# for i in range(len(event)):
# psi_init=[];psi_bound=[];
# for e in range(len(beta_psi)):
# psi_init.append(psi[i][e]);
# psi_bound.append([0.05,0.95]);
# xopt=fmin_l_bfgs_b(myfunc_l,psi_init,myfunc_l_der,args=[I,S,cov,psi,risk,Lambda,event,effective_inclusion_length,effective_skipping_length,len(cov[0]),len(psi[0]),list(beta_cov)+list(beta_psi),i],bounds=psi_bound,iprint=-1);
# psi[i]=list(xopt[0]);
#print('constrain_MLE_xopt_psi');print(xopt);
#print('test_psi');print(list(psi));
return([min_sum,[beta_cov,beta_psi,psi]]);
#Likelihood Ratio Test Function
def likelihood_test(i1,s1,cov,time,risk,Lambda,event,effective_inclusion_length,effective_skipping_length,flag,id,beta_initial):
#print('testing'+str(id));
if flag==0:
return([1,1]);
#return([scipy.stats.uniform.rvs(0.1,0.9,1),1]);
else:
res=MLE_iteration(i1,s1,cov,time,risk,Lambda,event,effective_inclusion_length,effective_skipping_length,beta_initial);
res_constrain=MLE_iteration_constrain(i1,s1,cov,time,risk,Lambda,event,effective_inclusion_length,effective_skipping_length,res[1][2]);
#print('test');print(res);print(res_constrain);
#return([1-scipy.stats.chi2.cdf(10*(abs(res_constrain[0]-res[0])),1)]);
temp=scipy.stats.chi2.sf(2*(res_constrain[0]-res[0]),1);
if temp<=0.1:
temp=0.5*temp;
return([min(temp,1)]);
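#note (added): likelihood_test is a 1-degree-of-freedom likelihood ratio test:
#res[0] and res_constrain[0] are minimized negative log-likelihoods, so
#2*(res_constrain[0]-res[0]) is the LRT statistic referred to chi2(1);
#p-values <= 0.1 are additionally halved before being returned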
#MultiProcessorFunction
def MultiProcessorPool(n_original_diff):
i1=n_original_diff[0];s1=n_original_diff[1];
effective_inclusion_length=n_original_diff[2];effective_skipping_length=n_original_diff[3];
time=n_original_diff[4];risk=n_original_diff[5];Lambda=n_original_diff[6];event=n_original_diff[7];
cov=n_original_diff[8];
flag=n_original_diff[9];id=n_original_diff[10];beta_initial=n_original_diff[11];
P=likelihood_test(i1,s1,cov,time,risk,Lambda,event,effective_inclusion_length,effective_skipping_length,flag,id,beta_initial);
return(P);
#Function for vector handling
def vec2float(vec):
res=[];
for i in vec:
res.append(float(i));
return(res);
#despite its name, vecAddOne wraps each count in a one-element list so that
#downstream code can index counts as I[i][e] (one exon per sample)
def vecAddOne(vec):
res=[];
for i in vec:
res.append([i]);
return(res);
def vecprod(vec):
res=1;
for i in vec:
res=res*i;
return(res);
def vecadd(vec1,vec2):
res=[];
for i in range(len(vec1)):
res.append(vec1[i]+vec2[i]);
return(res);
def vec2remove0psi(inc,skp):
res1=[];res2=[];
for i in range(len(inc)):
if (inc[i]!=0) | (skp[i]!=0):
res1.append(inc[i]);res2.append(skp[i]);
return([res1,res2]);
def vec2psi_single(inc,skp,effective_inclusion_length,effective_skipping_length):
psi=[];
inclusion_length=effective_inclusion_length;
skipping_length=effective_skipping_length;
for i in range(len(inc)):
temp=float(inc[i])/inclusion_length/(float(inc[i])/inclusion_length+float(skp[i])/skipping_length);
if temp<=0.001:
temp=0.001;
if temp>0.999:
temp=0.999;
psi.append(temp);
return(psi);
def vec2psi(inc,skp,effective_inclusion_length,effective_skipping_length):
psi=[];
inclusion_length=effective_inclusion_length;
skipping_length=effective_skipping_length;
for i in range(len(inc)):
psi.append([]);
for e in range(len(inc[0])):
temp=float(inc[i][e])/inclusion_length/(float(inc[i][e])/inclusion_length+float(skp[i][e])/skipping_length);
if temp<=0.001:
temp=0.001;
if temp>0.999:
temp=0.999;
psi[i].append(temp);
return(psi);
def vec210(vec):
res=[];
for i in vec:
if i>0:
res.append(1);
else:
res.append(-1);
return(res);
def vec_remove_na_surv(inc,skp,time,risk,Lambda,event,cov):
res_inc=[];res_skp=[];res_time=[];res_risk=[];res_Lambda=[];res_event=[];res_cov=[];
for i in range(len(inc)):
if (float(inc[i])+float(skp[i]))>0:
if (time[i]>0):
res_inc.append(inc[i]);
res_skp.append(skp[i]);
res_time.append(time[i]);
res_risk.append(risk[i]);
res_Lambda.append(Lambda[i]);
res_event.append(event[i]);
res_cov.append(cov[i]);
return([res_inc,res_skp,res_time,res_risk,res_Lambda,res_event,res_cov]);
def myttest(vec1,vec2):
if (len(vec1)==1) & (len(vec2)==1):
res=stats.ttest_ind([vec1[0],vec1[0]],[vec2[0],vec2[0]]);
else:
res=stats.ttest_ind(vec1,vec2);
return(res);
ifile=open(sys.argv[1]);
title=ifile.readline();
#analyze the title of the input data file to find how many simulation rounds are involved
#the minimum number of simulated rounds is 10; each round count increases by a factor of 10
element=re.findall('[^ \t\n]+',title);
ofile=open(sys.argv[3],'w');
ofile.write(title[:-1]+'\tPValue'+'\n');
list_n_original_diff=[];probability=[];psi_list_1=[];psi_list_2=[];
ilines=ifile.readlines();
for i in range(len(ilines)):
element=re.findall('[^ \t\n]+',ilines[i]);
if "NA" in ilines[i]:
list_n_original_diff.append([0,0,0,0,0,0,0,0,0,0,element[0],0]);
continue;
inc=re.findall('[^,]+',element[1]);skp=re.findall('[^,]+',element[2]);
effective_inclusion_length=int(element[3]);
effective_skipping_length=int(element[4]);
beta_initial=0;
#if len(element)>=6:
# beta_initial=float(element[-1]);
inc=vec2float(inc);skp=vec2float(skp);
temp=vec_remove_na_surv(inc,skp,time,risk,Lambda,event,cov);
inc_nona=temp[0];skp_nona=temp[1];
psi_nona=vec2psi_single(inc_nona,skp_nona,effective_inclusion_length,effective_skipping_length)
time_nona=temp[2];risk_nona=temp[3];Lambda_nona=temp[4];event_nona=temp[5];
cov_nona=temp[6];
#print('psi_nona');print(psi_nona);
inc_95=stats.mstats.mquantiles(inc,0.95);
skp_95=stats.mstats.mquantiles(skp,0.95);
# if (((((len(inc_nona)<=20)|(sum(inc_nona)==0))|(sum(skp_nona)==0))|((len(set(psi_nona))/len(psi_nona))<=0.5))|(inc_95<=5))|(skp_95<=5):
if (((((len(inc_nona)<=20)|(sum(inc_nona)==0))|(sum(skp_nona)==0))|((len(set(psi_nona)))<=10))|(inc_95<=3))|(skp_95<=3):
# print('test');print(len(inc_nona));print(set(psi_nona));print(len(set(psi_nona))/len(psi_nona));print(inc_95);print(skp_95);
list_n_original_diff.append([inc,skp,effective_inclusion_length,effective_skipping_length,time,risk,Lambda,event,cov,0,element[0],beta_initial]);
else:
inc=inc_nona;skp=skp_nona;
inc=vecAddOne(inc);skp=vecAddOne(skp);
list_n_original_diff.append([inc,skp,effective_inclusion_length,effective_skipping_length,time_nona,risk_nona,Lambda_nona,event_nona,cov_nona,1,element[0],beta_initial]);
#if i>2:
# break;
if MultiProcessor>1:
pool=Pool(processes=MultiProcessor);
probability=pool.map(MultiProcessorPool,list_n_original_diff);
else:
for i in range(len(list_n_original_diff)):
probability.append(MultiProcessorPool(list_n_original_diff[i]));
index=0;
for i in range(len(ilines)):
element=re.findall('[^ \t\n]+',ilines[i]);
ofile.write(ilines[i][:-1]+'\t'+str(probability[i][0])+'\n');
ofile.close();
| [
"scipy.stats.chi2.sf",
"numpy.random.seed",
"warnings.filterwarnings",
"scipy.stats.ttest_ind",
"re.findall",
"scipy.stats.mstats.mquantiles",
"multiprocessing.Pool",
"math.log"
] | [((238, 261), 'numpy.random.seed', 'numpy.random.seed', (['(1231)'], {}), '(1231)\n', (255, 261), False, 'import re, os, sys, warnings, numpy, scipy, math, itertools\n'), ((263, 296), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (286, 296), False, 'import re, os, sys, warnings, numpy, scipy, math, itertools\n'), ((18723, 18753), 're.findall', 're.findall', (['"""[^ \t\n]+"""', 'title'], {}), "('[^ \\t\\n]+', title)\n", (18733, 18753), False, 'import re, os, sys, warnings, numpy, scipy, math, itertools\n'), ((669, 694), 're.findall', 're.findall', (['"""[^\t\n]+"""', 'i'], {}), "('[^\\t\\n]+', i)\n", (679, 694), False, 'import re, os, sys, warnings, numpy, scipy, math, itertools\n'), ((3178, 3194), 'math.log', 'log', (['(x / (1 - x))'], {}), '(x / (1 - x))\n', (3181, 3194), False, 'from math import log\n'), ((18957, 18991), 're.findall', 're.findall', (['"""[^ \t\n]+"""', 'ilines[i]'], {}), "('[^ \\t\\n]+', ilines[i])\n", (18967, 18991), False, 'import re, os, sys, warnings, numpy, scipy, math, itertools\n'), ((19099, 19130), 're.findall', 're.findall', (['"""[^,]+"""', 'element[1]'], {}), "('[^,]+', element[1])\n", (19109, 19130), False, 'import re, os, sys, warnings, numpy, scipy, math, itertools\n'), ((19134, 19165), 're.findall', 're.findall', (['"""[^,]+"""', 'element[2]'], {}), "('[^,]+', element[2])\n", (19144, 19165), False, 'import re, os, sys, warnings, numpy, scipy, math, itertools\n'), ((19706, 19740), 'scipy.stats.mstats.mquantiles', 'stats.mstats.mquantiles', (['inc', '(0.95)'], {}), '(inc, 0.95)\n', (19729, 19740), False, 'from scipy import stats\n'), ((19749, 19783), 'scipy.stats.mstats.mquantiles', 'stats.mstats.mquantiles', (['skp', '(0.95)'], {}), '(skp, 0.95)\n', (19772, 19783), False, 'from scipy import stats\n'), ((20618, 20648), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'MultiProcessor'}), '(processes=MultiProcessor)\n', (20622, 20648), False, 'from multiprocessing import Pool\n'), ((20881, 20915), 're.findall', 're.findall', (['"""[^ \t\n]+"""', 'ilines[i]'], {}), "('[^ \\t\\n]+', ilines[i])\n", (20891, 20915), False, 'import re, os, sys, warnings, numpy, scipy, math, itertools\n'), ((15521, 15576), 'scipy.stats.chi2.sf', 'scipy.stats.chi2.sf', (['(2 * (res_constrain[0] - res[0]))', '(1)'], {}), '(2 * (res_constrain[0] - res[0]), 1)\n', (15540, 15576), False, 'import re, os, sys, warnings, numpy, scipy, math, itertools\n'), ((18385, 18440), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['[vec1[0], vec1[0]]', '[vec2[0], vec2[0]]'], {}), '([vec1[0], vec1[0]], [vec2[0], vec2[0]])\n', (18400, 18440), False, 'from scipy import stats\n'), ((18452, 18479), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['vec1', 'vec2'], {}), '(vec1, vec2)\n', (18467, 18479), False, 'from scipy import stats\n'), ((4612, 4624), 'math.log', 'log', (['new_psi'], {}), '(new_psi)\n', (4615, 4624), False, 'from math import log\n'), ((4637, 4653), 'math.log', 'log', (['(1 - new_psi)'], {}), '(1 - new_psi)\n', (4640, 4653), False, 'from math import log\n'), ((4352, 4368), 'math.log', 'log', (['risk[psi_k]'], {}), '(risk[psi_k])\n', (4355, 4368), False, 'from math import log\n'), ((6826, 6838), 'math.log', 'log', (['new_psi'], {}), '(new_psi)\n', (6829, 6838), False, 'from math import log\n'), ((6847, 6863), 'math.log', 'log', (['(1 - new_psi)'], {}), '(1 - new_psi)\n', (6850, 6863), False, 'from math import log\n'), ((6592, 6604), 'math.log', 'log', (['risk[i]'], {}), '(risk[i])\n', (6595, 6604), False, 'from math import log\n')] |
import matplotlib.pyplot as plt
import numpy as np
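# NOTE (added): FreqIdenSIMO is referenced below but its import is missing from
# this snippet; it is assumed to be provided by the surrounding package.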
def siso_freq_iden(win_num=32):
#save_data_list = ["running_time", "yoke_pitch", "theta", "airspeed", "q", "aoa", "VVI", "alt"]
arr = np.load("../data/sweep_data_2017_11_16_11_47.npy")
time_seq_source = arr[:, 0]
ele_seq_source = arr[:, 1]
q_seq_source = arr[:, 4]
vvi_seq_source = arr[:,6]
theta_seq_source = arr[:,2]
airspeed_seq = arr[:, 3]
simo_iden = FreqIdenSIMO(time_seq_source,0.5, 50, ele_seq_source, q_seq_source,theta_seq_source,airspeed_seq, win_num=win_num)
# plt.figure(0)
# simo_iden.plt_bode_plot(0)
# plt.figure(1)
# simo_iden.plt_bode_plot(1)
plt.figure("airspeed num{}".format(win_num))
simo_iden.plt_bode_plot(2)
plt.show()
if __name__ == "__main__":
    siso_freq_iden(128)
| [
"numpy.load",
"matplotlib.pyplot.show"
] | [((194, 244), 'numpy.load', 'np.load', (['"""../data/sweep_data_2017_11_16_11_47.npy"""'], {}), "('../data/sweep_data_2017_11_16_11_47.npy')\n", (201, 244), True, 'import numpy as np\n'), ((750, 760), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (758, 760), True, 'import matplotlib.pyplot as plt\n')] |
import logging
import time
import copy
from functools import partial
from typing import Optional
import numpy as np
from qutip import Qobj
from qutip.parallel import serial_map
from .result import Result
from .structural_conversions import (
extract_controls, extract_controls_mapping, control_onto_interval,
pulse_options_dict_to_list, pulse_onto_tlist,
plug_in_pulse_values, discretize)
from .mu import derivative_wrt_pulse
from .info_hooks import chain
__all__ = ['optimize_pulses']
def _overlap(a, b) -> Optional[complex]:
"""Complex overlap of two quantum objects.
If `a`, `b` are not quantum objects or are not compatible, return None.
"""
if isinstance(a, Qobj) and isinstance(b, Qobj):
if a.dims != b.dims:
return None
if a.type == b.type == 'oper':
return complex((a.dag() * b).tr())
else:
return a.overlap(b)
else:
return None
def optimize_pulses(
objectives,
pulse_options,
tlist,
*,
propagator,
chi_constructor,
mu=None,
sigma=None,
iter_start=0,
iter_stop=5000,
check_convergence=None,
state_dependent_constraint=None,
info_hook=None,
modify_params_after_iter=None,
storage='array',
parallel_map=None,
store_all_pulses=False
):
r"""Use Krotov's method to optimize towards the given `objectives`
Optimize all time-dependent controls found in the Hamiltonians of the given
`objectives`.
Args:
objectives (list[Objective]): List of objectives
pulse_options (dict): Mapping of time-dependent controls found in the
Hamiltonians of the objectives to :class:`.PulseOptions` instances.
There must be a mapping for each control. As numpy arrays are
unhashable and thus cannot be used as dict keys, the options for a
``control`` that is an array must set using
``pulse_options[id(control)] = ...``
tlist (numpy.ndarray): Array of time grid values, cf.
:func:`~qutip.mesolve.mesolve`
propagator (callable): Function that propagates the state backward or
forwards in time by a single time step, between two points in
`tlist`
chi_constructor (callable): Function that calculates the boundary
condition for the backward propagation. This is where the
final-time functional (indirectly) enters the optimization.
mu (None or callable): Function that calculates the derivative
$\frac{\partial H}{\partial\epsilon}$ for an equation of motion
$\dot{\phi}(t) = -i H[\phi(t)]$ of an abstract operator $H$ and an
abstract state $\phi$. If None, defaults to
:func:`krotov.mu.derivative_wrt_pulse`, which covers the standard
Schrödinger and master equations. See :mod:`krotov.mu` for a
full explanation of the role of `mu` in the optimization, and the
required function signature.
sigma (None or callable): Function that calculates the second-order
Krotov term. If None, the first-order Krotov method is used.
iter_start (int): The formal iteration number at which to start the
optimization
iter_stop (int): The iteration number after which to end the
optimization, whether or not convergence has been reached
check_convergence (None or callable): Function that determines whether
the optimization has converged. If None, the optimization will only
end when `iter_stop` is reached. See :mod:`krotov.convergence` for
details.
state_dependent_constraint (None or callable): Function that evaluates
a state-dependent constraint. If None, optimize without any
state-dependent constraint. Currently not implemented.
info_hook (None or callable): Function that is called after each
iteration of the optimization, for the purpose of analysis. Any
value returned by `info_hook` (e.g. an evaluated functional J_T)
will be stored, for each iteration, in the `info_vals` attribute of
the returned :class:`.Result`. The `info_hook` must have the same
signature as :func:`krotov.info_hooks.print_debug_information`. It
should not modify its arguments except for `shared_data` in any
way.
modify_params_after_iter (None or callable): Function that is called
after each iteration, which may modify its arguments for certain
advanced use cases, such as dynamically adjusting `lambda_vals`, or
applying spectral filters to the `optimized_pulses`. It has the
same interface as `info_hook` but should not return anything. The
`modify_params_after_iter` function is called immediately before
`info_hook`, and can transfer arbitrary data to any subsequent
`info_hook` via the `shared_data` argument.
storage (callable): Storage constructor for the storage of
propagated states. Must accept an integer parameter `N` and return
an empty array of length `N`. The default value 'array' is
equivalent to ``functools.partial(numpy.empty, dtype=object)``.
parallel_map (callable or None): Parallel function evaluator. The
argument must have the same specification as
:func:`qutip.parallel.serial_map`,
which is used when None is passed. Alternatives are
:func:`qutip.parallel.parallel_map` or
:func:`qutip.ipynbtools.parallel_map`.
store_all_pulses (bool): Whether or not to store the optimized pulses
from *all* iterations in :class:`.Result`.
Returns:
Result: The result of the optimization.
Raises:
ValueError: If any controls are not real-valued, or if any update
shape is not a real-valued function in the range [0, 1].
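Example:
    A minimal, hypothetical sketch of assembling `pulse_options` (the
    control reference ``H[1][1]`` and the constant shape function are
    illustrative only, not taken from this module)::

        def S(t):
            # update shape; any real-valued function into [0, 1] works
            return 1.0

        pulse_options = {H[1][1]: PulseOptions(lambda_a=5, shape=S)}

    For a ``control`` that is a numpy array, key the entry by
    ``id(control)`` instead, as described above.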
"""
logger = logging.getLogger('krotov')
# Initialization
logger.info("Initializing optimization with Krotov's method")
if mu is None:
mu = derivative_wrt_pulse
if sigma is not None:
raise NotImplementedError("Second order")
if modify_params_after_iter is not None:
# From a technical perspective, the `modify_params_after_iter` is
# really just another info_hook, the only difference is the
# convention that info_hooks shouldn't modify the parameters.
if info_hook is None:
info_hook = modify_params_after_iter
else:
info_hook = chain(modify_params_after_iter, info_hook)
if state_dependent_constraint is not None:
raise NotImplementedError("state_dependent_constraint")
adjoint_objectives = [obj.adjoint for obj in objectives]
if storage == 'array':
storage = partial(np.empty, dtype=object)
if parallel_map is None:
parallel_map = serial_map
(
guess_controls,
guess_pulses,
pulses_mapping,
lambda_vals,
shape_arrays,
) = _initialize_krotov_controls(objectives, pulse_options, tlist)
result = Result()
result.start_local_time = time.localtime()
# Initial forward-propagation
tic = time.time()
forward_states = parallel_map(
_forward_propagation,
list(range(len(objectives))),
(objectives, guess_pulses, pulses_mapping, tlist, propagator, storage),
)
toc = time.time()
fw_states_T = [states[-1] for states in forward_states]
tau_vals = [
_overlap(state_T, obj.target)
for (state_T, obj) in zip(fw_states_T, objectives)
]
info = info_hook(
objectives=objectives,
adjoint_objectives=adjoint_objectives,
backward_states=None,
forward_states=forward_states,
optimized_pulses=guess_pulses,
lambda_vals=lambda_vals,
shape_arrays=shape_arrays,
fw_states_T=fw_states_T,
tau_vals=tau_vals,
start_time=tic,
stop_time=toc,
iteration=0,
shared_data={},
)
# Initialize result
result.tlist = tlist
result.objectives = objectives
result.guess_controls = guess_controls
result.controls_mapping = pulses_mapping
result.info_vals.append(info)
result.iters.append(0)
result.tau_vals.append(tau_vals)
if store_all_pulses:
result.all_pulses.append(guess_pulses)
result.states = fw_states_T
for krotov_iteration in range(iter_start + 1, iter_stop + 1):
logger.info("Started Krotov iteration %d" % krotov_iteration)
tic = time.time()
# Boundary condition for the backward propagation
# -- this is where the functional enters the optimization
chi_states = chi_constructor(fw_states_T, objectives, result.tau_vals)
chi_norms = [chi.norm() for chi in chi_states]
# normalizing χ improves numerical stability; the norm then has to be
# taken into account when calculating Δϵ
chi_states = [chi / nrm for (chi, nrm) in zip(chi_states, chi_norms)]
# Backward propagation
backward_states = parallel_map(
_backward_propagation,
list(range(len(objectives))),
(
chi_states,
adjoint_objectives,
guess_pulses,
pulses_mapping,
tlist,
propagator,
storage,
),
)
# Forward propagation and pulse update
logger.info("Started forward propagation/pulse update")
# forward_states_from_previous_iter = forward_states
forward_states = [storage(len(tlist)) for _ in range(len(objectives))]
for i_obj in range(len(objectives)):
forward_states[i_obj][0] = objectives[i_obj].initial_state
delta_eps = [
np.zeros(len(tlist) - 1, dtype=np.complex128) for _ in guess_pulses
]
optimized_pulses = copy.deepcopy(guess_pulses)
for time_index in range(len(tlist) - 1): # iterate over time intervals
# pulse update
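# First-order Krotov update computed below:
#   Δϵ(t) = (S(t)/λₐ) * Im Σ_k ⟨χ_k(t)|∂H/∂ϵ|Ψ_k(t)⟩,
# summed over the objectives k; multiplying each overlap by chi_norms[k]
# undoes the normalization applied to the χ_k before backward propagation.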
for (i_pulse, guess_pulse) in enumerate(guess_pulses):
for (i_obj, objective) in enumerate(objectives):
χ = backward_states[i_obj][time_index]
μ = mu(
objectives,
i_obj,
guess_pulses,
pulses_mapping,
i_pulse,
time_index,
)
Ψ = forward_states[i_obj][time_index]
update = _overlap(χ, μ(Ψ)) # ⟨χ|μ|Ψ⟩
update *= chi_norms[i_obj]
delta_eps[i_pulse][time_index] += update
λₐ = lambda_vals[i_pulse]
S_t = shape_arrays[i_pulse][time_index]
Δϵ = (S_t / λₐ) * delta_eps[i_pulse][time_index].imag
optimized_pulses[i_pulse][time_index] += Δϵ
# forward propagation
fw_states = parallel_map(
_forward_propagation_step,
list(range(len(objectives))),
(
forward_states,
objectives,
optimized_pulses,
pulses_mapping,
tlist,
time_index,
propagator,
),
)
# storage
for i_obj in range(len(objectives)):
forward_states[i_obj][time_index + 1] = fw_states[i_obj]
logger.info("Finished forward propagation/pulse update")
fw_states_T = fw_states
tau_vals = [
_overlap(fw_state_T, obj.target)
for (fw_state_T, obj) in zip(fw_states_T, objectives)
]
toc = time.time()
# Display information about iteration
if info_hook is None:
info = None
else:
info = info_hook(
objectives=objectives,
adjoint_objectives=adjoint_objectives,
backward_states=backward_states,
forward_states=forward_states,
fw_states_T=fw_states_T,
optimized_pulses=optimized_pulses,
lambda_vals=lambda_vals,
shape_arrays=shape_arrays,
tau_vals=tau_vals,
start_time=tic,
stop_time=toc,
shared_data={},
iteration=krotov_iteration,
)
# Update optimization `result` with info from finished iteration
result.iters.append(krotov_iteration)
result.iter_seconds.append(int(toc - tic))
result.info_vals.append(info)
result.tau_vals.append(tau_vals)
result.optimized_controls = optimized_pulses
if store_all_pulses:
result.all_pulses.append(optimized_pulses)
result.states = fw_states_T
logger.info("Finished Krotov iteration %d" % krotov_iteration)
# Convergence check
msg = None
if check_convergence is not None:
msg = check_convergence(result)
if bool(msg) is True: # this is not an anti-pattern!
result.message = "Reached convergence"
if isinstance(msg, str):
result.message += ": " + msg
break
else:
# prepare for next iteration
guess_pulses = optimized_pulses
else: # optimization finished without `check_convergence` break
result.message = "Reached %d iterations" % iter_stop
# Finalize
result.end_local_time = time.localtime()
for i, pulse in enumerate(optimized_pulses):
result.optimized_controls[i] = pulse_onto_tlist(pulse)
return result
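# A hypothetical call sketch, kept as a comment (assuming the optimizer
# defined above is krotov's `optimize_pulses`; `objectives`,
# `pulse_options`, and `tlist` must be built by the caller, and the
# propagator/chi_constructor shown assume standard krotov helpers):
#
#     result = optimize_pulses(
#         objectives, pulse_options, tlist,
#         propagator=krotov.propagators.expm,
#         chi_constructor=krotov.functionals.chis_re,
#         info_hook=krotov.info_hooks.print_debug_information,
#         iter_stop=10,
#     )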
def _initialize_krotov_controls(objectives, pulse_options, tlist):
"""Extract discretized guess controls and pulses from `objectives`, and
return them with the associated mapping and option data"""
guess_controls = extract_controls(objectives)
pulses_mapping = extract_controls_mapping(objectives, guess_controls)
options_list = pulse_options_dict_to_list(pulse_options, guess_controls)
guess_controls = [discretize(control, tlist) for control in guess_controls]
for control in guess_controls:
if np.iscomplexobj(control):
raise ValueError(
"All controls must be real-valued. Complex controls must be "
"split into an independent real and imaginary part in the "
"objectives before passing them to the optimization")
guess_pulses = [ # defined on the tlist intervals
control_onto_interval(control)
for control in guess_controls]
lambda_vals = [options.lambda_a for options in options_list]
shape_arrays = []
for options in options_list:
S = discretize(options.shape, tlist, args=())
shape_arrays.append(control_onto_interval(S))
for shape_array in shape_arrays:
if np.iscomplexobj(shape_array):
raise ValueError(
"Update shapes (shape attribute in PulseOptions) must be "
"real-valued")
if np.min(shape_array) < 0 or np.max(shape_array) > 1.01:
# 1.01 accounts for rounding errors: In principle, shapes > 1 are
# not a problem, but then it cancels with λₐ, which makes things
# unnecessarily confusing.
raise ValueError(
"Update shapes (shape attribute in PulseOptions) must have "
"values in the range [0, 1]")
return (
guess_controls, guess_pulses, pulses_mapping, lambda_vals,
shape_arrays)
def _forward_propagation(
i_objective,
objectives,
pulses,
pulses_mapping,
tlist,
propagator,
storage,
store_all=True,
):
"""Forward propagation of the initial state of a single objective over the
entire `tlist`"""
logger = logging.getLogger('krotov')
logger.info(
"Started initial forward propagation of objective %d", i_objective
)
obj = objectives[i_objective]
state = obj.initial_state
if store_all:
storage_array = storage(len(tlist))
storage_array[0] = state
mapping = pulses_mapping[i_objective]
for time_index in range(len(tlist) - 1): # index over intervals
H = plug_in_pulse_values(obj.H, pulses, mapping[0], time_index)
c_ops = [
plug_in_pulse_values(c_op, pulses, mapping[ic + 1], time_index)
for (ic, c_op) in enumerate(obj.c_ops)
]
dt = tlist[time_index + 1] - tlist[time_index]
state = propagator(H, state, dt, c_ops)
if store_all:
storage_array[time_index + 1] = state
logger.info(
"Finished initial forward propagation of objective %d", i_objective
)
if store_all:
return storage_array
else:
return state
def _backward_propagation(
i_state,
chi_states,
adjoint_objectives,
pulses,
pulses_mapping,
tlist,
propagator,
storage,
):
"""Backward propagation of chi_states[i_state] over the entire `tlist`"""
logger = logging.getLogger('krotov')
logger.info("Started backward propagation of state %d", i_state)
state = chi_states[i_state]
obj = adjoint_objectives[i_state]
storage_array = storage(len(tlist))
storage_array[-1] = state
mapping = pulses_mapping[i_state]
for time_index in range(len(tlist) - 2, -1, -1): # index bw over intervals
H = plug_in_pulse_values(
obj.H, pulses, mapping[0], time_index, conjugate=True
)
c_ops = [
plug_in_pulse_values(c_op, pulses, mapping[ic + 1], time_index)
for (ic, c_op) in enumerate(obj.c_ops)
]
dt = tlist[time_index + 1] - tlist[time_index]
state = propagator(H, state, dt, c_ops, backwards=True)
storage_array[time_index] = state
logger.info("Finished backward propagation of state %d", i_state)
return storage_array
def _forward_propagation_step(
i_state,
states,
objectives,
pulses,
pulses_mapping,
tlist,
time_index,
propagator,
):
"""Forward-propagate states[i_state] by a single time step"""
state = states[i_state][time_index]
obj = objectives[i_state]
mapping = pulses_mapping[i_state]
H = plug_in_pulse_values(obj.H, pulses, mapping[0], time_index)
c_ops = [
plug_in_pulse_values(c_op, pulses, mapping[ic + 1], time_index)
for (ic, c_op) in enumerate(obj.c_ops)
]
dt = tlist[time_index + 1] - tlist[time_index]
return propagator(H, state, dt, c_ops)
| [
"functools.partial",
"copy.deepcopy",
"numpy.iscomplexobj",
"logging.getLogger",
"time.time",
"numpy.min",
"numpy.max",
"time.localtime"
] | [((6044, 6071), 'logging.getLogger', 'logging.getLogger', (['"""krotov"""'], {}), "('krotov')\n", (6061, 6071), False, 'import logging\n'), ((7262, 7278), 'time.localtime', 'time.localtime', ([], {}), '()\n', (7276, 7278), False, 'import time\n'), ((7324, 7335), 'time.time', 'time.time', ([], {}), '()\n', (7333, 7335), False, 'import time\n'), ((7535, 7546), 'time.time', 'time.time', ([], {}), '()\n', (7544, 7546), False, 'import time\n'), ((13774, 13790), 'time.localtime', 'time.localtime', ([], {}), '()\n', (13788, 13790), False, 'import time\n'), ((16096, 16123), 'logging.getLogger', 'logging.getLogger', (['"""krotov"""'], {}), "('krotov')\n", (16113, 16123), False, 'import logging\n'), ((17319, 17346), 'logging.getLogger', 'logging.getLogger', (['"""krotov"""'], {}), "('krotov')\n", (17336, 17346), False, 'import logging\n'), ((6924, 6955), 'functools.partial', 'partial', (['np.empty'], {'dtype': 'object'}), '(np.empty, dtype=object)\n', (6931, 6955), False, 'from functools import partial\n'), ((8690, 8701), 'time.time', 'time.time', ([], {}), '()\n', (8699, 8701), False, 'import time\n'), ((10062, 10089), 'copy.deepcopy', 'copy.deepcopy', (['guess_pulses'], {}), '(guess_pulses)\n', (10075, 10089), False, 'import copy\n'), ((11947, 11958), 'time.time', 'time.time', ([], {}), '()\n', (11956, 11958), False, 'import time\n'), ((14456, 14480), 'numpy.iscomplexobj', 'np.iscomplexobj', (['control'], {}), '(control)\n', (14471, 14480), True, 'import numpy as np\n'), ((15145, 15173), 'numpy.iscomplexobj', 'np.iscomplexobj', (['shape_array'], {}), '(shape_array)\n', (15160, 15173), True, 'import numpy as np\n'), ((15322, 15341), 'numpy.min', 'np.min', (['shape_array'], {}), '(shape_array)\n', (15328, 15341), True, 'import numpy as np\n'), ((15349, 15368), 'numpy.max', 'np.max', (['shape_array'], {}), '(shape_array)\n', (15355, 15368), True, 'import numpy as np\n')] |
#Pad(constant_pad)
import onnx
from onnx import helper
from onnx import numpy_helper
from onnx import AttributeProto, TensorProto, GraphProto
import numpy as np
from Compare_output import compare
# Create the inputs (ValueInfoProto)
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 3, 4, 5])
pads = helper.make_tensor_value_info('pads', TensorProto.INT64, [8])
value = helper.make_tensor_value_info('value', TensorProto.FLOAT, [1,])
# Create one output (ValueInfoProto)
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 3, 7, 12])
# Create a node (NodeProto)
node_def = helper.make_node(
'Pad',
inputs=['x', 'pads', 'value'],
outputs=['y'],
mode='constant'
)
# Create the graph (GraphProto)
graph_def = helper.make_graph(
[node_def],
'test-model',
[x, pads, value],
[y],
)
# Create the model (ModelProto)
model_def = helper.make_model(graph_def, producer_name='onnx-Pad')
print('The model is:\n{}'.format(model_def))
onnx.checker.check_model(model_def)
print('The model is checked!')
# Save the ONNX model
import os
path = os.getcwd()
new_model_path = os.path.join(path, '../onnx_generated_models/Pad.onnx')
onnx.save(model_def, new_model_path)
print('The model is saved.')
# Preprocessing: load the ONNX model (loading an already existing model)
model_path1 = os.path.join(path, '../onnx_generated_models/Pad.onnx')
onnx_model1 = onnx.load(model_path1)
print('The model is:\n{}'.format(onnx_model1))
def pad_impl(data, raw_pads, mode, constant_values=0.0): # type: ignore
input_rank = data.ndim
if input_rank * 2 != raw_pads.size:
raise Exception('The number of elements in raw_pads should be 2 * data_rank')
# re-order to np.pad accepted order ((x1_begin, x1_end), (x2_begin, x2_end), ...)
pad_width = ()
for i in range(int(raw_pads.size / 2)):
pad_width += ((raw_pads[i], raw_pads[i + input_rank]),)  # type: ignore  # one (begin, end) pair per axis
if mode == 'constant':
y = np.pad(
data,
pad_width=pad_width,
mode=mode,
constant_values=constant_values,
)
return y
y = np.pad(
data,
pad_width=pad_width,
mode=mode,
)
return y
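# Worked shape check for the values used below: pads = [0, 0, 1, 3, 0, 0, 2, 4]
# splits into begin pads (0, 0, 1, 3) and end pads (0, 0, 2, 4), so an input of
# shape (1, 3, 4, 5) becomes (1+0+0, 3+0+0, 4+1+2, 5+3+4) = (1, 3, 7, 12),
# matching the declared output tensor 'y'.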
x = np.random.randn(1, 3, 4, 5).astype(np.float32)
pads = np.array([0, 0, 1, 3, 0, 0, 2, 4]).astype(np.int64) # pad order [x1_begin, x2_begin, ..., x1_end, x2_end, ...]
value = np.array([1.2]).astype(np.float32)
y_actual = pad_impl(
x,
pads,
'constant',
1.2
)
#Running the model using ONNX Runtime
import onnxruntime as rt
import numpy
sess = rt.InferenceSession("../onnx_generated_models/Pad.onnx")
input_name = sess.get_inputs()[0].name
input_name1 = sess.get_inputs()[1].name
input_name2 = sess.get_inputs()[2].name
label_name = sess.get_outputs()[0].name
y_pred = sess.run(
[label_name], {input_name: x,
input_name1: pads,
input_name2: value
})
print("The predicted output for the operation: Pad")
print(y_pred)
y_pred = np.asarray(y_pred) #converting list into an array
print(y_pred.shape)
y_pred = np.squeeze(y_pred, axis=0)
print(y_pred.shape)
compare(y_actual, y_pred) | [
"numpy.pad",
"onnx.helper.make_node",
"onnx.save",
"numpy.random.randn",
"onnx.helper.make_model",
"os.getcwd",
"onnx.helper.make_tensor_value_info",
"numpy.asarray",
"Compare_output.compare",
"onnxruntime.InferenceSession",
"numpy.array",
"onnx.load",
"numpy.squeeze",
"onnx.checker.check_... | [((240, 307), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""x"""', 'TensorProto.FLOAT', '[1, 3, 4, 5]'], {}), "('x', TensorProto.FLOAT, [1, 3, 4, 5])\n", (269, 307), False, 'from onnx import helper\n'), ((316, 377), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""pads"""', 'TensorProto.INT64', '[8]'], {}), "('pads', TensorProto.INT64, [8])\n", (345, 377), False, 'from onnx import helper\n'), ((387, 449), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""value"""', 'TensorProto.FLOAT', '[1]'], {}), "('value', TensorProto.FLOAT, [1])\n", (416, 449), False, 'from onnx import helper\n'), ((494, 562), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""y"""', 'TensorProto.FLOAT', '[1, 3, 7, 12]'], {}), "('y', TensorProto.FLOAT, [1, 3, 7, 12])\n", (523, 562), False, 'from onnx import helper\n'), ((604, 695), 'onnx.helper.make_node', 'helper.make_node', (['"""Pad"""'], {'inputs': "['x', 'pads', 'value']", 'outputs': "['y']", 'mode': '"""constant"""'}), "('Pad', inputs=['x', 'pads', 'value'], outputs=['y'], mode=\n 'constant')\n", (620, 695), False, 'from onnx import helper\n'), ((754, 820), 'onnx.helper.make_graph', 'helper.make_graph', (['[node_def]', '"""test-model"""', '[x, pads, value]', '[y]'], {}), "([node_def], 'test-model', [x, pads, value], [y])\n", (771, 820), False, 'from onnx import helper\n'), ((885, 939), 'onnx.helper.make_model', 'helper.make_model', (['graph_def'], {'producer_name': '"""onnx-Pad"""'}), "(graph_def, producer_name='onnx-Pad')\n", (902, 939), False, 'from onnx import helper\n'), ((985, 1020), 'onnx.checker.check_model', 'onnx.checker.check_model', (['model_def'], {}), '(model_def)\n', (1009, 1020), False, 'import onnx\n'), ((1092, 1103), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1101, 1103), False, 'import os\n'), ((1121, 1176), 'os.path.join', 'os.path.join', (['path', '"""../onnx_generated_models/Pad.onnx"""'], {}), "(path, '../onnx_generated_models/Pad.onnx')\n", (1133, 1176), False, 'import os\n'), ((1177, 1213), 'onnx.save', 'onnx.save', (['model_def', 'new_model_path'], {}), '(model_def, new_model_path)\n', (1186, 1213), False, 'import onnx\n'), ((1334, 1389), 'os.path.join', 'os.path.join', (['path', '"""../onnx_generated_models/Pad.onnx"""'], {}), "(path, '../onnx_generated_models/Pad.onnx')\n", (1346, 1389), False, 'import os\n'), ((1404, 1426), 'onnx.load', 'onnx.load', (['model_path1'], {}), '(model_path1)\n', (1413, 1426), False, 'import onnx\n'), ((2589, 2645), 'onnxruntime.InferenceSession', 'rt.InferenceSession', (['"""../onnx_generated_models/Pad.onnx"""'], {}), "('../onnx_generated_models/Pad.onnx')\n", (2608, 2645), True, 'import onnxruntime as rt\n'), ((3006, 3024), 'numpy.asarray', 'np.asarray', (['y_pred'], {}), '(y_pred)\n', (3016, 3024), True, 'import numpy as np\n'), ((3086, 3112), 'numpy.squeeze', 'np.squeeze', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (3096, 3112), True, 'import numpy as np\n'), ((3134, 3159), 'Compare_output.compare', 'compare', (['y_actual', 'y_pred'], {}), '(y_actual, y_pred)\n', (3141, 3159), False, 'from Compare_output import compare\n'), ((2135, 2179), 'numpy.pad', 'np.pad', (['data'], {'pad_width': 'pad_width', 'mode': 'mode'}), '(data, pad_width=pad_width, mode=mode)\n', (2141, 2179), True, 'import numpy as np\n'), ((1972, 2049), 'numpy.pad', 'np.pad', (['data'], {'pad_width': 'pad_width', 'mode': 'mode', 'constant_values': 'constant_values'}), '(data, 
pad_width=pad_width, mode=mode, constant_values=constant_values)\n', (1978, 2049), True, 'import numpy as np\n'), ((2231, 2258), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)', '(4)', '(5)'], {}), '(1, 3, 4, 5)\n', (2246, 2258), True, 'import numpy as np\n'), ((2285, 2319), 'numpy.array', 'np.array', (['[0, 0, 1, 3, 0, 0, 2, 4]'], {}), '([0, 0, 1, 3, 0, 0, 2, 4])\n', (2293, 2319), True, 'import numpy as np\n'), ((2405, 2420), 'numpy.array', 'np.array', (['[1.2]'], {}), '([1.2])\n', (2413, 2420), True, 'import numpy as np\n')] |
import numpy as np
b = np.zeros((3, 4))
b[-1] = np.arange(5, 9)
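# np.arange(5, 9) yields the 4-element vector [5, 6, 7, 8], which is assigned
# into the last row of the 3x4 zero matrix.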
print(b) | [
"numpy.zeros",
"numpy.arange"
] | [((24, 40), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {}), '((3, 4))\n', (32, 40), True, 'import numpy as np\n'), ((49, 64), 'numpy.arange', 'np.arange', (['(5)', '(9)'], {}), '(5, 9)\n', (58, 64), True, 'import numpy as np\n')] |
from exasol_udf_mock_python.column import Column
from exasol_udf_mock_python.group import Group
from exasol_udf_mock_python.mock_exa_environment import MockExaEnvironment
from exasol_udf_mock_python.mock_meta_data import MockMetaData
from exasol_udf_mock_python.udf_mock_executor import UDFMockExecutor
def udf_wrapper():
from exasol_udf_mock_python.udf_context import UDFContext
from sklearn.linear_model import SGDRegressor
from numpy.random import RandomState
from exasol_data_science_utils_python.preprocessing.scikit_learn.sklearn_identity_transformer import \
SKLearnIdentityTransformer
from exasol_data_science_utils_python.preprocessing.scikit_learn.sklearn_prefitted_column_transformer import \
SKLearnPrefittedColumnTransformer
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_set_preprocessor import \
ColumnSetPreprocessor
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.table_preprocessor import TablePreprocessor
from exasol_data_science_utils_python.model_utils.model_aggregator import combine_to_voting_regressor
from exasol_data_science_utils_python.model_utils.score_iterator import ScoreIterator
from exasol_data_science_utils_python.model_utils.partial_fit_iterator import RegressorPartialFitIterator
def run(ctx: UDFContext):
input_preprocessor = SKLearnPrefittedColumnTransformer(
transformer_mapping=[("t2", SKLearnIdentityTransformer())]
)
output_preprocessor = SKLearnPrefittedColumnTransformer(
transformer_mapping=[("t2", SKLearnIdentityTransformer())]
)
table_preprocessor = TablePreprocessor(
input_column_set_preprocessors=ColumnSetPreprocessor(
column_transformer=input_preprocessor,
),
target_column_set_preprocessors=ColumnSetPreprocessor(
column_transformer=output_preprocessor,
),
)
model = SGDRegressor(random_state=RandomState(0), loss="squared_loss", verbose=False)
df = ctx.get_dataframe(101)
iterator = RegressorPartialFitIterator(
table_preprocessor=table_preprocessor,
model=model
)
epochs = 10
for i in range(epochs):
iterator.train(ctx, batch_size=50, shuffle_buffer_size=100)
combined_model = combine_to_voting_regressor([iterator.model, iterator.model])
combined_iterator = ScoreIterator(
table_preprocessor=iterator.table_preprocessor,
model=combined_model
)
score_sum, score_count = combined_iterator.compute_score(ctx, batch_size=10)
ctx.emit(score_sum, score_count)
def test_partial_fit_iterator():
executor = UDFMockExecutor()
meta = MockMetaData(
script_code_wrapper_function=udf_wrapper,
input_type="SET",
input_columns=[Column("t1", int, "INTEGER"),
Column("t2", float, "FLOAT"), ],
output_type="EMIT",
output_columns=[Column("SCORE_SUM", float, "FLOAD"),
Column("SCORE_COUNT", int, "INT"), ]
)
exa = MockExaEnvironment(meta)
input_data = [(i, (1.0 * i) / 100) for i in range(100)]
result = executor.run([Group(input_data)], exa)
result_row = result[0].rows[0]
assert result_row[1] == 100
assert result_row[0] >= -5000.0
print(result_row[0] / result_row[1])
| [
"exasol_udf_mock_python.group.Group",
"exasol_data_science_utils_python.model_utils.model_aggregator.combine_to_voting_regressor",
"exasol_data_science_utils_python.preprocessing.scikit_learn.sklearn_identity_transformer.SKLearnIdentityTransformer",
"numpy.random.RandomState",
"exasol_data_science_utils_pyt... | [((2795, 2812), 'exasol_udf_mock_python.udf_mock_executor.UDFMockExecutor', 'UDFMockExecutor', ([], {}), '()\n', (2810, 2812), False, 'from exasol_udf_mock_python.udf_mock_executor import UDFMockExecutor\n'), ((3189, 3213), 'exasol_udf_mock_python.mock_exa_environment.MockExaEnvironment', 'MockExaEnvironment', (['meta'], {}), '(meta)\n', (3207, 3213), False, 'from exasol_udf_mock_python.mock_exa_environment import MockExaEnvironment\n'), ((2148, 2227), 'exasol_data_science_utils_python.model_utils.partial_fit_iterator.RegressorPartialFitIterator', 'RegressorPartialFitIterator', ([], {'table_preprocessor': 'table_preprocessor', 'model': 'model'}), '(table_preprocessor=table_preprocessor, model=model)\n', (2175, 2227), False, 'from exasol_data_science_utils_python.model_utils.partial_fit_iterator import RegressorPartialFitIterator\n'), ((2411, 2472), 'exasol_data_science_utils_python.model_utils.model_aggregator.combine_to_voting_regressor', 'combine_to_voting_regressor', (['[iterator.model, iterator.model]'], {}), '([iterator.model, iterator.model])\n', (2438, 2472), False, 'from exasol_data_science_utils_python.model_utils.model_aggregator import combine_to_voting_regressor\n'), ((2501, 2589), 'exasol_data_science_utils_python.model_utils.score_iterator.ScoreIterator', 'ScoreIterator', ([], {'table_preprocessor': 'iterator.table_preprocessor', 'model': 'combined_model'}), '(table_preprocessor=iterator.table_preprocessor, model=\n combined_model)\n', (2514, 2589), False, 'from exasol_data_science_utils_python.model_utils.score_iterator import ScoreIterator\n'), ((3301, 3318), 'exasol_udf_mock_python.group.Group', 'Group', (['input_data'], {}), '(input_data)\n', (3306, 3318), False, 'from exasol_udf_mock_python.group import Group\n'), ((1757, 1817), 'exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_set_preprocessor.ColumnSetPreprocessor', 'ColumnSetPreprocessor', ([], {'column_transformer': 'input_preprocessor'}), '(column_transformer=input_preprocessor)\n', (1778, 1817), False, 'from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_set_preprocessor import ColumnSetPreprocessor\n'), ((1894, 1955), 'exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_set_preprocessor.ColumnSetPreprocessor', 'ColumnSetPreprocessor', ([], {'column_transformer': 'output_preprocessor'}), '(column_transformer=output_preprocessor)\n', (1915, 1955), False, 'from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_set_preprocessor import ColumnSetPreprocessor\n'), ((2041, 2055), 'numpy.random.RandomState', 'RandomState', (['(0)'], {}), '(0)\n', (2052, 2055), False, 'from numpy.random import RandomState\n'), ((2937, 2965), 'exasol_udf_mock_python.column.Column', 'Column', (['"""t1"""', 'int', '"""INTEGER"""'], {}), "('t1', int, 'INTEGER')\n", (2943, 2965), False, 'from exasol_udf_mock_python.column import Column\n'), ((2990, 3018), 'exasol_udf_mock_python.column.Column', 'Column', (['"""t2"""', 'float', '"""FLOAT"""'], {}), "('t2', float, 'FLOAT')\n", (2996, 3018), False, 'from exasol_udf_mock_python.column import Column\n'), ((3075, 3110), 'exasol_udf_mock_python.column.Column', 'Column', (['"""SCORE_SUM"""', 'float', '"""FLOAD"""'], {}), "('SCORE_SUM', float, 'FLOAD')\n", (3081, 3110), False, 'from exasol_udf_mock_python.column import Column\n'), ((3136, 3169), 'exasol_udf_mock_python.column.Column', 'Column', (['"""SCORE_COUNT"""', 'int', '"""INT"""'], {}), "('SCORE_COUNT', int, 
'INT')\n", (3142, 3169), False, 'from exasol_udf_mock_python.column import Column\n'), ((1479, 1507), 'exasol_data_science_utils_python.preprocessing.scikit_learn.sklearn_identity_transformer.SKLearnIdentityTransformer', 'SKLearnIdentityTransformer', ([], {}), '()\n', (1505, 1507), False, 'from exasol_data_science_utils_python.preprocessing.scikit_learn.sklearn_identity_transformer import SKLearnIdentityTransformer\n'), ((1625, 1653), 'exasol_data_science_utils_python.preprocessing.scikit_learn.sklearn_identity_transformer.SKLearnIdentityTransformer', 'SKLearnIdentityTransformer', ([], {}), '()\n', (1651, 1653), False, 'from exasol_data_science_utils_python.preprocessing.scikit_learn.sklearn_identity_transformer import SKLearnIdentityTransformer\n')] |
import io
import time
import logging
from datetime import datetime
import numpy as np
from torch.utils.tensorboard import SummaryWriter
LOGGER_NAME = 'root'
LOGGER_DATEFMT = '%Y-%m-%d %H:%M:%S'
handler = logging.StreamHandler()
logger = logging.getLogger(LOGGER_NAME)
logger.setLevel(logging.INFO)
logger.addHandler(handler)
def add_logging(logs_path, prefix):
log_name = prefix + datetime.strftime(datetime.today(), '%Y-%m-%d_%H-%M-%S') + '.log'
stdout_log_path = logs_path / log_name
fh = logging.FileHandler(str(stdout_log_path))
formatter = logging.Formatter(fmt='(%(levelname)s) %(asctime)s: %(message)s',
datefmt=LOGGER_DATEFMT)
fh.setFormatter(formatter)
logger.addHandler(fh)
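# TqdmToLogger is a file-like object that redirects tqdm's progress output to
# the logging module, emitting at most one record every `mininterval` seconds.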
class TqdmToLogger(io.StringIO):
logger = None
level = None
buf = ''
def __init__(self, logger, level=None, mininterval=5):
super(TqdmToLogger, self).__init__()
self.logger = logger
self.level = level or logging.INFO
self.mininterval = mininterval
self.last_time = 0
def write(self, buf):
self.buf = buf.strip('\r\n\t ')
def flush(self):
if len(self.buf) > 0 and time.time() - self.last_time > self.mininterval:
self.logger.log(self.level, self.buf)
self.last_time = time.time()
class SummaryWriterAvg(SummaryWriter):
def __init__(self, *args, dump_period=20, **kwargs):
super().__init__(*args, **kwargs)
self._dump_period = dump_period
self._avg_scalars = dict()
def add_scalar(self, tag, value, global_step=None, disable_avg=False):
if disable_avg or isinstance(value, (tuple, list, dict)):
super().add_scalar(tag, np.array(value), global_step=global_step)
else:
if tag not in self._avg_scalars:
self._avg_scalars[tag] = ScalarAccumulator(self._dump_period)
avg_scalar = self._avg_scalars[tag]
avg_scalar.add(value)
if avg_scalar.is_full():
super().add_scalar(tag, avg_scalar.value,
global_step=global_step)
avg_scalar.reset()
class ScalarAccumulator(object):
def __init__(self, period):
self.sum = 0
self.cnt = 0
self.period = period
def add(self, value):
self.sum += value
self.cnt += 1
@property
def value(self):
if self.cnt > 0:
return self.sum / self.cnt
else:
return 0
def reset(self):
self.cnt = 0
self.sum = 0
def is_full(self):
return self.cnt >= self.period
def __len__(self):
return self.cnt
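# Hypothetical usage sketch, kept as a comment (the log_dir and loop are
# illustrative): values passed to add_scalar are accumulated, and a single
# averaged point is written to TensorBoard every `dump_period` calls.
#
#     writer = SummaryWriterAvg(log_dir='runs/exp', dump_period=20)
#     for step in range(1000):
#         loss = train_step()  # stand-in for the real training step
#         writer.add_scalar('loss', loss, global_step=step)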
| [
"datetime.datetime.today",
"logging.StreamHandler",
"time.time",
"logging.Formatter",
"numpy.array",
"logging.getLogger"
] | [((207, 230), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (228, 230), False, 'import logging\n'), ((241, 271), 'logging.getLogger', 'logging.getLogger', (['LOGGER_NAME'], {}), '(LOGGER_NAME)\n', (258, 271), False, 'import logging\n'), ((568, 662), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': '"""(%(levelname)s) %(asctime)s: %(message)s"""', 'datefmt': 'LOGGER_DATEFMT'}), "(fmt='(%(levelname)s) %(asctime)s: %(message)s', datefmt=\n LOGGER_DATEFMT)\n", (585, 662), False, 'import logging\n'), ((1326, 1337), 'time.time', 'time.time', ([], {}), '()\n', (1335, 1337), False, 'import time\n'), ((409, 425), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (423, 425), False, 'from datetime import datetime\n'), ((1731, 1746), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (1739, 1746), True, 'import numpy as np\n'), ((1198, 1209), 'time.time', 'time.time', ([], {}), '()\n', (1207, 1209), False, 'import time\n')] |
"""
DDPG training and evaluation loop built on experience replay
Author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
from replay_buffer import ReplayBuffer, ReplayBufferNew, ReplayBufferStructure, ReplayBufferStructureLean
from result_buffer import ResultBuffer
import time
import csv
import os
import logging
import tensorflow as tf
from analysis import Qgraph
logger = logging.getLogger("logger")
class DDPG(object):
def __init__(self, env, actor, critic, config):
self.env = env
self.actor = actor
self.critic = critic
self.sess = actor.sess
self.config = config
# initiate replay buffer
self.replay_buffer = ReplayBuffer(config.buffer_size)
# initiate results buffer
self.results = ResultBuffer(config.directory, ["policy_eval", "train", "eval", "long_eval"])
# training parameters
self.train_episode_len = config.episode_len
self.episode_num = config.episode_num
self.episode_num_pol = config.episode_num_pol
self.env_size = config.env_size
self.offline_batch_size = config.batch_size
self.explore_noise = config.noise_std
self.explore_noise_dec = config.noise_dec
self.reward_decay = config.gamma
self.reward_averaging = config.tau
# evaluation parameters
self.eval_len = config.eval_len
self.env_eval_size = config.env_eval_size
self.last_eval_len = config.last_eval_len
def run_episode(self, name, pol_eval=None, noise=True):
self.results.add_episode(name,
self.actor.lr,
self.explore_noise,
self.replay_buffer.size)
z = self.env.reset(self.env_size)
for j in range(self.train_episode_len):
step_results = dict()
# Added exploration noise
if noise:
u = self.noisy_action(z)
else:
u = self.actor.predict(z)
z2, r, w = self.env.step(z, u)
self.results.average_reward = self.reward_averaging * np.mean(r) + \
(1 - self.reward_averaging) * self.results.average_reward
step_results.update({'states': z,
'actions': u,
'online_rewards': r,
'disturbance': w})
for zz, uu, rr, zz2, ww in zip(z, u, r, z2, w):
self.replay_buffer.add(zz, uu, rr, zz2, ww)
# Keep adding experience to the memory until there are at least mini-batch size samples
z_batch, u_batch, r_batch, z2_batch, w_batch = self.replay_buffer.sample_batch(self.offline_batch_size)
y_target = self.TD_target(z_batch, u_batch, r_batch, z2_batch)
# Update the critic given the targets
q_loss, global_step_critic, g_norm_critic = self.critic.train(z_batch, u_batch, y_target)
step_results.update({'q_loss': q_loss,
'global_step_critic': global_step_critic,
'g_norm_critic': g_norm_critic})
if pol_eval is None:
# Update the actor to maximize the critic
a_outs = self.actor.predict(z_batch)
grads = self.critic.action_gradients(z_batch, a_outs)
global_step_actor, g_norm_actor = self.actor.train(z_batch, grads[0])
step_results.update({'global_step_actor': global_step_actor,
'g_norm_actor': g_norm_actor})
# Update target networks
self.actor.update_target_network()
self.actor.lr_decay()
self.critic.update_target_network()
self.critic.lr_decay()
self.results.update_episode(**step_results)
z = z2
self.results.finalize_episode()
def policy_eval(self, episode_num):
for i in range(episode_num):
self.run_episode("policy_eval", pol_eval=True)
if np.mod(i, 5) == 0:
self.evaluate("eval", self.eval_len)
def evaluate(self, name, eval_len):
self.results.add_episode(name,
self.actor.lr,
self.explore_noise,
self.replay_buffer.size)
z = self.env.reset(self.env_eval_size)
for j in range(eval_len):
step_results = dict()
u = self.actor.predict(z)
z2, r, w = self.env.step(z, u)
step_results.update({'states': z,
'actions': u,
'online_rewards': r,
'disturbance': w})
self.results.update_episode(**step_results)
z = z2
self.results.finalize_episode()
def noisy_action(self, z):
self.explore_noise *= self.explore_noise_dec
n = np.random.randn(z.shape[0], self.actor.a_dim) * 0.5
u_noisy = self.actor.predict_noisy(z, n)
return u_noisy
def TD_target(self, z_batch, u_batch, r_batch=None, z2_batch=None, evaluate=None):
if evaluate is None:
critic_predict = self.critic.predict_target
else:
critic_predict = self.critic.predict
# Calculate the TD target
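# standard discounted Bellman target: y = r + gamma * Q_target(s2, a(s2))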
target_q = critic_predict(z2_batch, self.actor.predict(z2_batch))
# produce TD error with estimated Q(s2, a(s2))
y_target = r_batch + self.reward_decay * target_q
return y_target
def simplex_sampler(self, size):
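# Samples uniformly from the probability simplex, one distribution per state
# column: sorting (input_cardin - 1) U(0, 1) variates and taking successive
# differences yields the spacings of a Dirichlet(1, ..., 1) draw, i.e. a
# uniform distribution over probability vectors.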
unsorted_vec = np.random.rand(size, self.env.input_cardin - 1, self.env.state_cardin)
sorted_vec = np.sort(unsorted_vec, axis=1)
full_sorted = np.concatenate([np.zeros([size, 1, self.env.state_cardin]),
sorted_vec,
np.ones([size, 1, self.env.state_cardin])], axis=1)
diff = full_sorted[:, 1:, :] - full_sorted[:, :-1, :]
n = diff / np.sum(diff, axis=1, keepdims=True)
# n_uniform = np.random.rand(size, self.env.input_cardin, self.env.state_cardin)
# n_log = -np.log(n_uniform)
# n = n_log / np.sum(n_log, axis=1, keepdims=True)
return n
def train(self):
logger.info(self.results.title())
logger.info("Initial policy evaluation:")
logger.info(self.results.title())
for i in range(self.episode_num_pol):
self.run_episode("policy_eval", pol_eval=True)
if np.mod(i, 5) == 0:
self.evaluate("eval", self.eval_len)
logger.info("\n\nTraining:")
logger.info(self.results.title())
for i in range(self.episode_num):
self.run_episode("train")
if np.mod(i, 25) == 0:
self.evaluate("eval", self.eval_len)
if np.mod(i, 100) == 0 and i > 0:
logger.info("############ long evaluation #################")
self.evaluate("long_eval", self.last_eval_len)
self.evaluate("long_eval", self.last_eval_len)
class DDPG_Infinite(DDPG):
def __init__(self, env,actor, critic, config):
super().__init__(env, actor, critic, config)
def TD_target(self, z_batch, u_batch, r_batch=None, z2_batch=None, evaluate=None):
if evaluate is None:
critic_predict = self.critic.predict_target
else:
critic_predict = self.critic.predict
# Calculate the TD target
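# average-reward (differential) target: y = r - rho + gamma * Q_target(s2, a(s2)),
# where rho is the running average reward tracked in self.results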
target_q = critic_predict(z2_batch, self.actor.predict(z2_batch))
# produce TD error with estimated Q(s2, a(s2))
y_target = r_batch - self.results.average_reward + self.reward_decay * target_q
return y_target
class DDPG_Infinite_Planning(DDPG_Infinite):
def __init__(self, env, actor, critic, config):
super().__init__(env, actor, critic, config)
def TD_target(self, z_batch, u_batch, r_batch=None, z2_batch=None, evaluate=None):
if evaluate is None:
critic_predict = self.critic.predict_target
else:
critic_predict = self.critic.predict
# Calculate the TD target
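# planning form: the environment model returns the next-state distribution
# p_y, so the target is the exact expectation
#   y = r - rho + sum_y p(y) * Q_target(s2_y, a(s2_y))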
z2, r, p_y = self.env.step(z_batch, u_batch, planning=True)
y_target = (r - self.results.average_reward)
P = [np.reshape(p_y[:, i], [-1, 1]) for i in range(z2.shape[1])]
S = [z2[:, i, :] for i in range(z2.shape[1])]
for p, s in zip(P, S):
y_target += np.reshape(p, [-1, 1]) * critic_predict(s, self.actor.predict_target(s))
return y_target
# class DDPG_StructuredReplay_Infinite(DDPG_StructuredReplay):
# def __init__(self, env, actor, critic, config):
# super().__init__(env, actor, critic, config)
#
# def TD_target(self, z_batch, u_batch, r_batch=None, z2_batch=None, evaluate=None):
# if evaluate is None:
# critic_predict = self.critic.predict_target
# else:
# critic_predict = self.critic.predict
#
# # Calculate the TD target
# target_q = critic_predict(z2_batch, self.actor.predict(z2_batch))
#
# # produce TD error with estimated Q(s2, a(s2))
# y_target = r_batch - self.results.average_reward + self.reward_decay * target_q
#
#
# return y_target
| [
"result_buffer.ResultBuffer",
"numpy.sum",
"numpy.random.randn",
"numpy.zeros",
"numpy.ones",
"numpy.mod",
"numpy.sort",
"replay_buffer.ReplayBuffer",
"numpy.mean",
"numpy.reshape",
"numpy.random.rand",
"logging.getLogger"
] | [((379, 406), 'logging.getLogger', 'logging.getLogger', (['"""logger"""'], {}), "('logger')\n", (396, 406), False, 'import logging\n'), ((684, 716), 'replay_buffer.ReplayBuffer', 'ReplayBuffer', (['config.buffer_size'], {}), '(config.buffer_size)\n', (696, 716), False, 'from replay_buffer import ReplayBuffer, ReplayBufferNew, ReplayBufferStructure, ReplayBufferStructureLean\n'), ((775, 852), 'result_buffer.ResultBuffer', 'ResultBuffer', (['config.directory', "['policy_eval', 'train', 'eval', 'long_eval']"], {}), "(config.directory, ['policy_eval', 'train', 'eval', 'long_eval'])\n", (787, 852), False, 'from result_buffer import ResultBuffer\n'), ((5745, 5815), 'numpy.random.rand', 'np.random.rand', (['size', '(self.env.input_cardin - 1)', 'self.env.state_cardin'], {}), '(size, self.env.input_cardin - 1, self.env.state_cardin)\n', (5759, 5815), True, 'import numpy as np\n'), ((5837, 5866), 'numpy.sort', 'np.sort', (['unsorted_vec'], {'axis': '(1)'}), '(unsorted_vec, axis=1)\n', (5844, 5866), True, 'import numpy as np\n'), ((5075, 5120), 'numpy.random.randn', 'np.random.randn', (['z.shape[0]', 'self.actor.a_dim'], {}), '(z.shape[0], self.actor.a_dim)\n', (5090, 5120), True, 'import numpy as np\n'), ((6170, 6205), 'numpy.sum', 'np.sum', (['diff'], {'axis': '(1)', 'keepdims': '(True)'}), '(diff, axis=1, keepdims=True)\n', (6176, 6205), True, 'import numpy as np\n'), ((4150, 4162), 'numpy.mod', 'np.mod', (['i', '(5)'], {}), '(i, 5)\n', (4156, 4162), True, 'import numpy as np\n'), ((5905, 5947), 'numpy.zeros', 'np.zeros', (['[size, 1, self.env.state_cardin]'], {}), '([size, 1, self.env.state_cardin])\n', (5913, 5947), True, 'import numpy as np\n'), ((6037, 6078), 'numpy.ones', 'np.ones', (['[size, 1, self.env.state_cardin]'], {}), '([size, 1, self.env.state_cardin])\n', (6044, 6078), True, 'import numpy as np\n'), ((6686, 6698), 'numpy.mod', 'np.mod', (['i', '(5)'], {}), '(i, 5)\n', (6692, 6698), True, 'import numpy as np\n'), ((6933, 6946), 'numpy.mod', 'np.mod', (['i', '(25)'], {}), '(i, 25)\n', (6939, 6946), True, 'import numpy as np\n'), ((8462, 8492), 'numpy.reshape', 'np.reshape', (['p_y[:, i]', '[-1, 1]'], {}), '(p_y[:, i], [-1, 1])\n', (8472, 8492), True, 'import numpy as np\n'), ((8618, 8640), 'numpy.reshape', 'np.reshape', (['p', '[-1, 1]'], {}), '(p, [-1, 1])\n', (8628, 8640), True, 'import numpy as np\n'), ((2138, 2148), 'numpy.mean', 'np.mean', (['r'], {}), '(r)\n', (2145, 2148), True, 'import numpy as np\n'), ((7022, 7036), 'numpy.mod', 'np.mod', (['i', '(100)'], {}), '(i, 100)\n', (7028, 7036), True, 'import numpy as np\n')] |
import glob
import os
from enum import Enum
from timeit import default_timer as timer
import numpy as np
from cv2 import cv2
from numpy import random
from data.train_model import ModelType
from data.yolov3_load_dataset import YoloV3DataLoader
from model.yolo3.utils import yolov3_classes
from model.yolo3.yolo_eval import YOLO
import keras.backend as K
import sys
def transform_box_format_pred(box):
"""y1,x1,y2,x2 to x1, y1, w, h"""
y1, x1, y2, x2 = box
return [x1, y1, x2 - x1, y2 - y1]
def transform_box_format_gt(box):
"""x1,y1,x2,y2 to x1, y1, w, h"""
x1, y1, x2, y2 = box.x1, box.y1, box.x2, box.y2
return [x1, y1, x2 - x1, y2 - y1]
def get_iou(pred_box, gt_box):
b1_xy = np.array(pred_box[:2])
b1_wh = np.array(pred_box[2:4])
# (x, y) is the top-left corner (see transform_box_format_* above), so a box
# spans [xy, xy + wh]; treating xy as a center would shift each box by half
# its own size and distort the IoU whenever the two boxes differ in size
b1_mins = b1_xy
b1_maxes = b1_xy + b1_wh
b2_xy = np.array(gt_box[:2])
b2_wh = np.array(gt_box[2:4])
b2_mins = b2_xy
b2_maxes = b2_xy + b2_wh
intersect_mins = [max(b1_mins[0], b2_mins[0]), max(b1_mins[1], b2_mins[1])]
intersect_maxes = [min(b1_maxes[0], b2_maxes[0]), min(b1_maxes[1], b2_maxes[1])]
intersect_wh = [max(intersect_maxes[0] - intersect_mins[0], 0.), max(intersect_maxes[1] - intersect_mins[1], 0.)]
intersect_area = intersect_wh[0] * intersect_wh[1]
b1_area = b1_wh[0] * b1_wh[1]
b2_area = b2_wh[0] * b2_wh[1]
iou = intersect_area / (b1_area + b2_area - intersect_area)
return iou
def get_TPs(pred_boxes, pred_classes, gt_boxes, gt_classes, iou_threshold=0.5, printing=True):
n_tp = 0
for i in range(0, len(pred_boxes)):
pred_box = pred_boxes[i]
pred_class = pred_classes[i]
for j in range(0, len(gt_boxes)):
gt_box = gt_boxes[j]
gt_class = gt_classes[j]
if get_iou(pred_box, gt_box) > iou_threshold:
if pred_class == gt_class:
n_tp += 1
# if printing:
# print('iou: ' + str(get_iou(pred_box, gt_box)))
break
return n_tp
def get_FNs(gt_boxes, gt_classes, pred_boxes, pred_classes, iou_threshold=0.5):
return len(gt_boxes) - get_TPs(gt_boxes, gt_classes, pred_boxes, pred_classes, iou_threshold, printing=False)
def load_dataset(dataset_path, n_images, input_shape=(608, 608), model_type=ModelType.YOLO_V3.value):
my_random_seed = 1337
# choose dataset loader according to the current network type
dataset_loader = None
if ModelType(model_type) == ModelType.YOLO_V3:
dataset_loader = YoloV3DataLoader()
assert dataset_loader is not None
# get dataset
validation = dataset_loader.load_dataset(dataset_path, input_shape, random_seed=my_random_seed)['val']
# reformat into one array
val_dataset = []
for dataset in validation:
val_dataset.extend(dataset)
# shuffle with fix seed
random.seed(my_random_seed)
random.shuffle(val_dataset)
# get n_images number of samples
return val_dataset[:n_images]
def evaluate(val_dataset, dataset_path, out_path, model_type=ModelType.YOLO_V3.value,
pruning=None, mod_mask=(0, 0, 0, 0, 0), model_path=None):
"""Select n_images number of images from validation dataset and return evaluation statistics"""
model = None
if ModelType(model_type) == ModelType.YOLO_V3:
model = YOLO(mod_mask=mod_mask, pruning=pruning, model_path=model_path)
assert model is not None
st = timer()
idx = 0
avg_precision = 0
avg_recall = 0
avg_duration = 0
for data in val_dataset:
image = None
# get the random image by name
for filename in glob.iglob(dataset_path + '/**/' + data.name, recursive=True):
image = cv2.imread(filename)
break
assert image is not None
gt_boxes = []
gt_classes = []
for label in data.labels:
gt_boxes.append(label.box)
gt_classes.append(yolov3_classes[label.category])
duration, pred_image, pred_boxes, pred_scores, pred_classes = model.detect_image(image, gt_boxes)
for i in range(0, len(pred_boxes)):
pred_boxes[i] = transform_box_format_pred(pred_boxes[i])
for i in range(0, len(gt_boxes)):
gt_boxes[i] = transform_box_format_gt(gt_boxes[i])
curr_tp = get_TPs(pred_boxes, pred_classes, gt_boxes, gt_classes)
avg_precision += curr_tp / len(pred_boxes) if len(pred_boxes) > 0 else 1
curr_fn = get_FNs(gt_boxes, gt_classes, pred_boxes, pred_classes)
avg_recall += curr_tp / (curr_tp + curr_fn) if curr_fn > 0 else 1
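# per-image precision = TP / #predictions and recall = TP / (TP + FN); these
# per-image values are averaged over the whole evaluation set below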
avg_duration += duration
# save every 10th image
if idx % 10 == 0:
# cv2.imshow('evaluation', pred_image)
cv2.imwrite(os.path.join(out_path, data.name), pred_image)
idx += 1
length = len(val_dataset)
mean_avg_precision = avg_precision / length
mean_avg_recall = avg_recall / length
avg_duration /= length
print(timer() - st)
return mean_avg_precision, mean_avg_recall, avg_duration
def normal(dataset, model_path=None):
mean_avg_precision, mean_avg_recall, avg_duration = evaluate(dataset, '/media/boti/Adatok/Datasets-pc/',
'/media/boti/Adatok/Datasets-pc/evaluation',
model_path=model_path)
print('mean_avg_precision: ' + str(mean_avg_precision))
print('mean_avg_recall: ' + str(mean_avg_recall))
print('avg_duration: ' + str(avg_duration))
K.clear_session()
def with_modification(dataset, model_mask, model_path=None):
mean_avg_precision, mean_avg_recall, avg_duration = evaluate(dataset, '/media/boti/Adatok/Datasets-pc/',
'/media/boti/Adatok/Datasets-pc/evaluation',
mod_mask=model_mask, model_path=model_path)
print('mean_avg_precision: ' + str(mean_avg_precision))
print('mean_avg_recall: ' + str(mean_avg_recall))
print('avg_duration: ' + str(avg_duration))
K.clear_session()
def with_pruning(dataset, model_mask, pruning, model_path=None):
mean_avg_precision, mean_avg_recall, avg_duration = evaluate(dataset, '/media/boti/Adatok/Datasets-pc/',
'/media/boti/Adatok/Datasets-pc/evaluation',
mod_mask=model_mask, pruning=pruning,
model_path=model_path)
print('mean_avg_precision: ' + str(mean_avg_precision))
print('mean_avg_recall: ' + str(mean_avg_recall))
print('avg_duration: ' + str(avg_duration))
K.clear_session()
def pruning_one_layer(dataset, model_mask, model_path=None):
n_blocks = 1 + 2 + 8 + 8 + 4
for i in range(0, n_blocks + 1):
print(str(i) + '. block was pruned:')
with_pruning(dataset, model_mask, [i], model_path)
if __name__ == "__main__":
# LOAD DATASET
val_dataset = load_dataset('/media/boti/Adatok/Datasets-pc/', 101)
# sys.stdout = open('../logs/evaluation_log.txt', 'w')
# normal(val_dataset, '/home/boti/Workspace/PyCharmWorkspace/szdoga/trained_weights/trained_weights_stage_1.h5')
# normal(val_dataset, '/home/boti/Workspace/PyCharmWorkspace/szdoga/trained_weights/trained_weights_final.h5')
# normal(val_dataset, '/home/boti/Workspace/PyCharmWorkspace/szdoga/logs/train_stage1/trained_weights_stage_1.h5')
normal(val_dataset, '/home/boti/Workspace/PyCharmWorkspace/szdoga/trained_weights/trained_weights_stage_11543915433.6909509.h5')
# with_modification(val_dataset, (0, 0, 0, 0, 1))
# pruning_one_layer(val_dataset, (0, 0, 0, 0, 0))
# with_modification(val_dataset, (0, 0, 0, 1, 4), '/home/boti/Workspace/PyCharmWorkspace/szdoga/trained_weights/mod_trained_weights_stage_3_0.h5')
| [
"numpy.random.seed",
"numpy.random.shuffle",
"cv2.cv2.imread",
"timeit.default_timer",
"model.yolo3.yolo_eval.YOLO",
"data.yolov3_load_dataset.YoloV3DataLoader",
"data.train_model.ModelType",
"numpy.array",
"glob.iglob",
"os.path.join",
"keras.backend.clear_session"
] | [((713, 735), 'numpy.array', 'np.array', (['pred_box[:2]'], {}), '(pred_box[:2])\n', (721, 735), True, 'import numpy as np\n'), ((748, 771), 'numpy.array', 'np.array', (['pred_box[2:4]'], {}), '(pred_box[2:4])\n', (756, 771), True, 'import numpy as np\n'), ((880, 900), 'numpy.array', 'np.array', (['gt_box[:2]'], {}), '(gt_box[:2])\n', (888, 900), True, 'import numpy as np\n'), ((913, 934), 'numpy.array', 'np.array', (['gt_box[2:4]'], {}), '(gt_box[2:4])\n', (921, 934), True, 'import numpy as np\n'), ((2965, 2992), 'numpy.random.seed', 'random.seed', (['my_random_seed'], {}), '(my_random_seed)\n', (2976, 2992), False, 'from numpy import random\n'), ((2997, 3024), 'numpy.random.shuffle', 'random.shuffle', (['val_dataset'], {}), '(val_dataset)\n', (3011, 3024), False, 'from numpy import random\n'), ((3543, 3550), 'timeit.default_timer', 'timer', ([], {}), '()\n', (3548, 3550), True, 'from timeit import default_timer as timer\n'), ((5691, 5708), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (5706, 5708), True, 'import keras.backend as K\n'), ((6267, 6284), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (6282, 6284), True, 'import keras.backend as K\n'), ((6929, 6946), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (6944, 6946), True, 'import keras.backend as K\n'), ((2561, 2582), 'data.train_model.ModelType', 'ModelType', (['model_type'], {}), '(model_type)\n', (2570, 2582), False, 'from data.train_model import ModelType\n'), ((2630, 2648), 'data.yolov3_load_dataset.YoloV3DataLoader', 'YoloV3DataLoader', ([], {}), '()\n', (2646, 2648), False, 'from data.yolov3_load_dataset import YoloV3DataLoader\n'), ((3380, 3401), 'data.train_model.ModelType', 'ModelType', (['model_type'], {}), '(model_type)\n', (3389, 3401), False, 'from data.train_model import ModelType\n'), ((3440, 3503), 'model.yolo3.yolo_eval.YOLO', 'YOLO', ([], {'mod_mask': 'mod_mask', 'pruning': 'pruning', 'model_path': 'model_path'}), '(mod_mask=mod_mask, pruning=pruning, model_path=model_path)\n', (3444, 3503), False, 'from model.yolo3.yolo_eval import YOLO\n'), ((3740, 3801), 'glob.iglob', 'glob.iglob', (["(dataset_path + '/**/' + data.name)"], {'recursive': '(True)'}), "(dataset_path + '/**/' + data.name, recursive=True)\n", (3750, 3801), False, 'import glob\n'), ((3823, 3843), 'cv2.cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (3833, 3843), False, 'from cv2 import cv2\n'), ((5101, 5108), 'timeit.default_timer', 'timer', ([], {}), '()\n', (5106, 5108), True, 'from timeit import default_timer as timer\n'), ((4877, 4910), 'os.path.join', 'os.path.join', (['out_path', 'data.name'], {}), '(out_path, data.name)\n', (4889, 4910), False, 'import os\n')] |
import mirdata
import numpy as np
import sklearn
import random
import torch
import torchaudio
import pytorch_lightning as pl
class MridangamDataset(torch.utils.data.Dataset):
def __init__(
self,
mirdataset,
seq_duration=0.5,
random_start=True,
resample=8000,
subset=0,
train_split=0.8,
test_split=0.2,
random_seed=42
):
"""
"""
self.seq_duration = seq_duration
self.dataset = mirdataset
self.track_ids = self.dataset.track_ids
self.tracks = self.dataset.load_tracks()
self.resample = resample
self.set = subset
self.random_start = random_start
labels = [self.dataset.track(i).stroke_name for i in self.track_ids]
unique_labels = sorted(set(labels))  # sorted for a deterministic label -> index mapping
self.labels = {label: i for i, label in enumerate(unique_labels)}
self.trackids_train, self.trackids_test = sklearn.model_selection.train_test_split(self.track_ids, train_size=1-test_split, random_state=random_seed, stratify=labels)
# split the training ids further, so that test tracks cannot leak into the train/validation sets
labels_train = [self.dataset.track(i).stroke_name for i in self.trackids_train]
self.trackids_train, self.trackids_valid = sklearn.model_selection.train_test_split(self.trackids_train, train_size=train_split, random_state=random_seed, stratify=labels_train)
def __getitem__(self, index):
if self.set==0:
track_id = self.trackids_train[index]
elif self.set==1:
track_id = self.trackids_valid[index]
elif self.set==2:
track_id = self.trackids_test[index]
track = self.dataset.track(track_id)
#### compute start and end frames to read from the disk
si, ei = torchaudio.info(track.audio_path)
sample_rate, channels, length = si.rate, si.channels, si.length
duration = length / sample_rate
if self.seq_duration>duration:
offset = 0
num_frames = length
else:
if self.random_start:
start = random.uniform(0, duration - self.seq_duration)
else:
start = 0.
offset = int(np.floor(start * sample_rate))
num_frames = int(np.floor(self.seq_duration * sample_rate))
#### get audio
audio_signal, sample_rate = torchaudio.load(filepath=track.audio_path, offset=offset,num_frames=num_frames)
#### zero pad if the size is smaller than seq_duration
seq_duration_samples = int(self.seq_duration * sample_rate)
total_samples = audio_signal.shape[-1]
if seq_duration_samples>total_samples:
audio_signal = torch.nn.ConstantPad2d((0,seq_duration_samples-total_samples,0,0),0)(audio_signal)
#### resample
audio_signal = torchaudio.transforms.Resample(sample_rate, self.resample)(audio_signal)
return audio_signal, self.labels[track.stroke_name]
def __len__(self):
if self.set==0:
return len(self.trackids_train)
elif self.set==1:
return len(self.trackids_valid)
else:
return len(self.trackids_test)
class M5(pl.LightningModule):
'''
M5 neural net taken from: https://pytorch.org/tutorials/intermediate/speech_command_recognition_with_torchaudio.html
'''
def __init__(self, n_input=1, n_output=10, stride=8, n_channel=32):
super().__init__()
#### network
self.conv1 = torch.nn.Conv1d(n_input, n_channel, kernel_size=80, stride=stride)
self.bn1 = torch.nn.BatchNorm1d(n_channel)
self.pool1 = torch.nn.MaxPool1d(4)
self.conv2 = torch.nn.Conv1d(n_channel, n_channel, kernel_size=3)
self.bn2 = torch.nn.BatchNorm1d(n_channel)
self.pool2 = torch.nn.MaxPool1d(4)
self.conv3 = torch.nn.Conv1d(n_channel, 2 * n_channel, kernel_size=3)
self.bn3 = torch.nn.BatchNorm1d(2 * n_channel)
self.pool3 = torch.nn.MaxPool1d(4)
self.conv4 = torch.nn.Conv1d(2 * n_channel, 2 * n_channel, kernel_size=3)
self.bn4 = torch.nn.BatchNorm1d(2 * n_channel)
self.pool4 = torch.nn.MaxPool1d(4)
self.fc1 = torch.nn.Linear(2 * n_channel, n_output)
#### metrics
self.train_acc = pl.metrics.Accuracy()
self.valid_acc = pl.metrics.Accuracy()
self.test_acc = pl.metrics.Accuracy()
self.test_cm = pl.metrics.classification.ConfusionMatrix(num_classes=n_output)
def forward_function(self, x):
x = self.conv1(x)
x = torch.nn.functional.relu(self.bn1(x))
x = self.pool1(x)
x = self.conv2(x)
x = torch.nn.functional.relu(self.bn2(x))
x = self.pool2(x)
x = self.conv3(x)
x = torch.nn.functional.relu(self.bn3(x))
x = self.pool3(x)
x = self.conv4(x)
x = torch.nn.functional.relu(self.bn4(x))
x = self.pool4(x)
# x = torch.nn.functional.avg_pool1d(x) #, kernel_size=x.shape[-1],stride=1
x = x.permute(0, 2, 1)
x = self.fc1(x)
return torch.nn.functional.log_softmax(x, dim=2).squeeze(1)
def training_step(self, batch, batch_idx):
waveform, label = batch
output = self.forward_function(waveform)
### why log softmax and nll loss: https://ljvmiranda921.github.io/notebook/2017/08/13/softmax-and-the-negative-log-likelihood/
loss = torch.nn.functional.nll_loss(output, label)
self.log('train_loss', loss)
self.train_acc(output, label)
self.log('train_acc', self.train_acc, on_step=True, on_epoch=True)
return loss
def validation_step(self, batch, batch_idx):
waveform, label = batch
output = self.forward_function(waveform)
loss = torch.nn.functional.nll_loss(output, label)
self.log('val_loss', loss)
self.valid_acc(output, label)
self.log('valid_acc', self.valid_acc, on_step=True, on_epoch=True)
def test_step(self, batch, batch_idx):
waveform, label = batch
output = self.forward_function(waveform)
loss = torch.nn.functional.nll_loss(output, label)
self.log('test_loss', loss)
self.test_acc(output, label)
self.log('test_acc', self.test_acc, on_step=True, on_epoch=False)
self.test_cm(output, label)
def training_epoch_end(self, outputs):
# log epoch metric
self.log('train_acc', self.train_acc.compute(), prog_bar=True)
def validation_epoch_end(self, outputs):
self.log('val_acc', self.valid_acc.compute(), prog_bar=True)
def get_progress_bar_dict(self):
# don't show the version number
items = super().get_progress_bar_dict()
items.pop("v_num", None)
return items
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=4e-2,weight_decay=0.0001)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1) # reduce the learning after 10 epochs by a factor of 10
return [optimizer], [scheduler]
#### Init the Mridangam stroke dataset
data_home='/Volumes/Macintosh HD 2/Documents/git/mirdata/tests/resources/mir_datasets_full/mridangam_stroke'
mridangam = mirdata.initialize("mridangam_stroke") #,data_home=data_home
download = False
if download:
mridangam.download()
random_seed=0
pl.utilities.seed.seed_everything(seed=random_seed)
#### Pytorch dataset loaders
train_dataset = MridangamDataset(mirdataset=mridangam, subset=0, random_seed=random_seed)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, num_workers=24, pin_memory=True)
valid_dataset = MridangamDataset(mirdataset=mridangam, subset=1, random_seed=random_seed)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=128, num_workers=24, pin_memory=True)
test_dataset = MridangamDataset(mirdataset=mridangam, subset=2, random_seed=random_seed)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=128, num_workers=24, pin_memory=True)
### Which batch size/learning rate?
# Theory suggests that when multiplying the batch size by k, one should multiply the learning rate by sqrt(k) to keep the variance of the gradient estimate constant. See page 5 of A. Krizhevsky, One weird trick for parallelizing convolutional neural networks: https://arxiv.org/abs/1404.5997
# However, recent experiments with large mini-batches suggest a simpler linear scaling rule, i.e. multiply the learning rate by k when using a mini-batch size of kN. See P. Goyal et al.: Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour https://arxiv.org/abs/1706.02677
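# A small worked example of the two scaling rules (hypothetical numbers; the
# base values below are assumptions, not taken from either paper):
#   base_lr, base_batch, new_batch = 4e-2, 128, 512
#   k = new_batch / base_batch            # k = 4
#   sqrt_scaled_lr = base_lr * k ** 0.5   # sqrt(k) rule (Krizhevsky): 0.08
#   linear_scaled_lr = base_lr * k        # linear rule (Goyal et al.): 0.16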
#### Initialize the model
model = M5(n_input=train_dataset[0][0].shape[0], n_output=len(train_dataset.labels))
#### Initialize a trainer
trainer = pl.Trainer(gpus=1, max_epochs=3, progress_bar_refresh_rate=5)
#### Train the model
trainer.fit(model, train_loader, valid_loader)
#### Compute metrics on the test set
trainer.test(test_dataloaders=test_loader)
#### Compute confusion matrix on the test set
confusion_matrix = model.test_cm.compute().cpu().numpy()
import matplotlib.pyplot as plt
plt.matshow(confusion_matrix)
plt.show()
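# Optional: per-class accuracy from the confusion matrix (a minimal sketch,
# assuming rows index the true labels and that test_dataset.labels holds the
# class names in index order):
#   per_class_acc = confusion_matrix.diagonal() / confusion_matrix.sum(axis=1)
#   for name, acc in zip(test_dataset.labels, per_class_acc):
#       print(f'{name}: {acc:.2%}')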
import pdb; pdb.set_trace()  # drop into the debugger for interactive inspection
| [
"pytorch_lightning.Trainer",
"torch.optim.lr_scheduler.StepLR",
"sklearn.model_selection.train_test_split",
"numpy.floor",
"torch.nn.MaxPool1d",
"pytorch_lightning.utilities.seed.seed_everything",
"pytorch_lightning.metrics.classification.ConfusionMatrix",
"torchaudio.info",
"torch.utils.data.DataLo... | [((7115, 7153), 'mirdata.initialize', 'mirdata.initialize', (['"""mridangam_stroke"""'], {}), "('mridangam_stroke')\n", (7133, 7153), False, 'import mirdata\n'), ((7247, 7298), 'pytorch_lightning.utilities.seed.seed_everything', 'pl.utilities.seed.seed_everything', ([], {'seed': 'random_seed'}), '(seed=random_seed)\n', (7280, 7298), True, 'import pytorch_lightning as pl\n'), ((7433, 7528), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': '(128)', 'num_workers': '(24)', 'pin_memory': '(True)'}), '(train_dataset, batch_size=128, num_workers=24,\n pin_memory=True)\n', (7460, 7528), False, 'import torch\n'), ((7642, 7737), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_dataset'], {'batch_size': '(128)', 'num_workers': '(24)', 'pin_memory': '(True)'}), '(valid_dataset, batch_size=128, num_workers=24,\n pin_memory=True)\n', (7669, 7737), False, 'import torch\n'), ((7833, 7928), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valid_dataset'], {'batch_size': '(128)', 'num_workers': '(24)', 'pin_memory': '(True)'}), '(valid_dataset, batch_size=128, num_workers=24,\n pin_memory=True)\n', (7860, 7928), False, 'import torch\n'), ((8677, 8738), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'gpus': '(1)', 'max_epochs': '(3)', 'progress_bar_refresh_rate': '(5)'}), '(gpus=1, max_epochs=3, progress_bar_refresh_rate=5)\n', (8687, 8738), True, 'import pytorch_lightning as pl\n'), ((9025, 9054), 'matplotlib.pyplot.matshow', 'plt.matshow', (['confusion_matrix'], {}), '(confusion_matrix)\n', (9036, 9054), True, 'import matplotlib.pyplot as plt\n'), ((9055, 9065), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9063, 9065), True, 'import matplotlib.pyplot as plt\n'), ((9078, 9093), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (9091, 9093), False, 'import pdb\n'), ((958, 1088), 'sklearn.model_selection.train_test_split', 'sklearn.model_selection.train_test_split', (['self.track_ids'], {'train_size': '(1 - test_split)', 'random_state': 'random_seed', 'stratify': 'labels'}), '(self.track_ids, train_size=1 -\n test_split, random_state=random_seed, stratify=labels)\n', (998, 1088), False, 'import sklearn\n'), ((1134, 1262), 'sklearn.model_selection.train_test_split', 'sklearn.model_selection.train_test_split', (['self.track_ids'], {'train_size': 'train_split', 'random_state': 'random_seed', 'stratify': 'labels'}), '(self.track_ids, train_size=\n train_split, random_state=random_seed, stratify=labels)\n', (1174, 1262), False, 'import sklearn\n'), ((1647, 1680), 'torchaudio.info', 'torchaudio.info', (['track.audio_path'], {}), '(track.audio_path)\n', (1662, 1680), False, 'import torchaudio\n'), ((2241, 2326), 'torchaudio.load', 'torchaudio.load', ([], {'filepath': 'track.audio_path', 'offset': 'offset', 'num_frames': 'num_frames'}), '(filepath=track.audio_path, offset=offset, num_frames=num_frames\n )\n', (2256, 2326), False, 'import torchaudio\n'), ((3365, 3431), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['n_input', 'n_channel'], {'kernel_size': '(80)', 'stride': 'stride'}), '(n_input, n_channel, kernel_size=80, stride=stride)\n', (3380, 3431), False, 'import torch\n'), ((3451, 3482), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['n_channel'], {}), '(n_channel)\n', (3471, 3482), False, 'import torch\n'), ((3504, 3525), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(4)'], {}), '(4)\n', (3522, 3525), False, 'import torch\n'), ((3547, 3599), 'torch.nn.Conv1d', 'torch.nn.Conv1d', 
(['n_channel', 'n_channel'], {'kernel_size': '(3)'}), '(n_channel, n_channel, kernel_size=3)\n', (3562, 3599), False, 'import torch\n'), ((3619, 3650), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['n_channel'], {}), '(n_channel)\n', (3639, 3650), False, 'import torch\n'), ((3672, 3693), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(4)'], {}), '(4)\n', (3690, 3693), False, 'import torch\n'), ((3715, 3771), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['n_channel', '(2 * n_channel)'], {'kernel_size': '(3)'}), '(n_channel, 2 * n_channel, kernel_size=3)\n', (3730, 3771), False, 'import torch\n'), ((3791, 3826), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(2 * n_channel)'], {}), '(2 * n_channel)\n', (3811, 3826), False, 'import torch\n'), ((3848, 3869), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(4)'], {}), '(4)\n', (3866, 3869), False, 'import torch\n'), ((3891, 3951), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(2 * n_channel)', '(2 * n_channel)'], {'kernel_size': '(3)'}), '(2 * n_channel, 2 * n_channel, kernel_size=3)\n', (3906, 3951), False, 'import torch\n'), ((3971, 4006), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(2 * n_channel)'], {}), '(2 * n_channel)\n', (3991, 4006), False, 'import torch\n'), ((4028, 4049), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(4)'], {}), '(4)\n', (4046, 4049), False, 'import torch\n'), ((4069, 4109), 'torch.nn.Linear', 'torch.nn.Linear', (['(2 * n_channel)', 'n_output'], {}), '(2 * n_channel, n_output)\n', (4084, 4109), False, 'import torch\n'), ((4157, 4178), 'pytorch_lightning.metrics.Accuracy', 'pl.metrics.Accuracy', ([], {}), '()\n', (4176, 4178), True, 'import pytorch_lightning as pl\n'), ((4204, 4225), 'pytorch_lightning.metrics.Accuracy', 'pl.metrics.Accuracy', ([], {}), '()\n', (4223, 4225), True, 'import pytorch_lightning as pl\n'), ((4250, 4271), 'pytorch_lightning.metrics.Accuracy', 'pl.metrics.Accuracy', ([], {}), '()\n', (4269, 4271), True, 'import pytorch_lightning as pl\n'), ((4295, 4358), 'pytorch_lightning.metrics.classification.ConfusionMatrix', 'pl.metrics.classification.ConfusionMatrix', ([], {'num_classes': 'n_output'}), '(num_classes=n_output)\n', (4336, 4358), True, 'import pytorch_lightning as pl\n'), ((5290, 5333), 'torch.nn.functional.nll_loss', 'torch.nn.functional.nll_loss', (['output', 'label'], {}), '(output, label)\n', (5318, 5333), False, 'import torch\n'), ((5650, 5693), 'torch.nn.functional.nll_loss', 'torch.nn.functional.nll_loss', (['output', 'label'], {}), '(output, label)\n', (5678, 5693), False, 'import torch\n'), ((5982, 6025), 'torch.nn.functional.nll_loss', 'torch.nn.functional.nll_loss', (['output', 'label'], {}), '(output, label)\n', (6010, 6025), False, 'import torch\n'), ((6788, 6855), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': '(10)', 'gamma': '(0.1)'}), '(optimizer, step_size=10, gamma=0.1)\n', (6819, 6855), False, 'import torch\n'), ((2703, 2761), 'torchaudio.transforms.Resample', 'torchaudio.transforms.Resample', (['sample_rate', 'self.resample'], {}), '(sample_rate, self.resample)\n', (2733, 2761), False, 'import torchaudio\n'), ((1959, 2006), 'random.uniform', 'random.uniform', (['(0)', '(duration - self.seq_duration)'], {}), '(0, duration - self.seq_duration)\n', (1973, 2006), False, 'import random\n'), ((2077, 2106), 'numpy.floor', 'np.floor', (['(start * sample_rate)'], {}), '(start * sample_rate)\n', (2085, 2106), True, 'import numpy as np\n'), ((2137, 2178), 'numpy.floor', 'np.floor', (['(self.seq_duration * sample_rate)'], 
{}), '(self.seq_duration * sample_rate)\n', (2145, 2178), True, 'import numpy as np\n'), ((2574, 2648), 'torch.nn.ConstantPad2d', 'torch.nn.ConstantPad2d', (['(0, seq_duration_samples - total_samples, 0, 0)', '(0)'], {}), '((0, seq_duration_samples - total_samples, 0, 0), 0)\n', (2596, 2648), False, 'import torch\n'), ((4958, 4999), 'torch.nn.functional.log_softmax', 'torch.nn.functional.log_softmax', (['x'], {'dim': '(2)'}), '(x, dim=2)\n', (4989, 4999), False, 'import torch\n')] |