code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import cv2
import numpy as np
from torchvision.transforms import ColorJitter
from PIL import Image
class Compose(object):
    """Chain several joint image/keypoint transforms into one callable.

    Each transform must accept (img, kpts) and return the transformed pair.
    When called without keypoints, only the image is returned.
    """
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img, kpts=None):
        for transform in self.transforms:
            img, kpts = transform(img, kpts)
        # Preserve the single-argument calling convention: no kpts in, no kpts out.
        return img if kpts is None else (img, kpts)

    def __repr__(self):
        pieces = [self.__class__.__name__ + "("]
        pieces.extend("\n    {0}".format(t) for t in self.transforms)
        pieces.append("\n)")
        return "".join(pieces)
class ToTensor(object):
    """Rescale pixel values from [0, 255] into [0, 1]; keypoints pass through."""
    def __call__(self, img, kpts):
        scaled = img / 255.
        return scaled, kpts
class Normalize(object):
    """Standardize an image with fixed mean/std.

    NOTE: uses in-place `-=` / `/=`, so when `img` is a float numpy array
    the caller's array is mutated.
    """
    def __init__(self, mean, std):
        # mean/std may be scalars or arrays broadcastable against img
        self.mean = mean
        self.std = std
    def __call__(self, img, kpts):
        img -= self.mean
        img /= self.std
        return img, kpts
class JpegCompress(object):
    """Randomly (p=0.5) re-encode the image as JPEG to simulate compression artifacts."""
    def __init__(self, quality_low=15, quality_high=75):
        # quality range passed to the JPEG encoder (lower = more artifacts)
        self.quality_low = quality_low
        self.quality_high = quality_high
    def __call__(self, img, kpts):
        # 50% chance to skip the augmentation entirely
        if np.random.uniform(0, 1) < 0.5:
            return img, kpts
        # random quality in [quality_low, quality_high)
        quality = np.random.randint(self.quality_low, self.quality_high)
        encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
        # encode/decode round trip; `result` (success flag) is ignored
        result, encimg = cv2.imencode('.jpg', img, encode_param)
        img = cv2.imdecode(encimg, 1)
        return img, kpts
class GaussianBlur(object):
    """Randomly (p=0.5) apply Gaussian blur with a kernel size drawn from blur_range."""
    def __init__(self, blur_range=(3, 5, 7, 9, 11)):
        self.blur_range = blur_range

    def __call__(self, img, kpts):
        if np.random.uniform(0, 1) < 0.5:
            return img, kpts
        # The weighted draw matches the original defaults; fall back to a
        # uniform draw when a custom blur_range of another length is given
        # (the hard-coded 5-element weights would otherwise raise ValueError).
        weights = (0.4, 0.3, 0.2, 0.05, 0.05) if len(self.blur_range) == 5 else None
        # cv2.GaussianBlur requires a plain int kernel size; np.random.choice
        # with size=1 returned a length-1 ndarray, so cast explicitly.
        ksize = int(np.random.choice(self.blur_range, p=weights))
        return cv2.GaussianBlur(img, (ksize, ksize), 0), kpts
class AddNoise(object):
    """Randomly (p~0.34) corrupt the image with either additive Gaussian
    noise (75% of those cases) or a random horizontal/vertical motion blur."""
    def __call__(self, img, kpts):
        if np.random.uniform(0, 1) < 0.66:
            return img, kpts
        # gaussian noise
        if np.random.uniform(0, 1) < 0.75:
            row, col, ch = img.shape
            mean = 0
            # random variance in [0, 0.3*256)
            var = np.random.rand(1) * 0.3 * 256
            sigma = var**0.5
            gauss = sigma * np.random.randn(row,col) + mean
            # replicate the same noise plane across all channels
            gauss = np.repeat(gauss[:, :, np.newaxis], ch, axis=2)
            img = img + gauss
            img = np.clip(img, 0, 255)
            img = img.astype(np.uint8)
        else:
            # motion blur
            sizes = [3, 5, 7, 9]
            size = sizes[int(np.random.randint(len(sizes), size=1))]
            kernel_motion_blur = np.zeros((size, size))
            # 50/50: horizontal or vertical blur (ones on the center row/column)
            if np.random.rand(1) < 0.5:
                kernel_motion_blur[int((size-1)/2), :] = np.ones(size)
            else:
                kernel_motion_blur[:, int((size-1)/2)] = np.ones(size)
            kernel_motion_blur = kernel_motion_blur / size
            img = cv2.filter2D(img, -1, kernel_motion_blur)
        return img, kpts
class Jitter(object):
    """Randomly (p~0.34) apply torchvision ColorJitter to the image; keypoints unchanged."""
    def __init__(self, brightness=0.5, contrast=0.2, saturation=0.2, hue=0.2):
        self.jitter = ColorJitter(brightness, contrast, saturation, hue)
    def __call__(self, img, kpts):
        if np.random.uniform(0, 1) < 0.66:
            return img, kpts
        # PIL round trip: ColorJitter operates on PIL images, not ndarrays
        img = np.asarray(self.jitter(Image.fromarray(img)))
        return img, kpts
| [
"torchvision.transforms.ColorJitter",
"numpy.random.uniform",
"cv2.GaussianBlur",
"cv2.filter2D",
"numpy.random.randn",
"cv2.imdecode",
"numpy.zeros",
"numpy.ones",
"numpy.clip",
"PIL.Image.fromarray",
"numpy.random.randint",
"cv2.imencode",
"numpy.random.choice",
"numpy.random.rand",
"n... | [((1262, 1316), 'numpy.random.randint', 'np.random.randint', (['self.quality_low', 'self.quality_high'], {}), '(self.quality_low, self.quality_high)\n', (1279, 1316), True, 'import numpy as np\n'), ((1406, 1445), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'img', 'encode_param'], {}), "('.jpg', img, encode_param)\n", (1418, 1445), False, 'import cv2\n'), ((1460, 1483), 'cv2.imdecode', 'cv2.imdecode', (['encimg', '(1)'], {}), '(encimg, 1)\n', (1472, 1483), False, 'import cv2\n'), ((1753, 1820), 'numpy.random.choice', 'np.random.choice', (['self.blur_range', '(1)'], {'p': '(0.4, 0.3, 0.2, 0.05, 0.05)'}), '(self.blur_range, 1, p=(0.4, 0.3, 0.2, 0.05, 0.05))\n', (1769, 1820), True, 'import numpy as np\n'), ((3123, 3173), 'torchvision.transforms.ColorJitter', 'ColorJitter', (['brightness', 'contrast', 'saturation', 'hue'], {}), '(brightness, contrast, saturation, hue)\n', (3134, 3173), False, 'from torchvision.transforms import ColorJitter\n'), ((1183, 1206), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1200, 1206), True, 'import numpy as np\n'), ((1676, 1699), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1693, 1699), True, 'import numpy as np\n'), ((1836, 1876), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img', '(sigma, sigma)', '(0)'], {}), '(img, (sigma, sigma), 0)\n', (1852, 1876), False, 'import cv2\n'), ((1955, 1978), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1972, 1978), True, 'import numpy as np\n'), ((2053, 2076), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2070, 2076), True, 'import numpy as np\n'), ((2300, 2346), 'numpy.repeat', 'np.repeat', (['gauss[:, :, np.newaxis]', 'ch'], {'axis': '(2)'}), '(gauss[:, :, np.newaxis], ch, axis=2)\n', (2309, 2346), True, 'import numpy as np\n'), ((2395, 2415), 'numpy.clip', 'np.clip', (['img', '(0)', '(255)'], {}), '(img, 0, 255)\n', (2402, 2415), True, 'import numpy as 
np\n'), ((2630, 2652), 'numpy.zeros', 'np.zeros', (['(size, size)'], {}), '((size, size))\n', (2638, 2652), True, 'import numpy as np\n'), ((2930, 2971), 'cv2.filter2D', 'cv2.filter2D', (['img', '(-1)', 'kernel_motion_blur'], {}), '(img, -1, kernel_motion_blur)\n', (2942, 2971), False, 'import cv2\n'), ((3221, 3244), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3238, 3244), True, 'import numpy as np\n'), ((2668, 2685), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (2682, 2685), True, 'import numpy as np\n'), ((2750, 2763), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (2757, 2763), True, 'import numpy as np\n'), ((2839, 2852), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (2846, 2852), True, 'import numpy as np\n'), ((3320, 3340), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (3335, 3340), False, 'from PIL import Image\n'), ((2161, 2178), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (2175, 2178), True, 'import numpy as np\n'), ((2248, 2273), 'numpy.random.randn', 'np.random.randn', (['row', 'col'], {}), '(row, col)\n', (2263, 2273), True, 'import numpy as np\n')] |
import numpy as np
def _calc_A_min_max(tx_min, tx_max, rx_min, rx_max, gT=1.0, gR=0.6, window=7):
"""Calculate rain rate from attenuation using the A-R Relationship
Parameters
----------
gT : float, optional
induced bias
gR : float, optional
induced bias
window: int, optional
number of previous measurements to use for zero-level calculation
Returns
-------
float or iterable of float
Ar_max
Note
----
Based on: "Empirical Study of the Quantization Bias Effects in
Commercial Microwave Links Min/Max Attenuation
Measurements for Rain Monitoring" by <NAME>., <NAME>.
"""
# quantization bias correction
Ac_max = tx_max - rx_min - (gT + gR) / 2
Ac_min = tx_min - rx_max + (gT + gR) / 2
Ac_max[np.isnan(Ac_max)] = np.rint(np.nanmean(Ac_max))
Ac_min[np.isnan(Ac_min)] = np.rint(np.nanmean(Ac_min))
# zero-level calculation
Ar_max = np.full(Ac_max.shape, 0.0)
for i in range(window, len(Ac_max)):
Ar_max[i] = Ac_max[i] - Ac_min[i - window : i + 1].min()
Ar_max[Ar_max < 0.0] = 0.0
Ar_max[0:window] = np.nan
return Ar_max
| [
"numpy.full",
"numpy.isnan",
"numpy.nanmean"
] | [((953, 979), 'numpy.full', 'np.full', (['Ac_max.shape', '(0.0)'], {}), '(Ac_max.shape, 0.0)\n', (960, 979), True, 'import numpy as np\n'), ((803, 819), 'numpy.isnan', 'np.isnan', (['Ac_max'], {}), '(Ac_max)\n', (811, 819), True, 'import numpy as np\n'), ((831, 849), 'numpy.nanmean', 'np.nanmean', (['Ac_max'], {}), '(Ac_max)\n', (841, 849), True, 'import numpy as np\n'), ((862, 878), 'numpy.isnan', 'np.isnan', (['Ac_min'], {}), '(Ac_min)\n', (870, 878), True, 'import numpy as np\n'), ((890, 908), 'numpy.nanmean', 'np.nanmean', (['Ac_min'], {}), '(Ac_min)\n', (900, 908), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import math
''' Get_X_Region - Functions '''
'''
get_rotated_region(data[],labels[],region_size, region_count_per_sample)
Description:
Takes multiple samples, the size of the regions to be cut and
the amount of regions to be extracted from each sample.
Then rotate each sample randomly and cut regions from the
centered circle with radius sample_size/2 (~21% loss compared to
rotation specific discrimination method)
[Works only with squares (a*b => b=a)]
Returns:
Regions for each image in order of samples
ret data_regions,label_regions
'''
def get_rotated_region(data, labels,position_matricies, region_size, region_count_per_sample):
    """Randomly rotate each sample and cut `region_count_per_sample` square
    regions from the inscribed circle; also counts annotated positions per
    region. Returns (data_regions, label_regions, cell_counts)."""
    # Returns a random xy pos in a circle with center-offset
    # of o_x,o_y and a radius of max_r
    def get_random_circular_xy(o_x, o_y,max_r):
        r = np.random.uniform(0,max_r)
        angle = np.random.uniform(0,math.pi*2.0)
        x=o_x+r*math.sin(angle)
        y=o_y+r*math.cos(angle)
        return x,y
    sample_count = len(data)
    # output buffers: one (region_size, region_size, 1) slot per region
    data_regions = np.zeros((sample_count*region_count_per_sample,region_size,region_size,1))
    label_regions = np.zeros((sample_count*region_count_per_sample,region_size,region_size,1))
    cell_counts = np.zeros((sample_count*region_count_per_sample),dtype=np.int32)
    for s in range(sample_count):
        width,height, channels = get_dims_hwc(data[s])
        # Circular transform variables
        region_r = math.sqrt(2.0*math.pow(region_size,2))/2.0 # radius of the circle containing the region_size rectangle
        max_r = width / 2.0 - region_r # max radius for random radius selection
        # Rotate sample randomly
        matrix = cv2.getRotationMatrix2D((width/2,height/2),np.random.randint(0,360),1)
        data_r = cv2.warpAffine(data[s],matrix,(width,height))
        label_r = cv2.warpAffine(labels[s],matrix,(width,height))
        if len(position_matricies[s]) > 0:
            # positions appear to be normalized to [0,1] (scaled by w/h here)
            # -- TODO confirm against the data loader
            position_matrix = np.multiply(position_matricies[s],np.asarray([width,height]))
            # rotate the annotated positions with the same affine matrix
            position_matrix = affine(position_matrix,matrix)
        # reshape
        data_r = data_r.reshape((height,width,1))
        label_r = label_r.reshape((height,width,1))
        for r in range(region_count_per_sample):
            # Calculate start xy of region
            cell_count_r = 0
            circle_x, circle_y = get_random_circular_xy(width/2,height/2,max_r)
            start_x = int(circle_x - region_size/2) # Start offset of region
            start_y = int(circle_y - region_size/2) # x,y
            # Cut out region
            data_region = data_r[start_y:start_y+region_size,
                                 start_x:start_x+region_size]
            label_region = label_r[start_y:start_y+region_size,
                                   start_x:start_x+region_size]
            if len(position_matricies[s]) > 0:
                # count positions inside the region (with a small margin)
                border_room_px = 3
                for i in range(len(position_matrix)):
                    if position_matrix[i,0] >= start_x-border_room_px and \
                       position_matrix[i,0] <= start_x+region_size+border_room_px and \
                       position_matrix[i,1] >= start_y-border_room_px and \
                       position_matrix[i,1] <= start_y+region_size+border_room_px:
                        cell_count_r+=1
            idx = s*region_count_per_sample+r
            data_regions[idx] = data_region
            label_regions[idx] = label_region
            cell_counts[idx] = cell_count_r
    return data_regions,label_regions, cell_counts
def get_all_regions(data,labels,position_matricies,region_size):
    """Tile every sample into a regular grid of region_size x region_size crops
    (remainder pixels are dropped). Returns (data_r, label_r, cell_count_r)
    where cell_count_r[i] counts annotated positions inside region i."""
    width,height,channels = get_dims_hwc(data[0])
    # how many whole regions fit per axis
    sample_rows = int(math.floor(float(height)/float(region_size)))
    sample_cols = int(math.floor(float(width)/float(region_size)))
    regions_per_sample = sample_rows*sample_cols
    data_r = np.zeros((regions_per_sample*len(data),region_size,region_size,channels),dtype=np.float64)
    label_r = np.zeros((regions_per_sample*len(labels),region_size,region_size,1),dtype=np.float64)
    cell_count_r = np.zeros([regions_per_sample*len(labels),1],dtype=np.float32)
    for i in range(len(data)):
        if len(position_matricies[i]) > 0:
            # positions appear to be normalized to [0,1] (scaled by w/h here)
            # -- TODO confirm against the data loader
            position_matrix = np.multiply(position_matricies[i],np.asarray([width,height]))
        for y in range(sample_rows):
            for x in range(sample_cols):
                c_data_r = data[i][y*region_size:y*region_size+region_size, x*region_size:x*region_size+region_size]
                c_label_r = labels[i][y*region_size:y*region_size+region_size, x*region_size:x*region_size+region_size]
                c_cell_count_r = np.asarray([0])
                if len(position_matricies[i]) > 0:
                    # count positions inside (or on the border of) this region
                    border_room_px = 0
                    for j in range(len(position_matrix)):
                        if position_matrix[j,0] >= x*region_size-border_room_px and \
                           position_matrix[j,0] <= x*region_size+region_size+border_room_px and \
                           position_matrix[j,1] >= y*region_size-border_room_px and \
                           position_matrix[j,1] <= y*region_size+region_size+border_room_px:
                            c_cell_count_r[0] +=1
                idx = i*regions_per_sample+y*sample_cols+x
                data_r[idx] = c_data_r
                label_r[idx] = c_label_r
                cell_count_r[idx] = c_cell_count_r
    return data_r,label_r, cell_count_r
def as_numpy_array_of_shape(data,labels,width,height,channels):
    """Copy data/labels into uint8 arrays, resizing and channel-adjusting
    each sample as needed.

    NOTE(review): the output buffers are allocated as [N, width, height, C]
    while cv2.resize produces (height, width) samples; this only matches
    when width == height — confirm for non-square targets.
    """
    data_arr = np.zeros([len(data),width,height,channels],dtype=np.uint8)
    label_arr = np.zeros([len(labels),width,height,channels],dtype=np.uint8)
    for i in range(len(data)):
        # copy so the originals are never modified
        d,l = data[i].copy(),labels[i].copy()
        # Resize
        if d.shape[0] != height or d.shape[1] != width:
            d = cv2.resize(d,(width,height),interpolation=cv2.INTER_CUBIC)
        if l.shape[0] != height or l.shape[1] != width:
            l = cv2.resize(l,(width,height),interpolation=cv2.INTER_CUBIC)
        # Grayscale
        d=adjust_channels(d,channels)
        l=adjust_channels(l,channels)
        data_arr[i] = d
        label_arr[i] = l
    return data_arr,label_arr
''' Augmentations '''
def zca_whitening(data):
    """Apply ZCA whitening to each sample independently (in place).

    Each sample is flattened to a 1 x d row vector, its (1 x 1) covariance
    is decomposed by SVD, and the sample is multiplied by the resulting
    whitening matrix.

    NOTE(review): `np.diag(S)` inside the sqrt turns the 1-D singular values
    into a matrix before adding epsilon; textbook ZCA uses
    np.diag(1.0/np.sqrt(S + epsilon)) on the 1-D values. Because sigma is
    1 x 1 here the two coincide, but this would be wrong for multi-row
    covariance matrices — confirm before generalizing.
    """
    # Flatten data
    data_1d,shape = vector_array_n_to_1d(data)
    for i in range(shape[0]):
        inputs = data_1d[i].reshape((1,len(data_1d[i])))
        # Compute zca_matrix
        sigma = np.dot(inputs,inputs.T)/inputs.shape[1]
        U,S,V = np.linalg.svd(sigma)
        epsilon = 0.1
        zca_matrix = np.dot(np.dot(U, np.diag(1.0/np.sqrt(np.diag(S) + epsilon))), U.T)
        # Apply zca_matrix to input vectors
        res = np.dot(zca_matrix,inputs)
        data_1d[i] = res
    # Reshape input_vectors
    return vector_array_1d_to_n(data_1d,shape)
def color_jitter(data, min_value=0.0, max_value=0.2):
    """Add per-element uniform noise in [min_value, max_value) to `data`
    and return the result as a new array."""
    noise = np.random.uniform(min_value, max_value, size=get_shape_size(data)).reshape(data.shape)
    return np.add(data, noise)
''' Helpers '''
def adjust_channels(im, channels):
    """Coerce image `im` to the requested channel count (1 or 3).

    Unhandled combinations (e.g. already matching) return `im` unchanged.
    """
    # Gray => Color
    if (len(im.shape) == 2 or im.shape[-1] == 1) and channels == 3:
        im = cv2.cvtColor(im,cv2.COLOR_GRAY2BGR)
    # Gray [y,x] => Gray[y,x,1]
    elif len(im.shape) == 2 and channels == 1:
        im = im.reshape([im.shape[0],im.shape[1],channels])
    # Color => Gray
    elif len(im.shape) == 3 and channels == 1:
        im = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY).reshape([im.shape[0],im.shape[1],channels])
    return im
def affine(points, matrix):
    """Apply a 2x3 affine transform `matrix` to an (N, 2) array of points."""
    homogeneous = np.c_[points, np.ones(points.shape[0])]
    return homogeneous @ matrix.T
def show_image_hwc(image,win_name="image",width=600,height=600,wait_key=True, destroy_window=True):
    """Display an HWC image in a resizable OpenCV window.

    Blocks until a key press when wait_key is True; optionally destroys all
    OpenCV windows afterwards.
    """
    cv2.namedWindow(win_name,cv2.WINDOW_NORMAL)
    cv2.resizeWindow(win_name, width,height)
    cv2.imshow(win_name,image)
    if wait_key: cv2.waitKey(0)
    if destroy_window: cv2.destroyAllWindows()
def get_dims_hwc(image):
    """Return (width, height, channels) of an HWC-ordered image array."""
    h, w, c = image.shape[0], image.shape[1], image.shape[2]
    return w, h, c
# Returns the multiplied size of the shape
def get_shape_size(array):
    """Return the total number of elements described by `array.shape`.

    Replaces the manual accumulation loop with a vectorized product; an
    empty shape (0-d array) yields 1, matching the original behavior.
    """
    return int(np.prod(array.shape, dtype=np.int64))
# input numpy array
# Ret: 1d_vector, original_shape
def vector_array_n_to_1d(vector):
    """Flatten each sample of an N-d array to one row.

    Returns (flat_vector, original_shape) where flat_vector has shape
    (n_samples, elements_per_sample).

    Fixes two Py2-era bugs: `vector.flatten(0)` passed an invalid order
    argument (modern numpy expects 'C'/'F'/'A'/'K'), and
    `len(flat_vec)/ori_shape[0]` is float division under Python 3, which
    breaks the subsequent reshape. `reshape(n, -1)` handles both.
    """
    ori_shape = vector.shape
    flat_vec = vector.reshape(ori_shape[0], -1)
    return flat_vec, ori_shape
# input numpy array, shape
# Ret: nd_vector with shape shape
def vector_array_1d_to_n(vector, shape):
    """Inverse of vector_array_n_to_1d: restore `vector` to `shape`."""
    return vector.reshape(shape)
def normalize_uint8(data, labels):
    """Scale uint8 images and labels into [0, 1) as float32."""
    def _to_unit(a):
        return (a / 256.0).astype(np.float32)
    return _to_unit(data), _to_unit(labels)
def denormalize_float32(data, labels):
    """Inverse of normalize_uint8: map [0, 1) float images back to uint8.

    Fix: the original clipped BEFORE scaling (np.clip(x, 0, 255) * 256.0),
    so any value in (255/256, 255] produced a result > 255 that wrapped
    around on the uint8 cast. Scale first, then clip to the byte range.
    """
    data = np.clip(data * 256.0, 0, 255).astype(np.uint8)
    labels = np.clip(labels * 256.0, 0, 255).astype(np.uint8)
    return data, labels
"numpy.ones",
"numpy.clip",
"cv2.warpAffine",
"numpy.linalg.svd",
"numpy.random.randint",
"numpy.diag",
"cv2.imshow",
"math.pow",
"cv2.cvtColor",
"math.cos",
"numpy.add",
"cv2.destroyAllWindows",
"cv2.resize",
"cv2.waitKey",
"numpy.asarray",
"math.sin",
"numpy.dot",
"cv2.resizeWind... | [((1018, 1097), 'numpy.zeros', 'np.zeros', (['(sample_count * region_count_per_sample, region_size, region_size, 1)'], {}), '((sample_count * region_count_per_sample, region_size, region_size, 1))\n', (1026, 1097), True, 'import numpy as np\n'), ((1110, 1189), 'numpy.zeros', 'np.zeros', (['(sample_count * region_count_per_sample, region_size, region_size, 1)'], {}), '((sample_count * region_count_per_sample, region_size, region_size, 1))\n', (1118, 1189), True, 'import numpy as np\n'), ((1200, 1264), 'numpy.zeros', 'np.zeros', (['(sample_count * region_count_per_sample)'], {'dtype': 'np.int32'}), '(sample_count * region_count_per_sample, dtype=np.int32)\n', (1208, 1264), True, 'import numpy as np\n'), ((6158, 6177), 'numpy.add', 'np.add', (['data', 'noise'], {}), '(data, noise)\n', (6164, 6177), True, 'import numpy as np\n'), ((6854, 6898), 'cv2.namedWindow', 'cv2.namedWindow', (['win_name', 'cv2.WINDOW_NORMAL'], {}), '(win_name, cv2.WINDOW_NORMAL)\n', (6869, 6898), False, 'import cv2\n'), ((6899, 6940), 'cv2.resizeWindow', 'cv2.resizeWindow', (['win_name', 'width', 'height'], {}), '(win_name, width, height)\n', (6915, 6940), False, 'import cv2\n'), ((6941, 6968), 'cv2.imshow', 'cv2.imshow', (['win_name', 'image'], {}), '(win_name, image)\n', (6951, 6968), False, 'import cv2\n'), ((839, 866), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'max_r'], {}), '(0, max_r)\n', (856, 866), True, 'import numpy as np\n'), ((876, 911), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(math.pi * 2.0)'], {}), '(0, math.pi * 2.0)\n', (893, 911), True, 'import numpy as np\n'), ((1692, 1740), 'cv2.warpAffine', 'cv2.warpAffine', (['data[s]', 'matrix', '(width, height)'], {}), '(data[s], matrix, (width, height))\n', (1706, 1740), False, 'import cv2\n'), ((1750, 1800), 'cv2.warpAffine', 'cv2.warpAffine', (['labels[s]', 'matrix', '(width, height)'], {}), '(labels[s], matrix, (width, height))\n', (1764, 1800), False, 'import cv2\n'), ((5704, 5724), 
'numpy.linalg.svd', 'np.linalg.svd', (['sigma'], {}), '(sigma)\n', (5717, 5724), True, 'import numpy as np\n'), ((5872, 5898), 'numpy.dot', 'np.dot', (['zca_matrix', 'inputs'], {}), '(zca_matrix, inputs)\n', (5878, 5898), True, 'import numpy as np\n'), ((6332, 6368), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_GRAY2BGR'], {}), '(im, cv2.COLOR_GRAY2BGR)\n', (6344, 6368), False, 'import cv2\n'), ((6983, 6997), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (6994, 6997), False, 'import cv2\n'), ((7018, 7041), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7039, 7041), False, 'import cv2\n'), ((1653, 1678), 'numpy.random.randint', 'np.random.randint', (['(0)', '(360)'], {}), '(0, 360)\n', (1670, 1678), True, 'import numpy as np\n'), ((5109, 5170), 'cv2.resize', 'cv2.resize', (['d', '(width, height)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(d, (width, height), interpolation=cv2.INTER_CUBIC)\n', (5119, 5170), False, 'import cv2\n'), ((5225, 5286), 'cv2.resize', 'cv2.resize', (['l', '(width, height)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(l, (width, height), interpolation=cv2.INTER_CUBIC)\n', (5235, 5286), False, 'import cv2\n'), ((5654, 5678), 'numpy.dot', 'np.dot', (['inputs', 'inputs.T'], {}), '(inputs, inputs.T)\n', (5660, 5678), True, 'import numpy as np\n'), ((920, 935), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (928, 935), False, 'import math\n'), ((946, 961), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (954, 961), False, 'import math\n'), ((1890, 1917), 'numpy.asarray', 'np.asarray', (['[width, height]'], {}), '([width, height])\n', (1900, 1917), True, 'import numpy as np\n'), ((3820, 3847), 'numpy.asarray', 'np.asarray', (['[width, height]'], {}), '([width, height])\n', (3830, 3847), True, 'import numpy as np\n'), ((4146, 4161), 'numpy.asarray', 'np.asarray', (['[0]'], {}), '([0])\n', (4156, 4161), True, 'import numpy as np\n'), ((6714, 6738), 'numpy.ones', 'np.ones', (['points.shape[0]'], {}), 
'(points.shape[0])\n', (6721, 6738), True, 'import numpy as np\n'), ((7897, 7918), 'numpy.clip', 'np.clip', (['data', '(0)', '(255)'], {}), '(data, 0, 255)\n', (7904, 7918), True, 'import numpy as np\n'), ((7952, 7975), 'numpy.clip', 'np.clip', (['labels', '(0)', '(255)'], {}), '(labels, 0, 255)\n', (7959, 7975), True, 'import numpy as np\n'), ((1407, 1431), 'math.pow', 'math.pow', (['region_size', '(2)'], {}), '(region_size, 2)\n', (1415, 1431), False, 'import math\n'), ((6563, 6599), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2GRAY'], {}), '(im, cv2.COLOR_BGR2GRAY)\n', (6575, 6599), False, 'import cv2\n'), ((5793, 5803), 'numpy.diag', 'np.diag', (['S'], {}), '(S)\n', (5800, 5803), True, 'import numpy as np\n')] |
import numpy as np
from numpy.linalg import norm
import sparse_matrices
from reflection import hasArg
def genPerturbation(x):
    """Draw a random perturbation, uniform in [-1, 1], matching x's shape."""
    return np.random.uniform(-1.0, 1.0, size=x.shape)
def preamble(obj, xeval, perturb, fixedVars = []):
    """Common setup for the finite-difference test helpers.

    Fills in defaults (current variables for xeval, a random perturbation),
    zeroes the perturbation on fixed variables, and records obj's current
    state so callers can restore it. Returns (xold, xeval, perturb).
    """
    if (xeval is None): xeval = obj.getVars()
    if (perturb is None): perturb = genPerturbation(xeval)
    xold = obj.getVars()
    # copy so the caller's perturbation array is not modified
    perturb = np.copy(perturb)
    perturb[fixedVars] = 0.0
    return (xold, xeval, perturb)
def setVars(obj, x, customArgs=None):
    """Set obj's variables to x, forwarding whichever customArgs entries
    obj.setVars accepts as keyword arguments.

    Fix: the original called obj.setVars(x) a second time unconditionally,
    immediately discarding the filtered custom arguments it had just passed.
    """
    if customArgs is not None:
        obj.setVars(x, **{k: v for k, v in customArgs.items() if hasArg(obj.setVars, k)})
    else:
        obj.setVars(x)
def evalWithCustomArgs(f, customArgs):
    """Invoke `f`, adapting `customArgs`: a list becomes positional
    arguments, a dict becomes keyword arguments filtered to what `f`
    accepts, None means no arguments, and anything else is passed as a
    single positional argument."""
    if customArgs is None:
        return f()
    if isinstance(customArgs, list):
        return f(*customArgs)
    if isinstance(customArgs, dict):
        accepted = {k: v for k, v in customArgs.items() if hasArg(f, k)}
        return f(**accepted)
    return f(customArgs)
def basisDirection(obj, c):
    """Return the c-th standard basis vector in obj's variable space."""
    direction = np.zeros(obj.numVars())
    direction[c] = 1.0
    return direction
def fdGrad(obj, fd_eps, xeval = None, perturb = None, customArgs = None, fixedVars = []):
    """Central finite-difference directional derivative of obj.energy along
    `perturb` at `xeval`, with step size fd_eps.

    Restores obj's original variables before returning.
    """
    xold, xeval, perturb = preamble(obj, xeval, perturb, fixedVars)
    def evalAt(x):
        setVars(obj, x, customArgs)
        return evalWithCustomArgs(obj.energy, customArgs)
    # symmetric difference: (E(x + eps*p) - E(x - eps*p)) / (2 eps)
    fd_delta_E = (evalAt(xeval + perturb * fd_eps) - evalAt(xeval - perturb * fd_eps)) / (2 * fd_eps)
    setVars(obj, xold, customArgs)
    return fd_delta_E
def validateGrad(obj, fd_eps = 1e-6, xeval = None, perturb = None, customArgs = None, fixedVars = [], g = None):
    """Compare the analytic directional derivative g . perturb against a
    central finite difference of obj.energy.

    Returns (fd_delta_E, analytic_delta_E); restores obj's variables.
    """
    xold, xeval, perturb = preamble(obj, xeval, perturb, fixedVars)
    setVars(obj, xeval, customArgs)
    # allow the caller to pass a precomputed gradient
    if g is None: g = evalWithCustomArgs(obj.gradient, customArgs)
    analytic_delta_E = g.dot(perturb)
    fd_delta_E = fdGrad(obj, fd_eps, xeval, perturb, customArgs, fixedVars)
    setVars(obj, xold, customArgs)
    return (fd_delta_E, analytic_delta_E)
def findBadGradComponent(obj, fd_eps, xeval = None, customArgs = None, fixedVars = [], nprobes = 3):
    """
    Use a simple binary search to hone in on bad components of the gradient.
    This isn't guaranteed to find the worst component, but it should find one
    of the worse ones.
    """
    xold, xeval, perturb = preamble(obj, xeval, None, fixedVars)
    setVars(obj, xeval, customArgs)
    g = evalWithCustomArgs(obj.gradient, customArgs)
    # Determine the total error across `nprobes` perturbations of interval
    # the [lowIdx, upIdx] (inclusive)
    def errForRange(lowIdx, upIdx):
        if lowIdx > upIdx: return 0
        err = 0
        for i in range(nprobes):
            # random perturbation restricted to components [lowIdx, upIdx]
            perturb = np.random.uniform(low=-1, high=1, size=obj.numVars())
            perturb[fixedVars] = 0
            perturb[0:lowIdx] = 0
            perturb[upIdx + 1:] = 0
            fd_delta_E = fdGrad(obj, fd_eps, xeval, perturb, customArgs, fixedVars)
            analytic_delta_E = g.dot(perturb)
            err += np.abs(analytic_delta_E - fd_delta_E)
        return err
    # Bisect: keep whichever half-interval accumulates the larger error.
    lowIdx, upIdx = 0, len(perturb)
    while upIdx > lowIdx:
        # print([lowIdx, upIdx])
        mid = (lowIdx + upIdx) // 2
        if errForRange(lowIdx, mid) > errForRange(mid + 1, upIdx):
            upIdx = mid
        else:
            lowIdx = mid + 1
    setVars(obj, xold, customArgs)
    return lowIdx
def validateHessian(obj, fd_eps = 1e-6, xeval = None, perturb = None, customArgs = None, fixedVars = [], indexInterval = None, H = None):
    """
    Compare the analytic Hessian-vector product H @ perturb against a
    central finite difference of the gradient.

    Returns
    -------
    relative error (in l2 norm)
    finite difference delta gradient
    analytic delta gradient
    """
    xold, xeval, perturb = preamble(obj, xeval, perturb, fixedVars)
    def gradAt(x):
        setVars(obj, x, customArgs)
        return evalWithCustomArgs(obj.gradient, customArgs)
    setVars(obj, xeval, customArgs)
    # allow the caller to pass a precomputed Hessian
    if H is None: H = evalWithCustomArgs(obj.hessian, customArgs)
    fd_delta_grad = (gradAt(xeval + perturb * fd_eps) - gradAt(xeval - perturb * fd_eps)) / (2 * fd_eps)
    if isinstance(H, np.ndarray): # Dense case
        an_delta_grad = H @ perturb
    else: an_delta_grad = H.apply(perturb)
    if indexInterval is not None:
        # restrict the comparison to a sub-range of components
        fd_delta_grad = fd_delta_grad[indexInterval[0]:indexInterval[1]]
        an_delta_grad = an_delta_grad[indexInterval[0]:indexInterval[1]]
    setVars(obj, xold, customArgs)
    return (norm(an_delta_grad - fd_delta_grad) / norm(fd_delta_grad), fd_delta_grad, an_delta_grad)
def gradConvergence(obj, perturb=None, customArgs=None, fixedVars = [], epsilons=None):
    """Sweep fd step sizes (default logspace 1e-9..1e-3) and record the
    relative error of the fd directional derivative vs the analytic one.

    Returns (epsilons, errors, analytic_delta_E).
    """
    if epsilons is None:
        epsilons = np.logspace(-9, -3, 100)
    errors = []
    if (perturb is None): perturb = np.random.uniform(-1, 1, size=obj.numVars())
    # gradient is evaluated once and reused across all step sizes
    g = evalWithCustomArgs(obj.gradient, customArgs)
    for eps in epsilons:
        fd, an = validateGrad(obj, g=g, customArgs=customArgs, perturb=perturb, fd_eps=eps, fixedVars=fixedVars)
        err = np.abs(an - fd) / np.abs(an)
        errors.append(err)
    return (epsilons, errors, an)
from matplotlib import pyplot as plt
def gradConvergencePlotRaw(obj, perturb=None, customArgs=None, fixedVars = [], epsilons=None):
    """Draw the gradient fd-convergence curve on log-log axes (no title/labels)."""
    step_sizes, rel_errors, _ = gradConvergence(obj, perturb, customArgs, fixedVars, epsilons=epsilons)
    plt.loglog(step_sizes, rel_errors, label='grad')
    plt.grid()
def gradConvergencePlot(obj, perturb=None, customArgs=None, fixedVars = [], epsilons=None):
    """Labelled log-log convergence plot for the gradient fd test."""
    gradConvergencePlotRaw(obj, perturb, customArgs, fixedVars, epsilons=epsilons)
    plt.xlabel('Step size')
    plt.ylabel('Relative error')
    plt.title('Directional derivative fd test for gradient')
def hessConvergence(obj, perturb=None, customArgs=None, fixedVars = [], indexInterval = None, epsilons=None):
    """Sweep fd step sizes and record the relative error of H @ perturb vs
    the finite-difference delta gradient.

    Returns (epsilons, errors, analytic_delta_grad).
    """
    if epsilons is None:
        epsilons = np.logspace(-9, -3, 100)
    errors = []
    if (perturb is None): perturb = np.random.uniform(-1, 1, size=obj.numVars())
    # Hessian is evaluated once and reused across all step sizes
    H = evalWithCustomArgs(obj.hessian, customArgs)
    for eps in epsilons:
        err, fd, an = validateHessian(obj, customArgs=customArgs, perturb=perturb, fd_eps=eps, fixedVars=fixedVars, indexInterval=indexInterval, H=H)
        errors.append(err)
    return (epsilons, errors, an)
def hessConvergencePlotRaw(obj, perturb=None, customArgs=None, fixedVars = [], indexInterval = None, epsilons=None):
    """Draw the Hessian fd-convergence curve on log-log axes (no title/labels)."""
    step_sizes, rel_errors, _ = hessConvergence(obj, perturb, customArgs, fixedVars, indexInterval=indexInterval, epsilons=epsilons)
    plt.loglog(step_sizes, rel_errors, label='hess')
    plt.grid()
def hessConvergencePlot(obj, perturb=None, customArgs=None, fixedVars = [], indexInterval = None, epsilons=None):
    """Labelled log-log convergence plot for the Hessian fd test."""
    hessConvergencePlotRaw(obj, perturb, customArgs, fixedVars, indexInterval=indexInterval, epsilons=epsilons)
    plt.xlabel('Step size')
    plt.ylabel('Relative error')
    plt.title('Directional derivative fd test for Hessian')
def allEnergies(obj):
    """Return a name -> energy dict: one entry per EnergyType member when
    obj exposes an EnergyType enum, otherwise a single 'Energy' entry."""
    if not hasattr(obj, 'EnergyType'):
        return {'Energy': obj.energy()}
    return {name: obj.energy(member) for name, member in obj.EnergyType.__members__.items()}
def linesearchValidationPlot(obj, direction, alphaMax = 1e-5, width=12, height=6):
    """
    Help diagnose issues with the backtracking linesearch by plotting the
    energy along the linesearch direction `direction`.

    Fix: the original ignored `alphaMax` and always sampled [0, 1e-5];
    the step range now honors the parameter (default unchanged).
    """
    x = obj.getVars()
    alphas = np.linspace(0, alphaMax, 100)
    energies = []
    for alpha in alphas:
        obj.setVars(x + alpha * direction)
        energies.append(allEnergies(obj))
    obj.setVars(x)  # restore original variables
    keys = list(energies[0].keys())
    nplots = len(keys)
    plt.figure(figsize=(width, height))
    # grid layout is the same for every subplot; compute it once
    cols = int(np.ceil(np.sqrt(nplots)))
    rows = int(np.ceil(nplots / cols))
    for i, k in enumerate(keys):
        plt.subplot(rows, cols, i + 1)
        if k is None: plt.plot(alphas, energies)
        else: plt.plot(alphas, [e[k] for e in energies])
        if k is not None: plt.title(k)
        plt.grid()
    plt.tight_layout()
| [
"matplotlib.pyplot.loglog",
"numpy.random.uniform",
"matplotlib.pyplot.title",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplot",
"numpy.abs",
"numpy.copy",
"numpy.ceil",
"matplotlib.pyplot.plot",
"numpy.logspace",
"matplotlib.pyplot.figure",
"reflection.hasArg",
"numpy.linalg.no... | [((138, 185), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': 'x.shape'}), '(low=-1, high=1, size=x.shape)\n', (155, 185), True, 'import numpy as np\n'), ((383, 399), 'numpy.copy', 'np.copy', (['perturb'], {}), '(perturb)\n', (390, 399), True, 'import numpy as np\n'), ((5250, 5287), 'matplotlib.pyplot.loglog', 'plt.loglog', (['eps', 'errors'], {'label': '"""grad"""'}), "(eps, errors, label='grad')\n", (5260, 5287), True, 'from matplotlib import pyplot as plt\n'), ((5292, 5302), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (5300, 5302), True, 'from matplotlib import pyplot as plt\n'), ((5483, 5539), 'matplotlib.pyplot.title', 'plt.title', (['"""Directional derivative fd test for gradient"""'], {}), "('Directional derivative fd test for gradient')\n", (5492, 5539), True, 'from matplotlib import pyplot as plt\n'), ((5544, 5572), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relative error"""'], {}), "('Relative error')\n", (5554, 5572), True, 'from matplotlib import pyplot as plt\n'), ((5577, 5600), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Step size"""'], {}), "('Step size')\n", (5587, 5600), True, 'from matplotlib import pyplot as plt\n'), ((6416, 6453), 'matplotlib.pyplot.loglog', 'plt.loglog', (['eps', 'errors'], {'label': '"""hess"""'}), "(eps, errors, label='hess')\n", (6426, 6453), True, 'from matplotlib import pyplot as plt\n'), ((6458, 6468), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6466, 6468), True, 'from matplotlib import pyplot as plt\n'), ((6700, 6755), 'matplotlib.pyplot.title', 'plt.title', (['"""Directional derivative fd test for Hessian"""'], {}), "('Directional derivative fd test for Hessian')\n", (6709, 6755), True, 'from matplotlib import pyplot as plt\n'), ((6760, 6788), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relative error"""'], {}), "('Relative error')\n", (6770, 6788), True, 'from matplotlib import pyplot as plt\n'), ((6793, 6816), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Step size"""'], {}), "('Step size')\n", (6803, 6816), True, 'from matplotlib import pyplot as plt\n'), ((7284, 7310), 'numpy.linspace', 'np.linspace', (['(0)', '(1e-05)', '(100)'], {}), '(0, 1e-05, 100)\n', (7295, 7310), True, 'import numpy as np\n'), ((7520, 7555), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width, height)'}), '(figsize=(width, height))\n', (7530, 7555), True, 'from matplotlib import pyplot as plt\n'), ((7884, 7902), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7900, 7902), True, 'from matplotlib import pyplot as plt\n'), ((4598, 4622), 'numpy.logspace', 'np.logspace', (['(-9)', '(-3)', '(100)'], {}), '(-9, -3, 100)\n', (4609, 4622), True, 'import numpy as np\n'), ((5756, 5780), 'numpy.logspace', 'np.logspace', (['(-9)', '(-3)', '(100)'], {}), '(-9, -3, 100)\n', (5767, 5780), True, 'import numpy as np\n'), ((7685, 7715), 'matplotlib.pyplot.subplot', 'plt.subplot', (['rows', 'cols', '(i + 1)'], {}), '(rows, cols, i + 1)\n', (7696, 7715), True, 'from matplotlib import pyplot as plt\n'), ((7869, 7879), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (7877, 7879), True, 'from matplotlib import pyplot as plt\n'), ((2962, 2999), 'numpy.abs', 'np.abs', (['(analytic_delta_E - fd_delta_E)'], {}), '(analytic_delta_E - fd_delta_E)\n', (2968, 2999), True, 'import numpy as np\n'), ((4376, 4411), 'numpy.linalg.norm', 'norm', (['(an_delta_grad - fd_delta_grad)'], {}), '(an_delta_grad - fd_delta_grad)\n', (4380, 4411), False, 'from numpy.linalg import norm\n'), ((4414, 4433), 'numpy.linalg.norm', 'norm', (['fd_delta_grad'], {}), '(fd_delta_grad)\n', (4418, 4433), False, 'from numpy.linalg import norm\n'), ((4925, 4940), 'numpy.abs', 'np.abs', (['(an - fd)'], {}), '(an - fd)\n', (4931, 4940), True, 'import numpy as np\n'), ((4943, 4953), 'numpy.abs', 'np.abs', (['an'], {}), '(an)\n', (4949, 4953), True, 'import numpy as np\n'), ((7653, 7675), 'numpy.ceil', 
'np.ceil', (['(nplots / cols)'], {}), '(nplots / cols)\n', (7660, 7675), True, 'import numpy as np\n'), ((7738, 7764), 'matplotlib.pyplot.plot', 'plt.plot', (['alphas', 'energies'], {}), '(alphas, energies)\n', (7746, 7764), True, 'from matplotlib import pyplot as plt\n'), ((7779, 7821), 'matplotlib.pyplot.plot', 'plt.plot', (['alphas', '[e[k] for e in energies]'], {}), '(alphas, [e[k] for e in energies])\n', (7787, 7821), True, 'from matplotlib import pyplot as plt\n'), ((7848, 7860), 'matplotlib.pyplot.title', 'plt.title', (['k'], {}), '(k)\n', (7857, 7860), True, 'from matplotlib import pyplot as plt\n'), ((7616, 7631), 'numpy.sqrt', 'np.sqrt', (['nplots'], {}), '(nplots)\n', (7623, 7631), True, 'import numpy as np\n'), ((602, 624), 'reflection.hasArg', 'hasArg', (['obj.setVars', 'k'], {}), '(obj.setVars, k)\n', (608, 624), False, 'from reflection import hasArg\n'), ((878, 890), 'reflection.hasArg', 'hasArg', (['f', 'k'], {}), '(f, k)\n', (884, 890), False, 'from reflection import hasArg\n')] |
# coding : utf-8
# Example script: train a ResFGB model on the ijcnn1 dataset and report
# train/test loss and accuracy.
from __future__ import print_function, absolute_import, division, unicode_literals
import sys, logging
import numpy as np
from resfgb.models import ResFGB, LogReg, SVM, get_hyperparams
from scripts import sample_data
logging.basicConfig( format='%(message)s', level=logging.INFO )
# Set seed
seed = 123
np.random.seed( seed )
# Get data: 80/20 train/validation split; (Xt, Yt) is the held-out test set
X, Y, Xt, Yt = sample_data.get_ijcnn1()
n_train = int( 0.8 * X.shape[0] )
Xv, Yv = X[n_train:], Y[n_train:]
X, Y = X[:n_train], Y[:n_train]
logging.info( 'train size: {0}, validation size: {1}, test size: {2}'\
              .format( X.shape[0],Xv.shape[0],Xt.shape[0] ) )
# Build model and train.
(n_data, input_dim) = X.shape
n_class = len( set(Y) | set(Yv) | set(Yt) )
hparams = get_hyperparams( n_data, input_dim, n_class )
# Override selected defaults: regularization (wr), epoch budgets,
# functional-gradient step size (fg_eta), and boosting iterations.
hparams['model_hparams']['bias'] = True
hparams['model_hparams']['wr'] = 1e-5
hparams['model_hparams']['max_epoch'] = 30
hparams['resblock_hparams']['wr'] = 1e-5
hparams['resblock_hparams']['max_epoch'] = 20
hparams['fg_eta'] = 1e-1
hparams['max_iters'] = 30
hparams['seed'] = seed
model = ResFGB( **hparams )
# use_best_iter=True: evaluate with the iteration that scored best on the
# validation split
best_iters,_ ,_ = model.fit( X, Y, Xv, Yv, use_best_iter=True )
train_loss, train_acc = model.evaluate( X, Y )
logging.info( '- Result -' )
logging.info( 'train_loss: {0:5.4f}, train_acc: {1:4.3f}'\
              .format( train_loss, train_acc ) )
if Xt is not None:
    test_loss, test_acc = model.evaluate( Xt, Yt )
    logging.info( 'test_loss : {0:5.4f}, test_acc : {1:4.3f}'\
                  .format( test_loss, test_acc ) )
| [
"resfgb.models.ResFGB",
"numpy.random.seed",
"resfgb.models.get_hyperparams",
"logging.basicConfig",
"logging.info",
"scripts.sample_data.get_ijcnn1"
] | [((236, 297), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(message)s"""', 'level': 'logging.INFO'}), "(format='%(message)s', level=logging.INFO)\n", (255, 297), False, 'import sys, logging\n'), ((323, 343), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (337, 343), True, 'import numpy as np\n'), ((373, 397), 'scripts.sample_data.get_ijcnn1', 'sample_data.get_ijcnn1', ([], {}), '()\n', (395, 397), False, 'from scripts import sample_data\n'), ((743, 786), 'resfgb.models.get_hyperparams', 'get_hyperparams', (['n_data', 'input_dim', 'n_class'], {}), '(n_data, input_dim, n_class)\n', (758, 786), False, 'from resfgb.models import ResFGB, LogReg, SVM, get_hyperparams\n'), ((1116, 1133), 'resfgb.models.ResFGB', 'ResFGB', ([], {}), '(**hparams)\n', (1122, 1133), False, 'from resfgb.models import ResFGB, LogReg, SVM, get_hyperparams\n'), ((1249, 1275), 'logging.info', 'logging.info', (['"""- Result -"""'], {}), "('- Result -')\n", (1261, 1275), False, 'import sys, logging\n')] |
"""
Created on Tue Oct 09 16:39:00 2018
@author: <NAME>
"""
from scipy import special
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy import ndimage
import numpy as np
from matplotlib import (pyplot as plt, path, patches)
Path = path.Path
PathPatch = patches.PathPatch
erf = special.erf
def path_maker(axes, locations, facecolor, edgecolor, linestyle, lineweight):
    """
    Adds a closed rectangular path to a figure

    Parameters
    ----------
    axes : matplotlib axes
        axes which to add the patch to
    locations : numpy array
        bounding box of the rectangle as (xmin, xmax, ymin, ymax)
    facecolor : str, optional
        facecolor of the path
    edgecolor : str, optional
        edgecolor of the path
    linestyle : str, optional
        sets the style of the line, using conventional matplotlib styles
    lineweight : float, optional
        thickness of the line
    """
    # one MOVETO, three LINETO edges and a CLOSEPOLY to close the rectangle
    codes = [Path.MOVETO] + [Path.LINETO] * 3 + [Path.CLOSEPOLY]
    # corners of the rectangle; the CLOSEPOLY vertex is ignored, so (0, 0) is fine
    vertices = np.array([(locations[0], locations[2]),
                         (locations[1], locations[2]),
                         (locations[1], locations[3]),
                         (locations[0], locations[3]),
                         (0, 0)], float)
    # local name avoids shadowing the module-level `path` import
    rect_path = Path(vertices, codes)
    pathpatch = PathPatch(rect_path, facecolor=facecolor, edgecolor=edgecolor,
                          ls=linestyle, lw=lineweight)
    # adds the patch to axes
    axes.add_patch(pathpatch)
def rotate_and_crop(image_, angle=60.46, frac_rm=0.17765042979942694):
    """
    Function which rotates and crops the images

    Parameters
    ----------
    image_ : array
        image array to plot (left untouched; a copy is processed)
    angle : float, optional
        angle to rotate the image by
    frac_rm : float, optional
        sets the fraction of the image to remove from each edge

    Returns
    ----------
    crop_image : array
        image which is rotated and cropped
    scale_factor : float
        scaling factor for the image following rotation
    """
    # makes a copy so the caller's array is not modified
    image = np.copy(image_)
    # replace NaN/inf points with the finite minimum so interpolation stays valid
    image[~np.isfinite(image)] = np.nanmin(image)
    # rotate the image; `ndimage.rotate` replaces the deprecated
    # `ndimage.interpolation.rotate` namespace removed in modern SciPy
    rot_topo = ndimage.rotate(image, 90 - angle, cval=np.nanmin(image))
    # crop frac_rm of the (assumed square) image from every edge
    pix_rem = int(rot_topo.shape[0] * frac_rm)
    crop_image = rot_topo[pix_rem:rot_topo.shape[0] - pix_rem,
                          pix_rem:rot_topo.shape[0] - pix_rem]
    # returns the scale factor for the new image size
    scale_factor = (np.cos(np.deg2rad(angle)) +
                    np.cos(np.deg2rad(90 - angle))) * (1 - frac_rm)
    return crop_image, scale_factor
def roll_and_append(data, fraction=4):
    """
    Circularly shift *data* and append the shifted first element.

    Rolls the array forward by ``data.shape[0] // fraction`` positions and
    then appends the first element of the rolled array, closing the loop
    (e.g. so periodic data plots without a gap at the wrap-around point).

    Parameters
    ----------
    data : array
        input data to process
    fraction : int, optional
        fraction of the array length to roll by

    Returns
    ----------
    data : array
        rolled data with its (new) first element appended
    """
    data = np.roll(data, data.shape[0]//fraction)
    data = np.append(data, data[0])
    return data
def verbose_print(verbose, *args):
    """Forward *args* to ``print`` only when *verbose* is truthy."""
    if not verbose:
        return
    print(*args)
def set_axis_aspect(ax, ratio=1):
    """
    sets the aspect ratio of a figure

    Parameters
    ----------
    ax : object
        figure axis to modify
    ratio : float, optional
        desired height/width aspect ratio of the drawn axes
    """
    x_lo, x_hi = ax.get_xlim()
    y_lo, y_hi = ax.get_ylim()
    # scale by the data-span ratio so the rendered box matches `ratio`
    span_ratio = abs((x_hi - x_lo) / (y_lo - y_hi))
    ax.set_aspect(span_ratio * ratio)
def colorbar(axes, plot,
             location='right', size=10,
             pad=0.05, num_format='%.1e',
             ticks = True,
             label = False):
    """
    Attach a colorbar to an image plot.

    Parameters
    ----------
    axes : matplotlib axes
        axes which to add the plot to
    plot : matplotlib plot
        mappable the colorbar refers to
    location : str, optional
        side of the axes on which to place the colorbar
    size : int, optional
        percent size of colorbar relative to the plot
    pad : float, optional
        gap between colorbar and plot
    num_format : str, optional
        string format for the labels on the colorbar
    ticks : bool, optional
        when False the tick marks are removed
    label : str, optional
        axis label; any non-string value means no label is drawn
    """
    # carve out a slice of the parent axes to host the colorbar
    axes_divider = make_axes_locatable(axes)
    bar_axes = axes_divider.append_axes(location, size=str(size) + '%', pad=pad)
    bar = plt.colorbar(plot, cax=bar_axes, format=num_format)
    if not ticks:
        bar.set_ticks([])
    if isinstance(label, str):
        bar.set_label(label, rotation=270, labelpad=15)
def find_nearest(array, value, averaging_number):
    """
    returns the indices of the entries of *array* nearest to *value*

    Parameters
    ----------
    array : float, array
        image to find the index closest to a value
    value : float
        value to find points near
    averaging_number : int
        number of points to find
    """
    distances = np.abs(array - value)
    # argsort ranks by distance; keep only the closest few indices
    return distances.argsort()[:averaging_number]
def loop_fitting_function(v, a1, a2, a3,
                          b1, b2, b3,
                          b4, b5, b6,
                          b7, b8,
                          Au, Al):
    """
    computes the hysteresis-loop fitting function

    Parameters
    ----------
    v : float, array
        voltage array
    a1, a2, a3, b1..b8, Au, Al : float
        loop-shape coefficients

    Return
    ----------
    {} : dict
        Branch1 : float, array
            top branch
        Branch2 : float, array
            bottom branch
    """
    # voltage-dependent switching widths of the two branches
    S1 = (b1 + b2) / 2 + ((b2 - b1) / 2) * erf((v - b7) / b5)
    S2 = (b4 + b3) / 2 + ((b3 - b4) / 2) * erf((v - b8) / b6)
    # shared offset, amplitude and linear background
    offset = (a1 + a2) / 2
    amplitude = (a2 - a1) / 2
    background = a3 * v
    top = offset + amplitude * erf((v - Au) / S1) + background
    bottom = offset + amplitude * erf((v - Al) / S2) + background
    return {'Branch1': top, 'Branch2': bottom}
| [
"mpl_toolkits.axes_grid1.make_axes_locatable",
"numpy.abs",
"numpy.copy",
"numpy.roll",
"numpy.deg2rad",
"numpy.isfinite",
"numpy.nanmin",
"matplotlib.pyplot.colorbar",
"numpy.append",
"numpy.array"
] | [((1252, 1277), 'numpy.array', 'np.array', (['vertices', 'float'], {}), '(vertices, float)\n', (1260, 1277), True, 'import numpy as np\n'), ((2112, 2127), 'numpy.copy', 'np.copy', (['image_'], {}), '(image_)\n', (2119, 2127), True, 'import numpy as np\n'), ((2210, 2226), 'numpy.nanmin', 'np.nanmin', (['image'], {}), '(image)\n', (2219, 2226), True, 'import numpy as np\n'), ((3076, 3116), 'numpy.roll', 'np.roll', (['data', '(data.shape[0] // fraction)'], {}), '(data, data.shape[0] // fraction)\n', (3083, 3116), True, 'import numpy as np\n'), ((3126, 3150), 'numpy.append', 'np.append', (['data', 'data[0]'], {}), '(data, data[0])\n', (3135, 3150), True, 'import numpy as np\n'), ((4390, 4415), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['axes'], {}), '(axes)\n', (4409, 4415), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((4502, 4548), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['plot'], {'cax': 'cax', 'format': 'num_format'}), '(plot, cax=cax, format=num_format)\n', (4514, 4548), True, 'from matplotlib import pyplot as plt, path, patches\n'), ((2188, 2206), 'numpy.isfinite', 'np.isfinite', (['image'], {}), '(image)\n', (2199, 2206), True, 'import numpy as np\n'), ((2326, 2342), 'numpy.nanmin', 'np.nanmin', (['image'], {}), '(image)\n', (2335, 2342), True, 'import numpy as np\n'), ((2616, 2633), 'numpy.deg2rad', 'np.deg2rad', (['angle'], {}), '(angle)\n', (2626, 2633), True, 'import numpy as np\n'), ((2664, 2686), 'numpy.deg2rad', 'np.deg2rad', (['(90 - angle)'], {}), '(90 - angle)\n', (2674, 2686), True, 'import numpy as np\n'), ((5039, 5060), 'numpy.abs', 'np.abs', (['(array - value)'], {}), '(array - value)\n', (5045, 5060), True, 'import numpy as np\n')] |
# Demonstration: convergence of a multi-term power-law error model and its
# extrapolated counterpart.
import numpy as np
from swutil.plots import plot_convergence
from swutil.np_tools import extrapolate
# L error terms, K mesh levels, slowest decay exponent `base`
L=100
K=50
base=0.2
coeff = np.random.rand(1,L)
# step sizes h_k = 2^{-k} and the associated work w_k = 2^k
hs= 2.**(-np.arange(1,K))
w=2**np.arange(1,K)
hs = np.reshape(hs,(-1,1))
# term j decays like h^(base*j); the observed value is the sum of all terms
hf= hs**(base*np.arange(1,L+1))
T = coeff*hf
values = np.sum(T,axis=1)
from matplotlib import pyplot as plt
# raw convergence versus degree -1 extrapolated convergence, both against work
plot_convergence(w,values,reference=0)
plot_convergence(w[0:],extrapolate(values,degree=-1,base=base/2),reference=0)
plt.show()
| [
"matplotlib.pyplot.show",
"numpy.sum",
"swutil.plots.plot_convergence",
"swutil.np_tools.extrapolate",
"numpy.arange",
"numpy.reshape",
"numpy.random.rand"
] | [((129, 149), 'numpy.random.rand', 'np.random.rand', (['(1)', 'L'], {}), '(1, L)\n', (143, 149), True, 'import numpy as np\n'), ((200, 223), 'numpy.reshape', 'np.reshape', (['hs', '(-1, 1)'], {}), '(hs, (-1, 1))\n', (210, 223), True, 'import numpy as np\n'), ((276, 293), 'numpy.sum', 'np.sum', (['T'], {'axis': '(1)'}), '(T, axis=1)\n', (282, 293), True, 'import numpy as np\n'), ((330, 370), 'swutil.plots.plot_convergence', 'plot_convergence', (['w', 'values'], {'reference': '(0)'}), '(w, values, reference=0)\n', (346, 370), False, 'from swutil.plots import plot_convergence\n'), ((447, 457), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (455, 457), True, 'from matplotlib import pyplot as plt\n'), ((180, 195), 'numpy.arange', 'np.arange', (['(1)', 'K'], {}), '(1, K)\n', (189, 195), True, 'import numpy as np\n'), ((392, 437), 'swutil.np_tools.extrapolate', 'extrapolate', (['values'], {'degree': '(-1)', 'base': '(base / 2)'}), '(values, degree=-1, base=base / 2)\n', (403, 437), False, 'from swutil.np_tools import extrapolate\n'), ((159, 174), 'numpy.arange', 'np.arange', (['(1)', 'K'], {}), '(1, K)\n', (168, 174), True, 'import numpy as np\n'), ((236, 255), 'numpy.arange', 'np.arange', (['(1)', '(L + 1)'], {}), '(1, L + 1)\n', (245, 255), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#@Time : 2019/5/13 10:28
#@Author: yangjian
#@File : model.py
import numpy
import os
import torch
from flyai.model.base import Base
__import__('net', fromlist=["Net"])
MODEL_NAME = "model.pkl"
from path import MODEL_PATH
# 判断gpu是否可用
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
device = torch.device(device)
class Model(Base):
    """flyai model wrapper: loads a trained two-input torch network for prediction."""
    def __init__(self, data):
        # `data` is the flyai dataset helper used to preprocess prediction inputs
        self.data = data
def predict(self, **data):
network = torch.load(os.path.join(MODEL_PATH, MODEL_NAME))
x_data = self.data.predict_data(**data)
x_1 = torch.from_numpy(x_data[0])
x_2 = torch.from_numpy(x_data[1])
x_1 = x_1.float().to(device)
x_2 = x_2.float().to(device)
outputs = network(x_1, x_2)
_, prediction = torch.max(outputs.data, 1)
prediction = prediction.cpu()
prediction = prediction.numpy()
return prediction
def predict_all(self, datas):
network = torch.load(os.path.join(MODEL_PATH, MODEL_NAME))
labels = []
for data in datas:
x_data = self.data.predict_data(**data)
x_1 = torch.from_numpy(x_data[0])
x_2 = torch.from_numpy(x_data[1])
x_1 = x_1.float().to(device)
x_2 = x_2.float().to(device)
outputs = network(x_1, x_2)
_, prediction = torch.max(outputs.data, 1)
prediction = prediction.cpu()
prediction = prediction.numpy()
labels.append(prediction)
return labels
def batch_iter(self, x1, x2, y, batch_size=16):
"""生成批次数据"""
data_len = len(x1)
num_batch = int((data_len - 1) / batch_size) + 1
indices = numpy.random.permutation(numpy.arange(data_len))
x1_shuffle = x1[indices]
x2_shuffle = x2[indices]
y_shuffle = y[indices]
for i in range(num_batch):
start_id = i * batch_size
end_id = min((i + 1) * batch_size, data_len)
yield x1_shuffle[start_id:end_id], x2_shuffle[start_id:end_id], y_shuffle[start_id:end_id]
    def save_model(self, network, path, name=MODEL_NAME, overwrite=False):
        # delegate the bookkeeping (paths, overwrite handling) to the flyai base class
        super().save_model(network, path, name, overwrite)
torch.save(network, os.path.join(path, name)) | [
"torch.max",
"torch.cuda.is_available",
"numpy.arange",
"torch.device",
"os.path.join",
"torch.from_numpy"
] | [((304, 329), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (327, 329), False, 'import torch\n'), ((389, 409), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (401, 409), False, 'import torch\n'), ((657, 684), 'torch.from_numpy', 'torch.from_numpy', (['x_data[0]'], {}), '(x_data[0])\n', (673, 684), False, 'import torch\n'), ((700, 727), 'torch.from_numpy', 'torch.from_numpy', (['x_data[1]'], {}), '(x_data[1])\n', (716, 727), False, 'import torch\n'), ((866, 892), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (875, 892), False, 'import torch\n'), ((555, 591), 'os.path.join', 'os.path.join', (['MODEL_PATH', 'MODEL_NAME'], {}), '(MODEL_PATH, MODEL_NAME)\n', (567, 591), False, 'import os\n'), ((1067, 1103), 'os.path.join', 'os.path.join', (['MODEL_PATH', 'MODEL_NAME'], {}), '(MODEL_PATH, MODEL_NAME)\n', (1079, 1103), False, 'import os\n'), ((1226, 1253), 'torch.from_numpy', 'torch.from_numpy', (['x_data[0]'], {}), '(x_data[0])\n', (1242, 1253), False, 'import torch\n'), ((1273, 1300), 'torch.from_numpy', 'torch.from_numpy', (['x_data[1]'], {}), '(x_data[1])\n', (1289, 1300), False, 'import torch\n'), ((1455, 1481), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (1464, 1481), False, 'import torch\n'), ((1841, 1863), 'numpy.arange', 'numpy.arange', (['data_len'], {}), '(data_len)\n', (1853, 1863), False, 'import numpy\n'), ((2371, 2395), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (2383, 2395), False, 'import os\n')] |
import numpy as np
import copy
from . import ekf_utils
gtrack_MIN_DISPERSION_ALPHA = 0.1
gtrack_EST_POINTS = 10
gtrack_MIN_POINTS_TO_UPDATE_DISPERSION = 3
gtrack_KNOWN_TARGET_POINTS_THRESHOLD = 50
# GTRACK Module calls this function to instantiate GTRACK Unit with desired configuration parameters.
# Function returns a handle, which is used my module to call units' methods
def unit_create(params):
    """
    Instantiate a GTRACK unit with the desired configuration parameters.

    Parameters
    ----------
    params : object
        module-level configuration carrying the gating/state/allocation/
        unrolling/variation/scenery parameter groups plus motion-model
        matrices and unit identity.

    Returns
    -------
    inst : GtrackUnitInstance
        handle used by the module to call the unit's methods

    Raises
    ------
    ValueError
        if the configured state vector type is not 2DA.
    """
    inst = ekf_utils.GtrackUnitInstance()

    # copy the parameter groups straight from the module configuration
    inst.gatingParams = params.gatingParams
    inst.stateParams = params.stateParams
    inst.allocationParams = params.allocationParams
    inst.unrollingParams = params.unrollingParams
    inst.variationParams = params.variationParams
    inst.sceneryParams = params.sceneryParams

    # unit identity and kinematic limits
    inst.uid = params.uid
    inst.maxAcceleration = params.maxAcceleration
    inst.maxRadialVelocity = params.maxRadialVelocity
    inst.radialVelocityResolution = params.radialVelocityResolution
    inst.verbose = params.verbose
    inst.initialRadialVelocity = params.initialRadialVelocity

    # motion-model and process-noise matrices (4- and 6-state variants)
    inst.F4 = params.F4
    inst.Q4 = params.Q4
    inst.F6 = params.F6
    inst.Q6 = params.Q6

    # only the 2DA (position/velocity/acceleration) state vector is supported
    if params.stateVectorType != ekf_utils.gtrack_STATE_VECTOR_TYPE().gtrack_STATE_VECTORS_2DA:
        raise ValueError('not supported, unit_create')
    inst.stateVectorType = ekf_utils.gtrack_STATE_VECTOR_TYPE().gtrack_STATE_VECTORS_2DA
    inst.stateVectorLength = 6
    inst.measurementVectorLength = 3

    inst.dt = params.deltaT
    inst.state = ekf_utils.TrackState().TRACK_STATE_FREE
    return inst
# GTRACK Module calls this function to run GTRACK unit prediction step
def unit_predict(handle):
    """
    Run the GTRACK unit prediction step: propagate the state estimate and
    its covariance one frame forward through the motion model, then refresh
    the expected spherical measurement.

    Parameters
    ----------
    handle : GtrackUnitInstance
        tracking-unit instance, updated in place (S_apriori_hat,
        P_apriori_hat and H_s are rewritten).
    """
    inst = handle
    inst.heartBeatCount += 1

    # Current state vector length
    sLen = inst.stateVectorLength

    if inst.processVariance != 0:
        # S_apriori = F * S
        inst.S_apriori_hat = ekf_utils.gtrack_matrixMultiply(sLen, sLen, 1, inst.F, inst.S_hat)
        # P_apriori = F * P * F' + Q * processVariance, forced symmetrical.
        # (The original pre-allocated np.zeros temp buffers here; they were
        # dead stores since every helper returns a fresh matrix.)
        fp = ekf_utils.gtrack_matrixMultiply(6, 6, 6, inst.F, inst.P_hat)
        fpft = ekf_utils.gtrack_matrixTransposeMultiply(6, 6, 6, fp, inst.F)
        q_scaled = ekf_utils.gtrack_matrixScalerMultiply(sLen, sLen, inst.Q, inst.processVariance)
        p_pred = ekf_utils.gtrack_matrixAdd(sLen, sLen, q_scaled, fpft)
        inst.P_apriori_hat = ekf_utils.gtrack_matrixMakeSymmetrical(sLen, p_pred)
    else:
        # static hypothesis: carry the previous estimate forward unchanged
        inst.S_apriori_hat = copy.deepcopy(inst.S_hat)
        inst.P_apriori_hat = copy.deepcopy(inst.P_hat)

    # refresh the expected measurement (range, angle, doppler)
    ekf_utils.gtrack_cartesian2spherical(inst.stateVectorType, inst.S_apriori_hat, inst.H_s)
# GTRACK Module calls this function to obtain the measurement vector scoring from the GTRACK unit perspective
def unit_score(handle, point, best_score, best_ind, num):
    """
    Score measurement points against this unit's predicted measurement.

    For each point inside the unit's gate, the score is the log-determinant
    of the group covariance plus the Mahalanobis distance of the innovation;
    when it beats the point's current best score, the point is (re-)assigned
    to this unit and its doppler is replaced by the unrolled radial velocity.

    Parameters
    ----------
    handle : GtrackUnitInstance
        tracking-unit instance; the gate size inst.G is refreshed in place
    point : list
        measurement points (range, angle, doppler); doppler may be rewritten
    best_score : array
        per-point best score so far, updated in place
    best_ind : array
        per-point uid of the best-scoring unit so far, updated in place
    num : int
        number of measurement points
    """
    limits = np.zeros(shape=(3,), dtype=np.float32)
    u_tilda = np.zeros(shape=(3,), dtype=np.float32)
    inst = handle
    # gating limits from the unit configuration
    limits[0] = inst.gatingParams.limits[0].length
    limits[1] = inst.gatingParams.limits[0].width
    limits[2] = inst.gatingParams.limits[0].vel
    if inst.processVariance == 0:
        # static hypothesis: use a unit gate
        inst.G = 1
    else:
        inst.G = ekf_utils.gtrack_gateCreateLim(inst.gatingParams.volume, inst.gC_inv, inst.H_s[0], limits)
    det = ekf_utils.gtrack_matrixDet3(inst.gC)
    log_det = np.float32(np.log(det))
    for n in range(num):
        # points flagged as behind a wall can never be associated
        if best_ind[n] == ekf_utils.gtrack_ID_POINT_BEHIND_THE_WALL:
            continue
        # innovation in range and angle
        u_tilda[0] = np.float32(point[n].range - inst.H_s[0])
        u_tilda[1] = np.float32(point[n].angle - inst.H_s[1])
        if inst.velocityHandling < ekf_utils.VelocityHandlingState().VELOCITY_LOCKED:
            # Radial velocity estimation is not yet known, unroll based on velocity measured at allocation time
            rv_out = ekf_utils.gtrack_unrollRadialVelocity(inst.maxRadialVelocity, inst.allocationVelocity,
                                                  point[n].doppler)
            u_tilda[2] = np.float32(rv_out - inst.allocationVelocity)
        else:
            # Radial velocity estimation is known
            rv_out = ekf_utils.gtrack_unrollRadialVelocity(inst.maxRadialVelocity, inst.H_s[2], point[n].doppler)
            u_tilda[2] = np.float32(rv_out - inst.H_s[2])
        chi2 = ekf_utils.gtrack_computeMahalanobis3(u_tilda, inst.gC_inv)
        if chi2 < inst.G:
            # inside the gate: smaller score is better
            score = np.float32(log_det + chi2)
            if score < best_score[n]:
                best_score[n] = score
                best_ind[n] = np.uint8(inst.uid)
                point[n].doppler = rv_out
# GTRACK Module calls this function to start target tracking. This function is called during modules' allocation step,
# once new set of points passes allocation thresholds
def unit_start(handle, time_stamp, tid, um):
    """
    Start target tracking on this unit.

    Called during the module's allocation step, once a new set of points
    passes the allocation thresholds. Seeds the unit's state from the
    allocation measurement and resets its filter matrices.

    Parameters
    ----------
    handle : GtrackUnitInstance
        tracking-unit instance, initialised in place
    time_stamp : int
        current frame index (becomes both heartbeat and allocation time)
    tid : int
        target identifier assigned by the module
    um : array
        allocation measurement (range, angle, doppler)
    """
    inst = handle

    # bookkeeping for the new track
    inst.tid = tid
    inst.heartBeatCount = time_stamp
    inst.allocationTime = time_stamp
    inst.allocationRange = um[0]
    inst.allocationVelocity = um[2]
    inst.associatedPoints = 0

    # 6-state (2DA) model with acceleration-driven process noise
    inst.state = ekf_utils.TrackState().TRACK_STATE_DETECTION
    inst.currentStateVectorType = ekf_utils.gtrack_STATE_VECTOR_TYPE().gtrack_STATE_VECTORS_2DA
    inst.stateVectorLength = 6
    half_acc = 0.5 * inst.maxAcceleration
    inst.processVariance = half_acc * half_acc
    inst.F = inst.F6
    inst.Q = inst.Q6
    inst.velocityHandling = ekf_utils.VelocityHandlingState().VELOCITY_INIT

    # spherical measurement seeded with the unrolled radial velocity
    meas = np.zeros(shape=(3,), dtype=np.float32)
    meas[2] = ekf_utils.gtrack_unrollRadialVelocity(inst.maxRadialVelocity, inst.initialRadialVelocity, um[2])
    inst.rangeRate = meas[2]
    meas[0] = um[0]
    meas[1] = um[1]

    # convert to the cartesian a-priori state and reset the covariances
    ekf_utils.gtrack_spherical2cartesian(inst.currentStateVectorType, meas, inst.S_apriori_hat)
    inst.H_s = copy.deepcopy(meas)
    inst.P_apriori_hat = copy.deepcopy(ekf_utils.pinit6x6)
    inst.gD = copy.deepcopy(ekf_utils.zero3x3)
    inst.G = 1.
# GTRACK Module calls this function to perform an update step for the tracking unit.
def unit_update(handle, point, var, pInd, num):
    """
    Run the GTRACK unit update (correction) step.

    Combines all measurement points associated with this unit into one group
    measurement, updates the radial-velocity state machine, the group
    dispersion matrix and the Kalman state/covariance, then runs the
    unit-level state machine.

    Parameters
    ----------
    handle : GtrackUnitInstance
        tracking-unit instance, updated in place
    point : list
        measurement points (range, angle, doppler); doppler may be rewritten
        by the unrolling step
    var : list or None
        per-point measurement variances; when None, variances are derived
        from the configured target-variation parameters
    pInd : array
        per-point uid of the associated unit
    num : int
        number of measurement points

    Returns
    -------
    int
        the unit's track state after the update
    """
    # scratch matrices, stored flat (shape comments give the logical layout)
    J = np.zeros(shape=(18,), dtype=np.float32)  # 3x6 measurement Jacobian
    PJ = np.zeros(shape=(18,), dtype=np.float32)  # 6x3
    JPJ = np.zeros(shape=(9,), dtype=np.float32)  # 3x3
    U = np.zeros(shape=(3,), dtype=np.float32)
    u_tilda = np.zeros(shape=(3,), dtype=np.float32)
    cC = np.zeros(shape=(9,), dtype=np.float32)
    cC_inv = np.zeros(shape=(9,), dtype=np.float32)
    K = np.zeros(shape=(18,), dtype=np.float32)  # 6x3 Kalman gain
    u_mean = ekf_utils.gtrack_measurementPoint()
    D = np.zeros(shape=(9,), dtype=np.float32)
    Rm = np.zeros(shape=(9,), dtype=np.float32)
    Rc = np.zeros(shape=(9,), dtype=np.float32)
    temp1 = np.zeros(shape=(36,), dtype=np.float32)
    inst = handle
    mlen = inst.measurementVectorLength
    slen = inst.stateVectorLength
    myPointNum = 0
    # accumulate the group measurement over the points assigned to this unit
    for n in range(num):
        if pInd[n] == inst.uid:
            myPointNum += 1
            u_mean.range += point[n].range
            u_mean.angle += point[n].angle
            if var != None:
                Rm[0] += var[n].rangeVar
                Rm[4] += var[n].angleVar
                Rm[8] += var[n].dopplerVar
            if myPointNum == 1:
                # the first associated point defines the doppler unrolling pilot
                rvPilot = point[n].doppler
                u_mean.doppler = rvPilot
            else:
                # unroll subsequent dopplers relative to the pilot
                rvCurrent = ekf_utils.gtrack_unrollRadialVelocity(inst.maxRadialVelocity, rvPilot, point[n].doppler)
                point[n].doppler = rvCurrent
                u_mean.doppler += rvCurrent
    if myPointNum == 0:
        # INACTIVE: no points were associated with this unit this frame
        if (np.abs(inst.S_hat[2]) < inst.radialVelocityResolution) and \
                (np.abs(inst.S_hat[3]) < inst.radialVelocityResolution):
            # velocity below resolution: freeze the position, zero the rest
            inst.S_hat = np.zeros(shape=(inst.S_hat.shape), dtype=np.float32)
            inst.S_hat[0] = inst.S_apriori_hat[0]
            inst.S_hat[1] = inst.S_apriori_hat[1]
            inst.P_hat = copy.deepcopy(inst.P_apriori_hat)
            inst.processVariance = 0
        else:
            # coast: accept the predicted state as the estimate
            inst.S_hat = copy.deepcopy(inst.S_apriori_hat)
            inst.P_hat = copy.deepcopy(inst.P_apriori_hat)
        unit_event(inst, myPointNum)
        return inst.state
    inst.associatedPoints += myPointNum
    if inst.processVariance == 0:
        # target moved again: restore the acceleration-driven process noise
        inst.processVariance = np.float32((0.5 * (inst.maxAcceleration)) * (0.5 * (inst.maxAcceleration)))
    # group centroid of the associated points
    u_mean.range = np.float32(u_mean.range / myPointNum)
    u_mean.angle = np.float32(u_mean.angle / myPointNum)
    u_mean.doppler = np.float32(u_mean.doppler / myPointNum)
    if var != None:
        Rm[0] = np.float32(Rm[0] / myPointNum)
        Rm[4] = np.float32(Rm[4] / myPointNum)
        Rm[8] = np.float32(Rm[8] / myPointNum)
    else:
        # no per-point variances: derive them from the configured target variation
        dRangeVar = np.float32((inst.variationParams.lengthStd) * (inst.variationParams.lengthStd))
        dDopplerVar = np.float32((inst.variationParams.dopplerStd) * (inst.variationParams.dopplerStd))
        Rm[0] = dRangeVar
        # angular spread of the target width at the current range
        angleStd = np.float32(2 * np.float32(np.arctan(0.5 * (inst.variationParams.widthStd) / inst.H_s[0])))
        Rm[4] = angleStd * angleStd
        Rm[8] = dDopplerVar
    U[0] = u_mean.range
    U[1] = u_mean.angle
    U[2] = u_mean.doppler
    # update the radial-velocity unrolling state machine (may rewrite U[2])
    velocity_state_handling(inst, U)
    if myPointNum > gtrack_MIN_POINTS_TO_UPDATE_DISPERSION:
        # enough points: measure the group dispersion around the centroid
        for n in range(num):
            if pInd[n] == inst.uid:
                D[0] += np.float32((point[n].range - u_mean.range) * (point[n].range - u_mean.range))
                D[4] += np.float32((point[n].angle - u_mean.angle) * (point[n].angle - u_mean.angle))
                D[8] += np.float32((point[n].doppler - u_mean.doppler) * (point[n].doppler - u_mean.doppler))
                D[1] += np.float32((point[n].range - u_mean.range) * (point[n].angle - u_mean.angle))
                D[2] += np.float32((point[n].range - u_mean.range) * (point[n].doppler - u_mean.doppler))
                D[5] += np.float32((point[n].angle - u_mean.angle) * (point[n].doppler - u_mean.doppler))
        D[0] = np.float32(D[0] / myPointNum)
        D[4] = np.float32(D[4] / myPointNum)
        D[8] = np.float32(D[8] / myPointNum)
        D[1] = np.float32(D[1] / myPointNum)
        D[2] = np.float32(D[2] / myPointNum)
        D[5] = np.float32(D[5] / myPointNum)
        # exponential smoothing of the group dispersion, weighted by point count
        alpha = np.float32(myPointNum / (inst.associatedPoints))
        if alpha < gtrack_MIN_DISPERSION_ALPHA:
            alpha = gtrack_MIN_DISPERSION_ALPHA
        inst.gD[0] = np.float32((1. - alpha) * inst.gD[0] + alpha * D[0])
        inst.gD[1] = np.float32((1. - alpha) * inst.gD[1] + alpha * D[1])
        inst.gD[2] = np.float32((1. - alpha) * inst.gD[2] + alpha * D[2])
        inst.gD[3] = np.float32(inst.gD[1])
        inst.gD[4] = np.float32((1. - alpha) * inst.gD[4] + alpha * D[4])
        inst.gD[5] = np.float32((1. - alpha) * inst.gD[5] + alpha * D[5])
        inst.gD[6] = np.float32(inst.gD[2])
        inst.gD[7] = np.float32(inst.gD[5])
        inst.gD[8] = np.float32((1. - alpha) * inst.gD[8] + alpha * D[8])
    # blend measured variance with accumulated dispersion for the combined noise
    if myPointNum > gtrack_EST_POINTS:
        alpha = 0
    else:
        alpha = np.float32((gtrack_EST_POINTS - myPointNum) / ((gtrack_EST_POINTS - 1) * myPointNum))
    Rc[0] = np.float32((Rm[0] / myPointNum) + alpha * (inst.gD[0]))
    Rc[4] = np.float32((Rm[4] / myPointNum) + alpha * (inst.gD[4]))
    Rc[8] = np.float32((Rm[8] / myPointNum) + alpha * (inst.gD[8]))
    # standard EKF correction: innovation, gain, state and covariance update
    ekf_utils.gtrack_computeJacobian(inst.currentStateVectorType, inst.S_apriori_hat, J)
    u_tilda = ekf_utils.gtrack_matrixSub(mlen, 1, U, inst.H_s)
    PJ = ekf_utils.gtrack_matrixComputePJT(inst.P_apriori_hat, J)
    JPJ = ekf_utils.gtrack_matrixMultiply(mlen, slen, mlen, J, PJ)
    cC = ekf_utils.gtrack_matrixAdd(mlen, mlen, JPJ, Rc)
    cC_inv = ekf_utils.gtrack_matrixInv3(cC)
    K = ekf_utils.gtrack_matrixMultiply(slen, mlen, mlen, PJ, cC_inv)
    temp1 = ekf_utils.gtrack_matrixMultiply(slen, mlen, 1, K, u_tilda)
    inst.S_hat = ekf_utils.gtrack_matrixAdd(slen, 1, inst.S_apriori_hat, temp1)
    temp1 = ekf_utils.gtrack_matrixTransposeMultiply(slen, mlen, slen, K, PJ)
    inst.P_hat = ekf_utils.gtrack_matrixSub(slen, slen, inst.P_apriori_hat, temp1)
    # group covariance used by the next frame's gating/scoring
    temp1 = ekf_utils.gtrack_matrixAdd(mlen, mlen, JPJ, Rm)
    inst.gC = ekf_utils.gtrack_matrixAdd(mlen, mlen, temp1, inst.gD)
    inst.gC_inv = ekf_utils.gtrack_matrixInv3(inst.gC)
    unit_event(inst, myPointNum)
    return inst.state
# this is the helper function for GTRACK unit update
def velocity_state_handling(handle, um):
    """
    Advance the radial-velocity unrolling state machine and rewrite the
    measured doppler um[2] accordingly.

    States progress INIT -> RATE_FILTER -> TRACKING -> LOCKED as confidence
    in the range-rate estimate grows.

    Parameters
    ----------
    handle : GtrackUnitInstance
        tracking-unit instance; rangeRate and velocityHandling are updated
    um : array
        group measurement (range, angle, doppler); um[2] is rewritten with
        the unrolled radial velocity
    """
    inst = handle
    rvIn = um[2]
    if inst.velocityHandling == ekf_utils.VelocityHandlingState().VELOCITY_INIT:
        # first update: trust the allocation-time range rate
        um[2] = inst.rangeRate
        inst.velocityHandling = ekf_utils.VelocityHandlingState().VELOCITY_RATE_FILTER
    elif inst.velocityHandling == ekf_utils.VelocityHandlingState().VELOCITY_RATE_FILTER:
        # filter the range rate derived from range displacement over time
        instanteneousRangeRate = np.float32(
            (um[0] - inst.allocationRange) / ((inst.heartBeatCount - inst.allocationTime) * (inst.dt)))
        inst.rangeRate = np.float32((inst.unrollingParams.alpha) * (inst.rangeRate) + (
                1 - (inst.unrollingParams.alpha)) * instanteneousRangeRate)
        um[2] = ekf_utils.gtrack_unrollRadialVelocity(inst.maxRadialVelocity, inst.rangeRate, rvIn)
        # promote once the filtered rate agrees with the instantaneous one
        rrError = np.float32((instanteneousRangeRate - inst.rangeRate) / inst.rangeRate)
        if np.abs(rrError) < inst.unrollingParams.confidence:
            inst.velocityHandling = ekf_utils.VelocityHandlingState().VELOCITY_TRACKING
    elif inst.velocityHandling == ekf_utils.VelocityHandlingState().VELOCITY_TRACKING:
        instanteneousRangeRate = np.float32(
            (um[0] - inst.allocationRange) / ((inst.heartBeatCount - inst.allocationTime) * inst.dt))
        inst.rangeRate = np.float32(
            (inst.unrollingParams.alpha) * inst.rangeRate + (1 - inst.unrollingParams.alpha) * instanteneousRangeRate)
        um[2] = ekf_utils.gtrack_unrollRadialVelocity(inst.maxRadialVelocity, inst.rangeRate, rvIn)
        # lock once the predicted doppler matches the unrolled measurement
        rvError = np.float32((inst.H_s[2] - um[2]) / um[2])
        if np.abs(rvError) < 0.1:
            inst.velocityHandling = ekf_utils.VelocityHandlingState().VELOCITY_LOCKED
    elif inst.velocityHandling == ekf_utils.VelocityHandlingState().VELOCITY_LOCKED:
        # locked: unroll against the predicted doppler directly
        um[2] = ekf_utils.gtrack_unrollRadialVelocity(inst.maxRadialVelocity, inst.H_s[2], um[2])
# GTRACK Module calls this function to run GTRACK unit level state machine
def unit_event(handle, num):
    """
    Run the GTRACK unit-level state machine.

    DETECTION promotes to ACTIVE after enough consecutive hit frames, or
    back to FREE after enough misses; ACTIVE falls back to FREE after a
    miss streak whose threshold depends on scenery (static boxes) and on
    whether the target is currently static.

    Parameters
    ----------
    handle : GtrackUnitInstance
        tracking-unit instance; state and hit/miss counters updated in place
    num : int
        number of measurement points associated with the unit this frame
    """
    inst = handle
    if inst.state == ekf_utils.TrackState().TRACK_STATE_DETECTION:
        if num > inst.allocationParams.pointsThre:
            # strong hit: reset miss count, count towards promotion
            inst.detect2freeCount = 0
            inst.detect2activeCount += 1
            if inst.detect2activeCount > inst.stateParams.det2actThre:
                inst.state = ekf_utils.TrackState().TRACK_STATE_ACTIVE
        else:
            if num == 0:
                # miss: count towards release, decay the promotion count
                inst.detect2freeCount += 1
                if inst.detect2activeCount > 0:
                    inst.detect2activeCount -= 1
                if inst.detect2freeCount > inst.stateParams.det2freeThre:
                    inst.state = ekf_utils.TrackState().TRACK_STATE_FREE
    elif inst.state == ekf_utils.TrackState().TRACK_STATE_ACTIVE:
        if num != 0:
            # any hit keeps the track alive
            inst.active2freeCount = 0
        else:
            inst.active2freeCount += 1
            # choose the release threshold based on where the target is
            if inst.sceneryParams.numStaticBoxes != 0:
                thre = inst.stateParams.exit2freeThre
                for numBoxes in range(inst.sceneryParams.numStaticBoxes):
                    if ekf_utils.isPointInsideBox(inst.S_hat[0], inst.S_hat[1],
                                                  inst.sceneryParams.boundaryBox[numBoxes]) == 1:
                        if inst.processVariance == 0:
                            # static target inside a static box: be patient
                            thre = inst.stateParams.static2freeThre
                        else:
                            thre = inst.stateParams.active2freeThre
                        break
            else:
                thre = inst.stateParams.active2freeThre
            # never require more misses than the track has lived frames
            if thre > inst.heartBeatCount:
                thre = np.uint16(inst.heartBeatCount)
            if inst.active2freeCount > thre:
                inst.state = ekf_utils.TrackState().TRACK_STATE_FREE
# GTRACK Module calls this function to report GTRACK unit results to the target descriptor
def unit_report(handle, target):
    """
    Copy the unit's current track estimate into *target*, the module-level
    target descriptor.

    Parameters
    ----------
    handle : GtrackUnitInstance
        tracking-unit instance to report from
    target : object
        descriptor receiving uid/tid, the state vector S, the inverse group
        covariance EC and the gate size G
    """
    unit = handle
    # identifiers and gate size are plain scalars: copy by assignment
    target.uid = unit.uid
    target.tid = unit.tid
    target.G = unit.G
    # state vector and covariance inverse are mutable arrays, so deep-copy
    # them to decouple the report from the live filter state
    target.S = copy.deepcopy(unit.S_hat)
    target.EC = copy.deepcopy(unit.gC_inv)
| [
"copy.deepcopy",
"numpy.uint8",
"numpy.abs",
"numpy.log",
"numpy.float32",
"numpy.zeros",
"numpy.uint16",
"numpy.arctan"
] | [((1718, 1757), 'numpy.zeros', 'np.zeros', ([], {'shape': '(36,)', 'dtype': 'np.float32'}), '(shape=(36,), dtype=np.float32)\n', (1726, 1757), True, 'import numpy as np\n'), ((1770, 1809), 'numpy.zeros', 'np.zeros', ([], {'shape': '(36,)', 'dtype': 'np.float32'}), '(shape=(36,), dtype=np.float32)\n', (1778, 1809), True, 'import numpy as np\n'), ((1822, 1861), 'numpy.zeros', 'np.zeros', ([], {'shape': '(36,)', 'dtype': 'np.float32'}), '(shape=(36,), dtype=np.float32)\n', (1830, 1861), True, 'import numpy as np\n'), ((2864, 2902), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3,)', 'dtype': 'np.float32'}), '(shape=(3,), dtype=np.float32)\n', (2872, 2902), True, 'import numpy as np\n'), ((2917, 2955), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3,)', 'dtype': 'np.float32'}), '(shape=(3,), dtype=np.float32)\n', (2925, 2955), True, 'import numpy as np\n'), ((4908, 4946), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3,)', 'dtype': 'np.float32'}), '(shape=(3,), dtype=np.float32)\n', (4916, 4946), True, 'import numpy as np\n'), ((5818, 5834), 'copy.deepcopy', 'copy.deepcopy', (['m'], {}), '(m)\n', (5831, 5834), False, 'import copy\n'), ((5861, 5894), 'copy.deepcopy', 'copy.deepcopy', (['ekf_utils.pinit6x6'], {}), '(ekf_utils.pinit6x6)\n', (5874, 5894), False, 'import copy\n'), ((5909, 5941), 'copy.deepcopy', 'copy.deepcopy', (['ekf_utils.zero3x3'], {}), '(ekf_utils.zero3x3)\n', (5922, 5941), False, 'import copy\n'), ((6102, 6141), 'numpy.zeros', 'np.zeros', ([], {'shape': '(18,)', 'dtype': 'np.float32'}), '(shape=(18,), dtype=np.float32)\n', (6110, 6141), True, 'import numpy as np\n'), ((6158, 6197), 'numpy.zeros', 'np.zeros', ([], {'shape': '(18,)', 'dtype': 'np.float32'}), '(shape=(18,), dtype=np.float32)\n', (6166, 6197), True, 'import numpy as np\n'), ((6215, 6253), 'numpy.zeros', 'np.zeros', ([], {'shape': '(9,)', 'dtype': 'np.float32'}), '(shape=(9,), dtype=np.float32)\n', (6223, 6253), True, 'import numpy as np\n'), ((6269, 6307), 'numpy.zeros', 'np.zeros', ([], 
{'shape': '(3,)', 'dtype': 'np.float32'}), '(shape=(3,), dtype=np.float32)\n', (6277, 6307), True, 'import numpy as np\n'), ((6322, 6360), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3,)', 'dtype': 'np.float32'}), '(shape=(3,), dtype=np.float32)\n', (6330, 6360), True, 'import numpy as np\n'), ((6370, 6408), 'numpy.zeros', 'np.zeros', ([], {'shape': '(9,)', 'dtype': 'np.float32'}), '(shape=(9,), dtype=np.float32)\n', (6378, 6408), True, 'import numpy as np\n'), ((6422, 6460), 'numpy.zeros', 'np.zeros', ([], {'shape': '(9,)', 'dtype': 'np.float32'}), '(shape=(9,), dtype=np.float32)\n', (6430, 6460), True, 'import numpy as np\n'), ((6469, 6508), 'numpy.zeros', 'np.zeros', ([], {'shape': '(18,)', 'dtype': 'np.float32'}), '(shape=(18,), dtype=np.float32)\n', (6477, 6508), True, 'import numpy as np\n'), ((6575, 6613), 'numpy.zeros', 'np.zeros', ([], {'shape': '(9,)', 'dtype': 'np.float32'}), '(shape=(9,), dtype=np.float32)\n', (6583, 6613), True, 'import numpy as np\n'), ((6623, 6661), 'numpy.zeros', 'np.zeros', ([], {'shape': '(9,)', 'dtype': 'np.float32'}), '(shape=(9,), dtype=np.float32)\n', (6631, 6661), True, 'import numpy as np\n'), ((6671, 6709), 'numpy.zeros', 'np.zeros', ([], {'shape': '(9,)', 'dtype': 'np.float32'}), '(shape=(9,), dtype=np.float32)\n', (6679, 6709), True, 'import numpy as np\n'), ((6723, 6762), 'numpy.zeros', 'np.zeros', ([], {'shape': '(36,)', 'dtype': 'np.float32'}), '(shape=(36,), dtype=np.float32)\n', (6731, 6762), True, 'import numpy as np\n'), ((8409, 8446), 'numpy.float32', 'np.float32', (['(u_mean.range / myPointNum)'], {}), '(u_mean.range / myPointNum)\n', (8419, 8446), True, 'import numpy as np\n'), ((8466, 8503), 'numpy.float32', 'np.float32', (['(u_mean.angle / myPointNum)'], {}), '(u_mean.angle / myPointNum)\n', (8476, 8503), True, 'import numpy as np\n'), ((8525, 8564), 'numpy.float32', 'np.float32', (['(u_mean.doppler / myPointNum)'], {}), '(u_mean.doppler / myPointNum)\n', (8535, 8564), True, 'import numpy as np\n'), ((11225, 
11276), 'numpy.float32', 'np.float32', (['(Rm[0] / myPointNum + alpha * inst.gD[0])'], {}), '(Rm[0] / myPointNum + alpha * inst.gD[0])\n', (11235, 11276), True, 'import numpy as np\n'), ((11293, 11344), 'numpy.float32', 'np.float32', (['(Rm[4] / myPointNum + alpha * inst.gD[4])'], {}), '(Rm[4] / myPointNum + alpha * inst.gD[4])\n', (11303, 11344), True, 'import numpy as np\n'), ((11361, 11412), 'numpy.float32', 'np.float32', (['(Rm[8] / myPointNum + alpha * inst.gD[8])'], {}), '(Rm[8] / myPointNum + alpha * inst.gD[8])\n', (11371, 11412), True, 'import numpy as np\n'), ((16497, 16522), 'copy.deepcopy', 'copy.deepcopy', (['inst.S_hat'], {}), '(inst.S_hat)\n', (16510, 16522), False, 'import copy\n'), ((16539, 16565), 'copy.deepcopy', 'copy.deepcopy', (['inst.gC_inv'], {}), '(inst.gC_inv)\n', (16552, 16565), False, 'import copy\n'), ((2506, 2531), 'copy.deepcopy', 'copy.deepcopy', (['inst.S_hat'], {}), '(inst.S_hat)\n', (2519, 2531), False, 'import copy\n'), ((2561, 2586), 'copy.deepcopy', 'copy.deepcopy', (['inst.P_hat'], {}), '(inst.P_hat)\n', (2574, 2586), False, 'import copy\n'), ((3371, 3382), 'numpy.log', 'np.log', (['det'], {}), '(det)\n', (3377, 3382), True, 'import numpy as np\n'), ((3522, 3562), 'numpy.float32', 'np.float32', (['(point[n].range - inst.H_s[0])'], {}), '(point[n].range - inst.H_s[0])\n', (3532, 3562), True, 'import numpy as np\n'), ((3584, 3624), 'numpy.float32', 'np.float32', (['(point[n].angle - inst.H_s[1])'], {}), '(point[n].angle - inst.H_s[1])\n', (3594, 3624), True, 'import numpy as np\n'), ((8313, 8382), 'numpy.float32', 'np.float32', (['(0.5 * inst.maxAcceleration * (0.5 * inst.maxAcceleration))'], {}), '(0.5 * inst.maxAcceleration * (0.5 * inst.maxAcceleration))\n', (8323, 8382), True, 'import numpy as np\n'), ((8602, 8632), 'numpy.float32', 'np.float32', (['(Rm[0] / myPointNum)'], {}), '(Rm[0] / myPointNum)\n', (8612, 8632), True, 'import numpy as np\n'), ((8649, 8679), 'numpy.float32', 'np.float32', (['(Rm[4] / myPointNum)'], {}), 
'(Rm[4] / myPointNum)\n', (8659, 8679), True, 'import numpy as np\n'), ((8696, 8726), 'numpy.float32', 'np.float32', (['(Rm[8] / myPointNum)'], {}), '(Rm[8] / myPointNum)\n', (8706, 8726), True, 'import numpy as np\n'), ((8757, 8832), 'numpy.float32', 'np.float32', (['(inst.variationParams.lengthStd * inst.variationParams.lengthStd)'], {}), '(inst.variationParams.lengthStd * inst.variationParams.lengthStd)\n', (8767, 8832), True, 'import numpy as np\n'), ((8859, 8936), 'numpy.float32', 'np.float32', (['(inst.variationParams.dopplerStd * inst.variationParams.dopplerStd)'], {}), '(inst.variationParams.dopplerStd * inst.variationParams.dopplerStd)\n', (8869, 8936), True, 'import numpy as np\n'), ((10025, 10054), 'numpy.float32', 'np.float32', (['(D[0] / myPointNum)'], {}), '(D[0] / myPointNum)\n', (10035, 10054), True, 'import numpy as np\n'), ((10070, 10099), 'numpy.float32', 'np.float32', (['(D[4] / myPointNum)'], {}), '(D[4] / myPointNum)\n', (10080, 10099), True, 'import numpy as np\n'), ((10115, 10144), 'numpy.float32', 'np.float32', (['(D[8] / myPointNum)'], {}), '(D[8] / myPointNum)\n', (10125, 10144), True, 'import numpy as np\n'), ((10160, 10189), 'numpy.float32', 'np.float32', (['(D[1] / myPointNum)'], {}), '(D[1] / myPointNum)\n', (10170, 10189), True, 'import numpy as np\n'), ((10205, 10234), 'numpy.float32', 'np.float32', (['(D[2] / myPointNum)'], {}), '(D[2] / myPointNum)\n', (10215, 10234), True, 'import numpy as np\n'), ((10250, 10279), 'numpy.float32', 'np.float32', (['(D[5] / myPointNum)'], {}), '(D[5] / myPointNum)\n', (10260, 10279), True, 'import numpy as np\n'), ((10297, 10343), 'numpy.float32', 'np.float32', (['(myPointNum / inst.associatedPoints)'], {}), '(myPointNum / inst.associatedPoints)\n', (10307, 10343), True, 'import numpy as np\n'), ((10487, 10540), 'numpy.float32', 'np.float32', (['((1.0 - alpha) * inst.gD[0] + alpha * D[0])'], {}), '((1.0 - alpha) * inst.gD[0] + alpha * D[0])\n', (10497, 10540), True, 'import numpy as np\n'), 
((10561, 10614), 'numpy.float32', 'np.float32', (['((1.0 - alpha) * inst.gD[1] + alpha * D[1])'], {}), '((1.0 - alpha) * inst.gD[1] + alpha * D[1])\n', (10571, 10614), True, 'import numpy as np\n'), ((10635, 10688), 'numpy.float32', 'np.float32', (['((1.0 - alpha) * inst.gD[2] + alpha * D[2])'], {}), '((1.0 - alpha) * inst.gD[2] + alpha * D[2])\n', (10645, 10688), True, 'import numpy as np\n'), ((10709, 10731), 'numpy.float32', 'np.float32', (['inst.gD[1]'], {}), '(inst.gD[1])\n', (10719, 10731), True, 'import numpy as np\n'), ((10753, 10806), 'numpy.float32', 'np.float32', (['((1.0 - alpha) * inst.gD[4] + alpha * D[4])'], {}), '((1.0 - alpha) * inst.gD[4] + alpha * D[4])\n', (10763, 10806), True, 'import numpy as np\n'), ((10827, 10880), 'numpy.float32', 'np.float32', (['((1.0 - alpha) * inst.gD[5] + alpha * D[5])'], {}), '((1.0 - alpha) * inst.gD[5] + alpha * D[5])\n', (10837, 10880), True, 'import numpy as np\n'), ((10901, 10923), 'numpy.float32', 'np.float32', (['inst.gD[2]'], {}), '(inst.gD[2])\n', (10911, 10923), True, 'import numpy as np\n'), ((10945, 10967), 'numpy.float32', 'np.float32', (['inst.gD[5]'], {}), '(inst.gD[5])\n', (10955, 10967), True, 'import numpy as np\n'), ((10989, 11042), 'numpy.float32', 'np.float32', (['((1.0 - alpha) * inst.gD[8] + alpha * D[8])'], {}), '((1.0 - alpha) * inst.gD[8] + alpha * D[8])\n', (10999, 11042), True, 'import numpy as np\n'), ((11126, 11215), 'numpy.float32', 'np.float32', (['((gtrack_EST_POINTS - myPointNum) / ((gtrack_EST_POINTS - 1) * myPointNum))'], {}), '((gtrack_EST_POINTS - myPointNum) / ((gtrack_EST_POINTS - 1) *\n myPointNum))\n', (11136, 11215), True, 'import numpy as np\n'), ((4033, 4077), 'numpy.float32', 'np.float32', (['(rv_out - inst.allocationVelocity)'], {}), '(rv_out - inst.allocationVelocity)\n', (4043, 4077), True, 'import numpy as np\n'), ((4282, 4314), 'numpy.float32', 'np.float32', (['(rv_out - inst.H_s[2])'], {}), '(rv_out - inst.H_s[2])\n', (4292, 4314), True, 'import numpy as np\n'), 
((4466, 4492), 'numpy.float32', 'np.float32', (['(log_det + chi2)'], {}), '(log_det + chi2)\n', (4476, 4492), True, 'import numpy as np\n'), ((7758, 7808), 'numpy.zeros', 'np.zeros', ([], {'shape': 'inst.S_hat.shape', 'dtype': 'np.float32'}), '(shape=inst.S_hat.shape, dtype=np.float32)\n', (7766, 7808), True, 'import numpy as np\n'), ((7938, 7971), 'copy.deepcopy', 'copy.deepcopy', (['inst.P_apriori_hat'], {}), '(inst.P_apriori_hat)\n', (7951, 7971), False, 'import copy\n'), ((8049, 8082), 'copy.deepcopy', 'copy.deepcopy', (['inst.S_apriori_hat'], {}), '(inst.S_apriori_hat)\n', (8062, 8082), False, 'import copy\n'), ((8108, 8141), 'copy.deepcopy', 'copy.deepcopy', (['inst.P_apriori_hat'], {}), '(inst.P_apriori_hat)\n', (8121, 8141), False, 'import copy\n'), ((12942, 13047), 'numpy.float32', 'np.float32', (['((um[0] - inst.allocationRange) / ((inst.heartBeatCount - inst.\n allocationTime) * inst.dt))'], {}), '((um[0] - inst.allocationRange) / ((inst.heartBeatCount - inst.\n allocationTime) * inst.dt))\n', (12952, 13047), True, 'import numpy as np\n'), ((13083, 13203), 'numpy.float32', 'np.float32', (['(inst.unrollingParams.alpha * inst.rangeRate + (1 - inst.unrollingParams.\n alpha) * instanteneousRangeRate)'], {}), '(inst.unrollingParams.alpha * inst.rangeRate + (1 - inst.\n unrollingParams.alpha) * instanteneousRangeRate)\n', (13093, 13203), True, 'import numpy as np\n'), ((13341, 13411), 'numpy.float32', 'np.float32', (['((instanteneousRangeRate - inst.rangeRate) / inst.rangeRate)'], {}), '((instanteneousRangeRate - inst.rangeRate) / inst.rangeRate)\n', (13351, 13411), True, 'import numpy as np\n'), ((4599, 4617), 'numpy.uint8', 'np.uint8', (['inst.uid'], {}), '(inst.uid)\n', (4607, 4617), True, 'import numpy as np\n'), ((7599, 7620), 'numpy.abs', 'np.abs', (['inst.S_hat[2]'], {}), '(inst.S_hat[2])\n', (7605, 7620), True, 'import numpy as np\n'), ((7677, 7698), 'numpy.abs', 'np.abs', (['inst.S_hat[3]'], {}), '(inst.S_hat[3])\n', (7683, 7698), True, 'import numpy 
as np\n'), ((9405, 9482), 'numpy.float32', 'np.float32', (['((point[n].range - u_mean.range) * (point[n].range - u_mean.range))'], {}), '((point[n].range - u_mean.range) * (point[n].range - u_mean.range))\n', (9415, 9482), True, 'import numpy as np\n'), ((9507, 9584), 'numpy.float32', 'np.float32', (['((point[n].angle - u_mean.angle) * (point[n].angle - u_mean.angle))'], {}), '((point[n].angle - u_mean.angle) * (point[n].angle - u_mean.angle))\n', (9517, 9584), True, 'import numpy as np\n'), ((9609, 9699), 'numpy.float32', 'np.float32', (['((point[n].doppler - u_mean.doppler) * (point[n].doppler - u_mean.doppler))'], {}), '((point[n].doppler - u_mean.doppler) * (point[n].doppler - u_mean\n .doppler))\n', (9619, 9699), True, 'import numpy as np\n'), ((9719, 9796), 'numpy.float32', 'np.float32', (['((point[n].range - u_mean.range) * (point[n].angle - u_mean.angle))'], {}), '((point[n].range - u_mean.range) * (point[n].angle - u_mean.angle))\n', (9729, 9796), True, 'import numpy as np\n'), ((9821, 9907), 'numpy.float32', 'np.float32', (['((point[n].range - u_mean.range) * (point[n].doppler - u_mean.doppler))'], {}), '((point[n].range - u_mean.range) * (point[n].doppler - u_mean.\n doppler))\n', (9831, 9907), True, 'import numpy as np\n'), ((9927, 10013), 'numpy.float32', 'np.float32', (['((point[n].angle - u_mean.angle) * (point[n].doppler - u_mean.doppler))'], {}), '((point[n].angle - u_mean.angle) * (point[n].doppler - u_mean.\n doppler))\n', (9937, 10013), True, 'import numpy as np\n'), ((13424, 13439), 'numpy.abs', 'np.abs', (['rrError'], {}), '(rrError)\n', (13430, 13439), True, 'import numpy as np\n'), ((13683, 13788), 'numpy.float32', 'np.float32', (['((um[0] - inst.allocationRange) / ((inst.heartBeatCount - inst.\n allocationTime) * inst.dt))'], {}), '((um[0] - inst.allocationRange) / ((inst.heartBeatCount - inst.\n allocationTime) * inst.dt))\n', (13693, 13788), True, 'import numpy as np\n'), ((13823, 13943), 'numpy.float32', 'np.float32', 
(['(inst.unrollingParams.alpha * inst.rangeRate + (1 - inst.unrollingParams.\n alpha) * instanteneousRangeRate)'], {}), '(inst.unrollingParams.alpha * inst.rangeRate + (1 - inst.\n unrollingParams.alpha) * instanteneousRangeRate)\n', (13833, 13943), True, 'import numpy as np\n'), ((14072, 14113), 'numpy.float32', 'np.float32', (['((inst.H_s[2] - um[2]) / um[2])'], {}), '((inst.H_s[2] - um[2]) / um[2])\n', (14082, 14113), True, 'import numpy as np\n'), ((9013, 9073), 'numpy.arctan', 'np.arctan', (['(0.5 * inst.variationParams.widthStd / inst.H_s[0])'], {}), '(0.5 * inst.variationParams.widthStd / inst.H_s[0])\n', (9022, 9073), True, 'import numpy as np\n'), ((14125, 14140), 'numpy.abs', 'np.abs', (['rvError'], {}), '(rvError)\n', (14131, 14140), True, 'import numpy as np\n'), ((16138, 16168), 'numpy.uint16', 'np.uint16', (['inst.heartBeatCount'], {}), '(inst.heartBeatCount)\n', (16147, 16168), True, 'import numpy as np\n')] |
# Use should provide a conformant object `hive`
# and call the function `test_all` in their Hive setup
# to test the UDF functionalities.
import random
import string
from typing import List, Sequence, Tuple
import numpy as np
import pandas as pd
from hive_udf import (
make_udf,
hive_udf_example,
hive_udaf_example,
hive_udf_args_example)
class _Hive:
def execute(self, sql: str):
raise NotImplementedError
def fetchall(self) -> List[tuple]:
raise NotImplementedError
def create_table(self, db_name, tb_name, cols: Sequence[Tuple[str, str]]):
raise NotImplementedError
def _test_udf(hive, db_name, tb_name):
print('records in table:')
hive.execute(f'SELECT * FROM {db_name}.{tb_name} LIMIT 10')
print(hive.fetchall())
code = make_udf(hive_udf_example)
sql = f'''
SELECT
TRANSFORM (
id,
info_json
)
USING '{code}'
AS (make STRING, price FLOAT)
FROM {db_name}.{tb_name}
'''
hive.execute(sql)
rows = hive.fetchall()
z = pd.DataFrame.from_records(
list(rows), columns=['make', 'price'])
z = z.sort_values(['make', 'price'])
print('z')
print(z)
print('')
# Expected result:
#
# make price
# 1 ford 2000.0
# 5 ford 4000.0
# 2 ford NaN
# 0 honda 1000.0
# 4 honda 2000.0
# 3 tesla 3000.0
assert len(z) == 6
assert z['make'].tolist() == [
'ford', 'ford', 'ford', 'honda', 'honda', 'tesla']
assert np.isnan(z['price'].iloc[2])
assert z['price'].iloc[3] == 1000
def _test_udaf(hive, db_name, tb_name):
code = make_udf(hive_udaf_example)
sql = f'''
SELECT
TRANSFORM (
info_json
)
USING '{code}'
AS (make STRING, avg_price FLOAT, null_prices INT)
FROM (
SELECT
id,
info_json
FROM {db_name}.{tb_name}
CLUSTER BY GET_JSON_OBJECT(info_json, '$.make')
) AS t
'''
hive.execute(sql)
rows = hive.fetchall()
z = pd.DataFrame.from_records(
list(rows), columns=['make', 'avg_price', 'null_prices'])
print('z')
print(z)
print('')
# Expected result:
#
# make avg_price null_prices
# 1 ford 3000.0 1
# 0 honda 1500.0 0
# 2 tesla 3000.0 0
assert len(z) == 3
z = z.sort_values(['make'])
assert z['make'].tolist() == ['ford', 'honda', 'tesla']
assert z['avg_price'].tolist() == [3000, 1500, 3000]
assert z['null_prices'].tolist() == [1, 0, 0]
def _run_sql(hive, sql, cols):
hive.execute(sql)
rows = hive.fetchall()
z = pd.DataFrame.from_records(list(rows), columns=cols)
z = z.sort_values(cols)
print('z')
print(z)
print('')
return z
def _test_udf_args(hive, db_name, tb_name):
def make_sql(country, default_price):
code = make_udf(hive_udf_args_example, country, default_price)
sql = f'''
SELECT
TRANSFORM (
id,
info_json
)
USING '{code}'
AS (make STRING, price FLOAT)
FROM {db_name}.{tb_name}
'''
return sql
### jap ###
sql = make_sql('jap', 250)
z = _run_sql(hive, sql, ['make', 'price'])
# Expected result:
#
# make price
# 0 honda 1000.0
# 1 honda 2000.0
assert z['make'].tolist() == ['honda', 'honda']
assert z['price'].tolist() == [1000, 2000]
### america ###
sql = make_sql('america', 550)
z = _run_sql(hive, sql, ['make', 'price'])
# Expected result:
#
# make price
# 1 ford 550.0
# 0 ford 2000.0
# 3 ford 4000.0
# 2 tesla 3000.0
assert z['make'].tolist() == ['ford', 'ford', 'ford', 'tesla']
assert z['price'].tolist() == [550, 2000, 4000, 3000]
sql = make_sql('all', 340)
z = _run_sql(hive, sql, ['make', 'price'])
# Expected result:
#
# make price
# 2 ford 340.0
# 1 ford 2000.0
# 5 ford 4000.0
# 0 honda 1000.0
# 4 honda 2000.0
# 3 tesla 3000.0
assert z['make'].tolist() == ['ford', 'ford', 'ford',
'honda', 'honda', 'tesla']
assert z['price'].tolist() == [340, 2000, 4000, 1000, 2000, 3000]
def _test_udf_followed_by_agg(hive, db_name, tb_name):
code = make_udf(hive_udf_example)
sql = f'''
SELECT
make,
SUM(price) AS price_total
FROM (
SELECT
TRANSFORM (
id,
info_json
)
USING '{code}'
AS (make STRING, price FLOAT)
FROM {db_name}.{tb_name}
) A
GROUP BY make
'''
z = _run_sql(hive, sql, ['make', 'price_total'])
# Expected result:
#
# make price_total
# 0 ford 6000.0
# 1 honda 3000.0
# 2 tesla 3000.0
assert len(z) == 3
assert z['make'].tolist() == ['ford', 'honda', 'tesla']
assert z['price_total'].tolist() == [6000.0, 3000.0, 3000.0]
def _make_tmp_name():
return 'tmp_' + ''.join(random.choices(string.ascii_lowercase, k=20))
def _get_databases(hive: _Hive):
hive.execute('SHOW DATABASES')
z = hive.fetchall()
return [v[0] for v in z]
def _get_tables(hive: _Hive, db_name: str):
hive.execute(f'SHOW TABLES IN {db_name}')
z = hive.fetchall()
return [v[0] for v in z]
def test_all(hive: _Hive, db_name: str):
assert db_name in _get_databases(hive)
tb_name = _make_tmp_name()
print('creating table "{}.{}"'.format(db_name, tb_name))
hive.execute(f'DROP TABLE IF EXISTS {db_name}.{tb_name}')
# hive.execute(f'''
# CREATE TABLE {db_name}.{tb_name}
# (
# id INT,
# info_json STRING
# )
# STORED AS ORC
# TBLPROPERTIES (
# 'orc.compress'='ZLIB'
# )
# ''')
hive.create_table(
db_name,
tb_name,
[('id', 'INT'), ('info_json', 'STRING')],
)
try:
assert tb_name in _get_tables(hive, db_name)
print(f'table {db_name}.{tb_name} created successfully')
hive.execute(f'''
INSERT OVERWRITE TABLE {db_name}.{tb_name}
VALUES
(1, '{{"make": "honda", "price": 1000}}'),
(2, '{{"make": "ford", "price": 2000}}'),
(3, '{{"make": "ford"}}'),
(4, '{{"make": "tesla", "price": 3000}}'),
(5, '{{"make": "honda", "price": 2000}}'),
(6, '{{"make": "ford", "price": 4000}}')
''')
_test_udf(hive, db_name, tb_name)
_test_udaf(hive, db_name, tb_name)
_test_udf_args(hive, db_name, tb_name)
_test_udf_followed_by_agg(hive, db_name, tb_name)
finally:
print(f'dropping table {db_name}.{tb_name}')
hive.execute(f'DROP TABLE IF EXISTS {db_name}.{tb_name}')
assert tb_name not in _get_tables(hive, db_name)
| [
"hive_udf.make_udf",
"random.choices",
"numpy.isnan"
] | [((802, 828), 'hive_udf.make_udf', 'make_udf', (['hive_udf_example'], {}), '(hive_udf_example)\n', (810, 828), False, 'from hive_udf import make_udf, hive_udf_example, hive_udaf_example, hive_udf_args_example\n'), ((1591, 1619), 'numpy.isnan', 'np.isnan', (["z['price'].iloc[2]"], {}), "(z['price'].iloc[2])\n", (1599, 1619), True, 'import numpy as np\n'), ((1711, 1738), 'hive_udf.make_udf', 'make_udf', (['hive_udaf_example'], {}), '(hive_udaf_example)\n', (1719, 1738), False, 'from hive_udf import make_udf, hive_udf_example, hive_udaf_example, hive_udf_args_example\n'), ((4590, 4616), 'hive_udf.make_udf', 'make_udf', (['hive_udf_example'], {}), '(hive_udf_example)\n', (4598, 4616), False, 'from hive_udf import make_udf, hive_udf_example, hive_udaf_example, hive_udf_args_example\n'), ((3056, 3111), 'hive_udf.make_udf', 'make_udf', (['hive_udf_args_example', 'country', 'default_price'], {}), '(hive_udf_args_example, country, default_price)\n', (3064, 3111), False, 'from hive_udf import make_udf, hive_udf_example, hive_udaf_example, hive_udf_args_example\n'), ((5393, 5437), 'random.choices', 'random.choices', (['string.ascii_lowercase'], {'k': '(20)'}), '(string.ascii_lowercase, k=20)\n', (5407, 5437), False, 'import random\n')] |
# coding: utf-8
import sys, os
sys.path.append(os.pardir) # 부모 디렉터리의 파일을 가져올 수 있도록 설정
import numpy as np
from dataset.mnist import load_mnist
from PIL import Image
def img_show(img):
pil_img = Image.fromarray(np.uint8(img))
pil_img.show()
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False)
print(x_train.shape, t_train.shape, x_test.shape, t_test.shape)
img = x_train[0]
print(type(img))
print('img information: ', img.shape, img.max(), img.min())
label = t_train[0]
print(label) # 5
print(type(label))
print('label information: ', label.shape, label.max(), label.min())
print(img.shape) # (784,)
img = img.reshape(28, 28) # 형상을 원래 이미지의 크기로 변형
print(img.shape) # (28, 28)
img_show(img)
| [
"sys.path.append",
"dataset.mnist.load_mnist",
"numpy.uint8"
] | [((31, 57), 'sys.path.append', 'sys.path.append', (['os.pardir'], {}), '(os.pardir)\n', (46, 57), False, 'import sys, os\n'), ((290, 331), 'dataset.mnist.load_mnist', 'load_mnist', ([], {'flatten': '(True)', 'normalize': '(False)'}), '(flatten=True, normalize=False)\n', (300, 331), False, 'from dataset.mnist import load_mnist\n'), ((216, 229), 'numpy.uint8', 'np.uint8', (['img'], {}), '(img)\n', (224, 229), True, 'import numpy as np\n')] |
from faceEncodings import getencodes
import face_recognition as fr
import cv2
import os
import numpy as np
encodedfacesknown = getencodes()
def recognize(face):
name = -1
try:
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
encodeface = fr.face_encodings(face)
facedist = fr.face_distance(encodedfacesknown, encodeface[0])
matches = fr.compare_faces(encodedfacesknown, encodeface[0])
matchIndex = np.argmin(facedist)
if matches[matchIndex]:
name = matchIndex
except:
print("Error recognizing face...")
if name == -1:
return 0
else:
return name+1 | [
"face_recognition.face_distance",
"face_recognition.compare_faces",
"faceEncodings.getencodes",
"cv2.cvtColor",
"face_recognition.face_encodings",
"numpy.argmin"
] | [((128, 140), 'faceEncodings.getencodes', 'getencodes', ([], {}), '()\n', (138, 140), False, 'from faceEncodings import getencodes\n'), ((202, 239), 'cv2.cvtColor', 'cv2.cvtColor', (['face', 'cv2.COLOR_BGR2RGB'], {}), '(face, cv2.COLOR_BGR2RGB)\n', (214, 239), False, 'import cv2\n'), ((261, 284), 'face_recognition.face_encodings', 'fr.face_encodings', (['face'], {}), '(face)\n', (278, 284), True, 'import face_recognition as fr\n'), ((304, 354), 'face_recognition.face_distance', 'fr.face_distance', (['encodedfacesknown', 'encodeface[0]'], {}), '(encodedfacesknown, encodeface[0])\n', (320, 354), True, 'import face_recognition as fr\n'), ((373, 423), 'face_recognition.compare_faces', 'fr.compare_faces', (['encodedfacesknown', 'encodeface[0]'], {}), '(encodedfacesknown, encodeface[0])\n', (389, 423), True, 'import face_recognition as fr\n'), ((445, 464), 'numpy.argmin', 'np.argmin', (['facedist'], {}), '(facedist)\n', (454, 464), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 09:55:19 2015
@author: Ben
"""
import config as cfg
import pandas as pd
import util
from datamapfunctions import Abstract
import numpy as np
import inspect
from util import DfOper
from shared_classes import StockItem
import logging
import pdb
class FlexibleLoadMeasure(Abstract):
def __init__(self, id):
self.id = id
self.sql_id_table = 'DemandFlexibleLoadMeasures'
self.sql_data_table = 'DemandFlexibleLoadMeasuresData'
Abstract.__init__(self, self.id, primary_key='id', data_id_key='parent_id')
self.input_type = 'intensity'
self.remap()
self.values.sort_index(inplace=True)
#TODO we should really do something like this to separate the class constructor from the DB
@classmethod
def make_from_db(cls, id):
# read DB here
return FlexibleLoadMeasure()
@classmethod
def make_from_perturbation(cls, perturbation):
# set up here
return FlexibleLoadMeasure()
# this class really should be combined with the top flexible load measure class but the constructor is not flexible enough
class FlexibleLoadMeasure2(Abstract):
def __init__(self, perturbation):
self.raw_values = self.perturbation_to_raw_values(perturbation)
self.name = 'perturbation'
self.interpolation_method = 'nearest'
self.extrapolation_method = 'nearest'
self.input_type = 'intensity'
self.geography = cfg.primary_geography # the perturbations come in already geomapped
self.remap()
org_index = self.values.index.names
temp = self.values.reset_index()
start_year = self.raw_values.index.get_level_values('year').min()
temp.loc[temp['year'] < start_year, 'value'] = 0
self.values = temp.set_index(org_index).sort()
def perturbation_to_raw_values(self, perturbation):
raw_values = perturbation.sales_share_changes['percent_of_load_that_is_flexible'].to_frame()
raw_values.columns = ['value']
raw_values.index = raw_values.index.rename('demand_technology', 'demand_technology_id')
raw_values = raw_values.groupby(level=['demand_technology', cfg.primary_geography, 'year']).mean()
raw_values = raw_values.reset_index()
# this is because when we have linked technologies, that technology linkage is not updated with our new tech names
if perturbation.sales_share_changes['adoption_achieved'].sum()>0:
raw_values['demand_technology'] = raw_values['demand_technology'].map(perturbation.new_techs)
raw_values = raw_values.set_index(['demand_technology', cfg.primary_geography, 'year'])
assert not any(raw_values.values>1)
return raw_values
class DemandMeasure(StockItem):
def __init__(self):
StockItem.__init__(self)
def calculate(self, vintages, years, unit_to):
self.vintages = vintages
self.years = years
self.unit_to = unit_to
self.convert()
self.clean_data()
attributes = vars(self)
for att in attributes:
obj = getattr(self, att)
if inspect.isclass(type(obj)) and hasattr(obj, '__dict__') and hasattr(obj, 'calculate'):
obj.calculate(self.vintages, self.years, self.unit_to)
def clean_data(self):
if self.input_type == 'total':
self.savings = self.clean_timeseries('values', inplace=False, time_index_name='year', time_index=self.years)
else:
self.remap(map_from='raw_values', map_to='values', time_index_name='year',lower=-100)
def convert(self):
if self.input_type == 'total':
self.values = util.unit_convert(self.raw_values, unit_from_num=self.unit,
unit_to_num=self.unit_to)
else:
self.values = self.raw_values
def calculate_book_life(self):
"""
determines book life for measures based on input mean or max/min lifetimes.
Used for cost levelization
"""
if hasattr(self, 'mean_lifetime'):
self.book_life = getattr(self, 'mean_lifetime')
elif hasattr(self, 'max_lifetime') and hasattr(self, 'min_lifetime'):
self.book_life = (getattr(self, 'min_lifetime') + getattr(self, 'max_lifetime')) / 2
else:
logging.debug("incomplete lifetime information entered for technology %s" % self.name)
class ServiceDemandMeasure(Abstract, DemandMeasure):
def __init__(self, id, cost_of_capital, **kwargs):
self.id = id
# self.service_demand_unit = service_demand_unit
self.sql_id_table = 'DemandServiceDemandMeasures'
self.sql_data_table = 'DemandServiceDemandMeasuresData'
Abstract.__init__(self, self.id, primary_key='id', data_id_key = 'parent_id')
DemandMeasure.__init__(self)
self.cost_of_capital = cost_of_capital
self.calculate_book_life()
self.cost = DemandMeasureCost(id, self.cost_of_capital, self.book_life, 'DemandServiceDemandMeasuresCost', 'DemandServiceDemandMeasuresCostData')
class EnergyEfficiencyMeasure(Abstract, DemandMeasure):
def __init__(self, id, cost_of_capital, **kwargs):
self.id = id
self.sql_id_table = 'DemandEnergyEfficiencyMeasures'
self.sql_data_table = 'DemandEnergyEfficiencyMeasuresData'
Abstract.__init__(self, self.id, primary_key='id', data_id_key='parent_id')
DemandMeasure.__init__(self)
self.calculate_book_life()
self.cost_of_capital = cost_of_capital
self.cost = DemandMeasureCost(id, self.cost_of_capital, self.book_life, 'DemandEnergyEfficiencyMeasuresCost', 'DemandEnergyEfficiencyMeasuresCostData')
class FuelSwitchingMeasure(Abstract, StockItem):
def __init__(self, id, cost_of_capital, **kwargs):
self.id = id
self.sql_id_table = 'DemandFuelSwitchingMeasures'
self.sql_data_table = 'DemandFuelSwitchingMeasuresData'
for col, att in util.object_att_from_table(self.sql_id_table, self.id):
if att is not None:
setattr(self, col, att)
self.calculate_book_life()
self.cost_of_capital = cost_of_capital
self.impact = FuelSwitchingImpact(self.id)
self.energy_intensity = FuelSwitchingEnergyIntensity(self.id)
self.cost = DemandMeasureCost(id, self.cost_of_capital, self.book_life, 'DemandFuelSwitchingMeasuresCost', 'DemandFuelSwitchingMeasuresCostData')
def calculate(self, vintages, years, unit_to):
self.vintages = vintages
self.years = years
self.unit_to = unit_to
attributes = vars(self)
for att in attributes:
obj = getattr(self, att)
if inspect.isclass(type(obj)) and hasattr(obj, '__dict__') and hasattr(obj, 'calculate'):
obj.calculate(self.vintages, self.years, self.unit_to)
def energy_replace(self):
self.replace_impact = DfOper.mult([self.energy_intensity, self.impact])
def calculate_book_life(self):
"""
determines book life for measures based on input mean or max/min lifetimes.
Used for cost levelization
"""
if hasattr(self, 'mean_lifetime'):
self.book_life = getattr(self, 'mean_lifetime')
elif hasattr(self, 'max_lifetime') and hasattr(self, 'min_lifetime'):
self.book_life = (getattr(self, 'min_lifetime') + getattr(self, 'max_lifetime')) / 2
else:
logging.debug("incomplete lifetime information entered for technology %s" % self.name)
class FuelSwitchingImpact(Abstract):
def __init__(self, id, *kwargs):
self.id = id
self.sql_id_table = 'DemandFuelSwitchingMeasuresImpact'
self.sql_data_table = 'DemandFuelSwitchingMeasuresImpactData'
Abstract.__init__(self, self.id, primary_key='parent_id', data_id_key='parent_id')
def calculate(self, vintages, years, unit_to):
self.vintages = vintages
self.years = years
self.unit_to = unit_to
self.clean_data()
self.convert()
def clean_data(self):
if self.input_type == 'total':
self.savings = self.clean_timeseries('values', inplace=False, time_index=self.years)
else:
self.remap(map_from='raw_values', map_to='values', time_index_name='year')
def convert(self):
if self.input_type == 'total':
self.values = util.unit_convert(self.raw_values, unit_from_num=self.unit,
unit_to_num=self.unit_to)
else:
pass
class FuelSwitchingEnergyIntensity(Abstract):
def __init__(self, id, **kwargs):
self.id = id
self.sql_id_table = 'DemandFuelSwitchingMeasuresEnergyIntensity'
self.sql_data_table = 'DemandFuelSwitchingMeasuresEnergyIntensityData'
self.input_type = 'intensity'
Abstract.__init__(self, id, primary_key='parent_id', data_id_key='parent_id')
def calculate(self, years, vintages, unit_to):
self.years = years
self.remap(map_from='raw_values', map_to='values', time_index=self.years)
class DemandMeasureCost(Abstract):
def __init__(self, id, cost_of_capital, book_life, sql_id_table, sql_data_table, **kwargs):
self.id = id
self.book_life = book_life
self.input_type = 'intensity'
self.sql_id_table = sql_id_table
self.sql_data_table = sql_data_table
Abstract.__init__(self, id, primary_key='parent_id', data_id_key='parent_id')
if hasattr(self, 'cost_of_capital'):
if self.cost_of_capital is None:
self.cost_of_capital = cost_of_capital
def calculate(self, vintages, years, unit_to):
self.vintages = vintages
self.years = years
self.unit_to = unit_to
if self.data and self.raw_values is not None:
self.convert_cost()
self.remap(map_from='values', map_to='values', time_index_name='vintage')
self.levelize_costs()
if self.data is False:
self.absolute = False
if self.raw_values is None:
# if the class is empty, then there is no data for conversion, so the class is considered absolute
self.absolute = True
def convert_cost(self):
"""
convert raw_values to model currency and energy
"""
self.values = util.unit_convert(self.raw_values, unit_from_den=self.cost_denominator_unit,
unit_to_den=self.unit_to)
self.values = util.currency_convert(self.values, self.currency_id, self.currency_year_id)
def levelize_costs(self):
if self.is_levelized == 1:
inflation = float(cfg.cfgfile.get('case', 'inflation_rate'))
rate = self.cost_of_capital - inflation
if self.is_levelized == 0:
self.values_level = - np.pmt(rate, self.book_life, 1, 0, 'end') * self.values
util.convert_age(self, attr_from='values_level', attr_to='values_level', reverse=False,
vintages=self.vintages, years=self.years)
else:
self.values_level = self.values.copy()
util.convert_age(self, attr_from='values_level', attr_to='values_level', reverse=False,
vintages=self.vintages, years=self.years)
self.values = np.pv(rate, self.book_life, -1, 0, 'end') * self.values
else:
util.convert_age(self, reverse=False, vintages=self.vintages, years=self.years) | [
"config.cfgfile.get",
"util.currency_convert",
"datamapfunctions.Abstract.__init__",
"logging.debug",
"util.DfOper.mult",
"util.unit_convert",
"numpy.pmt",
"numpy.pv",
"util.convert_age",
"util.object_att_from_table",
"shared_classes.StockItem.__init__"
] | [((510, 585), 'datamapfunctions.Abstract.__init__', 'Abstract.__init__', (['self', 'self.id'], {'primary_key': '"""id"""', 'data_id_key': '"""parent_id"""'}), "(self, self.id, primary_key='id', data_id_key='parent_id')\n", (527, 585), False, 'from datamapfunctions import Abstract\n'), ((2825, 2849), 'shared_classes.StockItem.__init__', 'StockItem.__init__', (['self'], {}), '(self)\n', (2843, 2849), False, 'from shared_classes import StockItem\n'), ((4779, 4854), 'datamapfunctions.Abstract.__init__', 'Abstract.__init__', (['self', 'self.id'], {'primary_key': '"""id"""', 'data_id_key': '"""parent_id"""'}), "(self, self.id, primary_key='id', data_id_key='parent_id')\n", (4796, 4854), False, 'from datamapfunctions import Abstract\n'), ((5400, 5475), 'datamapfunctions.Abstract.__init__', 'Abstract.__init__', (['self', 'self.id'], {'primary_key': '"""id"""', 'data_id_key': '"""parent_id"""'}), "(self, self.id, primary_key='id', data_id_key='parent_id')\n", (5417, 5475), False, 'from datamapfunctions import Abstract\n'), ((6028, 6082), 'util.object_att_from_table', 'util.object_att_from_table', (['self.sql_id_table', 'self.id'], {}), '(self.sql_id_table, self.id)\n', (6054, 6082), False, 'import util\n'), ((6990, 7039), 'util.DfOper.mult', 'DfOper.mult', (['[self.energy_intensity, self.impact]'], {}), '([self.energy_intensity, self.impact])\n', (7001, 7039), False, 'from util import DfOper\n'), ((7851, 7938), 'datamapfunctions.Abstract.__init__', 'Abstract.__init__', (['self', 'self.id'], {'primary_key': '"""parent_id"""', 'data_id_key': '"""parent_id"""'}), "(self, self.id, primary_key='parent_id', data_id_key=\n 'parent_id')\n", (7868, 7938), False, 'from datamapfunctions import Abstract\n'), ((8945, 9022), 'datamapfunctions.Abstract.__init__', 'Abstract.__init__', (['self', 'id'], {'primary_key': '"""parent_id"""', 'data_id_key': '"""parent_id"""'}), "(self, id, primary_key='parent_id', data_id_key='parent_id')\n", (8962, 9022), False, 'from datamapfunctions import 
Abstract\n'), ((9506, 9583), 'datamapfunctions.Abstract.__init__', 'Abstract.__init__', (['self', 'id'], {'primary_key': '"""parent_id"""', 'data_id_key': '"""parent_id"""'}), "(self, id, primary_key='parent_id', data_id_key='parent_id')\n", (9523, 9583), False, 'from datamapfunctions import Abstract\n'), ((10455, 10561), 'util.unit_convert', 'util.unit_convert', (['self.raw_values'], {'unit_from_den': 'self.cost_denominator_unit', 'unit_to_den': 'self.unit_to'}), '(self.raw_values, unit_from_den=self.cost_denominator_unit,\n unit_to_den=self.unit_to)\n', (10472, 10561), False, 'import util\n'), ((10620, 10695), 'util.currency_convert', 'util.currency_convert', (['self.values', 'self.currency_id', 'self.currency_year_id'], {}), '(self.values, self.currency_id, self.currency_year_id)\n', (10641, 10695), False, 'import util\n'), ((3704, 3794), 'util.unit_convert', 'util.unit_convert', (['self.raw_values'], {'unit_from_num': 'self.unit', 'unit_to_num': 'self.unit_to'}), '(self.raw_values, unit_from_num=self.unit, unit_to_num=\n self.unit_to)\n', (3721, 3794), False, 'import util\n'), ((8479, 8569), 'util.unit_convert', 'util.unit_convert', (['self.raw_values'], {'unit_from_num': 'self.unit', 'unit_to_num': 'self.unit_to'}), '(self.raw_values, unit_from_num=self.unit, unit_to_num=\n self.unit_to)\n', (8496, 8569), False, 'import util\n'), ((11563, 11642), 'util.convert_age', 'util.convert_age', (['self'], {'reverse': '(False)', 'vintages': 'self.vintages', 'years': 'self.years'}), '(self, reverse=False, vintages=self.vintages, years=self.years)\n', (11579, 11642), False, 'import util\n'), ((4375, 4465), 'logging.debug', 'logging.debug', (["('incomplete lifetime information entered for technology %s' % self.name)"], {}), "('incomplete lifetime information entered for technology %s' %\n self.name)\n", (4388, 4465), False, 'import logging\n'), ((7525, 7615), 'logging.debug', 'logging.debug', (["('incomplete lifetime information entered for technology %s' % self.name)"], 
{}), "('incomplete lifetime information entered for technology %s' %\n self.name)\n", (7538, 7615), False, 'import logging\n'), ((10792, 10833), 'config.cfgfile.get', 'cfg.cfgfile.get', (['"""case"""', '"""inflation_rate"""'], {}), "('case', 'inflation_rate')\n", (10807, 10833), True, 'import config as cfg\n'), ((11036, 11169), 'util.convert_age', 'util.convert_age', (['self'], {'attr_from': '"""values_level"""', 'attr_to': '"""values_level"""', 'reverse': '(False)', 'vintages': 'self.vintages', 'years': 'self.years'}), "(self, attr_from='values_level', attr_to='values_level',\n reverse=False, vintages=self.vintages, years=self.years)\n", (11052, 11169), False, 'import util\n'), ((11288, 11421), 'util.convert_age', 'util.convert_age', (['self'], {'attr_from': '"""values_level"""', 'attr_to': '"""values_level"""', 'reverse': '(False)', 'vintages': 'self.vintages', 'years': 'self.years'}), "(self, attr_from='values_level', attr_to='values_level',\n reverse=False, vintages=self.vintages, years=self.years)\n", (11304, 11421), False, 'import util\n'), ((11481, 11522), 'numpy.pv', 'np.pv', (['rate', 'self.book_life', '(-1)', '(0)', '"""end"""'], {}), "(rate, self.book_life, -1, 0, 'end')\n", (11486, 11522), True, 'import numpy as np\n'), ((10964, 11005), 'numpy.pmt', 'np.pmt', (['rate', 'self.book_life', '(1)', '(0)', '"""end"""'], {}), "(rate, self.book_life, 1, 0, 'end')\n", (10970, 11005), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 20 19:33:39 2021
@author: damv_
Ecuaciones cubicas de estado
"""
from scipy.special import gamma
import numpy as np
"""****************************************************************************************************************************
Indices para ecuaciones cubicas de estado;
0 - <NAME>
1 - <NAME>
2 - <NAME>
R = 83.14472 #Barcm3/molK
En la rutina PseudoComp, se generan los pseudocomponentes a partir de la fracción de nC7+. Min se toma
de la literatura (Tablas de SCN) para la masa molar de nC7 como fraccion SCN.
Metodo para generar pseudocomponentes:
Distribucion: Gamma
Metodo de integracion: Gauss - Laguerre
Lista de datos requeridos:
zpseud = Composicion de fraccion C7+
Mprom = Masa molar promedio de fraccion C7+
n = numero de pseudocomponentes, 4 es un buen valor para no forzar la PC.
alfa = parametro de forma, un valor de entre 1 y 2 es buen estimado para crudos.
Mmin = Masa molar minima para la fraccion C7+, ya que toma la masa molar del nC7 (SCN).
****************************************************************************************************************************"""
def PseudoComp(zpseud, Mprom, n, alfa, Mmin=90):  # scalar inputs
    """Split the C7+ fraction into `n` pseudocomponents.

    Uses a gamma molar distribution integrated by Gauss-Laguerre quadrature.

    Parameters
    ----------
    zpseud : float  Overall mole fraction of the C7+ fraction.
    Mprom  : float  Average molar mass of the C7+ fraction.
    n      : int    Number of pseudocomponents (quadrature points).
    alfa   : float  Gamma-distribution shape parameter.
    Mmin   : float  Minimum molar mass of the fraction (default: nC7 SCN).

    Returns
    -------
    np.ndarray of shape (2, n): row 0 the mole fractions z_i, row 1 the
    molar masses M_i, both renormalized to honor zpseud and Mprom exactly.
    """
    nodes, weights = np.polynomial.laguerre.laggauss(n)  # quadrature nodes/weights
    beta = (2.5 * Mprom - Mmin) / nodes[-1]
    exs = alfa * beta / (Mprom - Mmin)
    delta = np.exp(exs - 1)
    # Gamma-distribution density evaluated at every node (vectorized).
    dens = (nodes ** (alfa - 1)) * (exs ** alfa) / (gamma(alfa) * delta ** nodes)
    z = zpseud * weights * dens
    M = Mmin + beta * nodes
    # Renormalize so the fractions sum to zpseud ...
    z = z * (zpseud / z.sum())
    # ... and the composition-weighted molar mass matches Mprom.
    M = M * (Mprom * z.sum() / (M * z).sum())
    return np.array([z, M])
"""****************************************************************************************************************************
Entradas en las ecuaciones:
Las propiedades de substancias como Pc, Tc, w, etc,
son np.array. El indice corresponde a una substancia especifica.
Indices para ecuaciones cubicas de estado;
0 - <NAME> -> RECOMENDADA
1 - <NAME>
2 - <NAME>
Rutina para funcion alfa( Tr ):
a) Alfa de Stryjek y Vera con parametro polar q - 1986 ( alfa_SV )
Se usan las reglas de mezclado de VdW.
Oma = np.array([ 0.457235530 , 0.42748 , 27/64 ]) Omb = np.array([ 0.077796074 , 0.08664 , 1/8 ])
A = Oma[Ec]*alfa( Tr )*Pr/Tc**2
B = Omb[Ec]*Pr/Tc
Bm = B_i*x_i
A_ij = ( 1 - k_ij)*( A_i*A_j )**0.5
Am = x_i*x_j*A_ij
A_i´ = x_j*A_ij
Rutinas para los distintos parametros:
a) Parametros B individuales ( B_i )
b) B de mezcla, calculando Bi ( Bm_0 )
c) Parametros A individuales ( A_i )
d) A de mezcla, calculando Ai ( Am_0 )
e) A de mezcla, otorgando Ai ( Am_1 )
f) A´ de un componente en mezcla ( Aprim_i )
****************************************************************************************************************************"""
def B_i(P, T, Pc, Tc, Ec=0):
    """Dimensionless co-volume parameter B for each pure component.

    B = Omega_b * Pr / Tr, with Omega_b selected per equation of state
    (index Ec: 0 Peng-Robinson, 1 SRK, 2 van der Waals).
    Pc/Tc are arrays, so an array of B values is returned.
    """
    omega_b = (0.077796074, 0.08664, 1 / 8)
    reduced_p = P / Pc
    reduced_t = T / Tc
    return omega_b[Ec] * reduced_p / reduced_t
def Bm_0(P, T, Pc, Tc, x, Ec=0):
    """Mixture co-volume B: mole-fraction-weighted sum of the pure B_i."""
    per_component = B_i(P, T, Pc, Tc, Ec)
    return np.dot(x, per_component)
#<NAME> <NAME>, 1986
def alfa_SV(T, Tc, w, q, Ec=0):
    """Stryjek-Vera alpha(Tr) function with polar parameter q (1986).

    For Tr <= 1 the standard SV form is used; above the critical point the
    exponential extrapolation with c_i = 1 + m_i/2 + 0.3*q_i applies.
    Returns an array with one alpha value per component.
    """
    # m(w) polynomial coefficients per EoS (row 0: PR, row 1: SRK, row 2: vdW).
    coeff = np.array([[0.378893, 1.4897153, -0.17131848, 0.0196554],
                      [0.48508, 1.55191, -0.15613, 0],
                      [0, 0, 0, 0]])
    alfa = np.zeros([Tc.size])
    for i in range(Tc.size):
        m = (coeff[Ec, 0] + coeff[Ec, 1] * w[i]
             + coeff[Ec, 2] * np.power(w[i], 2)
             + coeff[Ec, 3] * np.power(w[i], 3))
        Tr = T / Tc[i]
        if Tr <= 1:
            # Subcritical: SV alpha with the polar correction term.
            alfa[i] = np.power(
                1 + m * (1 - np.power(Tr, 0.5)
                         - q[i] * (1 - Tr) * (0.7 - Tr)), 2)
        else:
            # Supercritical extrapolation.
            c = 1 + m / 2 + 0.3 * q[i]
            alfa[i] = np.exp(2 * (c - 1) * (1 - np.power(Tr, c)) / c)
    return alfa
def A_i( P , T , Pc , Tc , w , q , Ec = 0 ):
    """Dimensionless attraction parameter A for each pure component.

    A = Omega_a * alpha(Tr) * Pr / Tr**2, with Omega_a selected per EoS
    (Ec: 0 Peng-Robinson, 1 SRK, 2 van der Waals) and alpha from the
    Stryjek-Vera correlation (alfa_SV).  Returns one A per component.
    """
    Oma = np.array([ 0.457235530 , 0.42748 , 27/64 ])
    # Equivalent formulation kept for reference by the original author:
#    A = ( Oma[Ec]*P/np.power(T,2) )*alfa_SV( T , Tc , w , q , Ec )*np.power( Tc,2 )/Pc
    A = Oma[Ec]*(P/Pc)*np.power(Tc/T, 2)*alfa_SV( T , Tc , w , q , Ec )
    return A
def Am_0( P , T , Pc , Tc , w , q , x , kij , Ec = 0 ):
    """Mixture attraction parameter A via van der Waals mixing rules.

    A_ij = (1 - k_ij) * sqrt(A_i * A_j);  Am = sum_i sum_j x_i x_j A_ij.
    Computes the pure-component A_i internally (contrast with Am_1, which
    takes precomputed A_i).
    """
    Ai = np.power( A_i( P , T , Pc , Tc , w , q , Ec ) , 0.5 )
    Aij = np.outer( Ai , Ai )
    Kij = 1 - kij
    Aij = Aij*Kij
#    print( Aij )
    # Quadratic form x^T Aij x (x is 1-D, so .T is a no-op).
    return np.dot( x.T , np.dot( x.T , Aij ) )
def Am_1(x, Ai, kij):
    """Mixture attraction parameter A from precomputed pure-component A_i.

    Van der Waals mixing rule: Am = sum_i sum_j x_i x_j (1-k_ij) sqrt(A_i A_j).
    """
    sqrt_a = np.sqrt(Ai)
    cross = (1 - kij) * np.outer(sqrt_a, sqrt_a)
    inner = np.dot(x.T, cross)
    return np.dot(x.T, inner)
def Aprim_i( P , T , Pc , Tc , w , q , x , kij , Ec = 0 ):
    """Composition derivative A'_i = d(n*Am)/dn_i for every component.

    With van der Waals mixing rules this is 2 * sum_j x_j A_ij.  Computes
    the pure-component A_i internally (contrast with Aprim_i_1).
    Returns an array with one A'_i per component.
    """
    Ai = np.power( A_i( P , T , Pc , Tc , w , q , Ec ) , 0.5 )
    Aij = np.outer( Ai , Ai )
    Kij = 1 - kij
    Aij = Aij*Kij
    return 2*np.dot( x.T , Aij )
def Aprim_i_1(x, Ai, kij):
    """Composition derivative A'_i from precomputed pure-component A_i.

    A'_i = 2 * sum_j x_j (1 - k_ij) sqrt(A_i A_j); returns one value per
    component.
    """
    root = np.sqrt(Ai)
    cross_terms = (1 - kij) * np.outer(root, root)
    return 2 * np.matmul(x, cross_terms)
"""****************************************************************************************************************************
Ecuaciones Cubicas de Estado:
0 - <NAME>
1 - <NAME>
2 - <NAME>
Rutina para solucion de Ec cubica, dando T,P y x ( z_0 )
Rutina para solucion de Ec cubica, dando Am y Bm ( z_1 )
u = np.array([ 2 , 1 , 0 ]) v = np.array([ -1 , 0, 0 ])
Polinomio: z**3 + ( (u[Ec] - 1)*B - 1 )*z**2 ...
... + ( A - u[Ec]*B +(v[Ec] - u[Ec])*B**2 )*z - ( A*B + v[Ec]*( B**2 + B**3 ) )
Rutina necesaria para calcular fugacidad. f_i = P*phi_i*x_i
Rutinas para calcular coeficientes de fugacidad "phi":
a) Coeficiente de fugacidad de un componente ( Phi_i )
b) Lista de coeficientes de fugacidad para todos los componentes ( phi_ )
****************************************************************************************************************************"""
def z_0( P , T , Pc , Tc , w , q , x , kij , Fase , Ec = 0 ):
    """Solve the cubic EoS for the compressibility factor z at (P, T, x).

    Fase selects the root: "V" vapor, "L" liquid, anything else returns the
    full root array.  Mixture A and B are computed internally via Am_0/Bm_0.
    """
    Fase = str( Fase )
    # u, v pick the cubic's generic form per EoS (Ec: 0 PR, 1 SRK, 2 vdW).
    u = np.array([ 2 , 1 , 0 ])
    v = np.array([ -1 , 0, 0 ])
    A = Am_0( P , T , Pc , Tc , w , q , x , kij , Ec )
    B = Bm_0( P ,T , Pc, Tc, x , Ec )
    # Cubic in z: z**3 - alf*z**2 + beta*z - gamma = 0.
    alf = 1 - (u[Ec] - 1)*B
    beta = A - u[Ec]*B +(v[Ec] - u[Ec])*np.power(B,2)
    # NOTE(review): local name `gamma` shadows scipy.special.gamma imported
    # at module level (harmless here, but confusing).
    gamma = A*B + v[Ec]*( np.power(B,2) + np.power(B,3) )
    # NOTE(review): assumes np.roots puts the vapor root first and the liquid
    # root last; numpy does not guarantee root ordering — confirm for the
    # intended operating range.
    if Fase == "V":
        z = np.roots([ 1 , -alf , beta , -gamma ])[ 0 ]
    elif Fase == "L":
        z = np.roots([ 1 , -alf , beta , -gamma ])[ 2 ]
    else:
        z = np.roots([ 1 , -alf , beta , -gamma ])
    return z
def z_1( Am , Bm , Fase , Ec = 0 ):
    """Solve the cubic EoS for z given precomputed mixture Am and Bm.

    Fase selects the root: "V" vapor, "L" liquid, anything else returns the
    full root array.  Ec: 0 Peng-Robinson, 1 SRK, 2 van der Waals.
    """
    Fase = str( Fase )
    # u, v select the cubic's generic form per equation of state.
    u = np.array([ 2 , 1 , 0 ])
    v = np.array([ -1 , 0, 0 ])
    # Cubic in z: z**3 - alf*z**2 + bet*z - gam = 0.
    alf = 1 - (u[Ec] - 1)*Bm
    bet = Am - u[Ec]*Bm + (v[Ec] - u[Ec])*Bm**2
    # Fix: renamed from `gamma`, which shadowed scipy.special.gamma imported
    # at module level.
    gam = Am*Bm + v[Ec]*( Bm**2 + Bm**3 )
    coeffs = [ 1 , -alf , bet , -gam ]
    # NOTE(review): assumes np.roots orders the vapor root first and the
    # liquid root last; numpy does not guarantee root ordering — confirm.
    if Fase == "V":
        z = np.roots( coeffs )[ 0 ]
    elif Fase == "L":
        z = np.roots( coeffs )[ 2 ]
    else:
        z = np.roots( coeffs )
    return z
# def z_0( P , T , Pc , Tc , w , q , x , kij , Fase , Ec = 0 ):
# Fase = str( Fase )
# u = np.array([ 2 , 1 , 0 ])
# v = np.array([ -1 , 0, 0 ])
# A = Am_0( P , T , Pc , Tc , w , q , x , kij , Ec )
# B = Bm_0( P ,T , Pc, Tc, x , Ec )
# alf = 1 - (u[Ec] - 1)*B
# beta = A - u[Ec]*B +(v[Ec] - u[Ec])*np.power(B,2)
# gamma = A*B + v[Ec]*( np.power(B,2) + np.power(B,3) )
# C = 3*beta - np.power( alf, 2 )
# D = -np.power( alf, 3 ) + 4.5*alf*beta - 13.5*gamma
# Q = np.power( C, 3 ) + np.power( D, 2 )
# if Q > 0:
# D1 = np.power( abs(-D + Q**0.5) )
# z = (alf + ( ))
# def phi_i( P , T , Pc , Tc , w , q , x , kij , n , Fase , Ec = 0 ):
# u = np.array([ 2 , 1 , 0 ])
# v = np.array([ -1 , 0, 0 ])
# Am = Am_0( P , T , Pc , Tc , w , q , x , kij , Ec )
# Bi = B_i( P , T , Pc , Tc , Ec )
# Bm = np.dot( x , Bi )
# A_prim = Aprim_i( P , T , Pc , Tc , w , q , x , kij , Ec )
# z = z_1( Am , Bm , Fase , Ec )
# # print(z)
# delta = ( u[Ec]**2 - 4*v[Ec] )**0.5
# L = np.log( ( 2*z + Bm*(u[Ec] + delta) )/( 2*z + Bm*( u[Ec] - delta) ) )/delta
# ln_Phi = -np.log( z - Bm ) - ( z - 1 )*Bi[ n ]/Bm + ( Am/Bm )*( Bi[ n ]/Bm - A_prim[ n ]/ Am )*L
# return np.exp( ln_Phi )
def phi_i( P , T , Pc , Tc , w , q , x , kij , n , Bi , Am , Bm , A_prim ,Fase , Ec = 0 ):
    """Fugacity coefficient phi of component `n` in the mixture.

    Takes precomputed per-component Bi, mixture Am/Bm and composition
    derivatives A_prim; solves the cubic via z_1 for the requested phase
    ("V" or "L") and evaluates the generic-cubic ln(phi) expression.
    Note the +(z-1)*Bi/Bm sign here, which corrects the sign used in the
    commented-out earlier version above.
    """
    # u, v select the cubic's generic form per EoS (Ec: 0 PR, 1 SRK, 2 vdW).
    u = np.array([ 2 , 1 , 0 ])
    v = np.array([ -1 , 0, 0 ])
    z = z_1( Am , Bm , Fase , Ec )
#    print(z)
    # delta = sqrt(u**2 - 4v): e.g. 2*sqrt(2) for Peng-Robinson.
    delta = np.power( np.power(u[Ec], 2) - 4*v[Ec] , 0.5 )
    L = np.log( ( 2*z + Bm*( u[Ec] + delta) )/( 2*z + Bm*( u[Ec] - delta ) ) )/delta
    ln_Phi = -np.log( z - Bm ) + ( z - 1 )*Bi[ n ]/Bm + ( Am/Bm )*( Bi[ n ]/Bm - A_prim[ n ]/ Am )*L
    return np.exp( ln_Phi )
# def phi_( P , T , Pc , Tc , w , q , x , kij , Fase , Ec = 0 ):
# Phi = np.zeros( x.size )
# for i in range( 0 , x.size ):
# Phi[ i ] = phi_i( P , T , Pc , Tc , w , q , x , kij , i , Fase , Ec )
# return Phi
def phi_( P , T , Pc , Tc , w , q , x , kij , Fase , Ec = 0 ):
    """Fugacity coefficients of every component in the mixture.

    Computes the shared quantities (Ai, Am, Bi, Bm, A') once and then
    evaluates phi_i per component for the requested phase ("V" or "L").
    Returns an np.ndarray with one coefficient per component.
    """
    Ai = A_i( P , T , Pc , Tc , w , q , Ec )
    Am = Am_1( x , Ai, kij )
    Bi = B_i( P , T , Pc , Tc , Ec )
    Bm = np.dot( x , Bi )
    A_prim = Aprim_i_1( x, Ai, kij )
    # Build the result in one pass instead of the original np.append loop,
    # which reallocated the array on every iteration.
    return np.array([ phi_i( P , T , Pc , Tc , w , q , x , kij , i , Bi , Am , Bm, A_prim , Fase , Ec )
                       for i in range( x.size ) ])
"""****************************************************************************************************************************
Rutina para Presion de saturacion ( Psat_i )
Rutinas para los siguientes problemas de equilibrio fases ideales:
a) Presion de Burbuja ( Pbur_id )
b) Temperatura de rocio ( Troc_id )
****************************************************************************************************************************"""
def Psat_i(T, Pc, Tc, w):
    """Wilson-type saturation-pressure estimate for each component.

    log10(Pr_sat) = (7/3)*(1 + w)*(1 - Tc/T); returns Psat in the units of
    Pc (one value per component).
    """
    exponent = (7 / 3) * (1 + w) * (1 - Tc / T)
    return Pc * np.power(10, exponent)
def Pbur_id( T , Pc , Tc , w , x ):
    """Ideal (Raoult's law) bubble pressure: sum_i x_i * Psat_i(T)."""
    return np.dot( Psat_i( T , Pc , Tc , w ) , x )
def Troc_id( P , Pc , Tc , w , y , T = 350 , delta = 1e-8 , tol = 1e-6 , Nmax = 20 ):
    """Ideal (Raoult's law) dew-point temperature by Newton iteration in 1/T.

    Parameters
    ----------
    P : system pressure; Pc, Tc, w : component critical properties and
    acentric factors; y : vapor mole fractions; T : initial guess [K];
    delta : finite-difference step; tol : residual tolerance; Nmax : max
    Newton iterations.  Returns the dew temperature [K].

    Bug fix: the original only converted 1/T back to a temperature AFTER the
    loop, so from the second iteration on the residual was evaluated at an
    inverse temperature (~1e-3 K), which underflows Psat_i and poisons the
    iteration with NaNs.  The reciprocal is now undone inside the loop,
    matching the pattern used by Pbur and Troc.
    """
    # Residual: ln( P * sum(y_i / Psat_i(T)) ) = 0 at the dew point.
    ferror = lambda T : np.log( P*sum( y/Psat_i( T , Pc , Tc , w ) ) )
    # Finite-difference derivative with respect to 1/T (the Newton step is
    # taken in reciprocal-temperature space).
    dferror = lambda T: ( ferror( T + delta ) - ferror( T ) )/( 1/(T +delta) - 1/T )
    N = 0
    while N < Nmax:
        N = N + 1
        if abs( ferror( T ) ) < tol:
            break
        T_inv = 1/T - ferror( T )/dferror( T )  # Newton update in 1/T
        T = 1/T_inv                             # back to temperature space
    return T
"""****************************************************************************************************************************
Ecuaciones Cubicas de Estado:
0 - <NAME>
1 - <NAME>
2 - <NAME>
Rutinas para los siguientes problemas de equilibrio:
a) Presion de Burbuja, genera estimados iniciales ( Pbur )
b) Temperatura de rocio, genera estimados iniciales ( Troc )
****************************************************************************************************************************"""
def Pbur( T , Pc , Tc , w , q , z , kij , delta = 1e-10 , tol = 1e-6 , Nmax = 20 , Ec = 0 ):
    """Real bubble pressure at temperature T via the cubic EoS.

    Starts from the ideal (Raoult) bubble pressure and vapor composition,
    then Newton-iterates in 1/P on the equilibrium residual
    sum(z_i * phiL_i / phiV_i) - 1.  Returns np.append([Pbur], y) — the
    bubble pressure followed by the normalized vapor composition.
    """
    Psat = Psat_i( T , Pc , Tc , w )
    Pbur_r = np.dot( Psat , z )      # ideal bubble pressure as initial guess
    y_i = Psat*z/Pbur_r               # ideal vapor composition
    del( Psat )
    # NOTE(review): the lambda closes over y_i, which is NOT updated inside
    # the loop — the vapor composition is only refreshed once, after the
    # pressure iteration converges.  Confirm this is intended.
    ferror = lambda P : sum( z*phi_( P , T , Pc , Tc , w , q , z , kij , "L" )/phi_( P , T , Pc , Tc , w , q , y_i , kij , "V" ) ) - 1
    # Finite-difference derivative with respect to 1/P (Newton in 1/P space).
    dferror = lambda P : ( ferror( P + delta ) - ferror( P ) )/( 1/(P + delta) - 1/P )
    N = 0
    while N < Nmax:
        N = N + 1
        if abs( ferror( Pbur_r ) ) < tol:
            break
        Pbur_rr = 1/Pbur_r - ferror( Pbur_r )/dferror( Pbur_r )  # step in 1/P
        Pbur_r = 1/Pbur_rr                                       # back to P
    # Update and normalize the vapor composition at the converged pressure.
    y_i = z*phi_( Pbur_r , T , Pc , Tc , w , q , z , kij , "L" )/phi_( Pbur_r , T , Pc , Tc , w , q , y_i , kij , "V" )
    y_i = y_i/sum( y_i )
#    print( "N. de iteraciones (Rutina Pbur real):", N , "\n" )
    return np.append( np.array([ Pbur_r ]) , y_i )
def Troc( P , Pc , Tc , w , q , y , kij , delta = 1e-8 , tol = 1e-6 , Nmax = 20 , Ec = 0 ):
    """Real dew-point temperature at pressure P via the cubic EoS.

    Starts from the ideal dew temperature (Troc_id) and liquid composition,
    then Newton-iterates in 1/T on the residual
    ln( sum(y_i * phiV_i / phiL_i) ).  Returns np.append([Troc], x) — the
    dew temperature followed by the normalized liquid composition.

    Bug fixes relative to the original:
    * the final composition update referenced the free name `T` (which only
      resolved to the demo's module-level T); it now uses the converged
      Troc_r, mirroring Pbur's use of Pbur_r;
    * the reciprocal-space Newton update is undone INSIDE the loop, so the
      residual is always evaluated at an actual temperature.
    """
    Troc_r = Troc_id( P , Pc , Tc , w , y )
    x_i = P*y/Psat_i( Troc_r , Pc, Tc, w )   # ideal liquid composition
    # NOTE(review): the lambda closes over x_i, which is not refreshed inside
    # the loop; the liquid composition is updated once after convergence.
    ferror = lambda T: np.log( sum( y*phi_( P , T , Pc , Tc , w , q , y , kij , "V" )/phi_( P , T , Pc , Tc , w , q , x_i , kij , "L" ) ) )
    # Finite-difference derivative with respect to 1/T (Newton in 1/T space).
    dferror = lambda T: ( ferror(T + delta) - ferror( T ))/( 1/(T + delta) - 1/T )
    N = 0
    while N < Nmax:
        N = N + 1
        if abs( ferror( Troc_r ) ) < tol:
            break
        T_inv = 1/Troc_r - ferror( Troc_r )/dferror( Troc_r )  # step in 1/T
        Troc_r = 1/T_inv                                       # back to T
    # Update and normalize the liquid composition at the converged temperature.
    x_i = y*phi_( P , Troc_r , Pc , Tc , w , q , y , kij , "V" )/phi_( P , Troc_r , Pc , Tc , w , q , x_i , kij , "L" )
    x_i = x_i/sum( x_i )
    return np.append( np.array([ Troc_r ]) , x_i )
#Pruebas: nC3 y nC7
if __name__=="__main__":
    # Smoke test of the whole module on a binary nC3/nC7 mixture at ambient
    # conditions.  Units: pressure in bar, temperature in K.
    P = 1.01013 # bar
    T = 298.15 # K
    Pc = np.array([ 42.4953 , 27.4084 ])        # critical pressures [bar]
    Tc = np.array([ 369.82 , 540.14 ])          # critical temperatures [K]
    w = np.array([ 0.15416, 0.35 ])             # acentric factors
    q = np.array([ -0.03136 , -0.02325 ])       # Stryjek-Vera polar parameters
    kij = np.array([ [0 , 0.0067],[ 0.0067 , 0] ])  # binary interaction parameters
    z = np.array([ 0.6 , 0.4 ])                 # overall composition
    # Per-component and mixture EoS parameters.
    B = B_i( P , T , Pc , Tc )
    Bm = Bm_0( P ,T , Pc, Tc, z )
    A = A_i( P , T , Pc , Tc , w , q )
    alfa = alfa_SV( T , Tc , w , q )
    Am = Am_0( P , T , Pc , Tc , w , q , z , kij )
    A_prim = Aprim_i( P , T , Pc , Tc , w , q , z , kij )
    Z = z_1( Am, Bm, "Prueba")   # all three roots (non-V/L phase flag)
    # Fugacity coefficients per component and phase.
    phi_nC3_L = phi_i( P , T , Pc , Tc , w , q , z , kij , 0 , B, Am, Bm , A_prim, "L" )
    phi_nC3_V = phi_i( P , T , Pc , Tc , w , q , z , kij , 0 , B, Am, Bm , A_prim, "V" )
    phi_nC7_L = phi_i( P , T , Pc , Tc , w , q , z , kij , 1 , B, Am, Bm , A_prim, "L" )
    phi_nC7_V = phi_i( P , T , Pc , Tc , w , q , z , kij , 1 , B, Am, Bm , A_prim, "V" )
    phi = phi_( P , T , Pc , Tc , w , q , z , kij , "L" )
    # Ideal and real phase-equilibrium calculations.
    psat = Psat_i( T, Pc, Tc, w )
    Pb_id = Pbur_id( T , Pc , Tc , w , z )
    Pbur_r = Pbur( T , Pc , Tc , w , q , z , kij )
    Tr_id = Troc_id( P , Pc , Tc , w , z )
    Troc_r = Troc( P , Pc , Tc , w , q , z , kij )
    print("R E S U L T A D O S D E P R U E B A S :")
    print("T:",T,"K P:",P,"Bar \n")
#    print("___________________________________________________________________________________________","\n")
#    print("Bi:",B,"\n")
#    print("Bm:",Bm,"\n")
#    print("alfa_i:",alfa,"\n")
#    print("Ai:",A,"\n")
#    print("Am:",Am,"\n")
#    print("A_prim:",A_prim,"\n")
#    print("z:",z,"\n")
    print("phi_nC3 L:",phi_nC3_L,"\n")
    print("phi_nC3 V:",phi_nC3_V,"\n")
    print("phi_nC7 L:",phi_nC7_L,"\n")
    print("phi_nC7 V:",phi_nC7_V,"\n")
#    print("phi:",phi,"\n")
    print("psat:",psat,"Bar \n")
    print("Pbur_id:",Pb_id,"Bar \n")
    print("Pbur_real:",Pbur_r[0],"Bar \n")
#    print("yi_id:",psat*z/sum( psat*z ),"[nC3 , nC7] \n")
#    print("yi_real:",Pbur_r[ 1 : z.size + 1 ],"[nC3 , nC7] \n")
    print("Troc_id:",Tr_id,"K \n")
    print("Troc_real:",Troc_r[0],"K \n")
#    print("xi_id:",P*z/Psat_i( Tr_id , Pc, Tc, w ),"[nC3 , nC7] \n")
#    print("xi_real:",Troc_r[ 1 : z.size + 1 ],"[nC3 , nC7] \n")
"numpy.roots",
"numpy.outer",
"numpy.log",
"numpy.power",
"numpy.zeros",
"numpy.append",
"numpy.array",
"numpy.exp",
"numpy.dot",
"numpy.polynomial.laguerre.laggauss",
"scipy.special.gamma"
] | [((1317, 1329), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1325, 1329), True, 'import numpy as np\n'), ((1339, 1351), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1347, 1351), True, 'import numpy as np\n'), ((1363, 1397), 'numpy.polynomial.laguerre.laggauss', 'np.polynomial.laguerre.laggauss', (['n'], {}), '(n)\n', (1394, 1397), True, 'import numpy as np\n'), ((1521, 1536), 'numpy.exp', 'np.exp', (['(exs - 1)'], {}), '(exs - 1)\n', (1527, 1536), True, 'import numpy as np\n'), ((1913, 1929), 'numpy.array', 'np.array', (['[z, M]'], {}), '([z, M])\n', (1921, 1929), True, 'import numpy as np\n'), ((3226, 3265), 'numpy.array', 'np.array', (['[0.077796074, 0.08664, 1 / 8]'], {}), '([0.077796074, 0.08664, 1 / 8])\n', (3234, 3265), True, 'import numpy as np\n'), ((3492, 3600), 'numpy.array', 'np.array', (['[[0.378893, 1.4897153, -0.17131848, 0.0196554], [0.48508, 1.55191, -0.15613,\n 0], [0, 0, 0, 0]]'], {}), '([[0.378893, 1.4897153, -0.17131848, 0.0196554], [0.48508, 1.55191,\n -0.15613, 0], [0, 0, 0, 0]])\n', (3500, 3600), True, 'import numpy as np\n'), ((3613, 3632), 'numpy.zeros', 'np.zeros', (['[Tc.size]'], {}), '([Tc.size])\n', (3621, 3632), True, 'import numpy as np\n'), ((4174, 4214), 'numpy.array', 'np.array', (['[0.45723553, 0.42748, 27 / 64]'], {}), '([0.45723553, 0.42748, 27 / 64])\n', (4182, 4214), True, 'import numpy as np\n'), ((4523, 4539), 'numpy.outer', 'np.outer', (['Ai', 'Ai'], {}), '(Ai, Ai)\n', (4531, 4539), True, 'import numpy as np\n'), ((4680, 4697), 'numpy.power', 'np.power', (['Ai', '(0.5)'], {}), '(Ai, 0.5)\n', (4688, 4697), True, 'import numpy as np\n'), ((4917, 4933), 'numpy.outer', 'np.outer', (['Ai', 'Ai'], {}), '(Ai, Ai)\n', (4925, 4933), True, 'import numpy as np\n'), ((5045, 5062), 'numpy.power', 'np.power', (['Ai', '(0.5)'], {}), '(Ai, 0.5)\n', (5053, 5062), True, 'import numpy as np\n'), ((6136, 6155), 'numpy.array', 'np.array', (['[2, 1, 0]'], {}), '([2, 1, 0])\n', (6144, 6155), True, 'import numpy as np\n'), 
((6168, 6188), 'numpy.array', 'np.array', (['[-1, 0, 0]'], {}), '([-1, 0, 0])\n', (6176, 6188), True, 'import numpy as np\n'), ((6722, 6741), 'numpy.array', 'np.array', (['[2, 1, 0]'], {}), '([2, 1, 0])\n', (6730, 6741), True, 'import numpy as np\n'), ((6754, 6774), 'numpy.array', 'np.array', (['[-1, 0, 0]'], {}), '([-1, 0, 0])\n', (6762, 6774), True, 'import numpy as np\n'), ((8501, 8520), 'numpy.array', 'np.array', (['[2, 1, 0]'], {}), '([2, 1, 0])\n', (8509, 8520), True, 'import numpy as np\n'), ((8533, 8553), 'numpy.array', 'np.array', (['[-1, 0, 0]'], {}), '([-1, 0, 0])\n', (8541, 8553), True, 'import numpy as np\n'), ((8863, 8877), 'numpy.exp', 'np.exp', (['ln_Phi'], {}), '(ln_Phi)\n', (8869, 8877), True, 'import numpy as np\n'), ((9295, 9308), 'numpy.dot', 'np.dot', (['x', 'Bi'], {}), '(x, Bi)\n', (9301, 9308), True, 'import numpy as np\n'), ((9359, 9371), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9367, 9371), True, 'import numpy as np\n'), ((11338, 11353), 'numpy.dot', 'np.dot', (['Psat', 'z'], {}), '(Psat, z)\n', (11344, 11353), True, 'import numpy as np\n'), ((13038, 13066), 'numpy.array', 'np.array', (['[42.4953, 27.4084]'], {}), '([42.4953, 27.4084])\n', (13046, 13066), True, 'import numpy as np\n'), ((13079, 13105), 'numpy.array', 'np.array', (['[369.82, 540.14]'], {}), '([369.82, 540.14])\n', (13087, 13105), True, 'import numpy as np\n'), ((13117, 13142), 'numpy.array', 'np.array', (['[0.15416, 0.35]'], {}), '([0.15416, 0.35])\n', (13125, 13142), True, 'import numpy as np\n'), ((13153, 13183), 'numpy.array', 'np.array', (['[-0.03136, -0.02325]'], {}), '([-0.03136, -0.02325])\n', (13161, 13183), True, 'import numpy as np\n'), ((13197, 13233), 'numpy.array', 'np.array', (['[[0, 0.0067], [0.0067, 0]]'], {}), '([[0, 0.0067], [0.0067, 0]])\n', (13205, 13233), True, 'import numpy as np\n'), ((13246, 13266), 'numpy.array', 'np.array', (['[0.6, 0.4]'], {}), '([0.6, 0.4])\n', (13254, 13266), True, 'import numpy as np\n'), ((1730, 1746), 
'numpy.append', 'np.append', (['z', 'zi'], {}), '(z, zi)\n', (1739, 1746), True, 'import numpy as np\n'), ((1762, 1778), 'numpy.append', 'np.append', (['M', 'Mi'], {}), '(M, Mi)\n', (1771, 1778), True, 'import numpy as np\n'), ((4623, 4639), 'numpy.dot', 'np.dot', (['x.T', 'Aij'], {}), '(x.T, Aij)\n', (4629, 4639), True, 'import numpy as np\n'), ((4710, 4726), 'numpy.outer', 'np.outer', (['Ai', 'Ai'], {}), '(Ai, Ai)\n', (4718, 4726), True, 'import numpy as np\n'), ((4761, 4777), 'numpy.dot', 'np.dot', (['x.T', 'Aij'], {}), '(x.T, Aij)\n', (4767, 4777), True, 'import numpy as np\n'), ((4986, 5002), 'numpy.dot', 'np.dot', (['x.T', 'Aij'], {}), '(x.T, Aij)\n', (4992, 5002), True, 'import numpy as np\n'), ((5075, 5091), 'numpy.outer', 'np.outer', (['Ai', 'Ai'], {}), '(Ai, Ai)\n', (5083, 5091), True, 'import numpy as np\n'), ((5117, 5133), 'numpy.dot', 'np.dot', (['x.T', 'Aij'], {}), '(x.T, Aij)\n', (5123, 5133), True, 'import numpy as np\n'), ((8674, 8745), 'numpy.log', 'np.log', (['((2 * z + Bm * (u[Ec] + delta)) / (2 * z + Bm * (u[Ec] - delta)))'], {}), '((2 * z + Bm * (u[Ec] + delta)) / (2 * z + Bm * (u[Ec] - delta)))\n', (8680, 8745), True, 'import numpy as np\n'), ((10078, 10100), 'numpy.power', 'np.power', (['(10)', 'log_10Pr'], {}), '(10, log_10Pr)\n', (10086, 10100), True, 'import numpy as np\n'), ((12059, 12077), 'numpy.array', 'np.array', (['[Pbur_r]'], {}), '([Pbur_r])\n', (12067, 12077), True, 'import numpy as np\n'), ((12914, 12932), 'numpy.array', 'np.array', (['[Troc_r]'], {}), '([Troc_r])\n', (12922, 12932), True, 'import numpy as np\n'), ((4330, 4349), 'numpy.power', 'np.power', (['(Tc / T)', '(2)'], {}), '(Tc / T, 2)\n', (4338, 4349), True, 'import numpy as np\n'), ((6354, 6368), 'numpy.power', 'np.power', (['B', '(2)'], {}), '(B, 2)\n', (6362, 6368), True, 'import numpy as np\n'), ((6458, 6491), 'numpy.roots', 'np.roots', (['[1, -alf, beta, -gamma]'], {}), '([1, -alf, beta, -gamma])\n', (6466, 6491), True, 'import numpy as np\n'), ((6602, 6635), 
'numpy.roots', 'np.roots', (['[1, -alf, beta, -gamma]'], {}), '([1, -alf, beta, -gamma])\n', (6610, 6635), True, 'import numpy as np\n'), ((6931, 6964), 'numpy.roots', 'np.roots', (['[1, -alf, beta, -gamma]'], {}), '([1, -alf, beta, -gamma])\n', (6939, 6964), True, 'import numpy as np\n'), ((7075, 7108), 'numpy.roots', 'np.roots', (['[1, -alf, beta, -gamma]'], {}), '([1, -alf, beta, -gamma])\n', (7083, 7108), True, 'import numpy as np\n'), ((8629, 8647), 'numpy.power', 'np.power', (['u[Ec]', '(2)'], {}), '(u[Ec], 2)\n', (8637, 8647), True, 'import numpy as np\n'), ((1595, 1606), 'scipy.special.gamma', 'gamma', (['alfa'], {}), '(alfa)\n', (1600, 1606), False, 'from scipy.special import gamma\n'), ((3724, 3741), 'numpy.power', 'np.power', (['w[n]', '(3)'], {}), '(w[n], 3)\n', (3732, 3741), True, 'import numpy as np\n'), ((6394, 6408), 'numpy.power', 'np.power', (['B', '(2)'], {}), '(B, 2)\n', (6402, 6408), True, 'import numpy as np\n'), ((6410, 6424), 'numpy.power', 'np.power', (['B', '(3)'], {}), '(B, 3)\n', (6418, 6424), True, 'import numpy as np\n'), ((6536, 6569), 'numpy.roots', 'np.roots', (['[1, -alf, beta, -gamma]'], {}), '([1, -alf, beta, -gamma])\n', (6544, 6569), True, 'import numpy as np\n'), ((7009, 7042), 'numpy.roots', 'np.roots', (['[1, -alf, beta, -gamma]'], {}), '([1, -alf, beta, -gamma])\n', (7017, 7042), True, 'import numpy as np\n'), ((8765, 8779), 'numpy.log', 'np.log', (['(z - Bm)'], {}), '(z - Bm)\n', (8771, 8779), True, 'import numpy as np\n'), ((3695, 3712), 'numpy.power', 'np.power', (['w[n]', '(2)'], {}), '(w[n], 2)\n', (3703, 3712), True, 'import numpy as np\n'), ((3896, 3920), 'numpy.power', 'np.power', (['(T / Tc[i])', '(0.5)'], {}), '(T / Tc[i], 0.5)\n', (3904, 3920), True, 'import numpy as np\n')] |
from __future__ import division, print_function
import numpy as np
import scipy.io
import scipy.ndimage
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.widgets import Slider
from skimage import measure
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
############ COMMENTS ##############
''' COMMENTS:
Creating Isosurfaces of the reconstructed wavefronts for the Experimental Scheme.
'''
# Set fontsize of ticklabels globally for 128 dpi display.
matplotlib.rcParams['xtick.labelsize'] = 8
matplotlib.rcParams['ytick.labelsize'] = 8
####################################
####### SECTION STATEMENTS #########
ShowPlots = True # Toggles plt.show() on/off in a global fashion.
####################################
######## "GLOBAL" PARAMETERS #######
####################################
######### HELPER FUNCTIONS #########
####################################
########### BEGIN CODE #############
# Load the .mat files and extract the volume data.
rdict = scipy.io.loadmat('./roundred.mat', mat_dtype=True)
bdict = scipy.io.loadmat('./roundblue.mat', mat_dtype=True)
rdata = rdict['roundred']
bdata = bdict['roundblue']
# Extract the isosurface of each reconstructed wavefront with skimage's
# marching-cubes implementation.
# NOTE(review): marching_cubes_lewiner was removed from skimage >= 0.19 —
# confirm the pinned scikit-image version.
rverts, rfaces, rnormals, rvalues = measure.marching_cubes_lewiner(rdata, level=2.7, spacing=[1.0, 1.0, 1.0], step_size=1)
bverts, bfaces, bnormals, bvalues = measure.marching_cubes_lewiner(bdata, level=2.6, spacing=[1.0, 1.0, 1.0], step_size=1)
print(np.shape(rverts), np.shape(rfaces))
print(np.shape(bverts), np.shape(bfaces))
# Alternative mesh rendering, kept for reference:
# rmesh = Poly3DCollection(rverts[rfaces[::2, :]])
# bmesh = Poly3DCollection(bverts[bfaces[::2, :]])
# rmesh.set_edgecolor('r')
# bmesh.set_edgecolor('b')
# Plot the two isosurfaces side by side.
fig = plt.figure(figsize=(7, 3), dpi=128)
fig.subplots_adjust(left=0.0, right=0.98, wspace=0.0)
ax1 = fig.add_subplot(121, projection='3d')
ax2 = fig.add_subplot(122, projection='3d')
# ax1.add_collection3d(rmesh)
# ax2.add_collection3d(bmesh)
# ax1.set_xlim()
ax1.plot_trisurf(rverts[:, 2], rverts[:, 1], rfaces, rverts[:, 0], color='r', lw=1, shade=True)
ax2.plot_trisurf(bverts[:, 2], bverts[:, 1], bfaces, bverts[:, 0], color='b', lw=1, shade=True)
# Top-down view for both axes.
for axi in [ax1, ax2]:
    axi.view_init(elev=90, azim=-135)
# # Let's save the verts and faces data as NumPy arrays.
# np.save('./Red_LG_Verts_1step.npy', rverts)
# np.save('./Red_LG_Faces_1step.npy', rfaces)
# np.save('./Blue_LG_Verts_1step.npy', bverts)
# np.save('./Blue_LG_Faces_1step.npy', bfaces)
####################################
if ShowPlots:  # idiom fix: no comparison to True
    plt.show()
| [
"numpy.shape",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"skimage.measure.marching_cubes_lewiner"
] | [((1316, 1406), 'skimage.measure.marching_cubes_lewiner', 'measure.marching_cubes_lewiner', (['rdata'], {'level': '(2.7)', 'spacing': '[1.0, 1.0, 1.0]', 'step_size': '(1)'}), '(rdata, level=2.7, spacing=[1.0, 1.0, 1.0],\n step_size=1)\n', (1346, 1406), False, 'from skimage import measure\n'), ((1439, 1529), 'skimage.measure.marching_cubes_lewiner', 'measure.marching_cubes_lewiner', (['bdata'], {'level': '(2.6)', 'spacing': '[1.0, 1.0, 1.0]', 'step_size': '(1)'}), '(bdata, level=2.6, spacing=[1.0, 1.0, 1.0],\n step_size=1)\n', (1469, 1529), False, 'from skimage import measure\n'), ((1791, 1826), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 3)', 'dpi': '(128)'}), '(figsize=(7, 3), dpi=128)\n', (1801, 1826), True, 'import matplotlib.pyplot as plt\n'), ((1533, 1549), 'numpy.shape', 'np.shape', (['rverts'], {}), '(rverts)\n', (1541, 1549), True, 'import numpy as np\n'), ((1551, 1567), 'numpy.shape', 'np.shape', (['rfaces'], {}), '(rfaces)\n', (1559, 1567), True, 'import numpy as np\n'), ((1575, 1591), 'numpy.shape', 'np.shape', (['bverts'], {}), '(bverts)\n', (1583, 1591), True, 'import numpy as np\n'), ((1593, 1609), 'numpy.shape', 'np.shape', (['bfaces'], {}), '(bfaces)\n', (1601, 1609), True, 'import numpy as np\n'), ((2612, 2622), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2620, 2622), True, 'import matplotlib.pyplot as plt\n')] |
"""This script contains methods to plot multiple aspects of the results
of MSAF.
"""
import logging
import mir_eval
import numpy as np
import os
from os.path import join, basename, dirname, splitext
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Local stuff
import msaf
from msaf import io
from msaf import utils
from msaf import jams2
# Map internal algorithm identifiers to the human-readable names shown in
# plot tick labels.  Several ids alias the same display name
# (e.g. "levy"/"cc" -> "CC", "serra"/"sf" -> "SF").
translate_ids = {
    "2dfmc" : "2D-FMC",
    "cnmf3" : "C-NMF",
    "foote" : "Ckboard",
    "levy" : "CC",
    "cc" : "CC",
    "olda" : "OLDA",
    "serra" : "SF",
    "sf" : "SF",
    "siplca": "SI-PLCA"
}
def _plot_formatting(title, est_file, algo_ids, last_bound, N, output_file):
    """Apply shared formatting to the current pyplot figure.

    Sets the title (defaulting to the estimation file's base name), places
    one y-tick per algorithm row labeled with algo_ids, labels the x-axis,
    clips it to [0, last_bound], and either saves the figure to output_file
    (format inferred from its extension) or shows it interactively.

    Parameters
    ----------
    title : str or None
        Plot title; when None, the base name of est_file is used.
    est_file : str
        Path to the estimation file (used for the fallback title).
    algo_ids : list of str
        One label per row, bottom to top.
    last_bound : float
        Time (seconds) of the last boundary; upper x-limit.
    N : int
        Number of rows (the y-axis is split into N equal bands).
    output_file : str or None
        When given, the figure is written here instead of shown.
    """
    if title is None:
        title = os.path.basename(est_file).split(".")[0]
    plt.title(title)
    # Center one tick inside each of the N horizontal bands.
    plt.yticks(np.arange(0, 1, 1 / float(N)) + 1 / (float(N) * 2))
    plt.gcf().subplots_adjust(bottom=0.22)
    plt.gca().set_yticklabels(algo_ids)
    plt.xlabel("Time (seconds)")
    plt.xlim((0, last_bound))
    plt.tight_layout()
    if output_file is not None:
        # Infer the image format from the output file's extension.
        plt.savefig(output_file, format=splitext(output_file)[1][1:])
    plt.close()
def plot_boundaries(all_boundaries, est_file, algo_ids=None, title=None,
                    output_file=None):
    """Plots all the boundaries.

    Parameters
    ----------
    all_boundaries: list
        A list of np.arrays containing the times of the boundaries, one array
        for each algorithm.  The first array is the ground truth.
    est_file: str
        Path to the estimated file (JSON file)
    algo_ids : list
        List of algorithm ids to to read boundaries from.
        If None, all algorithm ids are read.
    title : str
        Title of the plot. If None, the name of the file is printed instead.
    output_file : str
        If given, the plot is saved to this path instead of shown.
    """
    N = len(all_boundaries)  # Number of lists of boundaries
    if algo_ids is None:
        algo_ids = io.get_algo_ids(est_file)

    # Translate ids into display names.  Fix: build a new list instead of
    # writing the translated names back into the caller's list in place.
    algo_ids = ["GT"] + [translate_ids[algo_id] for algo_id in algo_ids]

    figsize = (6, 4)
    plt.figure(1, figsize=figsize, dpi=120, facecolor='w', edgecolor='k')
    for i, boundaries in enumerate(all_boundaries):
        # Ground truth (first row) in green, estimations in blue.
        color = "b"
        if i == 0:
            color = "g"
        for b in boundaries:
            plt.axvline(b, i / float(N), (i + 1) / float(N), color=color)
        plt.axhline(i / float(N), color="k", linewidth=1)

    # Format plot
    _plot_formatting(title, est_file, algo_ids, all_boundaries[0][-1], N,
                     output_file)
def plot_labels(all_labels, gt_times, est_file, algo_ids=None, title=None,
output_file=None):
"""Plots all the labels.
Parameters
----------
all_labels: list
A list of np.arrays containing the labels of the boundaries, one array
for each algorithm.
gt_times: np.array
Array with the ground truth boundaries.
est_file: str
Path to the estimated file (JSON file)
algo_ids : list
List of algorithm ids to to read boundaries from.
If None, all algorithm ids are read.
title : str
Title of the plot. If None, the name of the file is printed instead.
"""
N = len(all_labels) # Number of lists of labels
if algo_ids is None:
algo_ids = io.get_algo_ids(est_file)
# Translate ids
for i, algo_id in enumerate(algo_ids):
algo_ids[i] = translate_ids[algo_id]
algo_ids = ["GT"] + algo_ids
# Index the labels to normalize them
for i, labels in enumerate(all_labels):
all_labels[i] = mir_eval.util.index_labels(labels)[0]
# Get color map
cm = plt.get_cmap('gist_rainbow')
max_label = max(max(labels) for labels in all_labels)
# To intervals
gt_inters = utils.times_to_intervals(gt_times)
# Plot labels
figsize = (6, 4)
plt.figure(1, figsize=figsize, dpi=120, facecolor='w', edgecolor='k')
for i, labels in enumerate(all_labels):
for label, inter in zip(labels, gt_inters):
plt.axvspan(inter[0], inter[1], ymin=i / float(N),
ymax=(i + 1) / float(N), alpha=0.6,
color=cm(label / float(max_label)))
plt.axhline(i / float(N), color="k", linewidth=1)
# Draw the boundary lines
for bound in gt_times:
plt.axvline(bound, color="g")
# Format plot
_plot_formatting(title, est_file, algo_ids, gt_times[-1], N,
output_file)
def plot_one_track(plot_name, file_struct, est_times, est_labels, boundaries_id, labels_id,
ds_prefix, title=None):
"""Plots the results of one track, with ground truth if it exists."""
# Get context
if ds_prefix in msaf.prefix_dict.keys():
context = msaf.prefix_dict[ds_prefix]
else:
context = "function"
# Set up the boundaries id
bid_lid = boundaries_id
if labels_id is not None:
bid_lid += " + " + labels_id
try:
# Read file
ref_inter, ref_labels = jams2.converters.load_jams_range(file_struct.ref_file, "sections", annotator=0, context=context)
# To times
ref_times = utils.intervals_to_times(ref_inter)
all_boundaries = [ref_times, est_times]
all_labels = [ref_labels, est_labels]
algo_ids = ["GT", bid_lid]
except:
logging.warning("No references found in %s. Not plotting groundtruth"
% file_struct.ref_file)
all_boundaries = [est_times]
all_labels = [est_labels]
algo_ids = [bid_lid]
N = len(all_boundaries)
# Index the labels to normalize them
for i, labels in enumerate(all_labels):
all_labels[i] = mir_eval.util.index_labels(labels)[0]
# Get color map
cm = plt.get_cmap('gist_rainbow')
max_label = max(max(labels) for labels in all_labels)
figsize = (8, 4)
plt.figure(1, figsize=figsize, dpi=120, facecolor='w', edgecolor='k')
for i, boundaries in enumerate(all_boundaries):
color = "b"
if i == 0:
color = "g"
for b in boundaries:
plt.axvline(b, i / float(N), (i + 1) / float(N), color=color)
if labels_id is not None:
labels = all_labels[i]
inters = utils.times_to_intervals(boundaries)
for label, inter in zip(labels, inters):
plt.axvspan(inter[0], inter[1], ymin=i / float(N),
ymax=(i + 1) / float(N), alpha=0.6,
color=cm(label / float(max_label)))
plt.axhline(i / float(N), color="k", linewidth=1)
# Format plot
_plot_formatting(title, os.path.basename(file_struct.audio_file), algo_ids, all_boundaries[0][-1], N, plot_name)
def plot_tree(T, res=None, title=None, cmap_id="Pastel2"):
"""Plots a given tree, containing hierarchical segmentation.
Parameters
----------
T: mir_eval.segment.tree
A tree object containing the hierarchical segmentation.
res: float
Frame-rate resolution of the tree (None to use seconds).
title: str
Title for the plot. `None` for no title.
cmap_id: str
Color Map ID
"""
def round_time(t, res=0.1):
v = int(t / float(res)) * res
return v
# Get color map
cmap = plt.get_cmap(cmap_id)
# Get segments by level
level_bounds = []
for level in T.levels:
if level == "root":
continue
segments = T.get_segments_in_level(level)
level_bounds.append(segments)
# Plot axvspans for each segment
B = float(len(level_bounds))
#plt.figure(figsize=figsize)
for i, segments in enumerate(level_bounds):
labels = utils.segment_labels_to_floats(segments)
for segment, label in zip(segments, labels):
#print i, label, cmap(label)
if res is None:
start = segment.start
end = segment.end
xlabel = "Time (seconds)"
else:
start = int(round_time(segment.start, res=res) / res)
end = int(round_time(segment.end, res=res) / res)
xlabel = "Time (frames)"
plt.axvspan(start, end,
ymax=(len(level_bounds) - i) / B,
ymin=(len(level_bounds) - i - 1) / B,
facecolor=cmap(label))
# Plot labels
L = float(len(T.levels) - 1)
plt.yticks(np.linspace(0, (L - 1) / L, num=L) + 1 / L / 2.,
T.levels[1:][::-1])
plt.xlabel(xlabel)
if title is not None:
plt.title(title)
plt.gca().set_xlim([0, end])
| [
"matplotlib.pyplot.title",
"msaf.utils.intervals_to_times",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.gca",
"msaf.utils.segment_labels_to_floats",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.axvline",
"msaf.jams2.converters.load_jams_range",
"logging.warning",
"matplotlib.pyplot.clos... | [((218, 239), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (232, 239), False, 'import matplotlib\n'), ((779, 795), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (788, 795), True, 'import matplotlib.pyplot as plt\n'), ((938, 966), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (seconds)"""'], {}), "('Time (seconds)')\n", (948, 966), True, 'import matplotlib.pyplot as plt\n'), ((968, 993), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, last_bound)'], {}), '((0, last_bound))\n', (976, 993), True, 'import matplotlib.pyplot as plt\n'), ((995, 1013), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1011, 1013), True, 'import matplotlib.pyplot as plt\n'), ((1108, 1119), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1117, 1119), True, 'import matplotlib.pyplot as plt\n'), ((1909, 1978), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': 'figsize', 'dpi': '(120)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(1, figsize=figsize, dpi=120, facecolor='w', edgecolor='k')\n", (1919, 1978), True, 'import matplotlib.pyplot as plt\n'), ((3293, 3321), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gist_rainbow"""'], {}), "('gist_rainbow')\n", (3305, 3321), True, 'import matplotlib.pyplot as plt\n'), ((3407, 3441), 'msaf.utils.times_to_intervals', 'utils.times_to_intervals', (['gt_times'], {}), '(gt_times)\n', (3431, 3441), False, 'from msaf import utils\n'), ((3477, 3546), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': 'figsize', 'dpi': '(120)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(1, figsize=figsize, dpi=120, facecolor='w', edgecolor='k')\n", (3487, 3546), True, 'import matplotlib.pyplot as plt\n'), ((5137, 5165), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gist_rainbow"""'], {}), "('gist_rainbow')\n", (5149, 5165), True, 'import matplotlib.pyplot as plt\n'), ((5241, 5310), 'matplotlib.pyplot.figure', 
'plt.figure', (['(1)'], {'figsize': 'figsize', 'dpi': '(120)', 'facecolor': '"""w"""', 'edgecolor': '"""k"""'}), "(1, figsize=figsize, dpi=120, facecolor='w', edgecolor='k')\n", (5251, 5310), True, 'import matplotlib.pyplot as plt\n'), ((6450, 6471), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap_id'], {}), '(cmap_id)\n', (6462, 6471), True, 'import matplotlib.pyplot as plt\n'), ((7443, 7461), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (7453, 7461), True, 'import matplotlib.pyplot as plt\n'), ((1736, 1761), 'msaf.io.get_algo_ids', 'io.get_algo_ids', (['est_file'], {}), '(est_file)\n', (1751, 1761), False, 'from msaf import io\n'), ((2980, 3005), 'msaf.io.get_algo_ids', 'io.get_algo_ids', (['est_file'], {}), '(est_file)\n', (2995, 3005), False, 'from msaf import io\n'), ((3878, 3907), 'matplotlib.pyplot.axvline', 'plt.axvline', (['bound'], {'color': '"""g"""'}), "(bound, color='g')\n", (3889, 3907), True, 'import matplotlib.pyplot as plt\n'), ((4233, 4256), 'msaf.prefix_dict.keys', 'msaf.prefix_dict.keys', ([], {}), '()\n', (4254, 4256), False, 'import msaf\n'), ((4486, 4586), 'msaf.jams2.converters.load_jams_range', 'jams2.converters.load_jams_range', (['file_struct.ref_file', '"""sections"""'], {'annotator': '(0)', 'context': 'context'}), "(file_struct.ref_file, 'sections',\n annotator=0, context=context)\n", (4518, 4586), False, 'from msaf import jams2\n'), ((4611, 4646), 'msaf.utils.intervals_to_times', 'utils.intervals_to_times', (['ref_inter'], {}), '(ref_inter)\n', (4635, 4646), False, 'from msaf import utils\n'), ((5871, 5911), 'os.path.basename', 'os.path.basename', (['file_struct.audio_file'], {}), '(file_struct.audio_file)\n', (5887, 5911), False, 'import os\n'), ((6802, 6842), 'msaf.utils.segment_labels_to_floats', 'utils.segment_labels_to_floats', (['segments'], {}), '(segments)\n', (6832, 6842), False, 'from msaf import utils\n'), ((7487, 7503), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', 
(7496, 7503), True, 'import matplotlib.pyplot as plt\n'), ((861, 870), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (868, 870), True, 'import matplotlib.pyplot as plt\n'), ((901, 910), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (908, 910), True, 'import matplotlib.pyplot as plt\n'), ((3231, 3265), 'mir_eval.util.index_labels', 'mir_eval.util.index_labels', (['labels'], {}), '(labels)\n', (3257, 3265), False, 'import mir_eval\n'), ((4769, 4866), 'logging.warning', 'logging.warning', (["('No references found in %s. Not plotting groundtruth' % file_struct.ref_file)"], {}), "('No references found in %s. Not plotting groundtruth' %\n file_struct.ref_file)\n", (4784, 4866), False, 'import logging\n'), ((5075, 5109), 'mir_eval.util.index_labels', 'mir_eval.util.index_labels', (['labels'], {}), '(labels)\n', (5101, 5109), False, 'import mir_eval\n'), ((5556, 5592), 'msaf.utils.times_to_intervals', 'utils.times_to_intervals', (['boundaries'], {}), '(boundaries)\n', (5580, 5592), False, 'from msaf import utils\n'), ((7367, 7401), 'numpy.linspace', 'np.linspace', (['(0)', '((L - 1) / L)'], {'num': 'L'}), '(0, (L - 1) / L, num=L)\n', (7378, 7401), True, 'import numpy as np\n'), ((7505, 7514), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7512, 7514), True, 'import matplotlib.pyplot as plt\n'), ((737, 763), 'os.path.basename', 'os.path.basename', (['est_file'], {}), '(est_file)\n', (753, 763), False, 'import os\n'), ((1077, 1098), 'os.path.splitext', 'splitext', (['output_file'], {}), '(output_file)\n', (1085, 1098), False, 'from os.path import join, basename, dirname, splitext\n')] |
from __future__ import annotations
import logging
import string
from datetime import datetime, timezone
from pathlib import Path
from typing import Iterator, List, Optional, Tuple, Union
import h5py
import numpy as np
from ParProcCo.aggregator_interface import AggregatorInterface
from ParProcCo.utils import decode_to_string
from . import __version__
class NXdataAggregator(AggregatorInterface):
def __init__(self) -> None:
self.accumulator_aux_signals: List[np.ndarray]
self.accumulator_axis_lengths: List
self.accumulator_axis_ranges: List
self.accumulator_volume: np.ndarray
self.accumulator_weights: np.ndarray
self.all_axes: List[List]
self.all_slices: List[Tuple[slice, ...]]
self.aux_signal_names: Optional[List[str]]
self.axes_maxs: List
self.axes_mins: List
self.axes_names: List[str]
self.axes_spacing: List
self.data_dimensions: int
self.non_weight_aux_signal_names: List[str]
self.nxdata_name: str
self.nxdata_path_name: str
self.nxentry_name: str
self.data_files: List[Path]
self.renormalisation: bool
self.signal_name: str
self.signal_shapes: List[Tuple]
self.use_default_axes: bool = False
self.is_binoculars: bool = False
def aggregate(self, aggregation_output: Path, data_files: List[Path]) -> Path:
"""Overrides AggregatorInterface.aggregate"""
self._renormalise(data_files)
aggregated_data_file = self._write_aggregation_file(aggregation_output)
return aggregated_data_file
def _renormalise(self, data_files: List[Path]) -> None:
start = datetime.now()
self.data_files = data_files
self._get_nxdata()
self._initialise_arrays()
self._accumulate_volumes()
aggregation_time = datetime.now() - start
logging.info(
f"Aggregation completed in {aggregation_time.total_seconds():.3f}s. Sliced file paths: {data_files}.")
def _initialise_arrays(self) -> None:
self._get_all_axes()
self.axes_mins = [np.inf] * self.data_dimensions
self.axes_maxs = [np.NINF] * self.data_dimensions
for axis_set in self.all_axes:
for j, axis in enumerate(axis_set):
self.axes_mins[j] = min([min(axis), self.axes_mins[j]])
self.axes_maxs[j] = max([max(axis), self.axes_maxs[j]])
logging.debug(f"Calculated axes_mins: {self.axes_mins} and axes_maxs: {self.axes_maxs}")
self.all_slices = []
for axes, signal_shape in zip(self.all_axes, self.signal_shapes):
axes_lengths = tuple(len(axis) for axis in axes)
assert axes_lengths == signal_shape, "axes_lengths must equal volumes_array.shape"
slices = []
for j, axis in enumerate(axes):
start = int(round((axis[0] - self.axes_mins[j]) / self.axes_spacing[j]))
stop = axes_lengths[j] + start
slices.append(slice(start, stop))
self.all_slices.append(tuple(slices))
self.accumulator_axis_lengths = []
self.accumulator_axis_ranges = []
for i in range(self.data_dimensions):
length = int(round((self.axes_maxs[i] - self.axes_mins[i]) / self.axes_spacing[i])) + 1
self.accumulator_axis_lengths.append(length)
ranges = [x * self.axes_spacing[i] + self.axes_mins[i] for x in np.arange(self.accumulator_axis_lengths[i])]
self.accumulator_axis_ranges.append(ranges)
logging.debug(f"Calculated accumulator_axis_lengths: {self.accumulator_axis_lengths} and accumulator_axis_ranges: {self.accumulator_axis_ranges}")
for axes, slices in zip(self.all_axes, self.all_slices):
for axis, axis_range, single_slice in zip(axes, self.accumulator_axis_ranges, slices):
if not np.allclose(axis, axis_range[single_slice]):
raise RuntimeError(f"axis does not match slice {single_slice} of accumulator_axis_range")
self.accumulator_volume = np.zeros(self.accumulator_axis_lengths)
logging.debug(f"Accumulator volume array initialised with shape: {self.accumulator_volume.shape}")
self.accumulator_aux_signals = [np.zeros(self.accumulator_axis_lengths)] * len(self.non_weight_aux_signal_names)
if self.renormalisation:
self.accumulator_weights = np.zeros(self.accumulator_axis_lengths)
logging.debug(f"Accumulator weight array initialised with shape: {self.accumulator_weights.shape}")
def _get_nxdata(self):
"""sets self.nxentry_name, self.nxdata_name and self.axes_names """
data_file = self.data_files[0]
self.is_binoculars = False
with h5py.File(data_file, "r") as root:
if not self.is_binoculars:
self.is_binoculars = "binoculars" in root
self.nxentry_name = self._get_default_nxgroup(root, "NXentry")
nxentry = root[self.nxentry_name]
self.nxdata_name = self._get_default_nxgroup(nxentry, "NXdata")
self.nxdata_path_name = "/".join([self.nxentry_name, self.nxdata_name])
nxdata = root[self.nxdata_path_name]
self._get_default_signals_and_axes(nxdata)
signal_shape = nxdata[self.signal_name].shape
self.data_dimensions = len(signal_shape)
if self.renormalisation:
weights = nxdata["weight"]
assert len(weights.shape) == self.data_dimensions, "signal and weight dimensions must match"
assert weights.shape == signal_shape, "signal and weight shapes must match"
def _get_default_nxgroup(self, f: Union[h5py.File, h5py.Group], class_name: str) -> str:
if "default" in f.attrs:
group_name = f.attrs["default"]
group_name = decode_to_string(group_name)
class_type = f[group_name].attrs.get("NX_class", '')
class_type = decode_to_string(class_type)
assert class_type == class_name, f"{group_name} class_name must be {class_name}"
return group_name
group_name = self._get_group_name(f, class_name)
try:
return next(group_name)
except StopIteration:
raise ValueError(f"no {class_name} group found")
def _get_group_name(self, group: Union[h5py.File, h5py.Group], class_name: str) -> Iterator[str]:
for group_name in group.keys():
try:
class_type = group[group_name].attrs.get("NX_class", '')
class_type = decode_to_string(class_type)
if class_type == class_name:
group_name = decode_to_string(group_name)
yield group_name
except KeyError:
logging.warning(f"KeyError: {group_name} could not be accessed in {group}")
def _get_default_signals_and_axes(self, nxdata: h5py.Group) -> None:
self.renormalisation = False
self.non_weight_aux_signal_names = []
if "auxiliary_signals" in nxdata.attrs:
self.aux_signal_names = [decode_to_string(name) for name in nxdata.attrs["auxiliary_signals"]]
self.non_weight_aux_signal_names = [name for name in self.aux_signal_names if name != "weight"]
logging.info(f"Auxiliary signals found: {self.aux_signal_names}")
if "weight" in self.aux_signal_names:
self.renormalisation = True
else:
self.aux_signal_names = None
if "signal" in nxdata.attrs:
signal_name = nxdata.attrs["signal"]
self.signal_name = decode_to_string(signal_name)
elif "data" in nxdata.keys():
self.signal_name = "data"
if hasattr(self, "signal_name"):
if "axes" in nxdata.attrs:
self.axes_names = [decode_to_string(name) for name in nxdata.attrs["axes"]]
else:
self._generate_axes_names(nxdata)
else:
raise KeyError
def _generate_axes_names(self, nxdata: h5py.Group) -> None:
self.use_default_axes = True
signal_shape = nxdata[self.signal_name].shape
self.axes_names = [f"{letter}-axis" for letter in string.ascii_lowercase[:len(signal_shape)]]
def _get_all_axes(self) -> None:
self.signal_shapes = []
self.all_axes = []
for data_file in self.data_files:
with h5py.File(data_file, "r") as f:
signal_shape = f[self.nxdata_path_name][self.signal_name].shape
logging.info(f"Signal '{'/'.join([self.nxdata_path_name, self.signal_name])}' read from {data_file}. Shape: {signal_shape}")
assert len(signal_shape) == self.data_dimensions
self.signal_shapes.append(signal_shape)
if self.aux_signal_names:
for aux_signal_name in self.aux_signal_names:
aux_signal_shape = f[self.nxdata_path_name][aux_signal_name].shape
logging.debug(f"Auxiliary signal '{'/'.join([self.nxdata_path_name, aux_signal_name])}' read from {data_file}. Shape: {aux_signal_shape}")
assert signal_shape == aux_signal_shape, f"{aux_signal_name} shape must equal signal_shape"
if self.use_default_axes:
axes = [np.arange(length) for length in signal_shape]
else:
axes = [f[self.nxdata_path_name][axis_name][...] for axis_name in self.axes_names]
self.all_axes.append(axes)
self.axes_spacing = [np.mean([np.mean(np.diff(axis)) for axis in axis_set])
for axis_set in zip(*self.all_axes)]
logging.debug(f"Calculated axes spacings: {self.axes_spacing}")
def _accumulate_volumes(self) -> None:
logging.info(f"Accumulating volume with shape {self.accumulator_volume.shape} and axes {self.axes_names}")
for data_file, slices in zip(self.data_files, self.all_slices):
with h5py.File(data_file, "r") as f:
aux_signals = []
volume = f[self.nxdata_path_name][self.signal_name][...]
logging.debug(f"Reading volume from {'/'.join([self.nxdata_path_name, self.signal_name])} in {data_file}. Shape is {volume.shape}")
if self.renormalisation:
weights = f[self.nxdata_path_name]["weight"][...]
for name in self.non_weight_aux_signal_names:
aux_signals.append(f[self.nxdata_path_name][name][...])
logging.debug(f"Reading auxiliary signal from {'/'.join([self.nxdata_path_name, name])} in {data_file}")
if self.renormalisation:
volume = np.multiply(volume, weights)
self.accumulator_weights[slices] += weights
aux_signals = [np.multiply(aux_signal, weights) for aux_signal in aux_signals]
self.accumulator_volume[slices] += volume
for signal, accumulator_signal in zip(aux_signals, self.accumulator_aux_signals):
accumulator_signal[slices] += signal
if self.renormalisation:
logging.info(f"Renormalising accumulator_volume with weights {'/'.join([self.nxdata_path_name, 'weight'])}")
self.accumulator_volume = self.accumulator_volume / self.accumulator_weights
self.accumulator_volume[np.isnan(self.accumulator_volume)] = 0
for aux_signal in self.accumulator_aux_signals:
logging.info(f"Renormalising aux_signal with weights in {'/'.join([self.nxdata_path_name, 'weight'])}")
aux_signal = aux_signal / self.accumulator_weights
aux_signal[np.isnan(aux_signal)] = 0
def _write_aggregation_file(self, aggregation_output: Path) -> Path:
start = datetime.now()
logging.info(f"Writing aggregated data to file: {aggregation_output}")
with h5py.File(aggregation_output, "w") as f:
processed = f.create_group(self.nxentry_name)
processed.attrs["NX_class"] = "NXentry"
processed.attrs["default"] = self.nxdata_name
process = processed.create_group("process")
process.attrs["NX_class"] = "NXprocess"
process.create_dataset("date", data=str(datetime.now(timezone.utc)))
process.create_dataset("parameters", data=f"inputs: {self.data_files}, output: {aggregation_output}")
process.create_dataset("program", data="ParProcCo")
process.create_dataset("version", data=__version__)
data_group = processed.create_group(self.nxdata_name)
data_group.attrs["NX_class"] = "NXdata"
if self.aux_signal_names:
data_group.attrs["auxiliary_signals"] = self.aux_signal_names
data_group.attrs["axes"] = self.axes_names
data_group.attrs["signal"] = self.signal_name
for i, axis in enumerate(self.axes_names):
data_group.attrs[f"{axis}_indices"] = i
data_group.create_dataset(f"{axis}", data=self.accumulator_axis_ranges[i])
data_group.create_dataset(self.signal_name, data=self.accumulator_volume)
if self.renormalisation:
data_group.create_dataset("weight", data=self.accumulator_weights)
for name, dataset in zip(self.non_weight_aux_signal_names, self.accumulator_aux_signals):
data_group.create_dataset(name, data=dataset)
f.attrs["default"] = self.nxentry_name
for i, filepath in enumerate(self.data_files):
with h5py.File(filepath, "r") as df:
data_nxentry_group = df[self.nxentry_name]
group_name = self._get_group_name(data_nxentry_group, "NXprocess")
for j, name in enumerate(group_name):
if not "old_processed" in f:
old_processed = f.create_group("old_processed")
old_processed.attrs["NX_class"] = "NXentry"
logging.info(f"Created 'old_processed' group in {aggregation_output}")
data_nxentry_group.copy(name, old_processed, name=f"process{i}.{j}")
logging.info(
f"Copied '{'/'.join([data_nxentry_group.name, name])}' group in {filepath} to"
f" '{'/'.join(['old_processed', f'process{i}.{j}'])}' group in {aggregation_output}")
if self.is_binoculars:
logging.info("Writing BINoculars group")
binoculars = f.create_group("binoculars")
binoculars.attrs["type"] = "space"
f.create_group("binoculars/axes")
binocular_axes = [axis.split("-axis")[0].capitalize() for axis in self.axes_names]
for i, axis in enumerate(binocular_axes):
axis_min = self.axes_mins[i]
axis_max = self.axes_maxs[i]
scaling = (self.accumulator_axis_lengths[i] - 1) / (axis_max - axis_min)
axis_dataset = [i, axis_min, axis_max, self.axes_spacing[i], axis_min * scaling, axis_max * scaling]
f.create_dataset(f"binoculars/axes/{axis}", data=axis_dataset)
binoculars["counts"] = data_group[self.signal_name]
if self.renormalisation:
binoculars["contributions"] = data_group["weight"]
elapsed_time = datetime.now() - start
logging.info(
f"Aggregated data written in {elapsed_time.total_seconds():.3f}s. Aggregation file: {aggregation_output}")
return aggregation_output
| [
"h5py.File",
"logging.debug",
"numpy.multiply",
"logging.warning",
"numpy.allclose",
"numpy.zeros",
"numpy.isnan",
"logging.info",
"numpy.diff",
"numpy.arange",
"datetime.datetime.now",
"ParProcCo.utils.decode_to_string"
] | [((1703, 1717), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1715, 1717), False, 'from datetime import datetime, timezone\n'), ((2466, 2559), 'logging.debug', 'logging.debug', (['f"""Calculated axes_mins: {self.axes_mins} and axes_maxs: {self.axes_maxs}"""'], {}), "(\n f'Calculated axes_mins: {self.axes_mins} and axes_maxs: {self.axes_maxs}')\n", (2479, 2559), False, 'import logging\n'), ((3595, 3751), 'logging.debug', 'logging.debug', (['f"""Calculated accumulator_axis_lengths: {self.accumulator_axis_lengths} and accumulator_axis_ranges: {self.accumulator_axis_ranges}"""'], {}), "(\n f'Calculated accumulator_axis_lengths: {self.accumulator_axis_lengths} and accumulator_axis_ranges: {self.accumulator_axis_ranges}'\n )\n", (3608, 3751), False, 'import logging\n'), ((4120, 4159), 'numpy.zeros', 'np.zeros', (['self.accumulator_axis_lengths'], {}), '(self.accumulator_axis_lengths)\n', (4128, 4159), True, 'import numpy as np\n'), ((4168, 4276), 'logging.debug', 'logging.debug', (['f"""Accumulator volume array initialised with shape: {self.accumulator_volume.shape}"""'], {}), "(\n f'Accumulator volume array initialised with shape: {self.accumulator_volume.shape}'\n )\n", (4181, 4276), False, 'import logging\n'), ((9799, 9862), 'logging.debug', 'logging.debug', (['f"""Calculated axes spacings: {self.axes_spacing}"""'], {}), "(f'Calculated axes spacings: {self.axes_spacing}')\n", (9812, 9862), False, 'import logging\n'), ((9915, 10031), 'logging.info', 'logging.info', (['f"""Accumulating volume with shape {self.accumulator_volume.shape} and axes {self.axes_names}"""'], {}), "(\n f'Accumulating volume with shape {self.accumulator_volume.shape} and axes {self.axes_names}'\n )\n", (9927, 10031), False, 'import logging\n'), ((11930, 11944), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11942, 11944), False, 'from datetime import datetime, timezone\n'), ((11953, 12023), 'logging.info', 'logging.info', (['f"""Writing aggregated data to file: 
{aggregation_output}"""'], {}), "(f'Writing aggregated data to file: {aggregation_output}')\n", (11965, 12023), False, 'import logging\n'), ((1878, 1892), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1890, 1892), False, 'from datetime import datetime, timezone\n'), ((4460, 4499), 'numpy.zeros', 'np.zeros', (['self.accumulator_axis_lengths'], {}), '(self.accumulator_axis_lengths)\n', (4468, 4499), True, 'import numpy as np\n'), ((4512, 4621), 'logging.debug', 'logging.debug', (['f"""Accumulator weight array initialised with shape: {self.accumulator_weights.shape}"""'], {}), "(\n f'Accumulator weight array initialised with shape: {self.accumulator_weights.shape}'\n )\n", (4525, 4621), False, 'import logging\n'), ((4803, 4828), 'h5py.File', 'h5py.File', (['data_file', '"""r"""'], {}), "(data_file, 'r')\n", (4812, 4828), False, 'import h5py\n'), ((5910, 5938), 'ParProcCo.utils.decode_to_string', 'decode_to_string', (['group_name'], {}), '(group_name)\n', (5926, 5938), False, 'from ParProcCo.utils import decode_to_string\n'), ((6029, 6057), 'ParProcCo.utils.decode_to_string', 'decode_to_string', (['class_type'], {}), '(class_type)\n', (6045, 6057), False, 'from ParProcCo.utils import decode_to_string\n'), ((7368, 7433), 'logging.info', 'logging.info', (['f"""Auxiliary signals found: {self.aux_signal_names}"""'], {}), "(f'Auxiliary signals found: {self.aux_signal_names}')\n", (7380, 7433), False, 'import logging\n'), ((7701, 7730), 'ParProcCo.utils.decode_to_string', 'decode_to_string', (['signal_name'], {}), '(signal_name)\n', (7717, 7730), False, 'from ParProcCo.utils import decode_to_string\n'), ((12037, 12071), 'h5py.File', 'h5py.File', (['aggregation_output', '"""w"""'], {}), "(aggregation_output, 'w')\n", (12046, 12071), False, 'import h5py\n'), ((15631, 15645), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15643, 15645), False, 'from datetime import datetime, timezone\n'), ((4307, 4346), 'numpy.zeros', 'np.zeros', 
(['self.accumulator_axis_lengths'], {}), '(self.accumulator_axis_lengths)\n', (4315, 4346), True, 'import numpy as np\n'), ((6641, 6669), 'ParProcCo.utils.decode_to_string', 'decode_to_string', (['class_type'], {}), '(class_type)\n', (6657, 6669), False, 'from ParProcCo.utils import decode_to_string\n'), ((7178, 7200), 'ParProcCo.utils.decode_to_string', 'decode_to_string', (['name'], {}), '(name)\n', (7194, 7200), False, 'from ParProcCo.utils import decode_to_string\n'), ((8503, 8528), 'h5py.File', 'h5py.File', (['data_file', '"""r"""'], {}), "(data_file, 'r')\n", (8512, 8528), False, 'import h5py\n'), ((10111, 10136), 'h5py.File', 'h5py.File', (['data_file', '"""r"""'], {}), "(data_file, 'r')\n", (10120, 10136), False, 'import h5py\n'), ((10834, 10862), 'numpy.multiply', 'np.multiply', (['volume', 'weights'], {}), '(volume, weights)\n', (10845, 10862), True, 'import numpy as np\n'), ((11501, 11534), 'numpy.isnan', 'np.isnan', (['self.accumulator_volume'], {}), '(self.accumulator_volume)\n', (11509, 11534), True, 'import numpy as np\n'), ((14675, 14715), 'logging.info', 'logging.info', (['"""Writing BINoculars group"""'], {}), "('Writing BINoculars group')\n", (14687, 14715), False, 'import logging\n'), ((3486, 3529), 'numpy.arange', 'np.arange', (['self.accumulator_axis_lengths[i]'], {}), '(self.accumulator_axis_lengths[i])\n', (3495, 3529), True, 'import numpy as np\n'), ((3930, 3973), 'numpy.allclose', 'np.allclose', (['axis', 'axis_range[single_slice]'], {}), '(axis, axis_range[single_slice])\n', (3941, 3973), True, 'import numpy as np\n'), ((6748, 6776), 'ParProcCo.utils.decode_to_string', 'decode_to_string', (['group_name'], {}), '(group_name)\n', (6764, 6776), False, 'from ParProcCo.utils import decode_to_string\n'), ((6859, 6934), 'logging.warning', 'logging.warning', (['f"""KeyError: {group_name} could not be accessed in {group}"""'], {}), "(f'KeyError: {group_name} could not be accessed in {group}')\n", (6874, 6934), False, 'import logging\n'), ((7923, 
7945), 'ParProcCo.utils.decode_to_string', 'decode_to_string', (['name'], {}), '(name)\n', (7939, 7945), False, 'from ParProcCo.utils import decode_to_string\n'), ((10954, 10986), 'numpy.multiply', 'np.multiply', (['aux_signal', 'weights'], {}), '(aux_signal, weights)\n', (10965, 10986), True, 'import numpy as np\n'), ((11814, 11834), 'numpy.isnan', 'np.isnan', (['aux_signal'], {}), '(aux_signal)\n', (11822, 11834), True, 'import numpy as np\n'), ((13731, 13755), 'h5py.File', 'h5py.File', (['filepath', '"""r"""'], {}), "(filepath, 'r')\n", (13740, 13755), False, 'import h5py\n'), ((9425, 9442), 'numpy.arange', 'np.arange', (['length'], {}), '(length)\n', (9434, 9442), True, 'import numpy as np\n'), ((9685, 9698), 'numpy.diff', 'np.diff', (['axis'], {}), '(axis)\n', (9692, 9698), True, 'import numpy as np\n'), ((12407, 12433), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (12419, 12433), False, 'from datetime import datetime, timezone\n'), ((14200, 14270), 'logging.info', 'logging.info', (['f"""Created \'old_processed\' group in {aggregation_output}"""'], {}), '(f"Created \'old_processed\' group in {aggregation_output}")\n', (14212, 14270), False, 'import logging\n')] |
# -*- coding: utf-8 -*-
import cv2
import numpy as np
#感知哈希算法
def pHash(image):
image = cv2.resize(image,(32,32), interpolation=cv2.INTER_CUBIC)
image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
# 将灰度图转为浮点型,再进行dct变换
dct = cv2.dct(np.float32(image))
# 取左上角的8*8,这些代表图片的最低频率
# 这个操作等价于c++中利用opencv实现的掩码操作
# 在python中进行掩码操作,可以直接这样取出图像矩阵的某一部分
dct_roi = dct[0:8,0:8]
avreage = np.mean(dct_roi)
hash = []
for i in range(dct_roi.shape[0]):
for j in range(dct_roi.shape[1]):
if dct_roi[i,j] > avreage:
hash.append(1)
else:
hash.append(0)
return hash
#均值哈希算法
def aHash(image):
#缩放为8*8
image=cv2.resize(image,(8,8),interpolation=cv2.INTER_CUBIC)
#转换为灰度图
image=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
avreage = np.mean(image)
hash = []
for i in range(image.shape[0]):
for j in range(image.shape[1]):
if image[i,j] > avreage:
hash.append(1)
else:
hash.append(0)
return hash
#差值感知算法
def dHash(image):
#缩放9*8
image=cv2.resize(image,(9,8),interpolation=cv2.INTER_CUBIC)
#转换灰度图
image=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
hash=[]
#每行前一个像素大于后一个像素为1,相反为0,生成哈希
for i in range(8):
for j in range(8):
if image[i,j]>image[i,j+1]:
hash.append(1)
else:
hash.append(0)
return hash
#计算汉明距离
def Hamming_distance(hash1,hash2):
num = 0
for index in range(len(hash1)):
if hash1[index] != hash2[index]:
num += 1
return num
#将距离转化为相似度
def similarity(hash1, hash2) :
dist = Hamming_distance(hash1,hash2)
return (1 - dist * 1.0 / 64)
if __name__ == "__main__":
image_file1 = 'E:\\cocAssistant\\core\\temp\\screen.jpg'
image_file2 = 'E:\\cocAssistant\\core\\temp\\screen1.jpg'
img1 = cv2.imread(image_file1)
img2 = cv2.imread(image_file2)
hash1 = pHash(img1)
hash2 = pHash(img2)
dist = Hamming_distance(hash1, hash2)
#将距离转化为相似度
m_similarity = similarity(hash1, hash2)
print(dist)
print(m_similarity)
| [
"cv2.cvtColor",
"numpy.float32",
"cv2.imread",
"numpy.mean",
"cv2.resize"
] | [((93, 151), 'cv2.resize', 'cv2.resize', (['image', '(32, 32)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(image, (32, 32), interpolation=cv2.INTER_CUBIC)\n', (103, 151), False, 'import cv2\n'), ((162, 201), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (174, 201), False, 'import cv2\n'), ((403, 419), 'numpy.mean', 'np.mean', (['dct_roi'], {}), '(dct_roi)\n', (410, 419), True, 'import numpy as np\n'), ((698, 754), 'cv2.resize', 'cv2.resize', (['image', '(8, 8)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(image, (8, 8), interpolation=cv2.INTER_CUBIC)\n', (708, 754), False, 'import cv2\n'), ((774, 813), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (786, 813), False, 'import cv2\n'), ((827, 841), 'numpy.mean', 'np.mean', (['image'], {}), '(image)\n', (834, 841), True, 'import numpy as np\n'), ((1113, 1169), 'cv2.resize', 'cv2.resize', (['image', '(9, 8)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(image, (9, 8), interpolation=cv2.INTER_CUBIC)\n', (1123, 1169), False, 'import cv2\n'), ((1188, 1227), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1200, 1227), False, 'import cv2\n'), ((1905, 1928), 'cv2.imread', 'cv2.imread', (['image_file1'], {}), '(image_file1)\n', (1915, 1928), False, 'import cv2\n'), ((1940, 1963), 'cv2.imread', 'cv2.imread', (['image_file2'], {}), '(image_file2)\n', (1950, 1963), False, 'import cv2\n'), ((244, 261), 'numpy.float32', 'np.float32', (['image'], {}), '(image)\n', (254, 261), True, 'import numpy as np\n')] |
# Copyright 2017 <NAME>, <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example: The toggle switch model
"""
# Initialize
import numpy as np
from gillespy2.solvers.cpp import SSACSolver
from sciope.utilities.summarystats import auto_tsfresh
from sciope.utilities.priors import uniform_prior
from sciope.inference.abc_inference import ABC
from sklearn.metrics import mean_absolute_error
from sciope.utilities.distancefunctions import naive_squared
import gillespy2
class ToggleSwitch(gillespy2.Model):
""" Gardner et al. Nature (1999)
'Construction of a genetic toggle switch in Escherichia coli'
"""
def __init__(self):
# Initialize the model.
gillespy2.Model.__init__(self, name="toggle_switch")
# Parameters
alpha1 = gillespy2.Parameter(name='alpha1', expression=1)
alpha2 = gillespy2.Parameter(name='alpha2', expression=1)
beta = gillespy2.Parameter(name='beta', expression="2.0")
gamma = gillespy2.Parameter(name='gamma', expression="2.0")
mu = gillespy2.Parameter(name='mu', expression=1.0)
self.add_parameter([alpha1, alpha2, beta, gamma, mu])
# Species
U = gillespy2.Species(name='U', initial_value=10)
V = gillespy2.Species(name='V', initial_value=10)
self.add_species([U, V])
# Reactions
cu = gillespy2.Reaction(name="r1", reactants={}, products={U: 1},
propensity_function="alpha1/(1+pow(V,beta))")
cv = gillespy2.Reaction(name="r2", reactants={}, products={V: 1},
propensity_function="alpha2/(1+pow(U,gamma))")
du = gillespy2.Reaction(name="r3", reactants={U: 1}, products={},
rate=mu)
dv = gillespy2.Reaction(name="r4", reactants={V: 1}, products={},
rate=mu)
self.add_reaction([cu, cv, du, dv])
self.timespan(np.linspace(0, 50, 101))
def set_model_parameters(params, model):
""" params - array, needs to have the same order as
model.listOfParameters """
for e, (pname, p) in enumerate(model.listOfParameters.items()):
model.get_parameter(pname).set_expression(params[e])
return model
def get_true_param():
"""
Return the 'true' parameter values to be inferred in a test run
:return: the 'true' or reference parameter set
"""
model = ToggleSwitch()
default_param = np.array(list(model.listOfParameters.items()))[:, 1] # take default from model as reference
true_param = []
for exp in default_param:
true_param.append(float(exp.expression))
# set the bounds
true_param = np.array(true_param)
return true_param
def get_bounds():
"""
return the default bounds
:return: bounds in each dimension as a list
"""
fixed_point = get_true_param()
dmin = fixed_point * 0.1
dmax = fixed_point * 2.0
return dmin, dmax
def get_fixed_data():
"""
generate fixed data for inference
:return: a dataset of 100 trajectories
"""
model = ToggleSwitch()
fixed_data = model.run(solver=SSACSolver, number_of_trajectories=100, show_labels=False)
# reshape data to (n_points,n_species,n_timepoints)
fixed_data = np.asarray([x.T for x in fixed_data])
# and remove timepoints array
fixed_data = fixed_data[:, 1:, :]
return fixed_data
def simulate(params):
"""
Instantiate a model and simulate input parameters
:param params: input parameters
:return:
"""
model = ToggleSwitch()
model_update = set_model_parameters(params, model)
num_trajectories = 1 # TODO: howto handle ensembles
res = model_update.run(solver=SSACSolver, show_labels=False,
number_of_trajectories=num_trajectories)
tot_res = np.asarray([x.T for x in res]) # reshape to (N, S, T)
tot_res = tot_res[:, 1:, :] # should not contain timepoints
return tot_res
def abc_test_run():
"""
Perform a test abc run
:return: ABC mean absolute error
"""
dmin, dmax = get_bounds()
uni_prior = uniform_prior.UniformPrior(dmin, dmax)
fixed_data = get_fixed_data()
summ_func = auto_tsfresh.SummariesTSFRESH()
ns = naive_squared.NaiveSquaredDistance()
abc = ABC(fixed_data, sim=simulate, prior_function=uni_prior,
summaries_function=summ_func.compute, distance_function=ns, use_logger=True)
abc.compute_fixed_mean(chunk_size=2)
res = abc.infer(num_samples=100, batch_size=10, chunk_size=2)
true_params = get_true_param()
mae_inference = mean_absolute_error(true_params, abc.results['inferred_parameters'])
return mae_inference
| [
"gillespy2.Species",
"numpy.asarray",
"sciope.utilities.distancefunctions.naive_squared.NaiveSquaredDistance",
"gillespy2.Parameter",
"gillespy2.Model.__init__",
"sklearn.metrics.mean_absolute_error",
"sciope.utilities.priors.uniform_prior.UniformPrior",
"sciope.inference.abc_inference.ABC",
"sciope... | [((3190, 3210), 'numpy.array', 'np.array', (['true_param'], {}), '(true_param)\n', (3198, 3210), True, 'import numpy as np\n'), ((3777, 3814), 'numpy.asarray', 'np.asarray', (['[x.T for x in fixed_data]'], {}), '([x.T for x in fixed_data])\n', (3787, 3814), True, 'import numpy as np\n'), ((4340, 4370), 'numpy.asarray', 'np.asarray', (['[x.T for x in res]'], {}), '([x.T for x in res])\n', (4350, 4370), True, 'import numpy as np\n'), ((4628, 4666), 'sciope.utilities.priors.uniform_prior.UniformPrior', 'uniform_prior.UniformPrior', (['dmin', 'dmax'], {}), '(dmin, dmax)\n', (4654, 4666), False, 'from sciope.utilities.priors import uniform_prior\n'), ((4717, 4748), 'sciope.utilities.summarystats.auto_tsfresh.SummariesTSFRESH', 'auto_tsfresh.SummariesTSFRESH', ([], {}), '()\n', (4746, 4748), False, 'from sciope.utilities.summarystats import auto_tsfresh\n'), ((4758, 4794), 'sciope.utilities.distancefunctions.naive_squared.NaiveSquaredDistance', 'naive_squared.NaiveSquaredDistance', ([], {}), '()\n', (4792, 4794), False, 'from sciope.utilities.distancefunctions import naive_squared\n'), ((4805, 4942), 'sciope.inference.abc_inference.ABC', 'ABC', (['fixed_data'], {'sim': 'simulate', 'prior_function': 'uni_prior', 'summaries_function': 'summ_func.compute', 'distance_function': 'ns', 'use_logger': '(True)'}), '(fixed_data, sim=simulate, prior_function=uni_prior, summaries_function=\n summ_func.compute, distance_function=ns, use_logger=True)\n', (4808, 4942), False, 'from sciope.inference.abc_inference import ABC\n'), ((5114, 5182), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['true_params', "abc.results['inferred_parameters']"], {}), "(true_params, abc.results['inferred_parameters'])\n", (5133, 5182), False, 'from sklearn.metrics import mean_absolute_error\n'), ((1196, 1248), 'gillespy2.Model.__init__', 'gillespy2.Model.__init__', (['self'], {'name': '"""toggle_switch"""'}), "(self, name='toggle_switch')\n", (1220, 1248), False, 'import 
gillespy2\n'), ((1287, 1335), 'gillespy2.Parameter', 'gillespy2.Parameter', ([], {'name': '"""alpha1"""', 'expression': '(1)'}), "(name='alpha1', expression=1)\n", (1306, 1335), False, 'import gillespy2\n'), ((1353, 1401), 'gillespy2.Parameter', 'gillespy2.Parameter', ([], {'name': '"""alpha2"""', 'expression': '(1)'}), "(name='alpha2', expression=1)\n", (1372, 1401), False, 'import gillespy2\n'), ((1417, 1467), 'gillespy2.Parameter', 'gillespy2.Parameter', ([], {'name': '"""beta"""', 'expression': '"""2.0"""'}), "(name='beta', expression='2.0')\n", (1436, 1467), False, 'import gillespy2\n'), ((1484, 1535), 'gillespy2.Parameter', 'gillespy2.Parameter', ([], {'name': '"""gamma"""', 'expression': '"""2.0"""'}), "(name='gamma', expression='2.0')\n", (1503, 1535), False, 'import gillespy2\n'), ((1549, 1595), 'gillespy2.Parameter', 'gillespy2.Parameter', ([], {'name': '"""mu"""', 'expression': '(1.0)'}), "(name='mu', expression=1.0)\n", (1568, 1595), False, 'import gillespy2\n'), ((1689, 1734), 'gillespy2.Species', 'gillespy2.Species', ([], {'name': '"""U"""', 'initial_value': '(10)'}), "(name='U', initial_value=10)\n", (1706, 1734), False, 'import gillespy2\n'), ((1747, 1792), 'gillespy2.Species', 'gillespy2.Species', ([], {'name': '"""V"""', 'initial_value': '(10)'}), "(name='V', initial_value=10)\n", (1764, 1792), False, 'import gillespy2\n'), ((1860, 1970), 'gillespy2.Reaction', 'gillespy2.Reaction', ([], {'name': '"""r1"""', 'reactants': '{}', 'products': '{U: 1}', 'propensity_function': '"""alpha1/(1+pow(V,beta))"""'}), "(name='r1', reactants={}, products={U: 1},\n propensity_function='alpha1/(1+pow(V,beta))')\n", (1878, 1970), False, 'import gillespy2\n'), ((2012, 2123), 'gillespy2.Reaction', 'gillespy2.Reaction', ([], {'name': '"""r2"""', 'reactants': '{}', 'products': '{V: 1}', 'propensity_function': '"""alpha2/(1+pow(U,gamma))"""'}), "(name='r2', reactants={}, products={V: 1},\n propensity_function='alpha2/(1+pow(U,gamma))')\n", (2030, 2123), False, 'import 
gillespy2\n'), ((2165, 2234), 'gillespy2.Reaction', 'gillespy2.Reaction', ([], {'name': '"""r3"""', 'reactants': '{U: 1}', 'products': '{}', 'rate': 'mu'}), "(name='r3', reactants={U: 1}, products={}, rate=mu)\n", (2183, 2234), False, 'import gillespy2\n'), ((2280, 2349), 'gillespy2.Reaction', 'gillespy2.Reaction', ([], {'name': '"""r4"""', 'reactants': '{V: 1}', 'products': '{}', 'rate': 'mu'}), "(name='r4', reactants={V: 1}, products={}, rate=mu)\n", (2298, 2349), False, 'import gillespy2\n'), ((2448, 2471), 'numpy.linspace', 'np.linspace', (['(0)', '(50)', '(101)'], {}), '(0, 50, 101)\n', (2459, 2471), True, 'import numpy as np\n')] |
import numpy as np
from probly.lib.utils import array
from probly.distr import Normal, Distribution
class Wigner(Distribution):
"""
A Wigner random matrix.
A random symmetric matrix whose upper-diagonal entries are independent,
identically distributed random variables.
Parameters
----------
dim : int
The matrix dimension.
rv : RandomVariable, optional
A random variable whose distribution the entries will share. Default is
a standard normal random variable.
"""
def __init__(self, dim, rv=None):
self.dim = dim
if rv is None:
self.rv = Normal()
else:
self.rv = rv
arr = [[self.rv.copy() for _ in range(dim)] for _ in range(dim)]
self.arr = array([[arr[i][j] if i <= j else arr[j][i] for i in range(dim)] for j in range(dim)])
super().__init__()
def _sampler(self, seed):
return self.arr(seed)
def __str__(self):
return 'Wigner({}, {})'.format(self.dim, self.rv)
class Wishart(Distribution):
"""
A Wishart random matrix.
An `n` by `n` random symmetric matrix obtained as the (matrix) product of
an `m` by `n` random matrix with independent, identically distributed
entries, and its transpose.
Parameters
----------
m : int
The first dimension parameter.
n : int
The second dimension parameter.
rv : RandomVariable, optional
A random variable.
Attributes
----------
lambda_ : float
The ratio `m / n`.
"""
def __init__(self, m, n, rv=None):
self.m = m
self.n = n
self.lambda_ = m / n
if rv is None:
self.rv = Normal()
else:
self.rv = rv
rect = np.array([[self.rv.copy() for _ in range(n)] for _ in range(m)])
self.arr = array(np.dot(rect.T, rect))
super().__init__()
def _sampler(self, seed):
return self.arr(seed)
def __str__(self):
return 'Wishart({}, {}, {})'.format(self.m, self.n, self.rv)
| [
"numpy.dot",
"probly.distr.Normal"
] | [((637, 645), 'probly.distr.Normal', 'Normal', ([], {}), '()\n', (643, 645), False, 'from probly.distr import Normal, Distribution\n'), ((1721, 1729), 'probly.distr.Normal', 'Normal', ([], {}), '()\n', (1727, 1729), False, 'from probly.distr import Normal, Distribution\n'), ((1874, 1894), 'numpy.dot', 'np.dot', (['rect.T', 'rect'], {}), '(rect.T, rect)\n', (1880, 1894), True, 'import numpy as np\n')] |
import numpy as np
import scipy
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
from orientTensor import calcOrientTensor
def calcHarris(Im, gradKsize, gradSigma, window_size, kappa):
T11, T12, T22 = calcOrientTensor(Im, gradKsize, gradSigma, window_size);
Ch = np.multiply(T11,T22) - np.multiply(T12,T12) -kappa*(T11+T22)
return Ch
def cornerThresh(Im, gradKsize, gradSigma, window_size, kappa, thresh):
Ch = calcHarris(Im, gradKsize, gradSigma, window_size, kappa)
threshIm = np.multiply((Ch > thresh),Ch)
return threshIm
def harrisMax(Im, gradKsize, gradSigma, window_size, kappa, thresh, numToTrack):
threshIm = cornerThresh(Im, gradKsize, gradSigma, window_size, kappa, thresh)
areaSize = 5
#img_max = scipy.signal.order_filter(threshIm, np.ones((3,3)), 9-1)
#[row, col] = np.nonzero(threshIm == img_max)
img_max = filters.maximum_filter(threshIm, areaSize)
maxima = (threshIm == img_max)
img_min = filters.minimum_filter(threshIm, areaSize)
diff = ((img_max- img_min) > thresh)
maxima[diff == 0] = 0
labeled, num_objects = ndimage.label(maxima)
slices = ndimage.find_objects(labeled)
maxes = np.zeros(threshIm.shape)
'''
for dy, dx in slices:
x_center = (dx.start + dx.stop -1)/2
col.append(x_center)
y_center = (dy.start + dy.stop -1 )/2
row.append(y_center)
return row[0:numToTrack], col[0:numToTrack]
'''
for dy, dx in slices:
x_center = (dx.start + dx.stop -1)/2
y_center = (dy.start + dy.stop -1 )/2
maxes[int(round(y_center)), int(round(x_center))] = 1;
indices = (-maxes).argpartition(numToTrack, axis=None)[:numToTrack]
row, col = np.unravel_index(indices, maxes.shape)
bestMaxes = np.zeros(threshIm.shape)
bestMaxes[row, col] = 1
return maxes, bestMaxes, row, col
| [
"scipy.ndimage.filters.maximum_filter",
"numpy.multiply",
"scipy.ndimage.filters.minimum_filter",
"orientTensor.calcOrientTensor",
"scipy.ndimage.find_objects",
"numpy.zeros",
"numpy.unravel_index",
"scipy.ndimage.label"
] | [((306, 361), 'orientTensor.calcOrientTensor', 'calcOrientTensor', (['Im', 'gradKsize', 'gradSigma', 'window_size'], {}), '(Im, gradKsize, gradSigma, window_size)\n', (322, 361), False, 'from orientTensor import calcOrientTensor\n'), ((601, 629), 'numpy.multiply', 'np.multiply', (['(Ch > thresh)', 'Ch'], {}), '(Ch > thresh, Ch)\n', (612, 629), True, 'import numpy as np\n'), ((970, 1012), 'scipy.ndimage.filters.maximum_filter', 'filters.maximum_filter', (['threshIm', 'areaSize'], {}), '(threshIm, areaSize)\n', (992, 1012), True, 'import scipy.ndimage.filters as filters\n'), ((1062, 1104), 'scipy.ndimage.filters.minimum_filter', 'filters.minimum_filter', (['threshIm', 'areaSize'], {}), '(threshIm, areaSize)\n', (1084, 1104), True, 'import scipy.ndimage.filters as filters\n'), ((1199, 1220), 'scipy.ndimage.label', 'ndimage.label', (['maxima'], {}), '(maxima)\n', (1212, 1220), True, 'import scipy.ndimage as ndimage\n'), ((1234, 1263), 'scipy.ndimage.find_objects', 'ndimage.find_objects', (['labeled'], {}), '(labeled)\n', (1254, 1263), True, 'import scipy.ndimage as ndimage\n'), ((1276, 1300), 'numpy.zeros', 'np.zeros', (['threshIm.shape'], {}), '(threshIm.shape)\n', (1284, 1300), True, 'import numpy as np\n'), ((1807, 1845), 'numpy.unravel_index', 'np.unravel_index', (['indices', 'maxes.shape'], {}), '(indices, maxes.shape)\n', (1823, 1845), True, 'import numpy as np\n'), ((1863, 1887), 'numpy.zeros', 'np.zeros', (['threshIm.shape'], {}), '(threshIm.shape)\n', (1871, 1887), True, 'import numpy as np\n'), ((372, 393), 'numpy.multiply', 'np.multiply', (['T11', 'T22'], {}), '(T11, T22)\n', (383, 393), True, 'import numpy as np\n'), ((395, 416), 'numpy.multiply', 'np.multiply', (['T12', 'T12'], {}), '(T12, T12)\n', (406, 416), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright CNRS 2012
# <NAME> (LULI)
# ajout modificaton pour cross_section = 'gaussian1D' <NAME> (2016)
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software.
import numpy as np
from ..math.integrals import Int_super_gaussian
from ..math.integrals import Int_super_gaussian1D
class LaserPulse(object):
def __init__(self, pulse_profile, I0, **args):
"""
Initialize the laser pulse
"""
self.I0 = I0
self.dt = args['dt']
self.profile = pulse_profile
if pulse_profile == 'square':
self._instance_call= lambda x: self.square_pulse(x, I0, **args)
elif pulse_profile == 'gaussian':
self._instance_call= lambda x: self.gaussian_pulse(x, I0, **args)
else:
raise NotImplementedError
def square_pulse(self, P_time, I0, dt=1.0e-9, t0=0.2e-9, rise_time=30e-12):
P_pattern = np.ones(P_time.shape)
mask = P_time < t0
P_pattern[mask] = np.exp(-(P_time[mask] - t0)**2/(2*(rise_time)**2))
mask = P_time > dt + t0
# P_pattern[mask] = 0
# mask = P_time > dt + t0
P_pattern[mask] = np.exp(-(P_time[mask] - (t0 + dt))**2/(2*(rise_time)**2))
P_pattern[-1] = 0
return P_pattern*I0
def gaussian_pulse(self, P_time, I0, dt=1.0e-9, t0=1.0e-9):
P_pattern = np.exp(-(P_time - t0)**2/(2*(dt)**2))
return P_pattern*I0
def __call__(self, x):
return self._instance_call(x)
class LaserBeams(object):
def __init__(self, pars, N=500, t_end=1.7e-9, gaussian_target_ratio=2.0):
"""
Initalize the Laser Beam
Warning: This is a partial implementation that assumes that
a laser pulse is associated to a single laser beam.
Parameters:
-----------
- pars: a MegredDict object with the parameters of the simulation
- N : number of points in the laser pulse profile
- t_end: duration of the ray-tracing
- gaussian_target_ratio: the size of the domain to map for the laser ray tracing.
By default, use 2x the size of ed_gaussianRadiusMajor
"""
self.p = pars
self.t_end = t_end
self.gaussian_target_ratio = gaussian_target_ratio
self.P_time = np.linspace(0, t_end, N)
self._get_beams_surface()
self.gridnRadialTics = []
self.numberOfRays = []
def set_pulses(self, pulses):
self.Intensity = [el.I0 for el in pulses]
self.dt = [el.dt for el in pulses]
self.pulse_profile = [el.profile for el in pulses]
self.P_pattern = [el(self.P_time) for el in pulses]
if self.p['NDIM'] == 1:
self.P_power = [P_pattern for P_pattern in self.P_pattern]
elif self.p['NDIM'] == 2:
self.P_power = [S0*P_pattern for P_pattern, S0 in zip(self.P_pattern, self.beam_surface)]
else:
raise ValueError
self.Energy = [ np.trapz(P_power, self.P_time) for P_power in self.P_power]
def _get_beams_surface(self):
self.beam_surface = []
self.num_bream = len(self.p['ed_crossSectionFunctionType'])
if self.p['NDIM'] == 1:
return
self.targetSemiAxis = []
for idx, cross_section in enumerate(self.p['ed_crossSectionFunctionType']):
if cross_section == 'gaussian2D':
S0 = Int_super_gaussian(self.p['ed_gaussianRadiusMajor'][idx], 2*self.p['ed_gaussianExponent'][idx])
self.targetSemiAxis.append(self.p['ed_gaussianRadiusMajor'][idx]*self.gaussian_target_ratio)
elif cross_section == 'gaussian1D':
S0 = Int_super_gaussian1D(self.p['ed_gaussianRadiusMajor'][idx], 2*self.p['ed_gaussianExponent'][idx])
self.targetSemiAxis.append(self.p['ed_gaussianRadiusMajor'][idx]*self.gaussian_target_ratio)
else:
raise NotImplementedError
self.beam_surface.append(S0)
def get_pars(self):
if self.p['NDIM'] == 1:
out = {'ed_power': self.P_power,
'ed_time': [self.P_time]*self.num_bream,
'ed_numberOfSections': [len(self.P_time)]*self.num_bream,
'ed_pulseNumber': range(self.num_bream),
'ed_numberOfBeams': self.num_bream,
'ed_numberOfPulses': self.num_bream,
'ed_numberOfRays': [1]*self.num_bream,
'ed_pulseNumber': range(1, self.num_bream+1) # this is very restrictive and needs to be extended
}
elif self.p['NDIM'] == 2:
out = {'ed_power': self.P_power,
'ed_time': [self.P_time]*self.num_bream,
'ed_numberOfSections': [len(self.P_time)]*self.num_bream,
'ed_pulseNumber': range(self.num_bream),
'ed_numberOfBeams': self.num_bream,
'ed_numberOfPulses': self.num_bream,
'ed_targetSemiAxisMajor': self.targetSemiAxis,
'ed_targetSemiAxisMinor': self.targetSemiAxis,
'ed_gridnRadialTics': self.gridnRadialTics,
'ed_numberOfRays': self.numberOfRays,
'ed_pulseNumber': range(1, self.num_bream+1) # this is very restrictive and needs to be extended
}
else:
raise NotImplementedError
return out
def adapt_ray_tracing(self, dx, rays_per_cell=4, radial_ticks_to_rays_factor=8):
"""
This assumes 3D in 2D ray tracing
"""
if self.p['NDIM'] == 1:
self.numberOfRays = [1]*self.num_bream
elif self.p['NDIM'] == 2:
for idx, cross_section in enumerate(self.p['ed_crossSectionFunctionType']):
if cross_section == 'gaussian2D':
self.gridnRadialTics = np.asarray([ rays_per_cell*beam_size/dx for beam_size in self.targetSemiAxis], dtype=np.int)
self.numberOfRays = self.gridnRadialTics*int(radial_ticks_to_rays_factor)
elif cross_section == 'gaussian1D':
self.gridnRadialTics = np.asarray([ rays_per_cell*beam_size/dx for beam_size in self.targetSemiAxis], dtype=np.int)
self.numberOfRays = self.gridnRadialTics*int(radial_ticks_to_rays_factor)
else:
raise NotImplementedError
def __repr__(self):
"""
Pretty print the laser parameters
"""
labels = ['Intensity [W/cm2]',
'Energy [J]',
'Wavelenght [um]',
'Pulse profile',
'Duration [ns]',
'Cross section',
"numberOfRays"
]
dataset = [np.asarray(self.Intensity),
self.Energy,
self.p['ed_wavelength'],
self.pulse_profile,
np.asarray(self.dt)*1e9,
self.p['ed_crossSectionFunctionType'],
self.numberOfRays
]
entry_format = ['{:>10.2e}',
'{:>10.2f}',
'{:>10.3f}',
'{:>10}',
'{:>10.3f}',
'{:>10}',
'{:>10.0f}',
]
if self.p['NDIM'] == 2:
dataset.append( np.asarray(self.p['ed_gaussianRadiusMajor'])*1e4,)
dataset.append( self.p['ed_gaussianExponent'],)
dataset.append( self.gridnRadialTics,)
labels.append( 'FWHM [um]')
labels.append('SG gamma',)
labels.append('nRadialTicks')
entry_format.append( '{:>10.1f}',)
entry_format.append('{:>10.2f}',)
entry_format.append('{:>10.0f}')
out = ['', '='*80, ' '*26 + 'Laser parameters', '='*80]
row_format_labels ="{:18}" + "{:>10}" * self.num_bream
beam_labels = ['Beam {}'.format(idx) for idx in range(self.num_bream)]
out.append(row_format_labels.format('', *beam_labels))
for label, value, fmt in zip(labels, dataset, entry_format):
row_format ="{:18}" + fmt*self.num_bream
#if not isinstance(value, (int, long, float)):
# value = [value]
try:
out.append( row_format.format(label, *value))
except:
#out.append( row_format.format(label, value))
out.append(('Formatting error: {} {} {}'.format(label, value, fmt)))
out += ['='*80, '']
return '\n'.join(out)
| [
"numpy.trapz",
"numpy.asarray",
"numpy.ones",
"numpy.exp",
"numpy.linspace"
] | [((1011, 1032), 'numpy.ones', 'np.ones', (['P_time.shape'], {}), '(P_time.shape)\n', (1018, 1032), True, 'import numpy as np\n'), ((1086, 1142), 'numpy.exp', 'np.exp', (['(-(P_time[mask] - t0) ** 2 / (2 * rise_time ** 2))'], {}), '(-(P_time[mask] - t0) ** 2 / (2 * rise_time ** 2))\n', (1092, 1142), True, 'import numpy as np\n'), ((1257, 1320), 'numpy.exp', 'np.exp', (['(-(P_time[mask] - (t0 + dt)) ** 2 / (2 * rise_time ** 2))'], {}), '(-(P_time[mask] - (t0 + dt)) ** 2 / (2 * rise_time ** 2))\n', (1263, 1320), True, 'import numpy as np\n'), ((1455, 1498), 'numpy.exp', 'np.exp', (['(-(P_time - t0) ** 2 / (2 * dt ** 2))'], {}), '(-(P_time - t0) ** 2 / (2 * dt ** 2))\n', (1461, 1498), True, 'import numpy as np\n'), ((2410, 2434), 'numpy.linspace', 'np.linspace', (['(0)', 't_end', 'N'], {}), '(0, t_end, N)\n', (2421, 2434), True, 'import numpy as np\n'), ((3089, 3119), 'numpy.trapz', 'np.trapz', (['P_power', 'self.P_time'], {}), '(P_power, self.P_time)\n', (3097, 3119), True, 'import numpy as np\n'), ((6932, 6958), 'numpy.asarray', 'np.asarray', (['self.Intensity'], {}), '(self.Intensity)\n', (6942, 6958), True, 'import numpy as np\n'), ((7094, 7113), 'numpy.asarray', 'np.asarray', (['self.dt'], {}), '(self.dt)\n', (7104, 7113), True, 'import numpy as np\n'), ((7576, 7620), 'numpy.asarray', 'np.asarray', (["self.p['ed_gaussianRadiusMajor']"], {}), "(self.p['ed_gaussianRadiusMajor'])\n", (7586, 7620), True, 'import numpy as np\n'), ((6008, 6110), 'numpy.asarray', 'np.asarray', (['[(rays_per_cell * beam_size / dx) for beam_size in self.targetSemiAxis]'], {'dtype': 'np.int'}), '([(rays_per_cell * beam_size / dx) for beam_size in self.\n targetSemiAxis], dtype=np.int)\n', (6018, 6110), True, 'import numpy as np\n'), ((6290, 6392), 'numpy.asarray', 'np.asarray', (['[(rays_per_cell * beam_size / dx) for beam_size in self.targetSemiAxis]'], {'dtype': 'np.int'}), '([(rays_per_cell * beam_size / dx) for beam_size in self.\n targetSemiAxis], dtype=np.int)\n', (6300, 6392), 
True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A class that talks directly to the telnet console of the Android emulator.
NOTE: This class will be deprecated in favour of gRPC-based communication.
"""
import os
import telnetlib
import threading
import time
from typing import List, Optional
import uuid
from absl import logging
from android_env.components import errors
from android_env.proto import raw_observation_pb2
import numpy as np
class _FifoReader(threading.Thread):
"""A thread which reads from a Unix pipe.
This thread is meant to run indefinitely, consuming from `fifo` and providing
observations via `latest_observation()`.
Any exceptions that are caught in `run()` are forwarded to
`latest_exception()` and then execution is terminated.
Users of this thread may call `stop()` to set a signal on
`self._terminate_event`, which is checked periodically by this thread to end
execution, but the `f.read()` call below may get stuck indefinitely causing
this thread to block until the whole process is terminated. In this case, no
CPU will be used, but a file descriptor will be consumed (from the `open()`
call) and threading state will linger until the process dies.
This thread was designed to terminate when facing possibly recoverable errors
allowing its caller thread to time out when waiting on `data_ready()`, then
optionally spawning a new thread to continue the work.
"""
def __init__(self, fifo=str):
super(_FifoReader, self).__init__()
self._fifo = fifo
self._latest_observation = None
self._latest_exception = None
self._data_ready = threading.Condition()
self._terminate_event = threading.Event()
def stop(self) -> None:
self._terminate_event.set()
def data_ready(self) -> threading.Condition:
"""Returns a condition variable that protects shared state."""
return self._data_ready
def latest_observation(self) -> List[np.ndarray]:
return self._latest_observation
def latest_exception(self) -> Exception:
return self._latest_exception
def run(self):
while True:
# Check if the caller thread asked this thread to stop running.
if self._terminate_event.is_set():
self._terminate_event.clear()
return
# Read the data from the pipe.
raw_obs = None
with open(self._fifo, 'rb') as f:
data = []
# Read data from the pipe in chunks.
while True:
# The call `f.read()` may block forever for all sorts of reasons, and
# unfortunately Python does not allow specifying a timeout and there's
# no good way to clean up this thread. When that occurs, the client of
# this thread will timeout when reading from `output`.
try:
chunk = f.read()
except Exception as e: # pylint: disable=broad-except
# It's nearly impossible to be exhaustive here so we use a generic
# Exception to catch all errors, not only known ones such as IOError
# and OSError,
with self._data_ready:
self._latest_exception = e
self._data_ready.notify()
return
if not chunk: # Writer closed the pipe.
break
data.append(chunk)
data = b''.join(
data) # Joining is much faster than string concatenation.
if not data:
# Not having data here is abnormal, so terminate execution.
with self._data_ready:
self._latest_exception = errors.ObservationDecodingError(
'No data from pipe.')
self._data_ready.notify()
return
try:
raw_obs = raw_observation_pb2.RawObservation.FromString(data)
if (raw_obs.screen.height <= 0 or raw_obs.screen.width <= 0 or
raw_obs.screen.num_channels <= 0):
with self._data_ready:
self._latest_exception = errors.ObservationDecodingError(
f'height: {raw_obs.screen.height} '
f'width: {raw_obs.screen.width} '
f'num_channels: {raw_obs.screen.num_channels} '
f'len(data): {len(data)}')
self._data_ready.notify()
return
except: # pylint: disable=bare-except
with self._data_ready:
self._latest_exception = errors.ObservationDecodingError(
f'len(data): {len(data)}')
self._data_ready.notify()
return
if not raw_obs:
with self._data_ready:
self._latest_exception = errors.ObservationDecodingError(
f'No data in {self._fifo}')
self._data_ready.notify()
return
screen = raw_obs.screen
img = np.frombuffer(screen.data, dtype=np.uint8, count=len(screen.data))
img.shape = (screen.height, screen.width, screen.num_channels)
# Delete the 'Alpha' channel along the 'num_channels' axis
img = np.delete(img, 3, 2)
obs = [img, np.int64(raw_obs.timestamp_us)]
with self._data_ready:
self._latest_observation = obs
self._data_ready.notify()
class EmulatorConsole():
  """Handles communication with the emulator via its telnet console.

  Screenshots are requested with the `screenrecord screenshot` console
  command and streamed back through a named pipe (FIFO) which a background
  _FifoReader thread drains.
  """
  def __init__(self,
               console_port: int,
               auth_code: str = '',
               tmp_dir: str = '/tmp',
               pipe_read_timeout_sec: float = 20.0):
    """Initializes this EmulatorConsole.
    Args:
      console_port: Port on localhost where the emulator console listens.
      auth_code: Console auth token; if empty it is read from
        ~/.emulator_console_auth_token.
      tmp_dir: Directory in which the screenshot FIFO is created.
      pipe_read_timeout_sec: Maximum amount of time in seconds to wait for
        reading data from a pipe.
    """
    self._console_port = console_port
    self._tmp_dir = tmp_dir
    self._pipe_read_timeout_sec = pipe_read_timeout_sec
    self._read_thread = None
    self._setup_fifo()
    self._connect()
    self._authenticate_to_console(auth_code)
    # Background daemon thread that drains the FIFO and decodes observations.
    self._read_thread = _FifoReader(fifo=self._fifo)
    self._read_thread.daemon = True
    self._read_thread.start()
  def close(self):
    """Closes the console connection, stops the reader and removes the FIFO."""
    self._connection.close()
    self._read_thread.stop()
    # BUGFIX: os.path.isfile() is False for FIFOs (they are not regular
    # files), so the previous check never removed the pipe; os.path.exists()
    # correctly detects it.
    if os.path.exists(self._fifo):
      os.remove(self._fifo)
  def fetch_screenshot(self) -> Optional[List[np.ndarray]]:
    """Returns the observation via telnet through a pipe.
    This makes use of a feature in the AndroidEmulator
    (https://android-review.googlesource.com/c/platform/external/qemu/+/891716)
    that saves screenshots as a binary protobuf instead of a compressed PNG,
    greatly improving the performance and latency.
    Returns: Observation
    Raises:
      errors.ReadObservationError: if the observation could not be read.
    """
    # Ask the emulator for a screenshot.
    self._connection.write(b'screenrecord screenshot %s\n' %
                           self._fifo.encode('utf-8'))
    with self._read_thread.data_ready():
      # Check for outstanding errors before waiting.
      if self._read_thread.latest_exception():
        raise self._read_thread.latest_exception()
      if self._read_thread.data_ready().wait(
          timeout=self._pipe_read_timeout_sec):
        # Check for errors while reading observations.
        if self._read_thread.latest_exception():
          raise self._read_thread.latest_exception()
        # Check if the observation was successfully read.
        if self._read_thread.latest_observation():
          return self._read_thread.latest_observation()
        else:
          raise errors.ObservationDecodingError(
              'No observation from reader thread.')
      else:  # Timed out.
        # _read_fifo is stuck, so we spawn a new thread.
        self._read_thread = _FifoReader(fifo=self._fifo)
        self._read_thread.daemon = True
        self._read_thread.start()
        raise errors.PipeTimedOutError()
  def send_mouse_action(self, x: str, y: str, down: bool = True) -> None:
    """Sends mouse events via the emulator telnet console connection.
    This functionality is already available in the emulator and is relatively
    fast. It sends a "one-finger" touch event to the screen (i.e. it does not
    support multitouch).
    Args:
      x: Integer The absolute value for the x-coordinate.
      y: Integer The absolute value for the y-coordinate.
      down: Boolean Whether the button is down.
    Returns: None
    """
    self._connection.write(
        ('event mouse %s %s 0 %s\n' %
         (int(x), int(y), '1' if down else '0')).encode('utf-8'))
  def _setup_fifo(self):
    """Creates a named pipe for receiving images from the console."""
    self._fifo = os.path.join(self._tmp_dir,
                              'screenshot_pipe-%s.pb' % uuid.uuid4())
    # BUGFIX: use exists() rather than isfile() -- a FIFO is not a regular
    # file, so isfile() would never detect (and remove) a leftover pipe.
    if os.path.exists(self._fifo):  # Remove it before trying to make a new one.
      os.remove(self._fifo)
    # The following call may raise OSError if it can't create the FIFO, but we
    # do not want to catch it because it may hide other more serious errors.
    # Because we're executing this at the start of the server, we prefer to fail
    # fast and loud.
    os.mkfifo(self._fifo)
  def _connect(self):
    """Connects to the emulator console, retrying refused connections."""
    logging.info('Connecting to Emulator console on port %s...',
                 self._console_port)
    num_connection_attempts = 3
    connected = False
    retries = 0
    while not connected:
      try:
        self._connection = telnetlib.Telnet('localhost', self._console_port)
        connected = True
      except ConnectionRefusedError:
        retries += 1
        if retries >= num_connection_attempts:
          raise errors.ConsoleConnectionError()
        logging.error('Console connection refused, retrying in 5 seconds.')
        time.sleep(5)
    logging.info('Done connecting to Emulator console on port %s.',
                 self._console_port)
  def _authenticate_to_console(self, auth_code):
    """Authenticates the session; reads the token from disk if none given."""
    logging.info('Authenticating to console.')
    if not auth_code:
      with open(os.path.expanduser('~/.emulator_console_auth_token')) as f:
        auth_code = f.read()
    self._connection.write(b'auth %s\n' %
                           auth_code.encode('utf-8'))  # Authenticate session.
    self._connection.read_until(b'OK', timeout=5)  # Look for 'OK' for 5s.
| [
"absl.logging.error",
"os.remove",
"uuid.uuid4",
"android_env.proto.raw_observation_pb2.RawObservation.FromString",
"android_env.components.errors.ConsoleConnectionError",
"android_env.components.errors.ObservationDecodingError",
"threading.Condition",
"time.sleep",
"absl.logging.info",
"os.path.i... | [((2184, 2205), 'threading.Condition', 'threading.Condition', ([], {}), '()\n', (2203, 2205), False, 'import threading\n'), ((2234, 2251), 'threading.Event', 'threading.Event', ([], {}), '()\n', (2249, 2251), False, 'import threading\n'), ((6652, 6678), 'os.path.isfile', 'os.path.isfile', (['self._fifo'], {}), '(self._fifo)\n', (6666, 6678), False, 'import os\n'), ((9227, 9253), 'os.path.isfile', 'os.path.isfile', (['self._fifo'], {}), '(self._fifo)\n', (9241, 9253), False, 'import os\n'), ((9592, 9613), 'os.mkfifo', 'os.mkfifo', (['self._fifo'], {}), '(self._fifo)\n', (9601, 9613), False, 'import os\n'), ((9685, 9770), 'absl.logging.info', 'logging.info', (['"""Connecting to Emulator console on port %s..."""', 'self._console_port'], {}), "('Connecting to Emulator console on port %s...', self._console_port\n )\n", (9697, 9770), False, 'from absl import logging\n'), ((10246, 10334), 'absl.logging.info', 'logging.info', (['"""Done connecting to Emulator console on port %s."""', 'self._console_port'], {}), "('Done connecting to Emulator console on port %s.', self.\n _console_port)\n", (10258, 10334), False, 'from absl import logging\n'), ((10401, 10443), 'absl.logging.info', 'logging.info', (['"""Authenticating to console."""'], {}), "('Authenticating to console.')\n", (10413, 10443), False, 'from absl import logging\n'), ((5531, 5551), 'numpy.delete', 'np.delete', (['img', '(3)', '(2)'], {}), '(img, 3, 2)\n', (5540, 5551), True, 'import numpy as np\n'), ((6686, 6707), 'os.remove', 'os.remove', (['self._fifo'], {}), '(self._fifo)\n', (6695, 6707), False, 'import os\n'), ((9307, 9328), 'os.remove', 'os.remove', (['self._fifo'], {}), '(self._fifo)\n', (9316, 9328), False, 'import os\n'), ((5570, 5600), 'numpy.int64', 'np.int64', (['raw_obs.timestamp_us'], {}), '(raw_obs.timestamp_us)\n', (5578, 5600), True, 'import numpy as np\n'), ((8322, 8348), 'android_env.components.errors.PipeTimedOutError', 'errors.PipeTimedOutError', ([], {}), '()\n', (8346, 
8348), False, 'from android_env.components import errors\n'), ((9206, 9218), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (9216, 9218), False, 'import uuid\n'), ((9916, 9965), 'telnetlib.Telnet', 'telnetlib.Telnet', (['"""localhost"""', 'self._console_port'], {}), "('localhost', self._console_port)\n", (9932, 9965), False, 'import telnetlib\n'), ((4255, 4306), 'android_env.proto.raw_observation_pb2.RawObservation.FromString', 'raw_observation_pb2.RawObservation.FromString', (['data'], {}), '(data)\n', (4300, 4306), False, 'from android_env.proto import raw_observation_pb2\n'), ((5149, 5208), 'android_env.components.errors.ObservationDecodingError', 'errors.ObservationDecodingError', (['f"""No data in {self._fifo}"""'], {}), "(f'No data in {self._fifo}')\n", (5180, 5208), False, 'from android_env.components import errors\n'), ((8009, 8078), 'android_env.components.errors.ObservationDecodingError', 'errors.ObservationDecodingError', (['"""No observation from reader thread."""'], {}), "('No observation from reader thread.')\n", (8040, 8078), False, 'from android_env.components import errors\n'), ((10152, 10219), 'absl.logging.error', 'logging.error', (['"""Console connection refused, retrying in 5 seconds."""'], {}), "('Console connection refused, retrying in 5 seconds.')\n", (10165, 10219), False, 'from absl import logging\n'), ((10228, 10241), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (10238, 10241), False, 'import time\n'), ((10482, 10534), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.emulator_console_auth_token"""'], {}), "('~/.emulator_console_auth_token')\n", (10500, 10534), False, 'import os\n'), ((4095, 4148), 'android_env.components.errors.ObservationDecodingError', 'errors.ObservationDecodingError', (['"""No data from pipe."""'], {}), "('No data from pipe.')\n", (4126, 4148), False, 'from android_env.components import errors\n'), ((10112, 10143), 'android_env.components.errors.ConsoleConnectionError', 'errors.ConsoleConnectionError', ([], 
{}), '()\n', (10141, 10143), False, 'from android_env.components import errors\n')] |
# Author: <NAME>
# Time: 2021-07-31
import numpy as np
from log import get_logger
# Module-level logger shared by the HMM class below.
logger = get_logger()
class HMM:
    """Hidden Markov Model with forward, backward and Viterbi algorithms.

    Conventions (plain nested lists, as used by the code below):
        A:  N x N state-transition matrix; columns are read as
            ``[a[i] for a in A]``, i.e. A[j][i] = P(next state i | state j).
        B:  N x K emission matrix; B[i][k] = P(symbol k | state i).
        pi: initial state distribution wrapped in an outer list (1 x N),
            accessed as pi[0][i].
        Q:  list of the N states; V: list of the K observation symbols;
        O:  observed symbol sequence of length T.
    """
    def __init__(self, A=None, B=None, pi=None):
        self.A = A
        self.B = B
        self.pi = pi
        # Filled in by the algorithms below.
        self.forward_p = None    # total P(O) from the forward pass
        self.forward = None      # unused; forward_algo stores self.alphas instead
        self.betas = None        # backward variables (set by backward_algo)
        self.backward_p = None   # total P(O) from the backward pass
    def set_param(self, A, B, pi):
        """Replace the model parameters (same conventions as __init__)."""
        self.A = A
        self.B = B
        self.pi = pi
    # Forward algorithm
    def forward_algo(self, Q, V, O):
        """Compute P(O) via the forward recursion.

        Builds alphas[i][t] = P(o_1..o_t, state_t = Q[i]); stores the total
        probability in self.forward_p and the table in self.alphas.
        """
        logger.info("forward algrithm")
        N = len(Q)
        M = len(O)
        T = M
        logger.debug("N: {}, M: {}, T: {}".format(N, M, T))
        alphas = np.zeros((N, M))
        for t in range(T):
            indexofO = V.index(O[t])  # column of B for the observed symbol
            for i in range(N):
                if t > 0:
                    # Recursion: sum_j alpha_j(t-1) * A[j][i], times emission.
                    alphas[i][t] = np.dot([alpha[t-1] for alpha in alphas], [a[i] for a in self.A]) * self.B[i][indexofO]
                else:
                    # Initialization: pi_i * b_i(o_1).
                    alphas[i][t] = self.B[i][indexofO] * self.pi[t][i]
                logger.debug("forward t: {}, i: {}, alphas[{}][{}]={}".format(t, i, i, t, alphas[i][t]))
        # P(O) = sum_i alpha_i(T).
        p = np.sum([alpha[T-1] for alpha in alphas])
        self.forward_p = p
        logger.info("forward final p: {}".format(p))
        self.alphas = alphas
    # Backward algorithm
    def backward_algo(self, Q, V, O):
        """Compute P(O) via the backward recursion.

        Builds betas[i][t] = P(o_{t+1}..o_T | state_t = Q[i]) (initialized
        to 1 at t = T); stores the table in self.betas and the total
        probability in self.backward_p.
        """
        N = len(Q)
        M = len(O)
        T = M
        betas = np.ones((N, M))
        for t in range(T-2, -1, -1):
            indexofO = V.index(O[t+1])  # symbol emitted at the *next* step
            for i in range(N):
                betas[i][t] = np.dot(np.multiply(self.A[i], [b[indexofO] for b in self.B]), [beta[t+1] for beta in betas])
                logger.debug("backward t: {}, i: {}, betas[{}][{}]={}".format(t, i, i, t, betas[i][t]))
        indexofO = V.index(O[0])
        self.betas = betas
        # P(O) = sum_i pi_i * b_i(o_1) * beta_i(1).
        p = np.dot(np.multiply(self.pi, [b[indexofO] for b in self.B]), [beta[0] for beta in betas])
        self.backward_p = p
        logger.info("backward final p: {}".format(p))
    # Viterbi algorithm
    def viterbi(self, Q, V, O):
        """Find the most probable state path for O (logged, 1-based).

        deltas[i][t] is the best-path probability ending in state i at t;
        psis[i][t] is the 0-based argmax backpointer; I holds the decoded
        (0-based) path, printed 1-based in the final log message.
        """
        N = len(Q)
        M = len(O)
        T = M
        deltas = np.zeros((N, M))
        psis = np.zeros((N, M))
        I = np.zeros((1, M))
        for t in range(T):
            indexofO = V.index(O[t])
            for i in range(N):
                if t == 0:
                    deltas[i][t] = self.pi[t][i] * self.B[i][indexofO]
                    psis[i][t] = 0
                else:
                    # max over previous states of delta_j(t-1) * A[j][i].
                    deltas[i][t] = np.max(np.multiply([delta[t-1] for delta in deltas], [a[i] for a in self.A])) * self.B[i][indexofO]
                    psis[i][t] = np.argmax(np.multiply([delta[t-1] for delta in deltas], [a[i] for a in self.A]))
                logger.debug("i: {}, t: {}, delta[{}][{}]: {}, psis[{}][{}]: {}".format(i, t, i, t, deltas[i][t], i, t, psis[i][t]))
        # Backtrack from the best final state.
        I[0][T-1] = np.argmax([delta[T-1] for delta in deltas])
        for t in range(T-2, -1, -1):
            I[0][t] = psis[int(I[0][t+1])][t+1]
            logger.debug("i{} = psis{} * i{} = {}".format(t+1, t+2, t+2, I[0][t]+1))
        logger.info("best path: {}".format('->'.join(str(int(i+1)) for i in I[0])))
    # EM algorithm (Baum-Welch) -- not implemented yet.
    def fit(self):
        pass
    # E step
    def _do_estep(self):
        pass
    # M step
    def _do_mstep(self):
        pass
if __name__ == '__main__':
    # Demo: 3 hidden states, observation symbols '红' (red) / '白' (white).
    Q = [1, 2, 3]
    V = ['红', '白']
    A = [[0.5, 0.2, 0.3], [0.3, 0.5, 0.2], [0.2, 0.3, 0.5]]
    B = [[0.5, 0.5], [0.4, 0.6], [0.7, 0.3]]
    # O = ['红', '白', '红', '红', '白', '红', '白', '白']
    # O = ['红', '白', '红', '白'] # example from Exercise 10.1
    O = ['红', '白', '红'] # example from Exercise 10.3
    PI = [[0.2, 0.4, 0.4]]
    hmm = HMM(A=A, B=B, pi=PI)
    hmm.forward_algo(Q, V, O)
    hmm.backward_algo(Q, V, O)
    hmm.viterbi(Q, V, O)
| [
"numpy.sum",
"numpy.multiply",
"numpy.argmax",
"numpy.zeros",
"numpy.ones",
"log.get_logger",
"numpy.dot"
] | [((93, 105), 'log.get_logger', 'get_logger', ([], {}), '()\n', (103, 105), False, 'from log import get_logger\n'), ((654, 670), 'numpy.zeros', 'np.zeros', (['(N, M)'], {}), '((N, M))\n', (662, 670), True, 'import numpy as np\n'), ((1124, 1166), 'numpy.sum', 'np.sum', (['[alpha[T - 1] for alpha in alphas]'], {}), '([alpha[T - 1] for alpha in alphas])\n', (1130, 1166), True, 'import numpy as np\n'), ((1401, 1416), 'numpy.ones', 'np.ones', (['(N, M)'], {}), '((N, M))\n', (1408, 1416), True, 'import numpy as np\n'), ((2115, 2131), 'numpy.zeros', 'np.zeros', (['(N, M)'], {}), '((N, M))\n', (2123, 2131), True, 'import numpy as np\n'), ((2147, 2163), 'numpy.zeros', 'np.zeros', (['(N, M)'], {}), '((N, M))\n', (2155, 2163), True, 'import numpy as np\n'), ((2176, 2192), 'numpy.zeros', 'np.zeros', (['(1, M)'], {}), '((1, M))\n', (2184, 2192), True, 'import numpy as np\n'), ((2845, 2890), 'numpy.argmax', 'np.argmax', (['[delta[T - 1] for delta in deltas]'], {}), '([delta[T - 1] for delta in deltas])\n', (2854, 2890), True, 'import numpy as np\n'), ((1832, 1883), 'numpy.multiply', 'np.multiply', (['self.pi', '[b[indexofO] for b in self.B]'], {}), '(self.pi, [b[indexofO] for b in self.B])\n', (1843, 1883), True, 'import numpy as np\n'), ((1562, 1615), 'numpy.multiply', 'np.multiply', (['self.A[i]', '[b[indexofO] for b in self.B]'], {}), '(self.A[i], [b[indexofO] for b in self.B])\n', (1573, 1615), True, 'import numpy as np\n'), ((827, 893), 'numpy.dot', 'np.dot', (['[alpha[t - 1] for alpha in alphas]', '[a[i] for a in self.A]'], {}), '([alpha[t - 1] for alpha in alphas], [a[i] for a in self.A])\n', (833, 893), True, 'import numpy as np\n'), ((2621, 2692), 'numpy.multiply', 'np.multiply', (['[delta[t - 1] for delta in deltas]', '[a[i] for a in self.A]'], {}), '([delta[t - 1] for delta in deltas], [a[i] for a in self.A])\n', (2632, 2692), True, 'import numpy as np\n'), ((2485, 2556), 'numpy.multiply', 'np.multiply', (['[delta[t - 1] for delta in deltas]', '[a[i] for a in 
self.A]'], {}), '([delta[t - 1] for delta in deltas], [a[i] for a in self.A])\n', (2496, 2556), True, 'import numpy as np\n')] |
"""This module contains tests for the integration function of PySim
"""
from unittest import TestCase
import numpy as np
from numpy import cos, sin, sqrt
from pysim.simulation import Sim
from pysim.simulation import Runge_Kutta_4
from pysim.simulation import Cash_Karp
from pysim.simulation import Dormand_Prince_5
from pysim.systems import MassSpringDamper
from pysim.systems.python_systems import MassSpringDamper as PyMassSpringDamper
__copyright__ = 'Copyright (c) 2014-2016 SSPA Sweden AB'
def solve_msd_analytically(t):
    """Return the analytical solution of the mass-spring-damper ODE at times t.

    The system is m*x'' + c*x' + k*x = 0 with m=100, k=50, c=1, x(0)=1,
    which is under-damped (zeta < 1).

    Args:
        t: scalar or numpy array of time points.

    Returns:
        Displacement x(t), same shape as t.
    """
    mass = 100
    springk = 50
    damping = 1
    x0 = 1
    # Damping ratio and undamped natural frequency.
    zeta = damping/(2*sqrt(mass*springk))
    omega0 = sqrt(springk/mass)
    # The closed-form solution below is only valid for an under-damped system.
    assert zeta < 1
    omegad = omega0*sqrt(1-zeta**2)
    A = x0
    B = (zeta*omega0*x0)/omegad
    # np.exp(x) is clearer and more direct than np.power(np.e, x).
    y = np.exp(-zeta*omega0*t)*(A*cos(omegad*t)+B*sin(omegad*t))
    return y
class IntegrationTest(TestCase):
    """Integration accuracy tests for an under-damped mass-spring-damper.

    The differential equation has a known closed-form solution; each test
    integrates the system with a particular solver and checks the numerical
    result against that analytical reference.
    """
    def setUp(self):
        self.sim = Sim()
        self.sys = MassSpringDamper()
        self.sys.store("x1")
        self.sim.add_system(self.sys)
        self.integrationlength = 50
        self.timestep = 0.1
    def compare_to_analytical(self, tolerance):
        """Assert that the stored result deviates from the analytical
        solution by less than tolerance at every time point.
        """
        analytic = solve_msd_analytically(self.sys.res.time)
        maxerror = np.abs(self.sys.res.x1 - analytic).max()
        print(maxerror)
        assert maxerror < tolerance
    def test_default(self):
        """The default solver (a Runge-Kutta scheme) should be accurate."""
        self.sim.simulate(self.integrationlength, self.timestep)
        self.compare_to_analytical(1e-5)
    def test_rk4(self):
        """Accuracy of the explicit Runge-Kutta 4 solver."""
        self.sim.simulate(self.integrationlength, self.timestep, Runge_Kutta_4)
        self.compare_to_analytical(1e-5)
    def test_dp5(self):
        """Dormand-Prince 5 with default dense output and the default
        relative error of 10^-4.
        """
        self.sim.simulate(self.integrationlength, self.timestep,
                          Dormand_Prince_5())
        self.compare_to_analytical(1e-3)
    def test_dp5_high_accuracy(self):
        """Dormand-Prince 5 with a tightened relative error of 10^-6,
        using the default dense output.
        """
        solver = Dormand_Prince_5()
        solver.relative_error = 1e-6
        self.sim.simulate(self.integrationlength, self.timestep, solver)
        self.compare_to_analytical(1e-5)
    def test_dp5_dense_timediff(self):
        """Timesteps reported by Dormand-Prince must be evenly spaced."""
        self.sim.simulate(self.integrationlength, self.timestep,
                          Dormand_Prince_5())
        spacing_error = np.abs(np.diff(self.sys.res.time) - self.timestep)
        assert spacing_error.max() < 1e-14
    def test_ck(self):
        """Cash-Karp with its default relative error of 10^-4."""
        self.sim.simulate(self.integrationlength, self.timestep, Cash_Karp())
        self.compare_to_analytical(1e-2)
    def test_ck_high_accuracy(self):
        """Cash-Karp with a tightened relative error control."""
        solver = Cash_Karp()
        solver.relative_error = 1e-8
        self.sim.simulate(self.integrationlength, self.timestep, solver)
        self.compare_to_analytical(1e-7)
class CythonIntegrationTest(IntegrationTest):
    """Runs the inherited accuracy tests against the pure-Python
    MassSpringDamper implementation (pysim.systems.python_systems), so both
    system variants are compared with the analytical solution.
    """
    def setUp(self):
        self.sim = Sim()
        self.sys = PyMassSpringDamper()
        self.sys.store("x1")
        self.sim.add_system(self.sys)
        self.integrationlength = 50
        self.timestep = 0.1
| [
"pysim.systems.MassSpringDamper",
"numpy.abs",
"numpy.power",
"numpy.max",
"numpy.diff",
"pysim.simulation.Cash_Karp",
"numpy.sin",
"numpy.cos",
"pysim.simulation.Sim",
"pysim.systems.python_systems.MassSpringDamper",
"pysim.simulation.Dormand_Prince_5",
"numpy.sqrt"
] | [((778, 798), 'numpy.sqrt', 'sqrt', (['(springk / mass)'], {}), '(springk / mass)\n', (782, 798), False, 'from numpy import cos, sin, sqrt\n'), ((923, 942), 'numpy.sqrt', 'sqrt', (['(1 - zeta ** 2)'], {}), '(1 - zeta ** 2)\n', (927, 942), False, 'from numpy import cos, sin, sqrt\n'), ((991, 1029), 'numpy.power', 'np.power', (['np.e', '(-1 * zeta * omega0 * t)'], {}), '(np.e, -1 * zeta * omega0 * t)\n', (999, 1029), True, 'import numpy as np\n'), ((1369, 1374), 'pysim.simulation.Sim', 'Sim', ([], {}), '()\n', (1372, 1374), False, 'from pysim.simulation import Sim\n'), ((1394, 1412), 'pysim.systems.MassSpringDamper', 'MassSpringDamper', ([], {}), '()\n', (1410, 1412), False, 'from pysim.systems import MassSpringDamper\n'), ((2749, 2767), 'pysim.simulation.Dormand_Prince_5', 'Dormand_Prince_5', ([], {}), '()\n', (2765, 2767), False, 'from pysim.simulation import Dormand_Prince_5\n'), ((3105, 3123), 'pysim.simulation.Dormand_Prince_5', 'Dormand_Prince_5', ([], {}), '()\n', (3121, 3123), False, 'from pysim.simulation import Dormand_Prince_5\n'), ((3448, 3466), 'pysim.simulation.Dormand_Prince_5', 'Dormand_Prince_5', ([], {}), '()\n', (3464, 3466), False, 'from pysim.simulation import Dormand_Prince_5\n'), ((3555, 3581), 'numpy.diff', 'np.diff', (['self.sys.res.time'], {}), '(self.sys.res.time)\n', (3562, 3581), True, 'import numpy as np\n'), ((3603, 3631), 'numpy.abs', 'np.abs', (['(diff - self.timestep)'], {}), '(diff - self.timestep)\n', (3609, 3631), True, 'import numpy as np\n'), ((3841, 3852), 'pysim.simulation.Cash_Karp', 'Cash_Karp', ([], {}), '()\n', (3850, 3852), False, 'from pysim.simulation import Cash_Karp\n'), ((4185, 4196), 'pysim.simulation.Cash_Karp', 'Cash_Karp', ([], {}), '()\n', (4194, 4196), False, 'from pysim.simulation import Cash_Karp\n'), ((4545, 4550), 'pysim.simulation.Sim', 'Sim', ([], {}), '()\n', (4548, 4550), False, 'from pysim.simulation import Sim\n'), ((4570, 4590), 'pysim.systems.python_systems.MassSpringDamper', 
'PyMassSpringDamper', ([], {}), '()\n', (4588, 4590), True, 'from pysim.systems.python_systems import MassSpringDamper as PyMassSpringDamper\n'), ((745, 765), 'numpy.sqrt', 'sqrt', (['(mass * springk)'], {}), '(mass * springk)\n', (749, 765), False, 'from numpy import cos, sin, sqrt\n'), ((1934, 1977), 'numpy.abs', 'np.abs', (['(integrated_result - analytic_result)'], {}), '(integrated_result - analytic_result)\n', (1940, 1977), True, 'import numpy as np\n'), ((3645, 3663), 'numpy.max', 'np.max', (['absdifferr'], {}), '(absdifferr)\n', (3651, 3663), True, 'import numpy as np\n'), ((1027, 1042), 'numpy.cos', 'cos', (['(omegad * t)'], {}), '(omegad * t)\n', (1030, 1042), False, 'from numpy import cos, sin, sqrt\n'), ((1043, 1058), 'numpy.sin', 'sin', (['(omegad * t)'], {}), '(omegad * t)\n', (1046, 1058), False, 'from numpy import cos, sin, sqrt\n')] |
from lxml import etree
import numpy as np
import pandas as pd
import re
from sklearn.model_selection import train_test_split
import Bio
from Bio import SeqIO
from pathlib import Path
import glob
#console
from tqdm import tqdm as tqdm
import re
import os
import itertools
#jupyter
#from tqdm import tqdm_notebook as tqdm
#not supported in current tqdm version
#from tqdm.autonotebook import tqdm
#import logging
#logging.getLogger('proteomics_utils').addHandler(logging.NullHandler())
#logger=logging.getLogger('proteomics_utils')
#for cd-hit
import subprocess
from sklearn.metrics import f1_score
import hashlib #for mhcii datasets
from utils.dataset_utils import split_clusters_single,pick_all_members_from_clusters
#######################################################################################################
#Parsing all sorts of protein data
#######################################################################################################
def parse_uniprot_xml(filename,max_entries=0,parse_features=[]):
    '''parse uniprot xml file, which contains the full uniprot information (e.g. ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.xml.gz)
    using custom low-level https://www.ibm.com/developerworks/xml/library/x-hiperfparse/
    c.f. for full format https://www.uniprot.org/docs/uniprot.xsd

    max_entries: only parse the first max_entries records (0 = all)
    parse_features: a list of strings specifying the kind of features to be parsed such as "modified residue" for phosphorylation sites etc. (see https://www.uniprot.org/help/mod_res)
    (see the xsd file for all possible entries)

    returns a DataFrame indexed by primary accession (ID)
    '''
    # Stream <entry> elements one at a time instead of loading the whole tree.
    context = etree.iterparse(str(filename), events=["end"], tag="{http://uniprot.org/uniprot}entry")
    context = iter(context)
    rows =[]
    for _, elem in tqdm(context):
        parse_func_uniprot(elem,rows,parse_features=parse_features)
        # Free memory as we go: clear the processed element and delete
        # already-consumed preceding siblings (the hiperfparse pattern;
        # required to keep memory bounded on multi-GB xml files).
        elem.clear()
        while elem.getprevious() is not None:
            del elem.getparent()[0]
        if(max_entries > 0 and len(rows)==max_entries):
            break
    df=pd.DataFrame(rows).set_index("ID")
    df['name'] = df.name.astype(str)
    df['dataset'] = df.dataset.astype('category')
    df['organism'] = df.organism.astype('category')
    df['sequence'] = df.sequence.astype(str)
    return df
def parse_func_uniprot(elem, rows, parse_features=()):
    '''Extract a single record from a uniprot xml <entry> element.

    Appends a dict to rows with keys ID (primary accession), name, dataset,
    proteinexistence, fragment, organism, ecs, pdbs, pfams, keywords, gos,
    sequence and - if parse_features is non-empty - features.
    '''
    ns = "{http://uniprot.org/uniprot}"

    # Sequence & fragment flag (0 = not a fragment).
    # NOTE: the original had a second, earlier loop computing `sequence`
    # whose result was discarded; that dead code has been removed.
    fragment_map = {"single": 1, "multiple": 2}
    fragment = 0
    sequence = ""
    for s in elem.findall(ns + "sequence"):
        if 'fragment' in s.attrib:
            fragment = fragment_map[s.attrib["fragment"]]
        sequence = s.text
        if sequence != "":
            break

    # dataset: Swiss-Prot or TrEMBL.
    dataset = elem.attrib["dataset"]

    # Primary accession = the first <accession>
    # (https://www.uniprot.org/help/accession_numbers).
    accession = ""
    for a in elem.findall(ns + "accession"):
        accession = a.text
        if accession != "":
            break

    # Protein existence (PE line in the plain-text format).
    proteinexistence_map = {"evidence at protein level": 5,
                            "evidence at transcript level": 4,
                            "inferred from homology": 3,
                            "predicted": 2,
                            "uncertain": 1}
    proteinexistence = -1
    for a in elem.findall(ns + "proteinExistence"):
        proteinexistence = proteinexistence_map[a.attrib["type"]]
        break

    # Entry name (mnemonic).
    name = ""
    for n in elem.findall(ns + "name"):
        name = n.text
        break

    # Scientific organism name.
    organism = ""
    for o in elem.findall(ns + "organism"):
        for oname in o.findall(ns + "name"):
            if oname.attrib["type"] == 'scientific':
                organism = oname.text
                break
        if organism != "":
            break

    # dbReference entries: GO terms (with ECO evidence), Pfam, EC, PDB.
    # cf. http://geneontology.org/external2go/uniprotkb_kw2go for the
    # Uniprot Keyword<->GO mapping and https://www.uniprot.org/help/keywords_vs_go
    pfams, gos, ecs, pdbs = [], [], [], []
    for ref in elem.findall(ns + "dbReference"):
        ref_type = ref.attrib["type"]
        if ref_type == "GO":
            go_id = ref.attrib["id"]
            # BUGFIX: the original appended after this loop using a variable
            # that was stale (or unbound) when no evidence property was
            # present; now a GO term is only recorded with its own evidence.
            evidence = None
            for prop in ref:
                if prop.attrib.get("type") == "evidence":
                    evidence = prop.attrib["value"]
            if evidence is not None:
                # [numeric GO id, numeric ECO evidence id]
                gos.append([int(go_id[3:]), int(evidence[4:])])
        elif ref_type == "Pfam":
            pfams.append(ref.attrib["id"])
        elif ref_type == "EC":
            ecs.append(ref.attrib["id"])
        elif ref_type == "PDB":
            pdbs.append(ref.attrib["id"])

    # Keywords: keep only the numeric part of e.g. "KW-0677".
    keywords_lst = [int(k.attrib["id"][-4:]) for k in elem.findall(ns + "keyword")]

    # Selected sequence features (e.g. "modified residue" for PTMs).
    if len(parse_features) > 0:
        ptms = []
        for f in elem.findall(ns + "feature"):
            if f.attrib["type"] in parse_features:
                # f[0] is the <location> element; children carry positions.
                locs = [int(l.attrib["position"]) for l in f[0]]
                ptms.append([f.attrib["type"],
                             f.attrib["description"] if 'description' in f.attrib else "NaN",
                             locs,
                             f.attrib['evidence'] if 'evidence' in f.attrib else "NaN"])

    data_dict = {"ID": accession, "name": name, "dataset": dataset,
                 "proteinexistence": proteinexistence, "fragment": fragment,
                 "organism": organism, "ecs": ecs, "pdbs": pdbs,
                 "pfams": pfams, "keywords": keywords_lst, "gos": gos,
                 "sequence": sequence}
    if len(parse_features) > 0:
        data_dict["features"] = ptms
    rows.append(data_dict)
def parse_uniprot_seqio(filename,max_entries=0):
    '''parse uniprot xml file using the SeqIO parser (smaller functionality e.g. does not extract evidence codes for GO)

    Args:
        filename: path to the uniprot xml file
        max_entries: only return the first max_entries records (0=all)

    Returns:
        DataFrame indexed by primary accession (ID), same schema as
        parse_uniprot_xml minus proteinexistence/fragment/features.
    '''
    sprot = SeqIO.parse(filename, "uniprot-xml")
    rows = []
    for p in tqdm(sprot):
        # BUGFIX: Biopython's uniprot-xml parser stores the primary accession
        # in SeqRecord.id and the entry name (mnemonic) in SeqRecord.name;
        # the original assignments were swapped, which made the ID index
        # inconsistent with parse_uniprot_xml above.
        accession = str(p.id)
        name = str(p.name)
        dataset = str(p.annotations['dataset'])
        organism = str(p.annotations['organism'])
        # dbxrefs come as "DB:value" strings.
        ecs, pdbs, pfams, gos = [], [], [], []
        for ref in p.dbxrefs:
            k = ref.split(':')
            if k[0] == 'GO':
                gos.append(':'.join(k[1:]))  # keep the full "GO:NNNNNNN" id
            elif k[0] == 'Pfam':
                pfams.append(k[1])
            elif k[0] == 'EC':
                ecs.append(k[1])
            elif k[0] == 'PDB':
                pdbs.append(k[1:])
        # 'keywords' is only present for records that have any.
        keywords = p.annotations.get('keywords', [])
        sequence = str(p.seq)
        rows.append({
            'ID': accession,
            'name': name,
            'dataset': dataset,
            'organism': organism,
            'ecs': ecs,
            'pdbs': pdbs,
            'pfams': pfams,
            'keywords': keywords,
            'gos': gos,
            'sequence': sequence})
        if max_entries > 0 and len(rows) == max_entries:
            break
    df = pd.DataFrame(rows).set_index("ID")
    df['name'] = df.name.astype(str)
    df['dataset'] = df.dataset.astype('category')
    df['organism'] = df.organism.astype('category')
    df['sequence'] = df.sequence.astype(str)
    return df
def filter_human_proteome(df_sprot):
    '''extracts the human proteome from swissprot proteins in a DataFrame
    with an "organism" column (matches any of Human/human/sapiens/Sapiens
    as a substring)'''
    organisms = df_sprot.organism.values.astype(str)
    mask = np.zeros(len(organisms), dtype=bool)
    for keyword in ("Human", "human", "sapiens", "Sapiens"):
        mask |= np.char.find(organisms, keyword) != -1
    return df_sprot[mask]
def filter_aas(df, exclude_aas=("B", "J", "X", "Z")):
    '''Excludes sequences containing any of exclude_aas.

    B = D or N, J = I or L, X = unknown, Z = E or Q.

    Args:
        df: DataFrame with a string column "sequence".
        exclude_aas: iterable of amino-acid letters to reject (the default
            is a tuple to avoid the mutable-default-argument pitfall).

    Returns:
        DataFrame restricted to rows whose sequence contains none of them.
    '''
    banned = tuple(exclude_aas)
    return df[~df.sequence.apply(lambda seq: any(aa in seq for aa in banned))]
######################################################################################################
def explode_clusters_df(df_cluster):
    '''Convert a cluster DataFrame from one row per cluster to one row per ID.

    Args:
        df_cluster: DataFrame indexed by "entry_id" with a "members" column
            holding lists of member IDs and, optionally, a "repr_accession"
            column naming the representative member.

    Returns:
        DataFrame indexed by member "ID" with a "cluster_ID" column and,
        when repr_accession is present, a boolean "representative" flag.
    '''
    # Rewritten with explicit loops: the original (ab)used df.apply purely
    # for its side effect of appending to `rows`.
    df = df_cluster.reset_index(level=0)
    rows = []
    if 'repr_accession' in df.columns:
        for cluster in df.itertuples(index=False):
            for member in cluster.members:
                rows.append([member, cluster.entry_id,
                             cluster.repr_accession == member])
        columns = ['ID', "cluster_ID", "representative"]
    else:
        for cluster in df.itertuples(index=False):
            for member in cluster.members:
                rows.append([member, cluster.entry_id])
        columns = ['ID', "cluster_ID"]
    return pd.DataFrame(rows, columns=columns).set_index(['ID'])
def parse_uniref(filename,max_entries=0,parse_sequence=False, df_selection=None, exploded=True):
    '''parse uniref (clustered sequences) xml ftp://ftp.ebi.ac.uk/pub/databases/uniprot/uniref/uniref50/uniref50.xml.gz unzipped 100GB file
    using custom low-level parser https://www.ibm.com/developerworks/xml/library/x-hiperfparse/

    max_entries: only return first max_entries entries (0=all)
    parse_sequences: return also representative sequence
    df_selection: only include entries with accessions that are present in df_selection.index (None keeps all records)
    exploded: return one row per ID instead of one row per cluster

    c.f. for full format ftp://ftp.ebi.ac.uk/pub/databases/uniprot/uniref/uniref50/README
    '''
    #issue with long texts https://stackoverflow.com/questions/30577796/etree-incomplete-child-text
    #wait for end rather than start tag
    context = etree.iterparse(str(filename), events=["end"], tag="{http://uniprot.org/uniref}entry")
    context = iter(context)
    rows =[]
    for _, elem in tqdm(context):
        parse_func_uniref(elem,rows,parse_sequence=parse_sequence, df_selection=df_selection)
        # Free memory while streaming: clear the element and delete
        # already-processed preceding siblings (hiperfparse pattern).
        elem.clear()
        while elem.getprevious() is not None:
            del elem.getparent()[0]
        if(max_entries > 0 and len(rows)==max_entries):
            break
    df=pd.DataFrame(rows).set_index("entry_id")
    df["num_members"]=df.members.apply(len)
    if(exploded):
        # One row per member ID instead of one row per cluster.
        return explode_clusters_df(df)
    return df
def parse_func_uniref(elem, rows, parse_sequence=False, df_selection=None):
    '''Extract a single uniref cluster from an xml <entry> element.

    Appends a dict with entry_id, name, repr_accession, members (list of
    UniProtKB accessions, representative first) and - if parse_sequence -
    repr_sequence to rows. Entries whose (filtered) member list is empty
    are skipped.
    '''
    ns = "{http://uniprot.org/uniref}"

    # Cluster id, e.g. "UniRef50_P12345".
    entry_id = elem.attrib["id"]

    # Cluster name; strip the leading "Cluster: " prefix (9 characters).
    name = ""
    for n in elem.findall(ns + "name"):
        name = n.text[9:]
        break

    members = []

    # Representative member: collect its accession(s) and optionally its
    # sequence. (Local renamed from `repr`, which shadowed the builtin.)
    repr_accession = ""
    repr_sequence = ""
    for rep in elem.findall(ns + "representativeMember"):
        for dbref in rep.findall(ns + "dbReference"):
            for prop in dbref:
                if prop.attrib["type"] == "UniProtKB accession":
                    if repr_accession == "":
                        # The primary accession is listed first.
                        repr_accession = prop.attrib["value"]
                    members.append(prop.attrib["value"])
        if parse_sequence is True:
            for seq in rep.findall(ns + "sequence"):
                repr_sequence = seq.text
                if repr_sequence != "":
                    break

    # Remaining members (primary and secondary accessions).
    for mem in elem.findall(ns + "member"):
        for dbref in mem.findall(ns + "dbReference"):
            for prop in dbref:
                if prop.attrib["type"] == "UniProtKB accession":
                    members.append(prop.attrib["value"])

    # Optionally keep only members present in df_selection's index.
    if df_selection is not None:
        members = [m for m in members if m in df_selection.index]

    if len(members) > 0:
        data_dict = {"entry_id": entry_id, "name": name,
                     "repr_accession": repr_accession, "members": members}
        if parse_sequence is True:
            data_dict["repr_sequence"] = repr_sequence
        rows.append(data_dict)
###########################################################################################################################
#proteins and peptides from fasta
###########################################################################################################################
def parse_uniprot_fasta(fasta_path, max_entries=0):
    '''parse uniprot from fasta file (which contains less information than the corresponding xml but is also much smaller e.g. ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.fasta)

    fasta_path: path to the uniprot fasta file
    max_entries: stop after this many records (0 = parse all)
    returns: DataFrame indexed by primary accession ("ID") with columns
             name, dataset, proteinexistence, organism, sequence
    '''
    rows=[]
    dataset_dict={"sp":"Swiss-Prot","tr":"TrEMBL"}
    for seq_record in tqdm(SeqIO.parse(fasta_path, "fasta")):
        # header id format: db|accession|name e.g. sp|P12345|NAME_HUMAN
        sid=seq_record.id.split("|")
        accession = sid[1]
        dataset = dataset_dict[sid[0]]
        name = sid[2]
        description = seq_record.description
        sequence=str(seq_record.seq)
        # protein existence evidence level, e.g. "PE=1"
        # FIX: use raw strings for regexes ('\d' in a plain string is an
        # invalid escape sequence and raises warnings on modern Python)
        m = re.search(r'PE=\d', description)
        pe=int(m.group(0).split("=")[1])
        # organism name: everything between "OS=" and the "OX=" field
        m = re.search(r'OS=.* (?=OX=)', description)
        organism=m.group(0).split("=")[1].strip()
        data_dict={"ID": accession, "name": name, "dataset":dataset, "proteinexistence":pe, "organism":organism, "sequence": sequence}
        rows.append(data_dict)
        if(max_entries > 0 and len(rows)==max_entries):
            break
    df=pd.DataFrame(rows).set_index("ID")
    df['name'] = df.name.astype(str)
    df['dataset'] = df.dataset.astype('category')
    df['organism'] = df.organism.astype('category')
    df['sequence'] = df.sequence.astype(str)
    return df
def proteins_from_fasta(fasta_path):
    '''load proteins (as seqrecords) from fasta (just redirects)

    fasta_path: path to a fasta file
    returns: list of Biopython SeqRecord objects (see seqrecords_from_fasta)
    '''
    return seqrecords_from_fasta(fasta_path)
def seqrecords_from_fasta(fasta_path):
    '''load seqrecords from fasta file

    fasta_path: path to a fasta file
    returns: list of Biopython SeqRecord objects
    '''
    return list(SeqIO.parse(fasta_path, "fasta"))
def seqrecords_to_sequences(seqrecords):
    '''converts biopythons seqrecords into a plain list of sequences (strings)'''
    sequences = []
    for record in seqrecords:
        sequences.append(str(record.seq))
    return sequences
def sequences_to_fasta(sequences, fasta_path, sequence_id_prefix="s"):
    '''save plain list of sequences to fasta

    sequences: iterable of sequence strings
    fasta_path: output fasta file path
    sequence_id_prefix: record ids become prefix + running index (e.g. "s0")
    '''
    with open(fasta_path, "w") as handle:
        for idx, seq in tqdm(enumerate(sequences)):
            record = Bio.SeqRecord.SeqRecord(Bio.Seq.Seq(seq), id=sequence_id_prefix + str(idx), description="")
            SeqIO.write(record, handle, "fasta")
def df_to_fasta(df, fasta_path):
    '''Save column "sequence" from pandas DataFrame to fasta file using the index of the DataFrame as ID. Preserves original IDs in contrast to the function sequences_to_fasta()'''
    with open(fasta_path, "w") as handle:
        for idx, row in df.iterrows():
            record = Bio.SeqRecord.SeqRecord(Bio.Seq.Seq(row["sequence"]), id=str(idx), description="")
            SeqIO.write(record, handle, "fasta")
def sequences_to_df(sequences, sequence_id_prefix="s"):
    '''wrap a plain list of sequences into a DataFrame indexed by "ID"

    sequence_id_prefix: ids are prefix + running index; with an empty prefix
    the plain integer index is used instead.
    '''
    if sequence_id_prefix != "":
        ids = [sequence_id_prefix + str(i) for i in range(len(sequences))]
    else:
        ids = list(range(len(sequences)))
    frame = pd.DataFrame.from_dict({'ID': ids, 'sequence': sequences})
    return frame.set_index("ID")
def fasta_to_df(fasta_path):
    '''load a fasta file into a DataFrame with columns ID and sequence'''
    records = []
    for rec in SeqIO.parse(fasta_path, "fasta"):
        records.append({"ID": rec.id, "sequence": str(rec.seq)})
    return pd.DataFrame(records)
def peptides_from_proteins(protein_seqrecords, miss_cleavage=2,min_length=5,max_length=300):
    '''extract peptides from proteins seqrecords by trypsin digestion
    min_length: only return peptides of length min_length or greater (0 for all)
    max_length: only return peptides of length max_length or smaller (0 for all)
    '''
    all_peps = []
    for record in tqdm(protein_seqrecords):
        all_peps.extend(trypsin_digest(str(record.seq), miss_cleavage))
    # deduplicate, then apply the requested length window
    unique = list(set(all_peps))
    if min_length > 0 and max_length > 0:
        unique = [p for p in unique if min_length <= len(p) <= max_length]
    elif min_length == 0 and max_length > 0:
        unique = [p for p in unique if len(p) <= max_length]
    elif min_length > 0 and max_length == 0:
        unique = [p for p in unique if len(p) >= min_length]
    print("Extracted", len(unique), "unique peptides.")
    return unique
def trypsin_digest(proseq, miss_cleavage):
    '''trypsin digestion of protein seqrecords
    TRYPSIN from https://github.com/yafeng/trypsin/blob/master/trypsin.py

    proseq: protein sequence string
    miss_cleavage: number of allowed missed cleavages (0, 1 or 2)
    returns: deduplicated list of peptide strings (order unspecified)
    '''
    # cleavage after K or R unless followed by P
    sites = [0]
    for pos in range(len(proseq) - 1):
        residue = proseq[pos]
        if (residue == 'K' or residue == 'R') and proseq[pos + 1] != 'P':
            sites.append(pos + 1)
    if sites[-1] != len(proseq):
        sites.append(len(proseq))
    frags = []
    if len(sites) > 2:
        n = len(sites)
        if miss_cleavage == 0:
            frags = [proseq[sites[j]:sites[j + 1]] for j in range(n - 1)]
        elif miss_cleavage == 1:
            for j in range(n - 2):
                frags.append(proseq[sites[j]:sites[j + 1]])
                frags.append(proseq[sites[j]:sites[j + 2]])
            frags.append(proseq[sites[-2]:sites[-1]])
        elif miss_cleavage == 2:
            for j in range(n - 3):
                frags.append(proseq[sites[j]:sites[j + 1]])
                frags.append(proseq[sites[j]:sites[j + 2]])
                frags.append(proseq[sites[j]:sites[j + 3]])
            frags.append(proseq[sites[-3]:sites[-2]])
            frags.append(proseq[sites[-3]:sites[-1]])
            frags.append(proseq[sites[-2]:sites[-1]])
    else:  # there is no trypsin site in the protein sequence
        frags.append(proseq)
    return list(set(frags))
###########################################################################
# Processing CD-HIT clusters
###########################################################################
def clusters_df_from_sequence_df(df,threshold=[1.0,0.9,0.5],alignment_coverage=[0.0,0.9,0.8],memory=16000, threads=8, exploded=True, verbose=False):
    '''create clusters df from sequence df (using cd hit)
    df: dataframe with sequence information
    threshold: similarity threshold for clustering (pass a list for hierarchical clustering e.g [1.0, 0.9, 0.5])
    alignment_coverage: required minimum coverage of the longer sequence (to mimic uniref https://www.uniprot.org/help/uniref)
    memory: limit available memory
    threads: limit number of threads
    exploded: return exploded view of the dataframe (one row for every member vs. one row for every cluster)
    uses CD-HIT for clustering
    https://github.com/weizhongli/cdhit/wiki/3.-User's-Guide
    copy cd-hit into ~/bin
    TODO: extend to psi-cd-hit for thresholds smaller than 0.4
    '''
    if verbose:
        print("Exporting original dataframe as fasta...")
    fasta_file = "cdhit.fasta"
    df_original_index = list(df.index) #reindex the dataframe since cdhit can only handle 19 letters
    df = df.reset_index(drop=True)
    df_to_fasta(df, fasta_file)
    if(not(isinstance(threshold, list))):
        threshold=[threshold]
        alignment_coverage=[alignment_coverage]
    assert(len(threshold)==len(alignment_coverage))
    fasta_files=[]
    for i,thr in enumerate(threshold):
        # word length per cd-hit user guide for the given similarity threshold
        if(thr< 0.4):#use psi-cd-hit here
            print("thresholds lower than 0.4 require psi-cd-hit.pl require psi-cd-hit.pl (building on BLAST) which is currently not supported")
            return pd.DataFrame()
        elif(thr<0.5):
            wl = 2
        elif(thr<0.6):
            wl = 3
        elif(thr<0.7):
            wl = 4
        else:
            wl = 5
        aL = alignment_coverage[i]
        #cd-hit -i nr -o nr80 -c 0.8 -n 5
        #cd-hit -i nr80 -o nr60 -c 0.6 -n 4
        #psi-cd-hit.pl -i nr60 -o nr30 -c 0.3
        if verbose:
            print("Clustering using cd-hit at threshold", thr, "using wordlength", wl, "and alignment coverage", aL, "...")
        fasta_file_new= "cdhit"+str(int(thr*100))+".fasta"
        command = "cd-hit -i "+fasta_file+" -o "+fasta_file_new+" -c "+str(thr)+" -n "+str(wl)+" -aL "+str(aL)+" -M "+str(memory)+" -T "+str(threads)
        if(verbose):
            print(command)
        process= subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        output, error = process.communicate()
        if(verbose):
            print(output)
        if(error !=""):
            print(error)
        fasta_files.append(fasta_file)
        if(i==len(threshold)-1):
            fasta_files.append(fasta_file_new)
        fasta_file= fasta_file_new
    #join results from all clustering steps
    if verbose:
        print("Joining results from different clustering steps...")
    for i,f in enumerate(reversed(fasta_files[1:])):
        if verbose:
            print("Processing",f,"...")
        if(i==0):
            df_clusters = parse_cdhit_clstr(f+".clstr",exploded=False)
        else:
            df_clusters2 = parse_cdhit_clstr(f+".clstr",exploded=False)
            for id,row in df_clusters.iterrows():
                members = row['members']
                new_members = [list(df_clusters2[df_clusters2.repr_accession==y].members)[0] for y in members]
                new_members = [item for sublist in new_members for item in sublist] #flattened
                # BUGFIX: iterrows() yields copies, so assigning to `row` was
                # silently discarded; write the flattened members back into the
                # dataframe explicitly.
                df_clusters.at[id,'members'] = new_members
    # map the temporary integer ids back to the original dataframe index
    df_clusters["members"]=df_clusters["members"].apply(lambda x:[df_original_index[int(y)] for y in x])
    df_clusters["repr_accession"]=df_clusters["repr_accession"].apply(lambda x:df_original_index[int(x)])
    if(exploded):
        return explode_clusters_df(df_clusters)
    return df_clusters
def parse_cdhit_clstr(filename, exploded=True):
    '''Aux. Function (used by clusters_df_from_sequence_df) to parse CD-HITs clstr output file in a similar way as the uniref data
    for the format see https://github.com/weizhongli/cdhit/wiki/3.-User's-Guide#CDHIT
    exploded: single row for every ID instead of single for every cluster
    '''
    prefix = filename[:-6]  # strip the ".clstr" extension

    def flush(acc, cluster_members, rep):
        # store the finished cluster (skip the empty cluster before the first ">")
        if len(cluster_members) > 0:
            acc.append({"entry_id": prefix + "_" + rep,
                        "members": cluster_members,
                        "repr_accession": rep})

    entries = []
    cluster_members = []
    rep = ""
    with open(filename, 'r') as handle:
        for line in tqdm(handle):
            if line[0] == ">":
                # a new cluster starts: flush the previous one
                flush(entries, cluster_members, rep)
                cluster_members = []
                rep = ""
            else:
                accession = (line.split(">")[1]).split("...")[0]
                cluster_members.append(accession)
                if "*" in line:  # "*" marks the representative member
                    rep = accession
    flush(entries, cluster_members, rep)
    df = pd.DataFrame(entries).set_index("entry_id")
    if exploded:
        return explode_clusters_df(df)
    return df
###########################################################################
# MHC DATA
###########################################################################
######### Helper functions ##########
def _label_binder(data, threshold=500, measurement_column="meas"):
# Drop entries above IC50 > 500nM with inequality < (ambiguous)
to_drop = (( (data['inequality']=='<')&(data[measurement_column]>threshold))|((data['inequality']=='>')&(data[measurement_column]<threshold))).mean()
if to_drop > 0:
print('Dropping {} % because of ambiguous inequality'.format(to_drop))
data = data[~(( (data['inequality']=='<')&(data[measurement_column]>threshold))|((data['inequality']=='>')&(data[measurement_column]<threshold)))]
# Labeling
data['label'] = (1* data[measurement_column]<=threshold).astype(int)
return data
def _transform_ic50(data, how="log",max_ic50=50000.0, inequality_offset=True, label_column="meas"):
"""Transform ic50 measurements
how: "log" logarithmic transform, inequality "=" mapped to [0,1], inequality ">" mapped to [2,3], inequality "<" mapped to [4,5]
"norm"
"cap"
"""
x = data[label_column]
if how=="cap":
x = np.minimum(x, 50000)
elif how=="norm":
x = np.minimum(x, 50000)
x = (x - x.mean()) / x.std()
elif how=="log":
# log transform
x = 1 - (np.log(x)/np.log(max_ic50))
x = np.minimum(1.0, np.maximum(0.0,x))
if(inequality_offset):
# add offsets for loss
offsets = pd.Series(data['inequality']).map({'=': 0, '>': 2, '<': 4,}).values
x += offsets
return x
def _string_index(data):
# Add prefix letter "a" to the numerical index (such that it is clearly a string in order to avoid later errors).
data["ID"] = data.index
data["ID"] = data["ID"].apply(lambda x: "a"+ str(x))
data = data.set_index(["ID"])
return data
def _format_alleles(x):
if x[:3]=='HLA':
return x[:5]+'-'+x[6:8]+x[9:]
if x[:4]=='Mamu':
return x[:6]+'-'+x[7:]
else:
return x
def _get_allele_ranking(data_dir='.'):
'''
Allele ranking should be the same across different datasets (noMS, withMS) to avoid confusion.
Thus, the ranking is based on the larger withMS dataset
'''
data_dir = Path(data_dir)
curated_withMS_path = data_path/'data_curated.20180219'/'curated_training_data.with_mass_spec.csv'
df = pd.read_csv(curated_withMS_path)
# Drop duplicates
df = df.drop_duplicates(["allele", "peptide","measurement_value"])
lens = df['peptide'].apply(len)
df = df[(lens>7) & (lens<16)]
# Keep only alleles with min 25 peptides like MHC flurry
peptides_per_allele = df.groupby('allele').size()
alleles_select = peptides_per_allele[peptides_per_allele>24].index
df = df[df['allele'].isin(alleles_select)]
mhc_rank = df.groupby('allele').size().sort_values(ascending=False).reset_index()['allele']
return mhc_rank
def netmhpan_4_0_special_allele_map(allele):
    '''Map NetMHCpan4.0 allele names that cannot be converted with simple rules
    to the NetMHCpan4.1 naming convention; unknown names pass through unchanged.'''
    minus_idx = allele.find("-")
    prefix = allele[:minus_idx]
    suffix = allele[minus_idx + 1:]
    mamu_map = {"A01": "A1*00101",
                "A02": "A1*00201",
                "A07": "A1*00701",
                "A11": "A1*01101",
                "A2201": "A1*02201",
                "A2601": "A1*02601",
                'A20102': "A2*00102",  # "A2*0102"
                "A70103": "A7*00103",  # "A7*0103"
                "B01": "B*00101",
                "B03": "B*00301",
                "B04": "B*00401",
                "B08": "B*00801",
                "B17": "B*01701",
                "B52": "B*05201",
                "B1001": "B*01001",
                'B3901': "B*03901",  #?
                'B6601': "B*06601",  #?
                'B8301': "B*08301",  #?
                'B8701': "B*08701",  #?
                }
    # source: select allele menu on http://www.cbs.dtu.dk/services/NetMHCpan-4.0/
    bola_map = {
        "D18.4": "1:02301",
        "T2a": "2:01201",
        "AW10": "3:00101",
        "JSP.1": "3:00201",
        "HD6": "6:01301",
        "T2b": "6:04101"
    }
    if prefix == "Mamu":
        suffix = mamu_map.get(suffix, suffix)
    elif prefix == "BoLA":
        suffix = bola_map.get(suffix, suffix)
    return prefix + "-" + suffix
def prepare_pseudo_mhc_sequences(mhc_class, data_dir='.'):
    """
    The pseudo sequences are provided with the NetMHCpan4.1/NetMHCIIpan4.0 data.

    mhc_class: "II" selects the NetMHCIIpan file, anything else class I
    data_dir: directory containing the NetMHCpan/NetMHCIIpan training folders
    returns: DataFrame with columns allele, sequence1 (first occurrence per allele)
    """
    base = Path(data_dir)
    if mhc_class == "II":
        seq_file = "NetMHCIIpan_train/pseudosequence.2016.all.X.dat"
    else:
        seq_file = "NetMHCpan_4_1_train/MHC_pseudo.dat"
    pairs = []
    with open(base / seq_file, "r") as handle:
        for line in handle:
            allele, seq = line.split()
            pairs.append((allele, seq))
    table = pd.DataFrame(pairs, columns=("allele", "sequence1"))
    # keep only the first entry per allele
    return table[~table["allele"].duplicated()]
########## Generate DataFrame ##########
def generate_mhc_kim(cv_type=None, mhc_select=0, regression=False, transform_ic50=None, to_csv=False, filename=None, data_dir='.', keep_all_alleles=False):
    '''
    Load the Kim et al. MHC I binding data: BD2009 for training/cv plus the
    BD2013 "blind" set for testing (cv column set to 'blind').

    cv_type: string, strategy for 5-fold cross validation, options:
        - None: No cv-strategy, cv column is filled with 'TBD'
        - sr: removal of similar peptides seperatly in binder/ non-binder set, using similarity threshold of 80%, similarity found with 'Hobohm 1 like algorithm'
        - gs: grouping similar peptides in the same cv-partition
        - rnd: random partioning
    transform_ic50: string, ignnored if not regression
        - None: use raw ic50 measurements as labels
        - cap: cap ic50 meas at 50000
        - norm: cap ic50 meas at 50000 and normalize
        - log: take log_50000 and cap at 50000
    mhc_select: int between 0 and 50, choose allele by frequency rank in Binding Data 2009
    returns: DataFrame (string index) with columns sequence, label, cv
    '''
    # Binding Data 2009. Used by Kim et al for Cross Validation. Used by MHCnugget for training.
    bd09_file = 'bdata.2009.mhci.public.1.txt'
    # Similar peptides removed
    bd09_cv_sr_file = 'bdata.2009.mhci.public.1.cv_sr.txt'
    # Random partioning
    bd09_cv_rnd_file = 'bdata.2009.mhci.public.1.cv_rnd.txt'
    # Similar peptides grouped
    bd09_cv_gs_file = 'bdata.2009.mhci.public.1.cv_gs.txt'
    # 'blind' used by Kim et al to estimate true predicitve accuracy. Used by MHCnugget for testing.
    # Generated by subtracting BD2009 from BD 2013 and removing similar peptides with respect to BD2009
    # (similar = at least 80% similarity and same length)
    bdblind_file = 'bdata.2013.mhci.public.blind.1.txt'
    data_dir = Path(data_dir)/"benchmark_mhci_reliability/binding"
    # Read in data with specified cv type
    if cv_type=='sr':
        bd09 = pd.read_csv(data_dir/'bd2009.1'/bd09_cv_sr_file, sep='\t')
    elif cv_type=='gs':
        bd09 = pd.read_csv(data_dir/'bd2009.1'/bd09_cv_gs_file, sep='\t')
    elif cv_type=='rnd':
        bd09 = pd.read_csv(data_dir/'bd2009.1'/bd09_cv_rnd_file, sep='\t')
    else:
        bd09 = pd.read_csv(data_dir/'bd2009.1'/bd09_file, sep='\t')
    # Read in blind data
    bdblind = pd.read_csv(data_dir/'blind.1'/bdblind_file, sep='\t')
    # alleles are spelled differently in bdblind and bd2009, change spelling in bdblind
    bdblind['mhc'] = bdblind['mhc'].apply(_format_alleles)
    # Confirm there is no overlap
    print('{} entries from the blind data set are in the 2009 data set'.format(bdblind[['sequence', 'mhc']].isin(bd09[['sequence', 'mhc']]).all(axis=1).sum()))
    if regression:
        # Convert ic50 measurements to range [0,1]
        if transform_ic50 is not None:
            bd09['label'] = _transform_ic50(bd09, how=transform_ic50)
            bdblind['label'] = _transform_ic50(bdblind, how=transform_ic50)
    else:
        # Labeling for binder/NonBinder
        bd09 = _label_binder(bd09)[['mhc', 'sequence', 'label', 'cv']]
        bdblind = bdblind.rename(columns={"meas":"label"})
    if not keep_all_alleles:
        # in bd09 (train set) keep only entries with mhc also occuring in bdblind (test set)
        bd09 = bd09[bd09['mhc'].isin(bdblind['mhc'])]
    # Combine
    bdblind['cv'] = 'blind'
    bd = pd.concat([bd09, bdblind], ignore_index=True)
    if not(regression):
        # Test if there is at least one binder in bd09 AND bdblind
        min_one_binder = pd.concat([(bd09.groupby('mhc')['label'].sum() > 0), (bdblind.groupby('mhc')['label'].sum() > 0)], axis=1).all(axis=1)
        print('For {} alleles there is not at least one binder in bd 2009 AND bd blind. These will be dismissed.'.format((~min_one_binder).sum()))
        # BUGFIX: previously this boolean-indexed bd['mhc'].unique() (appearance
        # order) with a groupby-indexed Series (sorted order), which misaligns
        # the mask; select the allele names directly from the Series index.
        allesles_to_keep = min_one_binder[min_one_binder].index
        # Dismiss alleles without at least one binder
        bd = bd[bd['mhc'].isin(allesles_to_keep)]
    # Make allele ranking based on binding data 2009
    mhc_rank = bd[bd['cv']!='blind'].groupby('mhc').size().sort_values(ascending=False).reset_index()['mhc']
    # Select allele
    if mhc_select is not None:
        print('Selecting allele {}'.format(mhc_rank.loc[mhc_select]))
        bd = bd[bd['mhc']==mhc_rank.loc[mhc_select]][['sequence', 'label', 'cv']]
    # Turn indices into strings
    bd = _string_index(bd)
    if to_csv and filename is not None:
        bd.to_csv(filename)
    return bd
def generate_mhc_flurry(ms='noMS', mhc_select=0, regression=False, transform_ic50=None, binder_threshold=500, filter_length=True, label_binary=False, random_seed=42,data_dir='.'):
    '''
    Load the MHC I data curated and uploaded to https://data.mendeley.com/datasets/8pz43nvvxh/1 by MHCFlurry
    Used by them for training and model selection
    ms: string, specifies if mass spectroscopy data should be included, options:
        - noMS: MHCFlurry no MS dataset
        - withMS: MHCFlurry with MS dataset
    mhc_select: int between 0 and 150 (noMS)/ 188 (withMS), choose allele by frequency rank;
        a non-int value is treated as an allele name; None keeps all alleles
    filter_length: boolean, MHCFlurry selected peptides of length 8-15 (their model only deals with these lengths)
    label_binary: add a binary binder label via _label_binder
    random_seed: seed for the 10% validation split (cluster_ID==1)
    returns: DataFrame with columns sequence, meas, inequality, cluster_ID, ID, ...
    '''
    data_path = Path(data_dir)
    curated_noMS_path = data_path/'data_curated.20180219'/'curated_training_data.no_mass_spec.csv'
    curated_withMS_path = data_path/'data_curated.20180219'/'curated_training_data.with_mass_spec.csv'
    # NOTE: if ms is neither 'noMS' nor 'withMS', df is never assigned (NameError below)
    if ms=='noMS':
        df = pd.read_csv(curated_noMS_path)
    elif ms=='withMS':
        df = pd.read_csv(curated_withMS_path)
    if filter_length:
        # keep peptides of length 8-15
        lens = df['peptide'].apply(len)
        df = df[(lens>7) & (lens<16)]
    # Keep only alleles with min 25 peptides
    peptides_per_allele = df.groupby('allele').size()
    alleles_select = peptides_per_allele[peptides_per_allele>24].index
    df = df[df['allele'].isin(alleles_select)]
    df.rename(columns={'measurement_value':'meas', 'measurement_inequality':'inequality', 'peptide':'sequence'}, inplace=True)
    # label binder/non binder
    if label_binary:
        # NOTE(review): measurement_column='label' but the measurement column was
        # renamed to 'meas' above — this likely raises KeyError; verify intent.
        df = _label_binder(df, threshold=binder_threshold, measurement_column='label')
    if regression:
        df["label"] = _transform_ic50(df, how=transform_ic50)
    if mhc_select is not None:
        if type(mhc_select)==int:
            # integer: pick allele by frequency rank in this dataset
            mhc_rank = df.groupby('allele').size().sort_values(ascending=False).reset_index()['allele']
            print('Selecting allele {}'.format(mhc_rank.loc[mhc_select]))
            df = df[df['allele']==mhc_rank.loc[mhc_select]]
        else:
            # otherwise: treat mhc_select as an allele name
            print('Selecting allele {}'.format(mhc_select))
            df = df[df['allele']==mhc_select]
    # Mark 10% of the data as validation set
    np.random.seed(seed=random_seed)
    val_ind = np.random.randint(0,high=df.shape[0],size=int(df.shape[0]/10))
    df['cluster_ID'] = (df.reset_index().index.isin(val_ind))*1
    # stable per-sequence id (md5 of the peptide string)
    df["ID"]=df.sequence.apply(lambda x: hashlib.md5(x.encode('utf-8')).hexdigest())
    #df = _string_index(df)
    return df
def generate_abelin(mhc_select=0, data_dir='.'):
    '''
    Load the Abelin mass-spec benchmark predictions and keep only peptides not
    seen in the MHCFlurry noMS training data; cluster_ID is set to 2 (test).

    mhc_select: int in [1, 2, 4, 6, 8, 10, 13, 14, 15, 16, 17, 21, 22, 36, 50, 63]
        (rank into _get_allele_ranking); a non-int value is used as allele name directly
    '''
    data_path = Path(data_dir)
    abelin = pd.read_csv(data_path/"abelin_peptides.all_predictions.csv")[['hit', 'allele', 'peptide']]
    abelin.rename(columns={'peptide':'sequence'}, inplace=True)
    # Remove entries present in training set (training data here: noMS as only MHCFlurry noMS is benchmarked with Abelin data)
    train = generate_mhc_flurry(ms='noMS',mhc_select=None, data_dir=data_dir)[['allele', 'sequence']]
    # left-merge against the (deduplicated) training pairs; rows that matched get
    # vec==True, unmatched rows get NaN -> False after fillna
    overlap_ind = abelin[['allele', 'sequence']].merge(train.drop_duplicates(['allele','sequence']).assign(vec=True),how='left', on=['allele', 'sequence']).fillna(False)['vec']
    #print(abelin.shape[0], overlap_ind.shape, overlap_ind.sum() )
    abelin = abelin[~overlap_ind.values]
    # Select allele specific data
    if type(mhc_select)==int:
        allele_ranking = _get_allele_ranking(data_dir=data_dir)
        mhc_select = allele_ranking.iloc[mhc_select]
    abelin = abelin[abelin['allele']==mhc_select]
    abelin.rename(columns={'hit':'label'}, inplace=True)
    # cluster_ID 2 marks the test partition
    abelin['cluster_ID'] = 2
    return abelin
def prepare_hpv(mhc_select, data_dir='.'):
    '''
    To run, download Table S2 from Supplementary Material of [<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, et al. Performance evaluation of MHC class-I binding prediction tools based on an experimentally validated MHC–peptide binding data set. Cancer Immunol Res 2019;7:719–36.] and save as HPV_data.csv in ./data
    mhc_select: string from ['HLAA1', 'HLAA2', 'HLAA3', 'HLAA11', 'HLAA24', 'HLAB7', 'HLAB15']
    '''
    base = Path(data_dir)
    table = pd.read_csv(base / "HPV_data.csv")
    capacity = table["Experimental binding capacity"]
    # "nb" marks non-binders; mask them to NaN in the label column
    table["label"] = capacity.mask(capacity == "nb")
    selected = table[table["allele"] == mhc_select]
    return selected[["sequence", "label"]]
def prepare_sars_cov(mhc_select, mhc_class="I", with_MHC_seq=False, data_dir='.'):
    '''
    Load the SARS-CoV-2 stability benchmark (one Excel sheet per allele).

    To run, download https://www.immunitrack.com/wp/wp-content/uploads/Covid19-Intavis-Immunitrack-datasetV2.xlsx from
    [<NAME>., <NAME>., <NAME>.B. et al.
    Identification and validation of 174 COVID-19 vaccine candidate epitopes reveals
    low performance of common epitope prediction tools. Sci Rep 10, 20465 (2020).
    https://doi.org/10.1038/s41598-020-77466-4]
    and save in datadir
    mhc_select: string from ["1 A0101",
                        "2 A0201",
                        "3 A0301",
                        "4 A1101",
                        "5 A2402",
                        "6 B4001",
                        "7 C0401",
                        "8 C0701",
                        "9 C0702",
                        "10 C0102",
                        "11 DRB10401"]
        or None to keep all alleles
    mhc_class: "I" or "II"; selects the sheet-name -> allele-name mapping
    with_MHC_seq: also merge in the pseudo MHC sequence (column sequence1)
    returns: DataFrame with columns sequence, label (stability %) and optionally allele/sequence1
    '''
    # sheet names in the Excel workbook, one per allele
    allele_sheets = ["1 A0101",
                    "2 A0201",
                    "3 A0301",
                    "4 A1101",
                    "5 A2402",
                    "6 B4001",
                    "7 C0401",
                    "8 C0701",
                    "9 C0702",
                    "10 C0102",
                    "11 DRB10401"]
    data_path = Path(data_dir)
    # dict of DataFrames keyed by sheet name; concat gives a (sheet, row) MultiIndex
    df = pd.read_excel(data_path/"Covid19-Intavis-Immunitrack-datasetV2.xlsx", sheet_name=allele_sheets)
    df = pd.concat(df, sort=True)[["Sequence","Stab %"]]
    df.rename(columns={"Sequence":"sequence","Stab %":"label"}, inplace=True)
    if mhc_select is not None:
        df["allele"] = mhc_select
        # select the rows of the requested sheet via the MultiIndex level 0
        df = df.loc[mhc_select]
    if with_MHC_seq:
        # turn the sheet name (index level 0) into an "allele" column
        df = df.reset_index(level=0).rename(columns={"level_0":"allele"})
        if mhc_class=="I":
            # map sheet names to NetMHCpan-style class I allele names
            covid_allele_map = {"1 A0101": 'HLA-A01:01',
                                "2 A0201": 'HLA-A02:01',
                                "3 A0301": 'HLA-A03:01',
                                "4 A1101": 'HLA-A11:01',
                                "5 A2402": 'HLA-A24:02',
                                "6 B4001": 'HLA-B40:01',
                                "7 C0401": 'HLA-C04:01',
                                "8 C0701": 'HLA-C07:01',
                                "9 C0702": 'HLA-C07:02'
                                }
        elif mhc_class=="II":
            covid_allele_map = {"11 DRB10401": "DRB1_0401"
                                }
        df.allele = df.allele.map(covid_allele_map)
        # filter out nan alleles (eg. class i (ii) alleles for class ii (i))
        df = df[~df.allele.isnull()]
        allele_df = prepare_pseudo_mhc_sequences(mhc_class, data_dir)
        df = df.merge(allele_df, on="allele", how="left")
    return df
def prepare_mhci_netmhcpan_4(mhc_select, MS=False, with_MHC_seq=False, data_dir="./", netmhc_data_version="4.0"):
    """
    Prepare training data of NetMHCpan4.0/NetMHCpan4.1 with test data from
    <NAME>., <NAME>., <NAME>. et al.
    Identification and validation of 174 COVID-19 vaccine candidate epitopes reveals low performance
    of common epitope prediction tools.
    Sci Rep 10, 20465 (2020).
    https://doi.org/10.1038/s41598-020-77466-4
    Download
    - Train/Val Data Affinity measurements
        - either NetMHCpan4.1 data:
            http://www.cbs.dtu.dk/suppl/immunology/NAR_NetMHCpan_NetMHCIIpan/NetMHCpan_train.tar.gz
            unpack and rename as NetMHCpan_4_1_train
        - or NetMhCpan4.0 data:
            from http://www.cbs.dtu.dk/suppl/immunology/NetMHCpan-4.0/ download 0th CV split files f000_ba (train) and c000_ba (val, 20%)
            store in data_dir/NetMHCpan_4_0_train
    save everything in data_dir

    mhc_select: allele name to filter on (None keeps all alleles)
    MS: use eluted-ligand ("el") instead of binding-affinity ("ba") files (4.0 only)
    with_MHC_seq: merge in pseudo MHC sequences (column sequence1)
    returns: DataFrame with columns sequence, label, allele, cluster_ID (0=train, 1=val), ...
    """
    # BUGFIX: the default data_dir is a plain string; the "/" path operator
    # below requires a pathlib.Path, so convert explicitly.
    data_dir = Path(data_dir)
    datatype = "ba" if not MS else "el"
    data = []
    if netmhc_data_version=="4.0":
        for file in glob.glob(str(data_dir/"NetMHCpan_4_0_train/*000_{}".format(datatype))):
            df = pd.read_csv(file, header=None, delimiter=" " if not MS else "\t", names=("sequence","label","allele","ic50"))
            # files starting with "f" are the training split, "c" the validation split
            df["cluster_ID"] = 0 if file.split("/")[-1][0]=="f" else 1
            data.append(df)
    elif netmhc_data_version=="4.1":
        for file in glob.glob(str(data_dir/"NetMHCpan_4_1_train/*_ba")):
            df = pd.read_csv(file, header=None, delimiter=" ", names=("sequence","label","allele"))
            # use one partition as validation set
            df["cluster_ID"] = 1 if file.split("/")[-1]=="c004_ba" else 0
            data.append(df)
    data = pd.concat(data, ignore_index=True, sort=True)
    if mhc_select is not None:
        data = data[data["allele"]==mhc_select]
    if with_MHC_seq:
        if netmhc_data_version=="4.0":
            # some BoLA and Mamu alleles in NetMhCpan4.0 have names that can't be mapped to ipd alleles names with simple rules
            # map these to the convention of NetMhCpan4.1 first (these names can be mapped to ipd allele names)
            data.allele = data.allele.apply(netmhpan_4_0_special_allele_map)
        allele_df = prepare_pseudo_mhc_sequences("I", data_dir)
        data = data.merge(allele_df, on="allele", how="left")
    return data
def prepare_mhcii_netmhcpan(mhc_select, MS=False, with_MHC_seq=False, data_dir="./", netmhc_data_version="3.2"):
    """
    Prepare training data of NetMHCIIpan3.2/NetMHCIIpan4.0 with test data from
    <NAME>., <NAME>., <NAME>. et al.
    Identification and validation of 174 COVID-19 vaccine candidate epitopes reveals low performance
    of common epitope prediction tools.
    Sci Rep 10, 20465 (2020).
    https://doi.org/10.1038/s41598-020-77466-4
    Download
    - Train/Val Data Affinity measurements
        - either NetMHCpan4.1 data:
            http://www.cbs.dtu.dk/suppl/immunology/NAR_NetMHCpan_NetMHCIIpan/NetMHCpan_train.tar.gz
            unpack and rename as NetMHCpan_4_1_train
        - or NetMhCpan4.0 data:
            from http://www.cbs.dtu.dk/suppl/immunology/NetMHCpan-4.0/ download 0th CV split files f000_ba (train) and c000_ba (val, 20%)
            store in data_dir/NetMHCpan_4_0_train
    - MS measurement data:
        Download http://www.cbs.dtu.dk/suppl/immunology/NAR_NetMHCpan_NetMHCIIpan/NetMHCIIpan_train.tar.gz, unpack and rename as NetMHCIIpan_train
    save everything in data_dir

    mhc_select: allele name to filter on (None keeps all alleles); with MS=True
        a substring match is used since MS entries can list several alleles
    returns: DataFrame with columns sequence, label, allele, cluster_ID (0=train, 1=val), ...
    """
    # BUGFIX: the default data_dir is a plain string; the "/" path operator
    # below requires a pathlib.Path, so convert explicitly.
    data_dir = Path(data_dir)
    data = []
    if MS:
        for file in glob.glob(str(data_dir/"NetMHCIIpan_train/*_EL1.txt")):
            df = pd.read_csv(file, header=None, delimiter="\t", names=("sequence","label","allele","context"))
            # use one partition as validation set
            df["cluster_ID"] = 1 if file.split("/")[-1]=="test_EL1.txt" else 0
            data.append(df)
    else:
        if netmhc_data_version=="3.2":
            for file in glob.glob(str(data_dir/"NetMHCIIpan_3_2_train/*1.txt")):
                df = pd.read_csv(file, header=None, delimiter="\t", names=("sequence","label","allele"))
                # use one partition as validation set
                df["cluster_ID"] = 1 if file.split("/")[-1]=="test1.txt" else 0
                data.append(df)
        elif netmhc_data_version=="4.0":
            for file in glob.glob(str(data_dir/"NetMHCIIpan_train/*_BA1.txt")):
                df = pd.read_csv(file, header=None, delimiter="\t", names=("sequence","label","allele","context"))
                # use one partition as validation set
                df["cluster_ID"] = 1 if file.split("/")[-1]=="test_BA1.txt" else 0
                data.append(df)
    data = pd.concat(data, ignore_index=True, sort=True)
    if mhc_select is not None:
        if MS:
            # MS entries may list several alleles per row -> substring match
            data = data[data["allele"].apply(lambda x: mhc_select in x)]
        else:
            data = data[data["allele"]==mhc_select]
    if with_MHC_seq:
        allele_df = prepare_pseudo_mhc_sequences("II", data_dir)
        data = data.merge(allele_df, on="allele", how="left")
    return data
def prepare_mhcii_iedb2016(mhc_select, cv_fold, path_iedb="../data/iedb2016", path_jensen_csv="../data/jensen_et_al_2018_immunology_supplTabl3.csv"):
    '''prepares mhcii iedb 2016 dataset using train1 ... test5 from http://www.cbs.dtu.dk/suppl/immunology/NetMHCIIpan-3.2/

    mhc_select: rank (by training set size) of the allele to select
    cv_fold: which of the 5 cross-validation folds (0-4) to use
    returns: DataFrame with cluster_ID 0=train, 1=val (10% stratified split), 2=test
    '''
    def prepare_df(filename):
        # read one tab-separated train/test file; label is log50k-transformed affinity
        df = pd.read_csv(filename,header=None,sep="\t")
        df.columns=["sequence","aff_log50k","allele"]
        # stable per-sequence id (md5 of the peptide string)
        df["ID"]=df.sequence.apply(lambda x: hashlib.md5(x.encode('utf-8')).hexdigest())
        # recover the raw ic50 value from the log50k transform
        df["aff"]=df.aff_log50k.apply(lambda x:np.power(50000,1-x))
        return df
    path_iedb = Path(path_iedb)
    dfs_traintest = []
    for i in range(1,6):
        dfs_traintest.append(prepare_df(path_iedb/("test"+str(i))))
        dfs_traintest[-1]["cv_fold"]=i-1
        dfs_traintest[-1]["traintest"]="test"
        dfs_traintest.append(prepare_df(path_iedb/("train"+str(i))))
        dfs_traintest[-1]["cv_fold"]=i-1
        dfs_traintest[-1]["traintest"]="train"
    df_traintest = pd.concat(dfs_traintest,ignore_index=True)
    df_traintest.rename(columns={"aff_log50k":"label"},inplace=True)
    # select only alleles with results in the Jensen et al paper
    df_pub = pd.read_csv(path_jensen_csv)
    df_traintest = df_traintest[df_traintest["allele"].isin(df_pub["Molecule"].unique())]
    # select one allele based on training set size ranking
    allele_ranking = df_traintest[df_traintest["cv_fold"]==0].groupby("allele").size().sort_values(ascending=False).index
    df_traintest = df_traintest[df_traintest["allele"]==allele_ranking[mhc_select]]
    #select specified cv_fold
    df_traintest = df_traintest[df_traintest["cv_fold"]==cv_fold]
    #stratified split of train -> train & val
    # binder = affinity below 500 nM; used to stratify the 10% validation split
    binder = 1.0*(df_traintest[df_traintest["traintest"]=="train"]["aff"] < 500).values
    tts = train_test_split(df_traintest[df_traintest["traintest"]=="train"], test_size=0.1, random_state=42, stratify=binder)
    # NOTE(review): assigning into the split slices may trigger pandas
    # SettingWithCopyWarning; results look correct but verify on upgrade.
    df_train = tts[0]
    df_train["cluster_ID"] = 0
    df_val = tts[1]
    df_val["cluster_ID"] = 1
    df_test = df_traintest[df_traintest["traintest"]=="test"]
    df_test["cluster_ID"] = 2
    df = pd.concat([df_train, df_val, df_test], ignore_index=True)
    return df
def prepare_mhcii_iedb2009(path_2009= "../data/classII_binding_data_Nov_16_2009",similarity_reduced=True):
    '''prepares mhcii iedb 2009 dataset using classII_binding_data_Nov_16_2009.zip from http://tools.iedb.org/mhcii/download/

    similarity_reduced: use the similarity-reduced 5-fold CV split (else the full split)
    returns: DataFrame with columns species, allele, sequence_len, sequence,
             label (log-transformed ic50), cv_fold, traintest, ID
    '''
    def mhc_ii_allelename_format(x):
        # harmonize HLA class II allele spellings with the pcbi test datasets
        if not x.startswith("HLA"):
            return x
        if len(x) == 23:
            return x[:8] + x[9:13] + "-" + x[14:18] + x[19:]
        if len(x) == 13:
            return x[:8] + x[9:]
        return x
    base = Path(path_2009)
    if similarity_reduced:
        base = base / "class_II_similarity_reduced_5cv_sep"
    else:
        base = base / "class_II_all_split_5cv"
    parts = []
    for f in base.glob("*.txt"):
        frame = pd.read_csv(f, header=None, sep="\t").drop([3, 5], axis=1)
        frame.columns = ["species", "allele", "sequence_len", "sequence", "aff"]
        # cv fold and train/test flag are encoded in the file name
        frame["cv_fold"] = f.stem.split("_")[-1]
        stem_tokens = np.array(f.stem.split("_"))
        frame["traintest"] = stem_tokens[np.logical_or(stem_tokens == "train", stem_tokens == "test")][0]
        parts.append(frame)
    df_2009 = pd.concat(parts)
    df_2009.rename(columns={"aff": "label"}, inplace=True)
    # stable per-sequence id (md5 of the peptide string)
    df_2009["ID"] = df_2009.sequence.apply(lambda x: hashlib.md5(x.encode('utf-8')).hexdigest())
    # rename the alleles to match the format of the corresponding test dataset from https://doi.org/10.1371/journal.pcbi.1006457.s010
    df_2009["allele"] = df_2009["allele"].apply(mhc_ii_allelename_format)
    # transform ic50 values
    df_2009["label"] = _transform_ic50(df_2009, how="log", inequality_offset=False, label_column="label")
    return df_2009
def prepare_mhci_pcbi(path_pcbi="../data/journal.pcbi.1006457.s009",mer=None):
    '''prepares mhci test dataset from https://doi.org/10.1371/journal.pcbi.1006457.s009

    mer: peptide length to load (e.g. 9 or 10); None loads both 9mer and 10mer
    returns: DataFrame with columns sequence, label (log-transformed ic50), allele, ID
    '''
    def kim_allele_format(x):
        # convert the file-stem allele spelling into the Kim-style spelling
        return x[:5] + "-" + x[8:10] + x[13:]
    if mer is None:
        dirs = [Path(path_pcbi) / "9mer", Path(path_pcbi) / "10mer"]
    else:
        dirs = [Path(path_pcbi) / "{}mer".format(mer)]
    frames = []
    for directory in dirs:
        for f in Path(directory).glob("*.txt"):
            frame = pd.read_csv(f, header=None, sep="\t")
            if len(frame.columns) != 2:
                # file does not have the expected two-column format -> skip
                continue
            frame.columns = ["sequence", "aff"]
            frame["dataset"] = f.stem  # allele is encoded in the file name
            frames.append(frame)
    df_pcbi = pd.concat(frames, ignore_index=True)
    df_pcbi.rename(columns={"dataset": "allele", "aff": "label"}, inplace=True)
    # stable per-sequence id (md5 of the peptide string)
    df_pcbi["ID"] = df_pcbi.sequence.apply(lambda x: hashlib.md5(x.encode('utf-8')).hexdigest())
    df_pcbi["allele"] = df_pcbi["allele"].apply(kim_allele_format)
    # transform ic50 values
    df_pcbi["label"] = _transform_ic50(df_pcbi, how="log", inequality_offset=False, label_column="label")
    return df_pcbi
def prepare_mhcii_pcbi(path_pcbi="../data/journal.pcbi.1006457.s010"):
    '''Prepare the MHC-II test dataset from https://doi.org/10.1371/journal.pcbi.1006457.s010.

    path_pcbi: root directory of the extracted archive (15mer files are used)
    Returns a dataframe with sequence, allele, label (log-transformed affinity)
    and an md5-based ID.
    '''
    data_dir = Path(path_pcbi) / "15mer"
    frames = []
    for txt_file in data_dir.glob("*.txt"):
        frame = pd.read_csv(txt_file, header=None, sep="\t")
        # skip files that do not follow the two-column (sequence, affinity) format
        if len(frame.columns) != 2:
            continue
        frame.columns = ["sequence", "aff"]
        frame["dataset"] = txt_file.stem
        frames.append(frame)
    df_pcbi = pd.concat(frames)
    df_pcbi.rename(columns={"dataset": "allele", "aff": "label"}, inplace=True)
    # NOTE: restricting to alleles present in the mhcii iedb 2009 training set
    # was considered but is deliberately left disabled here.
    df_pcbi["ID"] = df_pcbi.sequence.apply(lambda s: hashlib.md5(s.encode('utf-8')).hexdigest())
    # transform ic50 values to log-scaled labels
    df_pcbi["label"] = _transform_ic50(df_pcbi, how="log", inequality_offset=False, label_column="label")
    return df_pcbi
#############################cleavage stuff
#parsing cleavage positions from chipper/netchop
def parse_cleavage_chipper(filename, probs=True, return_sequences=True):
    '''
    Parse chipper cleavage output into per-protein cleavage positions.

    probs: if True, parse chipper -p output (per-position probability digits 0-9)
           and return one index array per digit for each protein;
           if False, return the indices of '+' characters.
    return_sequences: if True, return an iterator of (sequence, positions) pairs,
                      otherwise just the list of positions.
    Typical input: result of ./chipper -i ../share/chipper/test.fa | gunzip > test2.dat
    '''
    with open(filename, 'r') as handle:
        lines = handle.readlines()
    # records are marked by a line containing '@'
    record_starts = [idx for idx, line in enumerate(lines) if "@" in line]
    positions = []
    sequences = []
    for start in record_starts:
        # the cleavage string is 3 lines below the record marker
        cleavage_string = lines[start + 3].strip()
        chars = np.array(list(cleavage_string))
        if probs is False:
            # '+' marks a predicted cleavage site
            cleavage_ids = np.where(chars == '+')[0]
        else:
            # one index array per probability digit 0..9
            cleavage_ids = [np.where(chars == str(digit))[0] for digit in range(10)]
        positions.append(cleavage_ids)
        if return_sequences is True:
            # the sequence itself is the line directly after the marker
            sequences.append(lines[start + 1].strip())
    if return_sequences is True:
        return zip(sequences, positions)
    return positions
def parse_cleavage_netchop_short(filename, return_sequences=True, startidx=4):
    '''
    Parse chipper/netchop "short" output into per-protein cleavage positions
    (indices of 'S' predictions); with return_sequences=True an iterator of
    (sequence, positions) pairs is returned instead.
    parses chipper output e.g. result of ./chipper -s -i ../share/chipper/test.fa >test2.dat
    note: the chipper output is buggy and does not return the last AA (but it cannot make predictions for it anyway)
    startidx=4 for chipper and startidx=21 for netchop (number of header lines before the first data line)
    '''
    with open(filename, 'r') as f:
        tmp=f.readlines()
    indices=[]
    # remember the line just before every "-----" separator line
    for i,t in enumerate(tmp):
        if("-----" in t):
            indices.append(i-1)
    # NOTE(review): assumes separators alternate — even entries close a record,
    # odd entries precede the next record's header (+4 skips that header).
    # Verify against actual chipper/netchop output.
    endindices=indices[::2]
    startindices=[startidx]+[t+4 for t in indices[1::2][:-1]]
    output = []
    output_seq = []
    for s,e in zip(startindices,endindices):
        #print(s,e)
        # prediction lines and sequence lines are interleaved: every other line
        ids= range(s,e+1,2)
        ids_seq = range(s-1,e,2)
        #print(list(ids))
        name = tmp[s-2]
        # concatenate the wrapped prediction lines into one string per protein
        cleavage_string = "".join([tmp[i].strip() for i in ids])
        if(return_sequences):
            output_seq.append("".join([tmp[i].strip() for i in ids_seq]))
        # 'S' marks a predicted cleavage site
        cleavage_ids = np.where(np.array([t for t in cleavage_string])=='S')[0]
        output.append(cleavage_ids)
        #print("\n\nname",name)
        #print(cleavage_string)
        #print(len(cleavage_string))
        #print(len(cleavage_ids))
    if(return_sequences is True):
        return zip(output_seq, output)
    else:
        return output
def cut_seq_chipper(seq,cleavs,threshold=5,min_length=5,max_length=20, skip_cleav=0):
    '''Cut a sequence into peptides according to cleavage predictions.

    seq: sequence as string
    cleavs: cleavage predictions (e.g. output of parse_cleavage_...); with a
        threshold this is the per-digit list form, otherwise a flat array
    threshold: cutting threshold 0...9 (e.g. 8 corresponds to threshold 0.8);
        None in case the cleavage file was parsed with probs=False
    min_length: minimum length of resulting peptides
    max_length: maximum length of resulting peptides (0 disables the bound)
    skip_cleav: additionally emit peptides skipping up to this many cleavage sites
    Returns the unique peptides as a sorted numpy array.
    '''
    if threshold is not None:
        # keep all positions predicted at or above the threshold digit
        cleavs = np.concatenate(cleavs[threshold:], axis=0)
    sites = np.sort(cleavs)
    n_sites = len(sites)
    fragments = []
    # cuts happen AFTER the specified position, cf. netchop documentation
    for idx in range(n_sites):
        left = 0 if idx == 0 else sites[idx - 1] + 1
        for skip in range(skip_cleav + 1):
            if idx + skip < n_sites:
                fragments.append(seq[left: sites[idx + skip] + 1])
    if n_sites > 0:
        # tail fragments running from a cleavage site to the end of the sequence
        for skip in range(skip_cleav + 1):
            if skip < n_sites:
                fragments.append(seq[sites[-1 - skip] + 1:])
    kept = [p for p in fragments
            if len(p) >= min_length and (max_length == 0 or len(p) <= max_length)]
    return np.unique(kept)
def cut_chipper(filename="test.dat",threshold=5, min_length=5, max_length=20):
    '''
    Load chipper output and return the corresponding set of unique peptides.
    usage: run chipper on a fasta file, ./chipper -i input.fasta | gunzip > cleavage.dat,
    then pass cleavage.dat to this function.
    '''
    parsed = parse_cleavage_chipper(filename, probs=True, return_sequences=True)
    per_protein = [
        cut_seq_chipper(seq, cleavs, threshold=threshold,
                        min_length=min_length, max_length=max_length)
        for seq, cleavs in parsed
    ]
    return np.unique(np.concatenate(per_protein, axis=0))
def cut_netchop(filename="test.dat", min_length=5, max_length=20, skip_cleav=0):
    '''
    Load netchop short-format output and return the corresponding set of
    unique peptides (deterministic cutting at predicted sites).
    '''
    parsed = parse_cleavage_netchop_short(filename, return_sequences=True, startidx=21)
    per_protein = [
        cut_seq_chipper(seq, cleavs, threshold=None, min_length=min_length,
                        max_length=max_length, skip_cleav=skip_cleav)
        for seq, cleavs in parsed
    ]
    return np.unique(np.concatenate(per_protein, axis=0))
#sequence cleaner (drops sequences with unknown AAs from fasta)
#adopted from https://biopython.org/wiki/Sequence_Cleaner
def sequence_cleaner(fasta_file_in, fasta_file_out, min_length=0, drop_aas=("X", "Z", "B")):
    '''Drop sequences with unknown amino acids from a fasta file and deduplicate.

    adopted from https://biopython.org/wiki/Sequence_Cleaner

    fasta_file_in/fasta_file_out: input/output fasta paths (str or Path)
    min_length: drop sequences shorter than this
    drop_aas: sequences containing any of these letters are dropped
        (fix: tuple default instead of a mutable list default)
    '''
    # hash table mapping each unique (uppercased) sequence to its concatenated IDs
    sequences = {}
    # Using the Biopython fasta parser we can read our fasta input
    for seq_record in SeqIO.parse(str(fasta_file_in), "fasta"):
        sequence = str(seq_record.seq).upper()
        # keep only sequences that are long enough and free of unwanted AAs
        if len(sequence) >= min_length and all(sequence.count(a) == 0 for a in drop_aas):
            if sequence not in sequences:
                sequences[sequence] = seq_record.id
            else:
                # duplicate sequence: concatenate the ID of the current record
                # to the one already stored
                sequences[sequence] += "_" + seq_record.id
    # Write the clean sequences as fasta
    with open(str(fasta_file_out), "w+") as output_file:
        for sequence in sequences:
            ids = sequences[sequence]
            # for uniprot-style "db|accession|name" headers keep only the accession
            if "|" in ids:
                ID = ids.split('|')[1]
            else:
                ID = ids
            output_file.write(">" + ID + "\n" + sequence + "\n")
    print("Clean fasta file written to " + str(fasta_file_out))
def chipper_digest(fasta_file, chipper_path="chipper", threshold=7, min_length=5, max_length=20,verbose=True):
    '''Cuts proteins from given fasta_file into peptides using chipper cleavage predictions.

    call with chipper_path e.g. ./chipper for a custom chipper path (otherwise chipper has to be in searchpath)
    fasta_file: input fasta; must provide .parent/.stem (i.e. a pathlib.Path)
    threshold: probability digit 0..9 passed on to cut_chipper
    min_length/max_length: peptide length bounds passed on to cut_chipper
    Returns the array of unique peptides. Writes and removes temporary files
    (clean fasta, cleavage.dat) in the working directory as a side effect.
    '''
    # 1. save clean fasta file
    if(verbose):
        print("Saving clean fasta file...")
    fasta_file_clean = fasta_file.parent/("clean_"+fasta_file.stem+".fasta")
    sequence_cleaner(fasta_file, fasta_file_clean , min_length=min_length, drop_aas=["X","Z","B","U","O"])#also drop U and O as chipper cannot handle them
    # 2. run chipper (exec has to be in searchpath e.g. ~/bin)
    if(verbose):
        print("Running chipper for cleavage prediction...")
    command = chipper_path+" -p -i "+str(fasta_file_clean)+" -o cleavage.dat.gz"
    if(verbose):
        print(command)
    process= subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    output, error = process.communicate()
    if(verbose):
        print(output)
    if(error !=""):
        print(error)
    # decompress the chipper output in place (cleavage.dat.gz -> cleavage.dat)
    command2 = "gunzip -f cleavage.dat.gz"
    if(verbose):
        print(command2)
    process2= subprocess.Popen(command2.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    output, error = process2.communicate()
    if(verbose):
        print(output)
    if(error !=""):
        print(error)
    # 3. cut proteins according to predictions
    if(verbose):
        print("Cutting proteins according to predictions...")
    peptides = cut_chipper("cleavage.dat",threshold=threshold,min_length=min_length,max_length=max_length)
    print("extracted ",len(peptides), "peptides.")
    # clean up temporary files
    fasta_file_clean.unlink()
    #Path("cleavage.dat.tar.gz").unlink()
    Path("cleavage.dat").unlink()
    return peptides
def netchop_digest(fasta_file, netchop_path="netchop", threshold=0.7, min_length=5, max_length=20, repeats=10, verbose=True):
    '''Cuts proteins from given fasta_file into peptides using netchop cleavage predictions (stochastic digestion).

    netchop_path: e.g. ./netchop to call netchop in present directory (the default value requires to place symlink to netchop tcsh in netchop3.1 into searchpath e.g. ~/bin) BE CAREFUL NETCHOP TMP PATH MAY NOT BE TOO LONG
    threshold: netchop cleavage threshold (passed via -t)
    repeats: number of stochastic digests per protein (see cut_netchop_stochastic)
    Returns a flat list of sampled peptides. Writes a clean fasta and a
    netchop output file next to the input as a side effect (not removed).
    '''
    # 1. save clean fasta file
    if(verbose):
        print("Saving clean fasta file...")
    if(isinstance(fasta_file,str)):
        fasta_file = Path(fasta_file)
    fasta_file_clean = fasta_file.parent/("clean_"+fasta_file.stem+".fasta")
    sequence_cleaner(fasta_file, fasta_file_clean , min_length=min_length, drop_aas=["X","Z","B","U","O"])#also drop U and O as netChop cannot handle them?
    # 2. run netChop (exec has to be in searchpath e.g. ~/bin) BE CAREFUL NETCHOP TMP PATH MAY NOT BE TOO LONG
    fasta_file_clean_output = fasta_file_clean.parent / (fasta_file_clean.stem+".out")
    if(verbose):
        print("Running netchop for cleavage prediction...")
    command = netchop_path+" "+str(fasta_file_clean)+" -t "+str(threshold)
    if(verbose):
        print(command)
    # netchop writes its report to stdout; capture it in the .out file
    with open(fasta_file_clean_output,"wb") as out:
        process= subprocess.Popen(command.split(), stdout=out, stderr=subprocess.PIPE, universal_newlines=True)
        output, error = process.communicate()
        #if(verbose):
        #    print(output)
        if(error !=""):
            print(error)
    cleavage_df = parse_netchop_long(fasta_file_clean_output)
    # 3. cut proteins according to predictions
    if(verbose):
        print("Cutting proteins according to predictions...")
    peptides = cut_netchop_stochastic(cleavage_df,repeats=repeats,min_len=min_length,max_len=max_length)
    print("extracted ",len(peptides), "peptides.")
    #fasta_file_clean.unlink()
    #fasta_file_clean_output.unlink()
    return peptides
def parse_netchop_long(filename):
    '''Parse netchop output (long form) into a dataframe.

    Returns a dataframe indexed by accession with columns:
      cleavages: list of per-position cleavage scores (floats)
      sequence:  protein sequence reassembled from the table rows
    '''
    with open(filename, "r") as handle:
        lines = handle.readlines()
    # each record's score table starts two lines after its header line
    starts = np.where([x == ' pos AA C score Ident\n' for x in lines])[0] + 2
    # candidate table ends: the line preceding each blank line
    blank_before = np.where([x == '\n' for x in lines])[0] - 1
    ends = []
    for s in starts:
        # the first candidate after this start closes the table
        for e in blank_before:
            if e > s:
                ends.append(e)
                break
    cleavages = []
    accessions = []
    sequences = []
    for s, e in zip(starts, ends):
        # whitespace-split every table row into its columns
        rows = [[tok.strip() for tok in line.split(" ") if tok != ""] for line in lines[s:e]]
        cleavages.append([float(row[3]) for row in rows])
        # accession comes from the Ident column of the first row, stripped of db prefixes
        accessions.append(rows[0][-1].split(" ")[-1].replace("tr|", "").replace("sp|", "").replace("|", ""))
        sequences.append("".join(row[1] for row in rows))
    df = pd.DataFrame({"accession": accessions, "cleavages": cleavages, "sequence": sequences})
    return df.set_index("accession")
def netchop_cleavage_from_fasta(fasta_file,netchop_path="netchop",min_length=5,verbose=True):
    '''Return netchop cleavage probabilities for the given fasta file as a dataframe.

    fasta_file: input fasta (str or Path); netchop_path: netchop executable
    min_length: accepted for interface symmetry but currently unused in this function
    Runs netchop as a subprocess, parses its long-form output and deletes the
    temporary .out file afterwards.
    '''
    if(isinstance(fasta_file,str)):
        fasta_file = Path(fasta_file)
    # 2. run netChop (exec has to be in searchpath e.g. ~/bin) BE CAREFUL NETCHOP TMP PATH MAY NOT BE TOO LONG
    fasta_file_clean_output = fasta_file.parent / (fasta_file.stem+".out")
    if(verbose):
        print("Running netchop for cleavage prediction...")
    command = netchop_path+" "+str(fasta_file)
    if(verbose):
        print(command)
    # netchop writes its report to stdout; capture it in the .out file
    with open(fasta_file_clean_output,"wb") as out:
        process= subprocess.Popen(command.split(), stdout=out, stderr=subprocess.PIPE, universal_newlines=True)
        output, error = process.communicate()
        #if(verbose):
        #    print(output)
        if(error !=""):
            print(error)
    df = parse_netchop_long(fasta_file_clean_output)
    # remove the temporary netchop output file
    fasta_file_clean_output.unlink()
    return df
def netchop_cleavage_from_df(df,netchop_path="netchop",bs=100,min_length=5):
    '''Obtain netchop cleavage probabilities for a dataframe with sequences.

    Sequences containing nonstandard amino acids (X, Z, B, U, O) are dropped;
    the remainder is processed in batches of bs via a temporary fasta file.
    Returns the concatenated per-batch cleavage dataframes.
    '''
    nonstandard = ["X", "Z", "B", "U", "O"]
    # keep only sequences free of nonstandard amino acids
    mask = df.sequence.apply(lambda seq: np.all([not (aa in seq) for aa in nonstandard]))
    df_clean = df[mask == True].copy()
    n = len(df_clean)
    results = []
    for batch_start in tqdm(range(0, n, bs)):
        batch = df_clean.iloc[batch_start:min(batch_start + bs, n)]
        df_to_fasta(batch, "tmp.fasta")
        results.append(netchop_cleavage_from_fasta(Path("tmp.fasta"),
                                                   netchop_path=netchop_path,
                                                   min_length=min_length,
                                                   verbose=False))
    # remove the temporary fasta file
    Path("tmp.fasta").unlink()
    return pd.concat(results)
def sample_using_cleavages(seq,cleavages,repeats=10,min_len=5,max_len=20):
    '''Stochastically cut a single sequence using per-position cleavage probabilities.

    Each repeat draws one random cut pattern (a position is cut with its given
    probability; the cut happens AFTER the position, cf. netchop documentation)
    and keeps the fragments whose length lies in [min_len, max_len].
    Returns the fragments of all repeats concatenated into one list.
    '''
    seq_len = len(cleavages)
    fragments = []
    for _ in range(repeats):
        # draw all cut decisions for this repeat at once
        cut_mask = np.random.uniform(size=seq_len) < cleavages
        ends = list(np.where(cut_mask)[0] + 1)
        # always terminate the final fragment at the end of the sequence
        if not ends or ends[-1] != seq_len:
            ends.append(seq_len)
        prev = 0
        for end in ends:
            if min_len <= end - prev <= max_len:
                fragments.append(seq[prev:end])
            prev = end
    return fragments
def cut_netchop_stochastic(cleavage_df,repeats=10,min_len=5,max_len=20):
    '''Cut proteins stochastically based on netchop cleavage probabilities.

    cleavage_df: dataframe with 'sequence' and 'cleavages' columns
        (as produced by parse_netchop_long)
    repeats: number of stochastic digests per protein
    min_len/max_len: peptide length bounds (see sample_using_cleavages)
    Returns a flat list of all sampled peptides.
    '''
    fragments = []
    for i, row in tqdm(cleavage_df.iterrows()):
        tmp = sample_using_cleavages(row.sequence, row.cleavages,
                                     repeats=repeats, min_len=min_len, max_len=max_len)
        fragments.append(tmp)
    # fix: removed an unreachable trailing `return pd.concat(dfs_cleavage)`
    # that referenced an undefined variable (dead code after this return)
    return [item for sublist in fragments for item in sublist]
def hobohl_similarity(query, seqs,threshold=0.8):
    '''Return all sequences similar to the query in the hobohl sense,
    i.e. identical length and sequence identity >= threshold.

    query: sequence string; seqs: numpy array of sequence strings.
    The query itself is never included in the result.
    '''
    same_length_mask = np.array([len(s) == len(query) for s in seqs])
    candidates = seqs[np.where(same_length_mask)[0]]
    similar = []
    for cand in candidates:
        if cand == query:
            continue
        # fraction of identical positions
        identity = sum(int(a == b) for a, b in zip(query, cand)) / len(query)
        if identity >= threshold:
            similar.append(cand)
    return similar
def compute_hobohl_clusters(seqs,threshold=0.8,exploded=True):
    '''computes clusters of hobohl similar peptides from list of peptides

    seqs: numpy array of peptide strings (must support fancy indexing)
    threshold: sequence identity threshold (see hobohl_similarity)
    exploded: if True return one row per cluster member (via explode_clusters_df),
        otherwise one row per cluster
    '''
    # neighbor list per sequence: all hobohl-similar sequences
    neighbors = np.array([hobohl_similarity(q,seqs,threshold=threshold) for q in seqs])
    neighbors_length = np.array([len(x) for x in neighbors])
    # process sequences with the fewest neighbors first
    idxs = np.argsort(neighbors_length)
    neighbors_sorted = list(neighbors[idxs].copy())
    seqs_sorted = list(seqs[idxs].copy())
    clusters = []
    while(len(seqs_sorted)>0):
        seq = seqs_sorted[0]
        neigh = neighbors_sorted[0]
        # indices of clusters that already contain one of this sequence's neighbors
        existing=np.where([np.any([n in c for n in neigh]) for c in clusters])[0]
        if(len(existing)>0):
            if(len(existing)>1):
                #join existing clusters
                # iterate in reverse so deleting higher indices keeps the
                # lower ones (including existing[0]) valid
                for e2 in reversed(existing[1:]):
                    for c2 in clusters[e2]:
                        clusters[existing[0]].append(c2)
                    del clusters[e2]
            # add the current sequence to the (merged) cluster
            clusters[existing[0]].append(seqs_sorted[0])
        else:
            # no neighbor clustered yet: start a new cluster
            clusters.append([seqs_sorted[0]])
        del seqs_sorted[0]
        del neighbors_sorted[0]
    # one row per cluster: members, first member as representative, synthetic id
    df=pd.DataFrame({"members":clusters,"repr_accession":[x[0] for x in clusters],"entry_id":["c"+str(x) for x in range(len(clusters))]}).set_index("entry_id")
    if(exploded):
        return explode_clusters_df(df)
    else:
        return df
| [
"Bio.Seq.Seq",
"numpy.random.seed",
"Bio.SeqIO.write",
"numpy.maximum",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.argsort",
"pathlib.Path",
"numpy.unique",
"pandas.DataFrame",
"numpy.power",
"re.search",
"pandas.concat",
"tqdm.tqdm",
"numpy.minimum",
"Bio.Se... | [((1788, 1801), 'tqdm.tqdm', 'tqdm', (['context'], {}), '(context)\n', (1792, 1801), True, 'from tqdm import tqdm as tqdm\n'), ((7328, 7364), 'Bio.SeqIO.parse', 'SeqIO.parse', (['filename', '"""uniprot-xml"""'], {}), "(filename, 'uniprot-xml')\n", (7339, 7364), False, 'from Bio import SeqIO\n'), ((7392, 7403), 'tqdm.tqdm', 'tqdm', (['sprot'], {}), '(sprot)\n', (7396, 7403), True, 'from tqdm import tqdm as tqdm\n'), ((11391, 11404), 'tqdm.tqdm', 'tqdm', (['context'], {}), '(context)\n', (11395, 11404), True, 'from tqdm import tqdm as tqdm\n'), ((17172, 17200), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data'], {}), '(data)\n', (17194, 17200), True, 'import pandas as pd\n'), ((17270, 17302), 'Bio.SeqIO.parse', 'SeqIO.parse', (['fasta_path', '"""fasta"""'], {}), "(fasta_path, 'fasta')\n", (17281, 17302), False, 'from Bio import SeqIO\n'), ((17398, 17415), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {}), '(res)\n', (17410, 17415), True, 'import pandas as pd\n'), ((17783, 17807), 'tqdm.tqdm', 'tqdm', (['protein_seqrecords'], {}), '(protein_seqrecords)\n', (17787, 17807), True, 'from tqdm import tqdm as tqdm\n'), ((27441, 27455), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (27445, 27455), False, 'from pathlib import Path\n'), ((27568, 27600), 'pandas.read_csv', 'pd.read_csv', (['curated_withMS_path'], {}), '(curated_withMS_path)\n', (27579, 27600), True, 'import pandas as pd\n'), ((29588, 29602), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (29592, 29602), False, 'from pathlib import Path\n'), ((29986, 30043), 'pandas.DataFrame', 'pd.DataFrame', (['pseudo_mhc'], {'columns': "('allele', 'sequence1')"}), "(pseudo_mhc, columns=('allele', 'sequence1'))\n", (29998, 30043), True, 'import pandas as pd\n'), ((32348, 32406), 'pandas.read_csv', 'pd.read_csv', (["(data_dir / 'blind.1' / bdblind_file)"], {'sep': '"""\t"""'}), "(data_dir / 'blind.1' / bdblind_file, sep='\\t')\n", (32359, 32406), True, 'import pandas as 
pd\n'), ((33928, 33973), 'pandas.concat', 'pd.concat', (['[bd09, bdblind]'], {'ignore_index': '(True)'}), '([bd09, bdblind], ignore_index=True)\n', (33937, 33973), True, 'import pandas as pd\n'), ((35842, 35856), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (35846, 35856), False, 'from pathlib import Path\n'), ((37367, 37399), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'random_seed'}), '(seed=random_seed)\n', (37381, 37399), True, 'import numpy as np\n'), ((37846, 37860), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (37850, 37860), False, 'from pathlib import Path\n'), ((39431, 39445), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (39435, 39445), False, 'from pathlib import Path\n'), ((39459, 39498), 'pandas.read_csv', 'pd.read_csv', (["(data_path / 'HPV_data.csv')"], {}), "(data_path / 'HPV_data.csv')\n", (39470, 39498), True, 'import pandas as pd\n'), ((41142, 41156), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (41146, 41156), False, 'from pathlib import Path\n'), ((41170, 41271), 'pandas.read_excel', 'pd.read_excel', (["(data_path / 'Covid19-Intavis-Immunitrack-datasetV2.xlsx')"], {'sheet_name': 'allele_sheets'}), "(data_path / 'Covid19-Intavis-Immunitrack-datasetV2.xlsx',\n sheet_name=allele_sheets)\n", (41183, 41271), True, 'import pandas as pd\n'), ((44361, 44406), 'pandas.concat', 'pd.concat', (['data'], {'ignore_index': '(True)', 'sort': '(True)'}), '(data, ignore_index=True, sort=True)\n', (44370, 44406), True, 'import pandas as pd\n'), ((47391, 47436), 'pandas.concat', 'pd.concat', (['data'], {'ignore_index': '(True)', 'sort': '(True)'}), '(data, ignore_index=True, sort=True)\n', (47400, 47436), True, 'import pandas as pd\n'), ((48404, 48419), 'pathlib.Path', 'Path', (['path_iedb'], {}), '(path_iedb)\n', (48408, 48419), False, 'from pathlib import Path\n'), ((48800, 48843), 'pandas.concat', 'pd.concat', (['dfs_traintest'], {'ignore_index': '(True)'}), '(dfs_traintest, 
ignore_index=True)\n', (48809, 48843), True, 'import pandas as pd\n'), ((49000, 49028), 'pandas.read_csv', 'pd.read_csv', (['path_jensen_csv'], {}), '(path_jensen_csv)\n', (49011, 49028), True, 'import pandas as pd\n'), ((49638, 49759), 'sklearn.model_selection.train_test_split', 'train_test_split', (["df_traintest[df_traintest['traintest'] == 'train']"], {'test_size': '(0.1)', 'random_state': '(42)', 'stratify': 'binder'}), "(df_traintest[df_traintest['traintest'] == 'train'],\n test_size=0.1, random_state=42, stratify=binder)\n", (49654, 49759), False, 'from sklearn.model_selection import train_test_split\n'), ((49957, 50014), 'pandas.concat', 'pd.concat', (['[df_train, df_val, df_test]'], {'ignore_index': '(True)'}), '([df_train, df_val, df_test], ignore_index=True)\n', (49966, 50014), True, 'import pandas as pd\n'), ((50591, 50606), 'pathlib.Path', 'Path', (['path_2009'], {}), '(path_2009)\n', (50595, 50606), False, 'from pathlib import Path\n'), ((51235, 51254), 'pandas.concat', 'pd.concat', (['dfs_2009'], {}), '(dfs_2009)\n', (51244, 51254), True, 'import pandas as pd\n'), ((52548, 52586), 'pandas.concat', 'pd.concat', (['dfs_pcbi'], {'ignore_index': '(True)'}), '(dfs_pcbi, ignore_index=True)\n', (52557, 52586), True, 'import pandas as pd\n'), ((53648, 53667), 'pandas.concat', 'pd.concat', (['dfs_pcbi'], {}), '(dfs_pcbi)\n', (53657, 53667), True, 'import pandas as pd\n'), ((57590, 57605), 'numpy.sort', 'np.sort', (['cleavs'], {}), '(cleavs)\n', (57597, 57605), True, 'import numpy as np\n'), ((58148, 58167), 'numpy.unique', 'np.unique', (['peptides'], {}), '(peptides)\n', (58157, 58167), True, 'import numpy as np\n'), ((65853, 65943), 'pandas.DataFrame', 'pd.DataFrame', (["{'accession': accessions, 'cleavages': cleavages, 'sequence': sequences}"], {}), "({'accession': accessions, 'cleavages': cleavages, 'sequence':\n sequences})\n", (65865, 65943), True, 'import pandas as pd\n'), ((67375, 67390), 'tqdm.tqdm', 'tqdm', (['start_idx'], {}), '(start_idx)\n', 
(67379, 67390), True, 'from tqdm import tqdm as tqdm\n'), ((67646, 67669), 'pandas.concat', 'pd.concat', (['dfs_cleavage'], {}), '(dfs_cleavage)\n', (67655, 67669), True, 'import pandas as pd\n'), ((68958, 68981), 'pandas.concat', 'pd.concat', (['dfs_cleavage'], {}), '(dfs_cleavage)\n', (68967, 68981), True, 'import pandas as pd\n'), ((69721, 69749), 'numpy.argsort', 'np.argsort', (['neighbors_length'], {}), '(neighbors_length)\n', (69731, 69749), True, 'import numpy as np\n'), ((14671, 14703), 'Bio.SeqIO.parse', 'SeqIO.parse', (['fasta_path', '"""fasta"""'], {}), "(fasta_path, 'fasta')\n", (14682, 14703), False, 'from Bio import SeqIO\n'), ((14971, 15003), 're.search', 're.search', (['"""PE=\\\\d"""', 'description'], {}), "('PE=\\\\d', description)\n", (14980, 15003), False, 'import re\n'), ((15056, 15095), 're.search', 're.search', (['"""OS=.* (?=OX=)"""', 'description'], {}), "('OS=.* (?=OX=)', description)\n", (15065, 15095), False, 'import re\n'), ((15899, 15931), 'Bio.SeqIO.parse', 'SeqIO.parse', (['fasta_path', '"""fasta"""'], {}), "(fasta_path, 'fasta')\n", (15910, 15931), False, 'from Bio import SeqIO\n'), ((24550, 24557), 'tqdm.tqdm', 'tqdm', (['f'], {}), '(f)\n', (24554, 24557), True, 'from tqdm import tqdm as tqdm\n'), ((26293, 26313), 'numpy.minimum', 'np.minimum', (['x', '(50000)'], {}), '(x, 50000)\n', (26303, 26313), True, 'import numpy as np\n'), ((31838, 31852), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (31842, 31852), False, 'from pathlib import Path\n'), ((31974, 32036), 'pandas.read_csv', 'pd.read_csv', (["(data_dir / 'bd2009.1' / bd09_cv_sr_file)"], {'sep': '"""\t"""'}), "(data_dir / 'bd2009.1' / bd09_cv_sr_file, sep='\\t')\n", (31985, 32036), True, 'import pandas as pd\n'), ((36096, 36126), 'pandas.read_csv', 'pd.read_csv', (['curated_noMS_path'], {}), '(curated_noMS_path)\n', (36107, 36126), True, 'import pandas as pd\n'), ((37874, 37936), 'pandas.read_csv', 'pd.read_csv', (["(data_path / 
'abelin_peptides.all_predictions.csv')"], {}), "(data_path / 'abelin_peptides.all_predictions.csv')\n", (37885, 37936), True, 'import pandas as pd\n'), ((41279, 41303), 'pandas.concat', 'pd.concat', (['df'], {'sort': '(True)'}), '(df, sort=True)\n', (41288, 41303), True, 'import pandas as pd\n'), ((48116, 48160), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'header': 'None', 'sep': '"""\t"""'}), "(filename, header=None, sep='\\t')\n", (48127, 48160), True, 'import pandas as pd\n'), ((53242, 53257), 'pathlib.Path', 'Path', (['path_pcbi'], {}), '(path_pcbi)\n', (53246, 53257), False, 'from pathlib import Path\n'), ((53344, 53381), 'pandas.read_csv', 'pd.read_csv', (['f'], {'header': 'None', 'sep': '"""\t"""'}), "(f, header=None, sep='\\t')\n", (53355, 53381), True, 'import pandas as pd\n'), ((57537, 57579), 'numpy.concatenate', 'np.concatenate', (['cleavs[threshold:]'], {'axis': '(0)'}), '(cleavs[threshold:], axis=0)\n', (57551, 57579), True, 'import numpy as np\n'), ((58719, 58751), 'numpy.concatenate', 'np.concatenate', (['peptides'], {'axis': '(0)'}), '(peptides, axis=0)\n', (58733, 58751), True, 'import numpy as np\n'), ((59350, 59382), 'numpy.concatenate', 'np.concatenate', (['peptides'], {'axis': '(0)'}), '(peptides, axis=0)\n', (59364, 59382), True, 'import numpy as np\n'), ((63612, 63628), 'pathlib.Path', 'Path', (['fasta_file'], {}), '(fasta_file)\n', (63616, 63628), False, 'from pathlib import Path\n'), ((66178, 66194), 'pathlib.Path', 'Path', (['fasta_file'], {}), '(fasta_file)\n', (66182, 66194), False, 'from pathlib import Path\n'), ((2060, 2078), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (2072, 2078), True, 'import pandas as pd\n'), ((8536, 8554), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (8548, 8554), True, 'import pandas as pd\n'), ((11689, 11707), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (11701, 11707), True, 'import pandas as pd\n'), ((15407, 15425), 'pandas.DataFrame', 
'pd.DataFrame', (['rows'], {}), '(rows)\n', (15419, 15425), True, 'import pandas as pd\n'), ((16448, 16491), 'Bio.SeqIO.write', 'SeqIO.write', (['record', 'output_handle', '"""fasta"""'], {}), "(record, output_handle, 'fasta')\n", (16459, 16491), False, 'from Bio import SeqIO\n'), ((16928, 16971), 'Bio.SeqIO.write', 'SeqIO.write', (['record', 'output_handle', '"""fasta"""'], {}), "(record, output_handle, 'fasta')\n", (16939, 16971), False, 'from Bio import SeqIO\n'), ((21636, 21650), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (21648, 21650), True, 'import pandas as pd\n'), ((24945, 24963), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (24957, 24963), True, 'import pandas as pd\n'), ((26348, 26368), 'numpy.minimum', 'np.minimum', (['x', '(50000)'], {}), '(x, 50000)\n', (26358, 26368), True, 'import numpy as np\n'), ((32072, 32134), 'pandas.read_csv', 'pd.read_csv', (["(data_dir / 'bd2009.1' / bd09_cv_gs_file)"], {'sep': '"""\t"""'}), "(data_dir / 'bd2009.1' / bd09_cv_gs_file, sep='\\t')\n", (32083, 32134), True, 'import pandas as pd\n'), ((36163, 36195), 'pandas.read_csv', 'pd.read_csv', (['curated_withMS_path'], {}), '(curated_withMS_path)\n', (36174, 36195), True, 'import pandas as pd\n'), ((43774, 43891), 'pandas.read_csv', 'pd.read_csv', (['file'], {'header': 'None', 'delimiter': "(' ' if not MS else '\\t')", 'names': "('sequence', 'label', 'allele', 'ic50')"}), "(file, header=None, delimiter=' ' if not MS else '\\t', names=(\n 'sequence', 'label', 'allele', 'ic50'))\n", (43785, 43891), True, 'import pandas as pd\n'), ((46288, 46388), 'pandas.read_csv', 'pd.read_csv', (['file'], {'header': 'None', 'delimiter': '"""\t"""', 'names': "('sequence', 'label', 'allele', 'context')"}), "(file, header=None, delimiter='\\t', names=('sequence', 'label',\n 'allele', 'context'))\n", (46299, 46388), True, 'import pandas as pd\n'), ((50798, 50813), 'pathlib.Path', 'Path', (['path_2009'], {}), '(path_2009)\n', (50802, 50813), False, 'from pathlib 
import Path\n'), ((52226, 52263), 'pandas.read_csv', 'pd.read_csv', (['f'], {'header': 'None', 'sep': '"""\t"""'}), "(f, header=None, sep='\\t')\n", (52237, 52263), True, 'import pandas as pd\n'), ((53296, 53311), 'pathlib.Path', 'Path', (['path_pcbi'], {}), '(path_pcbi)\n', (53300, 53311), False, 'from pathlib import Path\n'), ((62965, 62985), 'pathlib.Path', 'Path', (['"""cleavage.dat"""'], {}), "('cleavage.dat')\n", (62969, 62985), False, 'from pathlib import Path\n'), ((65165, 65234), 'numpy.where', 'np.where', (["[(x == ' pos AA C score Ident\\n') for x in tmp]"], {}), "([(x == ' pos AA C score Ident\\n') for x in tmp])\n", (65173, 65234), True, 'import numpy as np\n'), ((65247, 65283), 'numpy.where', 'np.where', (["[(x == '\\n') for x in tmp]"], {}), "([(x == '\\n') for x in tmp])\n", (65255, 65283), True, 'import numpy as np\n'), ((67608, 67625), 'pathlib.Path', 'Path', (['"""tmp.fasta"""'], {}), "('tmp.fasta')\n", (67612, 67625), False, 'from pathlib import Path\n'), ((67991, 68022), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'seq_len'}), '(size=seq_len)\n', (68008, 68022), True, 'import numpy as np\n'), ((69253, 69279), 'numpy.where', 'np.where', (['seqs_same_length'], {}), '(seqs_same_length)\n', (69261, 69279), True, 'import numpy as np\n'), ((10043, 10109), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {'columns': "['ID', 'cluster_ID', 'representative']"}), "(rows, columns=['ID', 'cluster_ID', 'representative'])\n", (10055, 10109), True, 'import pandas as pd\n'), ((10258, 10306), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {'columns': "['ID', 'cluster_ID']"}), "(rows, columns=['ID', 'cluster_ID'])\n", (10270, 10306), True, 'import pandas as pd\n'), ((16374, 16388), 'Bio.Seq.Seq', 'Bio.Seq.Seq', (['s'], {}), '(s)\n', (16385, 16388), False, 'import Bio\n'), ((16851, 16882), 'Bio.Seq.Seq', 'Bio.Seq.Seq', (["row[1]['sequence']"], {}), "(row[1]['sequence'])\n", (16862, 16882), False, 'import Bio\n'), ((32171, 32234), 'pandas.read_csv', 
'pd.read_csv', (["(data_dir / 'bd2009.1' / bd09_cv_rnd_file)"], {'sep': '"""\t"""'}), "(data_dir / 'bd2009.1' / bd09_cv_rnd_file, sep='\\t')\n", (32182, 32234), True, 'import pandas as pd\n'), ((32256, 32312), 'pandas.read_csv', 'pd.read_csv', (["(data_dir / 'bd2009.1' / bd09_file)"], {'sep': '"""\t"""'}), "(data_dir / 'bd2009.1' / bd09_file, sep='\\t')\n", (32267, 32312), True, 'import pandas as pd\n'), ((44110, 44198), 'pandas.read_csv', 'pd.read_csv', (['file'], {'header': 'None', 'delimiter': '""" """', 'names': "('sequence', 'label', 'allele')"}), "(file, header=None, delimiter=' ', names=('sequence', 'label',\n 'allele'))\n", (44121, 44198), True, 'import pandas as pd\n'), ((46720, 46809), 'pandas.read_csv', 'pd.read_csv', (['file'], {'header': 'None', 'delimiter': '"""\t"""', 'names': "('sequence', 'label', 'allele')"}), "(file, header=None, delimiter='\\t', names=('sequence', 'label',\n 'allele'))\n", (46731, 46809), True, 'import pandas as pd\n'), ((48349, 48371), 'numpy.power', 'np.power', (['(50000)', '(1 - x)'], {}), '(50000, 1 - x)\n', (48357, 48371), True, 'import numpy as np\n'), ((50846, 50883), 'pandas.read_csv', 'pd.read_csv', (['f'], {'header': 'None', 'sep': '"""\t"""'}), "(f, header=None, sep='\\t')\n", (50857, 50883), True, 'import pandas as pd\n'), ((51113, 51167), 'numpy.logical_or', 'np.logical_or', (["(filename == 'train')", "(filename == 'test')"], {}), "(filename == 'train', filename == 'test')\n", (51126, 51167), True, 'import numpy as np\n'), ((51989, 52004), 'pathlib.Path', 'Path', (['path_pcbi'], {}), '(path_pcbi)\n', (51993, 52004), False, 'from pathlib import Path\n'), ((52012, 52027), 'pathlib.Path', 'Path', (['path_pcbi'], {}), '(path_pcbi)\n', (52016, 52027), False, 'from pathlib import Path\n'), ((52066, 52081), 'pathlib.Path', 'Path', (['path_pcbi'], {}), '(path_pcbi)\n', (52070, 52081), False, 'from pathlib import Path\n'), ((52182, 52189), 'pathlib.Path', 'Path', (['p'], {}), '(p)\n', (52186, 52189), False, 'from pathlib 
import Path\n'), ((67522, 67539), 'pathlib.Path', 'Path', (['"""tmp.fasta"""'], {}), "('tmp.fasta')\n", (67526, 67539), False, 'from pathlib import Path\n'), ((26525, 26543), 'numpy.maximum', 'np.maximum', (['(0.0)', 'x'], {}), '(0.0, x)\n', (26535, 26543), True, 'import numpy as np\n'), ((47112, 47212), 'pandas.read_csv', 'pd.read_csv', (['file'], {'header': 'None', 'delimiter': '"""\t"""', 'names': "('sequence', 'label', 'allele', 'context')"}), "(file, header=None, delimiter='\\t', names=('sequence', 'label',\n 'allele', 'context'))\n", (47123, 47212), True, 'import pandas as pd\n'), ((56612, 56650), 'numpy.array', 'np.array', (['[t for t in cleavage_string]'], {}), '([t for t in cleavage_string])\n', (56620, 56650), True, 'import numpy as np\n'), ((68059, 68073), 'numpy.where', 'np.where', (['cuts'], {}), '(cuts)\n', (68067, 68073), True, 'import numpy as np\n'), ((69990, 70023), 'numpy.any', 'np.any', (['[(n in c) for n in neigh]'], {}), '([(n in c) for n in neigh])\n', (69996, 70023), True, 'import numpy as np\n'), ((26469, 26478), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (26475, 26478), True, 'import numpy as np\n'), ((26479, 26495), 'numpy.log', 'np.log', (['max_ic50'], {}), '(max_ic50)\n', (26485, 26495), True, 'import numpy as np\n'), ((54997, 55035), 'numpy.array', 'np.array', (['[t for t in cleavage_string]'], {}), '([t for t in cleavage_string])\n', (55005, 55035), True, 'import numpy as np\n'), ((55096, 55134), 'numpy.array', 'np.array', (['[t for t in cleavage_string]'], {}), '([t for t in cleavage_string])\n', (55104, 55134), True, 'import numpy as np\n'), ((67229, 67286), 'numpy.all', 'np.all', (["[(not s in x) for s in ['X', 'Z', 'B', 'U', 'O']]"], {}), "([(not s in x) for s in ['X', 'Z', 'B', 'U', 'O']])\n", (67235, 67286), True, 'import numpy as np\n'), ((26641, 26670), 'pandas.Series', 'pd.Series', (["data['inequality']"], {}), "(data['inequality'])\n", (26650, 26670), True, 'import pandas as pd\n')] |
import re
from skimage import io
from glob import glob
import numpy as np
import os
import bioformats
from sklearn.externals import joblib
from skimage.filters import roberts
import os
import time
#os.chdir(os.getcwd() + '\utils')
#print(os.getcwd())
#from utils import *
def dataset(path, path_n):
    """Load positive and negative .tif image sets from two directories.

    Returns a tuple (X, X1, X2, titles_pos, titles_neg) where X1/X2 are the
    stacked positive/negative images, X is their vertical concatenation, and
    the title lists hold the numeric parts of the matched file names.
    """
    def _collect(pattern, name_regex):
        # Gather (regex-matched names, loaded images) for every .tif match.
        names, images = [], []
        for file_path in glob(pattern):
            names.append(re.findall(name_regex, file_path))
            images.append(io.imread(file_path))
        return names, images

    titles_pos, positive = _collect(path + '*.tif', '\d+.tif')
    titles_neg, negative = _collect(path_n + '*.tif', '\d+_n.tif')
    X1 = np.squeeze(np.stack(positive))
    X2 = np.squeeze(np.stack(negative))
    X = np.vstack((X1, X2))
    return (X, X1, X2, titles_pos, titles_neg)
def save_file(path, filename, data, model=True):
    """Persist *data* under ``path + filename`` without clobbering old saves.

    If ``path + filename + ".npy"`` already exists, an integer suffix
    (``_1``, ``_2``, ...) is appended until a free name is found.

    Parameters
    ----------
    path : str
        Directory prefix (expected to end with a path separator).
    filename : str
        Base file name without extension.
    data : object
        Model object (saved via joblib) or array-like (saved via np.save,
        which appends the ``.npy`` extension itself).
    model : bool
        When True, save with ``joblib.dump``; otherwise with ``np.save``.

    NOTE(review): the collision check always looks for a ``.npy`` file even
    when ``model=True`` (joblib writes without that extension) — confirm this
    matches how model files are named upstream.
    """
    if os.path.isfile(path+filename+".npy"):
        expand = 0
        while True:
            expand += 1
            new_filename = filename + "_" + str(expand)
            # Bug fix: test the on-disk name *including* the ".npy" extension,
            # consistent with the initial existence check above. The old code
            # checked the extension-less name, which never exists for np.save
            # output, so every call picked "_1" and overwrote previous saves.
            if os.path.isfile(path+new_filename+".npy"):
                continue
            else:
                filename = new_filename
                break
    if model:
        joblib.dump(data, path+filename)
    else:
        np.save(path+filename, data)
def metadata(path):
    """Extract a flat dict of OME-XML metadata for a Bio-Formats readable file.

    Returned keys: AcquisitionDate, Name, SizeC/T/X/Y/Z, PhysicalSizeX/Y/Z,
    PositionX/Y and Timepoint (the plane's DeltaT).
    """
    md = bioformats.omexml.OMEXML(bioformats.get_omexml_metadata(path))
    image = md.image()
    pixels = image.Pixels
    plane = pixels.Plane()
    meta = {
        'AcquisitionDate': image.AcquisitionDate,
        'Name': image.Name,
        'SizeC': pixels.SizeC,
        'SizeT': pixels.SizeT,
        'SizeX': pixels.SizeX,
        'SizeY': pixels.SizeY,
        'SizeZ': pixels.SizeZ,
        'PhysicalSizeX': pixels.PhysicalSizeX,
        'PhysicalSizeY': pixels.PhysicalSizeY,
        'PhysicalSizeZ': pixels.PhysicalSizeZ,
        'PositionX': plane.PositionX,
        'PositionY': plane.PositionY,
        'Timepoint': plane.DeltaT,
    }
    return meta
def stage_position(path):
    """Collect stage (x, y) positions and integer timepoints of *ALX.dv files.

    Returns (position, time_point) as two numpy arrays in glob order.
    """
    coords = []
    stamps = []
    for dv_file in glob(path + '/' + '*ALX.dv'):
        info = metadata(dv_file)
        coords.append((info['PositionX'], info['PositionY']))
        stamps.append(int(info['Timepoint']))
    # Variance-based cell detection was prototyped here and is intentionally
    # left disabled, as in the original implementation.
    return np.asarray(coords), np.asarray(stamps)
def load_bioformats(path, folder_batch = "", channel = None, no_meta_direct = False):
    """Read a Bio-Formats file into a numpy array (T, Z, X, Y, C) and
    optionally create a result directory.

    Parameters
    ----------
    path : str
        File to read; also used to derive the result directory location.
    folder_batch : str
        When non-empty, the result directory is created inside this folder
        instead of next to *path*.
    channel : int or None
        When given, only that channel is read (output has a single channel).
        NOTE(review): the truthiness test below treats channel 0 like None,
        so channel 0 cannot be selected individually — confirm intended.
    no_meta_direct : bool
        When True, return only the squeezed image array.

    Returns
    -------
    Either ``np.squeeze(image)`` alone, or a tuple
    ``(image, meta, result_directory)``.
    """
    meta = metadata(path)
    if channel:
        # Single-channel read: allocate one channel slot only.
        image = np.empty((meta['SizeT'], meta['SizeZ'], meta['SizeX'], meta['SizeY'], 1))
        with bioformats.ImageReader(path) as rdr:
            for t in range(0, meta['SizeT']):
                for z in range(0, meta['SizeZ']):
                    image[t,z,:,:,0]=rdr.read(c=channel, z=z, t=t, series=None,
                           index=None, rescale=False, wants_max_intensity=False,
                           channel_names=None)
    else:
        # Full read: all channels, one rdr.read call per (t, z, c) plane.
        image = np.empty((meta['SizeT'], meta['SizeZ'], meta['SizeX'], meta['SizeY'], meta['SizeC']))
        with bioformats.ImageReader(path) as rdr:
            for t in range(0, meta['SizeT']):
                for z in range(0, meta['SizeZ']):
                    for c in range(0, meta['SizeC']):
                        image[t,z,:,:,c]=rdr.read(c=c, z=z, t=t, series=None,
                               index=None, rescale=False, wants_max_intensity=False,
                               channel_names=None)
    if no_meta_direct == True:
        # Caller only wants the pixel data; singleton axes are dropped.
        return(np.squeeze(image))
    else:
        if folder_batch:
            # Batch mode: result directory lives inside folder_batch.
            path = folder_batch
            return(np.squeeze(image), meta, _new_directory_batch(path, meta))
        else:
            # Default: result directory is created next to the input file.
            return(np.squeeze(image), meta, _new_directory(path, meta))
def _new_directory_batch(path, meta):
directory = path +"/"+"result"+'_'+meta["Name"]+'_'+ time.strftime('%m'+'_'+'%d'+'_'+'%Y')
if os.path.exists(directory):
expand = 0
while True:
expand += 1
new_directory = directory+"_"+str(expand)
if os.path.exists(new_directory):
continue
else:
directory = new_directory
os.makedirs(directory)
break
else:
os.makedirs(directory)
return(directory)
def _new_directory(path, meta):
directory = os.path.dirname(path) +"/"+"result"+'_'+meta["Name"]+'_'+ time.strftime('%m'+'_'+'%d'+'_'+'%Y')
if os.path.exists(directory):
expand = 0
while True:
expand += 1
new_directory = directory+"_"+str(expand)
if os.path.exists(new_directory):
continue
else:
directory = new_directory
os.makedirs(directory)
break
else:
os.makedirs(directory)
return(directory)
def directory_batch(path):
    """Create and return a unique, date-stamped result directory inside *path*.

    The name embeds the folder's own basename:
    ``result_<folder>_<mm_dd_YYYY>``; an integer suffix is appended on collision.
    """
    folder = os.path.basename(os.path.normpath(path))
    base = path+"/"+"result"+'_'+folder+'_'+ time.strftime('%m'+'_'+'%d'+'_'+'%Y')
    target = base
    counter = 0
    while os.path.exists(target):
        counter += 1
        target = base + "_" + str(counter)
    os.makedirs(target)
    return(target)
def _line_coord(position1, position2):
i = [position1[0], position2[0]]
j = [position1[1], position2[1]]
a, b = np.polyfit(i, j, 1)
#y = ax + b
if position1[0] == position2[0]:
# No change in x, slope is infinite, adjust with change in y
step = abs(position1[1] - position2[1])/20
x = np.repeat(position1[0], 20)
if position1[1] < position2[1]:
#if it goes up..
y = np.arange(position1[1], position2[1], step)
if position1[1] > position2[1]:
#if it goes down...
y = np.arange(position2[1], position1[1], step)
if position1[0] > position2[0]:
# Goes negative direction
step = abs(position2[0] - position1[0])/20
x = np.arange(position2[0], position1[0], step)
y = a*x+b
x = x[::-1]
y = y[::-1]
if position1[0] < position2[0]:
step = abs(position1[0] - position2[0])/20
x = np.arange(position1[0], position2[0], step)
y = a*x+b
return x, y
def skeleton_coord(position, time_point):
    """Chain line segments between consecutive (time-ordered) stage positions.

    Sorts the positions by timepoint, samples 20 points along each consecutive
    segment via _line_coord, and returns them as an (N, 2) array.
    """
    table = np.concatenate((position, time_point[:, np.newaxis]), axis=1)
    ordered = table[np.argsort(table[:, 2])]
    segments = [_line_coord(start, stop)
                for start, stop in zip(ordered, ordered[1:])]
    return np.transpose(np.hstack(segments), (1, 0))
| [
"sklearn.externals.joblib.dump",
"numpy.polyfit",
"numpy.empty",
"time.strftime",
"numpy.argsort",
"os.path.isfile",
"numpy.arange",
"glob.glob",
"bioformats.omexml.OMEXML",
"os.path.dirname",
"os.path.exists",
"re.findall",
"os.path.normpath",
"skimage.io.imread",
"numpy.repeat",
"num... | [((355, 375), 'glob.glob', 'glob', (["(path + '*.tif')"], {}), "(path + '*.tif')\n", (359, 375), False, 'from glob import glob\n'), ((532, 554), 'glob.glob', 'glob', (["(path_n + '*.tif')"], {}), "(path_n + '*.tif')\n", (536, 554), False, 'from glob import glob\n'), ((746, 765), 'numpy.vstack', 'np.vstack', (['(X1, X2)'], {}), '((X1, X2))\n', (755, 765), True, 'import numpy as np\n'), ((868, 908), 'os.path.isfile', 'os.path.isfile', (["(path + filename + '.npy')"], {}), "(path + filename + '.npy')\n", (882, 908), False, 'import os\n'), ((1309, 1345), 'bioformats.get_omexml_metadata', 'bioformats.get_omexml_metadata', (['path'], {}), '(path)\n', (1339, 1345), False, 'import bioformats\n'), ((1355, 1384), 'bioformats.omexml.OMEXML', 'bioformats.omexml.OMEXML', (['xml'], {}), '(xml)\n', (1379, 1384), False, 'import bioformats\n'), ((2175, 2203), 'glob.glob', 'glob', (["(path + '/' + '*ALX.dv')"], {}), "(path + '/' + '*ALX.dv')\n", (2179, 2203), False, 'from glob import glob\n'), ((2883, 2903), 'numpy.asarray', 'np.asarray', (['position'], {}), '(position)\n', (2893, 2903), True, 'import numpy as np\n'), ((2921, 2943), 'numpy.asarray', 'np.asarray', (['time_point'], {}), '(time_point)\n', (2931, 2943), True, 'import numpy as np\n'), ((4726, 4751), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (4740, 4751), False, 'import os\n'), ((5280, 5305), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (5294, 5305), False, 'import os\n'), ((5856, 5881), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (5870, 5881), False, 'import os\n'), ((6380, 6399), 'numpy.polyfit', 'np.polyfit', (['i', 'j', '(1)'], {}), '(i, j, 1)\n', (6390, 6399), True, 'import numpy as np\n'), ((7340, 7401), 'numpy.concatenate', 'np.concatenate', (['(position, time_point[:, np.newaxis])'], {'axis': '(1)'}), '((position, time_point[:, np.newaxis]), axis=1)\n', (7354, 7401), True, 'import numpy as np\n'), ((678, 696), 
'numpy.stack', 'np.stack', (['positive'], {}), '(positive)\n', (686, 696), True, 'import numpy as np\n'), ((718, 736), 'numpy.stack', 'np.stack', (['negative'], {}), '(negative)\n', (726, 736), True, 'import numpy as np\n'), ((1198, 1232), 'sklearn.externals.joblib.dump', 'joblib.dump', (['data', '(path + filename)'], {}), '(data, path + filename)\n', (1209, 1232), False, 'from sklearn.externals import joblib\n'), ((1249, 1279), 'numpy.save', 'np.save', (['(path + filename)', 'data'], {}), '(path + filename, data)\n', (1256, 1279), True, 'import numpy as np\n'), ((3234, 3307), 'numpy.empty', 'np.empty', (["(meta['SizeT'], meta['SizeZ'], meta['SizeX'], meta['SizeY'], 1)"], {}), "((meta['SizeT'], meta['SizeZ'], meta['SizeX'], meta['SizeY'], 1))\n", (3242, 3307), True, 'import numpy as np\n'), ((3740, 3830), 'numpy.empty', 'np.empty', (["(meta['SizeT'], meta['SizeZ'], meta['SizeX'], meta['SizeY'], meta['SizeC'])"], {}), "((meta['SizeT'], meta['SizeZ'], meta['SizeX'], meta['SizeY'], meta[\n 'SizeC']))\n", (3748, 3830), True, 'import numpy as np\n'), ((4332, 4349), 'numpy.squeeze', 'np.squeeze', (['image'], {}), '(image)\n', (4342, 4349), True, 'import numpy as np\n'), ((4680, 4725), 'time.strftime', 'time.strftime', (["('%m' + '_' + '%d' + '_' + '%Y')"], {}), "('%m' + '_' + '%d' + '_' + '%Y')\n", (4693, 4725), False, 'import time\n'), ((5080, 5102), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (5091, 5102), False, 'import os\n'), ((5234, 5279), 'time.strftime', 'time.strftime', (["('%m' + '_' + '%d' + '_' + '%Y')"], {}), "('%m' + '_' + '%d' + '_' + '%Y')\n", (5247, 5279), False, 'import time\n'), ((5634, 5656), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (5645, 5656), False, 'import os\n'), ((5737, 5759), 'os.path.normpath', 'os.path.normpath', (['path'], {}), '(path)\n', (5753, 5759), False, 'import os\n'), ((5811, 5856), 'time.strftime', 'time.strftime', (["('%m' + '_' + '%d' + '_' + '%Y')"], {}), "('%m' + '_' + '%d' + 
'_' + '%Y')\n", (5824, 5856), False, 'import time\n'), ((6210, 6232), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (6221, 6232), False, 'import os\n'), ((6585, 6612), 'numpy.repeat', 'np.repeat', (['position1[0]', '(20)'], {}), '(position1[0], 20)\n', (6594, 6612), True, 'import numpy as np\n'), ((7007, 7050), 'numpy.arange', 'np.arange', (['position2[0]', 'position1[0]', 'step'], {}), '(position2[0], position1[0], step)\n', (7016, 7050), True, 'import numpy as np\n'), ((7208, 7251), 'numpy.arange', 'np.arange', (['position1[0]', 'position2[0]', 'step'], {}), '(position1[0], position2[0], step)\n', (7217, 7251), True, 'import numpy as np\n'), ((7422, 7444), 'numpy.argsort', 'np.argsort', (['data[:, 2]'], {}), '(data[:, 2])\n', (7432, 7444), True, 'import numpy as np\n'), ((7631, 7651), 'numpy.hstack', 'np.hstack', (['list_line'], {}), '(list_line)\n', (7640, 7651), True, 'import numpy as np\n'), ((403, 432), 're.findall', 're.findall', (['"""\\\\d+.tif"""', 'files'], {}), "('\\\\d+.tif', files)\n", (413, 432), False, 'import re\n'), ((458, 474), 'skimage.io.imread', 'io.imread', (['files'], {}), '(files)\n', (467, 474), False, 'from skimage import io\n'), ((582, 613), 're.findall', 're.findall', (['"""\\\\d+_n.tif"""', 'files'], {}), "('\\\\d+_n.tif', files)\n", (592, 613), False, 'import re\n'), ((639, 655), 'skimage.io.imread', 'io.imread', (['files'], {}), '(files)\n', (648, 655), False, 'from skimage import io\n'), ((1040, 1075), 'os.path.isfile', 'os.path.isfile', (['(path + new_filename)'], {}), '(path + new_filename)\n', (1054, 1075), False, 'import os\n'), ((3321, 3349), 'bioformats.ImageReader', 'bioformats.ImageReader', (['path'], {}), '(path)\n', (3343, 3349), False, 'import bioformats\n'), ((3839, 3867), 'bioformats.ImageReader', 'bioformats.ImageReader', (['path'], {}), '(path)\n', (3861, 3867), False, 'import bioformats\n'), ((4885, 4914), 'os.path.exists', 'os.path.exists', (['new_directory'], {}), '(new_directory)\n', (4899, 
4914), False, 'import os\n'), ((5439, 5468), 'os.path.exists', 'os.path.exists', (['new_directory'], {}), '(new_directory)\n', (5453, 5468), False, 'import os\n'), ((6015, 6044), 'os.path.exists', 'os.path.exists', (['new_directory'], {}), '(new_directory)\n', (6029, 6044), False, 'import os\n'), ((6698, 6741), 'numpy.arange', 'np.arange', (['position1[1]', 'position2[1]', 'step'], {}), '(position1[1], position2[1], step)\n', (6707, 6741), True, 'import numpy as np\n'), ((6830, 6873), 'numpy.arange', 'np.arange', (['position2[1]', 'position1[1]', 'step'], {}), '(position2[1], position1[1], step)\n', (6839, 6873), True, 'import numpy as np\n'), ((4437, 4454), 'numpy.squeeze', 'np.squeeze', (['image'], {}), '(image)\n', (4447, 4454), True, 'import numpy as np\n'), ((4529, 4546), 'numpy.squeeze', 'np.squeeze', (['image'], {}), '(image)\n', (4539, 4546), True, 'import numpy as np\n'), ((5017, 5039), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (5028, 5039), False, 'import os\n'), ((5571, 5593), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (5582, 5593), False, 'import os\n'), ((6147, 6169), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (6158, 6169), False, 'import os\n'), ((5176, 5197), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (5191, 5197), False, 'import os\n')] |
#!/usr/bin/python3
# Train a small feed-forward network on the Iris dataset with the `nicenet`
# package, evaluate it, export the model, and report held-out accuracy.
from nicenet import NeuralNetwork
from nicenet import Dataset
from helpers import shuffle_array, split_arr
import numpy as np
# Iris: 4 features in, 3 classes out.
inputs = 4
outputs = 3
# Two 8-unit sigmoid hidden layers, softmax output, cross-entropy loss.
network = NeuralNetwork(inputs, outputs, cost="ce")
network.add_layer(8, activation_function="sigmoid")
network.add_layer(8, activation_function="sigmoid")
network.compile(activation_function="softmax")
network.set_learning_rate(0.1)
# Load features/targets from CSV into the nicenet dataset handler.
dataset_handler = Dataset(inputs, outputs)
dataset_handler.make_dataset(
    './datasets/Iris/inputs.csv', './datasets/Iris/targets.csv')
data, size = dataset_handler.get_raw_data()
# data = dataset_handler.scale_data(data, size)
# Shuffle, then use 3/4 of the samples for training and the rest for testing.
data = shuffle_array(data)
training, testing = split_arr(data, 3/4)
# print(len(training))
network.Train(training, len(training), epochs=50,
              logging=False, epoch_logging=False)
network.evaluate()
network.epoch_vs_error()
network.export_model('iris_model.json')
# Manual accuracy check on the held-out split: compare argmax of the
# predicted distribution against argmax of the one-hot target.
correct = 0
total = 0
for sample in testing:
    features = sample[0]
    prediction = network.predict(features)
    actual = sample[1]
    p = np.argmax(prediction)
    a = np.argmax(actual.T[0])
    if p == a:
        correct += 1
    total += 1
print("Testing accuracy:", correct*100/total)
# The block below re-loads the exported model and repeats the accuracy
# check; kept disabled as in the original.
# new_network = NeuralNetwork.load_model('iris_model.json')
# correct = 0
# total = 0
# for sample in testing :
#     features = sample[0]
#     prediction = new_network.predict(features)
#     actual = sample[1]
#     p = np.argmax(prediction)
#     a = np.argmax(actual.T[0])
#     if p == a:
#         correct += 1
#     total += 1
# print("Testing accuracy:", correct*100/total)
| [
"numpy.argmax",
"nicenet.Dataset",
"nicenet.NeuralNetwork",
"helpers.shuffle_array",
"helpers.split_arr"
] | [((180, 221), 'nicenet.NeuralNetwork', 'NeuralNetwork', (['inputs', 'outputs'], {'cost': '"""ce"""'}), "(inputs, outputs, cost='ce')\n", (193, 221), False, 'from nicenet import NeuralNetwork\n'), ((423, 447), 'nicenet.Dataset', 'Dataset', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (430, 447), False, 'from nicenet import Dataset\n'), ((642, 661), 'helpers.shuffle_array', 'shuffle_array', (['data'], {}), '(data)\n', (655, 661), False, 'from helpers import shuffle_array, split_arr\n'), ((682, 704), 'helpers.split_arr', 'split_arr', (['data', '(3 / 4)'], {}), '(data, 3 / 4)\n', (691, 704), False, 'from helpers import shuffle_array, split_arr\n'), ((1057, 1078), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (1066, 1078), True, 'import numpy as np\n'), ((1087, 1109), 'numpy.argmax', 'np.argmax', (['actual.T[0]'], {}), '(actual.T[0])\n', (1096, 1109), True, 'import numpy as np\n')] |
import SchematicTools
from schematic import SchematicFile
import numpy as np
# NOTE(review): duplicate import — SchematicTools is imported twice in this file.
import SchematicTools
from PIL import Image
#import glob
## Take a large schematic (or multiple schematics) as input data.
## Process and export a numpy array that contains a large number of cube-shaped samples.
## The samples are also processed and converted to a simplified block format.
## voxel size in width, height, and length.
SAMPLESIZE = 8
## export data path
FILEPATH = 'data/np_samples_%dx.npy' % SAMPLESIZE
## Input schematic files.
WORLDS = ['data/zearth_64_144_32.schematic']
## Or you could load a number of schematics from a folder.
#WORLDS = glob.glob("C:/yourschematicfolder/" + '/*.schematic')
## Number of samples to export.
SAMPLECOUNT = 100000
def sample(area: np.ndarray, samples: int, size: int):
    """Draw ``samples`` random cubic sub-volumes of edge ``size`` from ``area``.

    Corner coordinates are drawn uniformly so every cube fits inside the
    volume. Returns an int array of shape (samples, size, size, size).
    """
    ys = np.random.randint(0, area.shape[0] - size, samples)
    zs = np.random.randint(0, area.shape[1] - size, samples)
    xs = np.random.randint(0, area.shape[2] - size, samples)
    corners = np.stack((ys, zs, xs), axis=-1)
    out = np.empty((samples, size, size, size), dtype=int)
    for n, (y, z, x) in enumerate(corners):
        out[n] = area[y:y + size, z:z + size, x:x + size]
    print("sampled %s" % str(out.shape))
    return out
def sampleFlat(area: np.ndarray, samples: int, size: int):
    """Draw ``samples`` random 2D patches of edge ``size`` from ``area``.

    The first axis index is a single random plane (no slab taken); the other
    two axes are sliced. Returns an int array of shape (samples, size, size).
    """
    print("sampling flat %d, size %d" % (samples, size))
    ys = np.random.randint(0, area.shape[0], samples)
    zs = np.random.randint(0, area.shape[1] - size, samples)
    xs = np.random.randint(0, area.shape[2] - size, samples)
    corners = np.stack((ys, zs, xs), axis=-1)
    out = np.empty((samples, size, size), dtype=int)
    for n, (y, z, x) in enumerate(corners):
        out[n] = area[y, z:z + size, x:x + size]
    return out
def filter(slices: np.ndarray, minFill, maxFill):
    """Keep samples whose occupied-voxel count lies strictly between the
    bounds, given as fractions of the sample volume.

    Generalization: the voxel count is derived from ``slices.shape`` instead
    of the module-level SAMPLESIZE constant, so any cube edge length works;
    behavior is unchanged for SAMPLESIZE cubes.
    NOTE: the name shadows the builtin ``filter``; kept for caller compatibility.
    """
    voxels = slices.shape[1] * slices.shape[2] * slices.shape[3]
    totals = slices.sum(axis=(1, 2, 3))
    mask = np.logical_and(totals > minFill * voxels, totals < maxFill * voxels)
    return slices[mask]
def filterFlat(slices: np.ndarray, threshold):
    """Keep 2D samples whose summed occupancy exceeds ``threshold``."""
    keep = slices.sum(axis=(1, 2)) > threshold
    return slices[keep]
def showSamples(samples, width, height) :
    # Assemble a width x height mosaic preview of binary samples and save/show
    # it as 'samplesPreview.png'.
    # NOTE(review): indexing uses samples[i*width + j] while i runs over
    # `width` rows and j over `height` columns; this is only consistent when
    # width == height — confirm intended.
    preview = np.empty((0, SAMPLESIZE * width,3), np.uint8)
    for i in range(width):
        row = np.empty((SAMPLESIZE, 0, 3), np.uint8)
        for j in range(height):
            # Scale 0/1 voxels to 0/255 intensities.
            imageR = samples[i*width + j, :, :, :] * 255
            # Depth-average the cube into a 2D intensity map.
            imageG = np.average(imageR, axis=0)
            # R channel: top slice; G and B: the depth average.
            image = np.stack((imageR[0], imageG, imageG), axis=2)
            row = np.hstack((row, image))
        preview = np.vstack((preview, row))
    outputimage = Image.fromarray(preview.astype(np.uint8))
    outputimage.save('samplesPreview.png')
    outputimage.show()
def exportSamples(samples, width, height, spacing) :
    # Lay `width * height` samples out on a grid (with `spacing` empty blocks
    # between them) and write the result to 'data/sampledExample.schematic'.
    # NOTE(review): as in showSamples, indexing with samples[i*width + j]
    # assumes width == height — confirm intended.
    exportWorld = np.empty((SAMPLESIZE, (SAMPLESIZE + spacing) * width, 0), np.uint8)
    for i in range(width):
        row = np.empty((SAMPLESIZE, 0, SAMPLESIZE), np.uint8)
        for j in range(height):
            structure = samples[i*width + j, :, :, :] # load a sample
            #apply spacing (pad with empty blocks along the row axis)
            structure = np.concatenate((structure, np.zeros((SAMPLESIZE, spacing, SAMPLESIZE), dtype=np.uint8)), axis=1)
            #add to the row
            row = np.concatenate((row, structure), axis=1)
        #add to the column
        exportWorld = np.concatenate((exportWorld, row), axis=2)
    #apply spacing (trailing pad along the column axis)
    exportWorld = np.concatenate((exportWorld, np.zeros((SAMPLESIZE, (SAMPLESIZE + spacing) * width, spacing), dtype=np.uint8)), axis=2)
    exportSchematic = SchematicFile(shape=exportWorld.shape)
    exportSchematic.blocks = exportWorld
    exportSchematic.save("data/sampledExample.schematic")
# Pipeline: load + simplify each input world, draw random cube samples,
# filter by fill ratio, save the array, and export a preview schematic.
simpleWorlds = []
for w in WORLDS :
    simpleWorlds.append(SchematicTools.simplify(SchematicTools.loadArea(w)))
print("loaded %d worlds" % len(simpleWorlds))
samples = np.empty((0, SAMPLESIZE, SAMPLESIZE, SAMPLESIZE))
for s in simpleWorlds :
    # Draw SAMPLECOUNT random cubes from each world and accumulate them.
    samples = np.concatenate((samples, sample(s, SAMPLECOUNT, SAMPLESIZE)), axis=0)
#sampleWorld = SchematicTools.loadArea('data/sample_world.schematic')
#simplifiedWorld = SchematicTools.simplify(sampleWorld)
#print("loaded area: %d, %d, %d" % simplifiedWorld.shape)
#testSamples = sample(simplifiedWorld, SAMPLECOUNT, SAMPLESIZE)
print("sampled total %s" % str(samples.shape))
# Discard nearly-empty (<5% filled) and nearly-solid (>70% filled) cubes.
filtered = filter(samples, .05, .7)
print("filtered to %s" % str(filtered.shape))
np.save(FILEPATH, filtered)
print("saved to: %s" % FILEPATH)
#showSamples(filtered, 10, 10)
exportSamples(filtered, 16, 16, 2) | [
"numpy.stack",
"numpy.save",
"numpy.average",
"numpy.concatenate",
"schematic.SchematicFile",
"numpy.empty",
"numpy.zeros",
"numpy.hstack",
"numpy.random.randint",
"SchematicTools.loadArea",
"numpy.vstack"
] | [((4132, 4181), 'numpy.empty', 'np.empty', (['(0, SAMPLESIZE, SAMPLESIZE, SAMPLESIZE)'], {}), '((0, SAMPLESIZE, SAMPLESIZE, SAMPLESIZE))\n', (4140, 4181), True, 'import numpy as np\n'), ((4667, 4694), 'numpy.save', 'np.save', (['FILEPATH', 'filtered'], {}), '(FILEPATH, filtered)\n', (4674, 4694), True, 'import numpy as np\n'), ((819, 870), 'numpy.random.randint', 'np.random.randint', (['(0)', '(area.shape[0] - size)', 'samples'], {}), '(0, area.shape[0] - size, samples)\n', (836, 870), True, 'import numpy as np\n'), ((886, 937), 'numpy.random.randint', 'np.random.randint', (['(0)', '(area.shape[1] - size)', 'samples'], {}), '(0, area.shape[1] - size, samples)\n', (903, 937), True, 'import numpy as np\n'), ((954, 1005), 'numpy.random.randint', 'np.random.randint', (['(0)', '(area.shape[2] - size)', 'samples'], {}), '(0, area.shape[2] - size, samples)\n', (971, 1005), True, 'import numpy as np\n'), ((1020, 1069), 'numpy.stack', 'np.stack', (['(samplerY, samplerZ, samplerX)'], {'axis': '(-1)'}), '((samplerY, samplerZ, samplerX), axis=-1)\n', (1028, 1069), True, 'import numpy as np\n'), ((1083, 1131), 'numpy.empty', 'np.empty', (['(samples, size, size, size)'], {'dtype': 'int'}), '((samples, size, size, size), dtype=int)\n', (1091, 1131), True, 'import numpy as np\n'), ((1514, 1558), 'numpy.random.randint', 'np.random.randint', (['(0)', 'area.shape[0]', 'samples'], {}), '(0, area.shape[0], samples)\n', (1531, 1558), True, 'import numpy as np\n'), ((1574, 1625), 'numpy.random.randint', 'np.random.randint', (['(0)', '(area.shape[1] - size)', 'samples'], {}), '(0, area.shape[1] - size, samples)\n', (1591, 1625), True, 'import numpy as np\n'), ((1642, 1693), 'numpy.random.randint', 'np.random.randint', (['(0)', '(area.shape[2] - size)', 'samples'], {}), '(0, area.shape[2] - size, samples)\n', (1659, 1693), True, 'import numpy as np\n'), ((1708, 1757), 'numpy.stack', 'np.stack', (['(samplerY, samplerZ, samplerX)'], {'axis': '(-1)'}), '((samplerY, samplerZ, samplerX), 
axis=-1)\n', (1716, 1757), True, 'import numpy as np\n'), ((1771, 1813), 'numpy.empty', 'np.empty', (['(samples, size, size)'], {'dtype': 'int'}), '((samples, size, size), dtype=int)\n', (1779, 1813), True, 'import numpy as np\n'), ((2437, 2483), 'numpy.empty', 'np.empty', (['(0, SAMPLESIZE * width, 3)', 'np.uint8'], {}), '((0, SAMPLESIZE * width, 3), np.uint8)\n', (2445, 2483), True, 'import numpy as np\n'), ((3051, 3118), 'numpy.empty', 'np.empty', (['(SAMPLESIZE, (SAMPLESIZE + spacing) * width, 0)', 'np.uint8'], {}), '((SAMPLESIZE, (SAMPLESIZE + spacing) * width, 0), np.uint8)\n', (3059, 3118), True, 'import numpy as np\n'), ((3823, 3861), 'schematic.SchematicFile', 'SchematicFile', ([], {'shape': 'exportWorld.shape'}), '(shape=exportWorld.shape)\n', (3836, 3861), False, 'from schematic import SchematicFile\n'), ((2524, 2562), 'numpy.empty', 'np.empty', (['(SAMPLESIZE, 0, 3)', 'np.uint8'], {}), '((SAMPLESIZE, 0, 3), np.uint8)\n', (2532, 2562), True, 'import numpy as np\n'), ((2826, 2851), 'numpy.vstack', 'np.vstack', (['(preview, row)'], {}), '((preview, row))\n', (2835, 2851), True, 'import numpy as np\n'), ((3160, 3207), 'numpy.empty', 'np.empty', (['(SAMPLESIZE, 0, SAMPLESIZE)', 'np.uint8'], {}), '((SAMPLESIZE, 0, SAMPLESIZE), np.uint8)\n', (3168, 3207), True, 'import numpy as np\n'), ((3594, 3636), 'numpy.concatenate', 'np.concatenate', (['(exportWorld, row)'], {'axis': '(2)'}), '((exportWorld, row), axis=2)\n', (3608, 3636), True, 'import numpy as np\n'), ((2673, 2699), 'numpy.average', 'np.average', (['imageR'], {'axis': '(0)'}), '(imageR, axis=0)\n', (2683, 2699), True, 'import numpy as np\n'), ((2720, 2765), 'numpy.stack', 'np.stack', (['(imageR[0], imageG, imageG)'], {'axis': '(2)'}), '((imageR[0], imageG, imageG), axis=2)\n', (2728, 2765), True, 'import numpy as np\n'), ((2784, 2807), 'numpy.hstack', 'np.hstack', (['(row, image)'], {}), '((row, image))\n', (2793, 2807), True, 'import numpy as np\n'), ((3504, 3544), 'numpy.concatenate', 
'np.concatenate', (['(row, structure)'], {'axis': '(1)'}), '((row, structure), axis=1)\n', (3518, 3544), True, 'import numpy as np\n'), ((4047, 4073), 'SchematicTools.loadArea', 'SchematicTools.loadArea', (['w'], {}), '(w)\n', (4070, 4073), False, 'import SchematicTools\n'), ((3711, 3790), 'numpy.zeros', 'np.zeros', (['(SAMPLESIZE, (SAMPLESIZE + spacing) * width, spacing)'], {'dtype': 'np.uint8'}), '((SAMPLESIZE, (SAMPLESIZE + spacing) * width, spacing), dtype=np.uint8)\n', (3719, 3790), True, 'import numpy as np\n'), ((3388, 3447), 'numpy.zeros', 'np.zeros', (['(SAMPLESIZE, spacing, SAMPLESIZE)'], {'dtype': 'np.uint8'}), '((SAMPLESIZE, spacing, SAMPLESIZE), dtype=np.uint8)\n', (3396, 3447), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 27 09:42:34 2019
This script passes the speech segments identified in step 1 through
Google Speech2Text (settings are for English) and creates an updated
textgrid with speech tier.
@author: szekely
"""
from google.cloud import speech_v1p1beta1 as speech
# NOTE(review): argparse, pickle, csv and json appear unused in this script.
import argparse
import io
import pickle
import os
import csv
import re
import json
import numpy as np
from praatio import tgio
from google.api_core import client_options
import soundfile
import codes
from codes.helpers import list_filenames, annot2textgrid, load_wav
#%% settings
orig_wav_root = './Cormac/denoised_44k/' #location of input wavs
textgrid_root = './Cormac/TG_corrected/' #location of (corrected) TextGrids
output_root = './Cormac/ASR/' #output location
infiles = list(list_filenames(orig_wav_root, extensions='.wav', add_ext=False))
infiles.sort()
sr = 44100  # sample rate of the input wavs (Hz)
#%% episode settings
epi = 1  # index into the sorted input file list
episode = 'C'+infiles[epi]
tg_file = episode+'_bc'
output_loc = output_root + episode
# Create the episode output folder and its temp subfolder if needed.
if not os.path.exists(output_loc):
    os.makedirs(output_loc)
if not os.path.exists(output_loc + '/temp'):
    os.makedirs(output_loc + '/temp')
#%% load the corrected TextGrid and pull intervals from its first tier
tg = tgio.openTextgrid(textgrid_root + tg_file + ".TextGrid")
firstTier = tg.tierDict[tg.tierNameList[0]]
tg_start = [entry[0] for entry in firstTier.entryList]
tg_stop = [entry[1] for entry in firstTier.entryList]
tg_label = [entry[2] for entry in firstTier.entryList]
# Indices of intervals labelled 'sp' (speech segments).
sps = [i for i, x in enumerate(tg_label) if x == 'sp']
# test if speech segments are correctly placed (not at start or end, not next to each other)
test_sp = [sps[i+1]-sps[max(0,i)] for i, x in enumerate(sps[1:])]
print(f"first element is SP: {sps[0]==0}")
print(f"last element is SP: {sps[-1]==len(tg_label)-1}")
print(f"consequtive SPs: {[sps[i] for i, x in enumerate(test_sp) if x==1]}")
#%% cut the episode wav into one file per speech segment
y = load_wav(orig_wav_root+infiles[epi]+'.wav', sr=sr)
wav_out = np.asarray(y[1])
for i in range(len(sps)):
    # Each clip spans from the start of the interval before the 'sp' segment
    # to the end of the interval after it (context around the speech).
    wav_temp = wav_out[int(sr*tg_start[sps[i]-1]):int(sr*tg_stop[sps[i]+1])]
    wav_name = f"{episode}_{str(i).zfill(4)}.wav"
    soundfile.write(output_loc + '/temp/' + wav_name, wav_temp, sr, subtype='PCM_16')
asr_files = list(list_filenames(output_loc + '/temp', extensions='.wav', add_ext=False))
asr_files.sort()
#%% set Google Cloud speech2text credentials file
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = "./codes/transcribespeech-312319-d025b453aaeb.json"
#%% transcribe list
# results[i] holds the first alternative of the first result; longresults[i]
# the first alternative of a second result, if the API returned one.
results = [None]*len(sps)
longresults = [None]*len(sps)
client_ops = client_options.ClientOptions(api_endpoint="eu-speech.googleapis.com")
#%% run ASR on each breath group
for i in range(len(sps)):
    client = speech.SpeechClient(client_options=client_ops)
    metadata = speech.types.RecognitionMetadata()
    metadata.interaction_type = (
        speech.RecognitionMetadata.InteractionType.PRESENTATION)
    metadata.microphone_distance = (
        speech.RecognitionMetadata.MicrophoneDistance.NEARFIELD)
    metadata.recording_device_type = (
        speech.RecognitionMetadata.RecordingDeviceType.OTHER_INDOOR_DEVICE)
    # Bug fix: this assignment previously targeted recording_device_type a
    # second time, clobbering the device type set above with an
    # OriginalMediaType value; it belongs on original_media_type.
    metadata.original_media_type = (
        speech.RecognitionMetadata.OriginalMediaType.AUDIO)
    with io.open( f"{output_loc}/temp/{episode}_{str(i).zfill(4)}.wav", 'rb') as audio_file:
        content = audio_file.read()
    audio = speech.RecognitionAudio(content=content)
    config = speech.RecognitionConfig(
        encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=44100,
        language_code='en-US',
        enable_automatic_punctuation=False,
        enable_word_time_offsets=True,
        use_enhanced=True,
        model='video',
        #speech_contexts=[speech.types.SpeechContext(
        #phrases=['ThinkComputers', 'Podcast', 'Bob', 'Derrick', 'Intel', 'AMD', 'ASUS', 'envy ME', 'Corsair', 'Nvidia', 'GPU', 'CPU', 'RGB', 'Fortnite']
        #)],
        metadata=metadata)
    response = client.recognize(config=config, audio=audio)
    # Keep the top alternative; if the API split the audio into two results,
    # stash the second part in longresults for later concatenation.
    if len(response.results) == 1:
        results[i] = response.results[0].alternatives[0]
    elif len(response.results) == 2:
        results[i] = response.results[0].alternatives[0]
        longresults[i] = response.results[1].alternatives[0]
    elif len(response.results) > 2:
        results[i] = response.results[0].alternatives[0]
        longresults[i] = response.results[1].alternatives[0]
        print(f"multiple responses: {i}")
#%% extract transcript
transcript = [None]*len(sps)
for i in range(len(results)):
    # NOTE(review): bare except — an AttributeError on a None result is the
    # expected case here, but this also hides any other failure.
    try:
        transcript[i] = results[i].transcript
    except:
        print('empty transcript', i)
for j in range(len(longresults)):
    if longresults[j]:
        # Append the second API result's text to the first.
        print('second line', j)
        transcript[j] += (' ' + longresults[j].transcript)
transcript2 = [None]*len(sps)
for k in range(len(transcript)):
    if transcript[k]:
        # Split run-together CamelCase words, then break hyphenated words.
        transcript2[k] = re.sub(r"(?<=\w)([A-Z])", r" \1", transcript[k])
        transcript2[k] = re.sub(r"([a-z])\-([a-z])", r"\1 \2", transcript2[k] , 0, re.IGNORECASE)
#%% save results
# Start from the manual labels, then overwrite the breath-group slots with
# the cleaned ASR transcript wherever one was produced.
tg_asr = list(tg_label)
for i in range(len(sps)):
    text = transcript2[i]
    if text:
        tg_asr[sps[i]] = text
wav_path = orig_wav_root + infiles[epi] + '.wav'
annotTier = tgio.IntervalTier('labels', [], 0, pairedWav=wav_path)
asrTier = tgio.IntervalTier('transcript', [], 0, pairedWav=wav_path)
tg2 = tgio.Textgrid()
for tier in (annotTier, asrTier):
    tg2.addTier(tier)
# Fill both tiers interval by interval, replacing any colliding entries.
for i, start in enumerate(tg_start):
    annotTier.insertEntry((start, tg_stop[i], tg_label[i]), warnFlag=True, collisionCode='replace')
    asrTier.insertEntry((start, tg_stop[i], tg_asr[i]), warnFlag=True, collisionCode='replace')
tg2.save(output_root + episode + ".TextGrid")
"google.cloud.speech_v1p1beta1.RecognitionConfig",
"codes.helpers.list_filenames",
"os.makedirs",
"google.api_core.client_options.ClientOptions",
"google.cloud.speech_v1p1beta1.types.RecognitionMetadata",
"numpy.asarray",
"os.path.exists",
"google.cloud.speech_v1p1beta1.RecognitionAudio",
"praatio.t... | [((1189, 1245), 'praatio.tgio.openTextgrid', 'tgio.openTextgrid', (["(textgrid_root + tg_file + '.TextGrid')"], {}), "(textgrid_root + tg_file + '.TextGrid')\n", (1206, 1245), False, 'from praatio import tgio\n'), ((1854, 1908), 'codes.helpers.load_wav', 'load_wav', (["(orig_wav_root + infiles[epi] + '.wav')"], {'sr': 'sr'}), "(orig_wav_root + infiles[epi] + '.wav', sr=sr)\n", (1862, 1908), False, 'from codes.helpers import list_filenames, annot2textgrid, load_wav\n'), ((1915, 1931), 'numpy.asarray', 'np.asarray', (['y[1]'], {}), '(y[1])\n', (1925, 1931), True, 'import numpy as np\n'), ((2531, 2600), 'google.api_core.client_options.ClientOptions', 'client_options.ClientOptions', ([], {'api_endpoint': '"""eu-speech.googleapis.com"""'}), "(api_endpoint='eu-speech.googleapis.com')\n", (2559, 2600), False, 'from google.api_core import client_options\n'), ((5223, 5310), 'praatio.tgio.IntervalTier', 'tgio.IntervalTier', (['"""labels"""', '[]', '(0)'], {'pairedWav': "(orig_wav_root + infiles[epi] + '.wav')"}), "('labels', [], 0, pairedWav=orig_wav_root + infiles[epi] +\n '.wav')\n", (5240, 5310), False, 'from praatio import tgio\n'), ((5313, 5405), 'praatio.tgio.IntervalTier', 'tgio.IntervalTier', (['"""transcript"""', '[]', '(0)'], {'pairedWav': "(orig_wav_root + infiles[epi] + '.wav')"}), "('transcript', [], 0, pairedWav=orig_wav_root + infiles[\n epi] + '.wav')\n", (5330, 5405), False, 'from praatio import tgio\n'), ((5412, 5427), 'praatio.tgio.Textgrid', 'tgio.Textgrid', ([], {}), '()\n', (5425, 5427), False, 'from praatio import tgio\n'), ((817, 880), 'codes.helpers.list_filenames', 'list_filenames', (['orig_wav_root'], {'extensions': '""".wav"""', 'add_ext': '(False)'}), "(orig_wav_root, extensions='.wav', add_ext=False)\n", (831, 880), False, 'from codes.helpers import list_filenames, annot2textgrid, load_wav\n'), ((1034, 1060), 'os.path.exists', 'os.path.exists', (['output_loc'], {}), '(output_loc)\n', (1048, 1060), False, 'import os\n'), ((1066, 
1089), 'os.makedirs', 'os.makedirs', (['output_loc'], {}), '(output_loc)\n', (1077, 1089), False, 'import os\n'), ((1098, 1134), 'os.path.exists', 'os.path.exists', (["(output_loc + '/temp')"], {}), "(output_loc + '/temp')\n", (1112, 1134), False, 'import os\n'), ((1140, 1173), 'os.makedirs', 'os.makedirs', (["(output_loc + '/temp')"], {}), "(output_loc + '/temp')\n", (1151, 1173), False, 'import os\n'), ((2089, 2175), 'soundfile.write', 'soundfile.write', (["(output_loc + '/temp/' + wav_name)", 'wav_temp', 'sr'], {'subtype': '"""PCM_16"""'}), "(output_loc + '/temp/' + wav_name, wav_temp, sr, subtype=\n 'PCM_16')\n", (2104, 2175), False, 'import soundfile\n'), ((2193, 2263), 'codes.helpers.list_filenames', 'list_filenames', (["(output_loc + '/temp')"], {'extensions': '""".wav"""', 'add_ext': '(False)'}), "(output_loc + '/temp', extensions='.wav', add_ext=False)\n", (2207, 2263), False, 'from codes.helpers import list_filenames, annot2textgrid, load_wav\n'), ((2674, 2720), 'google.cloud.speech_v1p1beta1.SpeechClient', 'speech.SpeechClient', ([], {'client_options': 'client_ops'}), '(client_options=client_ops)\n', (2693, 2720), True, 'from google.cloud import speech_v1p1beta1 as speech\n'), ((2741, 2775), 'google.cloud.speech_v1p1beta1.types.RecognitionMetadata', 'speech.types.RecognitionMetadata', ([], {}), '()\n', (2773, 2775), True, 'from google.cloud import speech_v1p1beta1 as speech\n'), ((3342, 3382), 'google.cloud.speech_v1p1beta1.RecognitionAudio', 'speech.RecognitionAudio', ([], {'content': 'content'}), '(content=content)\n', (3365, 3382), True, 'from google.cloud import speech_v1p1beta1 as speech\n'), ((3396, 3659), 'google.cloud.speech_v1p1beta1.RecognitionConfig', 'speech.RecognitionConfig', ([], {'encoding': 'speech.RecognitionConfig.AudioEncoding.LINEAR16', 'sample_rate_hertz': '(44100)', 'language_code': '"""en-US"""', 'enable_automatic_punctuation': '(False)', 'enable_word_time_offsets': '(True)', 'use_enhanced': '(True)', 'model': '"""video"""', 
'metadata': 'metadata'}), "(encoding=speech.RecognitionConfig.AudioEncoding.\n LINEAR16, sample_rate_hertz=44100, language_code='en-US',\n enable_automatic_punctuation=False, enable_word_time_offsets=True,\n use_enhanced=True, model='video', metadata=metadata)\n", (3420, 3659), True, 'from google.cloud import speech_v1p1beta1 as speech\n'), ((4932, 4980), 're.sub', 're.sub', (['"""(?<=\\\\w)([A-Z])"""', '""" \\\\1"""', 'transcript[k]'], {}), "('(?<=\\\\w)([A-Z])', ' \\\\1', transcript[k])\n", (4938, 4980), False, 'import re\n'), ((5006, 5078), 're.sub', 're.sub', (['"""([a-z])\\\\-([a-z])"""', '"""\\\\1 \\\\2"""', 'transcript2[k]', '(0)', 're.IGNORECASE'], {}), "('([a-z])\\\\-([a-z])', '\\\\1 \\\\2', transcript2[k], 0, re.IGNORECASE)\n", (5012, 5078), False, 'import re\n')] |
import sys
import csv
import random
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
def generate_ohe(data_x, name):
    """One-hot encode a poker-hand style dataset and write it to CSV.

    Each input row holds (suit, rank) pairs followed by a class label in
    the last column.  Every pair occupies a 17-wide slot (4 suit bits +
    13 rank bits) and the label occupies the final 10 columns, giving 95
    output columns in total.

    Args:
        data_x: integer matrix of shape (n_samples, 2*n_pairs + 1); suits
            are 1-4, ranks 1-13, and the last column is the label (0-9).
        name: path of the CSV file to write (no header, no index).
    """
    new_data = np.asmatrix(np.zeros((data_x.shape[0], 95), dtype=int))
    # BUG FIX: the original used (int)(shape[1]-1)/2, which is true
    # division in Python 3 and passes a float to range(); use floor
    # division instead.
    n_pairs = (data_x.shape[1] - 1) // 2
    for i in range(len(data_x)):
        for j in range(n_pairs):
            new_data[i, 17*j + data_x[i, 2*j] - 1] = 1        # suit bit
            new_data[i, 17*j + 4 + data_x[i, 2*j+1] - 1] = 1  # rank bit
        new_data[i, 85 + data_x[i, -1]] = 1                   # class-label bit
    pd.DataFrame(np.array(new_data)).to_csv(name, header=None, index=None)
def readfile(datapath, name):
    """Load a headerless integer CSV and write its one-hot encoding.

    Args:
        datapath: path of the input CSV file (no header row).
        name: path of the one-hot-encoded output CSV.
    """
    data = pd.read_csv(datapath, header=None)
    # cast to an integer matrix so generate_ohe can use (row, col) indexing
    # (the original also stored data.shape in an unused local; removed)
    data_array = np.asmatrix(np.array(data, dtype=int))
    generate_ohe(data_array, name)
def main():
    """Command-line entry point.

    Expects four positional arguments:
        1. training-data CSV path
        2. test-data CSV path
        3. output path for the one-hot-encoded training data
        4. output path for the one-hot-encoded test data
    """
    train_in = sys.argv[1]
    test_in = sys.argv[2]
    train_out = sys.argv[3]
    test_out = sys.argv[4]
    readfile(train_in, train_out)
    readfile(test_in, test_out)
if __name__ == "__main__":
    main()
"pandas.read_csv",
"numpy.zeros",
"numpy.array"
] | [((529, 563), 'pandas.read_csv', 'pd.read_csv', (['datapath'], {'header': 'None'}), '(datapath, header=None)\n', (540, 563), True, 'import pandas as pd\n'), ((179, 221), 'numpy.zeros', 'np.zeros', (['(data_x.shape[0], 95)'], {'dtype': 'int'}), '((data_x.shape[0], 95), dtype=int)\n', (187, 221), True, 'import numpy as np\n'), ((613, 638), 'numpy.array', 'np.array', (['data'], {'dtype': 'int'}), '(data, dtype=int)\n', (621, 638), True, 'import numpy as np\n'), ((435, 453), 'numpy.array', 'np.array', (['new_data'], {}), '(new_data)\n', (443, 453), True, 'import numpy as np\n')] |
'''
Please note that this code is optimized towards comprehension and not performance.
'''
from tensorflow.python.keras import backend as K
import tensorflow as tf
import numpy as np
import tqdm
class ContrastivTensionModel(tf.keras.Model):
    """Contrastive Tension dual-encoder.

    Two encoders embed the two sentences of a pair; the embeddings are
    mean-pooled over the attention mask, compared by a per-sample dot
    product, and trained with binary cross-entropy on that logit.
    """
    def __init__(self, model1, model2, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model1 = model1
        self.model2 = model2
        # reduced loss used for the optimization step
        self.loss = tf.losses.BinaryCrossentropy(from_logits=True)
        # per-element (unreduced) loss, used to split pos/neg contributions
        self.nonReductionLoss = lambda y, x: K.binary_crossentropy(y, x, from_logits=True)
    def generateSingleEmbedding(self, model, inData, training=False):
        """Mean-pool one encoder's token embeddings over the attention mask.

        Args:
            model: encoder called as model({'input_ids', 'attention_mask'});
                assumed to return token embeddings at output index 0
                (HuggingFace-style) — TODO confirm.
            inData: tuple of (input_ids, attention_mask) tensors.
            training: forwarded to the encoder call.

        Returns:
            Mask-weighted mean of the token embeddings per sample.
        """
        inds, att = inData
        embs = model({'input_ids': inds, 'attention_mask': att}, training=training)[0]
        outAtt = tf.cast(att, tf.float32)
        # number of real (unmasked) tokens per sample; kept 2-D so the
        # division below broadcasts over the embedding dimension
        sampleLength = tf.reduce_sum(outAtt, axis=-1, keepdims=True)
        # zero out embeddings at padded positions before averaging
        maskedEmbs = embs * tf.expand_dims(outAtt, axis=-1)
        return tf.reduce_sum(maskedEmbs, axis=1) / tf.cast(sampleLength, tf.float32)
    @tf.function
    def call(self, inputs, training=False, mask=None):
        """Embed the same input with both encoders; returns (emb1, emb2)."""
        emb1 = self.generateSingleEmbedding(self.model1, inputs, training)
        emb2 = self.generateSingleEmbedding(self.model2, inputs, training)
        return emb1, emb2
    @tf.function
    def predictandCompareSents(self, x1, x2, training=False):
        """Embed x1 with model1, x2 with model2; return (logits, emb1, emb2)."""
        emb1 = self.generateSingleEmbedding(self.model1, x1, training)
        emb2 = self.generateSingleEmbedding(self.model2, x2, training)
        return self.compareSents(emb1, emb2), emb1, emb2
    def compareSents(self, emb1, emb2):
        # per-sample dot product; used as the BCE logit
        return tf.reduce_sum(emb1 * emb2, axis=-1)
    def extractPositiveAndNegativeLoss(self, predValues, labels):
        """Split the summed BCE loss into positive- and negative-pair parts.

        Assumes labels are 0/1; losses*labels keeps positive-pair terms and
        (labels - 1) * -1 is the complementary mask for negative pairs.
        """
        losses = self.nonReductionLoss(labels, predValues)
        pLoss = tf.reduce_sum(losses * labels)
        nLoss = tf.reduce_sum(losses * (labels - 1) * -1)
        return pLoss, nLoss
    @tf.function
    def predictAndUpdate(self, inds1, att1, inds2, att2, labels):
        """Run one optimization step on a batch.

        Returns:
            (total loss, summed positive-pair loss, summed negative-pair loss)
        """
        with tf.GradientTape() as tape:
            # NOTE(review): training=False inside the training step looks
            # suspicious (disables dropout etc. in the encoders) — confirm
            # this is intentional.
            predValues, emb1, emb2 = self.predictandCompareSents((inds1, att1), (inds2, att2),
                                                                 training=False)
            cosineLoss = self.loss(labels, predValues)
        grad = tape.gradient(cosineLoss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grad, self.trainable_variables))
        # Extract loss for Positive/Negative examples for later examination
        pLoss, nLoss = self.extractPositiveAndNegativeLoss(predValues, labels)
        return cosineLoss, pLoss, nLoss
    def fit(self, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0.,
            validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0,
            steps_per_epoch=None, validation_steps=None, validation_freq=1, max_queue_size=10, workers=1,
            use_multiprocessing=False, **kwargs):
        """Single-pass custom training loop.

        The keras Model.fit signature is kept for compatibility, but only
        x, y and batch_size are actually used here.

        Args:
            x: tuple (inds1, att1, inds2, att2) of pre-tokenized batches.
            y: 0/1 labels, one per sentence pair.
            batch_size: mini-batch size for slicing the inputs.

        Returns:
            Tuple of mean (contrastive, positive, negative) losses.
        """
        contrastiveLosses, pLosses, nLosses = [], [], []
        f = lambda x, i: x[i:i + batch_size]
        inds1, att1, inds2, att2 = x
        for i in tqdm.tqdm(range(0, len(inds1), batch_size)):
            # Main Training Loop
            batchInd1, batchInd2, batchAtt1, batchAtt2, = f(inds1, i), f(inds2, i), f(att1, i), f(att2, i)
            cLoss, pLoss, nLoss = self.predictAndUpdate(batchInd1, batchAtt1, batchInd2, batchAtt2, f(y, i))
            # Convert Losses into numpy format, instead of TF tensors, for faster np operations
            contrastiveLosses.append(cLoss.numpy())
            pLosses.append(pLoss.numpy())
            nLosses.append(nLoss.numpy())
        return np.mean(contrastiveLosses), np.mean(pLosses), np.mean(nLosses)
| [
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.losses.BinaryCrossentropy",
"numpy.mean",
"tensorflow.python.keras.backend.binary_crossentropy",
"tensorflow.GradientTape",
"tensorflow.expand_dims"
] | [((423, 469), 'tensorflow.losses.BinaryCrossentropy', 'tf.losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (451, 469), True, 'import tensorflow as tf\n'), ((763, 787), 'tensorflow.cast', 'tf.cast', (['att', 'tf.float32'], {}), '(att, tf.float32)\n', (770, 787), True, 'import tensorflow as tf\n'), ((811, 856), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['outAtt'], {'axis': '(-1)', 'keepdims': '(True)'}), '(outAtt, axis=-1, keepdims=True)\n', (824, 856), True, 'import tensorflow as tf\n'), ((1586, 1621), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(emb1 * emb2)'], {'axis': '(-1)'}), '(emb1 * emb2, axis=-1)\n', (1599, 1621), True, 'import tensorflow as tf\n'), ((1764, 1794), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(losses * labels)'], {}), '(losses * labels)\n', (1777, 1794), True, 'import tensorflow as tf\n'), ((1811, 1852), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(losses * (labels - 1) * -1)'], {}), '(losses * (labels - 1) * -1)\n', (1824, 1852), True, 'import tensorflow as tf\n'), ((515, 560), 'tensorflow.python.keras.backend.binary_crossentropy', 'K.binary_crossentropy', (['y', 'x'], {'from_logits': '(True)'}), '(y, x, from_logits=True)\n', (536, 560), True, 'from tensorflow.python.keras import backend as K\n'), ((885, 916), 'tensorflow.expand_dims', 'tf.expand_dims', (['outAtt'], {'axis': '(-1)'}), '(outAtt, axis=-1)\n', (899, 916), True, 'import tensorflow as tf\n'), ((932, 965), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['maskedEmbs'], {'axis': '(1)'}), '(maskedEmbs, axis=1)\n', (945, 965), True, 'import tensorflow as tf\n'), ((968, 1001), 'tensorflow.cast', 'tf.cast', (['sampleLength', 'tf.float32'], {}), '(sampleLength, tf.float32)\n', (975, 1001), True, 'import tensorflow as tf\n'), ((1978, 1995), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (1993, 1995), True, 'import tensorflow as tf\n'), ((3667, 3693), 'numpy.mean', 'np.mean', (['contrastiveLosses'], {}), '(contrastiveLosses)\n', 
(3674, 3693), True, 'import numpy as np\n'), ((3695, 3711), 'numpy.mean', 'np.mean', (['pLosses'], {}), '(pLosses)\n', (3702, 3711), True, 'import numpy as np\n'), ((3713, 3729), 'numpy.mean', 'np.mean', (['nLosses'], {}), '(nLosses)\n', (3720, 3729), True, 'import numpy as np\n')] |
"""
Test the sensitivity reader
"""
from unittest import TestCase, skipUnless
from collections import OrderedDict
from itertools import product
from io import BytesIO
from numpy import array, inf
from numpy.testing import assert_allclose, assert_array_equal
from serpentTools.data import getFile
from serpentTools.parsers.sensitivity import SensitivityReader
from tests import (
plotTest, getLegendTexts, MatlabTesterHelper, compareDictOfArrays,
HAS_SCIPY,
)
# Shipped BWR sensitivity example file used as the fixture for all tests below
TEST_FILE = getFile('bwr_sens0.m')
class SensitivityTestHelper(TestCase):
    """Shared fixture: parse the example sensitivity file once per class."""

    @classmethod
    def setUpClass(cls):
        reader = SensitivityReader(TEST_FILE)
        reader.read()
        cls.reader = reader
class SensitivityTester(SensitivityTestHelper):
    """Class for testing the sensitivity reader."""
    def test_expectedSensitivities(self):
        """Verify the sensitivity arrays are loaded correctly."""
        # Arrays are indexed (material, zai, perturbation, energy group, 2),
        # where the trailing pair is (value, relative uncertainty); this
        # matches nMat=1, nZai=2, nPert=7, nEne=2 from test_parameters.
        expected = {
            'fis2flx': array([[[
                [[-2.61375000e-01, 7.10000000e-02], # noqa: E126
                [-1.04396000e-01, 1.30000000e-01]],
                [[8.14309000e-04, 1.00000000e+00],
                [-1.89700000e-03, 1.00000000e+00]],
                [[0.00000000e+00, 0.00000000e+00],
                [0.00000000e+00, 0.00000000e+00]],
                [[0.00000000e+00, 0.00000000e+00],
                [5.08970000e-03, 6.80000000e-01]],
                [[-3.80915000e-02, 1.50000000e-01],
                [-3.32722000e-02, 1.50000000e-01]],
                [[-2.24098000e-01, 7.40000000e-02],
                [-7.40533000e-02, 1.50000000e-01]],
                [[0.00000000e+00, 0.00000000e+00],
                [-2.63575000e-04, 1.00000000e+00]]],
                [[[-1.82609000e-02, 8.50000000e-01],
                [9.13794000e-03, 1.00000000e+00]],
                [[1.05618000e-02, 1.00000000e+00],
                [2.10562000e-02, 1.00000000e+00]],
                [[0.00000000e+00, 0.00000000e+00],
                [0.00000000e+00, 0.00000000e+00]],
                [[0.00000000e+00, 0.00000000e+00],
                [4.73019000e-02, 4.00000000e-01]],
                [[-2.88227000e-02, 1.30000000e-01],
                [-7.02287000e-02, 1.10000000e-01]],
                [[0.00000000e+00, 0.00000000e+00],
                [1.05040000e-02, 6.80000000e-01]],
                [[0.00000000e+00, 0.00000000e+00],
                [5.04490000e-04, 1.00000000e+00]]]]]),
            'keff': array([[[
                [[2.33920000e-01, 8.00000000e-02],
                [8.70984000e-02, 2.10000000e-01]],
                [[-5.39529000e-03, 7.90000000e-01],
                [-1.93425000e-04, 1.00000000e+00]],
                [[0.00000000e+00, 0.00000000e+00],
                [0.00000000e+00, 0.00000000e+00]],
                [[0.00000000e+00, 0.00000000e+00],
                [-5.16380000e-03, 7.60000000e-01]],
                [[-6.34126000e-02, 8.10000000e-02],
                [-5.09998000e-02, 9.00000000e-02]],
                [[3.02727000e-01, 6.00000000e-02],
                [1.43519000e-01, 8.30000000e-02]],
                [[0.00000000e+00, 0.00000000e+00],
                [-6.34103000e-05, 1.00000000e+00]]],
                [[[-2.55744000e-02, 4.90000000e-01],
                [-1.08870000e-01, 4.00000000e-01]],
                [[7.10802000e-03, 1.00000000e+00],
                [-1.57546000e-02, 1.00000000e+00]],
                [[0.00000000e+00, 0.00000000e+00],
                [0.00000000e+00, 0.00000000e+00]],
                [[0.00000000e+00, 0.00000000e+00],
                [-2.26180000e-02, 7.00000000e-01]],
                [[-3.26824000e-02, 1.20000000e-01],
                [-1.20262000e-01, 8.30000000e-02]],
                [[0.00000000e+00, 0.00000000e+00],
                [4.75881000e-02, 1.10000000e-01]],
                [[0.00000000e+00, 0.00000000e+00],
                [2.17649000e-03, 5.30000000e-01]]]]])
        }
        compareDictOfArrays(expected, self.reader.sensitivities,
                            'Error in sensitivities at {key}', testCase=self)
    def test_integratedSensitivities(self):
        """Verify the energy integrated sensitivities are correct."""
        # Same layout as above with the energy axis summed out:
        # (material, zai, perturbation, 2)
        expected = {
            'fis2flx': array([[ # noqa: E126
                [[-3.65771000e-01, 5.50000000e-02],
                [-1.08269000e-03, 1.00000000e+00],
                [0.00000000e+00, 0.00000000e+00],
                [5.08970000e-03, 6.80000000e-01],
                [-7.13637000e-02, 1.10000000e-01],
                [-2.98151000e-01, 6.50000000e-02],
                [-2.63575000e-04, 1.00000000e+00]],
                [[-9.12298000e-03, 1.00000000e+00],
                [3.16180000e-02, 1.00000000e+00],
                [0.00000000e+00, 0.00000000e+00],
                [4.73019000e-02, 4.00000000e-01],
                [-9.90513000e-02, 8.80000000e-02],
                [1.05040000e-02, 6.80000000e-01],
                [5.04490000e-04, 1.00000000e+00]]]]),
            'keff': array([[
                [[3.21018000e-01, 7.40000000e-02],
                [-5.58871000e-03, 1.00000000e+00],
                [0.00000000e+00, 0.00000000e+00],
                [-5.16380000e-03, 7.60000000e-01],
                [-1.14412000e-01, 5.70000000e-02],
                [4.46246000e-01, 4.20000000e-02],
                [-6.34103000e-05, 1.00000000e+00]],
                [[-1.34445000e-01, 3.40000000e-01],
                [-8.64658000e-03, 1.00000000e+00],
                [0.00000000e+00, 0.00000000e+00],
                [-2.26180000e-02, 7.00000000e-01],
                [-1.52945000e-01, 7.30000000e-02],
                [4.75881000e-02, 1.10000000e-01],
                [2.17649000e-03, 5.30000000e-01]]]])
        }
        compareDictOfArrays(expected, self.reader.energyIntegratedSens,
                            'energy integrated sensitivities')
    def test_parameters(self):
        """Verify the scalar problem-size attributes parsed from the file."""
        expected = {'nMat': 1, 'nEne': 2, 'nZai': 2, 'nPert': 7, 'latGen': 14}
        for key, value in expected.items():
            actual = getattr(self.reader, key)
            self.assertEqual(value, actual,
                             msg="Parameter: {}".format(key))
    def test_energyBounds(self):
        """Verify the energy bounds are stored properly."""
        expectedBounds = array([0, 6.250E-7, 1.0E37])
        expectedLethWidths = array([inf, 9.94812E+1])
        for expected, actualStr in zip(
                (expectedBounds, expectedLethWidths),
                ('energies', 'lethargyWidths')):
            actual = getattr(self.reader, actualStr)
            assert_allclose(expected, actual, err_msg=actualStr)
    def test_perts(self):
        """Verify the ordered dictionary of perturbations is correct."""
        expected = OrderedDict([
            ('total xs', 0), ('ela scatt xs', 1), ('sab scatt xs', 2),
            ('inl scatt xs', 3), ('capture xs', 4), ('fission xs', 5),
            ('nxn xs', 6)])
        actual = self.reader.perts
        self.assertDictEqual(expected, actual)
    def test_zai(self):
        """Verify the ordered dictionary of perturbed isotopes is correct."""
        expected = OrderedDict([
            (922350, 0), (922380, 1)])
        actual = self.reader.zais
        self.assertDictEqual(expected, actual)
    def test_materials(self):
        """Verify the ordered dictionary of perturbed materials is correct."""
        expected = OrderedDict([
            ('total', 0)])
        actual = self.reader.materials
        self.assertDictEqual(expected, actual)
class SensitivityPlotTester(SensitivityTestHelper):
    """Class for testing rudimentary plot aspects of the reader."""
    ZAIS = [922350]
    RESP = 'keff'
    DEFAULT_XLABEL = "Energy [eV]"

    def _plot(self, **kwargs):
        """Forward keyword options to the reader's plot of self.RESP."""
        return self.reader.plot(self.RESP, **kwargs)

    def _checkAxisLabels(self, ax, xlabel, ylabel, msg=None):
        """Assert that the x and y axis labels match the expected strings."""
        self.assertEqual(ax.get_xlabel(), xlabel, msg=msg)
        self.assertEqual(ax.get_ylabel(), ylabel, msg=msg)

    @plotTest
    def test_plot_normalized(self):
        """Verify the default axis labels when normalized and with errors."""
        axes = self._plot(normalize=True, sigma=3)
        expectedY = 'Sensitivity per unit lethargy $\\pm3\\sigma$'
        self._checkAxisLabels(axes, self.DEFAULT_XLABEL, expectedY)

    @plotTest
    def test_plot_notNormalized_noSigma(self):
        """
        Verify the default axis labels when not normalized nor errors given.
        """
        axes = self._plot(normalize=False, sigma=0)
        self._checkAxisLabels(axes, self.DEFAULT_XLABEL, 'Sensitivity')

    @plotTest
    def test_plot_passLabels_noset(self):
        """Verify that the axis labels can be set to empty strings."""
        unsetLabel = ""  # what matplotlib reports for an un-set axis label
        axes = self._plot(xlabel=False, ylabel="")
        self._checkAxisLabels(axes, unsetLabel, unsetLabel)

    def _generateLegendTexts(self, formatter, zais=None, mats=None,
                             perts=None):
        """Build the legend entries expected for the given perturbations."""
        reader = self.reader
        zais = zais if zais else list(reader.zais.keys())
        mats = mats if mats else list(reader.materials.keys())
        perts = perts if perts else list(reader.perts.keys())
        # match iteration order of sensitivity reader
        return [formatter.format(z=z, m=m, p=p, r=self.RESP)
                for z, m, p in product(zais, mats, perts)]

    @plotTest
    def test_plot_labelFormatter(self):
        """Verify the label formatter for sensitivity plots."""
        labelFmt = "{m} {z} {p} {r}"
        axes = self._plot(labelFmt=labelFmt)
        self.assertListEqual(getLegendTexts(axes),
                             self._generateLegendTexts(labelFmt))

    @plotTest
    def test_plot_labelFormatter_oneIso(self):
        """Verify the label formatter for sensitivity plots - pass one ZAI."""
        labelFmt = "{m} {z} {p} {r}"
        axes = self._plot(zai=922350, labelFmt=labelFmt)
        self.assertListEqual(getLegendTexts(axes),
                             self._generateLegendTexts(labelFmt, zais=[922350]))

    @plotTest
    def test_plot_raiseError_missingPert(self):
        """Verify that an error is raised if a bad perturbation is passed."""
        with self.assertRaises(KeyError):
            self._plot(zai=-100)

    @plotTest
    def test_plot_raiseError_missingResp(self):
        """Verify that an error is raised if a bad response is passed."""
        with self.assertRaises(KeyError):
            self.reader.plot("THIS SHOULD FAIL")
class Sens2MatlabHelper(MatlabTesterHelper, SensitivityTestHelper):
    """Base class for comparing sensitivity reader to matlab"""
    def setUp(self):
        # pick original (reconverted) or serpent-internal variable names
        # based on the subclass's RECONVERT flag
        convertIx = int(self.RECONVERT)
        self.attrMap = {
            key: value[convertIx]
            for key, value in SensitivityReader._RECONVERT_ATTR_MAP.items()}
        self.listMap = {
            key: value[convertIx]
            for key, value in SensitivityReader._RECONVERT_LIST_MAP.items()}
        self.sensFmts = SensitivityReader._RECONVERT_SENS_FMT[convertIx]
        MatlabTesterHelper.setUp(self)
    def _testGathered(self, gathered):
        """Test the contents of the gathered data"""
        # attributes
        for attr, expKey in self.attrMap.items():
            self.assertTrue(expKey in gathered, msg=expKey)
            expVal = getattr(self.reader, attr)
            actVal = gathered[expKey]
            assert_array_equal(actVal, expVal, err_msg=expKey)
        # lists -> compare against ordered dictionaries
        for attr, expKey in self.listMap.items():
            self.assertTrue(expKey in gathered, msg=expKey)
            expVal = list(getattr(self.reader, attr).keys())
            actVal = gathered[expKey]
            assert_array_equal(actVal, expVal, err_msg=expKey)
        for sensKey, expSens in self.reader.sensitivities.items():
            expEneSens = self.reader.energyIntegratedSens[sensKey]
            sensName, eneName = [fmt.format(sensKey) for fmt in self.sensFmts]
            self.assertTrue(sensName in gathered,
                            msg="{}//{}".format(sensKey, sensName))
            self.assertTrue(eneName in gathered,
                            msg="{}//{}".format(sensKey, eneName))
            actSens = gathered[sensName]
            actEneSens = gathered[eneName]
            assert_array_equal(actSens, expSens,
                               err_msg="{}//{}".format(sensKey, sensName))
            assert_array_equal(actEneSens, expEneSens,
                               err_msg="{}//{}".format(sensKey, eneName))
    def test_gatherMatlab(self):
        """Test the readers ability to gather for matlab"""
        gathered = self.reader._gather_matlab(self.RECONVERT)
        self._testGathered(gathered)
    @skipUnless(HAS_SCIPY, "SCIPY needed for this test")
    def test_toMatlab(self):
        """Verify the contents of the reader can be written to matlab"""
        from scipy.io import loadmat
        stream = BytesIO()
        self.reader.toMatlab(stream)
        gathered = loadmat(stream)
        # some vectors will be written as 2D row/column vectors
        # need to reshape them to 1D arrays
        keys = gathered.keys()
        for key in keys:
            if key[:2] == '__':  # special stuff from savemat
                continue
            value = gathered[key]
            if value.size > 1 and 1 in value.shape:
                gathered[key] = value.flatten()
        # BUG FIX: the round-tripped contents were loaded and reshaped but
        # never checked, so this test could not fail on bad data; compare
        # them against the reader the same way test_gatherMatlab does.
        self._testGathered(gathered)
class ReconvertedSens2MatlabTester(Sens2MatlabHelper):
    """Class for testing the sens - matlab conversion with original names"""
    RECONVERT = True
class UnconvertedSens2MatlabTester(Sens2MatlabHelper):
    """Class for testing the sens - matlab conversion with unconverted names"""
    RECONVERT = False
# the helper is abstract-only: delete it so test discovery does not run it
del Sens2MatlabHelper
| [
"io.BytesIO",
"tests.MatlabTesterHelper.setUp",
"scipy.io.loadmat",
"serpentTools.parsers.sensitivity.SensitivityReader._RECONVERT_ATTR_MAP.items",
"numpy.testing.assert_array_equal",
"serpentTools.parsers.sensitivity.SensitivityReader._RECONVERT_LIST_MAP.items",
"unittest.skipUnless",
"tests.getLegen... | [((482, 504), 'serpentTools.data.getFile', 'getFile', (['"""bwr_sens0.m"""'], {}), "('bwr_sens0.m')\n", (489, 504), False, 'from serpentTools.data import getFile\n'), ((13212, 13263), 'unittest.skipUnless', 'skipUnless', (['HAS_SCIPY', '"""SCIPY needed for this test"""'], {}), "(HAS_SCIPY, 'SCIPY needed for this test')\n", (13222, 13263), False, 'from unittest import TestCase, skipUnless\n'), ((609, 637), 'serpentTools.parsers.sensitivity.SensitivityReader', 'SensitivityReader', (['TEST_FILE'], {}), '(TEST_FILE)\n', (626, 637), False, 'from serpentTools.parsers.sensitivity import SensitivityReader\n'), ((3990, 4100), 'tests.compareDictOfArrays', 'compareDictOfArrays', (['expected', 'self.reader.sensitivities', '"""Error in sensitivities at {key}"""'], {'testCase': 'self'}), "(expected, self.reader.sensitivities,\n 'Error in sensitivities at {key}', testCase=self)\n", (4009, 4100), False, 'from tests import plotTest, getLegendTexts, MatlabTesterHelper, compareDictOfArrays, HAS_SCIPY\n'), ((6012, 6114), 'tests.compareDictOfArrays', 'compareDictOfArrays', (['expected', 'self.reader.energyIntegratedSens', '"""energy integrated sensitivities"""'], {}), "(expected, self.reader.energyIntegratedSens,\n 'energy integrated sensitivities')\n", (6031, 6114), False, 'from tests import plotTest, getLegendTexts, MatlabTesterHelper, compareDictOfArrays, HAS_SCIPY\n'), ((6566, 6593), 'numpy.array', 'array', (['[0, 6.25e-07, 1e+37]'], {}), '([0, 6.25e-07, 1e+37])\n', (6571, 6593), False, 'from numpy import array, inf\n'), ((6624, 6645), 'numpy.array', 'array', (['[inf, 99.4812]'], {}), '([inf, 99.4812])\n', (6629, 6645), False, 'from numpy import array, inf\n'), ((7029, 7180), 'collections.OrderedDict', 'OrderedDict', (["[('total xs', 0), ('ela scatt xs', 1), ('sab scatt xs', 2), ('inl scatt xs',\n 3), ('capture xs', 4), ('fission xs', 5), ('nxn xs', 6)]"], {}), "([('total xs', 0), ('ela scatt xs', 1), ('sab scatt xs', 2), (\n 'inl scatt xs', 3), ('capture xs', 
4), ('fission xs', 5), ('nxn xs', 6)])\n", (7040, 7180), False, 'from collections import OrderedDict\n'), ((7417, 7456), 'collections.OrderedDict', 'OrderedDict', (['[(922350, 0), (922380, 1)]'], {}), '([(922350, 0), (922380, 1)])\n', (7428, 7456), False, 'from collections import OrderedDict\n'), ((7680, 7707), 'collections.OrderedDict', 'OrderedDict', (["[('total', 0)]"], {}), "([('total', 0)])\n", (7691, 7707), False, 'from collections import OrderedDict\n'), ((9657, 9683), 'itertools.product', 'product', (['zais', 'mats', 'perts'], {}), '(zais, mats, perts)\n', (9664, 9683), False, 'from itertools import product\n'), ((9993, 10011), 'tests.getLegendTexts', 'getLegendTexts', (['ax'], {}), '(ax)\n', (10007, 10011), False, 'from tests import plotTest, getLegendTexts, MatlabTesterHelper, compareDictOfArrays, HAS_SCIPY\n'), ((10364, 10382), 'tests.getLegendTexts', 'getLegendTexts', (['ax'], {}), '(ax)\n', (10378, 10382), False, 'from tests import plotTest, getLegendTexts, MatlabTesterHelper, compareDictOfArrays, HAS_SCIPY\n'), ((11495, 11525), 'tests.MatlabTesterHelper.setUp', 'MatlabTesterHelper.setUp', (['self'], {}), '(self)\n', (11519, 11525), False, 'from tests import plotTest, getLegendTexts, MatlabTesterHelper, compareDictOfArrays, HAS_SCIPY\n'), ((13420, 13429), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (13427, 13429), False, 'from io import BytesIO\n'), ((13486, 13501), 'scipy.io.loadmat', 'loadmat', (['stream'], {}), '(stream)\n', (13493, 13501), False, 'from scipy.io import loadmat\n'), ((919, 1466), 'numpy.array', 'array', (['[[[[[-0.261375, 0.071], [-0.104396, 0.13]], [[0.000814309, 1.0], [-0.001897,\n 1.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0050897, 0.68]], [[-\n 0.0380915, 0.15], [-0.0332722, 0.15]], [[-0.224098, 0.074], [-0.0740533,\n 0.15]], [[0.0, 0.0], [-0.000263575, 1.0]]], [[[-0.0182609, 0.85], [\n 0.00913794, 1.0]], [[0.0105618, 1.0], [0.0210562, 1.0]], [[0.0, 0.0], [\n 0.0, 0.0]], [[0.0, 0.0], [0.0473019, 0.4]], [[-0.0288227, 0.13], 
[-\n 0.0702287, 0.11]], [[0.0, 0.0], [0.010504, 0.68]], [[0.0, 0.0], [\n 0.00050449, 1.0]]]]]'], {}), '([[[[[-0.261375, 0.071], [-0.104396, 0.13]], [[0.000814309, 1.0], [-\n 0.001897, 1.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0050897, \n 0.68]], [[-0.0380915, 0.15], [-0.0332722, 0.15]], [[-0.224098, 0.074],\n [-0.0740533, 0.15]], [[0.0, 0.0], [-0.000263575, 1.0]]], [[[-0.0182609,\n 0.85], [0.00913794, 1.0]], [[0.0105618, 1.0], [0.0210562, 1.0]], [[0.0,\n 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0473019, 0.4]], [[-0.0288227, 0.13],\n [-0.0702287, 0.11]], [[0.0, 0.0], [0.010504, 0.68]], [[0.0, 0.0], [\n 0.00050449, 1.0]]]]])\n', (924, 1466), False, 'from numpy import array, inf\n'), ((2462, 3012), 'numpy.array', 'array', (['[[[[[0.23392, 0.08], [0.0870984, 0.21]], [[-0.00539529, 0.79], [-\n 0.000193425, 1.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [-0.0051638,\n 0.76]], [[-0.0634126, 0.081], [-0.0509998, 0.09]], [[0.302727, 0.06], [\n 0.143519, 0.083]], [[0.0, 0.0], [-6.34103e-05, 1.0]]], [[[-0.0255744, \n 0.49], [-0.10887, 0.4]], [[0.00710802, 1.0], [-0.0157546, 1.0]], [[0.0,\n 0.0], [0.0, 0.0]], [[0.0, 0.0], [-0.022618, 0.7]], [[-0.0326824, 0.12],\n [-0.120262, 0.083]], [[0.0, 0.0], [0.0475881, 0.11]], [[0.0, 0.0], [\n 0.00217649, 0.53]]]]]'], {}), '([[[[[0.23392, 0.08], [0.0870984, 0.21]], [[-0.00539529, 0.79], [-\n 0.000193425, 1.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [-0.0051638,\n 0.76]], [[-0.0634126, 0.081], [-0.0509998, 0.09]], [[0.302727, 0.06], [\n 0.143519, 0.083]], [[0.0, 0.0], [-6.34103e-05, 1.0]]], [[[-0.0255744, \n 0.49], [-0.10887, 0.4]], [[0.00710802, 1.0], [-0.0157546, 1.0]], [[0.0,\n 0.0], [0.0, 0.0]], [[0.0, 0.0], [-0.022618, 0.7]], [[-0.0326824, 0.12],\n [-0.120262, 0.083]], [[0.0, 0.0], [0.0475881, 0.11]], [[0.0, 0.0], [\n 0.00217649, 0.53]]]]])\n', (2467, 3012), False, 'from numpy import array, inf\n'), ((4288, 4573), 'numpy.array', 'array', (['[[[[-0.365771, 0.055], [-0.00108269, 1.0], [0.0, 0.0], [0.0050897, 0.68], [\n -0.0713637, 
0.11], [-0.298151, 0.065], [-0.000263575, 1.0]], [[-\n 0.00912298, 1.0], [0.031618, 1.0], [0.0, 0.0], [0.0473019, 0.4], [-\n 0.0990513, 0.088], [0.010504, 0.68], [0.00050449, 1.0]]]]'], {}), '([[[[-0.365771, 0.055], [-0.00108269, 1.0], [0.0, 0.0], [0.0050897, \n 0.68], [-0.0713637, 0.11], [-0.298151, 0.065], [-0.000263575, 1.0]], [[\n -0.00912298, 1.0], [0.031618, 1.0], [0.0, 0.0], [0.0473019, 0.4], [-\n 0.0990513, 0.088], [0.010504, 0.68], [0.00050449, 1.0]]]])\n', (4293, 4573), False, 'from numpy import array, inf\n'), ((5160, 5447), 'numpy.array', 'array', (['[[[[0.321018, 0.074], [-0.00558871, 1.0], [0.0, 0.0], [-0.0051638, 0.76], [\n -0.114412, 0.057], [0.446246, 0.042], [-6.34103e-05, 1.0]], [[-0.134445,\n 0.34], [-0.00864658, 1.0], [0.0, 0.0], [-0.022618, 0.7], [-0.152945, \n 0.073], [0.0475881, 0.11], [0.00217649, 0.53]]]]'], {}), '([[[[0.321018, 0.074], [-0.00558871, 1.0], [0.0, 0.0], [-0.0051638, \n 0.76], [-0.114412, 0.057], [0.446246, 0.042], [-6.34103e-05, 1.0]], [[-\n 0.134445, 0.34], [-0.00864658, 1.0], [0.0, 0.0], [-0.022618, 0.7], [-\n 0.152945, 0.073], [0.0475881, 0.11], [0.00217649, 0.53]]]])\n', (5165, 5447), False, 'from numpy import array, inf\n'), ((6857, 6909), 'numpy.testing.assert_allclose', 'assert_allclose', (['expected', 'actual'], {'err_msg': 'actualStr'}), '(expected, actual, err_msg=actualStr)\n', (6872, 6909), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((11848, 11898), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['actVal', 'expVal'], {'err_msg': 'expKey'}), '(actVal, expVal, err_msg=expKey)\n', (11866, 11898), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((12177, 12227), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['actVal', 'expVal'], {'err_msg': 'expKey'}), '(actVal, expVal, err_msg=expKey)\n', (12195, 12227), False, 'from numpy.testing import assert_allclose, assert_array_equal\n'), ((11231, 11276), 
'serpentTools.parsers.sensitivity.SensitivityReader._RECONVERT_ATTR_MAP.items', 'SensitivityReader._RECONVERT_ATTR_MAP.items', ([], {}), '()\n', (11274, 11276), False, 'from serpentTools.parsers.sensitivity import SensitivityReader\n'), ((11367, 11412), 'serpentTools.parsers.sensitivity.SensitivityReader._RECONVERT_LIST_MAP.items', 'SensitivityReader._RECONVERT_LIST_MAP.items', ([], {}), '()\n', (11410, 11412), False, 'from serpentTools.parsers.sensitivity import SensitivityReader\n')] |
from .base import QA
import glob
import os
import collections
import numpy as np
import fitsio
import multiprocessing as mp
import scipy.ndimage
from astropy.table import Table
import desiutil.log
from desispec.maskbits import ccdmask
from ..run import get_ncpu
def _fix_amp_names(hdr):
'''In-place fix of header `hdr` amp names 1-4 to A-D if needed.'''
#- Assume that if any are right, all are right
if 'DATASECA' in hdr:
return
log = desiutil.log.get_logger()
log.debug('Correcting AMP 1-4 to A-D for night {} expid {}'.format(
hdr['NIGHT'], hdr['EXPID']))
for prefix in [
'GAIN', 'RDNOISE', 'PRESEC', 'PRRSEC', 'DATASEC', 'TRIMSEC', 'BIASSEC',
'ORSEC', 'CCDSEC', 'DETSEC', 'AMPSEC', 'OBSRDN', 'OVERSCN'
]:
for ampnum, ampname in [('1','A'), ('2','B'), ('3','C'), ('4','D')]:
if prefix+ampnum in hdr:
hdr[prefix+ampname] = hdr[prefix+ampnum]
hdr.delete(prefix+ampnum)
class QAAmp(QA):
    """Per-amplifier QA metrics (READNOISE, BIAS, COSMICS_RATE)."""
    def __init__(self):
        self.output_type = "PER_AMP"

    def valid_obstype(self, obstype):
        '''PER_AMP QA metrics work for all obstypes of exposures'''
        return True

    def count_cosmics(self, mask):
        '''
        Count number of cosmics in this mask; doesn't try to deblend
        overlapping cosmics
        '''
        cosmics_mask = mask & ccdmask.COSMIC
        #- Any nonzero adjacent pixels count as connected, even if diagonal
        structure = np.ones((3,3))
        num_cosmics = scipy.ndimage.label(cosmics_mask, structure=structure)[1]
        return num_cosmics

    def run(self, indir):
        '''Generates table of PER_AMP qa metrics (READNOISE, BIAS, COSMICS_RATE).
        Args:
            indir: path to directory containing preproc-*.fits files for the given exposure
        Returns an astropy Table object.'''
        infiles = glob.glob(os.path.join(indir, 'preproc-*.fits'))
        argslist = [(self, infile, amp) for infile in infiles for amp in ['A', 'B', 'C', 'D']]
        ncpu = get_ncpu(None)
        if ncpu > 1:
            pool = mp.Pool(ncpu)
            results = pool.starmap(get_dico, argslist)
            pool.close()
            pool.join()
        else:
            #- Bugfix: args is a tuple, so unpack positionally (*args);
            #- the original get_dico(**args) raised TypeError (** needs a mapping)
            results = [get_dico(*args) for args in argslist]
        #- remove None entries from missing amp (e.g. 2-amp readout)
        results = [r for r in results if r is not None]
        table = Table(results, names=results[0].keys())
        return table
def get_dico(self, filename, amp):
    '''Function to generate per amp metrics given a preproc file to analyze, and a specific amp.
    Args:
        filename: path to preproc file (str)
        amp: name of amp to analyze (str), either A, B, C, or D
    Returns an OrderedDict object or None if amp isn't in data.
    '''
    hdr = fitsio.read_header(filename, 'IMAGE')  #- readnoise / bias keywords
    if 'BIASSEC'+amp not in hdr.keys():
        #- This amp was not read out in this exposure (e.g. 2-amp readout)
        return None
    mask = fitsio.read(filename, 'MASK')  #- cosmics are flagged in the mask
    _fix_amp_names(hdr)
    night = hdr['NIGHT']
    expid = hdr['EXPID']
    cam = hdr['CAMERA'][0].upper()
    spectro = int(hdr['CAMERA'][1])
    #- Effective time for cosmics = exposure time + half of the readout time
    exptime = hdr['EXPTIME']
    exptime += hdr['DIGITIME']/2 if 'DIGITIME' in hdr else 30.0
    ny, nx = mask.shape
    npix_amp = nx*ny//4
    #- CCD read noise and overscan offset (bias) level for this amp
    readnoise = hdr['OBSRDN'+amp]
    biaslevel = hdr['OVERSCN'+amp]
    #- Quadrant of the mask covered by this amp; anything not A/B/C
    #- falls through to the D quadrant, matching the original if/else chain
    quadrants = {
        'A': (slice(0, ny//2), slice(0, nx//2)),
        'B': (slice(0, ny//2), slice(nx//2, None)),
        'C': (slice(ny//2, None), slice(0, nx//2)),
        'D': (slice(ny//2, None), slice(nx//2, None)),
    }
    yslice, xslice = quadrants.get(amp, quadrants['D'])
    submask = mask[yslice, xslice]
    #- Cosmic-ray hits per minute on this amplifier
    num_cosmics = self.count_cosmics(submask)
    cosmics_rate = num_cosmics / (exptime/60)
    return collections.OrderedDict(
        NIGHT=night, EXPID=expid, SPECTRO=spectro, CAM=cam, AMP=amp,
        READNOISE=readnoise, BIAS=biaslevel, COSMICS_RATE=cosmics_rate)
| [
"numpy.ones",
"fitsio.read",
"multiprocessing.Pool",
"collections.OrderedDict",
"os.path.join",
"fitsio.read_header"
] | [((2989, 3026), 'fitsio.read_header', 'fitsio.read_header', (['filename', '"""IMAGE"""'], {}), "(filename, 'IMAGE')\n", (3007, 3026), False, 'import fitsio\n'), ((3123, 3152), 'fitsio.read', 'fitsio.read', (['filename', '"""MASK"""'], {}), "(filename, 'MASK')\n", (3134, 3152), False, 'import fitsio\n'), ((4314, 4345), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '(**dico)\n', (4337, 4345), False, 'import collections\n'), ((1541, 1556), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (1548, 1556), True, 'import numpy as np\n'), ((1951, 1988), 'os.path.join', 'os.path.join', (['indir', '"""preproc-*.fits"""'], {}), "(indir, 'preproc-*.fits')\n", (1963, 1988), False, 'import os\n'), ((2198, 2211), 'multiprocessing.Pool', 'mp.Pool', (['ncpu'], {}), '(ncpu)\n', (2205, 2211), True, 'import multiprocessing as mp\n')] |
import phenom
import numpy as np
import scipy
from scipy.fftpack import fft, fftfreq, fftshift, ifft
def fft(t, h):
    """Approximate the continuous Fourier transform of a time series.

    t : in units of seconds (assumed uniformly sampled)
    h : in units of strain
    Returns (frequencies in Hz, htilde = dt * DFT(h)).
    """
    sample_spacing = t[1] - t[0]
    n_samples = len(t)
    #- the dt factor makes the discrete sum approximate the continuous integral
    htilde = scipy.fftpack.fft(h) * sample_spacing
    freqs = scipy.fftpack.fftfreq(n_samples, sample_spacing)
    return freqs, htilde
def myifft(f, htilde, f0, taper_low_width):
    """Inverse FFT of a frequency-domain waveform with a low-frequency taper.

    Args:
        f : array of frequencies in Hz (uniformly spaced)
        htilde : complex frequency-domain waveform
        f0 : float, start frequency of taper
        taper_low_width : float, width in Hz of taper

    Returns:
        (t, h): time samples and complex time-domain waveform.

    Cleanup vs. original: removed dead code (an immediately overwritten
    phase_shift assignment and an unused `textra`), and the input array is
    no longer mutated in place.
    """
    phase = np.unwrap(np.angle(htilde))
    #- Align the waveform using the final unwrapped phase sample
    phase_shift = phase[-1]
    #- Work on a copy so the caller's htilde is left untouched
    htilde = htilde * np.exp(-1.j * 2. * np.pi * f * phase_shift)
    #- Planck taper rolls the spectrum on smoothly between f0 and f0 + width
    win_minus = phenom.planck_taper(f, f0, f0 + taper_low_width)
    htilde = htilde * win_minus
    N = len(f)
    df = f[1] - f[0]
    dt = 1.0 / (df * N)
    #- FIXME: Is this factor of 2 correct or should it be somewhere else?
    h = 2 * ifft(htilde) / dt
    maxTime = dt * N
    t = np.arange(0., maxTime, dt)
    return t, h
| [
"numpy.ceil",
"numpy.angle",
"phenom.planck_taper",
"scipy.fftpack.fft",
"scipy.fftpack.ifft",
"numpy.arange",
"numpy.exp",
"scipy.fftpack.fftfreq"
] | [((273, 301), 'scipy.fftpack.fftfreq', 'scipy.fftpack.fftfreq', (['N', 'dt'], {}), '(N, dt)\n', (294, 301), False, 'import scipy\n'), ((911, 956), 'numpy.exp', 'np.exp', (['(-1.0j * 2.0 * np.pi * f * phase_shift)'], {}), '(-1.0j * 2.0 * np.pi * f * phase_shift)\n', (917, 956), True, 'import numpy as np\n'), ((976, 1024), 'phenom.planck_taper', 'phenom.planck_taper', (['f', 'f0', '(f0 + taper_low_width)'], {}), '(f, f0, f0 + taper_low_width)\n', (995, 1024), False, 'import phenom\n'), ((1397, 1424), 'numpy.arange', 'np.arange', (['(0.0)', 'maxTime', 'dt'], {}), '(0.0, maxTime, dt)\n', (1406, 1424), True, 'import numpy as np\n'), ((239, 259), 'scipy.fftpack.fft', 'scipy.fftpack.fft', (['h'], {}), '(h)\n', (256, 259), False, 'import scipy\n'), ((579, 595), 'numpy.angle', 'np.angle', (['htilde'], {}), '(htilde)\n', (587, 595), True, 'import numpy as np\n'), ((807, 836), 'numpy.ceil', 'np.ceil', (['(extra_cycles / f_min)'], {}), '(extra_cycles / f_min)\n', (814, 836), True, 'import numpy as np\n'), ((1255, 1267), 'scipy.fftpack.ifft', 'ifft', (['htilde'], {}), '(htilde)\n', (1259, 1267), False, 'from scipy.fftpack import fft, fftfreq, fftshift, ifft\n')] |
import streamlit as st
import pandas as pd
import numpy as np
import os
def main():
    """Streamlit demo app: load NYC Uber pickup data and render a raw-data
    table, an hourly histogram, and pickup maps (with an hour filter)."""
    st.title('Uber pickups in NYC')
    DATE_COLUMN = 'date/time'
    DATA_URL = ('https://s3-us-west-2.amazonaws.com/'
             'streamlit-demo-data/uber-raw-data-sep14.csv.gz')
    @st.cache
    def load_data(nrows):
        # Download the CSV, lower-case the column names, and parse the dates.
        data = pd.read_csv(DATA_URL, nrows=nrows)
        lowercase = lambda x: str(x).lower()
        data.rename(lowercase, axis='columns', inplace=True)
        data[DATE_COLUMN] = pd.to_datetime(data[DATE_COLUMN])
        return data
    # Create a text element and let the reader know the data is loading.
    data_load_state = st.text('Loading data...')
    # Load 10,000 rows of data into the dataframe.
    data = load_data(10000)
    # Notify the reader that the data was successfully loaded.
    data_load_state.text("Done! (using st.cache)")
    # Raw data table (always shown)
    st.subheader('Raw data')
    st.write(data)
    # Raw data table again, behind a toggle checkbox
    if st.checkbox('Show raw data'):
        st.subheader('Raw data Com interruptor')
        st.write(data)
    # Bar chart: number of pickups per hour of day
    st.subheader('Number of pickups by hour')
    hist_values = np.histogram(
        data[DATE_COLUMN].dt.hour, bins=24, range=(0,24))[0]
    st.bar_chart(hist_values)
    # Map of every pickup location
    st.subheader('Map of all pickups')
    st.map(data)
    # Filter pickups to a single hour chosen with a slider
    #hour_to_filter = 17
    hour_to_filter = st.slider('hour', 0, 23, 17) # min: 0h, max: 23h, default: 17h
    filtered_data = data[data[DATE_COLUMN].dt.hour == hour_to_filter]
    st.subheader('Map of all pickups at {}:00'.format(hour_to_filter))
    st.map(filtered_data)
main() | [
"streamlit.subheader",
"streamlit.map",
"streamlit.slider",
"streamlit.checkbox",
"pandas.read_csv",
"streamlit.write",
"streamlit.title",
"streamlit.text",
"numpy.histogram",
"pandas.to_datetime",
"streamlit.bar_chart"
] | [((89, 120), 'streamlit.title', 'st.title', (['"""Uber pickups in NYC"""'], {}), "('Uber pickups in NYC')\n", (97, 120), True, 'import streamlit as st\n'), ((643, 669), 'streamlit.text', 'st.text', (['"""Loading data..."""'], {}), "('Loading data...')\n", (650, 669), True, 'import streamlit as st\n'), ((886, 910), 'streamlit.subheader', 'st.subheader', (['"""Raw data"""'], {}), "('Raw data')\n", (898, 910), True, 'import streamlit as st\n'), ((915, 929), 'streamlit.write', 'st.write', (['data'], {}), '(data)\n', (923, 929), True, 'import streamlit as st\n'), ((972, 1000), 'streamlit.checkbox', 'st.checkbox', (['"""Show raw data"""'], {}), "('Show raw data')\n", (983, 1000), True, 'import streamlit as st\n'), ((1109, 1150), 'streamlit.subheader', 'st.subheader', (['"""Number of pickups by hour"""'], {}), "('Number of pickups by hour')\n", (1121, 1150), True, 'import streamlit as st\n'), ((1248, 1273), 'streamlit.bar_chart', 'st.bar_chart', (['hist_values'], {}), '(hist_values)\n', (1260, 1273), True, 'import streamlit as st\n'), ((1305, 1339), 'streamlit.subheader', 'st.subheader', (['"""Map of all pickups"""'], {}), "('Map of all pickups')\n", (1317, 1339), True, 'import streamlit as st\n'), ((1344, 1356), 'streamlit.map', 'st.map', (['data'], {}), '(data)\n', (1350, 1356), True, 'import streamlit as st\n'), ((1438, 1466), 'streamlit.slider', 'st.slider', (['"""hour"""', '(0)', '(23)', '(17)'], {}), "('hour', 0, 23, 17)\n", (1447, 1466), True, 'import streamlit as st\n'), ((1647, 1668), 'streamlit.map', 'st.map', (['filtered_data'], {}), '(filtered_data)\n', (1653, 1668), True, 'import streamlit as st\n'), ((324, 358), 'pandas.read_csv', 'pd.read_csv', (['DATA_URL'], {'nrows': 'nrows'}), '(DATA_URL, nrows=nrows)\n', (335, 358), True, 'import pandas as pd\n'), ((493, 526), 'pandas.to_datetime', 'pd.to_datetime', (['data[DATE_COLUMN]'], {}), '(data[DATE_COLUMN])\n', (507, 526), True, 'import pandas as pd\n'), ((1010, 1050), 'streamlit.subheader', 'st.subheader', 
(['"""Raw data Com interruptor"""'], {}), "('Raw data Com interruptor')\n", (1022, 1050), True, 'import streamlit as st\n'), ((1059, 1073), 'streamlit.write', 'st.write', (['data'], {}), '(data)\n', (1067, 1073), True, 'import streamlit as st\n'), ((1169, 1232), 'numpy.histogram', 'np.histogram', (['data[DATE_COLUMN].dt.hour'], {'bins': '(24)', 'range': '(0, 24)'}), '(data[DATE_COLUMN].dt.hour, bins=24, range=(0, 24))\n', (1181, 1232), True, 'import numpy as np\n')] |
import numpy as np
#linear line is f(x) = ax +b for a and b is constant
# x is order
# y is data set
def linear_vec(y):
    """Return a +/-1/0 "slope" vector for the data set ``y``.

    a[i] holds the forward difference y[i+1] - y[i].  For all but the last
    slot, b[i] is +1 when a[i] equals a[i+1] and -1 otherwise; the final
    slot is left at 0.

    Fix vs. original: ``np.float`` was removed in NumPy 1.24, so the
    builtin ``float`` is used instead (same semantics).

    NOTE(review): a[i+1] is read before the loop has filled it in, so the
    comparison is effectively against 0 -- the original author marked the
    function "not ready"; the behaviour is preserved here.  TODO: confirm
    the intended comparison order.
    """
    y_size = np.size(y)
    a = np.zeros(y_size - 1)
    b = np.zeros(y_size - 1)
    for i in range(y_size - 1):
        a[i] = y[i + 1] - y[i]
        if i < (y_size - 2):
            # np.float was removed in NumPy 1.24; builtin float is equivalent
            m = float(a[i])
            s = float(a[i + 1])
            if m != s:
                b[i] = -1
            else:
                b[i] = +1
    #it is not ready now
    return b
def devide_vec(y, x):
    """Element-wise division y / x.

    Returns an array of zeros (same length as y) when the two inputs have
    different lengths -- that silent fallback is kept for compatibility.

    Fix vs. original: ``np.float`` was removed in NumPy 1.24.
    """
    y_size = np.size(y)
    x_size = np.size(x)
    z = np.zeros(y_size)
    if y_size == x_size:
        for i in range(y_size):
            # builtin float replaces the removed np.float alias
            z[i] = float(y[i]) / x[i]
    return z
def crossProduct_vec(y, x):
    """Element-wise product y * x (despite the name, this is NOT a vector
    cross product).  Mismatched input lengths yield an all-zero result.

    Fix vs. original: ``np.float`` was removed in NumPy 1.24.
    """
    y_size = np.size(y)
    x_size = np.size(x)
    z = np.zeros(y_size)
    if y_size == x_size:
        for i in range(y_size):
            # builtin float replaces the removed np.float alias
            z[i] = float(y[i]) * x[i]
    return z
def line(xvec, a, b):
    """Evaluate the straight line f(x) = a*x + b at every point of ``xvec``
    and return the results as a list."""
    return [a * x + b for x in xvec]
#xvec = [5,7,8,9,10,11,12,13]
#y = line(xvec,3,9)
#print y
#not work now
#for example
#y = [3,5,7,9]
#print (linear(y))
#i = linear(y)
| [
"numpy.size",
"numpy.zeros",
"numpy.float"
] | [((135, 145), 'numpy.size', 'np.size', (['y'], {}), '(y)\n', (142, 145), True, 'import numpy as np\n'), ((154, 174), 'numpy.zeros', 'np.zeros', (['(y_size - 1)'], {}), '(y_size - 1)\n', (162, 174), True, 'import numpy as np\n'), ((265, 285), 'numpy.zeros', 'np.zeros', (['(y_size - 1)'], {}), '(y_size - 1)\n', (273, 285), True, 'import numpy as np\n'), ((622, 632), 'numpy.size', 'np.size', (['y'], {}), '(y)\n', (629, 632), True, 'import numpy as np\n'), ((646, 656), 'numpy.size', 'np.size', (['x'], {}), '(x)\n', (653, 656), True, 'import numpy as np\n'), ((665, 681), 'numpy.zeros', 'np.zeros', (['y_size'], {}), '(y_size)\n', (673, 681), True, 'import numpy as np\n'), ((839, 849), 'numpy.size', 'np.size', (['y'], {}), '(y)\n', (846, 849), True, 'import numpy as np\n'), ((863, 873), 'numpy.size', 'np.size', (['x'], {}), '(x)\n', (870, 873), True, 'import numpy as np\n'), ((882, 898), 'numpy.zeros', 'np.zeros', (['y_size'], {}), '(y_size)\n', (890, 898), True, 'import numpy as np\n'), ((1075, 1088), 'numpy.size', 'np.size', (['xvec'], {}), '(xvec)\n', (1082, 1088), True, 'import numpy as np\n'), ((390, 404), 'numpy.float', 'np.float', (['a[i]'], {}), '(a[i])\n', (398, 404), True, 'import numpy as np\n'), ((421, 439), 'numpy.float', 'np.float', (['a[i + 1]'], {}), '(a[i + 1])\n', (429, 439), True, 'import numpy as np\n'), ((758, 772), 'numpy.float', 'np.float', (['y[i]'], {}), '(y[i])\n', (766, 772), True, 'import numpy as np\n'), ((975, 989), 'numpy.float', 'np.float', (['y[i]'], {}), '(y[i])\n', (983, 989), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import random
import os
import re
import math
import constants
import scipy.misc
from segmentModule import *
from matplotlib import pyplot as plt
#reads in training image for cnn using pixel data as the training set
#28 x 28 surrounding area of each pixel used for training
#3x3 conv, 7x7 conv
#all training images must be passed when calling nn.py
def cnn_readOneImg2(image_dir):
    """Segment one image and paint each segment with a random colour.

    Args:
        image_dir: path to the image file.
    Returns:
        (canvas, markers): recoloured copy of the segmented image and the
        per-pixel segment labels from getSegments().

    Cleanup vs. original: removed the unused ``inputs`` list.
    """
    img = cv2.imread(image_dir, cv2.IMREAD_COLOR)
    original, markers = getSegments(img, False)
    uniqueMarkers = np.unique(markers)
    canvas = original.copy()
    for uq_mark in uniqueMarkers:
        #- paint every pixel of this segment one random colour
        b = random.randint(0, 255)
        g = random.randint(0, 255)
        r = random.randint(0, 255)
        canvas[markers == uq_mark] = [b, g, r]
    return (canvas, markers)
#function for stitching 4 different images together
#top left = cat1 = 'treematter'
#top right = cat2 = 'plywood'
#bot left = cat3 = 'cardboard'
#bot right = cat4 = 'construction'
def getStitchedImage():
    """Build a 1000x1000 mosaic from one random image of each category.

    Layout as implemented: cat1 top-left, cat3 top-right, cat2 bottom-left,
    cat4 bottom-right (each tile resized to 500x500).
    """
    category_dirs = [constants.cat1_dir, constants.cat2_dir,
                     constants.cat3_dir, constants.cat4_dir]
    filelists = [[], [], [], []]
    #- Collect candidate file names only when every directory exists
    if all(os.path.exists(d) for d in category_dirs):
        for flist, d in zip(filelists, category_dirs):
            flist.extend(os.listdir(d))
    #- Draw one random file per category and load it
    random.seed(None)
    images = []
    for d, flist in zip(category_dirs, filelists):
        idx = random.randint(0, len(flist) - 1)
        images.append(cv2.imread(d + '/' + flist[idx], cv2.IMREAD_COLOR))
    #- Normalise the tile sizes, then paste them into a 2x2 grid
    tiles = [cv2.resize(im, (500, 500), interpolation=cv2.INTER_CUBIC)
             for im in images]
    topleft, bottomleft, topright, bottomright = tiles
    toprow = np.concatenate((topleft, topright), axis=1)
    bottomrow = np.concatenate((bottomleft, bottomright), axis=1)
    return np.concatenate((toprow, bottomrow), axis=0)
def testStitcher():
    """Visual smoke test: show and save 10 random stitched mosaics.

    Fixes vs. original: the builder is named ``getStitchedImage`` (the old
    call to ``stitchImage`` raised NameError), and the unused
    ``scipy.misc.toimage`` call was removed (that function was removed from
    SciPy 1.2+).
    """
    for i in range(10):
        full_img = getStitchedImage()
        cv2.imshow('stiched image', full_img)
        cv2.imwrite('full_img.png', full_img)
        cv2.waitKey(0)
#gets n patches from an image with its respective label
def getPixelBatch(n):
    """Sample random square patches (with one-hot labels) from one random
    image per trash category.

    Args:
        n: nominal batch size; int(n / 4) patches are cut per category.
    Returns:
        (inputs, labels): tuples of IMG_SIZE x IMG_SIZE patches and their
        one-hot category labels, shuffled together.

    NOTE(review): with 6 categories and n/4 patches each, the batch holds
    6 * (n // 4) items, not n -- confirm whether that is intended.
    NOTE(review): if nothing is sampled, ``zip(*c)`` on an empty list will
    raise; callers appear to rely on the directories being populated.
    """
    inputs = []
    labels = []
    #initialize variablees
    cat1_dir = constants.cat1_dir
    cat2_dir = constants.cat2_dir
    cat3_dir = constants.cat3_dir
    cat4_dir = constants.cat4_dir
    cat5_dir = constants.cat5_dir
    cat6_dir = constants.cat6_dir
    dirs = [constants.cat1_dir,constants.cat2_dir,constants.cat3_dir,constants.cat4_dir,constants.cat5_dir,constants.cat6_dir]
    categories = [constants.CAT1_ONEHOT,constants.CAT2_ONEHOT,constants.CAT3_ONEHOT,constants.CAT4_ONEHOT,constants.CAT5_ONEHOT,constants.CAT6_ONEHOT]
    images = []
    files = []
    #check if the file directories exist and push all files into their respective categories
    for d in dirs:
        if os.path.exists(d):
            files.append([])
            for fname in os.listdir(d):
                files[-1].append(fname)
        else:
            print("%s directory does not exist" % d)
    #pick a random file from the list of files for each category and read them in
    random.seed(None)
    for i,f_list in enumerate(files):
        a = random.randint(0,len(f_list) - 1)
        img1 = cv2.imread(dirs[i] + '/' + f_list[a],cv2.IMREAD_COLOR)
        images.append(cv2.resize(img1,(constants.FULL_IMGSIZE,constants.FULL_IMGSIZE),interpolation = cv2.INTER_CUBIC))
    if(len(images) == len(categories)):
        for img,cat in zip(images,categories):
            w, h, d = img.shape
            # cut int(n / 4) random IMG_SIZE boxes fully inside the image
            for j in range(int(n / 4)):
                low = int(constants.IMG_SIZE / 2)
                high = int(w - (constants.IMG_SIZE / 2) - 1)
                a = random.randint(low,high)
                b = random.randint(low,high)
                box_low1 = int(a - (constants.IMG_SIZE / 2))
                box_low2 = int(b - (constants.IMG_SIZE / 2))
                box_high1 = int(a + (constants.IMG_SIZE / 2))
                box_high2 = int(b + (constants.IMG_SIZE / 2))
                box = img[box_low1:box_high1,box_low2:box_high2]
                inputs.append(box)
                labels.append(cat)
    else:
        print('COULD NOT CREATE BATCH')
        print('invalid number of categories and images found: %i <--> %i' % (len(images),len(categories)))
    # shuffle patches and labels together so pairs stay aligned
    c = list(zip(inputs,labels))
    random.shuffle(c)
    inputs,labels = zip(*c)
    return inputs,labels
#get the batch of segments to process
def getSegmentBatch(n, filepath):
    """Draw n random pre-cut segment images from *filepath* with one-hot labels.

    The category is parsed out of each file name; a file whose category has
    no known one-hot encoding gets its label printed instead of appended.
    Images are downscaled to IMG_SIZE x IMG_SIZE (quality is lost here).
    """
    seg_dir = filepath
    seg_fnames = os.listdir(seg_dir)
    inputs = []
    labels = []
    #- map category name -> one-hot label for the dispatch below
    onehots = {
        constants.CAT1: constants.CAT1_ONEHOT,
        constants.CAT2: constants.CAT2_ONEHOT,
        constants.CAT3: constants.CAT3_ONEHOT,
        constants.CAT4: constants.CAT4_ONEHOT,
        constants.CAT5: constants.CAT5_ONEHOT,
        constants.CAT6: constants.CAT6_ONEHOT,
    }
    #- seed the generator once, then draw one random file per iteration
    random.seed(None)
    for _ in range(n):
        segment_file = seg_fnames[random.randint(0, len(seg_fnames) - 1)]
        label = re.findall("treematter|plywood|cardboard|bottles|trashbag|blackbag", segment_file)
        if label[0] in onehots:
            labels.append(onehots[label[0]])
        else:
            print(label[0])
        #- read and normalize the segment to the network input size
        full_dir = seg_dir + segment_file
        img = cv2.imread(full_dir, cv2.IMREAD_COLOR)
        normal_img = cv2.resize(img, (constants.IMG_SIZE, constants.IMG_SIZE),
                                interpolation=cv2.INTER_CUBIC)
        inputs.append(normal_img)
    return np.array(inputs), np.array(labels)
| [
"numpy.concatenate",
"random.randint",
"cv2.waitKey",
"random.shuffle",
"cv2.imwrite",
"numpy.unique",
"os.path.exists",
"cv2.imread",
"re.findall",
"random.seed",
"numpy.array",
"cv2.imshow",
"os.listdir",
"cv2.resize"
] | [((439, 478), 'cv2.imread', 'cv2.imread', (['image_dir', 'cv2.IMREAD_COLOR'], {}), '(image_dir, cv2.IMREAD_COLOR)\n', (449, 478), False, 'import cv2\n'), ((544, 562), 'numpy.unique', 'np.unique', (['markers'], {}), '(markers)\n', (553, 562), True, 'import numpy as np\n'), ((1940, 1957), 'random.seed', 'random.seed', (['None'], {}), '(None)\n', (1951, 1957), False, 'import random\n'), ((2149, 2208), 'cv2.imread', 'cv2.imread', (["(cat1_dir + '/' + cat1files[a])", 'cv2.IMREAD_COLOR'], {}), "(cat1_dir + '/' + cat1files[a], cv2.IMREAD_COLOR)\n", (2159, 2208), False, 'import cv2\n'), ((2219, 2278), 'cv2.imread', 'cv2.imread', (["(cat2_dir + '/' + cat2files[b])", 'cv2.IMREAD_COLOR'], {}), "(cat2_dir + '/' + cat2files[b], cv2.IMREAD_COLOR)\n", (2229, 2278), False, 'import cv2\n'), ((2289, 2348), 'cv2.imread', 'cv2.imread', (["(cat3_dir + '/' + cat3files[c])", 'cv2.IMREAD_COLOR'], {}), "(cat3_dir + '/' + cat3files[c], cv2.IMREAD_COLOR)\n", (2299, 2348), False, 'import cv2\n'), ((2359, 2418), 'cv2.imread', 'cv2.imread', (["(cat4_dir + '/' + cat4files[d])", 'cv2.IMREAD_COLOR'], {}), "(cat4_dir + '/' + cat4files[d], cv2.IMREAD_COLOR)\n", (2369, 2418), False, 'import cv2\n'), ((2513, 2572), 'cv2.resize', 'cv2.resize', (['img1', '(500, 500)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img1, (500, 500), interpolation=cv2.INTER_CUBIC)\n', (2523, 2572), False, 'import cv2\n'), ((2589, 2648), 'cv2.resize', 'cv2.resize', (['img2', '(500, 500)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img2, (500, 500), interpolation=cv2.INTER_CUBIC)\n', (2599, 2648), False, 'import cv2\n'), ((2663, 2722), 'cv2.resize', 'cv2.resize', (['img3', '(500, 500)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img3, (500, 500), interpolation=cv2.INTER_CUBIC)\n', (2673, 2722), False, 'import cv2\n'), ((2740, 2799), 'cv2.resize', 'cv2.resize', (['img4', '(500, 500)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img4, (500, 500), interpolation=cv2.INTER_CUBIC)\n', (2750, 2799), False, 'import cv2\n'), ((2812, 
2855), 'numpy.concatenate', 'np.concatenate', (['(topleft, topright)'], {'axis': '(1)'}), '((topleft, topright), axis=1)\n', (2826, 2855), True, 'import numpy as np\n'), ((2872, 2921), 'numpy.concatenate', 'np.concatenate', (['(bottomleft, bottomright)'], {'axis': '(1)'}), '((bottomleft, bottomright), axis=1)\n', (2886, 2921), True, 'import numpy as np\n'), ((2937, 2980), 'numpy.concatenate', 'np.concatenate', (['(toprow, bottomrow)'], {'axis': '(0)'}), '((toprow, bottomrow), axis=0)\n', (2951, 2980), True, 'import numpy as np\n'), ((4294, 4311), 'random.seed', 'random.seed', (['None'], {}), '(None)\n', (4305, 4311), False, 'import random\n'), ((5524, 5541), 'random.shuffle', 'random.shuffle', (['c'], {}), '(c)\n', (5538, 5541), False, 'import random\n'), ((5735, 5754), 'os.listdir', 'os.listdir', (['seg_dir'], {}), '(seg_dir)\n', (5745, 5754), False, 'import os\n'), ((5849, 5866), 'random.seed', 'random.seed', (['None'], {}), '(None)\n', (5860, 5866), False, 'import random\n'), ((691, 713), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (705, 713), False, 'import random\n'), ((725, 747), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (739, 747), False, 'import random\n'), ((759, 781), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (773, 781), False, 'import random\n'), ((1400, 1424), 'os.path.exists', 'os.path.exists', (['cat1_dir'], {}), '(cat1_dir)\n', (1414, 1424), False, 'import os\n'), ((1429, 1453), 'os.path.exists', 'os.path.exists', (['cat2_dir'], {}), '(cat2_dir)\n', (1443, 1453), False, 'import os\n'), ((1458, 1482), 'os.path.exists', 'os.path.exists', (['cat3_dir'], {}), '(cat3_dir)\n', (1472, 1482), False, 'import os\n'), ((1487, 1511), 'os.path.exists', 'os.path.exists', (['cat4_dir'], {}), '(cat4_dir)\n', (1501, 1511), False, 'import os\n'), ((1537, 1557), 'os.listdir', 'os.listdir', (['cat1_dir'], {}), '(cat1_dir)\n', (1547, 1557), False, 'import os\n'), ((1622, 1642), 
'os.listdir', 'os.listdir', (['cat2_dir'], {}), '(cat2_dir)\n', (1632, 1642), False, 'import os\n'), ((1707, 1727), 'os.listdir', 'os.listdir', (['cat3_dir'], {}), '(cat3_dir)\n', (1717, 1727), False, 'import os\n'), ((1792, 1812), 'os.listdir', 'os.listdir', (['cat4_dir'], {}), '(cat4_dir)\n', (1802, 1812), False, 'import os\n'), ((3131, 3168), 'cv2.imshow', 'cv2.imshow', (['"""stiched image"""', 'full_img'], {}), "('stiched image', full_img)\n", (3141, 3168), False, 'import cv2\n'), ((3176, 3213), 'cv2.imwrite', 'cv2.imwrite', (['"""full_img.png"""', 'full_img'], {}), "('full_img.png', full_img)\n", (3187, 3213), False, 'import cv2\n'), ((3221, 3235), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3232, 3235), False, 'import cv2\n'), ((4012, 4029), 'os.path.exists', 'os.path.exists', (['d'], {}), '(d)\n', (4026, 4029), False, 'import os\n'), ((4411, 4466), 'cv2.imread', 'cv2.imread', (["(dirs[i] + '/' + f_list[a])", 'cv2.IMREAD_COLOR'], {}), "(dirs[i] + '/' + f_list[a], cv2.IMREAD_COLOR)\n", (4421, 4466), False, 'import cv2\n'), ((6059, 6145), 're.findall', 're.findall', (['"""treematter|plywood|cardboard|bottles|trashbag|blackbag"""', 'segment_file'], {}), "('treematter|plywood|cardboard|bottles|trashbag|blackbag',\n segment_file)\n", (6069, 6145), False, 'import re\n'), ((7006, 7044), 'cv2.imread', 'cv2.imread', (['full_dir', 'cv2.IMREAD_COLOR'], {}), '(full_dir, cv2.IMREAD_COLOR)\n', (7016, 7044), False, 'import cv2\n'), ((7065, 7158), 'cv2.resize', 'cv2.resize', (['img', '(constants.IMG_SIZE, constants.IMG_SIZE)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (constants.IMG_SIZE, constants.IMG_SIZE), interpolation=cv2\n .INTER_CUBIC)\n', (7075, 7158), False, 'import cv2\n'), ((7202, 7218), 'numpy.array', 'np.array', (['inputs'], {}), '(inputs)\n', (7210, 7218), True, 'import numpy as np\n'), ((7219, 7235), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (7227, 7235), True, 'import numpy as np\n'), ((4085, 4098), 'os.listdir', 'os.listdir', 
(['d'], {}), '(d)\n', (4095, 4098), False, 'import os\n'), ((4488, 4589), 'cv2.resize', 'cv2.resize', (['img1', '(constants.FULL_IMGSIZE, constants.FULL_IMGSIZE)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img1, (constants.FULL_IMGSIZE, constants.FULL_IMGSIZE),\n interpolation=cv2.INTER_CUBIC)\n', (4498, 4589), False, 'import cv2\n'), ((4877, 4902), 'random.randint', 'random.randint', (['low', 'high'], {}), '(low, high)\n', (4891, 4902), False, 'import random\n'), ((4922, 4947), 'random.randint', 'random.randint', (['low', 'high'], {}), '(low, high)\n', (4936, 4947), False, 'import random\n')] |
import numpy as np
a = np.random.rand(1000)
print(a) | [
"numpy.random.rand"
] | [((23, 43), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (37, 43), True, 'import numpy as np\n')] |
import numpy as np
def calculate_energy(wfc, H_k, H_r, dx):
    """Calculate the energy <Psi|H|Psi>, splitting H into a momentum-space
    part (H_k) and a real-space part (H_r) and integrating with step dx."""
    conj_wfc = np.conj(wfc)
    #- momentum-space term, evaluated via FFT and mapped back to real space
    momentum_term = 0.5 * conj_wfc * np.fft.ifft(H_k ** 2 * np.fft.fft(wfc))
    #- real-space term, evaluated directly
    position_term = conj_wfc * H_r * wfc
    #- Riemann-sum integration over the grid
    total = sum(momentum_term + position_term)
    return total.real * dx
| [
"numpy.conj",
"numpy.fft.fft",
"numpy.fft.ifft"
] | [((166, 181), 'numpy.fft.fft', 'np.fft.fft', (['wfc'], {}), '(wfc)\n', (176, 181), True, 'import numpy as np\n'), ((194, 206), 'numpy.conj', 'np.conj', (['wfc'], {}), '(wfc)\n', (201, 206), True, 'import numpy as np\n'), ((292, 321), 'numpy.fft.ifft', 'np.fft.ifft', (['(H_k ** 2 * wfc_k)'], {}), '(H_k ** 2 * wfc_k)\n', (303, 321), True, 'import numpy as np\n')] |
from allensdk.brain_observatory.ecephys.ecephys_project_cache import EcephysProjectCache
import os
from sqlalchemy import delete
from sqlalchemy.orm import sessionmaker
import json
import numpy as np
import pandas as pd
from datetime import date,datetime,timedelta
import sqla_schema as sch
import ingest
data_directory = 'C:\\Users\\yoni.browning\\Documents\\DataJoint\\AllenData'
manifest_path = os.path.join(data_directory, "manifest.json")
def get_first_value(value):
    """Coerce a cell that may hold a scalar, a stringified list ("[a, b]"),
    or a length-1 ndarray into a single scalar.

    Returns None for anything that cannot be interpreted (lists, longer
    arrays, unparseable strings).

    Fix vs. original: an unparseable plain string used to raise ValueError
    from float(); it now falls back to None like the other unknown types.
    """
    if isinstance(value, str):
        if '[' in value and ']' in value:
            #- "[a, b, ...]" -> parse the list and take the first element
            value = np.fromstring(value[1:-1], sep=',')
            value = value[0]
        else:
            try:
                value = float(value)
            except ValueError:
                #- unparseable text: same None fallback as unknown types
                value = None
    elif isinstance(value, np.ndarray) and len(value) == 1:
        value = value[0]
    elif isinstance(value, int) or isinstance(value, float):
        value = value
    else:
        value = None
    return value
def query_to_df(Q):
    """Materialize a SQLAlchemy query ``Q`` into a pandas DataFrame using
    the query's own statement and session bind."""
    return pd.read_sql(Q.statement, Q.session.bind)
def ingest_session_stimulus(session, engine):
    """Flatten one ecephys session's stimulus-presentation table and append
    it to the ``stimulus`` SQL table.

    Args:
        session: object providing ``stimulus_presentations`` (a DataFrame)
            and ``ecephys_session_id`` -- presumably an AllenSDK
            EcephysSession; confirm against callers.
        engine: SQLAlchemy engine for the target database.

    NOTE(review): ``dbsession`` is opened but never used; all writes go
    through ``engine`` directly -- confirm whether the session scope is
    intentional.
    """
    with sessionmaker(engine)() as dbsession:
        rf_stim_table = session.stimulus_presentations
        rf_stim_table.reset_index(inplace=True)
        # Prepend the session id so every stimulus row is traceable
        rf_stim_table.insert(0,'session_id',\
                             (session.ecephys_session_id*np.ones(len(rf_stim_table))).\
                             astype(int))
        K = rf_stim_table.keys()
        # Replace the literal string 'null' with None in every column
        # (relies on pd.options.mode.chained_assignment being disabled)
        for ii in range(len(K)):
            rf_stim_table[K[ii]][rf_stim_table[K[ii]]=='null'] = None
        # Normalize list-like string cells to single scalar values
        rf_stim_table['phase'] = rf_stim_table['phase'].apply(get_first_value)
        rf_stim_table['size'] = rf_stim_table['size'].apply(get_first_value)
        rf_stim_table['temporal_frequency'] =\
        rf_stim_table['temporal_frequency'].apply(get_first_value).astype(float)
        rf_stim_table['spatial_frequency'] = \
        rf_stim_table['spatial_frequency'].apply(get_first_value).astype(float)
        rf_stim_table.to_sql('stimulus', engine, index_label='id', if_exists='append')
def main():
    """One-shot bootstrap: (re)create the stimulus_type table and seed it
    with the distinct stimulus names found in a reference session."""
    engine = ingest.connect_to_db()
    print('Connected to engine')
    # Dropping the old tables is disabled; uncomment to start from scratch.
    #sch.Base.metadata.drop_all(engine, tables=(sch.StimulusType.__table__,))
    #sch.Base.metadata.drop_all(engine, tables=(sch.Stimulus.__table__,))
    print('Killed old tables')
    # Create the stimulus_type table (Stimulus table creation is disabled).
    sch.Base.metadata.create_all(engine, tables=(sch.StimulusType.__table__,))
    #sch.Base.metadata.create_all(engine, tables=(sch.Stimulus.__table__,))
    print('Spawned new tables')
    cache = ingest.get_ecephys_cache(manifest=manifest_path)
    # Pull one known session just to enumerate the stimulus names it used;
    # this only needs to happen once.
    session = cache.get_session_data(715093703)
    print('Grabbed some data')
    unique_names = session.stimulus_presentations['stimulus_name'].unique()
    name_table = pd.DataFrame(data={'stimulus_name': unique_names})
    name_table.to_sql('stimulus_type', engine, index_label='id', if_exists='append')
    print('Added to DB')
    # Per-session stimulus ingest is disabled for now:
    #ingest_session_stimulus(session, engine)


if __name__ == '__main__':
    main()
| [
"pandas.read_sql",
"ingest.get_ecephys_cache",
"ingest.connect_to_db",
"sqlalchemy.orm.sessionmaker",
"sqla_schema.Base.metadata.create_all",
"os.path.join",
"numpy.fromstring"
] | [((402, 447), 'os.path.join', 'os.path.join', (['data_directory', '"""manifest.json"""'], {}), "(data_directory, 'manifest.json')\n", (414, 447), False, 'import os\n'), ((922, 962), 'pandas.read_sql', 'pd.read_sql', (['Q.statement', 'Q.session.bind'], {}), '(Q.statement, Q.session.bind)\n', (933, 962), True, 'import pandas as pd\n'), ((2167, 2189), 'ingest.connect_to_db', 'ingest.connect_to_db', ([], {}), '()\n', (2187, 2189), False, 'import ingest\n'), ((2507, 2581), 'sqla_schema.Base.metadata.create_all', 'sch.Base.metadata.create_all', (['engine'], {'tables': '(sch.StimulusType.__table__,)'}), '(engine, tables=(sch.StimulusType.__table__,))\n', (2535, 2581), True, 'import sqla_schema as sch\n'), ((2710, 2758), 'ingest.get_ecephys_cache', 'ingest.get_ecephys_cache', ([], {'manifest': 'manifest_path'}), '(manifest=manifest_path)\n', (2734, 2758), False, 'import ingest\n'), ((569, 604), 'numpy.fromstring', 'np.fromstring', (['value[1:-1]'], {'sep': '""","""'}), "(value[1:-1], sep=',')\n", (582, 604), True, 'import numpy as np\n'), ((1033, 1053), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', (['engine'], {}), '(engine)\n', (1045, 1053), False, 'from sqlalchemy.orm import sessionmaker\n')] |
from functools import reduce
from config import PERIODO_INI, PERIODO_FIN
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None
def check_periods(col):
    """Print the observed MIN/MAX range of a date/period column as a table."""
    bounds = pd.DataFrame({"Rango": [col.min(), col.max()]},
                          index=['MIN', 'MAX'])
    print(bounds)
# HELPER FUNCTIONS
def transform_date(s):
    """Parse a string Series into datetimes, parsing each unique value once."""
    lookup = {}
    for raw in s.unique():
        lookup[raw] = pd.to_datetime(raw)
    return s.map(lookup)
def dates_to_int(dates):
    """Rank each unique date as an integer period; the most recent gets 1."""
    ordered = sorted(dates.unique(), reverse=True)
    ranking = {fecha: pos + 1 for pos, fecha in enumerate(ordered)}
    return dates.map(ranking)
def simplify_history(x):
    """Collapse a space-separated count string into a 0/1 flag string."""
    flags = []
    for token in x.split(" "):
        flags.append("1" if int(token) > 0 else "0")
    return "".join(flags)
def to_yearmonth(s):
    """Reduce each date in the Series to its 'YYYY-MM' string."""
    lookup = {}
    for raw in s.unique():
        lookup[raw] = pd.Timestamp(raw).strftime('%Y-%m')
    return s.map(lookup)
# TRANSFORMING PIPELINE FUNCTIONS
def transform_polizas(df_polizas):
    """Build a per-client (CIF_ID) month-by-month policy-vigency history.

    Counts distinct policy records per client and vigency month, then joins
    the per-month counts into a single space-separated 'hist_polizas' string.
    """
    df_polizas['FECHA_VIG_POL'] = transform_date(df_polizas['FECHA_VIG_POL'])
    df_polizas['mes_anio_vig'] = df_polizas['FECHA_VIG_POL'].dt.strftime('%Y-%m')
    dedup = df_polizas[["CIF_ID", "NUM_SECU_POL", "MCA_VIGENCIA",
                        "mes_anio_vig"]].drop_duplicates()
    del df_polizas  # free memory before pivoting
    pivoted = dedup.pivot_table(index='CIF_ID',
                                columns=['mes_anio_vig'],
                                values=['MCA_VIGENCIA'],
                                aggfunc='count',
                                fill_value=0)
    del dedup
    # Stringify the counts so each row can be joined into one history string.
    pivoted = pivoted.astype(str)
    pivoted["history"] = pivoted.apply(" ".join, axis=1)
    result = pd.DataFrame(pivoted.index)
    result = result.set_index('CIF_ID')
    result["hist_polizas"] = pivoted["history"]
    del pivoted
    return result
def add_id(df, with_table, id_col, fk_col):
    """Inner-join `df` with an id column read from a tab-separated table file.

    Args:
        df: DataFrame containing `fk_col`.
        with_table: path to a latin1 TSV file (decimal comma) holding ids.
        id_col: name of the id column to bring in.
        fk_col: join key present in both `df` and the file.
    Returns:
        `df` with `id_col` attached (rows without a match are dropped).
    """
    lookup = pd.read_csv(with_table,
                sep='\t',
                encoding='latin1',
                decimal=',',
                usecols=[id_col, fk_col])
    return pd.merge(df, lookup, on=fk_col, how='inner')
def transform_pagos(df_pagos):
    """Pivot payment records into per-client, per-period feature columns.

    Produces, for every CIF_ID: mean payment delay and mean amount per
    period, plus per-period counts of each situation and collection code.
    """
    df_pagos["FECHA_VTO"] = transform_date(df_pagos["FECHA_VTO"])
    df_pagos["FEC_PAGO"] = transform_date(df_pagos["FEC_PAGO"])
    # Payment delay in (approximate) months between due date and payment.
    delay = (df_pagos["FEC_PAGO"] - df_pagos["FECHA_VTO"]) / np.timedelta64(1, 'M')
    df_pagos["demora_pago"] = delay.astype("float")
    # Unify the two card collection codes under a single label.
    df_pagos.loc[df_pagos["COD_COBRO"] == "TM", "COD_COBRO"] = "TA"
    # FILTER CURRENT PERIOD
    in_range = df_pagos["FECHA_VTO"].between(PERIODO_INI, PERIODO_FIN)
    df_pagos = df_pagos[in_range]
    # TRANSFORM DATE TO PERIOD
    df_pagos["FECHA_VTO"] = to_yearmonth(df_pagos["FECHA_VTO"].dropna())
    df_pagos["periodo"] = dates_to_int(df_pagos["FECHA_VTO"])
    # BEGIN PIVOTING
    subset = df_pagos[["CIF_ID", "demora_pago", "periodo",
                       "COD_COBRO", "COD_SITUACION", "MONTO_PAGO"]]

    def _flatten(frame, sep):
        # Flatten the MultiIndex columns into 'value<sep>level' names.
        frame = pd.DataFrame(frame.to_records())
        return frame.rename(columns=lambda c: c.replace("(", "")
                            .replace(")", "")
                            .replace(", ", sep)
                            .replace("'", ""))

    means = subset.pivot_table(index=["CIF_ID"], columns=["periodo"],
                               values=["demora_pago", "MONTO_PAGO"],
                               aggfunc="mean")
    means = _flatten(means, "_")
    situacion = subset.pivot_table(index=["CIF_ID"],
                                   columns=["periodo", "COD_SITUACION"],
                                   aggfunc="size")
    situacion = _flatten(situacion, "_SITUACION_")
    cobro = subset.pivot_table(index=["CIF_ID"],
                               columns=["periodo", "COD_COBRO"],
                               aggfunc="size")
    cobro = _flatten(cobro, "_COBRO_")
    del subset
    del df_pagos
    return reduce(lambda left, right: pd.merge(left, right, on=['CIF_ID'], how='outer'),
                  [situacion, cobro, means])
def transform_siniestros(df_sini):
    """Pivot claim (siniestro) events into per-client period count columns.

    For every CIF_ID, counts per report-period of: claims reported,
    liquidation dates, and rejection dates.
    """
    # Keep only the last record per (policy, claim-report-date) pair.
    df_sini.drop_duplicates(subset=["NUM_SECU_POL", "FEC_DENU_SINI"],
                            keep='last',
                            inplace=True)
    for col in ("FEC_DENU_SINI", "FECHA_LIQUIDACION", "FECHA_RECHAZO"):
        df_sini[col] = transform_date(df_sini[col])
    # FILTER CURRENT PERIOD
    mask = df_sini["FEC_DENU_SINI"].between(PERIODO_INI, PERIODO_FIN)
    df_sini = df_sini[mask]
    check_periods(df_sini["FEC_DENU_SINI"])
    # TRANSFORM DATE TO PERIOD
    for col in ("FEC_DENU_SINI", "FECHA_LIQUIDACION", "FECHA_RECHAZO"):
        df_sini[col] = to_yearmonth(df_sini[col].dropna())
    # Period ranks are defined by the report dates; the newest gets 1.
    ranked = sorted(df_sini["FEC_DENU_SINI"].unique(), reverse=True)
    periodos = {fecha: pos + 1 for pos, fecha in enumerate(ranked)}
    df_sini["periodo_denu_sini"] = df_sini["FEC_DENU_SINI"].map(periodos)
    df_sini["periodo_liquidacion_sini"] = df_sini["FECHA_LIQUIDACION"].map(periodos)
    df_sini["periodo_rechazo_sini"] = df_sini["FECHA_RECHAZO"].map(periodos)
    # BEGIN PIVOTING
    subset = df_sini[["CIF_ID",
                      "NUM_SECU_POL",
                      "periodo_denu_sini",
                      "periodo_liquidacion_sini",
                      "periodo_rechazo_sini"]]
    pivoted = subset.pivot_table(index='CIF_ID',
                                 columns=['periodo_denu_sini'],
                                 values=['NUM_SECU_POL',
                                         'periodo_liquidacion_sini',
                                         'periodo_rechazo_sini'],
                                 aggfunc='count',
                                 fill_value=0)
    flat = pd.DataFrame(pivoted.to_records())
    # Flatten MultiIndex column names and relabel the raw count column.
    flat = flat.rename(columns=lambda c: c.replace("(", "")
                       .replace(")", "")
                       .replace(", ", "_")
                       .replace("'", "")
                       .replace("NUM_SECU_POL", "periodo_sini"))
    return flat
def transform_interacciones(df):
    """Pivot customer interactions into per-client period/direction counts."""
    # Drop rows whose ID is not numeric at all.
    df = df[~pd.to_numeric(df['ID'], errors='coerce').isnull()]

    # SOME CLEANING: collect values that cannot be cast and filter them out.
    def _non_castable(values, caster):
        bad = []
        for v in values:
            try:
                caster(v)
            except Exception:
                bad.append(v)
        return bad

    df = df[~df["CIF_ID"].isin(_non_castable(df["CIF_ID"].unique(), float))]
    df = df[~df["ID"].isin(_non_castable(df["ID"].unique(), int))]
    df = df.drop(columns='ID').astype({'CIF_ID': 'float64'})
    # Keep only known interaction directions.
    df = df[df["IN_OUT"].isin(['O', 'I', 'A'])]
    # Normalize the raw date strings before parsing.
    df["FECHA"] = df["FECHA"].str.slice(stop=10)
    has_time = df["FECHA"].str.contains(" [0-9]", na=False)
    df.loc[has_time, "FECHA"] = df.loc[has_time, "FECHA"].str.slice(stop=8)
    df["FECHA"] = df["FECHA"].str.replace(" ", "")
    df["periodo"] = transform_date(df["FECHA"])
    # FILTER CURRENT PERIOD
    df = df[df["periodo"].between(PERIODO_INI, PERIODO_FIN)]
    check_periods(df["periodo"])
    df = df[["CIF_ID", "IN_OUT", "periodo"]]
    # TRANSFORM DATE TO PERIOD
    df["periodo"] = to_yearmonth(df["periodo"].dropna())
    df["periodo_int"] = dates_to_int(df["periodo"])
    # BEGIN PIVOTING
    subset = df[["CIF_ID", "IN_OUT", "periodo_int"]]
    df = subset.pivot_table(index=["CIF_ID"],
                            columns=["periodo_int", "IN_OUT"],
                            aggfunc="size")
    df = pd.DataFrame(df.to_records())
    df = df.rename(columns=lambda c: c.replace("(", "")
                   .replace(")", "")
                   .replace(", ", "_TIPOINT_")
                   .replace("'", ""))
    return df
| [
"pandas.DataFrame",
"pandas.Timestamp",
"pandas.read_csv",
"pandas.merge",
"numpy.timedelta64",
"pandas.to_datetime",
"pandas.to_numeric"
] | [((1820, 1858), 'pandas.DataFrame', 'pd.DataFrame', (['df_polizas_pivoted.index'], {}), '(df_polizas_pivoted.index)\n', (1832, 1858), True, 'import pandas as pd\n'), ((2062, 2158), 'pandas.read_csv', 'pd.read_csv', (['with_table'], {'sep': '"""\t"""', 'encoding': '"""latin1"""', 'decimal': '""","""', 'usecols': '[id_col, fk_col]'}), "(with_table, sep='\\t', encoding='latin1', decimal=',', usecols=[\n id_col, fk_col])\n", (2073, 2158), True, 'import pandas as pd\n'), ((2266, 2310), 'pandas.merge', 'pd.merge', (['df', 'df_aux'], {'on': 'fk_col', 'how': '"""inner"""'}), "(df, df_aux, on=fk_col, how='inner')\n", (2274, 2310), True, 'import pandas as pd\n'), ((361, 381), 'pandas.to_datetime', 'pd.to_datetime', (['date'], {}), '(date)\n', (375, 381), True, 'import pandas as pd\n'), ((4085, 4134), 'pandas.merge', 'pd.merge', (['left', 'right'], {'on': "['CIF_ID']", 'how': '"""outer"""'}), "(left, right, on=['CIF_ID'], how='outer')\n", (4093, 4134), True, 'import pandas as pd\n'), ((807, 825), 'pandas.Timestamp', 'pd.Timestamp', (['date'], {}), '(date)\n', (819, 825), True, 'import pandas as pd\n'), ((2554, 2576), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""M"""'], {}), "(1, 'M')\n", (2568, 2576), True, 'import numpy as np\n'), ((6433, 6473), 'pandas.to_numeric', 'pd.to_numeric', (["df['ID']"], {'errors': '"""coerce"""'}), "(df['ID'], errors='coerce')\n", (6446, 6473), True, 'import pandas as pd\n')] |
"""
Helpers for tfrecord conversion.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
class ImageCoder(object):
    """TensorFlow image encode/decode helper backed by a single Session.

    Builds each small coding graph once at construction time and runs it on
    demand; pattern taken from
    https://github.com/tensorflow/models/blob/master/inception/inception/data/build_image_data.py
    """
    def __init__(self):
        # One shared Session services every coding call.
        self._sess = tf.Session()
        # PNG -> JPEG transcoding graph.
        self._png_data = tf.placeholder(dtype=tf.string)
        decoded_png = tf.image.decode_png(self._png_data, channels=3)
        self._png_to_jpeg = tf.image.encode_jpeg(decoded_png, format='rgb',
                                                  quality=100)
        # RGB JPEG decoding graph.
        self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data,
                                                  channels=3)
        # JPEG encoding graph.
        self._encode_jpeg_data = tf.placeholder(dtype=tf.uint8)
        self._encode_jpeg = tf.image.encode_jpeg(self._encode_jpeg_data,
                                                  format='rgb')
        # uint8 PNG decoding graph; channels=0 keeps the file's own
        # channel count instead of forcing 3.
        self._decode_png_data = tf.placeholder(dtype=tf.string)
        self._decode_png = tf.image.decode_png(self._decode_png_data,
                                                channels=0, dtype=tf.uint8)
        # uint16 PNG decoding graph (used for PNG depth maps).
        self._decode_png_uint16_data = tf.placeholder(dtype=tf.string)
        self._decode_png_uint16 = tf.image.decode_png(
            self._decode_png_uint16_data, channels=0, dtype=tf.uint16)
        # PNG encoding graph.
        self._encode_png_data = tf.placeholder(dtype=tf.uint8)
        self._encode_png = tf.image.encode_png(self._encode_png_data)
    def png_to_jpeg(self, image_data):
        """Transcode a PNG byte string to JPEG bytes."""
        return self._sess.run(self._png_to_jpeg,
                              feed_dict={self._png_data: image_data})
    def decode_jpeg(self, image_data):
        """Decode JPEG bytes to an HxWx3 uint8 array."""
        image = self._sess.run(self._decode_jpeg,
                               feed_dict={self._decode_jpeg_data: image_data})
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image
    def encode_jpeg(self, image):
        """Encode an HxWx3 uint8 array as JPEG bytes."""
        return self._sess.run(self._encode_jpeg,
                              feed_dict={self._encode_jpeg_data: image})
    def encode_png(self, image):
        """Encode a uint8 array as PNG bytes."""
        return self._sess.run(self._encode_png,
                              feed_dict={self._encode_png_data: image})
    def decode_png(self, image_data):
        """Decode uint8 PNG bytes; callers expect exactly 3 channels."""
        image = self._sess.run(self._decode_png,
                               feed_dict={self._decode_png_data: image_data})
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image
    def decode_png_uint16(self, image_data):
        """Decode uint16 PNG bytes (single-channel depth map expected)."""
        image = self._sess.run(
            self._decode_png_uint16,
            feed_dict={self._decode_png_uint16_data: image_data})
        assert len(image.shape) == 3
        assert image.shape[2] == 1
        return image
def int64_feature(value):
    """Wrap a scalar or sequence of ints as a tf.train int64 Feature."""
    values = value if isinstance(value, (list, np.ndarray)) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def float_feature(value):
    """Wrap a scalar or sequence of floats as a tf.train float Feature."""
    values = value if isinstance(value, (list, np.ndarray)) else [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def bytes_feature(value):
    """Wrap a single byte string as a tf.train bytes Feature."""
    byte_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=byte_list)
def convert_to_example(image_data, image_path, height, width, label, center):
    """Build an Example proto for an image example.
    Args:
        image_data: string, JPEG encoding of RGB image;
        image_path: string, path to this image file
        label: 3 x 14 joint location + visibility --> This could be 3 x 19
        height, width: integers, image shapes in pixels.
        center: 2 x 1 center of the tight bbox
    Returns:
        Example proto
    """
    from os.path import basename
    image_format = 'JPEG'
    add_face = False
    if label.shape[1] == 19:
        add_face = True
        # Split and save facepts on its own.
        face_pts = label[:, 14:]
        label = label[:, :14]
    feat_dict = {
        'image/height': int64_feature(height),
        'image/width': int64_feature(width),
        # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the drop-in replacement (same resulting dtype).
        'image/center': int64_feature(center.astype(int)),
        'image/x': float_feature(label[0, :].astype(np.float32)),
        'image/y': float_feature(label[1, :].astype(np.float32)),
        'image/visibility': int64_feature(label[2, :].astype(int)),
        'image/format': bytes_feature(tf.compat.as_bytes(image_format)),
        'image/filename': bytes_feature(
            tf.compat.as_bytes(basename(image_path))),
        'image/encoded': bytes_feature(tf.compat.as_bytes(image_data)),
    }
    if add_face:
        # Face keypoints form a 3 x 5 block appended after the 14 body joints.
        feat_dict.update({
            'image/face_pts':
            float_feature(face_pts.ravel().astype(np.float32))
        })
    example = tf.train.Example(features=tf.train.Features(feature=feat_dict))
    return example
# added by CCJ:
# convert to example with depth data;
def convert_to_example_wdepth(depth_binary, image_data, image_path, height, width, label, center):
    """Build an Example proto for an image example with a raw depth buffer.
    Args:
        depth_binary: raw depth bytes, stored untouched under 'depth/raw'.
        image_data: string, PNG/JPEG encoding of RGB image;
        image_path: string, path to this image file
        label: 3 x 14 joint location + visibility --> This could be 3 x 19
        height, width: integers, image shapes in pixels.
        center: 2 x 1 center of the tight bbox
    Returns:
        Example proto
    """
    from os.path import basename
    image_format = 'JPEG'
    add_face = False
    if label.shape[1] == 19:
        add_face = True
        # Split and save facepts on its own.
        face_pts = label[:, 14:]
        label = label[:, :14]
    feat_dict = {
        'image/height': int64_feature(height),
        'image/width': int64_feature(width),
        # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the drop-in replacement (same resulting dtype).
        'image/center': int64_feature(center.astype(int)),
        'image/x': float_feature(label[0, :].astype(np.float32)),
        'image/y': float_feature(label[1, :].astype(np.float32)),
        'image/visibility': int64_feature(label[2, :].astype(int)),
        'image/format': bytes_feature(tf.compat.as_bytes(image_format)),
        'image/filename': bytes_feature(
            tf.compat.as_bytes(basename(image_path))),
        'image/encoded': bytes_feature(tf.compat.as_bytes(image_data)),
        'depth/raw': bytes_feature(tf.compat.as_bytes(depth_binary)),
        # Store the depth buffer's (H, W, C) so it can be reshaped on read.
        'depth/size': bytes_feature(tf.compat.as_bytes(
            np.array([height, width, 1], np.int32).tobytes())),
    }
    if add_face:
        # Face keypoints form a 3 x 5 block appended after the 14 body joints.
        feat_dict.update({
            'image/face_pts':
            float_feature(face_pts.ravel().astype(np.float32))
        })
    example = tf.train.Example(features=tf.train.Features(feature=feat_dict))
    return example
# added by CCJ on 6/13/2019;
def convert_to_example_wmosh_wdepth(depth_binary, image_data, image_path, height, width, label,
        center, gt3d, pose, shape, scale_factors, start_pt, cam, gender_vect):
    """Build an Example proto for an image example with mosh and depth data.
    Args:
        depth_binary: raw depth bytes, stored untouched under 'depth/raw'.
        image_data: string, JPEG encoding of RGB image;
        image_path: string, path to this image file
        label: 3 x 14 joint location + visibility
        height, width: integers, image shapes in pixels.
        center: 2 x 1 center of the tight bbox
        gt3d: 14x3 3D joint locations
        pose, shape: mosh/SMPL parameters; pose=None marks a 2D-only example.
        scale_factors: 2 x 1, scale factor used to scale image.
        start_pt: the left corner used to crop the _scaled_ image to 300x300
        cam: (3,), [f, px, py] intrinsic camera parameters.
        gender_vect: one-hot gender vector ("female"/"male"/"neutral").
    Returns:
        Example proto
    """
    from os.path import basename
    image_format = 'JPEG'
    if label.shape[0] != 3:
        label = label.T
    if label.shape[1] > 14:
        # More than 14 joints is unexpected here; stop for inspection.
        print('This shouldnt be happening')
        import ipdb
        ipdb.set_trace()
    if pose is None:
        has_3d = 0
        # Use -1 to save.
        pose = -np.ones(72)
        shape = -np.ones(10)
    else:
        has_3d = 1
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the drop-in replacement (same resulting dtype).
    example = tf.train.Example(
        features=tf.train.Features(feature={
            'image/height':
            int64_feature(height),
            'image/width':
            int64_feature(width),
            'image/center':
            int64_feature(center.astype(int)),
            'image/x':
            float_feature(label[0, :].astype(np.float32)),
            'image/y':
            float_feature(label[1, :].astype(np.float32)),
            'image/visibility':
            int64_feature(label[2, :].astype(int)),
            'image/format':
            bytes_feature(tf.compat.as_bytes(image_format)),
            'image/filename':
            bytes_feature(tf.compat.as_bytes(basename(image_path))),
            'image/encoded':
            bytes_feature(tf.compat.as_bytes(image_data)),
            'depth/raw':
            bytes_feature(tf.compat.as_bytes(depth_binary)),
            # Depth buffer's (H, W, C) so it can be reshaped on read.
            'depth/size':
            bytes_feature(tf.compat.as_bytes(np.array([height, width, 1], np.int32).tobytes())),
            'mosh/pose':
            float_feature(pose.astype(np.float32)),
            'mosh/shape':
            float_feature(shape.astype(np.float32)),
            'smpl/gender':  # added for smpl gender info;
            float_feature(gender_vect.astype(np.float32)),
            # (x,y,z) values of 3d joints in camera coordinate system;
            'mosh/gt3d':
            float_feature(gt3d.ravel().astype(np.float32)),
            'meta/scale_factors':
            float_feature(np.array(scale_factors).astype(np.float32)),
            'meta/crop_pt':
            int64_feature(start_pt.astype(int)),
            'meta/has_3d':  # whether mosh pose/shape are real values;
            int64_feature(has_3d),
            'image/cam':
            float_feature(cam.astype(np.float32)),
        }))
    return example
# save variables to h5py files for evaluation, added by CCJ on 8/18/2019;
def convert_to_h5_wmosh_wdepth(depth_data, image_data, image_path, height, width, label,
        center, gt3d, pose, shape, scale_factors, start_pt, cam, gender_vect):
    """Pack one annotated example into a plain dict for h5py serialization.
    Args:
        depth_data: raw depth buffer, stored untouched.
        image_data: encoded RGB image, stored untouched.
        image_path: path to the image; only the basename is kept.
        label: 3 x 14 joint location + visibility (transposed if 14 x 3).
        height, width: integers, image shapes in pixels.
        center: 2 x 1 center of the tight bbox
        gt3d: 14x3 3D joint locations
        pose, shape: mosh/SMPL parameters; pose=None marks a 2D-only example.
        scale_factors: 2 x 1, scale factor used to scale image.
        start_pt: the left corner used to crop the _scaled_ image to 300x300
        cam: (3,), [f, px, py] intrinsic camera parameters.
        gender_vect: one-hot gender vector ("female"/"male"/"neutral").
    Returns:
        dict of numpy-typed fields keyed by flat 'group_field' names.
    """
    from os.path import basename
    if label.shape[0] != 3:
        label = label.T
    if label.shape[1] > 14:
        # More than 14 joints is unexpected here; stop for inspection.
        print('This shouldnt be happening')
        import ipdb
        ipdb.set_trace()
    if pose is None:
        has_3d = 0
        # Use -1 to save.
        pose = -np.ones(72)
        shape = -np.ones(10)
    else:
        has_3d = 1
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the drop-in replacement (same resulting dtype).
    example = {
        'image_height': height,
        'image_width': width,
        'image_center': center.astype(int),
        'image_x': label[0, :].astype(np.float32),
        'image_y': label[1, :].astype(np.float32),
        'image_visibility': label[2, :].astype(int),
        'image_filename': basename(image_path),
        'image_img_rgb': image_data,
        'depth_data': depth_data,
        'mosh_pose': pose.astype(np.float32),
        'mosh_shape': shape.astype(np.float32),
        'smpl_gender': gender_vect.astype(np.float32),
        'mosh_gt3d': gt3d.ravel().astype(np.float32),
        'meta_scale_factors': np.array(scale_factors).astype(np.float32),
        'meta_crop_pt': start_pt.astype(int),
        # pose and shape of smpl modle of 3d joints;
        'meta_has_3d': has_3d,
        'image_cam': cam.astype(np.float32),
    }
    return example
def convert_to_example_wmosh(image_data, image_path, height, width, label,
                             center, gt3d, pose, shape, scale_factors,
                             start_pt, cam):
    """Build an Example proto for an image example.
    Args:
        image_data: string, JPEG encoding of RGB image;
        image_path: string, path to this image file
        label: 3 x 14 joint location + visibility
        height, width: integers, image shapes in pixels.
        center: 2 x 1 center of the tight bbox
        gt3d: 14x3 3D joint locations
        pose, shape: mosh/SMPL parameters; pose=None marks a 2D-only example.
        scale_factors: 2 x 1, scale factor used to scale image.
        start_pt: the left corner used to crop the _scaled_ image to 300x300
        cam: (3,), [f, px, py] intrinsic camera parameters.
    Returns:
        Example proto
    """
    from os.path import basename
    image_format = 'JPEG'
    if label.shape[0] != 3:
        label = label.T
    if label.shape[1] > 14:
        # More than 14 joints is unexpected here; stop for inspection.
        print('This shouldnt be happening')
        import ipdb
        ipdb.set_trace()
    if pose is None:
        has_3d = 0
        # Use -1 to save.
        pose = -np.ones(72)
        shape = -np.ones(10)
    else:
        has_3d = 1
    assert(gt3d.shape[1] == 3)
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the drop-in replacement (same resulting dtype).
    example = tf.train.Example(
        features=tf.train.Features(feature={
            'image/height':
            int64_feature(height),
            'image/width':
            int64_feature(width),
            'image/center':
            int64_feature(center.astype(int)),
            'image/x':
            float_feature(label[0, :].astype(np.float32)),
            'image/y':
            float_feature(label[1, :].astype(np.float32)),
            'image/visibility':
            int64_feature(label[2, :].astype(int)),
            'image/format':
            bytes_feature(tf.compat.as_bytes(image_format)),
            'image/filename':
            bytes_feature(tf.compat.as_bytes(basename(image_path))),
            'image/encoded':
            bytes_feature(tf.compat.as_bytes(image_data)),
            'mosh/pose':
            float_feature(pose.astype(np.float32)),
            'mosh/shape':
            float_feature(shape.astype(np.float32)),
            'mosh/gt3d':  # (x,y,z) values of 3d joints;
            float_feature(gt3d.ravel().astype(np.float32)),
            'meta/scale_factors':
            float_feature(np.array(scale_factors).astype(np.float32)),
            'meta/crop_pt':
            int64_feature(start_pt.astype(int)),
            'meta/has_3d':  # pose and shape of smpl modle of 3d joints;
            int64_feature(has_3d),
            'image/cam':  # cam: (3,), [f, px, py] intrinsic camera parameters.
            float_feature(cam.astype(np.float32)),
        }))
    return example
def resize_img(img, scale_factor):
    """Scale an image by scale_factor and report the factor actually applied.

    cv2 needs whole-pixel target sizes, so the returned [y, x] factors can
    differ slightly from the requested scale_factor.
    """
    import cv2
    import numpy as np
    target_hw = (np.floor(np.array(img.shape[0:2]) * scale_factor)).astype(int)
    resized = cv2.resize(img, (target_hw[1], target_hw[0]))
    applied = [
        target_hw[0] / float(img.shape[0]),
        target_hw[1] / float(img.shape[1]),
    ]
    return resized, applied
def read_images_from_tfrecords(tf_path, img_size=224, sess=None):
    """
    Returns image, kp, and gt3d from the tf_paths
    This returns a preprocessed image, cropped around img_size.
    """
    from time import time
    from os.path import exists
    if not exists(tf_path):
        print('%s doesnt exist!' % tf_path)
        exit(1)
    if sess is None:
        sess = tf.Session()
    t0 = time()
    all_images, all_kps, all_gt3ds = [], [], []
    itr = 0
    # Decode op graph, built once and reused for every record.
    image_data_pl = tf.placeholder(dtype=tf.string)
    decode_op = tf.image.decode_jpeg(image_data_pl)
    for serialized_ex in tf.python_io.tf_record_iterator(tf_path):
        example = tf.train.Example()
        example.ParseFromString(serialized_ex)
        feats = example.features.feature
        encoded = feats['image/encoded'].bytes_list.value[0]
        image = sess.run(decode_op, feed_dict={image_data_pl: encoded})
        xs = np.array(feats['image/x'].float_list.value)
        ys = np.array(feats['image/y'].float_list.value)
        vis = np.array(feats['image/visibility'].int64_list.value, dtype='bool')
        center = np.array(feats['image/center'].int64_list.value)
        # Pad so a fixed img_size crop around the center always fits.
        margin = int(img_size / 2)
        padded = np.pad(image, ((margin,), (margin,), (0,)), mode='edge')
        start_pt = center
        end_pt = center + 2 * margin
        # Keypoints in crop coordinates, then normalized to [-1, 1];
        # the visibility flags are stacked as a third column.
        kp_crop = np.vstack([xs + margin - start_pt[0],
                              ys + margin - start_pt[1]])
        kp_final = 2 * (kp_crop / img_size) - 1
        kp_final = np.vstack((vis * kp_final, vis)).T
        crop = padded[start_pt[1]:end_pt[1], start_pt[0]:end_pt[0], :]
        # Normalize pixels to [-1, 1].
        crop = 2 * ((crop / 255.) - 0.5)
        # Note: the key says mosh but gt3d holds the gt H3.6M joints.
        gt3d = np.array(feats['mosh/gt3d'].float_list.value).reshape(-1, 3)
        all_images.append(crop)
        all_kps.append(kp_final)
        all_gt3ds.append(gt3d)
        itr += 1
    images = np.stack(all_images)
    kps = np.stack(all_kps)
    gt3ds = np.stack(all_gt3ds)
    print('Read %d images, %g secs' % (images.shape[0], time() - t0))
    return images, kps, gt3ds
# added by CCJ on 7/22/2019;
def convert_to_example_smpl_pose_joints3d_pairs(
        image_path, joints3d_smpl, trans, pose, shape, gender_vect):
    """Build an Example pairing SMPL pose/shape with the 3D joints they imply.

    Args:
        image_path: string, path to this image file (only basename stored).
        joints3d_smpl: 24x3 3D joints from the SMPL model given the ground
            truth pose and shape.
        trans: translation between root_gt in surreal dataset and the SMPL
            model's root_gt.
        pose: in shape (72,)
        shape: in shape (10,)
        gender_vect: 3-d vector, one-hot over (female, male, neutral).
    Returns:
        Example proto
    """
    from os.path import basename
    assert joints3d_smpl.shape[1] == 3
    feature = {
        'image/filename':
        bytes_feature(tf.compat.as_bytes(basename(image_path))),
        'mosh/pose':
        float_feature(pose.astype(np.float32)),
        'mosh/shape':
        float_feature(shape.astype(np.float32)),
        'smpl/gender':  # smpl gender info;
        float_feature(gender_vect.astype(np.float32)),
        'smpl/trans':
        float_feature(trans.astype(np.float32)),
        # (x,y,z) values of 3d joints in camera coordinates, flattened.
        'mosh/joints3d_from_smpl':
        float_feature(joints3d_smpl.ravel().astype(np.float32)),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature))
# added by CCJ on 8/01/2019;
""" 14 lsp joints3d """
def convert_to_example_smpl_pose_lsp_joints3d_pairs(
image_path, lsp_joints3d_smpl, root, pose, shape):
"""Build an Example proto for an image example.
Args:
image_path: string, path to this image file
lsp_joints3d_smpl: 14x3 3D joint locations inferred from smpl model,
given the grouhd truth pose and shape as input. Also,
the joints3d is the distance w.r.t. the root joint.
That is, joints3d = joints3d - root;
So we also keep the root info
root: the root joint of 24-smpl joints;
pose: in shape (72,)
shape: in shape (10,)
Returns:
Example proto
"""
from os.path import basename
assert(lsp_joints3d_smpl.shape[1] == 3 and lsp_joints3d_smpl.shape[0] == 14)
example = tf.train.Example(
features=tf.train.Features(feature={
'image/filename':
bytes_feature(tf.compat.as_bytes(basename(image_path))),
'mosh/pose':
float_feature(pose.astype(np.float32)),
'mosh/shape':
float_feature(shape.astype(np.float32)),
#'smpl/gender': # added for smpl gender info;
#float_feature(gender_vect.astype(np.float32)),
'smpl/root':
float_feature(root.astype(np.float32)),
#NOTE: comment added by CCJ:
# (x,y,z) values of 3d joints in camera coordinate system;
'mosh/lsp_joints3d_from_smpl':
# np.ravel() : returns contiguous flattened array
float_feature(lsp_joints3d_smpl.ravel().astype(np.float32)),
}))
return example | [
"tensorflow.train.Int64List",
"ipdb.set_trace",
"numpy.ones",
"tensorflow.image.decode_png",
"tensorflow.train.FloatList",
"tensorflow.compat.as_bytes",
"tensorflow.python_io.tf_record_iterator",
"numpy.pad",
"tensorflow.train.Example",
"os.path.exists",
"tensorflow.placeholder",
"cv2.resize",... | [((16003, 16046), 'cv2.resize', 'cv2.resize', (['img', '(new_size[1], new_size[0])'], {}), '(img, (new_size[1], new_size[0]))\n', (16013, 16046), False, 'import cv2\n'), ((16648, 16654), 'time.time', 'time', ([], {}), '()\n', (16652, 16654), False, 'from time import time\n'), ((16759, 16790), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.string'}), '(dtype=tf.string)\n', (16773, 16790), True, 'import tensorflow as tf\n'), ((16807, 16842), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image_data_pl'], {}), '(image_data_pl)\n', (16827, 16842), True, 'import tensorflow as tf\n'), ((16869, 16909), 'tensorflow.python_io.tf_record_iterator', 'tf.python_io.tf_record_iterator', (['tf_path'], {}), '(tf_path)\n', (16900, 16909), True, 'import tensorflow as tf\n'), ((18553, 18573), 'numpy.stack', 'np.stack', (['all_images'], {}), '(all_images)\n', (18561, 18573), True, 'import numpy as np\n'), ((18584, 18601), 'numpy.stack', 'np.stack', (['all_kps'], {}), '(all_kps)\n', (18592, 18601), True, 'import numpy as np\n'), ((18614, 18633), 'numpy.stack', 'np.stack', (['all_gt3ds'], {}), '(all_gt3ds)\n', (18622, 18633), True, 'import numpy as np\n'), ((524, 536), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (534, 536), True, 'import tensorflow as tf\n'), ((626, 657), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.string'}), '(dtype=tf.string)\n', (640, 657), True, 'import tensorflow as tf\n'), ((674, 721), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['self._png_data'], {'channels': '(3)'}), '(self._png_data, channels=3)\n', (693, 721), True, 'import tensorflow as tf\n'), ((750, 804), 'tensorflow.image.encode_jpeg', 'tf.image.encode_jpeg', (['image'], {'format': '"""rgb"""', 'quality': '(100)'}), "(image, format='rgb', quality=100)\n", (770, 804), True, 'import tensorflow as tf\n'), ((911, 942), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.string'}), '(dtype=tf.string)\n', (925, 
942), True, 'import tensorflow as tf\n'), ((971, 1027), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['self._decode_jpeg_data'], {'channels': '(3)'}), '(self._decode_jpeg_data, channels=3)\n', (991, 1027), True, 'import tensorflow as tf\n'), ((1075, 1105), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.uint8'}), '(dtype=tf.uint8)\n', (1089, 1105), True, 'import tensorflow as tf\n'), ((1134, 1192), 'tensorflow.image.encode_jpeg', 'tf.image.encode_jpeg', (['self._encode_jpeg_data'], {'format': '"""rgb"""'}), "(self._encode_jpeg_data, format='rgb')\n", (1154, 1192), True, 'import tensorflow as tf\n'), ((1268, 1299), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.string'}), '(dtype=tf.string)\n', (1282, 1299), True, 'import tensorflow as tf\n'), ((1384, 1454), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['self._decode_png_data'], {'channels': '(0)', 'dtype': 'tf.uint8'}), '(self._decode_png_data, channels=0, dtype=tf.uint8)\n', (1403, 1454), True, 'import tensorflow as tf\n'), ((1589, 1620), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.string'}), '(dtype=tf.string)\n', (1603, 1620), True, 'import tensorflow as tf\n'), ((1655, 1733), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['self._decode_png_uint16_data'], {'channels': '(0)', 'dtype': 'tf.uint16'}), '(self._decode_png_uint16_data, channels=0, dtype=tf.uint16)\n', (1674, 1733), True, 'import tensorflow as tf\n'), ((1992, 2022), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.uint8'}), '(dtype=tf.uint8)\n', (2006, 2022), True, 'import tensorflow as tf\n'), ((2050, 2092), 'tensorflow.image.encode_png', 'tf.image.encode_png', (['self._encode_png_data'], {}), '(self._encode_png_data)\n', (2069, 2092), True, 'import tensorflow as tf\n'), ((8706, 8722), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (8720, 8722), False, 'import ipdb\n'), ((11932, 11948), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (11946, 
11948), False, 'import ipdb\n'), ((12439, 12459), 'os.path.basename', 'basename', (['image_path'], {}), '(image_path)\n', (12447, 12459), False, 'from os.path import basename\n'), ((14098, 14114), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (14112, 14114), False, 'import ipdb\n'), ((16511, 16526), 'os.path.exists', 'exists', (['tf_path'], {}), '(tf_path)\n', (16517, 16526), False, 'from os.path import exists\n'), ((16625, 16637), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (16635, 16637), True, 'import tensorflow as tf\n'), ((16929, 16947), 'tensorflow.train.Example', 'tf.train.Example', ([], {}), '()\n', (16945, 16947), True, 'import tensorflow as tf\n'), ((17449, 17460), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (17457, 17460), True, 'import numpy as np\n'), ((17473, 17484), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (17481, 17484), True, 'import numpy as np\n'), ((17499, 17526), 'numpy.array', 'np.array', (['vis'], {'dtype': '"""bool"""'}), "(vis, dtype='bool')\n", (17507, 17526), True, 'import numpy as np\n'), ((17544, 17560), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (17552, 17560), True, 'import numpy as np\n'), ((17663, 17719), 'numpy.pad', 'np.pad', (['image', '((margin,), (margin,), (0,))'], {'mode': '"""edge"""'}), "(image, ((margin,), (margin,), (0,)), mode='edge')\n", (17669, 17719), True, 'import numpy as np\n'), ((17921, 17948), 'numpy.vstack', 'np.vstack', (['[x_crop, y_crop]'], {}), '([x_crop, y_crop])\n', (17930, 17948), True, 'import numpy as np\n'), ((3712, 3743), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': 'value'}), '(value=value)\n', (3730, 3743), True, 'import tensorflow as tf\n'), ((3977, 4008), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'value'}), '(value=value)\n', (3995, 4008), True, 'import tensorflow as tf\n'), ((4144, 4177), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (4162, 4177), 
True, 'import tensorflow as tf\n'), ((5297, 5329), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['image_format'], {}), '(image_format)\n', (5315, 5329), True, 'import tensorflow as tf\n'), ((5467, 5497), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['image_data'], {}), '(image_data)\n', (5485, 5497), True, 'import tensorflow as tf\n'), ((5711, 5747), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feat_dict'}), '(feature=feat_dict)\n', (5728, 5747), True, 'import tensorflow as tf\n'), ((6960, 6992), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['image_format'], {}), '(image_format)\n', (6978, 6992), True, 'import tensorflow as tf\n'), ((7130, 7160), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['image_data'], {}), '(image_data)\n', (7148, 7160), True, 'import tensorflow as tf\n'), ((7198, 7230), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['depth_binary'], {}), '(depth_binary)\n', (7216, 7230), True, 'import tensorflow as tf\n'), ((7551, 7587), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feat_dict'}), '(feature=feat_dict)\n', (7568, 7587), True, 'import tensorflow as tf\n'), ((8805, 8816), 'numpy.ones', 'np.ones', (['(72)'], {}), '(72)\n', (8812, 8816), True, 'import numpy as np\n'), ((8834, 8845), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (8841, 8845), True, 'import numpy as np\n'), ((12031, 12042), 'numpy.ones', 'np.ones', (['(72)'], {}), '(72)\n', (12038, 12042), True, 'import numpy as np\n'), ((12060, 12071), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (12067, 12071), True, 'import numpy as np\n'), ((14197, 14208), 'numpy.ones', 'np.ones', (['(72)'], {}), '(72)\n', (14204, 14208), True, 'import numpy as np\n'), ((14226, 14237), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (14233, 14237), True, 'import numpy as np\n'), ((18016, 18048), 'numpy.vstack', 'np.vstack', (['(vis * kp_final, vis)'], {}), '((vis * kp_final, vis))\n', (18025, 18048), True, 'import 
numpy as np\n'), ((5404, 5424), 'os.path.basename', 'basename', (['image_path'], {}), '(image_path)\n', (5412, 5424), False, 'from os.path import basename\n'), ((7067, 7087), 'os.path.basename', 'basename', (['image_path'], {}), '(image_path)\n', (7075, 7087), False, 'from os.path import basename\n'), ((12836, 12859), 'numpy.array', 'np.array', (['scale_factors'], {}), '(scale_factors)\n', (12844, 12859), True, 'import numpy as np\n'), ((18394, 18408), 'numpy.array', 'np.array', (['gt3d'], {}), '(gt3d)\n', (18402, 18408), True, 'import numpy as np\n'), ((15935, 15959), 'numpy.array', 'np.array', (['img.shape[0:2]'], {}), '(img.shape[0:2])\n', (15943, 15959), True, 'import numpy as np\n'), ((18691, 18697), 'time.time', 'time', ([], {}), '()\n', (18695, 18697), False, 'from time import time\n'), ((7288, 7326), 'numpy.array', 'np.array', (['[height, width, 1]', 'np.int32'], {}), '([height, width, 1], np.int32)\n', (7296, 7326), True, 'import numpy as np\n'), ((9460, 9492), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['image_format'], {}), '(image_format)\n', (9478, 9492), True, 'import tensorflow as tf\n'), ((9649, 9679), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['image_data'], {}), '(image_data)\n', (9667, 9679), True, 'import tensorflow as tf\n'), ((9775, 9807), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['depth_binary'], {}), '(depth_binary)\n', (9793, 9807), True, 'import tensorflow as tf\n'), ((14887, 14919), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['image_format'], {}), '(image_format)\n', (14905, 14919), True, 'import tensorflow as tf\n'), ((15076, 15106), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['image_data'], {}), '(image_data)\n', (15094, 15106), True, 'import tensorflow as tf\n'), ((9570, 9590), 'os.path.basename', 'basename', (['image_path'], {}), '(image_path)\n', (9578, 9590), False, 'from os.path import basename\n'), ((14997, 15017), 'os.path.basename', 'basename', (['image_path'], {}), 
'(image_path)\n', (15005, 15017), False, 'from os.path import basename\n'), ((19679, 19699), 'os.path.basename', 'basename', (['image_path'], {}), '(image_path)\n', (19687, 19699), False, 'from os.path import basename\n'), ((21407, 21427), 'os.path.basename', 'basename', (['image_path'], {}), '(image_path)\n', (21415, 21427), False, 'from os.path import basename\n'), ((10477, 10500), 'numpy.array', 'np.array', (['scale_factors'], {}), '(scale_factors)\n', (10485, 10500), True, 'import numpy as np\n'), ((15441, 15464), 'numpy.array', 'np.array', (['scale_factors'], {}), '(scale_factors)\n', (15449, 15464), True, 'import numpy as np\n'), ((9882, 9920), 'numpy.array', 'np.array', (['[height, width, 1]', 'np.int32'], {}), '([height, width, 1], np.int32)\n', (9890, 9920), True, 'import numpy as np\n')] |
import hypney
import numpy as np
from scipy import stats
def test_mixture():
    """Exercise hypney mixture models: rates, pdf/cdf, sampling, the `+`
    sugar for building mixtures, parameter renaming/sharing, and vectorized
    parameter evaluation."""
    m1 = hypney.models.uniform(rate=40)
    m2_free = hypney.models.uniform(rate=20)
    m2_frozen = m2_free.freeze()
    m3 = hypney.models.uniform(rate=30)
    # Frozen and free components must behave identically in a mixture.
    for m2 in m2_free, m2_frozen:
        mix = hypney.models.mixture(m1, m2)
        # Total rate is the sum of the component rates (40 + 20).
        assert mix.rate() == 60.0
        assert mix.pdf(data=0) == 1.0
        assert mix.diff_rate(data=0) == 60.0
        np.testing.assert_array_equal(
            mix.cdf(data=[0.0, 0.5, 1.0]), np.array([0.0, 0.5, 1.0])
        )
        assert mix.simulate().shape[0] > 0
        assert mix.rvs(size=50).shape[0] > 0
    # Test forming mixtures by +
    mix2 = m1 + m2
    assert mix2.diff_rate(data=0) == 60.0
    mix3 = m3 + mix2
    assert mix3.diff_rate(data=0) == 90.0
    assert len(mix3.models) == 3, "Should unpack mixtures"
    mix4 = m1 + m2 + m3
    assert mix4.diff_rate(data=0) == 90.0
    # Test mean and std
    mix = hypney.models.norm() + hypney.models.uniform(loc=5, scale=2)
    data = mix.rvs(100_000)
    # Statistical comparison, hence the loose rtol.
    np.testing.assert_allclose(mix.mean(), data.mean(), rtol=0.05)
    np.testing.assert_allclose(mix.std(), data.std(), rtol=0.05)
    # Test parameter after renaming
    mix = m1 + m2_free
    assert mix.rate(params=dict(m0_rate=1)) == 21.0
    mix = m1 + m2_frozen
    assert mix.rate(params=dict(rate=1)) == 21.0
    m2 = m2_free
    # Test parameter sharing
    m_shared = hypney.models.mixture(m1, m2, m3, share="scale")
    # Shared parameter appears once, without per-component suffixes.
    assert "scale" in m_shared.param_names
    assert "scale_0" not in m_shared.param_names
    assert "scale_1" not in m_shared.param_names
    assert "scale_2" not in m_shared.param_names
    assert m_shared(scale=2).pdf(2) == 0.5 * (m1 + m2 + m3).pdf(1)
    # Test vectorization
    m = hypney.models.mixture(
        hypney.models.norm(), hypney.models.norm().shift(1), share="loc"
    )
    locs = np.linspace(0, 2, 10)
    np.testing.assert_almost_equal(m.pdf(0, loc=locs), [m.pdf(0, loc=x) for x in locs])
def test_tensor_product():
    """A tensor product (``**``) of three 1-D uniforms is a model over 3-D
    events whose pdf factorizes and whose rate comes from the first factor."""
    first = hypney.models.uniform(rate=40)
    second = hypney.models.uniform(rate=20)
    third = hypney.models.uniform(rate=30)
    prod = first ** second ** third
    events = np.array([[0, 0, 0], [1, 1, 1]])
    # Uniform pdf is 1 on [0, 1], so the product pdf is 1 at both events.
    np.testing.assert_array_equal(prod.pdf(data=events), np.array([1, 1]))
    expected_logpdf = stats.uniform().logpdf(0) ** 3
    np.testing.assert_array_equal(prod.logpdf(data=events), expected_logpdf)
    # Rate of the product is taken from the first component.
    assert prod.rate() == 40.0
    sample = prod.simulate()
    assert sample.shape[0] > 0
    assert sample.shape[1] == 3
| [
"hypney.models.norm",
"scipy.stats.uniform",
"numpy.array",
"numpy.linspace",
"hypney.models.uniform",
"hypney.models.mixture"
] | [((89, 119), 'hypney.models.uniform', 'hypney.models.uniform', ([], {'rate': '(40)'}), '(rate=40)\n', (110, 119), False, 'import hypney\n'), ((134, 164), 'hypney.models.uniform', 'hypney.models.uniform', ([], {'rate': '(20)'}), '(rate=20)\n', (155, 164), False, 'import hypney\n'), ((207, 237), 'hypney.models.uniform', 'hypney.models.uniform', ([], {'rate': '(30)'}), '(rate=30)\n', (228, 237), False, 'import hypney\n'), ((1485, 1533), 'hypney.models.mixture', 'hypney.models.mixture', (['m1', 'm2', 'm3'], {'share': '"""scale"""'}), "(m1, m2, m3, share='scale')\n", (1506, 1533), False, 'import hypney\n'), ((1938, 1959), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(10)'], {}), '(0, 2, 10)\n', (1949, 1959), True, 'import numpy as np\n'), ((2086, 2116), 'hypney.models.uniform', 'hypney.models.uniform', ([], {'rate': '(40)'}), '(rate=40)\n', (2107, 2116), False, 'import hypney\n'), ((2126, 2156), 'hypney.models.uniform', 'hypney.models.uniform', ([], {'rate': '(20)'}), '(rate=20)\n', (2147, 2156), False, 'import hypney\n'), ((2166, 2196), 'hypney.models.uniform', 'hypney.models.uniform', ([], {'rate': '(30)'}), '(rate=30)\n', (2187, 2196), False, 'import hypney\n'), ((2236, 2268), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 1, 1]]'], {}), '([[0, 0, 0], [1, 1, 1]])\n', (2244, 2268), True, 'import numpy as np\n'), ((287, 316), 'hypney.models.mixture', 'hypney.models.mixture', (['m1', 'm2'], {}), '(m1, m2)\n', (308, 316), False, 'import hypney\n'), ((1856, 1876), 'hypney.models.norm', 'hypney.models.norm', ([], {}), '()\n', (1874, 1876), False, 'import hypney\n'), ((2325, 2341), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (2333, 2341), True, 'import numpy as np\n'), ((518, 543), 'numpy.array', 'np.array', (['[0.0, 0.5, 1.0]'], {}), '([0.0, 0.5, 1.0])\n', (526, 543), True, 'import numpy as np\n'), ((1002, 1022), 'hypney.models.norm', 'hypney.models.norm', ([], {}), '()\n', (1020, 1022), False, 'import hypney\n'), ((1025, 1062), 
'hypney.models.uniform', 'hypney.models.uniform', ([], {'loc': '(5)', 'scale': '(2)'}), '(loc=5, scale=2)\n', (1046, 1062), False, 'import hypney\n'), ((1878, 1898), 'hypney.models.norm', 'hypney.models.norm', ([], {}), '()\n', (1896, 1898), False, 'import hypney\n'), ((2411, 2426), 'scipy.stats.uniform', 'stats.uniform', ([], {}), '()\n', (2424, 2426), False, 'from scipy import stats\n')] |
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021 Scipp contributors (https://github.com/scipp)
# @file
# @author <NAME>
import numpy as np
import pytest
import scipp as sc
from .common import assert_export
def make_variables():
    """Return two equal 1-D variables, full-range slices of both, and the
    underlying numpy data used to build them."""
    values = np.arange(1, 4, dtype=float)
    first = sc.Variable(dims=['x'], values=values)
    second = sc.Variable(dims=['x'], values=values)
    return first, second, first['x', :], second['x', :], values
# A default-constructed Variable is a dimensionless float64 scalar of value 0.
def test_create_default():
    var = sc.Variable()
    assert var.dims == []
    assert var.dtype == sc.dtype.float64
    assert var.unit == sc.units.dimensionless
    assert var.value == 0.0
# Shape-only construction defaults to float64.
def test_create_default_dtype():
    var = sc.Variable(dims=['x'], shape=[4])
    assert var.dtype == sc.dtype.float64
# dtype can be given as a scipp dtype ...
def test_create_with_dtype():
    var = sc.Variable(dims=['x'], shape=[2], dtype=sc.dtype.float32)
    assert var.dtype == sc.dtype.float32
# ... or as a numpy dtype object.
def test_create_with_numpy_dtype():
    var = sc.Variable(dims=['x'], shape=[2], dtype=np.dtype(np.float32))
    assert var.dtype == sc.dtype.float32
# Units accept string spellings both at construction and on assignment.
def test_create_with_unit_as_string():
    var = sc.Variable(dims=['x'], unit='meV', values=np.arange(2))
    assert var.unit == sc.units.meV
    var.unit = 'm/s'
    assert var.unit == sc.units.m / sc.units.s
# `variances` as a bool flag controls whether variances are allocated.
def test_create_with_variances():
    assert sc.Variable(dims=['x'], shape=[2]).variances is None
    assert sc.Variable(dims=['x'], shape=[2],
                       variances=False).variances is None
    assert sc.Variable(dims=['x'], shape=[2],
                       variances=True).variances is not None
def test_create_with_shape_and_variances():
    # If no values are given, variances must be Bool, cannot pass array.
    with pytest.raises(TypeError):
        sc.Variable(dims=['x'], shape=[2], variances=np.arange(2))
# Values from a numpy array determine dtype and contents.
def test_create_from_numpy_1d():
    var = sc.Variable(dims=['x'], values=np.arange(4.0))
    assert var.dtype == sc.dtype.float64
    np.testing.assert_array_equal(var.values, np.arange(4))
def test_create_from_numpy_1d_bool():
    var = sc.Variable(dims=['x'], values=np.array([True, False, True]))
    assert var.dtype == sc.dtype.bool
    np.testing.assert_array_equal(var.values, np.array([True, False, True]))
# Values and variances may both be provided as numpy arrays.
def test_create_with_variances_from_numpy_1d():
    var = sc.Variable(dims=['x'],
                      values=np.arange(4.0),
                      variances=np.arange(4.0, 8.0))
    assert var.dtype == sc.dtype.float64
    np.testing.assert_array_equal(var.values, np.arange(4))
    np.testing.assert_array_equal(var.variances, np.arange(4, 8))
# Scalar construction from a float, numpy float64, or an existing scalar's
# value must all yield the same dimensionless float64 scalar.
@pytest.mark.parametrize(
    "value",
    [1.2, np.float64(1.2), sc.Variable(1.2).value])
def test_create_scalar(value):
    var = sc.Variable(value)
    assert var.value == value
    assert var.dims == []
    assert var.dtype == sc.dtype.float64
    assert var.unit == sc.units.dimensionless
# The unit argument accepts a units object, a Unit, a string, or a unit
# extracted from another Variable.
@pytest.mark.parametrize(
    "unit",
    [sc.units.m,
     sc.Unit('m'), 'm',
     sc.Variable(1.2, unit=sc.units.m).unit])
def test_create_scalar_with_unit(unit):
    var = sc.Variable(1.2, unit=unit)
    assert var.value == 1.2
    assert var.dims == []
    assert var.dtype == sc.dtype.float64
    assert var.unit == sc.units.m
# A Variable (or a slice view of one) can be wrapped as a 0-D scalar element.
def test_create_scalar_Variable():
    elem = sc.Variable(dims=['x'], values=np.arange(4.0))
    var = sc.Variable(elem)
    assert sc.identical(var.value, elem)
    assert var.dims == []
    assert var.dtype == sc.dtype.Variable
    assert var.unit == sc.units.dimensionless
    var = sc.Variable(elem['x', 1:3])
    assert var.dtype == sc.dtype.Variable
# Same for DataArray elements.
def test_create_scalar_DataArray():
    elem = sc.DataArray(data=sc.Variable(dims=['x'], values=np.arange(4.0)))
    var = sc.Variable(elem)
    assert sc.identical(var.value, elem)
    assert var.dims == []
    assert var.dtype == sc.dtype.DataArray
    assert var.unit == sc.units.dimensionless
    var = sc.Variable(elem['x', 1:3])
    assert var.dtype == sc.dtype.DataArray
# Same for Dataset elements.
def test_create_scalar_Dataset():
    elem = sc.Dataset({'a': sc.Variable(dims=['x'], values=np.arange(4.0))})
    var = sc.Variable(elem)
    assert sc.identical(var.value, elem)
    assert var.dims == []
    assert var.dtype == sc.dtype.Dataset
    assert var.unit == sc.units.dimensionless
    var = sc.Variable(elem['x', 1:3])
    assert var.dtype == sc.dtype.Dataset
# Scalar with an explicit unit keyword.
def test_create_scalar_quantity():
    var = sc.Variable(1.2, unit=sc.units.m)
    assert var.value == 1.2
    assert var.dims == []
    assert var.dtype == sc.dtype.float64
    assert var.unit == sc.units.m
# `number * unit` is shorthand for constructing a scalar quantity.
def test_create_via_unit():
    expected = sc.Variable(1.2, unit=sc.units.m)
    var = 1.2 * sc.units.m
    assert sc.identical(var, expected)
# 1-D variable of (variable-length) strings.
def test_create_1D_string():
    var = sc.Variable(dims=['row'], values=['a', 'bb'], unit=sc.units.m)
    assert len(var.values) == 2
    assert var.values[0] == 'a'
    assert var.values[1] == 'bb'
    assert var.dims == ['row']
    assert var.dtype == sc.dtype.string
    assert var.unit == sc.units.m
# sc.vectors builds a variable whose elements are 3-vectors.
def test_create_1D_vector_3_float64():
    var = sc.vectors(dims=['x'],
                     values=[[1, 2, 3], [4, 5, 6]],
                     unit=sc.units.m)
    assert len(var.values) == 2
    np.testing.assert_array_equal(var.values[0], [1, 2, 3])
    np.testing.assert_array_equal(var.values[1], [4, 5, 6])
    assert var.dims == ['x']
    assert var.dtype == sc.dtype.vector_3_float64
    assert var.unit == sc.units.m
# 2-D construction from a reshaped numpy array; dims follow row-major order.
def test_create_2D_inner_size_3():
    var = sc.Variable(dims=['x', 'y'],
                      values=np.arange(6.0).reshape(2, 3),
                      unit=sc.units.m)
    assert var.shape == [2, 3]
    np.testing.assert_array_equal(var.values[0], [0, 1, 2])
    np.testing.assert_array_equal(var.values[1], [3, 4, 5])
    assert var.dims == ['x', 'y']
    assert var.dtype == sc.dtype.float64
    assert var.unit == sc.units.m
# astype returns a converted copy; the source keeps its dtype.
def test_astype():
    var = sc.Variable(dims=['x'],
                      values=np.array([1, 2, 3, 4], dtype=np.int64))
    assert var.dtype == sc.dtype.int64
    var_as_float = var.astype(sc.dtype.float32)
    assert var_as_float.dtype == sc.dtype.float32
# Numeric -> string conversion is not supported and must raise.
def test_astype_bad_conversion():
    var = sc.Variable(dims=['x'],
                      values=np.array([1, 2, 3, 4], dtype=np.int64))
    assert var.dtype == sc.dtype.int64
    with pytest.raises(sc.DTypeError):
        var.astype(sc.dtype.string)
# In-place multiply by a scalar quantity scales values and attaches the unit.
def test_operation_with_scalar_quantity():
    reference = sc.Variable(dims=['x'], values=np.arange(4.0) * 1.5)
    reference.unit = sc.units.kg
    var = sc.Variable(dims=['x'], values=np.arange(4.0))
    var *= sc.Variable(1.5, unit=sc.units.kg)
    assert sc.identical(reference, var)
# 0-D scalars expose both `value` and a 0-d `values` array.
def test_0D_scalar_access():
    var = sc.Variable()
    assert var.value == 0.0
    var.value = 1.2
    assert var.value == 1.2
    assert var.values.shape == ()
    assert var.values == 1.2
def test_0D_scalar_string():
    var = sc.Variable(value='a')
    assert var.value == 'a'
    var.value = 'b'
    assert sc.identical(var, sc.Variable(value='b'))
# `value` is only valid for 0-D variables, even for length-1 arrays.
def test_1D_scalar_access_fail():
    var = sc.Variable(dims=['x'], shape=(1, ))
    with pytest.raises(RuntimeError):
        assert var.value == 0.0
    with pytest.raises(RuntimeError):
        var.value = 1.2
# Assigning a scalar to `values` of a 1-D variable is a shape mismatch.
def test_1D_access_shape_mismatch_fail():
    var = sc.Variable(dims=['x'], shape=(2, ))
    with pytest.raises(RuntimeError):
        var.values = 1.2
def test_1D_access():
    var = sc.Variable(dims=['x'], shape=(2, ))
    assert len(var.values) == 2
    assert var.values.shape == (2, )
    var.values[1] = 1.2
    assert var.values[1] == 1.2
# `values` accepts a plain Python list of matching length.
def test_1D_set_from_list():
    var = sc.Variable(dims=['x'], shape=(2, ))
    var.values = [1.0, 2.0]
    assert sc.identical(var, sc.Variable(dims=['x'], values=[1.0, 2.0]))
def test_1D_string():
    var = sc.Variable(dims=['x'], values=['a', 'b'])
    assert len(var.values) == 2
    assert var.values[0] == 'a'
    assert var.values[1] == 'b'
    var.values = ['c', 'd']
    assert sc.identical(var, sc.Variable(dims=['x'], values=['c', 'd']))
# Assigning floats into an int variable truncates, mirroring numpy casting.
def test_1D_converting():
    var = sc.Variable(dims=['x'], values=[1, 2])
    var.values = [3.3, 4.6]
    # floats get truncated
    assert sc.identical(var, sc.Variable(dims=['x'], values=[3, 4]))
# Elements of a variable may themselves be Datasets.
def test_1D_dataset():
    var = sc.Variable(dims=['x'], shape=(2, ), dtype=sc.dtype.Dataset)
    d1 = sc.Dataset({'a': 1.5 * sc.units.m})
    d2 = sc.Dataset({'a': 2.5 * sc.units.m})
    var.values = [d1, d2]
    assert sc.identical(var.values[0], d1)
    assert sc.identical(var.values[1], d2)
def test_1D_access_bad_shape_fail():
    var = sc.Variable(dims=['x'], shape=(2, ))
    with pytest.raises(RuntimeError):
        var.values = np.arange(3)
# 2-D `values` behaves like a numpy array view, including row broadcasting.
def test_2D_access():
    var = sc.Variable(dims=['x', 'y'], shape=(2, 3))
    assert var.values.shape == (2, 3)
    assert len(var.values) == 2
    assert len(var.values[0]) == 3
    var.values[1] = 1.2  # numpy assigns to all elements in "slice"
    var.values[1][2] = 2.2
    assert var.values[1][0] == 1.2
    assert var.values[1][1] == 1.2
    assert var.values[1][2] == 2.2
def test_2D_access_bad_shape_fail():
    var = sc.Variable(dims=['x', 'y'], shape=(2, 3))
    with pytest.raises(RuntimeError):
        var.values = np.ones(shape=(3, 2))
# Variances are allocated zero-initialized and are writable as an array.
def test_2D_access_variances():
    var = sc.Variable(dims=['x', 'y'], shape=(2, 3), variances=True)
    assert var.values.shape == (2, 3)
    assert var.variances.shape == (2, 3)
    var.values[1] = 1.2
    assert np.array_equal(var.variances, np.zeros(shape=(2, 3)))
    var.variances = np.ones(shape=(2, 3))
    assert np.array_equal(var.variances, np.ones(shape=(2, 3)))
# dtype is deduced from numpy values, or taken from an explicit numpy dtype.
def test_create_dtype():
    var = sc.Variable(dims=['x'], values=np.arange(4).astype(np.int64))
    assert var.dtype == sc.dtype.int64
    var = sc.Variable(dims=['x'], values=np.arange(4).astype(np.int32))
    assert var.dtype == sc.dtype.int32
    var = sc.Variable(dims=['x'], values=np.arange(4).astype(np.float64))
    assert var.dtype == sc.dtype.float64
    var = sc.Variable(dims=['x'], values=np.arange(4).astype(np.float32))
    assert var.dtype == sc.dtype.float32
    var = sc.Variable(dims=['x'], shape=(4, ), dtype=np.dtype(np.float64))
    assert var.dtype == sc.dtype.float64
    var = sc.Variable(dims=['x'], shape=(4, ), dtype=np.dtype(np.float32))
    assert var.dtype == sc.dtype.float32
    var = sc.Variable(dims=['x'], shape=(4, ), dtype=np.dtype(np.int64))
    assert var.dtype == sc.dtype.int64
    var = sc.Variable(dims=['x'], shape=(4, ), dtype=np.dtype(np.int32))
    assert var.dtype == sc.dtype.int32
# Positional slicing along a dim keeps that dim (slice of extent 1).
def test_getitem():
    var = sc.Variable(dims=['x', 'y'], values=np.arange(0, 8).reshape(2, 4))
    var_slice = var['x', 1:2]
    assert sc.identical(
        var_slice,
        sc.Variable(dims=['x', 'y'], values=np.arange(4, 8).reshape(1, 4)))
# Assigning a scalar to a slice broadcasts it over the sliced range.
def test_setitem_broadcast():
    var = sc.Variable(dims=['x'], values=[1, 2, 3, 4], dtype=sc.dtype.int64)
    var['x', 1:3] = sc.Variable(value=5, dtype=sc.dtype.int64)
    assert sc.identical(
        var, sc.Variable(dims=['x'], values=[1, 5, 5, 4],
                         dtype=sc.dtype.int64))
# Slicing supports negative indices and yields empty slices for empty ranges.
def test_slicing():
    var = sc.Variable(dims=['x'], values=np.arange(0, 3))
    for slice_, expected in ((slice(0, 2), [0, 1]), (slice(-3, -1), [0, 1]),
                             (slice(2, 1), [])):
        var_slice = var[('x', slice_)]
        assert len(var_slice.values) == len(expected)
        assert np.array_equal(var_slice.values, np.array(expected))
def test_sizes():
a = sc.Variable(value=1)
assert a.sizes == {}
a = sc.Variable(['x'], shape=[2])
assert a.sizes == {'x': 2}
a = sc.Variable(['y', 'z'], shape=[3, 4])
assert a.sizes == {'y': 3, 'z': 4}
# In-place arithmetic must modify the object itself AND return the same
# Python object, so aliases (`b = a`) stay connected after `a op= x`.
def test_iadd():
    expected = sc.Variable(2.2)
    a = sc.Variable(1.2)
    b = a
    a += 1.0
    assert sc.identical(a, expected)
    assert sc.identical(b, expected)
    # This extra check is important: It can happen that an implementation of,
    # e.g., __iadd__ does an in-place modification, updating `b`, but then the
    # return value is assigned to `a`, which could break the connection unless
    # the correct Python object is returned.
    a += 1.0
    assert sc.identical(a, b)
def test_isub():
    expected = sc.Variable(2.2 - 1.0)
    a = sc.Variable(2.2)
    b = a
    a -= 1.0
    assert sc.identical(a, expected)
    assert sc.identical(b, expected)
    a -= 1.0
    assert sc.identical(a, b)
def test_imul():
    expected = sc.Variable(2.4)
    a = sc.Variable(1.2)
    b = a
    a *= 2.0
    assert sc.identical(a, expected)
    assert sc.identical(b, expected)
    a *= 2.0
    assert sc.identical(a, b)
def test_idiv():
    expected = sc.Variable(1.2)
    a = sc.Variable(2.4)
    b = a
    a /= 2.0
    assert sc.identical(a, expected)
    assert sc.identical(b, expected)
    a /= 2.0
    assert sc.identical(a, b)
# Same aliasing checks for the in-place logical operators on bool scalars.
def test_iand():
    expected = sc.Variable(False)
    a = sc.Variable(True)
    b = a
    a &= sc.Variable(False)
    assert sc.identical(a, expected)
    assert sc.identical(b, expected)
    a |= sc.Variable(True)
    assert sc.identical(a, b)
def test_ior():
    expected = sc.Variable(True)
    a = sc.Variable(False)
    b = a
    a |= sc.Variable(True)
    assert sc.identical(a, expected)
    assert sc.identical(b, expected)
    a &= sc.Variable(True)
    assert sc.identical(a, b)
def test_ixor():
    expected = sc.Variable(True)
    a = sc.Variable(False)
    b = a
    a ^= sc.Variable(True)
    assert sc.identical(a, expected)
    assert sc.identical(b, expected)
    a ^= sc.Variable(True)
    assert sc.identical(a, b)
# Binary arithmetic: variable-variable, variable-scalar, variable-slice,
# in-place accumulation, and reflected (scalar-on-the-left) operations.
def test_binary_plus():
    a, b, a_slice, b_slice, data = make_variables()
    c = a + b
    assert np.array_equal(c.values, data + data)
    c = a + 2.0
    assert np.array_equal(c.values, data + 2.0)
    c = a + b_slice
    assert np.array_equal(c.values, data + data)
    c += b
    assert np.array_equal(c.values, data + data + data)
    c += b_slice
    assert np.array_equal(c.values, data + data + data + data)
    c = 3.5 + c
    assert np.array_equal(c.values, data + data + data + data + 3.5)
def test_binary_minus():
    a, b, a_slice, b_slice, data = make_variables()
    c = a - b
    assert np.array_equal(c.values, data - data)
    c = a - 2.0
    assert np.array_equal(c.values, data - 2.0)
    c = a - b_slice
    assert np.array_equal(c.values, data - data)
    c -= b
    assert np.array_equal(c.values, data - data - data)
    c -= b_slice
    assert np.array_equal(c.values, data - data - data - data)
    c = 3.5 - c
    assert np.array_equal(c.values, 3.5 - data + data + data + data)
def test_binary_multiply():
    a, b, a_slice, b_slice, data = make_variables()
    c = a * b
    assert np.array_equal(c.values, data * data)
    c = a * 2.0
    assert np.array_equal(c.values, data * 2.0)
    c = a * b_slice
    assert np.array_equal(c.values, data * data)
    c *= b
    assert np.array_equal(c.values, data * data * data)
    c *= b_slice
    assert np.array_equal(c.values, data * data * data * data)
    c = 3.5 * c
    assert np.array_equal(c.values, data * data * data * data * 3.5)
def test_binary_divide():
    a, b, a_slice, b_slice, data = make_variables()
    c = a / b
    assert np.array_equal(c.values, data / data)
    c = a / 2.0
    assert np.array_equal(c.values, data / 2.0)
    c = a / b_slice
    assert np.array_equal(c.values, data / data)
    c /= b
    assert np.array_equal(c.values, data / data / data)
    c /= b_slice
    assert np.array_equal(c.values, data / data / data / data)
    c = 2.0 / a
    assert np.array_equal(c.values, 2.0 / data)
# Logical |, &, ^ — each tested in-place and out-of-place, on 0-D scalars
# and on 1-D variables covering the full truth table.
def test_in_place_binary_or():
    a = sc.Variable(False)
    b = sc.Variable(True)
    a |= b
    assert sc.identical(a, sc.Variable(True))
    a = sc.Variable(dims=['x'], values=np.array([False, True, False, True]))
    b = sc.Variable(dims=['x'], values=np.array([False, False, True, True]))
    a |= b
    assert sc.identical(
        a, sc.Variable(dims=['x'], values=np.array([False, True, True, True])))
def test_binary_or():
    a = sc.Variable(False)
    b = sc.Variable(True)
    assert sc.identical((a | b), sc.Variable(True))
    a = sc.Variable(dims=['x'], values=np.array([False, True, False, True]))
    b = sc.Variable(dims=['x'], values=np.array([False, False, True, True]))
    assert sc.identical((a | b),
                        sc.Variable(dims=['x'],
                                    values=np.array([False, True, True,
                                                     True])))
def test_in_place_binary_and():
    a = sc.Variable(False)
    b = sc.Variable(True)
    a &= b
    assert sc.identical(a, sc.Variable(False))
    a = sc.Variable(dims=['x'], values=np.array([False, True, False, True]))
    b = sc.Variable(dims=['x'], values=np.array([False, False, True, True]))
    a &= b
    assert sc.identical(
        a, sc.Variable(dims=['x'],
                       values=np.array([False, False, False, True])))
def test_binary_and():
    a = sc.Variable(False)
    b = sc.Variable(True)
    assert sc.identical((a & b), sc.Variable(False))
    a = sc.Variable(dims=['x'], values=np.array([False, True, False, True]))
    b = sc.Variable(dims=['x'], values=np.array([False, False, True, True]))
    assert sc.identical(
        (a & b),
        sc.Variable(dims=['x'], values=np.array([False, False, False, True])))
def test_in_place_binary_xor():
    a = sc.Variable(False)
    b = sc.Variable(True)
    a ^= b
    assert sc.identical(a, sc.Variable(True))
    a = sc.Variable(dims=['x'], values=np.array([False, True, False, True]))
    b = sc.Variable(dims=['x'], values=np.array([False, False, True, True]))
    a ^= b
    assert sc.identical(
        a, sc.Variable(dims=['x'], values=np.array([False, True, True,
                                                   False])))
def test_binary_xor():
    a = sc.Variable(False)
    b = sc.Variable(True)
    assert sc.identical((a ^ b), sc.Variable(True))
    a = sc.Variable(dims=['x'], values=np.array([False, True, False, True]))
    b = sc.Variable(dims=['x'], values=np.array([False, False, True, True]))
    assert sc.identical((a ^ b),
                        sc.Variable(dims=['x'],
                                    values=np.array([False, True, True,
                                                     False])))
# Round-trip of in-place ops with plain Python scalars restores the original.
def test_in_place_binary_with_scalar():
    v = sc.Variable(dims=['x'], values=[10.0])
    copy = v.copy()
    v += 2
    v *= 2
    v -= 4
    v /= 2
    assert sc.identical(v, copy)
# identical() is symmetric and treats full-range slices like the variable.
def test_binary_equal():
    a, b, a_slice, b_slice, data = make_variables()
    assert sc.identical(a, b)
    assert sc.identical(a, a_slice)
    assert sc.identical(a_slice, b_slice)
    assert sc.identical(b, a)
    assert sc.identical(b_slice, a)
    assert sc.identical(b_slice, a_slice)
def test_binary_not_equal():
    a, b, a_slice, b_slice, data = make_variables()
    c = a + b
    assert not sc.identical(a, c)
    assert not sc.identical(a_slice, c)
    assert not sc.identical(c, a)
    assert not sc.identical(c, a_slice)
# The assert_export helper (from .common) checks that the free function is
# exported and callable with the given arguments.
def test_abs():
    assert_export(sc.abs, sc.Variable())
def test_abs_out():
    var = sc.Variable()
    assert_export(sc.abs, var, out=var)
def test_dot():
    assert_export(sc.dot, sc.Variable(), sc.Variable())
def test_concatenate():
    assert_export(sc.concatenate, sc.Variable(), sc.Variable(), 'x')
def test_mean():
    assert_export(sc.mean, sc.Variable(), 'x')
def test_mean_in_place():
    var = sc.Variable()
    assert_export(sc.mean, sc.Variable(), 'x', var)
def test_norm():
    assert_export(sc.norm, sc.Variable())
def test_sqrt():
    assert_export(sc.sqrt, sc.Variable())
def test_sqrt_out():
    var = sc.Variable()
    assert_export(sc.sqrt, var, var)
def test_values_variances():
    assert_export(sc.values, sc.Variable())
    assert_export(sc.variances, sc.Variable())
# sum reduces over the named dimension and keeps the unit.
def test_sum():
    var = sc.Variable(dims=['x', 'y'],
                      values=np.array([[0.1, 0.3], [0.2, 0.6]]),
                      unit=sc.units.m)
    expected = sc.Variable(dims=['x'],
                           values=np.array([0.4, 0.8]),
                           unit=sc.units.m)
    assert sc.identical(sc.sum(var, 'y'), expected)
# The `out` argument receives the result and is also returned as a view.
def test_sum_in_place():
    var = sc.Variable(dims=['x', 'y'],
                      values=np.array([[0.1, 0.3], [0.2, 0.6]]),
                      unit=sc.units.m)
    out_var = sc.Variable(dims=['x'],
                          values=np.array([0.0, 0.0]),
                          unit=sc.units.m)
    expected = sc.Variable(dims=['x'],
                           values=np.array([0.4, 0.8]),
                           unit=sc.units.m)
    out_view = sc.sum(var, 'y', out=out_var)
    assert sc.identical(out_var, expected)
    assert sc.identical(out_view, expected)
# Variances default to absent (None) on a fresh variable.
def test_variance_acess():
    v = sc.Variable()
    assert v.variance is None
    assert v.variances is None
# Assigning an array to `variances` adds variances to a variable without any.
def test_set_variance():
    values = np.random.rand(2, 3)
    variances = np.random.rand(2, 3)
    var = sc.Variable(dims=['x', 'y'], values=values)
    expected = sc.Variable(dims=['x', 'y'], values=values, variances=variances)
    assert var.variances is None
    assert not sc.identical(var, expected)
    var.variances = variances
    assert var.variances is not None
    assert sc.identical(var, expected)
# Variances can be copied directly from another variable's variances.
def test_copy_variance():
    values = np.random.rand(2, 3)
    variances = np.random.rand(2, 3)
    var = sc.Variable(dims=['x', 'y'], values=values)
    expected = sc.Variable(dims=['x', 'y'], values=values, variances=variances)
    assert var.variances is None
    assert not sc.identical(var, expected)
    var.variances = expected.variances
    assert var.variances is not None
    assert sc.identical(var, expected)
# Setting variances to None removes them.
def test_remove_variance():
    values = np.random.rand(2, 3)
    variances = np.random.rand(2, 3)
    var = sc.Variable(dims=['x', 'y'], values=values, variances=variances)
    expected = sc.Variable(dims=['x', 'y'], values=values)
    assert var.variances is not None
    var.variances = None
    assert var.variances is None
    assert sc.identical(var, expected)
# Integer variances are converted to the variable's (float) dtype on assign.
def test_set_variance_convert_dtype():
    values = np.random.rand(2, 3)
    variances = np.arange(6).reshape(2, 3)
    assert variances.dtype == int
    var = sc.Variable(dims=['x', 'y'], values=values)
    expected = sc.Variable(dims=['x', 'y'], values=values, variances=variances)
    assert var.variances is None
    assert not sc.identical(var, expected)
    var.variances = variances
    assert var.variances is not None
    assert sc.identical(var, expected)
# sum of ints stays int (0+1+2+3+4 = 10); mean of ints yields float (2.5).
def test_sum_mean():
    var = sc.Variable(dims=['x'], values=np.arange(5, dtype=np.int64))
    assert sc.identical(sc.sum(var, 'x'), sc.Variable(10))
    var = sc.Variable(dims=['x'], values=np.arange(6, dtype=np.int64))
    assert sc.identical(sc.mean(var, 'x'), sc.Variable(2.5))
# Setting a unit on a default scalar matches `0.0 * unit` / `0.0 / unit`,
# for both Python floats and numpy float32 values.
def test_make_variable_from_unit_scalar_mult_div():
    var = sc.Variable()
    var.unit = sc.units.m
    assert sc.identical(var, 0.0 * sc.units.m)
    var.unit = sc.units.m**(-1)
    assert sc.identical(var, 0.0 / sc.units.m)
    var = sc.Variable(value=np.float32())
    var.unit = sc.units.m
    assert sc.identical(var, np.float32(0.0) * sc.units.m)
    var.unit = sc.units.m**(-1)
    assert sc.identical(var, np.float32(0.0) / sc.units.m)
# Copying a 0-D slice of a float32 variable keeps the dtype; units can then
# be attached and compared against numpy-scalar * unit expressions.
def test_construct_0d_numpy():
    v = sc.Variable(dims=['x'], values=np.array([0]), dtype=np.float32)
    var = v['x', 0].copy()
    assert sc.identical(var, sc.Variable(np.float32()))
    v = sc.Variable(dims=['x'], values=np.array([0]), dtype=np.float32)
    var = v['x', 0].copy()
    var.unit = sc.units.m
    assert sc.identical(var, np.float32(0.0) * sc.units.m)
    var.unit = sc.units.m**(-1)
    assert sc.identical(var, np.float32(0.0) / sc.units.m)
# Python int/float/bool map to int64/float64/bool.
def test_construct_0d_native_python_types():
    assert sc.Variable(2).dtype == sc.dtype.int64
    assert sc.Variable(2.0).dtype == sc.dtype.float64
    assert sc.Variable(True).dtype == sc.dtype.bool
# Explicit dtype overrides the dtype deduced from the value.
def test_construct_0d_dtype():
    assert sc.Variable(2, dtype=np.int32).dtype == sc.dtype.int32
    assert sc.Variable(np.float64(2),
                       dtype=np.float32).dtype == sc.dtype.float32
    assert sc.Variable(1, dtype=bool).dtype == sc.dtype.bool
# rename_dims renames in place; data is untouched.
def test_rename_dims():
    values = np.arange(6).reshape(2, 3)
    xy = sc.Variable(dims=['x', 'y'], values=values)
    zy = sc.Variable(dims=['z', 'y'], values=values)
    xy.rename_dims({'x': 'z'})
    assert sc.identical(xy, zy)
def test_create_1d_with_strings():
    v = sc.Variable(dims=['x'], values=["aaa", "ff", "bb"])
    assert np.all(v.values == np.array(["aaa", "ff", "bb"]))
def test_bool_variable_repr():
    a = sc.Variable(dims=['x'],
                    values=np.array([False, True, True, False, True]))
    # NOTE(review): asserting a non-empty list is always True, so this can
    # never fail — likely `all(...)` was intended; verify whether repr is
    # expected to contain "..." before tightening.
    assert [expected in repr(a) for expected in ["True", "False", "..."]]
# Export checks for the elementwise math functions, with and without `out`.
def test_reciprocal():
    assert_export(sc.reciprocal, sc.Variable())
def test_reciprocal_out():
    var = sc.Variable()
    assert_export(sc.reciprocal, var, var)
def test_exp():
    var = sc.Variable()
    assert_export(sc.exp, x=var)
def test_log():
    var = sc.Variable()
    assert_export(sc.log, x=var)
def test_log10():
    var = sc.Variable()
    assert_export(sc.log10, x=var)
def test_sin():
    assert_export(sc.sin, sc.Variable())
def test_sin_out():
    var = sc.Variable()
    assert_export(sc.sin, var, out=var)
def test_cos():
    assert_export(sc.cos, sc.Variable())
def test_cos_out():
    var = sc.Variable()
    assert_export(sc.cos, var, out=var)
def test_tan():
    assert_export(sc.tan, sc.Variable())
def test_tan_out():
    var = sc.Variable()
    assert_export(sc.tan, var, out=var)
def test_asin():
    assert_export(sc.asin, sc.Variable())
def test_asin_out():
    var = sc.Variable()
    assert_export(sc.asin, var, out=var)
def test_acos():
    assert_export(sc.acos, sc.Variable())
def test_acos_out():
    var = sc.Variable()
    assert_export(sc.acos, var, out=var)
def test_atan():
    assert_export(sc.atan, sc.Variable())
def test_atan_out():
    var = sc.Variable()
    assert_export(sc.atan, var, out=var)
# atan2 takes y and x as keyword arguments, optionally with `out`.
def test_atan2():
    var = sc.Variable()
    assert_export(sc.atan2, y=var, x=var)
    assert_export(sc.atan2, y=var, x=var, out=var)
def test_variable_data_array_binary_ops():
    """DataArray / Variable and Variable / DataArray yield identical results."""
    da = sc.DataArray(1.0 * sc.units.m)
    v = 1.0 * sc.units.m
    assert sc.identical(da / v, v / da)
def test_isnan():
    """sc.isnan flags exactly the NaN entries."""
    data = sc.Variable(['x'], values=np.array([1, 1, np.nan]))
    expected = sc.Variable(['x'], values=[False, False, True])
    assert sc.identical(sc.isnan(data), expected)


def test_isinf():
    """sc.isinf flags both positive and negative infinities."""
    data = sc.Variable(['x'], values=np.array([1, -np.inf, np.inf]))
    expected = sc.Variable(['x'], values=[False, True, True])
    assert sc.identical(sc.isinf(data), expected)


def test_isfinite():
    """sc.isfinite is False for infinities and NaN, True for ordinary values."""
    data = sc.Variable(['x'], values=np.array([1, -np.inf, np.inf, np.nan]))
    expected = sc.Variable(['x'], values=[True, False, False, False])
    assert sc.identical(sc.isfinite(data), expected)


def test_isposinf():
    """sc.isposinf flags only positive infinity."""
    data = sc.Variable(['x'], values=np.array([1, -np.inf, np.inf]))
    expected = sc.Variable(['x'], values=[False, False, True])
    assert sc.identical(sc.isposinf(data), expected)


def test_isneginf():
    """sc.isneginf flags only negative infinity."""
    data = sc.Variable(['x'], values=np.array([1, -np.inf, np.inf]))
    expected = sc.Variable(['x'], values=[False, True, False])
    assert sc.identical(sc.isneginf(data), expected)
def test_nan_to_num():
    """nan_to_num substitutes the replacement value for NaN entries."""
    replacement = sc.Variable(value=0.0)
    a = sc.Variable(dims=['x'], values=np.array([1, np.nan]))
    result = sc.nan_to_num(a, replacement)
    expected = sc.Variable(dims=['x'],
                           values=np.array([1, replacement.value]))
    assert sc.identical(result, expected)
def test_nan_to_nan_with_pos_inf():
    """The ``posinf`` keyword replaces positive infinities only."""
    replacement = sc.Variable(value=0.0)
    a = sc.Variable(dims=['x'], values=np.array([1, np.inf]))
    result = sc.nan_to_num(a, posinf=replacement)
    expected = sc.Variable(dims=['x'],
                           values=np.array([1, replacement.value]))
    assert sc.identical(result, expected)


def test_nan_to_nan_with_neg_inf():
    """The ``neginf`` keyword replaces negative infinities only."""
    replacement = sc.Variable(value=0.0)
    a = sc.Variable(dims=['x'], values=np.array([1, -np.inf]))
    result = sc.nan_to_num(a, neginf=replacement)
    expected = sc.Variable(dims=['x'],
                           values=np.array([1, replacement.value]))
    assert sc.identical(result, expected)
def test_nan_to_nan_with_multiple_special_replacements():
    """nan, posinf and neginf each get their own replacement in a single call."""
    a = sc.Variable(dims=['x'], values=np.array([1, np.nan, np.inf, -np.inf]))
    replace_nan = sc.Variable(value=-1.0)
    replace_pos_inf = sc.Variable(value=-2.0)
    replace_neg_inf = sc.Variable(value=-3.0)
    result = sc.nan_to_num(a,
                           nan=replace_nan,
                           posinf=replace_pos_inf,
                           neginf=replace_neg_inf)
    # Expected values line up with the order of specials in the input above.
    replacement_values = [r.value
                          for r in (replace_nan, replace_pos_inf,
                                    replace_neg_inf)]
    expected = sc.Variable(dims=['x'],
                           values=np.array([1] + replacement_values))
    assert sc.identical(result, expected)
def test_nan_to_num_out():
    """nan_to_num writes its result into the provided ``out`` Variable."""
    replacement = sc.Variable(value=0.0)
    a = sc.Variable(dims=['x'], values=np.array([1, np.nan]))
    out = sc.Variable(dims=['x'], values=np.zeros(2))
    sc.nan_to_num(a, nan=replacement, out=out)
    expected = sc.Variable(dims=['x'],
                           values=np.array([1, replacement.value]))
    assert sc.identical(out, expected)
def test_nan_to_num_out_with_multiple_special_replacements():
    """Chained in-place nan_to_num calls replace nan, -inf and +inf one at a time.

    Each call writes into ``out``; subsequent calls read ``out`` back as
    input, so the special values are eliminated step by step and the
    intermediate state is checked after every call.
    """
    a = sc.Variable(dims=['x'], values=np.array([1, np.inf, -np.inf, np.nan]))
    out = sc.Variable(dims=['x'], values=np.zeros(4))
    replace = sc.Variable(value=0.0)
    # just replace nans
    sc.nan_to_num(a, nan=replace, out=out)
    expected = sc.Variable(dims=['x'],
                           values=np.array([1, np.inf, -np.inf,
                                            replace.value]))
    assert sc.identical(out, expected)
    # replace neg inf
    sc.nan_to_num(out, neginf=replace, out=out)
    expected = sc.Variable(dims=['x'],
                           values=np.array(
                               [1, np.inf, replace.value, replace.value]))
    assert sc.identical(out, expected)
    # replace pos inf
    sc.nan_to_num(out, posinf=replace, out=out)
    expected = sc.Variable(dims=['x'],
                           values=np.array([1] + [replace.value] * 3))
    assert sc.identical(out, expected)
def test_position():
    """sc.geometry.position is exported and takes x, y, z keyword Variables."""
    v = sc.Variable()
    assert_export(sc.geometry.position, x=v, y=v, z=v)
def test_comparison():
    """All six elementwise comparison functions are exported with x/y keywords."""
    v = sc.Variable()
    for comparison in (sc.less, sc.greater, sc.greater_equal,
                       sc.less_equal, sc.equal, sc.not_equal):
        assert_export(comparison, x=v, y=v)
def test_radd_int():
    """int + Variable and Variable + int both keep the integer dtype."""
    v = sc.Variable(dims=['x'], values=[1, 2, 3])
    assert (v + 1).dtype == v.dtype
    assert (1 + v).dtype == v.dtype


def test_rsub_int():
    """Subtraction with an int scalar keeps the integer dtype on both sides."""
    v = sc.Variable(dims=['x'], values=[1, 2, 3])
    assert (v - 1).dtype == v.dtype
    assert (1 - v).dtype == v.dtype


def test_rmul_int():
    """Multiplication with an int scalar keeps the integer dtype on both sides."""
    v = sc.Variable(dims=['x'], values=[1, 2, 3])
    assert (v * 1).dtype == v.dtype
    assert (1 * v).dtype == v.dtype


def test_rtruediv_int():
    """True division always promotes integer Variables to float64."""
    v = sc.Variable(dims=['x'], values=[1, 2, 3])
    assert (v / 1).dtype == sc.dtype.float64
    assert (1 / v).dtype == sc.dtype.float64
def test_sort():
    """sc.sort and sc.issorted accept ``dim`` and ``order`` keyword arguments."""
    v = sc.Variable()
    assert_export(sc.sort, x=v, dim='x', order='ascending')
    assert_export(sc.issorted, x=v, dim='x', order='ascending')
| [
"numpy.array_equal",
"numpy.ones",
"scipp.sum",
"numpy.arange",
"numpy.float64",
"scipp.Dataset",
"scipp.Variable",
"scipp.identical",
"scipp.mean",
"pytest.raises",
"scipp.vectors",
"scipp.DataArray",
"scipp.Unit",
"numpy.testing.assert_array_equal",
"scipp.nan_to_num",
"numpy.float32... | [((256, 284), 'numpy.arange', 'np.arange', (['(1)', '(4)'], {'dtype': 'float'}), '(1, 4, dtype=float)\n', (265, 284), True, 'import numpy as np\n'), ((293, 329), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': 'data'}), "(dims=['x'], values=data)\n", (304, 329), True, 'import scipp as sc\n'), ((338, 374), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': 'data'}), "(dims=['x'], values=data)\n", (349, 374), True, 'import scipp as sc\n'), ((502, 515), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (513, 515), True, 'import scipp as sc\n'), ((702, 736), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'shape': '[4]'}), "(dims=['x'], shape=[4])\n", (713, 736), True, 'import scipp as sc\n'), ((820, 878), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'shape': '[2]', 'dtype': 'sc.dtype.float32'}), "(dims=['x'], shape=[2], dtype=sc.dtype.float32)\n", (831, 878), True, 'import scipp as sc\n'), ((2719, 2737), 'scipp.Variable', 'sc.Variable', (['value'], {}), '(value)\n', (2730, 2737), True, 'import scipp as sc\n'), ((3058, 3085), 'scipp.Variable', 'sc.Variable', (['(1.2)'], {'unit': 'unit'}), '(1.2, unit=unit)\n', (3069, 3085), True, 'import scipp as sc\n'), ((3320, 3337), 'scipp.Variable', 'sc.Variable', (['elem'], {}), '(elem)\n', (3331, 3337), True, 'import scipp as sc\n'), ((3349, 3378), 'scipp.identical', 'sc.identical', (['var.value', 'elem'], {}), '(var.value, elem)\n', (3361, 3378), True, 'import scipp as sc\n'), ((3503, 3530), 'scipp.Variable', 'sc.Variable', (["elem['x', 1:3]"], {}), "(elem['x', 1:3])\n", (3514, 3530), True, 'import scipp as sc\n'), ((3698, 3715), 'scipp.Variable', 'sc.Variable', (['elem'], {}), '(elem)\n', (3709, 3715), True, 'import scipp as sc\n'), ((3727, 3756), 'scipp.identical', 'sc.identical', (['var.value', 'elem'], {}), '(var.value, elem)\n', (3739, 3756), True, 'import scipp as sc\n'), ((3882, 3909), 'scipp.Variable', 'sc.Variable', (["elem['x', 1:3]"], {}), 
"(elem['x', 1:3])\n", (3893, 3909), True, 'import scipp as sc\n'), ((4076, 4093), 'scipp.Variable', 'sc.Variable', (['elem'], {}), '(elem)\n', (4087, 4093), True, 'import scipp as sc\n'), ((4105, 4134), 'scipp.identical', 'sc.identical', (['var.value', 'elem'], {}), '(var.value, elem)\n', (4117, 4134), True, 'import scipp as sc\n'), ((4258, 4285), 'scipp.Variable', 'sc.Variable', (["elem['x', 1:3]"], {}), "(elem['x', 1:3])\n", (4269, 4285), True, 'import scipp as sc\n'), ((4374, 4407), 'scipp.Variable', 'sc.Variable', (['(1.2)'], {'unit': 'sc.units.m'}), '(1.2, unit=sc.units.m)\n', (4385, 4407), True, 'import scipp as sc\n'), ((4582, 4615), 'scipp.Variable', 'sc.Variable', (['(1.2)'], {'unit': 'sc.units.m'}), '(1.2, unit=sc.units.m)\n', (4593, 4615), True, 'import scipp as sc\n'), ((4654, 4681), 'scipp.identical', 'sc.identical', (['var', 'expected'], {}), '(var, expected)\n', (4666, 4681), True, 'import scipp as sc\n'), ((4723, 4785), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['row']", 'values': "['a', 'bb']", 'unit': 'sc.units.m'}), "(dims=['row'], values=['a', 'bb'], unit=sc.units.m)\n", (4734, 4785), True, 'import scipp as sc\n'), ((5039, 5109), 'scipp.vectors', 'sc.vectors', ([], {'dims': "['x']", 'values': '[[1, 2, 3], [4, 5, 6]]', 'unit': 'sc.units.m'}), "(dims=['x'], values=[[1, 2, 3], [4, 5, 6]], unit=sc.units.m)\n", (5049, 5109), True, 'import scipp as sc\n'), ((5188, 5243), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['var.values[0]', '[1, 2, 3]'], {}), '(var.values[0], [1, 2, 3])\n', (5217, 5243), True, 'import numpy as np\n'), ((5248, 5303), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['var.values[1]', '[4, 5, 6]'], {}), '(var.values[1], [4, 5, 6])\n', (5277, 5303), True, 'import numpy as np\n'), ((5626, 5681), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['var.values[0]', '[0, 1, 2]'], {}), '(var.values[0], [0, 1, 2])\n', (5655, 5681), True, 'import numpy as np\n'), 
((5686, 5741), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['var.values[1]', '[3, 4, 5]'], {}), '(var.values[1], [3, 4, 5])\n', (5715, 5741), True, 'import numpy as np\n'), ((6583, 6617), 'scipp.Variable', 'sc.Variable', (['(1.5)'], {'unit': 'sc.units.kg'}), '(1.5, unit=sc.units.kg)\n', (6594, 6617), True, 'import scipp as sc\n'), ((6629, 6657), 'scipp.identical', 'sc.identical', (['reference', 'var'], {}), '(reference, var)\n', (6641, 6657), True, 'import scipp as sc\n'), ((6699, 6712), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (6710, 6712), True, 'import scipp as sc\n'), ((6893, 6915), 'scipp.Variable', 'sc.Variable', ([], {'value': '"""a"""'}), "(value='a')\n", (6904, 6915), True, 'import scipp as sc\n'), ((7063, 7098), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'shape': '(1,)'}), "(dims=['x'], shape=(1,))\n", (7074, 7098), True, 'import scipp as sc\n'), ((7286, 7321), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'shape': '(2,)'}), "(dims=['x'], shape=(2,))\n", (7297, 7321), True, 'import scipp as sc\n'), ((7420, 7455), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'shape': '(2,)'}), "(dims=['x'], shape=(2,))\n", (7431, 7455), True, 'import scipp as sc\n'), ((7623, 7658), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'shape': '(2,)'}), "(dims=['x'], shape=(2,))\n", (7634, 7658), True, 'import scipp as sc\n'), ((7795, 7837), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': "['a', 'b']"}), "(dims=['x'], values=['a', 'b'])\n", (7806, 7837), True, 'import scipp as sc\n'), ((8073, 8111), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': '[1, 2]'}), "(dims=['x'], values=[1, 2])\n", (8084, 8111), True, 'import scipp as sc\n'), ((8271, 8330), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'shape': '(2,)', 'dtype': 'sc.dtype.Dataset'}), "(dims=['x'], shape=(2,), dtype=sc.dtype.Dataset)\n", (8282, 8330), True, 'import scipp as sc\n'), ((8341, 8376), 
'scipp.Dataset', 'sc.Dataset', (["{'a': 1.5 * sc.units.m}"], {}), "({'a': 1.5 * sc.units.m})\n", (8351, 8376), True, 'import scipp as sc\n'), ((8386, 8421), 'scipp.Dataset', 'sc.Dataset', (["{'a': 2.5 * sc.units.m}"], {}), "({'a': 2.5 * sc.units.m})\n", (8396, 8421), True, 'import scipp as sc\n'), ((8459, 8490), 'scipp.identical', 'sc.identical', (['var.values[0]', 'd1'], {}), '(var.values[0], d1)\n', (8471, 8490), True, 'import scipp as sc\n'), ((8502, 8533), 'scipp.identical', 'sc.identical', (['var.values[1]', 'd2'], {}), '(var.values[1], d2)\n', (8514, 8533), True, 'import scipp as sc\n'), ((8583, 8618), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'shape': '(2,)'}), "(dims=['x'], shape=(2,))\n", (8594, 8618), True, 'import scipp as sc\n'), ((8726, 8768), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x', 'y']", 'shape': '(2, 3)'}), "(dims=['x', 'y'], shape=(2, 3))\n", (8737, 8768), True, 'import scipp as sc\n'), ((9123, 9165), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x', 'y']", 'shape': '(2, 3)'}), "(dims=['x', 'y'], shape=(2, 3))\n", (9134, 9165), True, 'import scipp as sc\n'), ((9291, 9349), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x', 'y']", 'shape': '(2, 3)', 'variances': '(True)'}), "(dims=['x', 'y'], shape=(2, 3), variances=True)\n", (9302, 9349), True, 'import scipp as sc\n'), ((9538, 9559), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3)'}), '(shape=(2, 3))\n', (9545, 9559), True, 'import numpy as np\n'), ((10850, 10916), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': '[1, 2, 3, 4]', 'dtype': 'sc.dtype.int64'}), "(dims=['x'], values=[1, 2, 3, 4], dtype=sc.dtype.int64)\n", (10861, 10916), True, 'import scipp as sc\n'), ((10937, 10979), 'scipp.Variable', 'sc.Variable', ([], {'value': '(5)', 'dtype': 'sc.dtype.int64'}), '(value=5, dtype=sc.dtype.int64)\n', (10948, 10979), True, 'import scipp as sc\n'), ((11506, 11526), 'scipp.Variable', 'sc.Variable', ([], {'value': '(1)'}), '(value=1)\n', (11517, 11526), 
True, 'import scipp as sc\n'), ((11560, 11589), 'scipp.Variable', 'sc.Variable', (["['x']"], {'shape': '[2]'}), "(['x'], shape=[2])\n", (11571, 11589), True, 'import scipp as sc\n'), ((11629, 11666), 'scipp.Variable', 'sc.Variable', (["['y', 'z']"], {'shape': '[3, 4]'}), "(['y', 'z'], shape=[3, 4])\n", (11640, 11666), True, 'import scipp as sc\n'), ((11740, 11756), 'scipp.Variable', 'sc.Variable', (['(2.2)'], {}), '(2.2)\n', (11751, 11756), True, 'import scipp as sc\n'), ((11765, 11781), 'scipp.Variable', 'sc.Variable', (['(1.2)'], {}), '(1.2)\n', (11776, 11781), True, 'import scipp as sc\n'), ((11816, 11841), 'scipp.identical', 'sc.identical', (['a', 'expected'], {}), '(a, expected)\n', (11828, 11841), True, 'import scipp as sc\n'), ((11853, 11878), 'scipp.identical', 'sc.identical', (['b', 'expected'], {}), '(b, expected)\n', (11865, 11878), True, 'import scipp as sc\n'), ((12184, 12202), 'scipp.identical', 'sc.identical', (['a', 'b'], {}), '(a, b)\n', (12196, 12202), True, 'import scipp as sc\n'), ((12237, 12259), 'scipp.Variable', 'sc.Variable', (['(2.2 - 1.0)'], {}), '(2.2 - 1.0)\n', (12248, 12259), True, 'import scipp as sc\n'), ((12268, 12284), 'scipp.Variable', 'sc.Variable', (['(2.2)'], {}), '(2.2)\n', (12279, 12284), True, 'import scipp as sc\n'), ((12319, 12344), 'scipp.identical', 'sc.identical', (['a', 'expected'], {}), '(a, expected)\n', (12331, 12344), True, 'import scipp as sc\n'), ((12356, 12381), 'scipp.identical', 'sc.identical', (['b', 'expected'], {}), '(b, expected)\n', (12368, 12381), True, 'import scipp as sc\n'), ((12406, 12424), 'scipp.identical', 'sc.identical', (['a', 'b'], {}), '(a, b)\n', (12418, 12424), True, 'import scipp as sc\n'), ((12459, 12475), 'scipp.Variable', 'sc.Variable', (['(2.4)'], {}), '(2.4)\n', (12470, 12475), True, 'import scipp as sc\n'), ((12484, 12500), 'scipp.Variable', 'sc.Variable', (['(1.2)'], {}), '(1.2)\n', (12495, 12500), True, 'import scipp as sc\n'), ((12535, 12560), 'scipp.identical', 'sc.identical', 
(['a', 'expected'], {}), '(a, expected)\n', (12547, 12560), True, 'import scipp as sc\n'), ((12572, 12597), 'scipp.identical', 'sc.identical', (['b', 'expected'], {}), '(b, expected)\n', (12584, 12597), True, 'import scipp as sc\n'), ((12622, 12640), 'scipp.identical', 'sc.identical', (['a', 'b'], {}), '(a, b)\n', (12634, 12640), True, 'import scipp as sc\n'), ((12675, 12691), 'scipp.Variable', 'sc.Variable', (['(1.2)'], {}), '(1.2)\n', (12686, 12691), True, 'import scipp as sc\n'), ((12700, 12716), 'scipp.Variable', 'sc.Variable', (['(2.4)'], {}), '(2.4)\n', (12711, 12716), True, 'import scipp as sc\n'), ((12751, 12776), 'scipp.identical', 'sc.identical', (['a', 'expected'], {}), '(a, expected)\n', (12763, 12776), True, 'import scipp as sc\n'), ((12788, 12813), 'scipp.identical', 'sc.identical', (['b', 'expected'], {}), '(b, expected)\n', (12800, 12813), True, 'import scipp as sc\n'), ((12838, 12856), 'scipp.identical', 'sc.identical', (['a', 'b'], {}), '(a, b)\n', (12850, 12856), True, 'import scipp as sc\n'), ((12891, 12909), 'scipp.Variable', 'sc.Variable', (['(False)'], {}), '(False)\n', (12902, 12909), True, 'import scipp as sc\n'), ((12918, 12935), 'scipp.Variable', 'sc.Variable', (['(True)'], {}), '(True)\n', (12929, 12935), True, 'import scipp as sc\n'), ((12955, 12973), 'scipp.Variable', 'sc.Variable', (['(False)'], {}), '(False)\n', (12966, 12973), True, 'import scipp as sc\n'), ((12985, 13010), 'scipp.identical', 'sc.identical', (['a', 'expected'], {}), '(a, expected)\n', (12997, 13010), True, 'import scipp as sc\n'), ((13022, 13047), 'scipp.identical', 'sc.identical', (['b', 'expected'], {}), '(b, expected)\n', (13034, 13047), True, 'import scipp as sc\n'), ((13057, 13074), 'scipp.Variable', 'sc.Variable', (['(True)'], {}), '(True)\n', (13068, 13074), True, 'import scipp as sc\n'), ((13086, 13104), 'scipp.identical', 'sc.identical', (['a', 'b'], {}), '(a, b)\n', (13098, 13104), True, 'import scipp as sc\n'), ((13138, 13155), 'scipp.Variable', 
'sc.Variable', (['(True)'], {}), '(True)\n', (13149, 13155), True, 'import scipp as sc\n'), ((13164, 13182), 'scipp.Variable', 'sc.Variable', (['(False)'], {}), '(False)\n', (13175, 13182), True, 'import scipp as sc\n'), ((13202, 13219), 'scipp.Variable', 'sc.Variable', (['(True)'], {}), '(True)\n', (13213, 13219), True, 'import scipp as sc\n'), ((13231, 13256), 'scipp.identical', 'sc.identical', (['a', 'expected'], {}), '(a, expected)\n', (13243, 13256), True, 'import scipp as sc\n'), ((13268, 13293), 'scipp.identical', 'sc.identical', (['b', 'expected'], {}), '(b, expected)\n', (13280, 13293), True, 'import scipp as sc\n'), ((13303, 13320), 'scipp.Variable', 'sc.Variable', (['(True)'], {}), '(True)\n', (13314, 13320), True, 'import scipp as sc\n'), ((13332, 13350), 'scipp.identical', 'sc.identical', (['a', 'b'], {}), '(a, b)\n', (13344, 13350), True, 'import scipp as sc\n'), ((13385, 13402), 'scipp.Variable', 'sc.Variable', (['(True)'], {}), '(True)\n', (13396, 13402), True, 'import scipp as sc\n'), ((13411, 13429), 'scipp.Variable', 'sc.Variable', (['(False)'], {}), '(False)\n', (13422, 13429), True, 'import scipp as sc\n'), ((13449, 13466), 'scipp.Variable', 'sc.Variable', (['(True)'], {}), '(True)\n', (13460, 13466), True, 'import scipp as sc\n'), ((13478, 13503), 'scipp.identical', 'sc.identical', (['a', 'expected'], {}), '(a, expected)\n', (13490, 13503), True, 'import scipp as sc\n'), ((13515, 13540), 'scipp.identical', 'sc.identical', (['b', 'expected'], {}), '(b, expected)\n', (13527, 13540), True, 'import scipp as sc\n'), ((13550, 13567), 'scipp.Variable', 'sc.Variable', (['(True)'], {}), '(True)\n', (13561, 13567), True, 'import scipp as sc\n'), ((13579, 13597), 'scipp.identical', 'sc.identical', (['a', 'b'], {}), '(a, b)\n', (13591, 13597), True, 'import scipp as sc\n'), ((13701, 13738), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data + data)'], {}), '(c.values, data + data)\n', (13715, 13738), True, 'import numpy as np\n'), ((13766, 
13802), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data + 2.0)'], {}), '(c.values, data + 2.0)\n', (13780, 13802), True, 'import numpy as np\n'), ((13834, 13871), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data + data)'], {}), '(c.values, data + data)\n', (13848, 13871), True, 'import numpy as np\n'), ((13894, 13938), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data + data + data)'], {}), '(c.values, data + data + data)\n', (13908, 13938), True, 'import numpy as np\n'), ((13967, 14018), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data + data + data + data)'], {}), '(c.values, data + data + data + data)\n', (13981, 14018), True, 'import numpy as np\n'), ((14046, 14103), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data + data + data + data + 3.5)'], {}), '(c.values, data + data + data + data + 3.5)\n', (14060, 14103), True, 'import numpy as np\n'), ((14208, 14245), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data - data)'], {}), '(c.values, data - data)\n', (14222, 14245), True, 'import numpy as np\n'), ((14273, 14309), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data - 2.0)'], {}), '(c.values, data - 2.0)\n', (14287, 14309), True, 'import numpy as np\n'), ((14341, 14378), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data - data)'], {}), '(c.values, data - data)\n', (14355, 14378), True, 'import numpy as np\n'), ((14401, 14445), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data - data - data)'], {}), '(c.values, data - data - data)\n', (14415, 14445), True, 'import numpy as np\n'), ((14474, 14525), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data - data - data - data)'], {}), '(c.values, data - data - data - data)\n', (14488, 14525), True, 'import numpy as np\n'), ((14553, 14610), 'numpy.array_equal', 'np.array_equal', (['c.values', '(3.5 - data + data + data + data)'], {}), '(c.values, 3.5 - data + data + data + data)\n', (14567, 14610), True, 'import numpy 
as np\n'), ((14718, 14755), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data * data)'], {}), '(c.values, data * data)\n', (14732, 14755), True, 'import numpy as np\n'), ((14783, 14819), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data * 2.0)'], {}), '(c.values, data * 2.0)\n', (14797, 14819), True, 'import numpy as np\n'), ((14851, 14888), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data * data)'], {}), '(c.values, data * data)\n', (14865, 14888), True, 'import numpy as np\n'), ((14911, 14955), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data * data * data)'], {}), '(c.values, data * data * data)\n', (14925, 14955), True, 'import numpy as np\n'), ((14984, 15035), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data * data * data * data)'], {}), '(c.values, data * data * data * data)\n', (14998, 15035), True, 'import numpy as np\n'), ((15063, 15120), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data * data * data * data * 3.5)'], {}), '(c.values, data * data * data * data * 3.5)\n', (15077, 15120), True, 'import numpy as np\n'), ((15226, 15263), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data / data)'], {}), '(c.values, data / data)\n', (15240, 15263), True, 'import numpy as np\n'), ((15291, 15327), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data / 2.0)'], {}), '(c.values, data / 2.0)\n', (15305, 15327), True, 'import numpy as np\n'), ((15359, 15396), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data / data)'], {}), '(c.values, data / data)\n', (15373, 15396), True, 'import numpy as np\n'), ((15419, 15463), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data / data / data)'], {}), '(c.values, data / data / data)\n', (15433, 15463), True, 'import numpy as np\n'), ((15492, 15543), 'numpy.array_equal', 'np.array_equal', (['c.values', '(data / data / data / data)'], {}), '(c.values, data / data / data / data)\n', (15506, 15543), True, 'import numpy as np\n'), ((15571, 
15607), 'numpy.array_equal', 'np.array_equal', (['c.values', '(2.0 / data)'], {}), '(c.values, 2.0 / data)\n', (15585, 15607), True, 'import numpy as np\n'), ((15649, 15667), 'scipp.Variable', 'sc.Variable', (['(False)'], {}), '(False)\n', (15660, 15667), True, 'import scipp as sc\n'), ((15676, 15693), 'scipp.Variable', 'sc.Variable', (['(True)'], {}), '(True)\n', (15687, 15693), True, 'import scipp as sc\n'), ((16054, 16072), 'scipp.Variable', 'sc.Variable', (['(False)'], {}), '(False)\n', (16065, 16072), True, 'import scipp as sc\n'), ((16081, 16098), 'scipp.Variable', 'sc.Variable', (['(True)'], {}), '(True)\n', (16092, 16098), True, 'import scipp as sc\n'), ((16563, 16581), 'scipp.Variable', 'sc.Variable', (['(False)'], {}), '(False)\n', (16574, 16581), True, 'import scipp as sc\n'), ((16590, 16607), 'scipp.Variable', 'sc.Variable', (['(True)'], {}), '(True)\n', (16601, 16607), True, 'import scipp as sc\n'), ((16995, 17013), 'scipp.Variable', 'sc.Variable', (['(False)'], {}), '(False)\n', (17006, 17013), True, 'import scipp as sc\n'), ((17022, 17039), 'scipp.Variable', 'sc.Variable', (['(True)'], {}), '(True)\n', (17033, 17039), True, 'import scipp as sc\n'), ((17411, 17429), 'scipp.Variable', 'sc.Variable', (['(False)'], {}), '(False)\n', (17422, 17429), True, 'import scipp as sc\n'), ((17438, 17455), 'scipp.Variable', 'sc.Variable', (['(True)'], {}), '(True)\n', (17449, 17455), True, 'import scipp as sc\n'), ((17870, 17888), 'scipp.Variable', 'sc.Variable', (['(False)'], {}), '(False)\n', (17881, 17888), True, 'import scipp as sc\n'), ((17897, 17914), 'scipp.Variable', 'sc.Variable', (['(True)'], {}), '(True)\n', (17908, 17914), True, 'import scipp as sc\n'), ((18388, 18426), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': '[10.0]'}), "(dims=['x'], values=[10.0])\n", (18399, 18426), True, 'import scipp as sc\n'), ((18503, 18524), 'scipp.identical', 'sc.identical', (['v', 'copy'], {}), '(v, copy)\n', (18515, 18524), True, 'import scipp as 
sc\n'), ((18615, 18633), 'scipp.identical', 'sc.identical', (['a', 'b'], {}), '(a, b)\n', (18627, 18633), True, 'import scipp as sc\n'), ((18645, 18669), 'scipp.identical', 'sc.identical', (['a', 'a_slice'], {}), '(a, a_slice)\n', (18657, 18669), True, 'import scipp as sc\n'), ((18681, 18711), 'scipp.identical', 'sc.identical', (['a_slice', 'b_slice'], {}), '(a_slice, b_slice)\n', (18693, 18711), True, 'import scipp as sc\n'), ((18723, 18741), 'scipp.identical', 'sc.identical', (['b', 'a'], {}), '(b, a)\n', (18735, 18741), True, 'import scipp as sc\n'), ((18753, 18777), 'scipp.identical', 'sc.identical', (['b_slice', 'a'], {}), '(b_slice, a)\n', (18765, 18777), True, 'import scipp as sc\n'), ((18789, 18819), 'scipp.identical', 'sc.identical', (['b_slice', 'a_slice'], {}), '(b_slice, a_slice)\n', (18801, 18819), True, 'import scipp as sc\n'), ((19156, 19169), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (19167, 19169), True, 'import scipp as sc\n'), ((19483, 19496), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (19494, 19496), True, 'import scipp as sc\n'), ((19704, 19717), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (19715, 19717), True, 'import scipp as sc\n'), ((20689, 20718), 'scipp.sum', 'sc.sum', (['var', '"""y"""'], {'out': 'out_var'}), "(var, 'y', out=out_var)\n", (20695, 20718), True, 'import scipp as sc\n'), ((20730, 20761), 'scipp.identical', 'sc.identical', (['out_var', 'expected'], {}), '(out_var, expected)\n', (20742, 20761), True, 'import scipp as sc\n'), ((20773, 20805), 'scipp.identical', 'sc.identical', (['out_view', 'expected'], {}), '(out_view, expected)\n', (20785, 20805), True, 'import scipp as sc\n'), ((20843, 20856), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (20854, 20856), True, 'import scipp as sc\n'), ((20958, 20978), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)'], {}), '(2, 3)\n', (20972, 20978), True, 'import numpy as np\n'), ((20995, 21015), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)'], {}), 
'(2, 3)\n', (21009, 21015), True, 'import numpy as np\n'), ((21026, 21069), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x', 'y']", 'values': 'values'}), "(dims=['x', 'y'], values=values)\n", (21037, 21069), True, 'import scipp as sc\n'), ((21085, 21149), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x', 'y']", 'values': 'values', 'variances': 'variances'}), "(dims=['x', 'y'], values=values, variances=variances)\n", (21096, 21149), True, 'import scipp as sc\n'), ((21307, 21334), 'scipp.identical', 'sc.identical', (['var', 'expected'], {}), '(var, expected)\n', (21319, 21334), True, 'import scipp as sc\n'), ((21376, 21396), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)'], {}), '(2, 3)\n', (21390, 21396), True, 'import numpy as np\n'), ((21413, 21433), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)'], {}), '(2, 3)\n', (21427, 21433), True, 'import numpy as np\n'), ((21444, 21487), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x', 'y']", 'values': 'values'}), "(dims=['x', 'y'], values=values)\n", (21455, 21487), True, 'import scipp as sc\n'), ((21503, 21567), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x', 'y']", 'values': 'values', 'variances': 'variances'}), "(dims=['x', 'y'], values=values, variances=variances)\n", (21514, 21567), True, 'import scipp as sc\n'), ((21734, 21761), 'scipp.identical', 'sc.identical', (['var', 'expected'], {}), '(var, expected)\n', (21746, 21761), True, 'import scipp as sc\n'), ((21805, 21825), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)'], {}), '(2, 3)\n', (21819, 21825), True, 'import numpy as np\n'), ((21842, 21862), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)'], {}), '(2, 3)\n', (21856, 21862), True, 'import numpy as np\n'), ((21873, 21937), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x', 'y']", 'values': 'values', 'variances': 'variances'}), "(dims=['x', 'y'], values=values, variances=variances)\n", (21884, 21937), True, 'import scipp as sc\n'), ((21953, 21996), 
'scipp.Variable', 'sc.Variable', ([], {'dims': "['x', 'y']", 'values': 'values'}), "(dims=['x', 'y'], values=values)\n", (21964, 21996), True, 'import scipp as sc\n'), ((22103, 22130), 'scipp.identical', 'sc.identical', (['var', 'expected'], {}), '(var, expected)\n', (22115, 22130), True, 'import scipp as sc\n'), ((22185, 22205), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)'], {}), '(2, 3)\n', (22199, 22205), True, 'import numpy as np\n'), ((22293, 22336), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x', 'y']", 'values': 'values'}), "(dims=['x', 'y'], values=values)\n", (22304, 22336), True, 'import scipp as sc\n'), ((22352, 22416), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x', 'y']", 'values': 'values', 'variances': 'variances'}), "(dims=['x', 'y'], values=values, variances=variances)\n", (22363, 22416), True, 'import scipp as sc\n'), ((22574, 22601), 'scipp.identical', 'sc.identical', (['var', 'expected'], {}), '(var, expected)\n', (22586, 22601), True, 'import scipp as sc\n'), ((22951, 22964), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (22962, 22964), True, 'import scipp as sc\n'), ((23002, 23037), 'scipp.identical', 'sc.identical', (['var', '(0.0 * sc.units.m)'], {}), '(var, 0.0 * sc.units.m)\n', (23014, 23037), True, 'import scipp as sc\n'), ((23081, 23116), 'scipp.identical', 'sc.identical', (['var', '(0.0 / sc.units.m)'], {}), '(var, 0.0 / sc.units.m)\n', (23093, 23116), True, 'import scipp as sc\n'), ((24343, 24386), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x', 'y']", 'values': 'values'}), "(dims=['x', 'y'], values=values)\n", (24354, 24386), True, 'import scipp as sc\n'), ((24396, 24439), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['z', 'y']", 'values': 'values'}), "(dims=['z', 'y'], values=values)\n", (24407, 24439), True, 'import scipp as sc\n'), ((24482, 24502), 'scipp.identical', 'sc.identical', (['xy', 'zy'], {}), '(xy, zy)\n', (24494, 24502), True, 'import scipp as sc\n'), ((24548, 24599), 'scipp.Variable', 
'sc.Variable', ([], {'dims': "['x']", 'values': "['aaa', 'ff', 'bb']"}), "(dims=['x'], values=['aaa', 'ff', 'bb'])\n", (24559, 24599), True, 'import scipp as sc\n'), ((24983, 24996), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (24994, 24996), True, 'import scipp as sc\n'), ((25068, 25081), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (25079, 25081), True, 'import scipp as sc\n'), ((25143, 25156), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (25154, 25156), True, 'import scipp as sc\n'), ((25220, 25233), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (25231, 25233), True, 'import scipp as sc\n'), ((25360, 25373), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (25371, 25373), True, 'import scipp as sc\n'), ((25505, 25518), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (25516, 25518), True, 'import scipp as sc\n'), ((25650, 25663), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (25661, 25663), True, 'import scipp as sc\n'), ((25798, 25811), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (25809, 25811), True, 'import scipp as sc\n'), ((25947, 25960), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (25958, 25960), True, 'import scipp as sc\n'), ((26096, 26109), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (26107, 26109), True, 'import scipp as sc\n'), ((26181, 26194), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (26192, 26194), True, 'import scipp as sc\n'), ((26341, 26371), 'scipp.DataArray', 'sc.DataArray', (['(1.0 * sc.units.m)'], {}), '(1.0 * sc.units.m)\n', (26353, 26371), True, 'import scipp as sc\n'), ((26410, 26440), 'scipp.identical', 'sc.identical', (['(a / var)', '(var / a)'], {}), '(a / var, var / a)\n', (26422, 26440), True, 'import scipp as sc\n'), ((27476, 27498), 'scipp.Variable', 'sc.Variable', ([], {'value': '(0.0)'}), '(value=0.0)\n', (27487, 27498), True, 'import scipp as sc\n'), ((27507, 27532), 'scipp.nan_to_num', 'sc.nan_to_num', (['a', 'replace'], {}), '(a, replace)\n', (27520, 27532), True, 
'import scipp as sc\n'), ((27620, 27645), 'scipp.identical', 'sc.identical', (['b', 'expected'], {}), '(b, expected)\n', (27632, 27645), True, 'import scipp as sc\n'), ((27760, 27782), 'scipp.Variable', 'sc.Variable', ([], {'value': '(0.0)'}), '(value=0.0)\n', (27771, 27782), True, 'import scipp as sc\n'), ((27791, 27823), 'scipp.nan_to_num', 'sc.nan_to_num', (['a'], {'posinf': 'replace'}), '(a, posinf=replace)\n', (27804, 27823), True, 'import scipp as sc\n'), ((27911, 27936), 'scipp.identical', 'sc.identical', (['b', 'expected'], {}), '(b, expected)\n', (27923, 27936), True, 'import scipp as sc\n'), ((28052, 28074), 'scipp.Variable', 'sc.Variable', ([], {'value': '(0.0)'}), '(value=0.0)\n', (28063, 28074), True, 'import scipp as sc\n'), ((28083, 28115), 'scipp.nan_to_num', 'sc.nan_to_num', (['a'], {'neginf': 'replace'}), '(a, neginf=replace)\n', (28096, 28115), True, 'import scipp as sc\n'), ((28203, 28228), 'scipp.identical', 'sc.identical', (['b', 'expected'], {}), '(b, expected)\n', (28215, 28228), True, 'import scipp as sc\n'), ((28386, 28409), 'scipp.Variable', 'sc.Variable', ([], {'value': '(-1.0)'}), '(value=-1.0)\n', (28397, 28409), True, 'import scipp as sc\n'), ((28432, 28455), 'scipp.Variable', 'sc.Variable', ([], {'value': '(-2.0)'}), '(value=-2.0)\n', (28443, 28455), True, 'import scipp as sc\n'), ((28478, 28501), 'scipp.Variable', 'sc.Variable', ([], {'value': '(-3.0)'}), '(value=-3.0)\n', (28489, 28501), True, 'import scipp as sc\n'), ((28510, 28596), 'scipp.nan_to_num', 'sc.nan_to_num', (['a'], {'nan': 'replace_nan', 'posinf': 'replace_pos_inf', 'neginf': 'replace_neg_inf'}), '(a, nan=replace_nan, posinf=replace_pos_inf, neginf=\n replace_neg_inf)\n', (28523, 28596), True, 'import scipp as sc\n'), ((28857, 28882), 'scipp.identical', 'sc.identical', (['b', 'expected'], {}), '(b, expected)\n', (28869, 28882), True, 'import scipp as sc\n'), ((29042, 29064), 'scipp.Variable', 'sc.Variable', ([], {'value': '(0.0)'}), '(value=0.0)\n', (29053, 29064), 
True, 'import scipp as sc\n'), ((29069, 29107), 'scipp.nan_to_num', 'sc.nan_to_num', (['a'], {'nan': 'replace', 'out': 'out'}), '(a, nan=replace, out=out)\n', (29082, 29107), True, 'import scipp as sc\n'), ((29195, 29222), 'scipp.identical', 'sc.identical', (['out', 'expected'], {}), '(out, expected)\n', (29207, 29222), True, 'import scipp as sc\n'), ((29434, 29456), 'scipp.Variable', 'sc.Variable', ([], {'value': '(0.0)'}), '(value=0.0)\n', (29445, 29456), True, 'import scipp as sc\n'), ((29485, 29523), 'scipp.nan_to_num', 'sc.nan_to_num', (['a'], {'nan': 'replace', 'out': 'out'}), '(a, nan=replace, out=out)\n', (29498, 29523), True, 'import scipp as sc\n'), ((29699, 29726), 'scipp.identical', 'sc.identical', (['out', 'expected'], {}), '(out, expected)\n', (29711, 29726), True, 'import scipp as sc\n'), ((29753, 29796), 'scipp.nan_to_num', 'sc.nan_to_num', (['out'], {'neginf': 'replace', 'out': 'out'}), '(out, neginf=replace, out=out)\n', (29766, 29796), True, 'import scipp as sc\n'), ((29966, 29993), 'scipp.identical', 'sc.identical', (['out', 'expected'], {}), '(out, expected)\n', (29978, 29993), True, 'import scipp as sc\n'), ((30020, 30063), 'scipp.nan_to_num', 'sc.nan_to_num', (['out'], {'posinf': 'replace', 'out': 'out'}), '(out, posinf=replace, out=out)\n', (30033, 30063), True, 'import scipp as sc\n'), ((30185, 30212), 'scipp.identical', 'sc.identical', (['out', 'expected'], {}), '(out, expected)\n', (30197, 30212), True, 'import scipp as sc\n'), ((30246, 30259), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (30257, 30259), True, 'import scipp as sc\n'), ((30356, 30369), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (30367, 30369), True, 'import scipp as sc\n'), ((30673, 30714), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': '[1, 2, 3]'}), "(dims=['x'], values=[1, 2, 3])\n", (30684, 30714), True, 'import scipp as sc\n'), ((30828, 30869), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': '[1, 2, 3]'}), 
"(dims=['x'], values=[1, 2, 3])\n", (30839, 30869), True, 'import scipp as sc\n'), ((30983, 31024), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': '[1, 2, 3]'}), "(dims=['x'], values=[1, 2, 3])\n", (30994, 31024), True, 'import scipp as sc\n'), ((31142, 31183), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': '[1, 2, 3]'}), "(dims=['x'], values=[1, 2, 3])\n", (31153, 31183), True, 'import scipp as sc\n'), ((31307, 31320), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (31318, 31320), True, 'import scipp as sc\n'), ((1723, 1747), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1736, 1747), False, 'import pytest\n'), ((1995, 2007), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (2004, 2007), True, 'import numpy as np\n'), ((2205, 2234), 'numpy.array', 'np.array', (['[True, False, True]'], {}), '([True, False, True])\n', (2213, 2234), True, 'import numpy as np\n'), ((2505, 2517), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (2514, 2517), True, 'import numpy as np\n'), ((2568, 2583), 'numpy.arange', 'np.arange', (['(4)', '(8)'], {}), '(4, 8)\n', (2577, 2583), True, 'import numpy as np\n'), ((2636, 2651), 'numpy.float64', 'np.float64', (['(1.2)'], {}), '(1.2)\n', (2646, 2651), True, 'import numpy as np\n'), ((2943, 2955), 'scipp.Unit', 'sc.Unit', (['"""m"""'], {}), "('m')\n", (2950, 2955), True, 'import scipp as sc\n'), ((6301, 6329), 'pytest.raises', 'pytest.raises', (['sc.DTypeError'], {}), '(sc.DTypeError)\n', (6314, 6329), False, 'import pytest\n'), ((6993, 7015), 'scipp.Variable', 'sc.Variable', ([], {'value': '"""b"""'}), "(value='b')\n", (7004, 7015), True, 'import scipp as sc\n'), ((7109, 7136), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (7122, 7136), False, 'import pytest\n'), ((7179, 7206), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (7192, 7206), False, 'import pytest\n'), ((7332, 7359), 'pytest.raises', 
'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (7345, 7359), False, 'import pytest\n'), ((7717, 7759), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': '[1.0, 2.0]'}), "(dims=['x'], values=[1.0, 2.0])\n", (7728, 7759), True, 'import scipp as sc\n'), ((7991, 8033), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': "['c', 'd']"}), "(dims=['x'], values=['c', 'd'])\n", (8002, 8033), True, 'import scipp as sc\n'), ((8196, 8234), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': '[3, 4]'}), "(dims=['x'], values=[3, 4])\n", (8207, 8234), True, 'import scipp as sc\n'), ((8629, 8656), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (8642, 8656), False, 'import pytest\n'), ((8679, 8691), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (8688, 8691), True, 'import numpy as np\n'), ((9175, 9202), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (9188, 9202), False, 'import pytest\n'), ((9225, 9246), 'numpy.ones', 'np.ones', ([], {'shape': '(3, 2)'}), '(shape=(3, 2))\n', (9232, 9246), True, 'import numpy as np\n'), ((9494, 9516), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2, 3)'}), '(shape=(2, 3))\n', (9502, 9516), True, 'import numpy as np\n'), ((9601, 9622), 'numpy.ones', 'np.ones', ([], {'shape': '(2, 3)'}), '(shape=(2, 3))\n', (9608, 9622), True, 'import numpy as np\n'), ((11018, 11084), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'values': '[1, 5, 5, 4]', 'dtype': 'sc.dtype.int64'}), "(dims=['x'], values=[1, 5, 5, 4], dtype=sc.dtype.int64)\n", (11029, 11084), True, 'import scipp as sc\n'), ((15732, 15749), 'scipp.Variable', 'sc.Variable', (['(True)'], {}), '(True)\n', (15743, 15749), True, 'import scipp as sc\n'), ((16132, 16149), 'scipp.Variable', 'sc.Variable', (['(True)'], {}), '(True)\n', (16143, 16149), True, 'import scipp as sc\n'), ((16646, 16664), 'scipp.Variable', 'sc.Variable', (['(False)'], {}), '(False)\n', (16657, 16664), 
True, 'import scipp as sc\n'), ((17073, 17091), 'scipp.Variable', 'sc.Variable', (['(False)'], {}), '(False)\n', (17084, 17091), True, 'import scipp as sc\n'), ((17494, 17511), 'scipp.Variable', 'sc.Variable', (['(True)'], {}), '(True)\n', (17505, 17511), True, 'import scipp as sc\n'), ((17948, 17965), 'scipp.Variable', 'sc.Variable', (['(True)'], {}), '(True)\n', (17959, 17965), True, 'import scipp as sc\n'), ((18932, 18950), 'scipp.identical', 'sc.identical', (['a', 'c'], {}), '(a, c)\n', (18944, 18950), True, 'import scipp as sc\n'), ((18966, 18990), 'scipp.identical', 'sc.identical', (['a_slice', 'c'], {}), '(a_slice, c)\n', (18978, 18990), True, 'import scipp as sc\n'), ((19006, 19024), 'scipp.identical', 'sc.identical', (['c', 'a'], {}), '(c, a)\n', (19018, 19024), True, 'import scipp as sc\n'), ((19040, 19064), 'scipp.identical', 'sc.identical', (['c', 'a_slice'], {}), '(c, a_slice)\n', (19052, 19064), True, 'import scipp as sc\n'), ((19109, 19122), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (19120, 19122), True, 'import scipp as sc\n'), ((19254, 19267), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (19265, 19267), True, 'import scipp as sc\n'), ((19269, 19282), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (19280, 19282), True, 'import scipp as sc\n'), ((19344, 19357), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (19355, 19357), True, 'import scipp as sc\n'), ((19359, 19372), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (19370, 19372), True, 'import scipp as sc\n'), ((19425, 19438), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (19436, 19438), True, 'import scipp as sc\n'), ((19524, 19537), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (19535, 19537), True, 'import scipp as sc\n'), ((19595, 19608), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (19606, 19608), True, 'import scipp as sc\n'), ((19656, 19669), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (19667, 19669), True, 'import scipp as sc\n'), ((19815, 
19828), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (19826, 19828), True, 'import scipp as sc\n'), ((19862, 19875), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (19873, 19875), True, 'import scipp as sc\n'), ((20201, 20217), 'scipp.sum', 'sc.sum', (['var', '"""y"""'], {}), "(var, 'y')\n", (20207, 20217), True, 'import scipp as sc\n'), ((21199, 21226), 'scipp.identical', 'sc.identical', (['var', 'expected'], {}), '(var, expected)\n', (21211, 21226), True, 'import scipp as sc\n'), ((21617, 21644), 'scipp.identical', 'sc.identical', (['var', 'expected'], {}), '(var, expected)\n', (21629, 21644), True, 'import scipp as sc\n'), ((22466, 22493), 'scipp.identical', 'sc.identical', (['var', 'expected'], {}), '(var, expected)\n', (22478, 22493), True, 'import scipp as sc\n'), ((22720, 22736), 'scipp.sum', 'sc.sum', (['var', '"""x"""'], {}), "(var, 'x')\n", (22726, 22736), True, 'import scipp as sc\n'), ((22738, 22753), 'scipp.Variable', 'sc.Variable', (['(10)'], {}), '(10)\n', (22749, 22753), True, 'import scipp as sc\n'), ((22850, 22867), 'scipp.mean', 'sc.mean', (['var', '"""x"""'], {}), "(var, 'x')\n", (22857, 22867), True, 'import scipp as sc\n'), ((22869, 22885), 'scipp.Variable', 'sc.Variable', (['(2.5)'], {}), '(2.5)\n', (22880, 22885), True, 'import scipp as sc\n'), ((24929, 24942), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (24940, 24942), True, 'import scipp as sc\n'), ((25313, 25326), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (25324, 25326), True, 'import scipp as sc\n'), ((25458, 25471), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (25469, 25471), True, 'import scipp as sc\n'), ((25603, 25616), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (25614, 25616), True, 'import scipp as sc\n'), ((25750, 25763), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (25761, 25763), True, 'import scipp as sc\n'), ((25899, 25912), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (25910, 25912), True, 'import scipp as sc\n'), ((26048, 
26061), 'scipp.Variable', 'sc.Variable', ([], {}), '()\n', (26059, 26061), True, 'import scipp as sc\n'), ((26565, 26612), 'scipp.Variable', 'sc.Variable', (["['x']"], {'values': '[False, False, True]'}), "(['x'], values=[False, False, True])\n", (26576, 26612), True, 'import scipp as sc\n'), ((26744, 26790), 'scipp.Variable', 'sc.Variable', (["['x']"], {'values': '[False, True, True]'}), "(['x'], values=[False, True, True])\n", (26755, 26790), True, 'import scipp as sc\n'), ((26949, 27003), 'scipp.Variable', 'sc.Variable', (["['x']"], {'values': '[True, False, False, False]'}), "(['x'], values=[True, False, False, False])\n", (26960, 27003), True, 'import scipp as sc\n'), ((27141, 27188), 'scipp.Variable', 'sc.Variable', (["['x']"], {'values': '[False, False, True]'}), "(['x'], values=[False, False, True])\n", (27152, 27188), True, 'import scipp as sc\n'), ((27326, 27373), 'scipp.Variable', 'sc.Variable', (["['x']"], {'values': '[False, True, False]'}), "(['x'], values=[False, True, False])\n", (27337, 27373), True, 'import scipp as sc\n'), ((1009, 1029), 'numpy.dtype', 'np.dtype', (['np.float32'], {}), '(np.float32)\n', (1017, 1029), True, 'import numpy as np\n'), ((1166, 1178), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (1175, 1178), True, 'import numpy as np\n'), ((1331, 1365), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'shape': '[2]'}), "(dims=['x'], shape=[2])\n", (1342, 1365), True, 'import scipp as sc\n'), ((1395, 1446), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'shape': '[2]', 'variances': '(False)'}), "(dims=['x'], shape=[2], variances=False)\n", (1406, 1446), True, 'import scipp as sc\n'), ((1499, 1549), 'scipp.Variable', 'sc.Variable', ([], {'dims': "['x']", 'shape': '[2]', 'variances': '(True)'}), "(dims=['x'], shape=[2], variances=True)\n", (1510, 1549), True, 'import scipp as sc\n'), ((1892, 1906), 'numpy.arange', 'np.arange', (['(4.0)'], {}), '(4.0)\n', (1901, 1906), True, 'import numpy as np\n'), ((2090, 2119), 
'numpy.array', 'np.array', (['[True, False, True]'], {}), '([True, False, True])\n', (2098, 2119), True, 'import numpy as np\n'), ((2349, 2363), 'numpy.arange', 'np.arange', (['(4.0)'], {}), '(4.0)\n', (2358, 2363), True, 'import numpy as np\n'), ((2397, 2416), 'numpy.arange', 'np.arange', (['(4.0)', '(8.0)'], {}), '(4.0, 8.0)\n', (2406, 2416), True, 'import numpy as np\n'), ((2653, 2669), 'scipp.Variable', 'sc.Variable', (['(1.2)'], {}), '(1.2)\n', (2664, 2669), True, 'import scipp as sc\n'), ((2967, 3000), 'scipp.Variable', 'sc.Variable', (['(1.2)'], {'unit': 'sc.units.m'}), '(1.2, unit=sc.units.m)\n', (2978, 3000), True, 'import scipp as sc\n'), ((3294, 3308), 'numpy.arange', 'np.arange', (['(4.0)'], {}), '(4.0)\n', (3303, 3308), True, 'import numpy as np\n'), ((5935, 5973), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {'dtype': 'np.int64'}), '([1, 2, 3, 4], dtype=np.int64)\n', (5943, 5973), True, 'import numpy as np\n'), ((6212, 6250), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {'dtype': 'np.int64'}), '([1, 2, 3, 4], dtype=np.int64)\n', (6220, 6250), True, 'import numpy as np\n'), ((6556, 6570), 'numpy.arange', 'np.arange', (['(4.0)'], {}), '(4.0)\n', (6565, 6570), True, 'import numpy as np\n'), ((10156, 10176), 'numpy.dtype', 'np.dtype', (['np.float64'], {}), '(np.float64)\n', (10164, 10176), True, 'import numpy as np\n'), ((10272, 10292), 'numpy.dtype', 'np.dtype', (['np.float32'], {}), '(np.float32)\n', (10280, 10292), True, 'import numpy as np\n'), ((10388, 10406), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (10396, 10406), True, 'import numpy as np\n'), ((10500, 10518), 'numpy.dtype', 'np.dtype', (['np.int32'], {}), '(np.int32)\n', (10508, 10518), True, 'import numpy as np\n'), ((11174, 11189), 'numpy.arange', 'np.arange', (['(0)', '(3)'], {}), '(0, 3)\n', (11183, 11189), True, 'import numpy as np\n'), ((11458, 11476), 'numpy.array', 'np.array', (['expected'], {}), '(expected)\n', (11466, 11476), True, 'import numpy as np\n'), 
((15791, 15827), 'numpy.array', 'np.array', (['[False, True, False, True]'], {}), '([False, True, False, True])\n', (15799, 15827), True, 'import numpy as np\n'), ((15868, 15904), 'numpy.array', 'np.array', (['[False, False, True, True]'], {}), '([False, False, True, True])\n', (15876, 15904), True, 'import numpy as np\n'), ((16191, 16227), 'numpy.array', 'np.array', (['[False, True, False, True]'], {}), '([False, True, False, True])\n', (16199, 16227), True, 'import numpy as np\n'), ((16268, 16304), 'numpy.array', 'np.array', (['[False, False, True, True]'], {}), '([False, False, True, True])\n', (16276, 16304), True, 'import numpy as np\n'), ((16706, 16742), 'numpy.array', 'np.array', (['[False, True, False, True]'], {}), '([False, True, False, True])\n', (16714, 16742), True, 'import numpy as np\n'), ((16783, 16819), 'numpy.array', 'np.array', (['[False, False, True, True]'], {}), '([False, False, True, True])\n', (16791, 16819), True, 'import numpy as np\n'), ((17133, 17169), 'numpy.array', 'np.array', (['[False, True, False, True]'], {}), '([False, True, False, True])\n', (17141, 17169), True, 'import numpy as np\n'), ((17210, 17246), 'numpy.array', 'np.array', (['[False, False, True, True]'], {}), '([False, False, True, True])\n', (17218, 17246), True, 'import numpy as np\n'), ((17553, 17589), 'numpy.array', 'np.array', (['[False, True, False, True]'], {}), '([False, True, False, True])\n', (17561, 17589), True, 'import numpy as np\n'), ((17630, 17666), 'numpy.array', 'np.array', (['[False, False, True, True]'], {}), '([False, False, True, True])\n', (17638, 17666), True, 'import numpy as np\n'), ((18007, 18043), 'numpy.array', 'np.array', (['[False, True, False, True]'], {}), '([False, True, False, True])\n', (18015, 18043), True, 'import numpy as np\n'), ((18084, 18120), 'numpy.array', 'np.array', (['[False, False, True, True]'], {}), '([False, False, True, True])\n', (18092, 18120), True, 'import numpy as np\n'), ((19963, 19997), 'numpy.array', 'np.array', 
(['[[0.1, 0.3], [0.2, 0.6]]'], {}), '([[0.1, 0.3], [0.2, 0.6]])\n', (19971, 19997), True, 'import numpy as np\n'), ((20111, 20131), 'numpy.array', 'np.array', (['[0.4, 0.8]'], {}), '([0.4, 0.8])\n', (20119, 20131), True, 'import numpy as np\n'), ((20324, 20358), 'numpy.array', 'np.array', (['[[0.1, 0.3], [0.2, 0.6]]'], {}), '([[0.1, 0.3], [0.2, 0.6]])\n', (20332, 20358), True, 'import numpy as np\n'), ((20470, 20490), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (20478, 20490), True, 'import numpy as np\n'), ((20608, 20628), 'numpy.array', 'np.array', (['[0.4, 0.8]'], {}), '([0.4, 0.8])\n', (20616, 20628), True, 'import numpy as np\n'), ((22222, 22234), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (22231, 22234), True, 'import numpy as np\n'), ((22666, 22694), 'numpy.arange', 'np.arange', (['(5)'], {'dtype': 'np.int64'}), '(5, dtype=np.int64)\n', (22675, 22694), True, 'import numpy as np\n'), ((22796, 22824), 'numpy.arange', 'np.arange', (['(6)'], {'dtype': 'np.int64'}), '(6, dtype=np.int64)\n', (22805, 22824), True, 'import numpy as np\n'), ((23146, 23158), 'numpy.float32', 'np.float32', ([], {}), '()\n', (23156, 23158), True, 'import numpy as np\n'), ((23215, 23230), 'numpy.float32', 'np.float32', (['(0.0)'], {}), '(0.0)\n', (23225, 23230), True, 'import numpy as np\n'), ((23306, 23321), 'numpy.float32', 'np.float32', (['(0.0)'], {}), '(0.0)\n', (23316, 23321), True, 'import numpy as np\n'), ((23408, 23421), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (23416, 23421), True, 'import numpy as np\n'), ((23509, 23521), 'numpy.float32', 'np.float32', ([], {}), '()\n', (23519, 23521), True, 'import numpy as np\n'), ((23564, 23577), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (23572, 23577), True, 'import numpy as np\n'), ((23679, 23694), 'numpy.float32', 'np.float32', (['(0.0)'], {}), '(0.0)\n', (23689, 23694), True, 'import numpy as np\n'), ((23770, 23785), 'numpy.float32', 'np.float32', (['(0.0)'], {}), '(0.0)\n', 
(23780, 23785), True, 'import numpy as np\n'), ((23858, 23872), 'scipp.Variable', 'sc.Variable', (['(2)'], {}), '(2)\n', (23869, 23872), True, 'import scipp as sc\n'), ((23908, 23924), 'scipp.Variable', 'sc.Variable', (['(2.0)'], {}), '(2.0)\n', (23919, 23924), True, 'import scipp as sc\n'), ((23962, 23979), 'scipp.Variable', 'sc.Variable', (['(True)'], {}), '(True)\n', (23973, 23979), True, 'import scipp as sc\n'), ((24047, 24077), 'scipp.Variable', 'sc.Variable', (['(2)'], {'dtype': 'np.int32'}), '(2, dtype=np.int32)\n', (24058, 24077), True, 'import scipp as sc\n'), ((24218, 24244), 'scipp.Variable', 'sc.Variable', (['(1)'], {'dtype': 'bool'}), '(1, dtype=bool)\n', (24229, 24244), True, 'import scipp as sc\n'), ((24307, 24319), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (24316, 24319), True, 'import numpy as np\n'), ((24630, 24659), 'numpy.array', 'np.array', (["['aaa', 'ff', 'bb']"], {}), "(['aaa', 'ff', 'bb'])\n", (24638, 24659), True, 'import numpy as np\n'), ((24753, 24795), 'numpy.array', 'np.array', (['[False, True, True, False, True]'], {}), '([False, True, True, False, True])\n', (24761, 24795), True, 'import numpy as np\n'), ((27439, 27460), 'numpy.array', 'np.array', (['[1, np.nan]'], {}), '([1, np.nan])\n', (27447, 27460), True, 'import numpy as np\n'), ((27579, 27607), 'numpy.array', 'np.array', (['[1, replace.value]'], {}), '([1, replace.value])\n', (27587, 27607), True, 'import numpy as np\n'), ((27723, 27744), 'numpy.array', 'np.array', (['[1, np.inf]'], {}), '([1, np.inf])\n', (27731, 27744), True, 'import numpy as np\n'), ((27870, 27898), 'numpy.array', 'np.array', (['[1, replace.value]'], {}), '([1, replace.value])\n', (27878, 27898), True, 'import numpy as np\n'), ((28014, 28036), 'numpy.array', 'np.array', (['[1, -np.inf]'], {}), '([1, -np.inf])\n', (28022, 28036), True, 'import numpy as np\n'), ((28162, 28190), 'numpy.array', 'np.array', (['[1, replace.value]'], {}), '([1, replace.value])\n', (28170, 28190), True, 'import numpy as 
np\n'), ((28328, 28366), 'numpy.array', 'np.array', (['[1, np.nan, np.inf, -np.inf]'], {}), '([1, np.nan, np.inf, -np.inf])\n', (28336, 28366), True, 'import numpy as np\n'), ((28722, 28814), 'numpy.array', 'np.array', (['([1] + [repl.value for repl in [replace_nan, replace_pos_inf, replace_neg_inf]]\n )'], {}), '([1] + [repl.value for repl in [replace_nan, replace_pos_inf,\n replace_neg_inf]])\n', (28730, 28814), True, 'import numpy as np\n'), ((28951, 28972), 'numpy.array', 'np.array', (['[1, np.nan]'], {}), '([1, np.nan])\n', (28959, 28972), True, 'import numpy as np\n'), ((29015, 29026), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (29023, 29026), True, 'import numpy as np\n'), ((29154, 29182), 'numpy.array', 'np.array', (['[1, replace.value]'], {}), '([1, replace.value])\n', (29162, 29182), True, 'import numpy as np\n'), ((29326, 29364), 'numpy.array', 'np.array', (['[1, np.inf, -np.inf, np.nan]'], {}), '([1, np.inf, -np.inf, np.nan])\n', (29334, 29364), True, 'import numpy as np\n'), ((29407, 29418), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (29415, 29418), True, 'import numpy as np\n'), ((29597, 29642), 'numpy.array', 'np.array', (['[1, np.inf, -np.inf, replace.value]'], {}), '([1, np.inf, -np.inf, replace.value])\n', (29605, 29642), True, 'import numpy as np\n'), ((29870, 29921), 'numpy.array', 'np.array', (['[1, np.inf, replace.value, replace.value]'], {}), '([1, np.inf, replace.value, replace.value])\n', (29878, 29921), True, 'import numpy as np\n'), ((30137, 30172), 'numpy.array', 'np.array', (['([1] + [replace.value] * 3)'], {}), '([1] + [replace.value] * 3)\n', (30145, 30172), True, 'import numpy as np\n'), ((1802, 1814), 'numpy.arange', 'np.arange', (['(2)'], {}), '(2)\n', (1811, 1814), True, 'import numpy as np\n'), ((6459, 6473), 'numpy.arange', 'np.arange', (['(4.0)'], {}), '(4.0)\n', (6468, 6473), True, 'import numpy as np\n'), ((15984, 16019), 'numpy.array', 'np.array', (['[False, True, True, True]'], {}), '([False, True, True, 
True])\n', (15992, 16019), True, 'import numpy as np\n'), ((16430, 16465), 'numpy.array', 'np.array', (['[False, True, True, True]'], {}), '([False, True, True, True])\n', (16438, 16465), True, 'import numpy as np\n'), ((16922, 16959), 'numpy.array', 'np.array', (['[False, False, False, True]'], {}), '([False, False, False, True])\n', (16930, 16959), True, 'import numpy as np\n'), ((17329, 17366), 'numpy.array', 'np.array', (['[False, False, False, True]'], {}), '([False, False, False, True])\n', (17337, 17366), True, 'import numpy as np\n'), ((17746, 17782), 'numpy.array', 'np.array', (['[False, True, True, False]'], {}), '([False, True, True, False])\n', (17754, 17782), True, 'import numpy as np\n'), ((18246, 18282), 'numpy.array', 'np.array', (['[False, True, True, False]'], {}), '([False, True, True, False])\n', (18254, 18282), True, 'import numpy as np\n'), ((24125, 24138), 'numpy.float64', 'np.float64', (['(2)'], {}), '(2)\n', (24135, 24138), True, 'import numpy as np\n'), ((3671, 3685), 'numpy.arange', 'np.arange', (['(4.0)'], {}), '(4.0)\n', (3680, 3685), True, 'import numpy as np\n'), ((4048, 4062), 'numpy.arange', 'np.arange', (['(4.0)'], {}), '(4.0)\n', (4057, 4062), True, 'import numpy as np\n'), ((5522, 5536), 'numpy.arange', 'np.arange', (['(6.0)'], {}), '(6.0)\n', (5531, 5536), True, 'import numpy as np\n'), ((9692, 9704), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (9701, 9704), True, 'import numpy as np\n'), ((9803, 9815), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (9812, 9815), True, 'import numpy as np\n'), ((9914, 9926), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (9923, 9926), True, 'import numpy as np\n'), ((10029, 10041), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (10038, 10041), True, 'import numpy as np\n'), ((10627, 10642), 'numpy.arange', 'np.arange', (['(0)', '(8)'], {}), '(0, 8)\n', (10636, 10642), True, 'import numpy as np\n'), ((26529, 26553), 'numpy.array', 'np.array', (['[1, 1, np.nan]'], {}), 
'([1, 1, np.nan])\n', (26537, 26553), True, 'import numpy as np\n'), ((26702, 26732), 'numpy.array', 'np.array', (['[1, -np.inf, np.inf]'], {}), '([1, -np.inf, np.inf])\n', (26710, 26732), True, 'import numpy as np\n'), ((26899, 26937), 'numpy.array', 'np.array', (['[1, -np.inf, np.inf, np.nan]'], {}), '([1, -np.inf, np.inf, np.nan])\n', (26907, 26937), True, 'import numpy as np\n'), ((27099, 27129), 'numpy.array', 'np.array', (['[1, -np.inf, np.inf]'], {}), '([1, -np.inf, np.inf])\n', (27107, 27129), True, 'import numpy as np\n'), ((27284, 27314), 'numpy.array', 'np.array', (['[1, -np.inf, np.inf]'], {}), '([1, -np.inf, np.inf])\n', (27292, 27314), True, 'import numpy as np\n'), ((10776, 10791), 'numpy.arange', 'np.arange', (['(4)', '(8)'], {}), '(4, 8)\n', (10785, 10791), True, 'import numpy as np\n')] |
import numpy as np
from .data_iterator import DataIterator
class BatchIterator(DataIterator):
    """Yield fixed-size (inputs, targets) mini-batches.

    When ``shuffle`` is True the *order of the batches* is randomized on
    every call; the ordering of samples within each batch is preserved.
    """

    def __init__(self, batch_size, shuffle=False):
        super().__init__()
        self.batch_size = batch_size
        self.shuffle = shuffle

    def __call__(self, inputs, targets):
        # One offset per batch; the final batch may be shorter than batch_size.
        offsets = np.arange(0, len(inputs), self.batch_size)
        if self.shuffle:
            np.random.shuffle(offsets)
        for offset in offsets:
            stop = offset + self.batch_size
            yield inputs[offset:stop], targets[offset:stop]
| [
"numpy.random.shuffle"
] | [((416, 441), 'numpy.random.shuffle', 'np.random.shuffle', (['starts'], {}), '(starts)\n', (433, 441), True, 'import numpy as np\n')] |
import torch
import numpy as np
from scipy.ndimage.filters import gaussian_filter
class ModelMixing:
    """Interpolate the weights of ``main_model`` toward ``base_model``, writing
    the result into ``target_model``.

    Mixing is either uniform (a single scalar ``amount``) or spatially masked
    by per-parameter "cold zones": tensors in [0, 1] marking weight regions
    that can be pulled toward the base model while ``cold_zone_loss`` still
    reports success.
    """
    def __init__(self, tokenizer, base_model, main_model, target_model, cold_zone_loss, seed = 80085):
        # cold_zone_loss: zero-argument callable; a truthy return means the
        # current target_model weights are acceptable (see calculate_cold_zones).
        self.base_model = base_model
        self.main_model = main_model
        self.target_model = target_model
        # numpy RNG kept alongside the torch one; unused in the visible methods.
        self.random_state = np.random.RandomState(seed)
        self.torch_rng = torch.Generator()
        self.torch_rng.manual_seed(seed)  # deterministic sampling in calculate_cold_zones
        self.tokenizer = tokenizer
        self.cold_zones = None  # list of per-parameter mask tensors, filled by calculate_cold_zones
        self.cold_zone_loss = cold_zone_loss
    def diffuse_cold_zone(self, cold_zone, diffusion, upper_bound):
        """Return a copy of ``cold_zone`` where every entry >= ``upper_bound``
        is raised by ``diffusion``, clipped so no entry exceeds 1.

        NOTE(review): torch.Tensor(ndarray) always yields float32 — confirm the
        masks are intended to be float32.
        """
        result = torch.clone(cold_zone).numpy()
        result[result >= upper_bound] += diffusion
        result[result > 1] = 1
        return torch.Tensor(result)
    def calculate_cold_zones(self):
        """Grow per-parameter cold-zone masks by trial and error.

        Each round adds clipped random increments to a candidate mask, blends
        base/main weights into ``target_model`` through it, and keeps the
        candidate only if ``cold_zone_loss()`` succeeds; otherwise the candidate
        is rolled back to the last accepted state. Accepted masks accumulate in
        ``self.cold_zones``. Returns ``None`` once 25 net consecutive failures
        pile up; there is no success exit from the loop — NOTE(review): confirm
        the caller is expected to interrupt it.
        """
        restart_tries = 0
        successful_tries = 0
        cold_zone_tmp = []
        self.cold_zones = []
        for p in self.main_model.parameters():
            self.cold_zones.append(torch.zeros(p.shape))
            cold_zone_tmp.append(torch.zeros(p.shape))
        # amplification raises the threshold random values must clear; it is
        # loosened after successes and tightened after failures (0.1..0.99).
        amplification = 0.90
        while True:
            for i in range(len(cold_zone_tmp)):
                # Random increments: only values above `amplification` survive.
                r = torch.rand(*cold_zone_tmp[i].shape, generator = self.torch_rng) - amplification
                r[r < 0] = 0
                cold_zone_tmp[i] += r
                cold_zone_tmp[i][cold_zone_tmp[i] > 1] = 1
            # Try the candidate: write lerp(main, base, mask) into target_model.
            for p_base, p_main, p_target, p_mask in zip(self.base_model.parameters(), self.main_model.parameters(), self.target_model.parameters(), cold_zone_tmp):
                p_target.data = torch.lerp(p_main.data, p_base.data, p_mask)
            did_successful_cold_zone = self.cold_zone_loss()
            if did_successful_cold_zone:
                # Copy a successful new cold zone
                successful_tries += 1
                print(f"Successful #{successful_tries} cold-zone calculated (amp: {amplification:.2f} restart_tries: {restart_tries})")
                for i in range(len(cold_zone_tmp)):
                    self.cold_zones[i][:] = cold_zone_tmp[i]
                restart_tries = max(restart_tries - 1, 0)
                amplification = max(amplification - 0.01, 0.1)
            else:
                # Restore cold_zone_tmp to last "known to work" position
                print(f"(restart_tries: {restart_tries} amp: {amplification:.2f})")
                for i in range(len(cold_zone_tmp)):
                    cold_zone_tmp[i][:] = self.cold_zones[i]
                restart_tries += 1
                amplification = min(amplification + 0.01, 0.99)
                if restart_tries == 25:
                    return None
    def mix(self, amount, cold_zone_diffusion = None, upper_bound = 0.2, normalize_cold_zone = False):
        """Write ``lerp(main, base, mask)`` into every ``target_model`` parameter.

        With ``cold_zone_diffusion`` set and cold zones computed, each parameter
        uses its (optionally normalized, then diffused) cold-zone mask scaled by
        ``amount``; otherwise the uniform scalar ``amount`` is used.
        """
        for i, (p_base, p_main, p_target) in enumerate(zip(self.base_model.parameters(), self.main_model.parameters(), self.target_model.parameters())):
            if cold_zone_diffusion is None or self.cold_zones is None:
                mask = amount
            else:
                cold_zone = torch.clone(self.cold_zones[i])
                if normalize_cold_zone:
                    # Rescale in place to span [0, 1] before diffusing.
                    cold_zone -= torch.min(cold_zone)
                    cold_zone /= torch.max(cold_zone)
                mask = self.diffuse_cold_zone(cold_zone, cold_zone_diffusion, upper_bound) * amount
                del cold_zone
            p_target.data = torch.lerp(p_main.data, p_base.data, mask) | [
"torch.min",
"torch.lerp",
"numpy.random.RandomState",
"torch.Tensor",
"torch.max",
"torch.rand",
"torch.zeros",
"torch.clone",
"torch.Generator"
] | [((348, 375), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (369, 375), True, 'import numpy as np\n'), ((401, 418), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (416, 418), False, 'import torch\n'), ((802, 822), 'torch.Tensor', 'torch.Tensor', (['result'], {}), '(result)\n', (814, 822), False, 'import torch\n'), ((3501, 3543), 'torch.lerp', 'torch.lerp', (['p_main.data', 'p_base.data', 'mask'], {}), '(p_main.data, p_base.data, mask)\n', (3511, 3543), False, 'import torch\n'), ((665, 687), 'torch.clone', 'torch.clone', (['cold_zone'], {}), '(cold_zone)\n', (676, 687), False, 'import torch\n'), ((1057, 1077), 'torch.zeros', 'torch.zeros', (['p.shape'], {}), '(p.shape)\n', (1068, 1077), False, 'import torch\n'), ((1112, 1132), 'torch.zeros', 'torch.zeros', (['p.shape'], {}), '(p.shape)\n', (1123, 1132), False, 'import torch\n'), ((1666, 1710), 'torch.lerp', 'torch.lerp', (['p_main.data', 'p_base.data', 'p_mask'], {}), '(p_main.data, p_base.data, p_mask)\n', (1676, 1710), False, 'import torch\n'), ((3151, 3182), 'torch.clone', 'torch.clone', (['self.cold_zones[i]'], {}), '(self.cold_zones[i])\n', (3162, 3182), False, 'import torch\n'), ((1264, 1325), 'torch.rand', 'torch.rand', (['*cold_zone_tmp[i].shape'], {'generator': 'self.torch_rng'}), '(*cold_zone_tmp[i].shape, generator=self.torch_rng)\n', (1274, 1325), False, 'import torch\n'), ((3268, 3288), 'torch.min', 'torch.min', (['cold_zone'], {}), '(cold_zone)\n', (3277, 3288), False, 'import torch\n'), ((3322, 3342), 'torch.max', 'torch.max', (['cold_zone'], {}), '(cold_zone)\n', (3331, 3342), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
import functools
import numpy as np
from .. import datamods
from .. import utils
from .. import constants
DB_SPECIES = datamods.species['debye']
def setup_extended_debye(solutes, calculate_osmotic_coefficient=False):
db_species = DB_SPECIES
I_factor = []
dh_a = []
dh_b = []
for sp in solutes:
I_factor_, dh_a_, dh_b_ = _species_definition_dh_model(sp, db_species)
I_factor.append(I_factor_)
dh_a.append(dh_a_)
dh_b.append(dh_b_)
dh_a = np.array(dh_a)
dh_b = np.array(dh_b)
I_factor = np.array(I_factor)
zarray = np.array([utils.charge_number(specie) for specie in solutes])
g = functools.partial(_loggamma_and_osmotic,
zarray=zarray,
calculate_osmotic_coefficient=calculate_osmotic_coefficient,
dh_a=dh_a, dh_b=dh_b,
I_factor=I_factor)
return g
def _loggamma_and_osmotic(xarray, TK, zarray, calculate_osmotic_coefficient,
dh_a, dh_b, I_factor):
A, B = _debye_huckel_constant(TK)
I = 0.5*np.sum(zarray**2*xarray)
logg1 = -A * zarray ** 2 * \
np.sqrt(I) / (1 + B * dh_a * np.sqrt(I)) + dh_b * I
logg2 = I_factor*I
logg3 = -A * zarray ** 2 * (np.sqrt(I) / (1.0 + np.sqrt(I)) - 0.3 * I)
logg = np.nan_to_num(logg1)*(~np.isnan(dh_a)) + \
np.nan_to_num(logg2)*(np.isnan(dh_a) & (~np.isnan(I_factor))) + \
np.nan_to_num(logg3)*(np.isnan(dh_a) & (np.isnan(I_factor)))
resw = -A*I**(3/2)/(1 + constants.B_DEBYE*I**(1/2))
if calculate_osmotic_coefficient:
osmotic_coefficient = constants.LOG10E*(2*resw/np.sum(xarray)+1)
else:
osmotic_coefficient = constants.LOG10E
# Insertion of osmotic coefficient
logg = np.insert(logg, 0, osmotic_coefficient)
return logg
def _debye_huckel_constant(TK):
epsilon = _dieletricconstant_water(TK)
rho = _density_water(TK)
A = 1.82483e6 * np.sqrt(rho) / (epsilon * TK) ** 1.5 # (L/mol)^1/2
B = 50.2916 * np.sqrt(rho / (epsilon * TK)) # Angstrom^-1 . (L/mol)^1/2
return A, B
def _species_definition_dh_model(tag, species_activity_db):
z = utils.charge_number(tag)
if tag not in species_activity_db:
if z == 0:
I_factor = 0.1
dh_a = np.nan
dh_b = np.nan
else: # Else should use davies
I_factor = np.nan
dh_a = np.nan
dh_b = np.nan
else:
db_specie = species_activity_db[tag]
try:
if "I_factor" in db_specie:
I_factor = db_specie["I_factor"]
dh_a = np.nan
dh_b = np.nan
else:
I_factor = np.nan
dh_a = db_specie["dh"]["phreeqc"]["a"]
dh_b = db_specie["dh"]["phreeqc"]["b"]
except KeyError as e:
print("Error getting activity of specie = {}".format(tag))
raise e
return I_factor, dh_a, dh_b
def _dieletricconstant_water(TK):
# for TK: 273-372
return 0.24921e3 - 0.79069 * TK + 0.72997e-3 * TK ** 2
def _density_water(TK):
# for TK: 273-372
return (
0.183652
+ 0.00724987 * TK
- 0.203449e-4 * TK ** 2
+ 1.73702e-8 * TK ** 3
)
| [
"functools.partial",
"numpy.sum",
"numpy.nan_to_num",
"numpy.isnan",
"numpy.insert",
"numpy.array",
"numpy.sqrt"
] | [((523, 537), 'numpy.array', 'np.array', (['dh_a'], {}), '(dh_a)\n', (531, 537), True, 'import numpy as np\n'), ((549, 563), 'numpy.array', 'np.array', (['dh_b'], {}), '(dh_b)\n', (557, 563), True, 'import numpy as np\n'), ((579, 597), 'numpy.array', 'np.array', (['I_factor'], {}), '(I_factor)\n', (587, 597), True, 'import numpy as np\n'), ((681, 846), 'functools.partial', 'functools.partial', (['_loggamma_and_osmotic'], {'zarray': 'zarray', 'calculate_osmotic_coefficient': 'calculate_osmotic_coefficient', 'dh_a': 'dh_a', 'dh_b': 'dh_b', 'I_factor': 'I_factor'}), '(_loggamma_and_osmotic, zarray=zarray,\n calculate_osmotic_coefficient=calculate_osmotic_coefficient, dh_a=dh_a,\n dh_b=dh_b, I_factor=I_factor)\n', (698, 846), False, 'import functools\n'), ((1821, 1860), 'numpy.insert', 'np.insert', (['logg', '(0)', 'osmotic_coefficient'], {}), '(logg, 0, osmotic_coefficient)\n', (1830, 1860), True, 'import numpy as np\n'), ((1134, 1162), 'numpy.sum', 'np.sum', (['(zarray ** 2 * xarray)'], {}), '(zarray ** 2 * xarray)\n', (1140, 1162), True, 'import numpy as np\n'), ((2073, 2102), 'numpy.sqrt', 'np.sqrt', (['(rho / (epsilon * TK))'], {}), '(rho / (epsilon * TK))\n', (2080, 2102), True, 'import numpy as np\n'), ((1486, 1506), 'numpy.nan_to_num', 'np.nan_to_num', (['logg3'], {}), '(logg3)\n', (1499, 1506), True, 'import numpy as np\n'), ((2003, 2015), 'numpy.sqrt', 'np.sqrt', (['rho'], {}), '(rho)\n', (2010, 2015), True, 'import numpy as np\n'), ((1200, 1210), 'numpy.sqrt', 'np.sqrt', (['I'], {}), '(I)\n', (1207, 1210), True, 'import numpy as np\n'), ((1307, 1317), 'numpy.sqrt', 'np.sqrt', (['I'], {}), '(I)\n', (1314, 1317), True, 'import numpy as np\n'), ((1361, 1381), 'numpy.nan_to_num', 'np.nan_to_num', (['logg1'], {}), '(logg1)\n', (1374, 1381), True, 'import numpy as np\n'), ((1412, 1432), 'numpy.nan_to_num', 'np.nan_to_num', (['logg2'], {}), '(logg2)\n', (1425, 1432), True, 'import numpy as np\n'), ((1508, 1522), 'numpy.isnan', 'np.isnan', (['dh_a'], {}), 
'(dh_a)\n', (1516, 1522), True, 'import numpy as np\n'), ((1526, 1544), 'numpy.isnan', 'np.isnan', (['I_factor'], {}), '(I_factor)\n', (1534, 1544), True, 'import numpy as np\n'), ((1229, 1239), 'numpy.sqrt', 'np.sqrt', (['I'], {}), '(I)\n', (1236, 1239), True, 'import numpy as np\n'), ((1327, 1337), 'numpy.sqrt', 'np.sqrt', (['I'], {}), '(I)\n', (1334, 1337), True, 'import numpy as np\n'), ((1384, 1398), 'numpy.isnan', 'np.isnan', (['dh_a'], {}), '(dh_a)\n', (1392, 1398), True, 'import numpy as np\n'), ((1434, 1448), 'numpy.isnan', 'np.isnan', (['dh_a'], {}), '(dh_a)\n', (1442, 1448), True, 'import numpy as np\n'), ((1696, 1710), 'numpy.sum', 'np.sum', (['xarray'], {}), '(xarray)\n', (1702, 1710), True, 'import numpy as np\n'), ((1453, 1471), 'numpy.isnan', 'np.isnan', (['I_factor'], {}), '(I_factor)\n', (1461, 1471), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.linear_model import LinearRegression
from .model import Model
class Linear(Model):
def __init__(self, features):
super(Linear, self).__init__(features)
self.model = LinearRegression()
def fit(self, X, y):
X, y = np.array(X), np.array(y)
self.model.fit(X, y)
return self
def predict(self, X):
return self.model.predict(X)
| [
"sklearn.linear_model.LinearRegression",
"numpy.array"
] | [((220, 238), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (236, 238), False, 'from sklearn.linear_model import LinearRegression\n'), ((280, 291), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (288, 291), True, 'import numpy as np\n'), ((293, 304), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (301, 304), True, 'import numpy as np\n')] |
import logging
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from shapely import geometry
import input_fn.input_fn_2d.data_gen_2dt.data_gen_t2d_util.polygone_2d_helper as old_helper
logger = logging.getLogger("polygone_2d_helper")
# logger.setLevel("DEBUG")
# logger.setLevel("INFO")
if __name__ == "__main__":
logging.basicConfig()
np.set_printoptions(precision=6, suppress=True)
class Fcalculator:
def __init__(self, points, epsilon=np.array(0.0001), debug=False):
"""points is list of tupel with x,y like [(x1,y1), (x2,y2), (x3,y3),...]"""
self.epsilon_tf = tf.constant(epsilon, dtype=tf.float64)
self.points_tf = points
self._debug = debug
self._cross = tf.constant([[0.0, 1.0], [-1.0, 0.0]], dtype=tf.float64)
self.epsilon = epsilon
self.points = points
def update_points(self, points):
self.points_tf = points
def q_of_phi(self, phi):
phi_tf = tf.cast(phi, dtype=tf.float64)
a__tf = tf.math.cos(phi_tf)
b__tf = tf.math.sin(phi_tf) - 1.0
q_tf = tf.stack([a__tf, b__tf])
if self._debug:
phi = np.array(phi, dtype=np.float64)
a_ = np.cos(phi)
b_ = np.sin(phi) - 1.0
q = np.array([a_, b_])
logger.debug("q^2: {}".format(tf.math.abs(q[0] ** 2 + q[1] ** 2)))
assert np.array_equal(q, q_tf.numpy())
return q
else:
return q_tf
@tf.function
def F_of_qs_arr(self, q, p0_, p1_, c=0.0):
j_tf = tf.cast(tf.complex(0.0, 1.0), dtype=tf.complex128)
p0_tf = tf.cast(p0_, dtype=tf.float64)
p1_tf = tf.cast(p1_, dtype=tf.float64)
q_tf = tf.cast(q, dtype=tf.float64)
c_tf = tf.cast(c, dtype=tf.float64)
c_tfc = tf.cast(c, dtype=tf.complex128)
# q_cross_tf = tf.Variable([-q_tf[1], q_tf[0]])
q_cross_tf = tf.matmul(tf.cast([[0.0, -1.0],[1.0, 0.0]], dtype=tf.float64), q_tf)
p0p1_tf = p1_tf - p0_tf
scale_tf = tf.cast(1.0 / tf.math.abs(q_tf[0] ** 2 + q_tf[1] ** 2), dtype=tf.float64)
f_p0_tf = -tf.cast(1.0, dtype=tf.complex128) * tf.math.exp(j_tf * (tf.cast(complex_dot(p0_tf, q_tf), dtype=tf.complex128) + c_tfc))
f_p1_tf = -tf.cast(1.0, dtype=tf.complex128) * tf.math.exp(j_tf * (tf.cast(complex_dot(p1_tf, q_tf), dtype=tf.complex128) + c_tfc))
case1_array_tf = tf.cast(scale_tf * complex_dot(p0p1_tf, q_cross_tf), dtype=tf.complex128) * (f_p1_tf - f_p0_tf) / tf.cast(complex_dot(p0p1_tf, q_tf), dtype=tf.complex128)
case2_array_tf = tf.cast(scale_tf * complex_dot(p0p1_tf, q_cross_tf), dtype=tf.complex128) * -j_tf * tf.math.exp(j_tf * tf.cast(complex_dot(p0_tf, q_tf) + c_tf, dtype=tf.complex128))
res_array_tf = tf.where(tf.math.abs(complex_dot(p0p1_tf, q_tf)) >= 0.0001, case1_array_tf, case2_array_tf)
if not self._debug:
return res_array_tf
else:
j_ = np.array(1.0j, dtype=np.complex128)
p0 = np.array(p0_, dtype=np.float64)
p1 = np.array(p1_, dtype=np.float64)
q = np.array(q, dtype=np.float64)
q_cross = np.array([-q[1], q[0]])
c = np.array(c)
scale = 1.0 / np.abs(q[0] ** 2 + q[1] ** 2)
p0p1 = p1 - p0
assert np.array_equal(p0, p0_tf.numpy())
assert np.array_equal(p1, p1_tf.numpy())
assert np.array_equal(q, q_tf.numpy())
# print(q_cross, q_cross_tf.numpy())
assert np.array_equal(q_cross, q_cross_tf.numpy())
assert np.array_equal(c, c_tf.numpy())
assert np.array_equal(scale, scale_tf.numpy())
f_p0 = -np.array(1.0, dtype=np.complex128) * np.exp(j_ * (np.dot(p0, q) + c))
f_p1 = -np.array(1.0, dtype=np.complex128) * np.exp(j_ * (np.dot(p1, q) + c))
assert np.array_equal(f_p0, f_p0_tf.numpy())
assert np.array_equal(f_p1, f_p1_tf.numpy())
case1_array = np.array(scale * np.dot(p0p1, q_cross), dtype=np.complex128) * (f_p1 - f_p0) / tf.cast(np.dot(p0p1, q), dtype=np.complex128)
case2_array = np.array(scale * np.dot(p0p1, q_cross), dtype=np.complex128) * -1.0j * np.exp(1.0j * (tf.cast(np.dot(p0, q), dtype=np.complex128) + c))
# print("complex dot in",p0p1_tf,q_cross_tf )
# print("complex dot out", complex_dot(p0p1_tf, q_cross_tf))
# print(case1_array, case1_array_tf.numpy())
assert np.array_equal(case1_array, case1_array_tf.numpy())
assert np.array_equal(case2_array, case2_array_tf.numpy())
# print("case1_array.shape", case1_array.shape)
res_array = np.where(np.abs(np.dot(p0p1, q)) >= 0.0001, case1_array, case2_array)
# if np.max(scale) >= 1000.0 / self.epsilon:
# logger.debug("Scale == NONE")
# polygon = geometry.Polygon(self.points)
# area = np.array(polygon.area, dtype=np.complex)
# logger.debug("area: {}".format(area))
# s_value = area / len(self.points)
# case3_array = np.ones_like(q[0]) * s_value
# res_array = np.where(scale >= 1000.0 / self.epsilon, case3_array, res_array)
#
# if tf.math.reduce_max(scale_tf) >= 1000.0 / self.epsilon_tf:
# logger.debug("Scale_tf == NONE")
# polygon = geometry.Polygon(self.points_tf)
# area = np.array(polygon.area, dtype=np.complex)
# logger.debug("area: {}".format(area))
# s_value = area / len(self.points)
# case3_array = np.ones_like(q[0]) * s_value
# res_array_tf = np.where(scale >= 1000.0 / self.epsilon, case3_array, res_array)
# print("res_array", res_array)
# print("res_array_tf", res_array_tf)
assert np.array_equal(res_array, res_array_tf.numpy())
return res_array
@tf.function
def F_of_phi(self, phi):
logger.debug("###########################################")
logger.info("phi: {}".format(phi))
q = self.q_of_phi(phi)
c = 0.0
if not self._debug:
sum_res = tf.cast(phi * tf.cast(0.0, dtype=tf.float64), dtype=tf.complex128)
c = tf.cast(c, dtype=tf.float64)
for index in range(self.points_tf.shape[0]):
p0 = self.points_tf[index - 1]
p1 = self.points_tf[index]
sum_res += self.F_of_qs_arr(q, p0, p1, c=c)
else:
sum_res = tf.zeros_like(phi, dtype=np.complex128)
for index in range(len(self.points)):
logger.debug("index: {}".format(index))
p0 = self.points[index - 1]
p1 = self.points[index]
logger.debug("p0: {}; p1: {}".format(p0, p1))
sum_res += self.F_of_qs_arr(q, p0, p1, c=c)
logger.debug("sum_res {}".format(sum_res))
final_res = sum_res
logger.debug("sum_res.dtype: {}".format(sum_res.dtype))
logger.info("final value: {}".format(final_res))
return final_res
def complex_dot(a, b):
return tf.einsum('i,i...->...', a, b)
def debug_track_gradient():
DEBUG = False
convex_polygon_arr = old_helper.generate_target_polygon(max_edge=3)
# convex_polygon_tuple = old_helper.array_to_tuples(convex_polygon_arr)
convex_polygon_arr = tf.constant(convex_polygon_arr, dtype=tf.float64)
polygon_calculator_target = Fcalculator(points=convex_polygon_arr, debug=DEBUG)
dphi = 0.0001
har = 1.0 / 180.0 * np.pi # hole_half_angle_rad
mac = 1.0 / 180.0 * np.pi # max_angle_of_view_cut_rad
phi_array = np.concatenate((np.arange(0 + har, np.pi / 2 - mac, dphi),
np.arange(np.pi / 2 + har, np.pi - mac, dphi)))
polygon_scatter_res_target = polygon_calculator_target.F_of_phi(phi=phi_array)
convex_polygon_tensor =tf.Variable(convex_polygon_arr + np.random.uniform(-0.1, 0.1, convex_polygon_arr.shape))
optimizer = tf.keras.optimizers.RMSprop()
with tf.GradientTape() as tape:
polygon_calculator = Fcalculator(points=convex_polygon_tensor, debug=DEBUG)
polygon_scatter_res = polygon_calculator.F_of_phi(phi=phi_array)
loss = tf.keras.losses.mean_absolute_error(polygon_scatter_res_target, polygon_scatter_res)
tf.print(loss)
gradient = tape.gradient(loss, convex_polygon_tensor)
tf.print(gradient)
if __name__ == "__main__":
print("run main")
import model_fn.util_model_fn.custom_layers as c_layer
# debug_track_gradient()
# exit(0)
DEBUG = True
# DEBUG = False
if DEBUG:
logging.warning("DEBUG-MODE ON; GRAPH-MODE IS DISABLED!")
tf.config.experimental_run_functions_eagerly(run_eagerly=True)
t1 = time.time()
for target in range(100):
print(target)
convex_polygon_arr = old_helper.generate_target_polygon(max_edge=3)
convex_polygon_tuple = old_helper.array_to_tuples(convex_polygon_arr)
polygon_calculator = Fcalculator(points=convex_polygon_tuple, debug=DEBUG)
dphi = 0.0001
har = 1.0 / 180.0 * np.pi # hole_half_angle_rad
mac = 1.0 / 180.0 * np.pi # max_angle_of_view_cut_rad
phi_array = np.concatenate((np.arange(0 + har, np.pi / 2 - mac, dphi),
np.arange(np.pi / 2 + har, np.pi - mac, dphi)))
if not DEBUG:
phi_array = tf.cast(phi_array, dtype=tf.float64)
# polygon_scatter_res = np.array(
# [polygon_calculator.F_of_phi(phi=phi).astype(dtype=np.complex64) for phi in phi_array])
polygon_scatter_res = polygon_calculator.F_of_phi(phi=phi_array)
if isinstance(polygon_scatter_res, tf.Tensor):
polygon_scatter_res = polygon_scatter_res.numpy().astype(dtype=np.complex64)
else:
polygon_scatter_res = polygon_scatter_res.astype(dtype=np.complex64)
print("test reference", np.mean(polygon_scatter_res))
# print(phi_array.shape)
ScatterPolygonLayer1 = c_layer.ScatterPolygonTF(tf.expand_dims(phi_array, axis=0), with_batch_dim=False)
Res = ScatterPolygonLayer1(tf.constant(convex_polygon_arr, dtype=tf.float64))
# print("test Layer", np.mean(Res.numpy()))
print("test Layer", np.mean(Res[0].numpy()+ 1.0j * Res[1].numpy()))
phi_tf = tf.expand_dims(phi_array, axis=0)
fc_one = tf.concat((phi_tf, tf.zeros_like(phi_tf), tf.ones_like(phi_tf)), axis=0)
fc_one_b = tf.expand_dims(fc_one, axis=0)
fc_batch = tf.concat((fc_one_b, fc_one_b, fc_one_b, fc_one_b), axis=0)
convex_polygon_arr_b = tf.expand_dims(convex_polygon_arr, axis=0)
convex_polygon_arr_batch = tf.concat((convex_polygon_arr_b, convex_polygon_arr_b, convex_polygon_arr_b, convex_polygon_arr_b), axis=0)
ScatterPolygonLayerBatch1 = c_layer.ScatterPolygonTF(fc_batch, with_batch_dim=True)
Res_Batch = ScatterPolygonLayerBatch1(tf.constant(convex_polygon_arr_batch, dtype=tf.float64))
print("test BatchLayer", np.mean(Res_Batch[:, 0].numpy() + 1.0j * Res_Batch[:, 1].numpy()))
# print(convex_polygon_arr.shape)
# fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(9.5, 14))
# ax1.plot(phi_array, polygon_scatter_res.real, "+b", label="real_polygon")
# ax1.plot(phi_array, polygon_scatter_res.imag, "+r", label="imag_polygon")
# ax1.plot(phi_array, np.abs(polygon_scatter_res), "+y", label="abs_polygon")
# ax2.fill(convex_polygon_arr.transpose()[0], convex_polygon_arr.transpose()[1])
# ax2.set_xlim((-50, 50))
# ax2.set_ylim((-50, 50))
# ax2.set_aspect(aspect=1.0)
# plt.show()
print("Time: {:0.1f}".format(time.time()-t1))
| [
"tensorflow.einsum",
"numpy.abs",
"tensorflow.print",
"tensorflow.zeros_like",
"numpy.mean",
"numpy.arange",
"numpy.sin",
"tensorflow.complex",
"tensorflow.math.abs",
"tensorflow.keras.optimizers.RMSprop",
"tensorflow.keras.losses.mean_absolute_error",
"input_fn.input_fn_2d.data_gen_2dt.data_g... | [((235, 274), 'logging.getLogger', 'logging.getLogger', (['"""polygone_2d_helper"""'], {}), "('polygone_2d_helper')\n", (252, 274), False, 'import logging\n'), ((360, 381), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (379, 381), False, 'import logging\n'), ((386, 433), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(6)', 'suppress': '(True)'}), '(precision=6, suppress=True)\n', (405, 433), True, 'import numpy as np\n'), ((7256, 7286), 'tensorflow.einsum', 'tf.einsum', (['"""i,i...->..."""', 'a', 'b'], {}), "('i,i...->...', a, b)\n", (7265, 7286), True, 'import tensorflow as tf\n'), ((7362, 7408), 'input_fn.input_fn_2d.data_gen_2dt.data_gen_t2d_util.polygone_2d_helper.generate_target_polygon', 'old_helper.generate_target_polygon', ([], {'max_edge': '(3)'}), '(max_edge=3)\n', (7396, 7408), True, 'import input_fn.input_fn_2d.data_gen_2dt.data_gen_t2d_util.polygone_2d_helper as old_helper\n'), ((7510, 7559), 'tensorflow.constant', 'tf.constant', (['convex_polygon_arr'], {'dtype': 'tf.float64'}), '(convex_polygon_arr, dtype=tf.float64)\n', (7521, 7559), True, 'import tensorflow as tf\n'), ((8146, 8175), 'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', ([], {}), '()\n', (8173, 8175), True, 'import tensorflow as tf\n'), ((8933, 8944), 'time.time', 'time.time', ([], {}), '()\n', (8942, 8944), False, 'import time\n'), ((494, 510), 'numpy.array', 'np.array', (['(0.0001)'], {}), '(0.0001)\n', (502, 510), True, 'import numpy as np\n'), ((636, 674), 'tensorflow.constant', 'tf.constant', (['epsilon'], {'dtype': 'tf.float64'}), '(epsilon, dtype=tf.float64)\n', (647, 674), True, 'import tensorflow as tf\n'), ((757, 813), 'tensorflow.constant', 'tf.constant', (['[[0.0, 1.0], [-1.0, 0.0]]'], {'dtype': 'tf.float64'}), '([[0.0, 1.0], [-1.0, 0.0]], dtype=tf.float64)\n', (768, 813), True, 'import tensorflow as tf\n'), ((992, 1022), 'tensorflow.cast', 'tf.cast', (['phi'], 
{'dtype': 'tf.float64'}), '(phi, dtype=tf.float64)\n', (999, 1022), True, 'import tensorflow as tf\n'), ((1039, 1058), 'tensorflow.math.cos', 'tf.math.cos', (['phi_tf'], {}), '(phi_tf)\n', (1050, 1058), True, 'import tensorflow as tf\n'), ((1116, 1140), 'tensorflow.stack', 'tf.stack', (['[a__tf, b__tf]'], {}), '([a__tf, b__tf])\n', (1124, 1140), True, 'import tensorflow as tf\n'), ((1650, 1680), 'tensorflow.cast', 'tf.cast', (['p0_'], {'dtype': 'tf.float64'}), '(p0_, dtype=tf.float64)\n', (1657, 1680), True, 'import tensorflow as tf\n'), ((1697, 1727), 'tensorflow.cast', 'tf.cast', (['p1_'], {'dtype': 'tf.float64'}), '(p1_, dtype=tf.float64)\n', (1704, 1727), True, 'import tensorflow as tf\n'), ((1743, 1771), 'tensorflow.cast', 'tf.cast', (['q'], {'dtype': 'tf.float64'}), '(q, dtype=tf.float64)\n', (1750, 1771), True, 'import tensorflow as tf\n'), ((1787, 1815), 'tensorflow.cast', 'tf.cast', (['c'], {'dtype': 'tf.float64'}), '(c, dtype=tf.float64)\n', (1794, 1815), True, 'import tensorflow as tf\n'), ((1832, 1863), 'tensorflow.cast', 'tf.cast', (['c'], {'dtype': 'tf.complex128'}), '(c, dtype=tf.complex128)\n', (1839, 1863), True, 'import tensorflow as tf\n'), ((8185, 8202), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (8200, 8202), True, 'import tensorflow as tf\n'), ((8384, 8472), 'tensorflow.keras.losses.mean_absolute_error', 'tf.keras.losses.mean_absolute_error', (['polygon_scatter_res_target', 'polygon_scatter_res'], {}), '(polygon_scatter_res_target,\n polygon_scatter_res)\n', (8419, 8472), True, 'import tensorflow as tf\n'), ((8477, 8491), 'tensorflow.print', 'tf.print', (['loss'], {}), '(loss)\n', (8485, 8491), True, 'import tensorflow as tf\n'), ((8562, 8580), 'tensorflow.print', 'tf.print', (['gradient'], {}), '(gradient)\n', (8570, 8580), True, 'import tensorflow as tf\n'), ((8795, 8852), 'logging.warning', 'logging.warning', (['"""DEBUG-MODE ON; GRAPH-MODE IS DISABLED!"""'], {}), "('DEBUG-MODE ON; GRAPH-MODE IS DISABLED!')\n", (8810, 
8852), False, 'import logging\n'), ((8861, 8923), 'tensorflow.config.experimental_run_functions_eagerly', 'tf.config.experimental_run_functions_eagerly', ([], {'run_eagerly': '(True)'}), '(run_eagerly=True)\n', (8905, 8923), True, 'import tensorflow as tf\n'), ((9027, 9073), 'input_fn.input_fn_2d.data_gen_2dt.data_gen_t2d_util.polygone_2d_helper.generate_target_polygon', 'old_helper.generate_target_polygon', ([], {'max_edge': '(3)'}), '(max_edge=3)\n', (9061, 9073), True, 'import input_fn.input_fn_2d.data_gen_2dt.data_gen_t2d_util.polygone_2d_helper as old_helper\n'), ((9105, 9151), 'input_fn.input_fn_2d.data_gen_2dt.data_gen_t2d_util.polygone_2d_helper.array_to_tuples', 'old_helper.array_to_tuples', (['convex_polygon_arr'], {}), '(convex_polygon_arr)\n', (9131, 9151), True, 'import input_fn.input_fn_2d.data_gen_2dt.data_gen_t2d_util.polygone_2d_helper as old_helper\n'), ((10521, 10554), 'tensorflow.expand_dims', 'tf.expand_dims', (['phi_array'], {'axis': '(0)'}), '(phi_array, axis=0)\n', (10535, 10554), True, 'import tensorflow as tf\n'), ((10664, 10694), 'tensorflow.expand_dims', 'tf.expand_dims', (['fc_one'], {'axis': '(0)'}), '(fc_one, axis=0)\n', (10678, 10694), True, 'import tensorflow as tf\n'), ((10714, 10773), 'tensorflow.concat', 'tf.concat', (['(fc_one_b, fc_one_b, fc_one_b, fc_one_b)'], {'axis': '(0)'}), '((fc_one_b, fc_one_b, fc_one_b, fc_one_b), axis=0)\n', (10723, 10773), True, 'import tensorflow as tf\n'), ((10805, 10847), 'tensorflow.expand_dims', 'tf.expand_dims', (['convex_polygon_arr'], {'axis': '(0)'}), '(convex_polygon_arr, axis=0)\n', (10819, 10847), True, 'import tensorflow as tf\n'), ((10883, 10994), 'tensorflow.concat', 'tf.concat', (['(convex_polygon_arr_b, convex_polygon_arr_b, convex_polygon_arr_b,\n convex_polygon_arr_b)'], {'axis': '(0)'}), '((convex_polygon_arr_b, convex_polygon_arr_b, convex_polygon_arr_b,\n convex_polygon_arr_b), axis=0)\n', (10892, 10994), True, 'import tensorflow as tf\n'), ((11027, 11082), 
'model_fn.util_model_fn.custom_layers.ScatterPolygonTF', 'c_layer.ScatterPolygonTF', (['fc_batch'], {'with_batch_dim': '(True)'}), '(fc_batch, with_batch_dim=True)\n', (11051, 11082), True, 'import model_fn.util_model_fn.custom_layers as c_layer\n'), ((1075, 1094), 'tensorflow.math.sin', 'tf.math.sin', (['phi_tf'], {}), '(phi_tf)\n', (1086, 1094), True, 'import tensorflow as tf\n'), ((1183, 1214), 'numpy.array', 'np.array', (['phi'], {'dtype': 'np.float64'}), '(phi, dtype=np.float64)\n', (1191, 1214), True, 'import numpy as np\n'), ((1232, 1243), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1238, 1243), True, 'import numpy as np\n'), ((1295, 1313), 'numpy.array', 'np.array', (['[a_, b_]'], {}), '([a_, b_])\n', (1303, 1313), True, 'import numpy as np\n'), ((1591, 1611), 'tensorflow.complex', 'tf.complex', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1601, 1611), True, 'import tensorflow as tf\n'), ((1952, 2004), 'tensorflow.cast', 'tf.cast', (['[[0.0, -1.0], [1.0, 0.0]]'], {'dtype': 'tf.float64'}), '([[0.0, -1.0], [1.0, 0.0]], dtype=tf.float64)\n', (1959, 2004), True, 'import tensorflow as tf\n'), ((2996, 3031), 'numpy.array', 'np.array', (['(1.0j)'], {'dtype': 'np.complex128'}), '(1.0j, dtype=np.complex128)\n', (3004, 3031), True, 'import numpy as np\n'), ((3049, 3080), 'numpy.array', 'np.array', (['p0_'], {'dtype': 'np.float64'}), '(p0_, dtype=np.float64)\n', (3057, 3080), True, 'import numpy as np\n'), ((3098, 3129), 'numpy.array', 'np.array', (['p1_'], {'dtype': 'np.float64'}), '(p1_, dtype=np.float64)\n', (3106, 3129), True, 'import numpy as np\n'), ((3146, 3175), 'numpy.array', 'np.array', (['q'], {'dtype': 'np.float64'}), '(q, dtype=np.float64)\n', (3154, 3175), True, 'import numpy as np\n'), ((3198, 3221), 'numpy.array', 'np.array', (['[-q[1], q[0]]'], {}), '([-q[1], q[0]])\n', (3206, 3221), True, 'import numpy as np\n'), ((3238, 3249), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (3246, 3249), True, 'import numpy as np\n'), ((6361, 6389), 
'tensorflow.cast', 'tf.cast', (['c'], {'dtype': 'tf.float64'}), '(c, dtype=tf.float64)\n', (6368, 6389), True, 'import tensorflow as tf\n'), ((6634, 6673), 'tensorflow.zeros_like', 'tf.zeros_like', (['phi'], {'dtype': 'np.complex128'}), '(phi, dtype=np.complex128)\n', (6647, 6673), True, 'import tensorflow as tf\n'), ((7807, 7848), 'numpy.arange', 'np.arange', (['(0 + har)', '(np.pi / 2 - mac)', 'dphi'], {}), '(0 + har, np.pi / 2 - mac, dphi)\n', (7816, 7848), True, 'import numpy as np\n'), ((7880, 7925), 'numpy.arange', 'np.arange', (['(np.pi / 2 + har)', '(np.pi - mac)', 'dphi'], {}), '(np.pi / 2 + har, np.pi - mac, dphi)\n', (7889, 7925), True, 'import numpy as np\n'), ((8073, 8127), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)', 'convex_polygon_arr.shape'], {}), '(-0.1, 0.1, convex_polygon_arr.shape)\n', (8090, 8127), True, 'import numpy as np\n'), ((9587, 9623), 'tensorflow.cast', 'tf.cast', (['phi_array'], {'dtype': 'tf.float64'}), '(phi_array, dtype=tf.float64)\n', (9594, 9623), True, 'import tensorflow as tf\n'), ((10114, 10142), 'numpy.mean', 'np.mean', (['polygon_scatter_res'], {}), '(polygon_scatter_res)\n', (10121, 10142), True, 'import numpy as np\n'), ((10233, 10266), 'tensorflow.expand_dims', 'tf.expand_dims', (['phi_array'], {'axis': '(0)'}), '(phi_array, axis=0)\n', (10247, 10266), True, 'import tensorflow as tf\n'), ((10325, 10374), 'tensorflow.constant', 'tf.constant', (['convex_polygon_arr'], {'dtype': 'tf.float64'}), '(convex_polygon_arr, dtype=tf.float64)\n', (10336, 10374), True, 'import tensorflow as tf\n'), ((11129, 11184), 'tensorflow.constant', 'tf.constant', (['convex_polygon_arr_batch'], {'dtype': 'tf.float64'}), '(convex_polygon_arr_batch, dtype=tf.float64)\n', (11140, 11184), True, 'import tensorflow as tf\n'), ((1261, 1272), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1267, 1272), True, 'import numpy as np\n'), ((2077, 2117), 'tensorflow.math.abs', 'tf.math.abs', (['(q_tf[0] ** 2 + q_tf[1] ** 2)'], {}), 
'(q_tf[0] ** 2 + q_tf[1] ** 2)\n', (2088, 2117), True, 'import tensorflow as tf\n'), ((2156, 2189), 'tensorflow.cast', 'tf.cast', (['(1.0)'], {'dtype': 'tf.complex128'}), '(1.0, dtype=tf.complex128)\n', (2163, 2189), True, 'import tensorflow as tf\n'), ((2296, 2329), 'tensorflow.cast', 'tf.cast', (['(1.0)'], {'dtype': 'tf.complex128'}), '(1.0, dtype=tf.complex128)\n', (2303, 2329), True, 'import tensorflow as tf\n'), ((3276, 3305), 'numpy.abs', 'np.abs', (['(q[0] ** 2 + q[1] ** 2)'], {}), '(q[0] ** 2 + q[1] ** 2)\n', (3282, 3305), True, 'import numpy as np\n'), ((9414, 9455), 'numpy.arange', 'np.arange', (['(0 + har)', '(np.pi / 2 - mac)', 'dphi'], {}), '(0 + har, np.pi / 2 - mac, dphi)\n', (9423, 9455), True, 'import numpy as np\n'), ((9491, 9536), 'numpy.arange', 'np.arange', (['(np.pi / 2 + har)', '(np.pi - mac)', 'dphi'], {}), '(np.pi / 2 + har, np.pi - mac, dphi)\n', (9500, 9536), True, 'import numpy as np\n'), ((10591, 10612), 'tensorflow.zeros_like', 'tf.zeros_like', (['phi_tf'], {}), '(phi_tf)\n', (10604, 10612), True, 'import tensorflow as tf\n'), ((10614, 10634), 'tensorflow.ones_like', 'tf.ones_like', (['phi_tf'], {}), '(phi_tf)\n', (10626, 10634), True, 'import tensorflow as tf\n'), ((11896, 11907), 'time.time', 'time.time', ([], {}), '()\n', (11905, 11907), False, 'import time\n'), ((1356, 1390), 'tensorflow.math.abs', 'tf.math.abs', (['(q[0] ** 2 + q[1] ** 2)'], {}), '(q[0] ** 2 + q[1] ** 2)\n', (1367, 1390), True, 'import tensorflow as tf\n'), ((3733, 3767), 'numpy.array', 'np.array', (['(1.0)'], {'dtype': 'np.complex128'}), '(1.0, dtype=np.complex128)\n', (3741, 3767), True, 'import numpy as np\n'), ((3823, 3857), 'numpy.array', 'np.array', (['(1.0)'], {'dtype': 'np.complex128'}), '(1.0, dtype=np.complex128)\n', (3831, 3857), True, 'import numpy as np\n'), ((4121, 4136), 'numpy.dot', 'np.dot', (['p0p1', 'q'], {}), '(p0p1, q)\n', (4127, 4136), True, 'import numpy as np\n'), ((6292, 6322), 'tensorflow.cast', 'tf.cast', (['(0.0)'], {'dtype': 
'tf.float64'}), '(0.0, dtype=tf.float64)\n', (6299, 6322), True, 'import tensorflow as tf\n'), ((4751, 4766), 'numpy.dot', 'np.dot', (['p0p1', 'q'], {}), '(p0p1, q)\n', (4757, 4766), True, 'import numpy as np\n'), ((3783, 3796), 'numpy.dot', 'np.dot', (['p0', 'q'], {}), '(p0, q)\n', (3789, 3796), True, 'import numpy as np\n'), ((3873, 3886), 'numpy.dot', 'np.dot', (['p1', 'q'], {}), '(p1, q)\n', (3879, 3886), True, 'import numpy as np\n'), ((4051, 4072), 'numpy.dot', 'np.dot', (['p0p1', 'q_cross'], {}), '(p0p1, q_cross)\n', (4057, 4072), True, 'import numpy as np\n'), ((4202, 4223), 'numpy.dot', 'np.dot', (['p0p1', 'q_cross'], {}), '(p0p1, q_cross)\n', (4208, 4223), True, 'import numpy as np\n'), ((4279, 4292), 'numpy.dot', 'np.dot', (['p0', 'q'], {}), '(p0, q)\n', (4285, 4292), True, 'import numpy as np\n')] |
# Import basic packages
import pandas as pd
import numpy as np
# import plot packages
import matplotlib.pyplot as plt
import gspplot
# Import graph packages
import gsp
import pygsp
# Import pytorch packages
import torch
import torch.nn as nn
import torch.optim as optim
# Import other packages
import os
def set_ground_truth(patch, threshold, f, figsize=(6, 6)):
    """
    Build the ground-truth edge labels for a patch and visualize them.
    Attributes:
        - patch     : A patch cropped from the cloud
        - threshold : Threshold to extract roof points
        - f         : Filter (graph spectral filter, e.g. a pygsp filter)
        - figsize   : Figsize of visualization
    Return:
        - df2       : DataFrame added the ground truth column
        - edge_roof : Estimated points of edge roof
    """
    df2 = patch.copy()
    df2.reset_index(drop=False, inplace=True)
    # Indicator signal to be filtered: 1 on roof (label 5) and ground (label 0).
    roof_or_ground = (df2['label'] == 5) | (df2['label'] == 0)
    df2['is_building'] = roof_or_ground.astype(int)
    # Run the graph filter over the indicator signal.
    signal_roof = f.filter(df2.is_building, method='chebyshev')
    if signal_roof.ndim == 1:
        edge_roof = signal_roof >= threshold
    else:
        # Multi-band filter output: threshold the last band only.
        edge_roof = signal_roof[:, -1] >= threshold
    # Drop false positives: highlighted points whose true label is not roof.
    candidates = df2[edge_roof]
    edge_roof[candidates[candidates['label'] != 5].index] = False
    df2['is_building'] = df2['label'] == 5
    df2['is_edge'] = edge_roof.astype(int)
    # Visualize
    fig, ax = plt.subplots(figsize=figsize)
    gspplot.plot_vaihingen_2D(patch,
                              id_highlight=np.nonzero(edge_roof)[0],
                              label_high='Edges',
                              ax=ax,
                              title="Ground Truth")
    return df2, edge_roof
def comp_df(patch, edge_dict, labels, normalize=True, **kwargs):
    """
    Calculate the label composition of the points highlighted by each mask.
    Attributes:
        - patch     : DataFrame of points; must contain a 'label' column
        - edge_dict : Mapping from detector name to a boolean mask over the
                      rows of `patch` (or, when `num` is passed, to a sequence
                      of such masks indexed by `num`)
        - labels    : Dict mapping label id -> readable name; its values
                      become the columns of the result
        - normalize : If True, report per-mask fractions instead of raw counts
        - num       : Optional keyword selecting `edge_dict[key][num]` as mask
    Return:
        - df        : DataFrame with one row per edge_dict key (renamed to the
                      part after the first underscore), one column per label
    """
    # `kwargs` is always a dict (possibly empty), never None; the previous
    # `if kwargs is not None:` guard was dead code and, had it ever been
    # skipped, `num` would have been unbound.  Query the dict directly.
    num = kwargs.get('num')
    n_labels = len(patch.label.unique())
    data = []
    for key in edge_dict:
        mask = edge_dict[key] if num is None else edge_dict[key][num]
        counts = patch[mask].label.value_counts(normalize=normalize)
        # Ensure every label id 0..n_labels-1 is present, filling absentees
        # with 0 so all rows share the same column layout.
        for j in range(n_labels):
            if j not in counts.index:
                counts.loc[j] = 0
        data.append(list(counts.sort_index()))
    df = pd.DataFrame(data=data, columns=list(labels.values()))
    # Rename the numeric row index to the detector name after its first '_'.
    new_index = {i: key[key.find('_') + 1:] for i, key in enumerate(edge_dict)}
    df.rename(index=new_index, inplace=True)
    return df
def qua_comp_df(df, df2, edge_dict, edge_roof, **kwargs):
    """
    Add 'Total', 'Precision', 'Recall_GT' and 'Recall_roof' columns to df.
    Attributes:
        - df        : DataFrame with one row per edge_dict entry (same order)
        - df2       : DataFrame with 'is_building', 'is_edge' and 'label'
                      columns (label 5 marks roofs)
        - edge_dict : Dict mapping names to boolean masks over df2
        - edge_roof : Unused; kept only for backward compatibility of the
                      call signature
    Keyword arguments:
        - num       : Optional sub-key selecting a mask inside each entry
    Return:
        - df        : df with the new columns, sorted by 'Recall_GT' descending
    """
    # `kwargs` is always a dict, so `if kwargs is not None:` was always
    # true; use .get() instead.
    num = kwargs.get('num')
    masks = [edge_dict[k] if num is None else edge_dict[k][num]
             for k in edge_dict]
    total = [np.sum(m) for m in masks]
    total_roof = [np.sum(df2.loc[m, 'is_building']) for m in masks]
    total_edge = [np.sum(df2.loc[m, 'is_edge']) for m in masks]
    df['Total'] = total
    # Precision: fraction of highlighted points that are ground-truth edges.
    df['Precision'] = [i/j for i, j in zip(total_edge, total)]
    # Recall against all ground-truth edges / all roof points (label == 5).
    df['Recall_GT'] = [i/np.sum(df2['is_edge']) for i in total_edge]
    df['Recall_roof'] = [i/np.sum(df2['label'] == 5) for i in total_roof]
    return df.sort_values(by='Recall_GT', ascending=False)
def recover_df(edge_dict, df3, labels2, normalize=True, **kwargs):
    """
    Build a composition table with a ground-truth 'total' row appended,
    transposed so labels become rows.
    Attributes:
        - edge_dict : Dict mapping names to boolean masks over df3
        - df3       : DataFrame with a 'label' column
        - labels2   : Dict mapping label ids to display names
        - normalize : Whether to normalize the two bp_30nn columns by 'total'
    Keyword arguments:
        - num       : Optional sub-key selecting a mask inside each entry
    Return:
        - tmp       : Transposed DataFrame (one row per label plus 'sum')
    """
    num = kwargs.get('num')
    # Per-label counts over the whole patch: the ground-truth totals row.
    s = df3.replace({"label": labels2}).label.value_counts()
    if num is None:
        comp = comp_df(df3, edge_dict, labels2, normalize=False)
    else:
        comp = comp_df(df3, edge_dict, labels2, normalize=False, num=num)
    # DataFrame.append was removed in pandas 2.0; concatenate the totals
    # row instead. Naming the series 'total' directly replaces the old
    # rename(index={'label': 'total'}) step.
    tmp = pd.concat([comp, s.rename('total').to_frame().T])
    tmp['sum'] = tmp.sum(axis=1)
    tmp = tmp.T
    if normalize:
        tmp['bp_30nn_Binary'] = tmp['bp_30nn_Binary'] / tmp['total']
        tmp['bp_30nn_Local'] = tmp['bp_30nn_Local'] / tmp['total']
    return tmp
def load_G_30nn_Binary(path_e, path_U, path_L, patch):
    """
    Load (or build and cache) a 30-nearest-neighbour binary graph and its
    eigendecomposition.

    The three matrices are cached on disk; the graph is only rebuilt with
    ``gsp.knn_graph`` when at least one of the cache files is missing.

    Attributes:
        - path_e : Path to the cached eigen-value array (.npy)
        - path_U : Path to the cached eigen-vector matrix (.npy)
        - path_L : Path to the cached Laplacian matrix (.npy, pickled object)
        - patch  : A patch cropped from the cloud; only its x/y/z columns
                   are used to build the graph
    Return:
        - G_30nn_Binary_e : Eigen-value array of the 30nn binary graph
        - G_30nn_Binary_U : Eigen-vector matrix of the 30nn binary graph
        - G_30nn_Binary_L : Laplacian matrix of the 30nn binary graph
    """
    # Load matrices if they exist
    if os.path.exists(path_e) and os.path.exists(path_U) and os.path.exists(path_L):
        # .item() unwraps the pickled object stored by np.save (the
        # Laplacian is an object, presumably a scipy sparse matrix —
        # TODO confirm against gsp.knn_graph).
        G_30nn_Binary_e = np.load(path_e)
        G_30nn_Binary_U = np.load(path_U)
        G_30nn_Binary_L = np.load(path_L, allow_pickle=True).item()
    else:
        # l2 (second return value of knn_graph) is intentionally unused here.
        G_30nn_Binary, l2 = gsp.knn_graph(cloud=patch[['x', 'y', 'z']],
                                          k=30,
                                          dist3D=False,
                                          mode='connectivity',
                                          neightype='number',
                                          lap_type='combinatorial',
                                          norm=True)
        # Compute Eigen-value, Eigen-vector (only the missing pieces are
        # recomputed; the rest are reloaded from cache).
        if not os.path.exists(path_e) or not os.path.exists(path_U):
            G_30nn_Binary.compute_fourier_basis(recompute=False)
            G_30nn_Binary_e, G_30nn_Binary_U = G_30nn_Binary.e, G_30nn_Binary.U
            np.save(path_U, G_30nn_Binary_U)
            np.save(path_e, G_30nn_Binary_e)
        else:
            G_30nn_Binary_e = np.load(path_e)
            G_30nn_Binary_U = np.load(path_U)
        # Compute Laplacian matrices
        if not os.path.exists(path_L):
            G_30nn_Binary.compute_laplacian('combinatorial')
            G_30nn_Binary_L = G_30nn_Binary.L
            np.save(path_L, G_30nn_Binary_L)
        else:
            G_30nn_Binary_L = np.load(path_L, allow_pickle=True).item()
    return G_30nn_Binary_e, G_30nn_Binary_U, G_30nn_Binary_L
def generate_data_non_parametric(U, patch):
    """
    Generate input data to learn a non parametric filter.
    Attributes:
        - U     : Eigen-vector matrix, shape (n, n) with n = len(patch)
        - patch : Aimed patch; only its `z` column is used as the signal
    Return:
        - train_input : float32 tensor of shape (n, n); column i holds
                        U[:, i] * <U[:, i], z>
    """
    # Vectorized form of the original per-column loop
    #   train_input[:, i] = U[:, i] * (U[:, i] @ z)
    # since (z @ U)[i] == U[:, i] @ z, one broadcast builds all columns.
    z = np.asarray(patch.z)
    train_input = U * (z @ U)[np.newaxis, :]
    return torch.from_numpy(train_input).float()
def train_non_parametric_filter(nb_epochs, train_input, train_target, e, edge, f, gamma=1e-6, alter_thresh = False):
    """
    Training process to learn a non parametric filter by manual gradient
    descent on the spectral coefficients (Omega), clamped to be non-negative.

    Attributes:
        - nb_epochs    : Number of epochs to train
        - train_input  : Initial filtered signal (tensor, one row per node)
        - train_target : Binary targets labeling node class
        - e            : Eigen values (numpy array)
        - edge         : Initial edge prediction used for the initial error
        - f            : Filter used to initialize the coefficients
                         (its `evaluate(e)[1, :]` row is the starting Omega)
        - gamma        : Learning rate
        - alter_thresh : Whether to optimize the threshold
                         (NOTE(review): currently unused in this function)
    Return:
        - w                : Optimized Omega coefficients
        - train_error_list : List of training error (percent), per epoch
        - loss_list        : List of training loss, per epoch
    """
    def compute_error_loss(signal):
        """
        Compute the current train error (percent of misclassified nodes)
        and total BCE loss; reads `w`, `t` and `l` from the enclosing scope.
        """
        error = 0
        for i,j in zip(signal, train_target):
            if int(i) != int(j):
                error += 1
        error = 100 * error / train_input.size(0)
        loss = float(l(torch.sigmoid(train_input.mv(w) - t), train_target)) * train_input.shape[0]
        return error, loss
    # Pretty-print helper: centers `s` in a dashed banner of width `l`.
    strMargin = lambda s, l=50, sep=' ': int((l-len(s))/2)*'-' + sep + s + sep + (l-int((l-len(s))/2)-len(s))*'-'
    print(strMargin('Train non Parametric Filter'))
    # Initialization
    initial_error = 0
    loss_list, train_error_list = [], []
    # Initialize polynomial coefficients: Omega
    w = torch.from_numpy(f.evaluate(e)[1, :]).float()
    w.requires_grad = True
    # Initialize threshold (fixed; not optimized here)
    t = torch.tensor([0.2], requires_grad = True)
    # Define loss
    l = nn.BCELoss()
    # Compute initial train error and loss
    initial_error, initial_loss = compute_error_loss(edge)
    train_error_list.append(initial_error)
    loss_list.append(initial_loss)
    # Start training
    print("Start training...")
    for epoch in range(nb_epochs):
        for n in range(train_input.size(0)):
            x_tr, y_tr = train_input[n], train_target[n]
            output = torch.sigmoid(x_tr.view(1, -1).mv(w) - t)
            loss = l(output, y_tr)
            loss.backward()
            # Manual SGD step: rebinding `w` inside no_grad() creates a new
            # leaf tensor, so requires_grad must be re-enabled afterwards.
            with torch.no_grad():
                w = w - gamma * w.grad
                w = torch.clamp(w, min=0)
            w.requires_grad = True
        # Append new train error rate and new loss
        error, loss = compute_error_loss((train_input.mv(w) - t)>=0)
        train_error_list.append(error)
        loss_list.append(loss)
        print('=> Epoch: {}/{}\tError: {:.4f}\t Loss: {:.4f}\t'.format(epoch+1, nb_epochs, error, loss), end='\r')
    print('\n=> Done.')
    print(strMargin('',sep='-'))
    return w, train_error_list, loss_list
def generate_data_mexican_hat(U, patch):
    """
    Generate input data to learn a mexican hat filter.
    Attributes:
        - U     : Eigen-vector matrix, shape (n, n) with n = len(patch)
        - patch : Aimed patch; only its `z` column is used as the signal
    Return:
        - train_input : float32 tensor of shape (n, n); column i holds
                        U[:, i] * <U[:, i], z>
    """
    # Vectorized form of the original per-column loop
    #   train_input[:, i] = U[:, i] * (U[:, i] @ z)
    # since (z @ U)[i] == U[:, i] @ z, one broadcast builds all columns.
    z = np.asarray(patch.z)
    train_input = U * (z @ U)[np.newaxis, :]
    return torch.from_numpy(train_input).float()
def train_mexican_hat(nb_epochs, train_input, train_target, e, signal, edge, alter_thresh = False, initial_tau=1.5, gamma=1e-4, breakDetect=True):
    """
    Training process to learn a mexican hat filter: the kernel is
    g(e) = e * tau * exp(-e * tau) and only `tau` (and optionally the
    threshold `t`) is optimized with SGD.

    Attributes:
        - nb_epochs    : Number of epochs to train
        - train_input  : Initial filtered signal (tensor, one row per node)
        - train_target : Binary targets labeling node class
        - e            : Eigen values (numpy array; converted to tensor)
        - signal       : Signal; its last column gives the initial prediction
        - edge         : Initial edge prediction used for the initial error
        - alter_thresh : Learning rate for the threshold when truthy,
                         otherwise the threshold is frozen (lr 0)
        - initial_tau  : Initial tau
        - gamma        : Learning rate for tau
        - breakDetect  : Stop early when the per-epoch loss improvement
                         drops below 0.1
    Return:
        - tau               : Optimized tau coefficient
        - t                 : Optimized threshold
        - w2                : Kernel values e*tau*exp(-e*tau) from the last
                              training step (NOTE(review): unbound if
                              nb_epochs == 0 or train_input is empty)
        - train_error_list2 : List of training error (percent), per epoch
        - loss_list2        : List of training loss, per epoch
    """
    def compute_error_loss(edge, signal):
        """
        Compute the current train error (percent) and total BCE loss.
        The parameters shadow the outer `edge`/`signal` on purpose; `l`
        and `t` are read from the enclosing scope.
        """
        error = 0
        for i,j in zip(edge, train_target):
            if int(i) != int(j):
                error += 1
        error = 100 * error / train_input.size(0)
        loss = float(l(torch.sigmoid(signal.float() - t), train_target)) * train_input.shape[0]
        return error, loss
    # Pretty-print helper: centers `s` in a dashed banner of width `l`.
    strMargin = lambda s, l=50, sep=' ': int((l-len(s))/2)*'-' + sep + s + sep + (l-int((l-len(s))/2)-len(s))*'-'
    print(strMargin('Train Mexican Hat Kernel'))
    # Initialization
    initial_error, e = 0, torch.from_numpy(e).float()
    loss_list2, train_error_list2 = [], []
    # Initialize tau
    tau = torch.tensor([initial_tau], requires_grad = True)
    # Initialize threshold
    t = torch.tensor([0.2], requires_grad = True)
    # Define optimizer (threshold lr is 0 unless alter_thresh is truthy)
    lr_t = alter_thresh if alter_thresh else 0
    optimizer = optim.SGD([{'params': tau, 'lr': gamma},
                           {'params': t, 'lr': lr_t}])
    # Define loss
    l = nn.BCELoss()
    # Calculate initial loss and initial train error
    initial_error, initial_loss = compute_error_loss(edge, torch.from_numpy(signal[:, -1]))
    loss_list2.append(initial_loss)
    train_error_list2.append(initial_error)
    # Start training
    print("Start training...")
    for epoch in range(nb_epochs):
        for n in range(train_input.size(0)):
            x_tr, y_tr = train_input[n], train_target[n]
            # Recompute the kernel from the current tau for every sample.
            w2 = e * tau * torch.exp(- e * tau)
            optimizer.zero_grad()
            output = torch.sigmoid(x_tr.view(1, -1).mv(w2) - t)
            loss = l(output, y_tr)
            loss.backward()
            optimizer.step()
        result = train_input.mv(e*tau*torch.exp(-e*tau))
        error, loss = compute_error_loss(result-t>=0, result)
        loss_list2.append(loss)
        train_error_list2.append(error)
        print('=> Epoch: {}/{}\tError: {:.4f}\t Loss: {:.4f}\t'.format(epoch+1, nb_epochs, error, loss), end='\r')
        # Early stop when the loss improvement over the last epoch is small.
        if breakDetect:
            if loss_list2[-2] - loss_list2[-1]<0.1:
                break
    print('\n=> Done.')
    print(strMargin('',sep='-'))
    return tau, t, w2, train_error_list2, loss_list2
def build_poly(patch, L, k):
    """
    Build polynomial terms (L z, L^2 z, ..., L^k z) of the Laplacian.
    Attributes:
        - patch : A patch cropped from the cloud; its `z` column is the signal
        - L     : Laplacian matrix (n x n)
        - k     : The highest order
    Return:
        - train_input : 1-D array (L z) when k <= 1, otherwise an (n, k)
                        array whose column i-1 holds L^i z
    """
    feature = L @ patch.z
    if k <= 1:
        # Preserve the original 1-D shape for a single term.
        return feature
    # Collect the columns once and stack at the end instead of repeatedly
    # copying the growing array with np.c_ (which is O(k^2) in copies).
    cols = [feature]
    for _ in range(k - 1):
        feature = L @ feature
        cols.append(feature)
    return np.column_stack(cols)
def poly_e(e, k):
    """
    Build polynomial terms (e, e^2, ..., e^k) of the eigen-values.
    Attributes:
        - e : Eigen-value array
        - k : The highest order
    Return:
        - result : `e` itself when k <= 1, otherwise an (n, k) array whose
                   column p-1 holds e**p
    """
    if k <= 1:
        return e
    return np.column_stack([e ** power for power in range(1, k + 1)])
def generate_data_poly(patch, L, k):
    """
    Generate input data to learn a polynomial filter.
    Attributes:
        - patch : A patch cropped from the cloud
        - L     : Laplacian matrix
        - k     : The highest order
    Return:
        - train_input : Polynomialized terms as a float32 tensor
    """
    return torch.from_numpy(build_poly(patch, L, k)).float()
def train_polynomial_kernel(nb_epochs, train_input3, train_target, k, e, f, gamma=1e-4, alter_thresh=False, scheduler_flag=False, scheduler_step=10, scheduler_gamma=0.5):
    """
    Training process to learn a Polynomial Kernel: the coefficients `alpha`
    of the k polynomial terms (and optionally the threshold) are optimized
    with SGD, starting from a least-squares fit to the reference filter.

    Attributes:
        - nb_epochs       : Number of epochs to train
        - train_input3    : Initial filtered signal (tensor, (n, k))
        - train_target    : Binary targets labeling node class
        - k               : The highest polynomial order
        - e               : Eigen values (numpy array)
        - f               : Filter used to initialize `alpha`
        - gamma           : Learning rate of alpha
        - alter_thresh    : Learning rate for the threshold when truthy,
                            otherwise the threshold is frozen (lr 0)
        - scheduler_flag  : Whether to use a StepLR scheduler
        - scheduler_step  : Scheduler step size (epochs)
        - scheduler_gamma : Scheduler decay factor
    Return:
        - x                 : Polynomialized terms of the eigen-values
        - alpha             : Optimized alpha coefficients
        - t3                : Optimized threshold
        - train_error_list3 : List of training error (percent), per epoch
        - loss_list3        : List of training loss, per epoch
    """
    def compute_error_loss():
        """
        Compute the current train error (percent) and total BCE loss;
        reads `alpha`, `t3` and `l` from the enclosing scope.
        """
        error = 0
        f_hat = train_input3.mv(alpha)
        for i,j in zip((f_hat - t3) > 0, train_target):
            if int(i) != int(j):
                error += 1
        error = 100 * error / train_input3.size(0)
        loss = float(l(torch.sigmoid(f_hat - t3), train_target)) * train_input3.shape[0]
        return error, loss
    # Pretty-print helper: centers `s` in a dashed banner of width `l`.
    strMargin = lambda s, l=50, sep=' ': int((l-len(s))/2)*'-' + sep + s + sep + (l-int((l-len(s))/2)-len(s))*'-'
    print(strMargin('Train Polynomial Kernel'))
    print("Entire epochs: {}\tHeightest order k: {}".format(nb_epochs, k) )
    # Initialization
    initial_error = 0
    loss_list3, train_error_list3 = [], []
    # Initialize polynomial coefficients: alpha, as the least-squares fit of
    # the polynomial basis to the reference filter response.
    # rcond=None opts into the modern conditioning default and silences the
    # NumPy FutureWarning emitted when rcond is omitted.
    x, y = poly_e(e, k), f.evaluate(e)[1, :]
    alpha = torch.from_numpy(np.linalg.lstsq(x, y, rcond=None)[0]).float()
    alpha.requires_grad = True
    # Initialize threshold
    t3 = torch.tensor([0.2], requires_grad = True)
    # Define optimizer (threshold lr is 0 unless alter_thresh is truthy)
    lr_t = 0 if not alter_thresh else alter_thresh
    optimizer = torch.optim.SGD([{'params': alpha, 'lr': gamma},
                                 {'params': t3, 'lr': lr_t}])
    if scheduler_flag:
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=scheduler_step, gamma=scheduler_gamma)
    # Define loss
    l = nn.BCELoss()
    # Compute original train error and loss
    initial_error, initial_loss = compute_error_loss()
    train_error_list3.append(initial_error)
    loss_list3.append(initial_loss)
    # Start training
    print("Start training...")
    for epoch in range(nb_epochs):
        for n in range(train_input3.size(0)):
            x_tr, y_tr = train_input3[n], train_target[n]
            output = torch.sigmoid(x_tr.view(1, -1).mv(alpha) - t3)
            loss = l(output, y_tr)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        if scheduler_flag:
            scheduler.step()
        # Append train error and loss after each epoch
        error, loss = compute_error_loss()
        train_error_list3.append(error)
        loss_list3.append(loss)
        print('=> Epoch: {}/{}\tError: {:.4f}\t Loss: {:.4f}\t'.format(epoch+1, nb_epochs, error, loss), end='\r')
    print('\n=> Done.')
    print(strMargin('',sep='-'))
    return x, alpha, t3, train_error_list3, loss_list3
def type_error_tree(df):
    """
    Calculate type 1 and type 2 error rates (percent) for tree detection.
    Attributes:
        - df : DataFrame with 'Tree' and 'sum' rows and a 'total' column
    Return:
        - df : The same DataFrame with two error rows appended; the 'total'
               entries of those rows are set to '/' and all other columns
               are cast to int
    """
    tree, found = df.loc['Tree'], df.loc['sum']
    # Type 1: fraction of highlighted points that are NOT trees.
    df.loc['type_1 error'] = (1 - tree / found) * 100
    # Type 2: missed trees relative to all non-highlighted points.
    df.loc['type_2 error'] = (tree['total'] - tree) / (found['total'] - found) * 100
    df.loc['type_1 error', 'total'] = '/'
    df.loc['type_2 error', 'total'] = '/'
    df.iloc[:, :-1] = df.iloc[:, :-1].astype(int)
    return df
def type_error_roof(df):
    """
    Calculate type 1 and type 2 error rates (percent) for roof-edge detection.
    Attributes:
        - df : DataFrame with 'Edge' and 'sum' rows and a 'total' column
    Return:
        - df : The same DataFrame with two error rows appended; the 'total'
               entries of those rows are set to '/' and all other columns
               are cast to int
    """
    edge, found = df.loc['Edge'], df.loc['sum']
    # Type 1: fraction of highlighted points that are NOT edges.
    df.loc['type_1 error'] = (1 - edge / found) * 100
    # Type 2: missed edges relative to all non-highlighted points.
    df.loc['type_2 error'] = (edge['total'] - edge) / (found['total'] - found) * 100
    df.loc['type_1 error', 'total'] = '/'
    df.loc['type_2 error', 'total'] = '/'
    df.iloc[:, :-1] = df.iloc[:, :-1].astype(int)
    return df
def type_error_tree_roof(df):
    """
    Calculate type 1 and type 2 error rates (percent) for both roof-edge
    ('roof' column, 'Edge' row) and tree ('tree' column, 'Tree' row)
    detection, writing them into two new rows of `df`.

    The 'total' entries of the new rows are set to '/' and all non-'total'
    columns are cast to int. Returns the mutated DataFrame.
    """
    # Type 1: fraction of highlighted points that are not true positives.
    df.loc['type_1 error','roof'] = (1 - df.loc['Edge','roof']/df.loc[ 'sum','roof'])*100
    # Type 2: missed positives relative to all non-highlighted points.
    df.loc['type_2 error','roof'] = (df.loc['Edge','total']-df.loc['Edge','roof'])/ \
                                    (df.loc['sum','total']-df.loc['sum','roof'])*100
    df.loc['type_1 error','tree'] = (1 - df.loc['Tree','tree']/df.loc[ 'sum','tree'])*100
    df.loc['type_2 error','tree'] = (df.loc['Tree','total']-df.loc['Tree','tree'])/ \
                                    (df.loc['sum','total']-df.loc['sum','tree'])*100
    df.loc['type_1 error', 'total'] = '/'
    df.loc['type_2 error','total'] = '/'
    df.iloc[:, :-1] = df.iloc[:, :-1].astype(int)
return df | [
"numpy.load",
"gsp.knn_graph",
"torch.from_numpy",
"torch.nn.BCELoss",
"torch.optim.lr_scheduler.StepLR",
"numpy.sum",
"numpy.save",
"numpy.linalg.lstsq",
"numpy.zeros",
"os.path.exists",
"numpy.nonzero",
"torch.exp",
"torch.sigmoid",
"torch.clamp",
"torch.no_grad",
"matplotlib.pyplot.... | [((1558, 1587), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1570, 1587), True, 'import matplotlib.pyplot as plt\n'), ((7907, 7931), 'numpy.zeros', 'np.zeros', (['[n_dim, n_dim]'], {}), '([n_dim, n_dim])\n', (7915, 7931), True, 'import numpy as np\n'), ((9870, 9909), 'torch.tensor', 'torch.tensor', (['[0.2]'], {'requires_grad': '(True)'}), '([0.2], requires_grad=True)\n', (9882, 9909), False, 'import torch\n'), ((9943, 9955), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (9953, 9955), True, 'import torch.nn as nn\n'), ((11397, 11421), 'numpy.zeros', 'np.zeros', (['[n_dim, n_dim]'], {}), '([n_dim, n_dim])\n', (11405, 11421), True, 'import numpy as np\n'), ((13431, 13478), 'torch.tensor', 'torch.tensor', (['[initial_tau]'], {'requires_grad': '(True)'}), '([initial_tau], requires_grad=True)\n', (13443, 13478), False, 'import torch\n'), ((13523, 13562), 'torch.tensor', 'torch.tensor', (['[0.2]'], {'requires_grad': '(True)'}), '([0.2], requires_grad=True)\n', (13535, 13562), False, 'import torch\n'), ((13656, 13724), 'torch.optim.SGD', 'optim.SGD', (["[{'params': tau, 'lr': gamma}, {'params': t, 'lr': lr_t}]"], {}), "([{'params': tau, 'lr': gamma}, {'params': t, 'lr': lr_t}])\n", (13665, 13724), True, 'import torch.optim as optim\n'), ((13785, 13797), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (13795, 13797), True, 'import torch.nn as nn\n'), ((18413, 18452), 'torch.tensor', 'torch.tensor', (['[0.2]'], {'requires_grad': '(True)'}), '([0.2], requires_grad=True)\n', (18425, 18452), False, 'import torch\n'), ((18550, 18627), 'torch.optim.SGD', 'torch.optim.SGD', (["[{'params': alpha, 'lr': gamma}, {'params': t3, 'lr': lr_t}]"], {}), "([{'params': alpha, 'lr': gamma}, {'params': t3, 'lr': lr_t}])\n", (18565, 18627), False, 'import torch\n'), ((18820, 18832), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (18830, 18832), True, 'import torch.nn as nn\n'), ((5999, 6021), 
'os.path.exists', 'os.path.exists', (['path_e'], {}), '(path_e)\n', (6013, 6021), False, 'import os\n'), ((6026, 6048), 'os.path.exists', 'os.path.exists', (['path_U'], {}), '(path_U)\n', (6040, 6048), False, 'import os\n'), ((6053, 6075), 'os.path.exists', 'os.path.exists', (['path_L'], {}), '(path_L)\n', (6067, 6075), False, 'import os\n'), ((6103, 6118), 'numpy.load', 'np.load', (['path_e'], {}), '(path_e)\n', (6110, 6118), True, 'import numpy as np\n'), ((6145, 6160), 'numpy.load', 'np.load', (['path_U'], {}), '(path_U)\n', (6152, 6160), True, 'import numpy as np\n'), ((6276, 6422), 'gsp.knn_graph', 'gsp.knn_graph', ([], {'cloud': "patch[['x', 'y', 'z']]", 'k': '(30)', 'dist3D': '(False)', 'mode': '"""connectivity"""', 'neightype': '"""number"""', 'lap_type': '"""combinatorial"""', 'norm': '(True)'}), "(cloud=patch[['x', 'y', 'z']], k=30, dist3D=False, mode=\n 'connectivity', neightype='number', lap_type='combinatorial', norm=True)\n", (6289, 6422), False, 'import gsp\n'), ((13915, 13946), 'torch.from_numpy', 'torch.from_numpy', (['signal[:, -1]'], {}), '(signal[:, -1])\n', (13931, 13946), False, 'import torch\n'), ((18707, 18797), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': 'scheduler_step', 'gamma': 'scheduler_gamma'}), '(optimizer, step_size=scheduler_step, gamma=\n scheduler_gamma)\n', (18732, 18797), True, 'import torch.optim as optim\n'), ((3628, 3648), 'numpy.sum', 'np.sum', (['edge_dict[i]'], {}), '(edge_dict[i])\n', (3634, 3648), True, 'import numpy as np\n'), ((3698, 3742), 'numpy.sum', 'np.sum', (["df2.loc[edge_dict[i], 'is_building']"], {}), "(df2.loc[edge_dict[i], 'is_building'])\n", (3704, 3742), True, 'import numpy as np\n'), ((3792, 3832), 'numpy.sum', 'np.sum', (["df2.loc[edge_dict[i], 'is_edge']"], {}), "(df2.loc[edge_dict[i], 'is_edge'])\n", (3798, 3832), True, 'import numpy as np\n'), ((3887, 3912), 'numpy.sum', 'np.sum', (['edge_dict[i][num]'], {}), '(edge_dict[i][num])\n', (3893, 3912), 
True, 'import numpy as np\n'), ((3962, 4011), 'numpy.sum', 'np.sum', (["df2.loc[edge_dict[i][num], 'is_building']"], {}), "(df2.loc[edge_dict[i][num], 'is_building'])\n", (3968, 4011), True, 'import numpy as np\n'), ((4061, 4106), 'numpy.sum', 'np.sum', (["df2.loc[edge_dict[i][num], 'is_edge']"], {}), "(df2.loc[edge_dict[i][num], 'is_edge'])\n", (4067, 4106), True, 'import numpy as np\n'), ((4245, 4267), 'numpy.sum', 'np.sum', (["df2['is_edge']"], {}), "(df2['is_edge'])\n", (4251, 4267), True, 'import numpy as np\n'), ((4316, 4341), 'numpy.sum', 'np.sum', (["(df2['label'] == 5)"], {}), "(df2['label'] == 5)\n", (4322, 4341), True, 'import numpy as np\n'), ((6955, 6987), 'numpy.save', 'np.save', (['path_U', 'G_30nn_Binary_U'], {}), '(path_U, G_30nn_Binary_U)\n', (6962, 6987), True, 'import numpy as np\n'), ((7000, 7032), 'numpy.save', 'np.save', (['path_e', 'G_30nn_Binary_e'], {}), '(path_e, G_30nn_Binary_e)\n', (7007, 7032), True, 'import numpy as np\n'), ((7077, 7092), 'numpy.load', 'np.load', (['path_e'], {}), '(path_e)\n', (7084, 7092), True, 'import numpy as np\n'), ((7123, 7138), 'numpy.load', 'np.load', (['path_U'], {}), '(path_U)\n', (7130, 7138), True, 'import numpy as np\n'), ((7200, 7222), 'os.path.exists', 'os.path.exists', (['path_L'], {}), '(path_L)\n', (7214, 7222), False, 'import os\n'), ((7343, 7375), 'numpy.save', 'np.save', (['path_L', 'G_30nn_Binary_L'], {}), '(path_L, G_30nn_Binary_L)\n', (7350, 7375), True, 'import numpy as np\n'), ((8031, 8060), 'torch.from_numpy', 'torch.from_numpy', (['train_input'], {}), '(train_input)\n', (8047, 8060), False, 'import torch\n'), ((10566, 10587), 'torch.clamp', 'torch.clamp', (['w'], {'min': '(0)'}), '(w, min=0)\n', (10577, 10587), False, 'import torch\n'), ((11521, 11550), 'torch.from_numpy', 'torch.from_numpy', (['train_input'], {}), '(train_input)\n', (11537, 11550), False, 'import torch\n'), ((16149, 16178), 'torch.from_numpy', 'torch.from_numpy', (['train_input'], {}), '(train_input)\n', (16165, 16178), 
False, 'import torch\n'), ((1669, 1690), 'numpy.nonzero', 'np.nonzero', (['edge_roof'], {}), '(edge_roof)\n', (1679, 1690), True, 'import numpy as np\n'), ((6187, 6221), 'numpy.load', 'np.load', (['path_L'], {'allow_pickle': '(True)'}), '(path_L, allow_pickle=True)\n', (6194, 6221), True, 'import numpy as np\n'), ((6744, 6766), 'os.path.exists', 'os.path.exists', (['path_e'], {}), '(path_e)\n', (6758, 6766), False, 'import os\n'), ((6774, 6796), 'os.path.exists', 'os.path.exists', (['path_U'], {}), '(path_U)\n', (6788, 6796), False, 'import os\n'), ((10494, 10509), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10507, 10509), False, 'import torch\n'), ((13324, 13343), 'torch.from_numpy', 'torch.from_numpy', (['e'], {}), '(e)\n', (13340, 13343), False, 'import torch\n'), ((14249, 14268), 'torch.exp', 'torch.exp', (['(-e * tau)'], {}), '(-e * tau)\n', (14258, 14268), False, 'import torch\n'), ((14522, 14541), 'torch.exp', 'torch.exp', (['(-e * tau)'], {}), '(-e * tau)\n', (14531, 14541), False, 'import torch\n'), ((7420, 7454), 'numpy.load', 'np.load', (['path_L'], {'allow_pickle': '(True)'}), '(path_L, allow_pickle=True)\n', (7427, 7454), True, 'import numpy as np\n'), ((17749, 17774), 'torch.sigmoid', 'torch.sigmoid', (['(f_hat - t3)'], {}), '(f_hat - t3)\n', (17762, 17774), False, 'import torch\n'), ((18307, 18328), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['x', 'y'], {}), '(x, y)\n', (18322, 18328), True, 'import numpy as np\n')] |
"""
MIT License
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import division
from __future__ import absolute_import
import cvxpy as cvx
import numpy as np
import cvxpy.lin_ops.lin_utils as lu
import scipy.sparse as sp
from numpy import linalg as LA
import scipy.sparse.linalg as SLA
import logging
from qcqp.utilities import *
import qcqp.settings as s
# Log to qcqp.log at INFO level; filemode='w' truncates the file each run.
logging.basicConfig(filename='qcqp.log', filemode='w', level=logging.INFO)
def solve_spectral(prob, *args, **kwargs):
    """Solve the spectral relaxation with lambda = 1.

    The problem is lifted to a semidefinite matrix X (standing in for
    [x; 1][x; 1]^T) with the inequality/equality constraints aggregated
    into single constraints via W1/W2. Returns (x, bound) where x is
    recovered from the dominant eigenvector of X.
    """
    # TODO: do this efficiently without SDP lifting
    # lifted variables and semidefinite constraint
    X = cvx.Semidef(prob.n + 1)
    W = prob.f0.homogeneous_form()
    rel_obj = cvx.Minimize(cvx.sum_entries(cvx.mul_elemwise(W, X)))
    # Aggregate all '<=' constraints (W1) and all '==' constraints (W2)
    # into one matrix each — this is what makes it the lambda = 1 spectral
    # relaxation rather than the full SDR.
    W1 = sum([f.homogeneous_form() for f in prob.fs if f.relop == '<='])
    W2 = sum([f.homogeneous_form() for f in prob.fs if f.relop == '=='])
    rel_prob = cvx.Problem(
        rel_obj,
        [
            cvx.sum_entries(cvx.mul_elemwise(W1, X)) <= 0,
            cvx.sum_entries(cvx.mul_elemwise(W2, X)) == 0,
            X[-1, -1] == 1
        ]
    )
    rel_prob.solve(*args, **kwargs)
    if rel_prob.status not in [cvx.OPTIMAL, cvx.OPTIMAL_INACCURATE]:
        raise Exception("Relaxation problem status: %s" % rel_prob.status)
    # Rank-1 approximation: scale the top eigenvector by sqrt of the top
    # eigenvalue, dropping the homogenizing last coordinate.
    # NOTE(review): LA.eig on the (symmetric) X can return complex values;
    # np.max(w) < 0 would also make the sqrt NaN — presumably X is PSD here.
    (w, v) = LA.eig(X.value)
    return np.sqrt(np.max(w))*np.asarray(v[:-1, np.argmax(w)]).flatten(), rel_prob.value
def solve_sdr(prob, *args, **kwargs):
    """Solve the SDP relaxation.

    Lifts x to a PSD matrix X standing in for [x; 1][x; 1]^T, minimizes
    <W, X> and keeps every constraint individually (unlike the spectral
    relaxation). Returns (X.value, bound).
    """
    # Lifted variable with the homogenizing corner pinned to 1.
    X = cvx.Semidef(prob.n + 1)
    objective = cvx.Minimize(
        cvx.sum_entries(cvx.mul_elemwise(prob.f0.homogeneous_form(), X)))
    constraints = [X[-1, -1] == 1]
    for f in prob.fs:
        expr = cvx.sum_entries(cvx.mul_elemwise(f.homogeneous_form(), X))
        constraints.append(expr == 0 if f.relop == '==' else expr <= 0)
    relaxation = cvx.Problem(objective, constraints)
    relaxation.solve(*args, **kwargs)
    if relaxation.status not in [cvx.OPTIMAL, cvx.OPTIMAL_INACCURATE]:
        raise Exception("Relaxation problem status: %s" % relaxation.status)
    return X.value, relaxation.value
# phase 1: optimize infeasibility
def coord_descent_phase1(x0, prob, num_iters=1000,
                         viol_tol=1e-2, tol=1e-4):
    """Coordinate-descent feasibility phase.

    Repeatedly sweeps the coordinates, bisecting on the allowed violation
    level to find a value of x[i] that reduces the maximum constraint
    violation, until it drops below viol_tol or num_iters is exhausted.
    Returns the (possibly still infeasible) point.
    """
    logging.info("Phase 1 starts")
    x = np.copy(x0)
    # number of iterations since last infeasibility improvement
    update_counter = 0
    failed = False
    # TODO: correct termination condition with tolerance
    viol_last = np.inf
    for t in range(num_iters):
        if viol_last < viol_tol: break
        # optimize over x[i]
        for i in range(prob.n):
            # Zero objective: phase 1 only cares about feasibility.
            obj = OneVarQuadraticFunction(0, 0, 0)
            nfs = [f.get_onevar_func(x, i) for f in prob.fs]
            nfs = [f for f in nfs if f.P != 0 or f.q != 0]
            viol = max([f.violation(x[i]) for f in nfs])
            logging.debug("Current violation in x[%d]: %.3f", i, viol)
            logging.debug("Current point: %s", x)
            new_xi = x[i]
            new_viol = viol
            # Bisect on the violation budget s: onevar_qcqp returns None
            # when no feasible x[i] exists at that budget.
            # NOTE(review): the local `s` below shadows the module-level
            # `qcqp.settings as s` import (unused here, but fragile).
            ss, es = -tol, viol - viol_tol
            while es - ss > tol:
                s = (ss + es) / 2
                xi = onevar_qcqp(obj, nfs, s)
                if xi is None:
                    ss = s
                else:
                    new_xi = xi
                    new_viol = s
                    es = s
            if new_viol < viol:
                x[i] = new_xi
                update_counter = 0
                logging.debug("Violation reduction %.3f -> %.3f", viol, new_viol)
            else:
                update_counter += 1
                if update_counter == prob.n:
                    failed = True
                    break
        #if failed: break
        viol = max(prob.violations(x))
        logging.info("Maximum violation: %.6f -> %.6f", viol_last, viol)
        #if viol_last <= viol + tol:
        #    break
        viol_last = viol
    return x
# phase 2: optimize objective over feasible points
def coord_descent_phase2(x0, prob, num_iters=1000,
                         viol_tol=1e-2, tol=1e-4):
    """Coordinate-descent objective phase.

    Starting from a (near-)feasible point, optimizes the objective one
    coordinate at a time while allowing at most the current maximum
    violation. Stops after a full sweep produces no coordinate change
    larger than tol, or after num_iters sweeps. Returns the point.
    """
    # TODO: find correct termination condition with tolerance
    logging.info("Phase 2 starts")
    x = np.copy(x0)
    # Violation budget for every one-variable subproblem.
    viol = max(prob.violations(x))
    update_counter = 0
    converged = False
    for t in range(num_iters):
        # optimize over x[i]
        for i in range(prob.n):
            obj = prob.f0.get_onevar_func(x, i)
            nfs = [f.get_onevar_func(x, i) for f in prob.fs]
            # TODO: maybe this shouldn't be here?
            nfs = [f for f in nfs if f.P != 0 or f.q != 0]
            new_xi = onevar_qcqp(obj, nfs, viol)
            if new_xi is not None and np.abs(new_xi - x[i]) > tol:
                x[i] = new_xi
                update_counter = 0
            else:
                # Converged once n consecutive coordinates fail to move.
                update_counter += 1
                if update_counter == prob.n:
                    converged = True
                    break
        if converged: break
    return x
def improve_coord_descent(x, prob, *args, **kwargs):
    """Improve a candidate point with coordinate descent.

    Optionally runs the feasibility phase first (phase1, default True),
    then the objective phase once the maximum violation is below viol_tol.
    Returns the improved point.
    """
    num_iters = kwargs.get('num_iters', 1000)
    viol_tol = kwargs.get('viol_tol', 1e-2)
    tol = kwargs.get('tol', 1e-4)
    if kwargs.get('phase1', True):
        x = coord_descent_phase1(x, prob, num_iters, viol_tol, tol)
    if max(prob.violations(x)) < viol_tol:
        x = coord_descent_phase2(x, prob, num_iters, viol_tol, tol)
    return x
def admm_phase1(x0, prob, tol=1e-2, num_iters=1000):
    """ADMM feasibility phase (consensus form).

    Keeps one copy x_i (and scaled dual u_i) per constraint, averaging
    them into the consensus variable z and projecting each copy onto its
    own constraint with onecons_qcqp. Stops once z's maximum violation
    drops below tol. Returns z.
    """
    logging.info("Starting ADMM phase 1 with tol %.3f", tol)
    z = np.copy(x0)
    xs = [np.copy(x0) for i in range(prob.m)]
    us = [np.zeros(prob.n) for i in range(prob.m)]
    for t in range(num_iters):
        if max(prob.violations(z)) < tol:
            break
        # z-update: average of the per-constraint copies minus duals.
        z = (sum(xs)-sum(us))/prob.m
        # x-update: project z + u_i onto constraint i.
        for i in range(prob.m):
            x, u, f = xs[i], us[i], prob.fi(i)
            xs[i] = onecons_qcqp(z + u, f)
        # u-update: accumulate the consensus residuals.
        for i in range(prob.m):
            us[i] += z - xs[i]
    return z
def admm_phase2(x0, prob, rho, tol=1e-2, num_iters=1000, viol_lim=1e4):
    """ADMM objective phase.

    Minimizes the objective in consensus form with penalty rho. The sparse
    factorization of the z-update system is cached on `prob` and only
    rebuilt when rho changes. Returns the best point seen, as judged by
    prob.better.
    """
    logging.info("Starting ADMM phase 2 with rho %.3f", rho)
    bestx = np.copy(x0)
    z = np.copy(x0)
    xs = [np.copy(x0) for i in range(prob.m)]
    us = [np.zeros(prob.n) for i in range(prob.m)]
    # Factorize 2*(P + rho*m*I) once per rho value and cache it on prob.
    if prob.rho != rho:
        prob.rho = rho
        zlhs = 2*(prob.f0.P + rho*prob.m*sp.identity(prob.n)).tocsc()
        prob.z_solver = SLA.factorized(zlhs)
    last_z = None
    for t in range(num_iters):
        # z-update: solve the cached linear system.
        rhs = 2*rho*(sum(xs)-sum(us)) - prob.f0.qarray
        z = prob.z_solver(rhs)
        # TODO: parallel x/u-updates
        for i in range(prob.m):
            xs[i] = onecons_qcqp(z + us[i], prob.fi(i))
        for i in range(prob.m):
            us[i] += z - xs[i]
        # TODO: termination condition
        if last_z is not None and LA.norm(last_z - z) < tol:
            break
        last_z = z
        maxviol = max(prob.violations(z))
        logging.info("Iteration %d, violation %.3f", t, maxviol)
        # Abort a diverging run instead of tracking an exploding iterate.
        if maxviol > viol_lim: break
        bestx = np.copy(prob.better(z, bestx))
    return bestx
def improve_admm(x0, prob, *args, **kwargs):
    """Improve a candidate point with ADMM.

    Keyword arguments:
        num_iters : maximum ADMM iterations per phase (default 1000)
        viol_lim  : abort phase 2 once the violation exceeds this (default 1e4)
        tol       : convergence / feasibility tolerance (default 1e-2)
        rho       : ADMM penalty parameter; chosen automatically when None
        phase1    : whether to run the feasibility phase first (default True)
    Raises:
        Exception : when a user-supplied rho makes the z-update non-convex.
    """
    num_iters = kwargs.get('num_iters', 1000)
    viol_lim = kwargs.get('viol_lim', 1e4)
    tol = kwargs.get('tol', 1e-2)
    rho = kwargs.get('rho', None)
    phase1 = kwargs.get('phase1', True)
    if rho is not None:
        # The z-update is convex only if P + m*rho*I is PSD.
        lmb0, P0Q = map(np.asmatrix, LA.eigh(prob.f0.P.todense()))
        lmb_min = np.min(lmb0)
        if lmb_min + prob.m*rho < 0:
            logging.error("rho parameter is too small, z-update not convex.")
            logging.error("Minimum possible value of rho: %.3f\n", -lmb_min/prob.m)
            logging.error("Given value of rho: %.3f\n", rho)
            # Report the required minimum, not the (too small) given value,
            # which the original message did by mistake.
            raise Exception("rho parameter is too small, need at least %.3f." % (-lmb_min/prob.m))
    # TODO: find a reasonable auto parameter
    if rho is None:
        lmb0, P0Q = map(np.asmatrix, LA.eigh(prob.f0.P.todense()))
        lmb_min = np.min(lmb0)
        lmb_max = np.max(lmb0)
        if lmb_min < 0: rho = 2.*(1.-lmb_min)/prob.m
        else: rho = 1./prob.m
        rho *= 50.
        logging.warning("Automatically setting rho to %.3f", rho)
    if phase1:
        x1 = prob.better(x0, admm_phase1(x0, prob, tol, num_iters))
    else:
        x1 = x0
    x2 = prob.better(x1, admm_phase2(x1, prob, rho, tol, num_iters, viol_lim))
    return x2
def improve_dccp(x0, prob, *args, **kwargs):
    """Improve a candidate point via the DCCP (convex-concave) heuristic.

    Each quadratic is split into a difference of convex parts, the DCCP
    problem is warm-started at x0, and the solution is kept only if DCCP
    reports convergence and prob.better prefers it. Returns the best point.

    Keyword arguments:
        use_eigen_split : how to split the quadratics (default False)
        tau             : DCCP slack penalty (default 0.005)
    """
    try:
        import dccp
    except ImportError:
        raise Exception("DCCP package is not installed.")
    use_eigen_split = kwargs.get('use_eigen_split', False)
    tau = kwargs.get('tau', 0.005)
    x = cvx.Variable(prob.n)
    x.value = x0
    # dummy objective: epigraph variable T bounds the DC-split objective.
    T = cvx.Variable()
    T.value = prob.f0.eval(x0)
    obj = cvx.Minimize(T)
    f0p, f0m = prob.f0.dc_split(use_eigen_split)
    cons = [f0p.eval_cvx(x) <= f0m.eval_cvx(x) + T]
    for f in prob.fs:
        fp, fm = f.dc_split(use_eigen_split)
        if f.relop == '==':
            cons.append(fp.eval_cvx(x) == fm.eval_cvx(x))
        else:
            cons.append(fp.eval_cvx(x) <= fm.eval_cvx(x))
    dccp_prob = cvx.Problem(obj, cons)
    bestx = np.copy(x0)
    try:
        result = dccp_prob.solve(method='dccp', tau=tau)
        if dccp_prob.status == "Converged":
            bestx = prob.better(bestx, np.asarray(x.value).flatten())
    except cvx.error.SolverError:
        # Best-effort heuristic: a solver failure simply keeps x0.
        pass
    return bestx
def improve_ipopt(x0, prob, *args, **kwargs):
    """Improve a candidate point by handing the QCQP to Ipopt via PyIpopt.

    Builds an NLP with unbounded variables, one row per quadratic
    constraint, and a dense Jacobian pattern, then solves from x0.
    Returns Ipopt's solution, or x0 unchanged if the solve fails.
    """
    try:
        import pyipopt
    except ImportError:
        raise Exception("PyIpopt package is not installed.")
    lb = pyipopt.NLP_LOWER_BOUND_INF
    ub = pyipopt.NLP_UPPER_BOUND_INF
    # Constraint bounds: equality rows stay pinned at [0, 0]; '<=' rows
    # get a -inf lower bound so they read f(x) <= 0.
    g_L = np.zeros(prob.m)
    for i in range(prob.m):
        if prob.fs[i].relop == '<=':
            g_L[i] = lb
    g_U = np.zeros(prob.m)
    def eval_grad_f(x, user_data=None):
        # Gradient of the quadratic objective: 2 P x + q.
        return 2*prob.f0.P.dot(x) + prob.f0.qarray
    def eval_g(x, user_data=None):
        return np.array([f.eval(x) for f in prob.fs])
    # Dense Jacobian sparsity pattern: every (constraint, variable) pair.
    jac_grid = np.indices((prob.m, prob.n))
    jac_r = jac_grid[0].ravel()
    jac_c = jac_grid[1].ravel()
    def eval_jac_g(x, flag, user_data=None):
        if flag:
            return (jac_r, jac_c)
        else:
            return np.vstack([2*f.P.dot(x)+f.qarray for f in prob.fs])
    nlp = pyipopt.create(
        prob.n, lb*np.ones(prob.n), ub*np.ones(prob.n),
        prob.m, g_L, g_U, prob.m*prob.n, 0,
        prob.f0.eval, eval_grad_f,
        eval_g, eval_jac_g
    )
    # Default to the starting point so a failed solve returns x0 instead
    # of raising NameError on an unbound `x` (the original bug), and log
    # the failure instead of swallowing it with a bare except.
    x = np.copy(x0)
    try:
        x, zl, zu, constraint_multipliers, obj, status = nlp.solve(x0)
    except Exception:
        logging.exception("Ipopt solve failed; returning the starting point.")
    return x
class QCQP:
    """Wrapper exposing the suggest/improve heuristics for a QCQP.

    Holds the canonical QCQP form of a cvxpy problem and lazily caches
    the spectral and SDR relaxation solutions for reuse across calls.
    """
    def __init__(self, prob):
        self.prob = prob
        self.qcqp_form = get_qcqp_form(prob)
        self.n = self.qcqp_form.n
        # Relaxation solutions/bounds, computed lazily on first use.
        self.spectral_sol = None
        self.spectral_bound = None
        self.sdr_sol = None
        self.sdr_bound = None
        self.maximize_flag = (prob.objective.NAME == "maximize")
    def suggest(self, method=s.RANDOM, eps=1e-8, *args, **kwargs):
        """Generate a candidate point and assign it to the problem variables.

        Returns (objective value, maximum constraint violation).
        """
        if method not in s.suggest_methods:
            # Apply %-formatting explicitly: Exception("...%s", m) would
            # store an unformatted two-element args tuple (original bug).
            raise Exception("Unknown suggest method: %s\n" % method)
        if method == s.RANDOM:
            x = np.random.randn(self.n)
        elif method == s.SPECTRAL:
            if self.spectral_sol is None:
                self.spectral_sol, self.spectral_bound = solve_spectral(self.qcqp_form, *args, **kwargs)
                if self.maximize_flag:
                    self.spectral_bound *= -1
            x = self.spectral_sol
        elif method == s.SDR:
            if self.sdr_sol is None:
                self.sdr_sol, self.sdr_bound = solve_sdr(self.qcqp_form, *args, **kwargs)
                if self.maximize_flag:
                    self.sdr_bound *= -1
                # Gaussian randomization: sample around the SDR mean with
                # the eps-regularized SDR covariance.
                self.mu = np.asarray(self.sdr_sol[:-1, -1]).flatten()
                self.Sigma = self.sdr_sol[:-1, :-1] - self.mu*self.mu.T + eps*sp.identity(self.n)
            x = np.random.multivariate_normal(self.mu, self.Sigma)
        assign_vars(self.prob.variables(), x)
        f0 = self.qcqp_form.f0.eval(x)
        if self.maximize_flag: f0 *= -1
        return (f0, max(self.qcqp_form.violations(x)))
    def _improve(self, method, *args, **kwargs):
        # Precondition: `method` was validated by improve(); an unknown
        # method here would leave `x` unbound.
        x0 = flatten_vars(self.prob.variables(), self.n)
        if method == s.COORD_DESCENT:
            x = improve_coord_descent(x0, self.qcqp_form, *args, **kwargs)
        elif method == s.ADMM:
            x = improve_admm(x0, self.qcqp_form, *args, **kwargs)
        elif method == s.DCCP:
            x = improve_dccp(x0, self.qcqp_form, *args, **kwargs)
        elif method == s.IPOPT:
            x = improve_ipopt(x0, self.qcqp_form, *args, **kwargs)
        assign_vars(self.prob.variables(), x)
        f0 = self.qcqp_form.f0.eval(x)
        if self.maximize_flag: f0 *= -1
        return (f0, max(self.qcqp_form.violations(x)))
    def improve(self, method, *args, **kwargs):
        """Run one or more improve heuristics in sequence.

        `method` may be a single method or a list. Returns the
        (objective, violation) pair from the last method run.
        """
        if not isinstance(method, list): methods = [method]
        else: methods = method
        if not all([method in s.improve_methods for method in methods]):
            # Format explicitly; the original passed the list as a second
            # Exception argument, so the message was never interpolated.
            raise Exception("Unknown improve method(s): %s" % methods)
        if any([x is None for x in self.prob.variables()]):
            self.suggest()
        for method in methods:
            f, v = self._improve(method, *args, **kwargs)
        return (f, v)
| [
"numpy.abs",
"numpy.argmax",
"scipy.sparse.linalg.factorized",
"numpy.ones",
"numpy.linalg.norm",
"logging.error",
"numpy.copy",
"numpy.random.randn",
"logging.warning",
"numpy.linalg.eig",
"numpy.max",
"scipy.sparse.identity",
"cvxpy.Problem",
"numpy.asarray",
"cvxpy.mul_elemwise",
"n... | [((1383, 1457), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""qcqp.log"""', 'filemode': '"""w"""', 'level': 'logging.INFO'}), "(filename='qcqp.log', filemode='w', level=logging.INFO)\n", (1402, 1457), False, 'import logging\n'), ((1677, 1700), 'cvxpy.Semidef', 'cvx.Semidef', (['(prob.n + 1)'], {}), '(prob.n + 1)\n', (1688, 1700), True, 'import cvxpy as cvx\n'), ((2364, 2379), 'numpy.linalg.eig', 'LA.eig', (['X.value'], {}), '(X.value)\n', (2370, 2379), True, 'from numpy import linalg as LA\n'), ((2609, 2632), 'cvxpy.Semidef', 'cvx.Semidef', (['(prob.n + 1)'], {}), '(prob.n + 1)\n', (2620, 2632), True, 'import cvxpy as cvx\n'), ((3019, 3051), 'cvxpy.Problem', 'cvx.Problem', (['rel_obj', 'rel_constr'], {}), '(rel_obj, rel_constr)\n', (3030, 3051), True, 'import cvxpy as cvx\n'), ((3390, 3420), 'logging.info', 'logging.info', (['"""Phase 1 starts"""'], {}), "('Phase 1 starts')\n", (3402, 3420), False, 'import logging\n'), ((3429, 3440), 'numpy.copy', 'np.copy', (['x0'], {}), '(x0)\n', (3436, 3440), True, 'import numpy as np\n'), ((5260, 5290), 'logging.info', 'logging.info', (['"""Phase 2 starts"""'], {}), "('Phase 2 starts')\n", (5272, 5290), False, 'import logging\n'), ((5299, 5310), 'numpy.copy', 'np.copy', (['x0'], {}), '(x0)\n', (5306, 5310), True, 'import numpy as np\n'), ((6573, 6629), 'logging.info', 'logging.info', (['"""Starting ADMM phase 1 with tol %.3f"""', 'tol'], {}), "('Starting ADMM phase 1 with tol %.3f', tol)\n", (6585, 6629), False, 'import logging\n'), ((6639, 6650), 'numpy.copy', 'np.copy', (['x0'], {}), '(x0)\n', (6646, 6650), True, 'import numpy as np\n'), ((7154, 7210), 'logging.info', 'logging.info', (['"""Starting ADMM phase 2 with rho %.3f"""', 'rho'], {}), "('Starting ADMM phase 2 with rho %.3f', rho)\n", (7166, 7210), False, 'import logging\n'), ((7224, 7235), 'numpy.copy', 'np.copy', (['x0'], {}), '(x0)\n', (7231, 7235), True, 'import numpy as np\n'), ((7245, 7256), 'numpy.copy', 'np.copy', (['x0'], {}), 
'(x0)\n', (7252, 7256), True, 'import numpy as np\n'), ((9730, 9750), 'cvxpy.Variable', 'cvx.Variable', (['prob.n'], {}), '(prob.n)\n', (9742, 9750), True, 'import cvxpy as cvx\n'), ((9798, 9812), 'cvxpy.Variable', 'cvx.Variable', ([], {}), '()\n', (9810, 9812), True, 'import cvxpy as cvx\n'), ((9855, 9870), 'cvxpy.Minimize', 'cvx.Minimize', (['T'], {}), '(T)\n', (9867, 9870), True, 'import cvxpy as cvx\n'), ((10215, 10237), 'cvxpy.Problem', 'cvx.Problem', (['obj', 'cons'], {}), '(obj, cons)\n', (10226, 10237), True, 'import cvxpy as cvx\n'), ((10250, 10261), 'numpy.copy', 'np.copy', (['x0'], {}), '(x0)\n', (10257, 10261), True, 'import numpy as np\n'), ((10756, 10772), 'numpy.zeros', 'np.zeros', (['prob.m'], {}), '(prob.m)\n', (10764, 10772), True, 'import numpy as np\n'), ((10872, 10888), 'numpy.zeros', 'np.zeros', (['prob.m'], {}), '(prob.m)\n', (10880, 10888), True, 'import numpy as np\n'), ((11090, 11118), 'numpy.indices', 'np.indices', (['(prob.m, prob.n)'], {}), '((prob.m, prob.n))\n', (11100, 11118), True, 'import numpy as np\n'), ((4900, 4964), 'logging.info', 'logging.info', (['"""Maximum violation: %.6f -> %.6f"""', 'viol_last', 'viol'], {}), "('Maximum violation: %.6f -> %.6f', viol_last, viol)\n", (4912, 4964), False, 'import logging\n'), ((6661, 6672), 'numpy.copy', 'np.copy', (['x0'], {}), '(x0)\n', (6668, 6672), True, 'import numpy as np\n'), ((6707, 6723), 'numpy.zeros', 'np.zeros', (['prob.n'], {}), '(prob.n)\n', (6715, 6723), True, 'import numpy as np\n'), ((7267, 7278), 'numpy.copy', 'np.copy', (['x0'], {}), '(x0)\n', (7274, 7278), True, 'import numpy as np\n'), ((7313, 7329), 'numpy.zeros', 'np.zeros', (['prob.n'], {}), '(prob.n)\n', (7321, 7329), True, 'import numpy as np\n'), ((7496, 7516), 'scipy.sparse.linalg.factorized', 'SLA.factorized', (['zlhs'], {}), '(zlhs)\n', (7510, 7516), True, 'import scipy.sparse.linalg as SLA\n'), ((8030, 8086), 'logging.info', 'logging.info', (['"""Iteration %d, violation %.3f"""', 't', 'maxviol'], {}), 
"('Iteration %d, violation %.3f', t, maxviol)\n", (8042, 8086), False, 'import logging\n'), ((8544, 8556), 'numpy.min', 'np.min', (['lmb0'], {}), '(lmb0)\n', (8550, 8556), True, 'import numpy as np\n'), ((9053, 9065), 'numpy.min', 'np.min', (['lmb0'], {}), '(lmb0)\n', (9059, 9065), True, 'import numpy as np\n'), ((9084, 9096), 'numpy.max', 'np.max', (['lmb0'], {}), '(lmb0)\n', (9090, 9096), True, 'import numpy as np\n'), ((9207, 9264), 'logging.warning', 'logging.warning', (['"""Automatically setting rho to %.3f"""', 'rho'], {}), "('Automatically setting rho to %.3f', rho)\n", (9222, 9264), False, 'import logging\n'), ((1780, 1802), 'cvxpy.mul_elemwise', 'cvx.mul_elemwise', (['W', 'X'], {}), '(W, X)\n', (1796, 1802), True, 'import cvxpy as cvx\n'), ((2712, 2734), 'cvxpy.mul_elemwise', 'cvx.mul_elemwise', (['W', 'X'], {}), '(W, X)\n', (2728, 2734), True, 'import cvxpy as cvx\n'), ((2857, 2879), 'cvxpy.mul_elemwise', 'cvx.mul_elemwise', (['W', 'X'], {}), '(W, X)\n', (2873, 2879), True, 'import cvxpy as cvx\n'), ((3998, 4056), 'logging.debug', 'logging.debug', (['"""Current violation in x[%d]: %.3f"""', 'i', 'viol'], {}), "('Current violation in x[%d]: %.3f', i, viol)\n", (4011, 4056), False, 'import logging\n'), ((4069, 4106), 'logging.debug', 'logging.debug', (['"""Current point: %s"""', 'x'], {}), "('Current point: %s', x)\n", (4082, 4106), False, 'import logging\n'), ((8606, 8671), 'logging.error', 'logging.error', (['"""rho parameter is too small, z-update not convex."""'], {}), "('rho parameter is too small, z-update not convex.')\n", (8619, 8671), False, 'import logging\n'), ((8684, 8757), 'logging.error', 'logging.error', (['"""Minimum possible value of rho: %.3f\n"""', '(-lmb_min / prob.m)'], {}), "('Minimum possible value of rho: %.3f\\n', -lmb_min / prob.m)\n", (8697, 8757), False, 'import logging\n'), ((8768, 8816), 'logging.error', 'logging.error', (['"""Given value of rho: %.3f\n"""', 'rho'], {}), "('Given value of rho: %.3f\\n', rho)\n", (8781, 8816), 
False, 'import logging\n'), ((11412, 11427), 'numpy.ones', 'np.ones', (['prob.n'], {}), '(prob.n)\n', (11419, 11427), True, 'import numpy as np\n'), ((11432, 11447), 'numpy.ones', 'np.ones', (['prob.n'], {}), '(prob.n)\n', (11439, 11447), True, 'import numpy as np\n'), ((12246, 12269), 'numpy.random.randn', 'np.random.randn', (['self.n'], {}), '(self.n)\n', (12261, 12269), True, 'import numpy as np\n'), ((2399, 2408), 'numpy.max', 'np.max', (['w'], {}), '(w)\n', (2405, 2408), True, 'import numpy as np\n'), ((4602, 4667), 'logging.debug', 'logging.debug', (['"""Violation reduction %.3f -> %.3f"""', 'viol', 'new_viol'], {}), "('Violation reduction %.3f -> %.3f', viol, new_viol)\n", (4615, 4667), False, 'import logging\n'), ((7915, 7934), 'numpy.linalg.norm', 'LA.norm', (['(last_z - z)'], {}), '(last_z - z)\n', (7922, 7934), True, 'from numpy import linalg as LA\n'), ((2036, 2059), 'cvxpy.mul_elemwise', 'cvx.mul_elemwise', (['W1', 'X'], {}), '(W1, X)\n', (2052, 2059), True, 'import cvxpy as cvx\n'), ((2095, 2118), 'cvxpy.mul_elemwise', 'cvx.mul_elemwise', (['W2', 'X'], {}), '(W2, X)\n', (2111, 2118), True, 'import cvxpy as cvx\n'), ((5788, 5809), 'numpy.abs', 'np.abs', (['(new_xi - x[i])'], {}), '(new_xi - x[i])\n', (5794, 5809), True, 'import numpy as np\n'), ((12992, 13042), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['self.mu', 'self.Sigma'], {}), '(self.mu, self.Sigma)\n', (13021, 13042), True, 'import numpy as np\n'), ((10411, 10430), 'numpy.asarray', 'np.asarray', (['x.value'], {}), '(x.value)\n', (10421, 10430), True, 'import numpy as np\n'), ((7443, 7462), 'scipy.sparse.identity', 'sp.identity', (['prob.n'], {}), '(prob.n)\n', (7454, 7462), True, 'import scipy.sparse as sp\n'), ((2428, 2440), 'numpy.argmax', 'np.argmax', (['w'], {}), '(w)\n', (2437, 2440), True, 'import numpy as np\n'), ((12834, 12867), 'numpy.asarray', 'np.asarray', (['self.sdr_sol[:-1, -1]'], {}), '(self.sdr_sol[:-1, -1])\n', (12844, 12867), True, 'import numpy as 
np\n'), ((12956, 12975), 'scipy.sparse.identity', 'sp.identity', (['self.n'], {}), '(self.n)\n', (12967, 12975), True, 'import scipy.sparse as sp\n')] |
#!/usr/bin/env python
# encoding: utf-8
import tensorflow as tf
import numpy as np
import random
from collections import deque
# Hyperparameters for the DQN agent below.
FRAME_PER_ACTION = 1  # choose a fresh action every N frames (see getAction)
GAMMA = 0.99  # discount factor applied to the target network's max Q value
OBSERVE = 100.  # steps of pure observation before training starts
EXPLORE = 200000.  # steps over which epsilon is annealed down
FINAL_EPSILON = 0.001  # terminal exploration rate
INITIAL_EPSILON = 0.01  # starting exploration rate
REPLAY_MEMORY = 50000  # maximum number of transitions kept in replay memory
BATCH_SIZE = 32  # minibatch size sampled from replay memory
UPDATE_TIME = 100  # target-network sync interval (in training steps)
# Compatibility shim: TF >= 1.0 renamed tf.mul to tf.multiply; restore the
# old alias so the code below works on both API generations.
try:
    tf.mul
except:
    tf.mul = tf.multiply
class BrainDQN:
    """Deep Q-Network agent (TensorFlow 1.x graph/session style).

    Maintains an online Q-network plus a target network that is synced
    every UPDATE_TIME training steps, an experience-replay buffer, and
    epsilon-greedy action selection with linear epsilon annealing.
    """
    def __init__(self, actions):
        # Replay buffer of (state, action, reward, nextState, terminal) tuples.
        self.replayMemory = deque()
        self.timeStep = 0
        self.epsilon = INITIAL_EPSILON
        # Step count recovered from a restored checkpoint (0 when fresh).
        self.saved = 0
        self.actions = actions
        # Online (trained) Q-network.
        init_params = self.createQNetwork()
        self.stateInput, self.QValue, self.W_conv1, self.b_conv1, self.W_conv2, self.b_conv2, self.W_conv3, self.b_conv3, self.W_fc1, self.b_fc1, self.W_fc2, self.b_fc2 = init_params
        # Target Q-network with identical architecture; its weights are
        # overwritten from the online network by copyTargetQNetwork().
        target_params = self.createQNetwork()
        self.stateInputT, self.QValueT, self.W_conv1T, self.b_conv1T, self.W_conv2T, self.b_conv2T, self.W_conv3T, self.b_conv3T, self.W_fc1T, self.b_fc1T, self.W_fc2T, self.b_fc2T = target_params
        self.copyTargetQNetworkOperation = [self.W_conv1T.assign(self.W_conv1), self.b_conv1T.assign(self.b_conv1),
                                            self.W_conv2T.assign(self.W_conv2), self.b_conv2T.assign(self.b_conv2),
                                            self.W_conv3T.assign(self.W_conv3), self.b_conv3T.assign(self.b_conv3),
                                            self.W_fc1T.assign(self.W_fc1), self.b_fc1T.assign(self.b_fc1),
                                            self.W_fc2T.assign(self.W_fc2), self.b_fc2T.assign(self.b_fc2)]
        self.createTrainingMethod()
        self.saver = tf.train.Saver()
        self.session = tf.InteractiveSession()
        self.session.run(tf.initialize_all_variables())
        # Resume from the latest checkpoint if one exists; the global step
        # is encoded after the last '-' in the checkpoint path.
        checkpoint = tf.train.get_checkpoint_state("saved_networks")
        if checkpoint and checkpoint.model_checkpoint_path:
            checkpoint_path = checkpoint.model_checkpoint_path
            self.saver.restore(self.session, checkpoint_path)
            print("Successfully loaded:", checkpoint_path)
            self.saved = int(checkpoint_path.split('-')[-1])
        else:
            print("Could not find old network weights")
    def createQNetwork(self):
        '''
        Build the Q-network as a convolutional neural network (CNN).
        A CNN is composed of input, convolution, activation, pooling and
        fully-connected layers: convolutions extract features, pooling
        shrinks the data (fewer parameters, less computation, and it
        helps against overfitting).
        Input image (80x80x4) --> conv (kernel 8x8x4x32, stride 4) (20x20x32) --> pool (2x2) (10x10x32) -->
         --> conv (kernel 4x4x32x64, stride 2) (5x5x64) --> pool (2x2) (3x3x64) -->
         --> conv (kernel 3x3x64x64, stride 1) (3x3x64) --> pool (2x2) (2x2x64) -->
         --> reshape (256x1) --> fully connected with ReLU --> matmul to (actions x 1)
        '''
        # Network weight
        W_conv1 = self.weight_variable([8, 8, 4, 32])
        b_conv1 = self.bias_variable([32])
        W_conv2 = self.weight_variable([4, 4, 32, 64])
        b_conv2 = self.bias_variable([64])
        W_conv3 = self.weight_variable([3, 3, 64, 64])
        b_conv3 = self.bias_variable([64])
        W_fc1 = self.weight_variable([256, 256])
        b_fc1 = self.bias_variable([256])
        W_fc2 = self.weight_variable([256, self.actions])
        b_fc2 = self.bias_variable([self.actions])
        # Input
        stateInput = tf.placeholder("float", [None, 80, 80, 4])
        # Hidden layer
        h_conv1 = tf.nn.relu(self.conv2d(stateInput, W_conv1, 4) + b_conv1)
        h_pool1 = self.max_pool_2x2(h_conv1)
        h_conv2 = tf.nn.relu(self.conv2d(h_pool1, W_conv2, 2) + b_conv2)
        h_pool2 = self.max_pool_2x2(h_conv2)
        h_conv3 = tf.nn.relu(self.conv2d(h_pool2, W_conv3, 1) + b_conv3)
        h_pool3 = self.max_pool_2x2(h_conv3)
        h_conv3_flat = tf.reshape(h_pool3, [-1, 256])
        h_fc1 = tf.nn.relu(tf.matmul(h_conv3_flat, W_fc1) + b_fc1)
        # Compute the Q-values
        QValue = tf.matmul(h_fc1, W_fc2) + b_fc2
        return stateInput, QValue, W_conv1, b_conv1, W_conv2, b_conv2, W_conv3, b_conv3, W_fc1, b_fc1, W_fc2, b_fc2
    def copyTargetQNetwork(self):
        # Sync the target-network weights from the online network.
        self.session.run(self.copyTargetQNetworkOperation)
    def createTrainingMethod(self):
        # Loss: mean squared error between the Bellman target y and Q(s, a)
        # for the action actually taken (selected via the one-hot mask).
        self.actionInput = tf.placeholder("float", [None, self.actions])
        self.yInput = tf.placeholder("float", [None])
        Q_Action = tf.reduce_sum(tf.mul(self.QValue, self.actionInput), reduction_indices=1)
        self.cost = tf.reduce_mean(tf.square(self.yInput - Q_Action))
        self.trainStep = tf.train.AdamOptimizer(1e-6).minimize(self.cost)
    def trainQNetwork(self):
        # Sample a random minibatch of transitions from replay memory.
        minibatch = random.sample(self.replayMemory, BATCH_SIZE)
        state_batch = [data[0] for data in minibatch]
        action_batch = [data[1] for data in minibatch]
        reward_batch = [data[2] for data in minibatch]
        nextState_batch = [data[3] for data in minibatch]
        # Bellman targets: r for terminal transitions, otherwise
        # r + GAMMA * max Q' as estimated by the target network.
        y_batch = []
        QValue_batch = self.QValueT.eval(feed_dict={self.stateInputT: nextState_batch})
        for i in range(0, BATCH_SIZE):
            terminal = minibatch[i][4]
            if terminal:
                y_batch.append(reward_batch[i])
            else:
                y_batch.append(reward_batch[i] + GAMMA * np.max(QValue_batch[i]))
        self.trainStep.run(feed_dict={
            self.yInput: y_batch,
            self.actionInput: action_batch,
            self.stateInput: state_batch
        })
        # Checkpoint the network every 1000 overall steps (the saved offset
        # keeps the step counter monotonic across restarts).
        if (self.timeStep + self.saved) % 1000 == 0:
            self.saver.save(self.session, 'saved_networks/' + 'network' + '-dqn',
                            global_step=(self.saved + self.timeStep))
        if self.timeStep % UPDATE_TIME == 0:
            self.copyTargetQNetwork()
    def setPerception(self, nextObservation, action, reward, terminal):
        # Slide the 4-frame state window: drop the oldest frame, append the new one.
        newState = np.append(self.currentState[:, :, 1:], nextObservation, axis=2)
        self.replayMemory.append((self.currentState, action, reward, newState, terminal))
        if len(self.replayMemory) > REPLAY_MEMORY:
            self.replayMemory.popleft()
        if self.timeStep > OBSERVE:
            self.trainQNetwork()
        # Phase label for logging only: observe -> explore -> train.
        if self.timeStep <= OBSERVE:
            state = "observe"
        elif self.timeStep > OBSERVE and self.timeStep <= OBSERVE + EXPLORE:
            state = "explore"
        else:
            state = "train"
        print("TIMESTEP", (self.saved + self.timeStep), "/ STATE", state, \
              "/ EPSILON", self.epsilon)
        self.currentState = newState
        self.timeStep += 1
    def getAction(self):
        # Epsilon-greedy action selection from the online network's Q-values.
        QValue = self.QValue.eval(feed_dict={self.stateInput: [self.currentState]})[0]
        action = np.zeros(self.actions)
        if self.timeStep % FRAME_PER_ACTION == 0:
            if random.random() <= self.epsilon:
                action_index = random.randrange(self.actions)
                action[action_index] = 1
            else:
                action_index = np.argmax(QValue)
                action[action_index] = 1
        else:
            # Off-cycle frames repeat the default (index-0) action.
            action[0] = 1
        # Linearly anneal epsilon from INITIAL_EPSILON to FINAL_EPSILON
        # over EXPLORE steps, once the observation phase is over.
        if self.epsilon > FINAL_EPSILON and self.timeStep > OBSERVE:
            self.epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE
        return action
    def setInitState(self, observation):
        # Initial state: the first frame stacked four times along the channel axis.
        self.currentState = np.stack((observation, observation, observation, observation), axis=2)
    def weight_variable(self, shape):
        # Truncated-normal initialisation for weights.
        initial = tf.truncated_normal(shape, stddev=0.01)
        return tf.Variable(initial)
    def bias_variable(self, shape):
        # Small positive constant initialisation for biases.
        initial = tf.constant(0.01, shape=shape)
        return tf.Variable(initial)
    def conv2d(self, x, W, stride):
        return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding="SAME")
    def max_pool_2x2(self, x):
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
| [
"numpy.argmax",
"random.sample",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.Variable",
"tensorflow.nn.conv2d",
"tensorflow.InteractiveSession",
"tensorflow.truncated_normal",
"collections.deque",
"tensorflow.placeholder",
"numpy.append",
"numpy.max",
"tensorflow.initialize_all_va... | [((427, 434), 'collections.deque', 'deque', ([], {}), '()\n', (432, 434), False, 'from collections import deque\n'), ((1650, 1666), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1664, 1666), True, 'import tensorflow as tf\n'), ((1690, 1713), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (1711, 1713), True, 'import tensorflow as tf\n'), ((1791, 1838), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['"""saved_networks"""'], {}), "('saved_networks')\n", (1820, 1838), True, 'import tensorflow as tf\n'), ((3315, 3357), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, 80, 80, 4]'], {}), "('float', [None, 80, 80, 4])\n", (3329, 3357), True, 'import tensorflow as tf\n'), ((3765, 3795), 'tensorflow.reshape', 'tf.reshape', (['h_pool3', '[-1, 256]'], {}), '(h_pool3, [-1, 256])\n', (3775, 3795), True, 'import tensorflow as tf\n'), ((4203, 4248), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, self.actions]'], {}), "('float', [None, self.actions])\n", (4217, 4248), True, 'import tensorflow as tf\n'), ((4271, 4302), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None]'], {}), "('float', [None])\n", (4285, 4302), True, 'import tensorflow as tf\n'), ((4590, 4634), 'random.sample', 'random.sample', (['self.replayMemory', 'BATCH_SIZE'], {}), '(self.replayMemory, BATCH_SIZE)\n', (4603, 4634), False, 'import random\n'), ((5835, 5898), 'numpy.append', 'np.append', (['self.currentState[:, :, 1:]', 'nextObservation'], {'axis': '(2)'}), '(self.currentState[:, :, 1:], nextObservation, axis=2)\n', (5844, 5898), True, 'import numpy as np\n'), ((6679, 6701), 'numpy.zeros', 'np.zeros', (['self.actions'], {}), '(self.actions)\n', (6687, 6701), True, 'import numpy as np\n'), ((7286, 7356), 'numpy.stack', 'np.stack', (['(observation, observation, observation, observation)'], {'axis': '(2)'}), '((observation, observation, 
observation, observation), axis=2)\n', (7294, 7356), True, 'import numpy as np\n'), ((7414, 7453), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.01)'}), '(shape, stddev=0.01)\n', (7433, 7453), True, 'import tensorflow as tf\n'), ((7469, 7489), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (7480, 7489), True, 'import tensorflow as tf\n'), ((7545, 7575), 'tensorflow.constant', 'tf.constant', (['(0.01)'], {'shape': 'shape'}), '(0.01, shape=shape)\n', (7556, 7575), True, 'import tensorflow as tf\n'), ((7591, 7611), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (7602, 7611), True, 'import tensorflow as tf\n'), ((7664, 7730), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, stride, stride, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, stride, stride, 1], padding='SAME')\n", (7676, 7730), True, 'import tensorflow as tf\n'), ((7778, 7853), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 2, 2, 1]', 'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n", (7792, 7853), True, 'import tensorflow as tf\n'), ((1739, 1768), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (1766, 1768), True, 'import tensorflow as tf\n'), ((3896, 3919), 'tensorflow.matmul', 'tf.matmul', (['h_fc1', 'W_fc2'], {}), '(h_fc1, W_fc2)\n', (3905, 3919), True, 'import tensorflow as tf\n'), ((4336, 4373), 'tensorflow.mul', 'tf.mul', (['self.QValue', 'self.actionInput'], {}), '(self.QValue, self.actionInput)\n', (4342, 4373), True, 'import tensorflow as tf\n'), ((4431, 4464), 'tensorflow.square', 'tf.square', (['(self.yInput - Q_Action)'], {}), '(self.yInput - Q_Action)\n', (4440, 4464), True, 'import tensorflow as tf\n'), ((3823, 3853), 'tensorflow.matmul', 'tf.matmul', (['h_conv3_flat', 'W_fc1'], {}), '(h_conv3_flat, W_fc1)\n', (3832, 3853), True, 'import tensorflow as 
tf\n'), ((4491, 4520), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(1e-06)'], {}), '(1e-06)\n', (4513, 4520), True, 'import tensorflow as tf\n'), ((6767, 6782), 'random.random', 'random.random', ([], {}), '()\n', (6780, 6782), False, 'import random\n'), ((6831, 6861), 'random.randrange', 'random.randrange', (['self.actions'], {}), '(self.actions)\n', (6847, 6861), False, 'import random\n'), ((6952, 6969), 'numpy.argmax', 'np.argmax', (['QValue'], {}), '(QValue)\n', (6961, 6969), True, 'import numpy as np\n'), ((5193, 5216), 'numpy.max', 'np.max', (['QValue_batch[i]'], {}), '(QValue_batch[i])\n', (5199, 5216), True, 'import numpy as np\n')] |
import argparse
import json
import logging
from typing import Any, Dict, List, Tuple
import zipfile, gzip, re, copy, random, math
import sys, os, shutil
import numpy
from typing import TypeVar,Iterable
from multiprocessing import Pool
from allennlp.common.elastic_logger import ElasticLogger
from subprocess import Popen,call
# Generic type variable available for annotations in this module.
T = TypeVar('T')
# Make the repository root importable when this file is run as a script.
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(os.path.join(__file__, os.pardir), os.pardir))))
from allennlp.common.tqdm import Tqdm
from allennlp.common.file_utils import cached_path
from allennlp.common.util import add_noise_to_dict_values
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.instance import Instance
from allennlp.data.dataset_readers.reading_comprehension import util
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token, Tokenizer
logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
# NOTE(review): TfidfVectorizer is imported twice above/below; redundant but harmless.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import pairwise_distances
from nltk.corpus import stopwords
import string
def parse_filename(filename):
    """Split an eval-result filename into its experiment components.

    Expects names shaped like
    ``<eval_set>_dev_on_<target>_from_<source>_<type>.json`` and returns a
    dict with keys eval_set / target_dataset / source_dataset / type.
    Logs an error and returns None when the name does not match.
    """
    match_results = re.match('(\S+)_dev_on_(\S+)_from_(\S+)_(\S+).json', filename)
    if match_results is None:
        logger.error('could not find any parsing for the format %s', filename)
        return
    return {
        'eval_set': match_results.group(1),
        'target_dataset': match_results.group(2),
        'source_dataset': match_results.group(3),
        'type': match_results.group(4),
    }
def process_results(args):
    """Load evaluation results, log them to Elasticsearch, and optionally
    upload the predictions file to S3.

    The live path simply reads ``args.eval_res_file`` (JSON) and merges all
    CLI arguments into the result dict before logging.
    """
    # for BERTlarge we process a prediction file ...
    # NOTE(review): the ``if False and`` guard permanently disables this
    # legacy aggregation path; only the ``else`` branch below ever runs.
    if False and args.predictions_file is not None:
        instance_list = []
        with open(args.predictions_file, 'r') as f:
            for line in f:
                try:
                    instance_list.append(json.loads(line))
                except:
                    pass
        # Group prediction instances by question id (instances for the same
        # question are contiguous after sorting).
        instance_list = sorted(instance_list, key=lambda x: x['question_id'])
        intances_question_id = [instance['question_id'] for instance in instance_list]
        split_inds = [0] + list(np.cumsum(np.unique(intances_question_id, return_counts=True)[1]))
        per_question_instances = [instance_list[split_inds[ind]:split_inds[ind + 1]] for ind in range(len(split_inds) - 1)]
        print(len(per_question_instances))
        # Per question, keep the EM/F1 of the highest-logit span, then
        # average and scale by the fraction of questions actually used.
        results_dict = {'EM':0.0, 'f1': 0.0}
        for question_instances in per_question_instances:
            best_ind = numpy.argmax([instance['best_span_logit'] for instance in question_instances])
            results_dict['EM'] += question_instances[best_ind]['EM']
            results_dict['f1'] += question_instances[best_ind]['f1']
        results_dict['EM'] /= len(per_question_instances)
        results_dict['f1'] /= len(per_question_instances)
        results_dict['EM'] *= instance_list[0]['qas_used_fraction']
        results_dict['f1'] *= instance_list[0]['qas_used_fraction']
        # sanity test:
        if args.eval_path == '-1':
            pass
        elif args.eval_path is None:
            # Pull the canonical eval set from S3 and compute the fraction
            # of its question ids missing from the predictions.
            single_file_path = cached_path('s3://multiqa/datasets/' + args.eval_set + '_' + args.split_type + '.jsonl.zip')
            all_question_ids = []
            with zipfile.ZipFile(single_file_path, 'r') as myzip:
                if myzip.namelist()[0].find('jsonl') > 0:
                    contexts = []
                    with myzip.open(myzip.namelist()[0]) as myfile:
                        header = json.loads(myfile.readline())['header']
                        for example in myfile:
                            context = json.loads(example)
                            contexts.append(context)
                            all_question_ids += [qa['id'] for qa in context['qas']]
            predictions_question_ids = list(set(intances_question_id))
            # print(set(all_question_ids) - set(predictions_question_ids))
            results_dict['qids_missing_frac'] = len(set(all_question_ids) - set(predictions_question_ids)) / len(set(all_question_ids))
        else:
            # Same missing-question check, but reading a gzipped jsonl file
            # from the explicitly provided path.
            single_file_path = cached_path(args.eval_path)
            all_question_ids = []
            contexts = []
            with gzip.open(single_file_path) as myfile:
                header = json.loads(myfile.readline())['header']
                for example in myfile:
                    context = json.loads(example)
                    contexts.append(context)
                    all_question_ids += [qa['id'] for qa in context['qas']]
            predictions_question_ids = list(set(intances_question_id))
            #print(set(all_question_ids) - set(predictions_question_ids))
            results_dict['qids_missing_frac'] = len(set(all_question_ids) - set(predictions_question_ids)) / len(set(all_question_ids))
    else:
        # computing
        with open(args.eval_res_file, 'r') as f:
            results_dict = json.load(f)
    # Attach every CLI argument to the logged record.
    for field in args._get_kwargs():
        results_dict[field[0]] = field[1]
    ElasticLogger().write_log('INFO', 'EvalResults', context_dict=results_dict)
    # NOTE(review): both branches below run the identical upload command;
    # presumably eval_path was meant to change the destination — verify.
    if args.predictions_file is not None:
        if args.eval_path is not None:
            # uploading to cloud
            command = "aws s3 cp " + args.predictions_file + " " + args.prediction_path + " --acl public-read"
            Popen(command, shell=True, preexec_fn=os.setsid)
        else:
            # uploading to cloud
            command = "aws s3 cp " + args.predictions_file + " " + args.prediction_path + " --acl public-read"
            Popen(command, shell=True, preexec_fn=os.setsid)
def main():
    """Command-line entry point: log evaluation results and/or clean up a
    serialization directory."""
    parse = argparse.ArgumentParser("Pre-process for DocumentQA/MultiQA model and datareader")
    # (flag, value type, default) for every supported option.
    arg_specs = [
        ("--eval_res_file", str, None),
        ("--type", str, None),
        ("--source_dataset", str, None),
        ("--target_dataset", str, None),
        ("--eval_set", str, None),
        ("--split_type", str, 'dev'),
        ("--model", str, None),
        ("--target_size", str, None),
        ("--seed", str, None),
        ("--num_of_epochs", int, None),
        ("--batch_size", int, None),
        ("--learning_rate", float, None),
        ("--gas", int, None),
        ("--experiment", str, None),
        ("--full_experiments_name", str, None),
        ("--predictions_file", str, None),
        ("--prediction_path", str, "s3://aigame/predictions"),
        ("--eval_path", str, None),
        ("--remove_serialization_dir", str, None),
    ]
    for flag, value_type, default_value in arg_specs:
        parse.add_argument(flag, default=default_value, type=value_type)
    args = parse.parse_args()

    if args.eval_res_file is not None:
        process_results(args)

    if args.remove_serialization_dir is not None:
        logger.warning("removing the following dir %s" % (args.remove_serialization_dir))
        try:
            shutil.rmtree(args.remove_serialization_dir)
        except OSError as e:
            logger.warning("Error: %s - %s." % (e.filename, e.strerror))
    else:
        logger.error('No input provided')


if __name__ == "__main__":
    main()
| [
"subprocess.Popen",
"json.load",
"zipfile.ZipFile",
"argparse.ArgumentParser",
"shutil.rmtree",
"numpy.argmax",
"gzip.open",
"allennlp.common.elastic_logger.ElasticLogger",
"json.loads",
"numpy.unique",
"re.match",
"allennlp.common.file_utils.cached_path",
"typing.TypeVar",
"os.path.join",... | [((331, 343), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (338, 343), False, 'from typing import TypeVar, Iterable\n'), ((931, 958), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (948, 958), False, 'import logging\n'), ((1299, 1365), 're.match', 're.match', (['"""(\\\\S+)_dev_on_(\\\\S+)_from_(\\\\S+)_(\\\\S+).json"""', 'filename'], {}), "('(\\\\S+)_dev_on_(\\\\S+)_from_(\\\\S+)_(\\\\S+).json', filename)\n", (1307, 1365), False, 'import zipfile, gzip, re, copy, random, math\n'), ((5776, 5863), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Pre-process for DocumentQA/MultiQA model and datareader"""'], {}), "(\n 'Pre-process for DocumentQA/MultiQA model and datareader')\n", (5799, 5863), False, 'import argparse\n'), ((2653, 2731), 'numpy.argmax', 'numpy.argmax', (["[instance['best_span_logit'] for instance in question_instances]"], {}), "([instance['best_span_logit'] for instance in question_instances])\n", (2665, 2731), False, 'import numpy\n'), ((5072, 5084), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5081, 5084), False, 'import json\n'), ((5169, 5184), 'allennlp.common.elastic_logger.ElasticLogger', 'ElasticLogger', ([], {}), '()\n', (5182, 5184), False, 'from allennlp.common.elastic_logger import ElasticLogger\n'), ((5483, 5531), 'subprocess.Popen', 'Popen', (['command'], {'shell': '(True)', 'preexec_fn': 'os.setsid'}), '(command, shell=True, preexec_fn=os.setsid)\n', (5488, 5531), False, 'from subprocess import Popen, call\n'), ((5702, 5750), 'subprocess.Popen', 'Popen', (['command'], {'shell': '(True)', 'preexec_fn': 'os.setsid'}), '(command, shell=True, preexec_fn=os.setsid)\n', (5707, 5750), False, 'from subprocess import Popen, call\n'), ((409, 442), 'os.path.join', 'os.path.join', (['__file__', 'os.pardir'], {}), '(__file__, os.pardir)\n', (421, 442), False, 'import sys, os, shutil\n'), ((3266, 3363), 'allennlp.common.file_utils.cached_path', 'cached_path', 
(["('s3://multiqa/datasets/' + args.eval_set + '_' + args.split_type +\n '.jsonl.zip')"], {}), "('s3://multiqa/datasets/' + args.eval_set + '_' + args.\n split_type + '.jsonl.zip')\n", (3277, 3363), False, 'from allennlp.common.file_utils import cached_path\n'), ((4263, 4290), 'allennlp.common.file_utils.cached_path', 'cached_path', (['args.eval_path'], {}), '(args.eval_path)\n', (4274, 4290), False, 'from allennlp.common.file_utils import cached_path\n'), ((7389, 7433), 'shutil.rmtree', 'shutil.rmtree', (['args.remove_serialization_dir'], {}), '(args.remove_serialization_dir)\n', (7402, 7433), False, 'import sys, os, shutil\n'), ((3411, 3449), 'zipfile.ZipFile', 'zipfile.ZipFile', (['single_file_path', '"""r"""'], {}), "(single_file_path, 'r')\n", (3426, 3449), False, 'import zipfile, gzip, re, copy, random, math\n'), ((4368, 4395), 'gzip.open', 'gzip.open', (['single_file_path'], {}), '(single_file_path)\n', (4377, 4395), False, 'import zipfile, gzip, re, copy, random, math\n'), ((2028, 2044), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2038, 2044), False, 'import json\n'), ((2303, 2354), 'numpy.unique', 'np.unique', (['intances_question_id'], {'return_counts': '(True)'}), '(intances_question_id, return_counts=True)\n', (2312, 2354), True, 'import numpy as np\n'), ((4541, 4560), 'json.loads', 'json.loads', (['example'], {}), '(example)\n', (4551, 4560), False, 'import json\n'), ((3778, 3797), 'json.loads', 'json.loads', (['example'], {}), '(example)\n', (3788, 3797), False, 'import json\n')] |
#!/usr/bin/env python3
"""Plot a battery-discharger CSV log: per-battery voltage (and optionally
charge) against time."""
import argparse
import csv
import datetime as dt

import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np

parser = argparse.ArgumentParser(description='Plots battery discharge CSV log')
parser.add_argument('csv_file', help='discharger log file to plot')
parser.add_argument('--line_skips', type=int, default=0, help='number of input lines skipped per output line')
parser.add_argument('--volts_only', action='store_true', help='strips out coulomb data')
parser.add_argument('--yaxis', type=str, help='comma-separated list of battery numbers to plot (e.g., --yaxis=0,2,5)')
args = parser.parse_args()

# Batteries to plot: all eight unless --yaxis narrows the selection.
if args.yaxis is None:
    yaxis = [i for i in range(8)]
else:
    yaxis = [int(i) for i in args.yaxis.split(',')]

in_data = np.genfromtxt(args.csv_file, delimiter=',')
epoch_converter = np.vectorize(dt.datetime.fromtimestamp)

# Keep one row out of every `skip`; row 0 is the CSV header.
skip = args.line_skips + 1
# BUG FIX: timestamps must be decimated with the same stride as the data
# columns; the previous `in_data[1:, 0]` made plotting fail with a length
# mismatch whenever --line_skips > 0.
time = epoch_converter(in_data[1::skip, 0])
volts = in_data[1::skip, 1:17:2]     # odd columns: per-battery voltage
coulombs = in_data[1::skip, 2:17:2]  # even columns: per-battery charge

# Re-read the header row for legend labels.
with open(args.csv_file, newline='') as csv_file:
    row = next(csv.reader(csv_file))
    legend_t = row[0]
    legend_v = row[1:17:2]
    legend_c = row[2:17:2]

# Select the plot style before creating any artists so it actually applies
# (previously it was set after plotting, too late to take effect).
style.use('ggplot')

fig = plt.figure()
if args.volts_only:
    gs = fig.add_gridspec(1, 1)
else:
    gs = fig.add_gridspec(2, 1)

axv = fig.add_subplot(gs[0, 0])
for i, (col, leg) in enumerate(zip(volts.T, legend_v)):
    if i in yaxis:
        axv.plot(time, col, label=leg)
axv.set_title('CR2450 Discharge Curves')
axv.set_ylabel('Battery Voltage (V)')
axv.set_ylim(ymin=1.9, ymax=3.1)
axv.legend(loc='best')
axv.grid(True)

if not args.volts_only:
    axc = fig.add_subplot(gs[1, 0])  # , sharex=True)
    for i, (col, leg) in enumerate(zip(coulombs.T, legend_c)):
        if i in yaxis:
            axc.plot(time, col, label=leg)
    axc.set_ylabel('Battery Charge (C)')
    axc.set_xlabel('Date')
    axc.legend(loc='best')
    axc.grid(True)

fig.tight_layout()
plt.show()
| [
"numpy.vectorize",
"argparse.ArgumentParser",
"matplotlib.style.use",
"matplotlib.pyplot.show",
"csv.reader",
"numpy.genfromtxt",
"matplotlib.pyplot.figure"
] | [((162, 232), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plots battery discharge CSV log"""'}), "(description='Plots battery discharge CSV log')\n", (185, 232), False, 'import argparse\n'), ((773, 816), 'numpy.genfromtxt', 'np.genfromtxt', (['args.csv_file'], {'delimiter': '""","""'}), "(args.csv_file, delimiter=',')\n", (786, 816), True, 'import numpy as np\n'), ((835, 874), 'numpy.vectorize', 'np.vectorize', (['dt.datetime.fromtimestamp'], {}), '(dt.datetime.fromtimestamp)\n', (847, 874), True, 'import numpy as np\n'), ((1181, 1193), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1191, 1193), True, 'import matplotlib.pyplot as plt\n'), ((1907, 1926), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (1916, 1926), False, 'from matplotlib import style\n'), ((1946, 1956), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1954, 1956), True, 'import matplotlib.pyplot as plt\n'), ((1076, 1096), 'csv.reader', 'csv.reader', (['csv_file'], {}), '(csv_file)\n', (1086, 1096), False, 'import csv\n')] |
import numpy as np
import rospy
import tensorflow as tf
import yaml
from styx_msgs.msg import TrafficLight
class TLClassifier(object):
    """Traffic-light colour classifier backed by a frozen TF object-detection graph.

    Loads either the simulator or the on-site frozen graph (selected by the
    ``is_site`` flag in the ``/traffic_light_config`` ROS parameter) and
    classifies camera images into styx_msgs ``TrafficLight`` colours.
    """

    def __init__(self):
        SIM_MODEL_PATH = 'sim/frozen_inference_graph.pb'
        REAL_MODEL_PATH = 'real/frozen_inference_graph.pb'
        config_string = rospy.get_param("/traffic_light_config")
        # BUG FIX: yaml.load without an explicit Loader is unsafe and is a
        # TypeError in PyYAML >= 6; safe_load is the correct call for config data.
        config = yaml.safe_load(config_string)
        MODEL_PATH = REAL_MODEL_PATH if config['is_site'] else SIM_MODEL_PATH
        rospy.logwarn("Chosen Model: {0}".format(MODEL_PATH))
        # Build a dedicated graph and import the frozen GraphDef into it (TF1 API).
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(MODEL_PATH, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
            # Handles to the detector's input and output tensors.
            self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
            self.d_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
            self.d_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
            self.d_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        self.sess = tf.Session(graph=self.detection_graph)

    def filter_boxes(self, min_score, boxes, scores, classes):
        """Return (boxes, scores, classes) restricted to detections with a
        confidence >= `min_score`."""
        n = len(classes)
        idxs = []
        for i in range(n):
            if scores[i] >= min_score:
                idxs.append(i)
        filtered_boxes = boxes[idxs, ...]
        filtered_scores = scores[idxs, ...]
        filtered_classes = classes[idxs, ...]
        return filtered_boxes, filtered_scores, filtered_classes

    def get_classification(self, image):
        """Determines the color of the traffic light in the image

        Args:
            image (cv::Mat): image containing the traffic light

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        # Bounding Box Detection.
        with self.detection_graph.as_default():
            # Expand dimension since the model expects image to have shape [1, None, None, 3].
            img_expanded = np.expand_dims(image, axis=0)
            (boxes, scores, classes) = self.sess.run(
                [self.d_boxes, self.d_scores, self.d_classes],
                feed_dict={self.image_tensor: img_expanded})
        # Remove unnecessary dimensions
        boxes = np.squeeze(boxes)
        scores = np.squeeze(scores)
        classes = np.squeeze(classes)
        # Filter boxes with a confidence score less than `confidence_cutoff`
        confidence_cutoff = 0.8
        filtered_boxes, filtered_scores, filtered_classes = self.filter_boxes(confidence_cutoff, boxes, scores, classes)
        # Make light Unknown by default.
        top_class = 4
        if filtered_classes.any():
            # Top class from high confidence inference.
            top_class = filtered_classes[0].item()
        elif classes.any():
            # Make best guess from aggregation of lower confidence inference classes.
            confidence_cutoff = 0.4
            filtered_boxes, filtered_scores, filtered_classes = self.filter_boxes(confidence_cutoff, boxes, scores, classes)
            # Vote across all medium-confidence detections; ties favour the
            # later (higher-numbered) class, as in the original [-1] pick.
            class_count = [0, 0, 0, 0]
            for classification in filtered_classes:
                if classification.item() == 1.:
                    class_count[0] += 1
                elif classification.item() == 2.:
                    class_count[1] += 1
                elif classification.item() == 3.:
                    class_count[2] += 1
                else:
                    class_count[3] += 1
            max_count = max(class_count)
            top_class = [i for i, count in enumerate(class_count) if count == max_count][-1] + 1
        else:
            return TrafficLight.UNKNOWN
        # Detector class ids: 1 = green, 2 = red, 3 = yellow, anything else unknown.
        if top_class == 1.:
            return TrafficLight.GREEN
        elif top_class == 2.:
            return TrafficLight.RED
        elif top_class == 3.:
            return TrafficLight.YELLOW
        else:
            return TrafficLight.UNKNOWN
return TrafficLight.UNKNOWN | [
"yaml.load",
"tensorflow.Session",
"numpy.expand_dims",
"rospy.get_param",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"numpy.squeeze",
"tensorflow.import_graph_def",
"tensorflow.GraphDef"
] | [((300, 340), 'rospy.get_param', 'rospy.get_param', (['"""/traffic_light_config"""'], {}), "('/traffic_light_config')\n", (315, 340), False, 'import rospy\n'), ((358, 382), 'yaml.load', 'yaml.load', (['config_string'], {}), '(config_string)\n', (367, 382), False, 'import yaml\n'), ((563, 573), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (571, 573), True, 'import tensorflow as tf\n'), ((1281, 1319), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.detection_graph'}), '(graph=self.detection_graph)\n', (1291, 1319), True, 'import tensorflow as tf\n'), ((649, 662), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (660, 662), True, 'import tensorflow as tf\n'), ((2289, 2318), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (2303, 2318), True, 'import numpy as np\n'), ((2576, 2593), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (2586, 2593), True, 'import numpy as np\n'), ((2615, 2633), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (2625, 2633), True, 'import numpy as np\n'), ((2656, 2675), 'numpy.squeeze', 'np.squeeze', (['classes'], {}), '(classes)\n', (2666, 2675), True, 'import numpy as np\n'), ((692, 724), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['MODEL_PATH', '"""rb"""'], {}), "(MODEL_PATH, 'rb')\n", (706, 724), True, 'import tensorflow as tf\n'), ((858, 900), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (877, 900), True, 'import tensorflow as tf\n')] |
import requests
from pathlib import Path
import cv2
from tqdm import tqdm
import numpy as np
from imutils.paths import list_images
import pandas as pd
def test_img(project, port):
    """POST every image under img/ to the local inference server and save predictions.

    Args:
        project: route name of the served model (e.g. 'meter').
        port: local server port (string or int).

    Side effects: writes result.csv with one row per successfully
    classified image; stops at the first non-200 response.
    """
    import ast  # local import: only needed to parse the server reply safely

    url = f'http://127.0.0.1:{port}/{project}'
    print(url)
    img_dir = 'img/'
    pbar = tqdm(list(list_images(img_dir)), colour='green')
    recored = {'filename': [], 'pred': []}
    for img_file in pbar:
        data = {'filename': img_file, 'mask': False}
        res = requests.post(url=url, data=data)
        if res.status_code == 200:
            # SECURITY FIX: the response used to be eval()'d, which executes
            # arbitrary code from the network; literal_eval only accepts
            # Python literals.
            result = ast.literal_eval(res.text)['predict']
            real = convert(result)
            recored['filename'].append(img_file)
            recored['pred'].append(real)
        else:
            print(res.status_code)
            break
    df = pd.DataFrame(recored)
    df.to_csv('result.csv')
    print(df.head())
def convert(res):
    """Turn the server's nested prediction list into a squeezed numpy array."""
    prediction = np.array(res)
    return prediction.squeeze()
# Script entry point: send every image in img/ to the locally-served
# 'meter' model on port 1111 and record the predictions in result.csv.
if __name__=='__main__':
    port='1111'
    project='meter'
    test_img(project,port)
| [
"pandas.DataFrame",
"requests.post",
"imutils.paths.list_images",
"numpy.array"
] | [((780, 801), 'pandas.DataFrame', 'pd.DataFrame', (['recored'], {}), '(recored)\n', (792, 801), True, 'import pandas as pd\n'), ((456, 489), 'requests.post', 'requests.post', ([], {'url': 'url', 'data': 'data'}), '(url=url, data=data)\n', (469, 489), False, 'import requests\n'), ((290, 310), 'imutils.paths.list_images', 'list_images', (['img_dir'], {}), '(img_dir)\n', (301, 310), False, 'from imutils.paths import list_images\n'), ((881, 894), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (889, 894), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.animation import FuncAnimation
import math
# Simulation state: eight particles launched together from the origin.
n = 8                               # number of particles
m = np.ones(n, dtype=float)         # particle masses
x = np.zeros((n, 2), dtype=float)   # positions (x[i] = [px, py])
v = np.zeros((n, 2), dtype=float)   # velocities
f = np.zeros((n, 2), dtype=float)   # force accumulator
dt = 0.02                           # integration time step
g = np.array([0, -9.8])             # acceleration due to gravity
# Initialize: unit mass, origin position, shared launch velocity.
m[:] = 1.0
x[:] = 0.0
v[:] = np.array([1.0, 3.5])
# Time stepping (this is actually "semi-implicit Euler")
def step1():
    """Advance all particles one semi-implicit Euler step under gravity."""
    # Gravity is the only force: f[i] = m[i] * g for every particle.
    f.fill(0)
    f[:] = m[:, np.newaxis] * g
    # Velocities first (that is what makes it semi-implicit) ...
    v += (f / m[:, np.newaxis]) * dt
    # ... then positions are advanced with the *updated* velocities.
    x += v * dt
def step2():
    """One-off impulse step: scatter the eight particles radially, then advect.

    Forces are accumulated but (as in the original) the velocity update from
    forces is intentionally skipped on this step; only the fixed kicks apply.
    """
    f.fill(0)
    f[:] = m[:, np.newaxis] * g
    # One kick direction per particle (same order as the original v0..v7).
    kicks = np.array([[0.7, 0.7], [1, 0], [0.7, -0.7], [0, 1],
                      [0, -1], [-0.7, 0.7], [-1, 0], [-0.7, -0.7]], dtype=float)
    v[:8] += kicks
    # Positions advance with the kicked velocities.
    x += v * dt
# Drawing code: a single line artist ('o' markers, no line) whose data is
# replaced on every animation frame.
fig, ax = plt.subplots()
points, = ax.plot(x[:,0], x[:,1], 'o')
def init():
    """Set up the unit-square viewport before the animation starts."""
    for set_limit in (ax.set_xlim, ax.set_ylim):
        set_limit(0, 1)
    ax.set_aspect('equal')
    return (points,)
def animate(frame):
    """Per-frame callback: integrate one step and redraw the particles.

    At frame == frames//3 the one-off impulse (step2) scatters the particles;
    every other frame runs the plain gravity step (step1).
    """
    if frame == frames//3:
        step2()
    else:
        step1()
    points.set_data(x[:,0], x[:,1])
    # BUG FIX: 'frame is frames-1' compared ints by *identity*, which only
    # works by accident for CPython's small-int cache; use equality.
    if frame == frames-1:
        plt.close()
    return points,
totalTime = 1  # simulated duration in seconds
frames = int(totalTime/dt)
# Keep a reference to the FuncAnimation object: if it is garbage-collected the
# animation stops. interval is the frame delay in milliseconds (real time).
anim = FuncAnimation(fig, animate, frames=range(frames), init_func=init, interval=dt*1000)
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.close",
"numpy.zeros",
"numpy.ones",
"numpy.array",
"matplotlib.pyplot.subplots"
] | [((462, 481), 'numpy.array', 'np.array', (['[0, -9.8]'], {}), '([0, -9.8])\n', (470, 481), True, 'import numpy as np\n'), ((1574, 1588), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1586, 1588), True, 'import matplotlib.pyplot as plt\n'), ((2077, 2087), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2085, 2087), True, 'import matplotlib.pyplot as plt\n'), ((575, 591), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (583, 591), True, 'import numpy as np\n'), ((605, 623), 'numpy.array', 'np.array', (['[1, 3.5]'], {}), '([1, 3.5])\n', (613, 623), True, 'import numpy as np\n'), ((1092, 1112), 'numpy.array', 'np.array', (['[0.7, 0.7]'], {}), '([0.7, 0.7])\n', (1100, 1112), True, 'import numpy as np\n'), ((1122, 1138), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (1130, 1138), True, 'import numpy as np\n'), ((1148, 1169), 'numpy.array', 'np.array', (['[0.7, -0.7]'], {}), '([0.7, -0.7])\n', (1156, 1169), True, 'import numpy as np\n'), ((1179, 1195), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1187, 1195), True, 'import numpy as np\n'), ((1205, 1222), 'numpy.array', 'np.array', (['[0, -1]'], {}), '([0, -1])\n', (1213, 1222), True, 'import numpy as np\n'), ((1232, 1253), 'numpy.array', 'np.array', (['[-0.7, 0.7]'], {}), '([-0.7, 0.7])\n', (1240, 1253), True, 'import numpy as np\n'), ((1263, 1280), 'numpy.array', 'np.array', (['[-1, 0]'], {}), '([-1, 0])\n', (1271, 1280), True, 'import numpy as np\n'), ((1290, 1312), 'numpy.array', 'np.array', (['[-0.7, -0.7]'], {}), '([-0.7, -0.7])\n', (1298, 1312), True, 'import numpy as np\n'), ((161, 171), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (168, 171), True, 'import numpy as np\n'), ((214, 230), 'numpy.zeros', 'np.zeros', (['(n, 2)'], {}), '((n, 2))\n', (222, 230), True, 'import numpy as np\n'), ((315, 331), 'numpy.zeros', 'np.zeros', (['(n, 2)'], {}), '((n, 2))\n', (323, 331), True, 'import numpy as np\n'), ((372, 388), 'numpy.zeros', 'np.zeros', 
(['(n, 2)'], {}), '((n, 2))\n', (380, 388), True, 'import numpy as np\n'), ((1907, 1918), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1916, 1918), True, 'import matplotlib.pyplot as plt\n')] |
import math
import torch
import pickle
import numpy as np
from torch import nn
from scipy import signal
import torch.optim as optim
from torch.distributions import Normal
def _uniform_init(tensor, param=3e-3):
return tensor.data.uniform_(-param, param)
def layer_init(layer, weight_init = _uniform_init, bias_init = _uniform_init ):
    """Initialise a layer: *weight_init* on its weight, then *bias_init* on its bias."""
    for tensor, initializer in ((layer.weight, weight_init), (layer.bias, bias_init)):
        initializer(tensor)
def uniform_init(layer):
    """Uniformly initialise both the weight and bias of *layer*."""
    layer_init(layer, weight_init = _uniform_init, bias_init = _uniform_init )
class SpinalActorCritic(nn.Module):
    """Actor-critic wrapper pairing a policy with a same-topology value network.

    Args:
        policy: policy network; must provide ``generate_value_function()``,
            ``params()``, ``reset()``, be callable on observations, and expose
            a ``plastic`` flag.
        net_type: descriptive tag for the network topology.
        policy_learning_rate: optimiser step size for the policy parameters.
        value_learning_rate: optimiser step size for the value parameters.
        optimizer: optimiser class instantiated once per parameter set.
    """

    def __init__(self, policy, net_type="linear",
                 policy_learning_rate=0.0003, value_learning_rate=0.0003, optimizer=optim.Adam):
        super(SpinalActorCritic, self).__init__()
        self.policy = policy
        self.net_type = net_type
        # generates value function with same topology
        self.value_function = policy.generate_value_function()
        policy_params, value_params = self.params()
        self.value_optim = optimizer(params=value_params, lr=value_learning_rate)
        self.policy_optim = optimizer(params=policy_params, lr=policy_learning_rate)

    def forward(self, x):
        """Run the policy network on observation batch *x*."""
        return self.policy(x)

    def value(self, x):
        """Run the value network on observation batch *x*."""
        return self.value_function(x)

    def params(self):
        """Return (policy parameters, value parameters) as a pair."""
        return self.policy.params(), self.value_function.params()

    def optimize(self, policy_loss, value_loss):
        """Take one gradient step on each network: value first, then policy."""
        self.value_optim.zero_grad()
        value_loss.backward()
        self.value_optim.step()
        self.policy_optim.zero_grad()
        policy_loss.backward()
        if self.policy.plastic:
            # Plastic traces make gradients spiky; clip before stepping.
            # FIX: clip_grad_norm is deprecated -- use the in-place variant.
            nn.utils.clip_grad_norm_(self.policy.parameters(), 10)
        self.policy_optim.step()

    def reset(self):
        """Reset the policy's episodic state (e.g. its plastic trace)."""
        self.policy.reset()
class ReplayMemory:
    """Rollout storage for the two-level hierarchy.

    Keeps parallel lists (actions, rewards, log-probs, reset flags and sensor
    states) for the top and bottom policies; clear() empties all of them.
    """

    _BUFFERS = (
        "actions_top", "actions_bottom",
        "rewards_top", "rewards_bottom",
        "log_probs_top", "log_probs_bottom",
        "reset_flags_top", "reset_flags_bottom",
        "sensor_states_top", "sensor_states_bottom",
    )

    def __init__(self):
        self.clear()

    def clear(self):
        """Rebind every buffer to a fresh empty list."""
        for buffer_name in self._BUFFERS:
            setattr(self, buffer_name, list())
class SpinalPlasticityModule(nn.Module):
    """Hebbian / neuromodulated plasticity layer (work in progress).

    Holds a (dim_1, dim_2) Hebbian trace plus the parameters of a
    neuromodulated linear update rule. Both `forward` and `update_trace`
    are unimplemented stubs.
    """
    def __init__(self, dim_1, dim_2, plasticity=True, plasticity_type="neuromodulated_linear"):
        super(SpinalPlasticityModule, self).__init__()
        self.plasticity = plasticity
        self.plasticity_type = plasticity_type
        # Hebbian trace (plain tensor; not registered as a buffer/parameter).
        self.hebbian_trace = torch.zeros(size=(dim_1, dim_2))
        # create parameters for respective plasticity type
        if self.plasticity_type == "neuromodulated_linear":
            # linear feedforward weights
            self.forward_weights = nn.Linear(dim_1, dim_2)
            # modulatory update rule weights
            # NOTE(review): self.COMPRESS_DIM is never defined on this class,
            # so this branch -- the *default* plasticity_type -- raises
            # AttributeError. A class-level COMPRESS_DIM must be defined
            # before this module is usable; intended value unknown from here.
            self.eta_fan_in = nn.Linear(dim_2, self.COMPRESS_DIM)
            self.eta_fan_out = nn.Linear(self.COMPRESS_DIM, dim_2)
            # alpha plasticity modulation weights
            self.alpha_plasticity = torch.rand(size=(dim_1, dim_2))*0.01
            # uniform initialize weights
            uniform_init(self.eta_fan_in)
            uniform_init(self.eta_fan_out)
            uniform_init(self.forward_weights)
    def forward(self, x):
        # stub: not implemented yet
        pass
    def update_trace(self):
        # stub: not implemented yet
        pass
class SpinalNetworkModule(nn.Module):
    """One configurable layer of a spinal policy/value network.

    The layer's behaviour is selected by ``module_type``:

    * ``continuous_reinforcement``              -- tanh(Linear) hidden policy layer
    * ``continuous_reinforcement_final``        -- Linear mean head + learned, clamped log-std
    * ``continuous_reinforcement_vision``       -- Conv2d (+ optional max-pool) policy layer
    * ``continuous_reinforcement_vision_final`` -- Linear embedding head for vision
    * ``value_module``                          -- tanh(Linear) hidden value layer
    * ``value_module_final``                    -- Linear scalar value head
    * ``value_module_vision``                   -- Conv2d (+ optional max-pool) value layer

    Args:
        dim_1: input features (in-channels for vision modules).
        dim_2: output features (out-channels for vision modules).
        module_type: one of `module_types`.
        optional_args: settings for vision modules: 'kernel', 'stride',
            'activation', 'pooling' (bool), 'pooling_kernel'.

    Raises:
        Exception: if `module_type` is not one of `module_types`.
    """

    def __init__(self, dim_1, dim_2, module_type='continuous_reinforcement', optional_args=None):
        super(SpinalNetworkModule, self).__init__()
        self.dimension_1 = dim_1
        self.dimension_2 = dim_2
        self.module_type = module_type
        # Normalise so the vision branches can use `in` tests safely even when
        # the caller passes no options (optional_args defaults to None).
        if optional_args is None:
            optional_args = {}
        if module_type not in self.module_types:
            raise Exception("{} is not a module type".format(module_type))
        if module_type == "continuous_reinforcement":
            # linear feedforward with zeroed bias
            self.activation = torch.tanh
            self.linear = nn.Linear(dim_1, dim_2)
            nn.init.xavier_normal_(self.linear.weight)
            self.linear.bias.data *= 0
        elif module_type == "continuous_reinforcement_vision":
            self._build_vision(dim_1, dim_2, optional_args)
        elif module_type == "continuous_reinforcement_vision_final":
            # final mapping for continuous reinforcement vision -- linear embedding
            self.linear = nn.Linear(dim_1, dim_2)
            nn.init.xavier_normal_(self.linear.weight)
        elif module_type == "value_module":
            # feedforward for linear value module
            self.activation = torch.tanh
            self.linear = nn.Linear(dim_1, dim_2)
            uniform_init(self.linear)
        elif module_type == "value_module_final":
            # final scalar output for value module
            self.linear = nn.Linear(dim_1, 1)
            uniform_init(self.linear)
        elif module_type == "value_module_vision":
            # BUG FIX: this branch previously tested
            # `if 'pooling' in optional_args['pooling']`, which raises
            # KeyError (key absent) or TypeError (bool value); it now shares
            # the corrected vision construction with the policy branch.
            self._build_vision(dim_1, dim_2, optional_args)
        elif module_type == "continuous_reinforcement_final":
            # final module for continuous action space reinforcement agent
            self.linear = nn.Linear(dim_1, dim_2)
            nn.init.xavier_normal_(self.linear.weight)
            self.linear.bias.data *= 0
            # state-independent log standard deviation, clamped in forward()
            self.log_std_lin = nn.Parameter(torch.ones(1, dim_2)*-0.5)
            self.log_std_min, self.log_std_max = -20, 2

    def _build_vision(self, dim_1, dim_2, optional_args):
        """Build the Conv2d (+ optional MaxPool2d) stack from `optional_args`."""
        stride = optional_args.get('stride', 2)
        kernel = optional_args.get('kernel', 5)
        self.pooling = optional_args.get('pooling', False)
        self.activation = optional_args.get('activation', torch.relu)
        # Pool layer is always constructed (matching the original); it is only
        # applied in forward() when self.pooling is truthy.
        self.pool = nn.MaxPool2d(kernel_size=optional_args.get('pooling_kernel', 2))
        # the dimensionality of the hypothetical flattened output
        self.flatten_dimensionlity = -1
        self.convolutional = nn.Conv2d(in_channels=dim_1, out_channels=dim_2, kernel_size=kernel, stride=stride)

    @property
    def module_types(self):
        """Valid values for `module_type`."""
        return ['continuous_reinforcement', 'continuous_reinforcement_final', 'continuous_reinforcement_vision',
                'value_module', 'value_module_vision', 'value_module_final', 'continuous_reinforcement_vision_final']

    def forward(self, x):
        if self.module_type in ['value_module', 'continuous_reinforcement']:
            return self.activation(self.linear(x))
        elif self.module_type in ['value_module_final', 'continuous_reinforcement_vision_final']:
            return self.linear(x)
        elif self.module_type in ['continuous_reinforcement_vision', 'value_module_vision']:
            conv = self.convolutional(x)
            if self.pooling:
                conv = self.pool(conv)
            return self.activation(conv)
        elif self.module_type == "continuous_reinforcement_final":
            # Gaussian policy head: mean plus clamped, state-independent log-std.
            mean = self.linear(x)
            return mean, torch.clamp(self.log_std_lin.expand_as(mean), min=self.log_std_min, max=self.log_std_max)
class SpinalNeuralNetwork(nn.Module):
    """Feed-forward network built from SpinalNetworkModule layers.

    `network_type` selects the topology:
      * "cont_pg_rl" -- tanh hidden layers plus a Gaussian (mean, log-std)
        policy head for continuous-action policy-gradient RL.
      * "vanilla"    -- plain MLP with an identity final activation.
      * "vision"     -- convolutional layers plus a linear embedding head.

    With `plasticity == "neuromodulated"`, a Hebbian trace modulated by a
    learned eta signal is added to the final layer's mean output in forward().
    """
    def __init__(self, latent_shape, environment=None, act_space=None, obs_space=None,
                 plasticity=None, network_type="cont_pg_rl", module_arguments=None):
        super(SpinalNeuralNetwork, self).__init__()
        self.environment = environment
        self.network_type = network_type
        # Observation/action sizes come from the gym env when one is given,
        # otherwise from the explicit obs_space/act_space arguments.
        if environment is not None:
            ob_space = environment.observation_space.shape[0]
        else:
            ob_space = obs_space
        if environment is not None:
            ac_space = environment.action_space.shape[0]
        else:
            ac_space = act_space
        self.plastic = plasticity
        # Blank Hebbian trace used by reset(); deliberately outside autograd.
        self.trace_template = \
            torch.zeros(latent_shape[-1], ac_space, requires_grad=False)
        if plasticity is not None:
            if plasticity == "neuromodulated":
                # Live trace plus the modulatory (eta fan-in/out) and gating
                # (alpha) parameters of the plastic update rule.
                self.trace = torch.zeros(latent_shape[-1], ac_space, requires_grad=False)
                self.fan_in = nn.Linear(ac_space, 1)
                self.fan_out = nn.Linear(1, ac_space)
                self.alpha_plasticity = nn.Parameter(torch.randn(latent_shape[-1], ac_space)*0.01)
        # generate network shape from latent if cont_pg_rl
        self.network_shape = latent_shape
        if network_type == "cont_pg_rl":
            self.network_shape = [ob_space] + latent_shape + [ac_space]
        # if no model arguments provided generate corresponding argument list
        if module_arguments is None:
            module_arguments = [dict() for _ in range(len(self.network_shape))]
        # check validity of network type
        if network_type not in self.network_types:
            raise Exception("{} is not a network type".format(network_type))
        # generate corresponding network modules
        # NOTE(review): modules are held in a plain Python list, so torch does
        # not auto-register their parameters; params() collects them manually.
        if network_type == "cont_pg_rl":
            """ Continuous policy gradient reinforcement learning """
            self.network_modules = [
                SpinalNetworkModule(dim_1=self.network_shape[_],
                    dim_2=self.network_shape[_ + 1], module_type="continuous_reinforcement",
                    optional_args=module_arguments[_]) for _ in range(len(self.network_shape) - 2)]
            self.network_modules.append(SpinalNetworkModule(dim_1=self.network_shape[-2], dim_2=self.network_shape[-1],
                module_type="continuous_reinforcement_final", optional_args=module_arguments[-1]))
        elif network_type == "vanilla":
            """ Continuous policy gradient reinforcement learning """
            self.network_modules = [
                SpinalNetworkModule(dim_1=self.network_shape[_],
                    dim_2=self.network_shape[_ + 1], module_type="continuous_reinforcement",
                    optional_args=module_arguments[_]) for _ in range(len(self.network_shape) - 1)]
            self.network_modules[-1].activation = nn.Identity()
        elif network_type == "vision":
            """ Continuous policy gradient reinforcement learning """
            self.network_modules = [
                SpinalNetworkModule(dim_1=self.network_shape[_],
                    dim_2=self.network_shape[_ + 1], module_type="continuous_reinforcement_vision",
                    optional_args=module_arguments[_]) for _ in range(len(self.network_shape) - 2)]
            self.network_modules.append(SpinalNetworkModule(dim_1=self.network_shape[-2], dim_2=self.network_shape[-1],
                module_type="continuous_reinforcement_vision_final", optional_args=module_arguments[-1]))
    def params(self):
        """Gather parameters from every module (they are not auto-registered)."""
        # generate parameter list
        param_list = list()
        for _ in range(len(self.network_modules)):
            param_list += list(self.network_modules[_].parameters())
        return param_list
    def generate_value_function(self, optional_args=None):
        """Deep-copy this network into a (non-plastic) value network with the
        same topology; each module type is mapped to its value_module twin."""
        # generate value function with same topology as policy
        _itr = 0
        value_modules = list()
        if optional_args is None:
            optional_args = [None for _ in range(len(self.network_modules))]
        for _module in self.network_modules:
            value_modules.append(SpinalNetworkModule(dim_1=_module.dimension_1, dim_2=_module.dimension_2,
                module_type=_module.module_type.replace("continuous_reinforcement", "value_module"),
                optional_args=optional_args[_itr]))
            _itr += 1
        d_copy = deepcopy(self)
        d_copy.network_modules = value_modules
        d_copy.plastic = False
        return d_copy
    @property
    def network_types(self):
        """Supported values for `network_type`."""
        return ["cont_pg_rl", "vanilla", "vision"]
    def forward(self, x):
        """Feed *x* through all modules; optionally add the plastic trace to
        the final Gaussian mean and update the trace with the new output."""
        # feedforward through modules
        pre_syn = torch.clone(x)
        for _module in self.network_modules:
            pre_syn = torch.clone(x)  # keep the final layer's pre-synaptic input
            x = _module(x)
        if self.plastic:
            x = x[0]  # final module returned (mean, log_std); take the mean
            # Hebbian contribution: pre-synaptic activity through the gated trace.
            plastic_component = torch.mm(pre_syn, self.trace*self.alpha_plasticity)
            new_mean = x + plastic_component
            x = new_mean, torch.clamp(self.network_modules[-1].log_std_lin.expand_as(new_mean),
                min=self.network_modules[-1].log_std_min, max=self.network_modules[-1].log_std_max)
            # Neuromodulated trace update, clamped into [-1, 1].
            eta = self.fan_out(self.fan_in(x[0]))
            self.trace = torch.clamp(self.trace + eta*torch.mm(pre_syn.t(), x[0]), min=-1, max=1)
        return x
    def reset(self):
        """Clear the Hebbian trace between episodes."""
        self.trace = torch.clone(self.trace_template)
class SpinalHierarchicalNetwork(nn.Module):
    """Two-level hierarchical actor-critic trained with clipped PPO + GAE.

    A slow "top" policy and a fast "bottom" policy (both SpinalActorCritic
    instances) are sampled through forward() and optimised independently in
    learn() on the buffers stored in a ReplayMemory.
    """
    def __init__(self, top_layer_ac, bottom_layer_ac, top_layer_freq,
                 bottom_layer_freq, epochs=10, minibatch_size=1000, timestep_size=4000, entropy_coefficient=0.01):
        super(SpinalHierarchicalNetwork, self).__init__()
        self.lamb = 0.95      # GAE lambda
        self.gamma = 0.99     # discount factor
        self.ppo_clip = 0.2   # PPO clipping epsilon
        self.epochs = epochs
        self.timestep_size = timestep_size
        self.minibatch_size = minibatch_size
        self.entropy_coefficient = entropy_coefficient
        self.top_layer_freq = top_layer_freq
        self.bottom_layer_freq = bottom_layer_freq
        self.top_layer_actor_critic = top_layer_ac
        self.bottom_layer_actor_critic = bottom_layer_ac
    def forward(self, x, memory, evaluate=False, append=True, top=False, bottom=False, ):
        """Sample an action from the selected level's Gaussian policy.

        Returns (action, memory); with evaluate=True returns the
        deterministic mean and None instead. The summed per-dimension
        log-probability is optionally appended to the replay memory.

        Raises:
            RuntimeError: if neither `top` nor `bottom` is set.
        """
        if not top and not bottom:
            raise RuntimeError("Must be top or bottom")
        if top:
            action_mean, action_log_std = self.top_layer_actor_critic(x)
            action_std = torch.exp(action_log_std)
            distribution = Normal(loc=action_mean, scale=action_std)
            action = distribution.sample()
            log_probabilities = distribution.log_prob(action)
            # joint log-probability across action dimensions
            log_probabilities = torch.sum(log_probabilities, dim=1)
            if append:
                memory.log_probs_top.append(log_probabilities.detach())
            if evaluate:
                return action_mean.detach(), None
            return action, memory
        elif bottom:
            action_mean, action_log_std = self.bottom_layer_actor_critic(x)
            action_std = torch.exp(action_log_std)
            distribution = Normal(loc=action_mean, scale=action_std)
            action = distribution.sample()
            log_probabilities = distribution.log_prob(action)
            log_probabilities = torch.sum(log_probabilities, dim=1)
            if append:
                memory.log_probs_bottom.append(log_probabilities.detach())
            if evaluate:
                return action_mean.detach(), None
            return action, memory
        raise Exception("Forward not top or bottom")
    def evaluate(self, x, old_action, top=False, bottom=False):
        """Re-evaluate stored actions under the current policy.

        Returns (summed log-probabilities, per-dimension entropy) for the
        chosen level; used to form the PPO ratio in learn().
        """
        if not top and not bottom:
            raise RuntimeError("Must be top or bottom")
        if top:
            action_mean, action_log_std = self.top_layer_actor_critic(x)
            action_std = torch.exp(action_log_std)
            distribution = Normal(loc=action_mean, scale=action_std)
            log_probabilities = distribution.log_prob(old_action.squeeze(dim=1))
            log_probabilities = torch.sum(log_probabilities, dim=1)
            entropy = distribution.entropy()
            return log_probabilities, entropy
        elif bottom:
            action_mean, action_log_std = self.bottom_layer_actor_critic(x)
            action_std = torch.exp(action_log_std)
            distribution = Normal(loc=action_mean, scale=action_std)
            log_probabilities = distribution.log_prob(old_action.squeeze(dim=1))
            log_probabilities = torch.sum(log_probabilities, dim=1)
            entropy = distribution.entropy()
            return log_probabilities, entropy
    def generalized_advantage_estimation(self, r, v, mask):
        """Compute GAE advantages and value targets over a rollout.

        Args:
            r: rewards, shape (batch, 1).
            v: value predictions, shape (batch, 1).
            mask: continuation flags (0 marks end of trajectory), shape (batch, 1).

        Returns:
            (normalized advantages, bootstrapped value targets).
        """
        batchsz = v.size(0)
        # v_target is worked out by Bellman equation.
        delta = torch.Tensor(batchsz)
        v_target = torch.Tensor(batchsz)
        adv_state = torch.Tensor(batchsz)
        prev_v = 0
        prev_v_target = 0
        prev_adv_state = 0
        for t in reversed(range(batchsz)):
            # mask here indicates a end of trajectory
            # this value will be treated as the target value of value network.
            # mask = 0 means the immediate reward is the real V(s) since it's end of trajectory.
            # formula: V(s_t) = r_t + gamma * V(s_t+1)
            v_target[t] = r[t] + self.gamma * prev_v_target * mask[t]
            # please refer to : https://arxiv.org/abs/1506.02438
            # for generalized adavantage estimation
            # formula: delta(s_t) = r_t + gamma * V(s_t+1) - V(s_t)
            delta[t] = r[t] + self.gamma * prev_v * mask[t] - v[t]
            # formula: A(s, a) = delta(s_t) + gamma * lamda * A(s_t+1, a_t+1)
            # here use symbol tau as lambda, but original paper uses symbol lambda.
            adv_state[t] = delta[t] + self.gamma * self.lamb * prev_adv_state * mask[t]
            # update previous
            prev_v_target = v_target[t]
            prev_v = v[t]
            prev_adv_state = adv_state[t]
        # normalize adv_state
        adv_state = (adv_state - adv_state.mean()) / (adv_state.std() + 1e-6)
        return adv_state, v_target
    def learn(self, memory):
        """Run one PPO update for the bottom level, then the top level.

        Returns a list holding both levels' value targets (for logging).
        """
        replay_len = len(memory.rewards_bottom)
        minibatch_count = self.timestep_size / self.minibatch_size
        values = self.bottom_layer_actor_critic.value(torch.FloatTensor(memory.sensor_states_bottom)).detach()
        advantages, value_target = self.generalized_advantage_estimation(
            torch.FloatTensor(memory.rewards_bottom).unsqueeze(1), values, torch.FloatTensor(memory.reset_flags_bottom).unsqueeze(1))
        advantages = advantages.detach().numpy()
        value_target = value_target.detach().numpy()
        for _ in range(self.epochs):
            # fresh shuffled minibatch split every epoch
            minibatch_indices = list(range(replay_len))
            np.random.shuffle(minibatch_indices)
            minibatches = [minibatch_indices[int(_ * (replay_len/minibatch_count)):
                int((_ + 1) * (replay_len/minibatch_count))] for _ in range(int(minibatch_count))]
            for batch in minibatches:
                mb_states = torch.FloatTensor(np.array(memory.sensor_states_bottom)[batch])
                mb_actions = torch.stack(memory.actions_bottom).index_select(0, torch.LongTensor(batch))
                mb_old_log_probabilities = torch.stack(memory.log_probs_bottom).index_select(0, torch.LongTensor(batch))
                predicted_values = self.bottom_layer_actor_critic.value(mb_states)
                log_probabilities, entropy = self.evaluate(mb_states, mb_actions, bottom=True)
                mb_advantages = torch.FloatTensor(advantages[batch])
                # clipped PPO surrogate objective
                ratio = (log_probabilities - mb_old_log_probabilities.squeeze()).exp()
                min_adv = torch.where(mb_advantages > 0,
                    (1 + self.ppo_clip) * mb_advantages, (1 - self.ppo_clip) * mb_advantages)
                policy_loss = -(torch.min(ratio * mb_advantages, min_adv)).mean() - self.entropy_coefficient*entropy.mean()
                value_loss = (torch.FloatTensor(value_target[batch]) - predicted_values.squeeze()).pow(2).mean()
                self.bottom_layer_actor_critic.optimize(policy_loss, value_loss)
        adv = list()
        adv += [value_target]
        # ---- top level: same procedure on the top-layer buffers ----
        replay_len = len(memory.rewards_top)
        minibatch_count = self.timestep_size / self.minibatch_size
        values = self.top_layer_actor_critic.value(torch.FloatTensor(memory.sensor_states_top)).detach()
        advantages, value_target = self.generalized_advantage_estimation(
            torch.FloatTensor(memory.rewards_top).unsqueeze(1), values, torch.FloatTensor(memory.reset_flags_top).unsqueeze(1))
        advantages = advantages.detach().numpy()
        value_target = value_target.detach().numpy()
        for _ in range(self.epochs):
            minibatch_indices = list(range(replay_len))
            np.random.shuffle(minibatch_indices)
            minibatches = [minibatch_indices[int(_ * (replay_len/minibatch_count)):
                int((_ + 1) * (replay_len/minibatch_count))] for _ in range(int(minibatch_count))]
            for batch in minibatches:
                mb_states = torch.FloatTensor(np.array(memory.sensor_states_top)[batch])
                mb_actions = torch.stack(memory.actions_top).index_select(0, torch.LongTensor(batch))
                mb_old_log_probabilities = torch.stack(memory.log_probs_top).index_select(0, torch.LongTensor(batch))
                predicted_values = self.top_layer_actor_critic.value(mb_states)
                log_probabilities, entropy = self.evaluate(mb_states, mb_actions, top=True)
                mb_advantages = torch.FloatTensor(advantages[batch])
                ratio = (log_probabilities - mb_old_log_probabilities.squeeze()).exp()
                min_adv = torch.where(mb_advantages > 0,
                    (1 + self.ppo_clip) * mb_advantages, (1 - self.ppo_clip) * mb_advantages)
                policy_loss = -(torch.min(ratio * mb_advantages, min_adv)).mean() - self.entropy_coefficient*entropy.mean()
                value_loss = (torch.FloatTensor(value_target[batch]) - predicted_values.squeeze()).pow(2).mean()
                self.top_layer_actor_critic.optimize(policy_loss, value_loss)
        adv += [value_target]
        return adv
from copy import deepcopy
def run(train_id=0):
    """Train a two-level hierarchical PPO agent on HalfCheetahHier-v2.

    The top spinal layer emits a 12-dim target (6 joint positions + 6 joint
    velocities) every ``top_layer_freq`` environment steps; the bottom layer
    tracks that target with a 6-dim joint action on every step.  Training
    statistics are printed each time more than ``timestep_size`` steps have
    been collected and a learning update has run.

    Parameters
    ----------
    train_id : int
        Identifier for this training run (only used by the commented-out
        checkpoint-saving code).
    """
    torch.set_num_threads(1)
    import gym
    env = gym.make("HalfCheetahHier-v2")
    agent_replay = ReplayMemory()
    # Raw observation is 17-dim; low-level action is 6-dim.
    # Top layer observes (target velocity, 3 stacked 17-dim observations) and
    # outputs 6 target joint positions + 6 target joint velocities.
    top_spinal = SpinalActorCritic(SpinalNeuralNetwork([64, 64], obs_space=17*3+1, act_space=6+6))
    # Bottom layer additionally observes the top layer's 12-dim action.
    bottom_spinal = SpinalActorCritic(SpinalNeuralNetwork([64, 64], obs_space=17*3+6+6+1, act_space=6))
    spinal_network = SpinalHierarchicalNetwork(top_layer_ac=top_spinal,
        bottom_layer_ac=bottom_spinal, top_layer_freq=5, bottom_layer_freq=1,
        epochs=10, minibatch_size=500*15, timestep_size=3000*15, entropy_coefficient=0.0)
    timesteps = 0
    total_timesteps = 0
    max_timesteps = 30000000
    avg_action_magnitude = 0
    episode_itr = 0
    tr_avg_sum = 0.0
    avg_sum_rewards = 0.0
    saved_reward = list()
    saved_finish_mask = list()
    while total_timesteps < max_timesteps:
        game_over = False
        sensor_obs = env.reset()
        freq_itr = 0
        top_layer_action = np.zeros((1, 12))
        # Sample a target forward velocity in [1.0, 1.25) for this episode.
        target_vel_x = 1.0 + torch.rand(1, 1) * 0.25
        while not game_over:
            if freq_itr == 0:
                # Top layer acts only every `top_layer_freq` steps.
                top_layer_state = torch.cat((target_vel_x, torch.FloatTensor(sensor_obs).unsqueeze(0)), dim=1)
                top_layer_action, agent_replay = spinal_network(
                    x=top_layer_state, memory=agent_replay, top=True)
                agent_replay.sensor_states_top.append(deepcopy(top_layer_state.detach().numpy()[0]))
                agent_replay.actions_top.append(top_layer_action)
                clip_max = torch.FloatTensor([[1.05, .785, .785, .7, .87, .5, 10, 10, 10, 10, 10, 10]])  # this is both min and max, just get min and max separate
                clip_min = torch.FloatTensor([[-.52, -.785, -.4, -1, -1.2, -.5, -10, -10, -10, -10, -10, -10]])
                top_layer_action = np.clip(top_layer_action.clone(), a_max=clip_max, a_min=clip_min)
                agent_replay.reset_flags_top.append(1)
            # DO TARGET DELTA POSITION AND DELTA VELOCITY
            bottom_state = torch.cat((target_vel_x, top_layer_action, torch.FloatTensor(sensor_obs).unsqueeze(0)), dim=1)
            local_action, agent_replay = spinal_network(x=bottom_state, memory=agent_replay, bottom=True)
            agent_replay.sensor_states_bottom.append(deepcopy(bottom_state.detach().numpy()[0]))
            agent_replay.actions_bottom.append(local_action)
            local_action = local_action.squeeze(dim=1).numpy()
            sensor_obs, tr_rew, game_over, _ = env.step(np.clip(local_action, a_min=-1, a_max=1))
            agent_replay.reset_flags_bottom.append(0 if game_over else 1)
            if game_over or freq_itr == 4:
                # Top layer reward: negative absolute forward-velocity error (+4 offset).
                _vel_err = -1*(abs(target_vel_x.numpy()[0][0] - env.env.data.body_xvelp[1][0]))
                agent_replay.rewards_top.append(_vel_err + 4)
                if game_over:
                    agent_replay.reset_flags_top[-1] = 0
            # Bottom layer reward: negative L1 tracking error of the 6 target
            # joint positions (+4 offset), computed on every step.
            reward = -(np.sum(np.abs(top_layer_action.flatten().numpy()[:6] - env.env.data.qpos[3:]))) + 4
            agent_replay.rewards_bottom.append(reward)
            avg_sum_rewards += reward
            tr_avg_sum += tr_rew
            timesteps += 1
            total_timesteps += 1
            freq_itr = (freq_itr + 1) % spinal_network.top_layer_freq
        episode_itr += 1
        if timesteps > spinal_network.timestep_size:
            updates = spinal_network.learn(memory=agent_replay)
            #upd_list += updates
            avg_action_magnitude /= timesteps
            # BUGFIX: the 'Bottom' statistic was rounded to 56 digits in the
            # original; all sibling statistics round to 5.
            print("Time: {}, Bottom: {}, Top: {}, True: {}, Timestep: {}, ".format(
                round(timesteps/episode_itr, 5), round(sum(updates[0])/len(agent_replay.rewards_bottom), 5),
                round(sum(updates[1])/len(agent_replay.rewards_top), 5), round(tr_avg_sum/episode_itr, 5), total_timesteps), end="")
            print("Target Vel: {}, Actual Vel, {}"
                  .format(round(target_vel_x.numpy()[0][0], 5), round(env.env.data.body_xvelp[1][0], 5)))
            timesteps = 0
            episode_itr = 0
            tr_avg_sum = 0.0
            avg_sum_rewards = 0.0
            avg_action_magnitude = 0
            #with open("saved_model_{}_{}.pkl".format(net_type, train_id), "wb") as f:
            #    pickle.dump(agent, f)
            #with open("saved_weightupd_rew.pkl", "wb") as f:
            #    pickle.dump(upd_list, f)
            agent_replay.clear()
            saved_reward.clear()
            saved_finish_mask.clear()
# Script entry point: launch a single training run with train_id=1.
run(1)
| [
"torch.mm",
"numpy.clip",
"torch.randn",
"torch.set_num_threads",
"torch.clone",
"torch.ones",
"torch.FloatTensor",
"torch.exp",
"torch.Tensor",
"torch.nn.Linear",
"torch.zeros",
"numpy.random.shuffle",
"copy.deepcopy",
"torch.where",
"torch.nn.Conv2d",
"torch.rand",
"torch.nn.MaxPoo... | [((23191, 23215), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (23212, 23215), False, 'import torch\n'), ((23242, 23272), 'gym.make', 'gym.make', (['"""HalfCheetahHier-v2"""'], {}), "('HalfCheetahHier-v2')\n", (23250, 23272), False, 'import gym\n'), ((2887, 2919), 'torch.zeros', 'torch.zeros', ([], {'size': '(dim_1, dim_2)'}), '(size=(dim_1, dim_2))\n', (2898, 2919), False, 'import torch\n'), ((9524, 9584), 'torch.zeros', 'torch.zeros', (['latent_shape[-1]', 'ac_space'], {'requires_grad': '(False)'}), '(latent_shape[-1], ac_space, requires_grad=False)\n', (9535, 9584), False, 'import torch\n'), ((13194, 13208), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (13202, 13208), False, 'from copy import deepcopy\n'), ((13487, 13501), 'torch.clone', 'torch.clone', (['x'], {}), '(x)\n', (13498, 13501), False, 'import torch\n'), ((14190, 14222), 'torch.clone', 'torch.clone', (['self.trace_template'], {}), '(self.trace_template)\n', (14201, 14222), False, 'import torch\n'), ((17613, 17634), 'torch.Tensor', 'torch.Tensor', (['batchsz'], {}), '(batchsz)\n', (17625, 17634), False, 'import torch\n'), ((17654, 17675), 'torch.Tensor', 'torch.Tensor', (['batchsz'], {}), '(batchsz)\n', (17666, 17675), False, 'import torch\n'), ((17696, 17717), 'torch.Tensor', 'torch.Tensor', (['batchsz'], {}), '(batchsz)\n', (17708, 17717), False, 'import torch\n'), ((24218, 24235), 'numpy.zeros', 'np.zeros', (['(1, 12)'], {}), '((1, 12))\n', (24226, 24235), True, 'import numpy as np\n'), ((3115, 3138), 'torch.nn.Linear', 'nn.Linear', (['dim_1', 'dim_2'], {}), '(dim_1, dim_2)\n', (3124, 3138), False, 'from torch import nn\n'), ((3214, 3249), 'torch.nn.Linear', 'nn.Linear', (['dim_2', 'self.COMPRESS_DIM'], {}), '(dim_2, self.COMPRESS_DIM)\n', (3223, 3249), False, 'from torch import nn\n'), ((3281, 3316), 'torch.nn.Linear', 'nn.Linear', (['self.COMPRESS_DIM', 'dim_2'], {}), '(self.COMPRESS_DIM, dim_2)\n', (3290, 3316), False, 'from torch 
import nn\n'), ((4271, 4294), 'torch.nn.Linear', 'nn.Linear', (['dim_1', 'dim_2'], {}), '(dim_1, dim_2)\n', (4280, 4294), False, 'from torch import nn\n'), ((4307, 4349), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.linear.weight'], {}), '(self.linear.weight)\n', (4329, 4349), False, 'from torch import nn\n'), ((13569, 13583), 'torch.clone', 'torch.clone', (['x'], {}), '(x)\n', (13580, 13583), False, 'import torch\n'), ((13689, 13742), 'torch.mm', 'torch.mm', (['pre_syn', '(self.trace * self.alpha_plasticity)'], {}), '(pre_syn, self.trace * self.alpha_plasticity)\n', (13697, 13742), False, 'import torch\n'), ((15259, 15284), 'torch.exp', 'torch.exp', (['action_log_std'], {}), '(action_log_std)\n', (15268, 15284), False, 'import torch\n'), ((15313, 15354), 'torch.distributions.Normal', 'Normal', ([], {'loc': 'action_mean', 'scale': 'action_std'}), '(loc=action_mean, scale=action_std)\n', (15319, 15354), False, 'from torch.distributions import Normal\n'), ((15492, 15527), 'torch.sum', 'torch.sum', (['log_probabilities'], {'dim': '(1)'}), '(log_probabilities, dim=1)\n', (15501, 15527), False, 'import torch\n'), ((16655, 16680), 'torch.exp', 'torch.exp', (['action_log_std'], {}), '(action_log_std)\n', (16664, 16680), False, 'import torch\n'), ((16709, 16750), 'torch.distributions.Normal', 'Normal', ([], {'loc': 'action_mean', 'scale': 'action_std'}), '(loc=action_mean, scale=action_std)\n', (16715, 16750), False, 'from torch.distributions import Normal\n'), ((16864, 16899), 'torch.sum', 'torch.sum', (['log_probabilities'], {'dim': '(1)'}), '(log_probabilities, dim=1)\n', (16873, 16899), False, 'import torch\n'), ((19652, 19688), 'numpy.random.shuffle', 'np.random.shuffle', (['minibatch_indices'], {}), '(minibatch_indices)\n', (19669, 19688), True, 'import numpy as np\n'), ((21721, 21757), 'numpy.random.shuffle', 'np.random.shuffle', (['minibatch_indices'], {}), '(minibatch_indices)\n', (21738, 21757), True, 'import numpy as np\n'), ((3403, 3434), 
'torch.rand', 'torch.rand', ([], {'size': '(dim_1, dim_2)'}), '(size=(dim_1, dim_2))\n', (3413, 3434), False, 'import torch\n'), ((5434, 5522), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'dim_1', 'out_channels': 'dim_2', 'kernel_size': 'kernel', 'stride': 'stride'}), '(in_channels=dim_1, out_channels=dim_2, kernel_size=kernel, stride\n =stride)\n', (5443, 5522), False, 'from torch import nn\n'), ((9696, 9756), 'torch.zeros', 'torch.zeros', (['latent_shape[-1]', 'ac_space'], {'requires_grad': '(False)'}), '(latent_shape[-1], ac_space, requires_grad=False)\n', (9707, 9756), False, 'import torch\n'), ((9788, 9810), 'torch.nn.Linear', 'nn.Linear', (['ac_space', '(1)'], {}), '(ac_space, 1)\n', (9797, 9810), False, 'from torch import nn\n'), ((9842, 9864), 'torch.nn.Linear', 'nn.Linear', (['(1)', 'ac_space'], {}), '(1, ac_space)\n', (9851, 9864), False, 'from torch import nn\n'), ((11680, 11693), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (11691, 11693), False, 'from torch import nn\n'), ((15854, 15879), 'torch.exp', 'torch.exp', (['action_log_std'], {}), '(action_log_std)\n', (15863, 15879), False, 'import torch\n'), ((15908, 15949), 'torch.distributions.Normal', 'Normal', ([], {'loc': 'action_mean', 'scale': 'action_std'}), '(loc=action_mean, scale=action_std)\n', (15914, 15949), False, 'from torch.distributions import Normal\n'), ((16087, 16122), 'torch.sum', 'torch.sum', (['log_probabilities'], {'dim': '(1)'}), '(log_probabilities, dim=1)\n', (16096, 16122), False, 'import torch\n'), ((17115, 17140), 'torch.exp', 'torch.exp', (['action_log_std'], {}), '(action_log_std)\n', (17124, 17140), False, 'import torch\n'), ((17169, 17210), 'torch.distributions.Normal', 'Normal', ([], {'loc': 'action_mean', 'scale': 'action_std'}), '(loc=action_mean, scale=action_std)\n', (17175, 17210), False, 'from torch.distributions import Normal\n'), ((17324, 17359), 'torch.sum', 'torch.sum', (['log_probabilities'], {'dim': '(1)'}), '(log_probabilities, dim=1)\n', 
(17333, 17359), False, 'import torch\n'), ((20443, 20479), 'torch.FloatTensor', 'torch.FloatTensor', (['advantages[batch]'], {}), '(advantages[batch])\n', (20460, 20479), False, 'import torch\n'), ((20594, 20702), 'torch.where', 'torch.where', (['(mb_advantages > 0)', '((1 + self.ppo_clip) * mb_advantages)', '((1 - self.ppo_clip) * mb_advantages)'], {}), '(mb_advantages > 0, (1 + self.ppo_clip) * mb_advantages, (1 -\n self.ppo_clip) * mb_advantages)\n', (20605, 20702), False, 'import torch\n'), ((22497, 22533), 'torch.FloatTensor', 'torch.FloatTensor', (['advantages[batch]'], {}), '(advantages[batch])\n', (22514, 22533), False, 'import torch\n'), ((22648, 22756), 'torch.where', 'torch.where', (['(mb_advantages > 0)', '((1 + self.ppo_clip) * mb_advantages)', '((1 - self.ppo_clip) * mb_advantages)'], {}), '(mb_advantages > 0, (1 + self.ppo_clip) * mb_advantages, (1 -\n self.ppo_clip) * mb_advantages)\n', (22659, 22756), False, 'import torch\n'), ((24265, 24281), 'torch.rand', 'torch.rand', (['(1)', '(1)'], {}), '(1, 1)\n', (24275, 24281), False, 'import torch\n'), ((24788, 24873), 'torch.FloatTensor', 'torch.FloatTensor', (['[[1.05, 0.785, 0.785, 0.7, 0.87, 0.5, 10, 10, 10, 10, 10, 10]]'], {}), '([[1.05, 0.785, 0.785, 0.7, 0.87, 0.5, 10, 10, 10, 10, 10,\n 10]])\n', (24805, 24873), False, 'import torch\n'), ((24959, 25051), 'torch.FloatTensor', 'torch.FloatTensor', (['[[-0.52, -0.785, -0.4, -1, -1.2, -0.5, -10, -10, -10, -10, -10, -10]]'], {}), '([[-0.52, -0.785, -0.4, -1, -1.2, -0.5, -10, -10, -10, -10,\n -10, -10]])\n', (24976, 25051), False, 'import torch\n'), ((25784, 25824), 'numpy.clip', 'np.clip', (['local_action'], {'a_min': '(-1)', 'a_max': '(1)'}), '(local_action, a_min=-1, a_max=1)\n', (25791, 25824), True, 'import numpy as np\n'), ((5115, 5142), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (5127, 5142), False, 'from torch import nn\n'), ((5698, 5721), 'torch.nn.Linear', 'nn.Linear', (['dim_1', 'dim_2'], {}), 
'(dim_1, dim_2)\n', (5707, 5721), False, 'from torch import nn\n'), ((5734, 5776), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.linear.weight'], {}), '(self.linear.weight)\n', (5756, 5776), False, 'from torch import nn\n'), ((19177, 19223), 'torch.FloatTensor', 'torch.FloatTensor', (['memory.sensor_states_bottom'], {}), '(memory.sensor_states_bottom)\n', (19194, 19223), False, 'import torch\n'), ((19321, 19361), 'torch.FloatTensor', 'torch.FloatTensor', (['memory.rewards_bottom'], {}), '(memory.rewards_bottom)\n', (19338, 19361), False, 'import torch\n'), ((19384, 19428), 'torch.FloatTensor', 'torch.FloatTensor', (['memory.reset_flags_bottom'], {}), '(memory.reset_flags_bottom)\n', (19401, 19428), False, 'import torch\n'), ((20084, 20107), 'torch.LongTensor', 'torch.LongTensor', (['batch'], {}), '(batch)\n', (20100, 20107), False, 'import torch\n'), ((20205, 20228), 'torch.LongTensor', 'torch.LongTensor', (['batch'], {}), '(batch)\n', (20221, 20228), False, 'import torch\n'), ((21255, 21298), 'torch.FloatTensor', 'torch.FloatTensor', (['memory.sensor_states_top'], {}), '(memory.sensor_states_top)\n', (21272, 21298), False, 'import torch\n'), ((21396, 21433), 'torch.FloatTensor', 'torch.FloatTensor', (['memory.rewards_top'], {}), '(memory.rewards_top)\n', (21413, 21433), False, 'import torch\n'), ((21456, 21497), 'torch.FloatTensor', 'torch.FloatTensor', (['memory.reset_flags_top'], {}), '(memory.reset_flags_top)\n', (21473, 21497), False, 'import torch\n'), ((22147, 22170), 'torch.LongTensor', 'torch.LongTensor', (['batch'], {}), '(batch)\n', (22163, 22170), False, 'import torch\n'), ((22265, 22288), 'torch.LongTensor', 'torch.LongTensor', (['batch'], {}), '(batch)\n', (22281, 22288), False, 'import torch\n'), ((5229, 5286), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': "optional_args['pooling_kernel']"}), "(kernel_size=optional_args['pooling_kernel'])\n", (5241, 5286), False, 'from torch import nn\n'), ((5978, 6001), 
'torch.nn.Linear', 'nn.Linear', (['dim_1', 'dim_2'], {}), '(dim_1, dim_2)\n', (5987, 6001), False, 'from torch import nn\n'), ((9918, 9957), 'torch.randn', 'torch.randn', (['latent_shape[-1]', 'ac_space'], {}), '(latent_shape[-1], ac_space)\n', (9929, 9957), False, 'import torch\n'), ((19958, 19995), 'numpy.array', 'np.array', (['memory.sensor_states_bottom'], {}), '(memory.sensor_states_bottom)\n', (19966, 19995), True, 'import numpy as np\n'), ((20033, 20067), 'torch.stack', 'torch.stack', (['memory.actions_bottom'], {}), '(memory.actions_bottom)\n', (20044, 20067), False, 'import torch\n'), ((20152, 20188), 'torch.stack', 'torch.stack', (['memory.log_probs_bottom'], {}), '(memory.log_probs_bottom)\n', (20163, 20188), False, 'import torch\n'), ((22027, 22061), 'numpy.array', 'np.array', (['memory.sensor_states_top'], {}), '(memory.sensor_states_top)\n', (22035, 22061), True, 'import numpy as np\n'), ((22099, 22130), 'torch.stack', 'torch.stack', (['memory.actions_top'], {}), '(memory.actions_top)\n', (22110, 22130), False, 'import torch\n'), ((22215, 22248), 'torch.stack', 'torch.stack', (['memory.log_probs_top'], {}), '(memory.log_probs_top)\n', (22226, 22248), False, 'import torch\n'), ((6161, 6180), 'torch.nn.Linear', 'nn.Linear', (['dim_1', '(1)'], {}), '(dim_1, 1)\n', (6170, 6180), False, 'from torch import nn\n'), ((25336, 25365), 'torch.FloatTensor', 'torch.FloatTensor', (['sensor_obs'], {}), '(sensor_obs)\n', (25353, 25365), False, 'import torch\n'), ((7240, 7328), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'dim_1', 'out_channels': 'dim_2', 'kernel_size': 'kernel', 'stride': 'stride'}), '(in_channels=dim_1, out_channels=dim_2, kernel_size=kernel, stride\n =stride)\n', (7249, 7328), False, 'from torch import nn\n'), ((20751, 20792), 'torch.min', 'torch.min', (['(ratio * mb_advantages)', 'min_adv'], {}), '(ratio * mb_advantages, min_adv)\n', (20760, 20792), False, 'import torch\n'), ((22805, 22846), 'torch.min', 'torch.min', (['(ratio * 
mb_advantages)', 'min_adv'], {}), '(ratio * mb_advantages, min_adv)\n', (22814, 22846), False, 'import torch\n'), ((24407, 24436), 'torch.FloatTensor', 'torch.FloatTensor', (['sensor_obs'], {}), '(sensor_obs)\n', (24424, 24436), False, 'import torch\n'), ((6921, 6948), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (6933, 6948), False, 'from torch import nn\n'), ((7488, 7511), 'torch.nn.Linear', 'nn.Linear', (['dim_1', 'dim_2'], {}), '(dim_1, dim_2)\n', (7497, 7511), False, 'from torch import nn\n'), ((7524, 7566), 'torch.nn.init.xavier_normal_', 'nn.init.xavier_normal_', (['self.linear.weight'], {}), '(self.linear.weight)\n', (7546, 7566), False, 'from torch import nn\n'), ((20874, 20912), 'torch.FloatTensor', 'torch.FloatTensor', (['value_target[batch]'], {}), '(value_target[batch])\n', (20891, 20912), False, 'import torch\n'), ((22928, 22966), 'torch.FloatTensor', 'torch.FloatTensor', (['value_target[batch]'], {}), '(value_target[batch])\n', (22945, 22966), False, 'import torch\n'), ((7035, 7092), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': "optional_args['pooling_kernel']"}), "(kernel_size=optional_args['pooling_kernel'])\n", (7047, 7092), False, 'from torch import nn\n'), ((7708, 7728), 'torch.ones', 'torch.ones', (['(1)', 'dim_2'], {}), '(1, dim_2)\n', (7718, 7728), False, 'import torch\n')] |
#!/usr/bin/python
"""
GridFlag
Use XArray, Dask, and Numpy to load CASA Measurement Set (MS) data and
create binned UV data.
Todo:
[ ] Add options for choosing stokes parameters or amplitude/complex components
"""
import numpy as np
import dask
import dask.array as da
import numba as nb
from . import groupby_apply, groupby_partition, annulus_stats
import logging
# Add colours for warnings and errors (ANSI escape codes wrap the level name).
logging.addLevelName(logging.WARNING, "\033[1;31m%s\033[1;0m" % logging.getLevelName(logging.WARNING))
logging.addLevelName(logging.ERROR, "\033[1;41m%s\033[1;0m" % logging.getLevelName(logging.ERROR))
# Log INFO and above to both a log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)-20s %(levelname)-8s %(message)s',
    handlers=[
        logging.FileHandler("gridflag.log"),
        logging.StreamHandler()
    ]
)
# Module-level (root) logger shared by the functions below.
logger = logging.getLogger()
def process_stokes_options(ds_ind, stokes='I', use_existing_flags=True):
    """
    Determine columns to use for flagging algorithm from DATA and FLAG tables.

    Inputs:
    ds_ind              The input dask dataframe
    stokes              Which Stokes parameter to flag on ('I', 'Q', 'U' or 'V'), str
    use_existing_flags  Take into account existing flags in the MS, bool

    Returns:
    vals                Visibilities (real part only), dask dataframe
    flags               Flags, dask dataframe, or None when
                        use_existing_flags is False

    Raises:
    ValueError          If `stokes` is not one of 'I', 'Q', 'U', 'V'.
    """
    # Each Stokes parameter is a sum or difference of two correlation
    # columns: value = DATA[:, col_a] + sign * DATA[:, col_b].  This also
    # collapses the original duplicated I/Q and U/V flag branches.
    stokes_columns = {
        'I': (0, -1, 1),
        'Q': (0, -1, -1),
        'U': (1, 2, 1),
        'V': (1, 2, -1),
    }
    if stokes not in stokes_columns:
        # BUGFIX: the original message named the wrong function
        # ("compute_ipflag_grid") and, because of a backslash continuation
        # inside the f-string, embedded a run of source indentation in the
        # user-facing text.
        raise ValueError(f"process_stokes_options: the stokes argument, "
                         f"'{stokes}', is not currently implemented, "
                         f"please select another value.")
    col_a, col_b, sign = stokes_columns[stokes]
    vals = ds_ind.DATA[:, col_a].data + sign * ds_ind.DATA[:, col_b].data
    # Take only the real part for Gaussian stats
    vals = vals.real
    if use_existing_flags:
        # A visibility is flagged if either contributing column is flagged.
        flags = ds_ind.FLAG.data[:, col_a] | ds_ind.FLAG.data[:, col_b]
    else:
        flags = None
    return vals, flags
def compute_ipflag_grid(ds_ind, uvbins, sigma=3.0, partition_level=4, stokes='I', use_existing_flags=True, client=None):
    """
    Map functions to concurrent dask functions on an Xarray dataset with pre-
    computed grid indicies.

    Parameters
    ----------
    ds_ind : xarray.dataset
        An xarray dataset imported from a measurement set. It must contain
        coordinates U_bins and V_bins, and relevant data and position variables.
    uvbins : list
        List of two arrays, each containing the bounds for the discrete U and V
        bins in the ds_ind dataset.
    sigma : float
        Depth of flagging - corresponds to the significance of a particular
        observation to be labeled RFI.
    partition_level : int
        Internal parameter used to split the dataset in to congruent chunks in
        the uv-bin space. Used for concurrency.
    stokes : string
        Reduce the data via one or more combinations of columns - 'I', 'Q', 'U',
        'V' for corresponding stokes parameters. For amplitude of each columns
        use 'A'.
    use_existing_flags : bool
        Either remove rows that are flagged in the input MS file before flagging
        (True) or ignore and overwrite existing flags (False).
    client : dask.distributed.client
        Pass dask distributed client or 'None' for local computation.

    Returns
    -------
    flag_list: numpy.array
        A list of flag indicies to be applied to the original measurement set.
    median_grid_unflagged : numpy.array
        A two-dimensional array representing a uv-grid. Each cell in the grid is
        the median of all values, after zero removal and flagging, in that bin.
    median_grid : numpy.array
        The same as the above but after flagged visibilities are removed.
    """
    # Load data from dask-ms dataset
    ubins = ds_ind.U_bins.data
    vbins = ds_ind.V_bins.data
    # BUGFIX: process_stokes_options returns (vals, flags); the original
    # unpacked them in swapped order and hard-coded use_existing_flags=True,
    # ignoring this function's own parameter (compare compute_median_grid).
    vals, flags = process_stokes_options(ds_ind, stokes, use_existing_flags)
    # The array 'p' is used to map flags back to the original file order
    p = da.arange(len(ds_ind.newrow), dtype=np.int64, chunks=ubins.chunks)
    chunks = list(ubins.chunks[0])
    # BUGFIX: logger.debug takes lazy %-style arguments; the original passed
    # an f-string with no placeholder plus an extra positional argument.
    logger.debug("Original chunk sizes: %s", chunks)
    # Execute partition function which does a partial sort on U and V bins
    split_points, ubins_part, vbins_part, vals_part, flags_part, p_part = dask_partition_sort(
        ubins, vbins, vals, flags, p, chunks, partition_level, 0, client=client)
    print(f"Dataset split into {len(ubins_part.chunks[0])} uv-plane partitions.")
    print(f"Flagging: {ubins_part.chunks[0]} rows.")
    # Convert back to delayed one final time
    dd_bins = da.stack([ubins_part, vbins_part, p_part]).T
    dd_bins = dd_bins.rechunk((ubins_part.chunks[0], 3))
    dd_bins = dd_bins.to_delayed()
    dd_vals = vals_part.to_delayed()
    dd_flags = flags_part.to_delayed()
    vdim = vbins_part.ndim
    # Sort each partition by (u, v) bin; the *_multi variant handles value
    # arrays with more than one column.
    if use_existing_flags:
        if vdim > 1:
            group_bins_sort = [dask.delayed(groupby_partition.sort_bins_multi)
                               (
                                   part[0][0],
                                   part[1][0],
                                   part[2]
                               ) for part in zip(dd_bins, dd_vals, dd_flags)]
        else:
            group_bins_sort = [dask.delayed(groupby_partition.sort_bins)
                               (
                                   part[0][0],
                                   part[1],
                                   part[2]
                               ) for part in zip(dd_bins, dd_vals, dd_flags)]
    else:
        if vdim > 1:
            group_bins_sort = [dask.delayed(groupby_partition.sort_bins_multi)
                               (
                                   part[0][0],
                                   part[1][0]
                               ) for part in zip(dd_bins, dd_vals)]
        else:
            group_bins_sort = [dask.delayed(groupby_partition.sort_bins)
                               (
                                   part[0][0],
                                   part[1]
                               ) for part in zip(dd_bins, dd_vals)]
    group_chunks = [dask.delayed(groupby_partition.create_bin_groups_sort)
                    (c[0], c[1], c[2]) for c in group_bins_sort]
    function_chunks = [dask.delayed(groupby_partition.apply_grid_median)
                       (c[1], c[2]) for c in group_chunks]
    median_grid = dask.delayed(groupby_partition.combine_function_partitions)(
        function_chunks)
    if client:
        median_grid_unflagged = client.compute(median_grid)
    else:
        median_grid_unflagged = median_grid.compute()
    # Automatically compute annulus widths (naive)
    annulus_width = dask.delayed(annulus_stats.compute_annulus_bins)(
        median_grid_unflagged,
        uvbins,
        10
    )
    print(f"Initiate flagging with sigma = {sigma}.")
    annuli_data = dask.delayed(annulus_stats.process_annuli)(
        median_grid_unflagged,
        annulus_width,
        uvbins[0],
        uvbins[1],
        sigma=sigma
    )
    # Process only one column for multi-dim data until we finish this function
    if vdim > 1:
        flag_results = [dask.delayed(annulus_stats.flag_one_annulus)(
            c[0],
            c[1][:, 0],
            c[2],
            c[3],
            annuli_data[0],
            annuli_data[1],
            sigma=sigma) for c in group_chunks]
    else:
        flag_results = [dask.delayed(annulus_stats.flag_one_annulus)(
            c[0],
            c[1],
            c[2],
            c[3],
            annuli_data[0],
            annuli_data[1],
            sigma=sigma) for c in group_chunks]
    results = dask.delayed(groupby_partition.combine_annulus_results)(
        [fr[0] for fr in flag_results],
        [fr[1] for fr in flag_results],
        [fr[2] for fr in flag_results],
        [fr[3] for fr in flag_results])
    print("Compute median grid on the partitions.")
    if client:
        ms_flag_list, da_flag_list, median_grid_flagged, count_grid = \
            client.compute(results).result()
        median_grid_unflagged = median_grid_unflagged.result()
    else:
        ms_flag_list, da_flag_list, median_grid_flagged, count_grid = \
            results.compute()
    return ms_flag_list, median_grid_unflagged, median_grid_flagged
@nb.njit(nogil=True)
def strip_zero_values(a, b, v, p):
    """Drop rows whose visibility value is exactly zero.

    a, b are bin-index arrays, v the visibility values and p the original row
    positions; all four are filtered by the same mask.  Returns the new
    length, the four filtered arrays, and `null_flags`, the original row
    positions of the removed (zero-valued) entries.
    """
    oldlen = len(v)
    # BUGFIX: compute both index sets from the ORIGINAL v before any
    # filtering.  The original reassigned v to its nonzero subset and only
    # then filtered p with np.where(v != 0), which (v now being all-nonzero)
    # truncated p to its first `newlen` entries instead of masking it.
    keep = np.where(v != 0)
    null_flags = p[np.where(v == 0)]
    a = a[keep]
    b = b[keep]
    p = p[keep]
    v = v[keep]
    newlen = len(a)
    print("Removed ", oldlen - newlen, " zero value rows of ", oldlen,
          " rows (", round(100*(oldlen-newlen)/oldlen, 2), "%)." )
    return newlen, a, b, v, p, null_flags
@nb.njit(nogil=True)
def median_of_medians(data, sublist_length=11):
    """Reduce `data` to the medians of consecutive `sublist_length`-sized chunks.

    Used as a cheap approximate-median heuristic (one reduction pass).
    """
    medians = [np.median(data[start:start + sublist_length])
               for start in range(0, len(data), sublist_length)]
    return np.array(medians)
def apply_median_func(data, depth):
    """Apply the median-of-medians reduction `depth` times in succession."""
    reduced = data
    for _ in np.arange(depth):
        reduced = median_of_medians(reduced, 11)
    return reduced
def da_median(a):
    """Median of one chunk, returned with shape (1, 1).

    The two dummy dimensions let per-chunk medians be stacked/concatenated
    later in the graph.
    """
    return np.array([[np.median(a)]])
def combine_median(meds):
    """Merge a list of per-partition median arrays and return their overall median."""
    merged = np.concatenate(meds)
    return np.median(merged)
# Wrap the reduction helpers as dask.delayed so they compose lazily into the
# partition-sort task graph.  Note this deliberately shadows the plain
# functions defined above with their delayed counterparts.
combine_median = dask.delayed(combine_median)
partition_permutation = dask.delayed(groupby_partition.partition_permutation)
partition_permutation_multi = dask.delayed(groupby_partition.partition_permutation_multi)
da_median = dask.delayed(da_median)
apply_median_func = dask.delayed(apply_median_func)
# Latest version of the parallel partition sort.
def dask_partition_sort(a, b, v, f, p, chunks, binary_chunks, partition_level, client=None):
    """Recursively partition all row arrays into contiguous uv-bin ranges.

    At each recursion level the rows are split around an approximate median
    of `a` (computed with a median-of-medians heuristic), then the roles of
    `a` and `b` are swapped for the sub-recursions, so successive levels
    alternate between the U and V bin axes.  Recursion stops after
    `binary_chunks` levels, yielding 2**binary_chunks partitions whose chunk
    boundaries coincide with disjoint uv regions.

    Parameters
    ----------
    a, b : dask.array
        U and V bin indices (roles alternate per recursion level).
    v : dask.array
        Visibility values (1-D, or 2-D with multiple value columns).
    f : dask.array
        Flags, aligned row-wise with `v`.
    p : dask.array
        Original row positions, used to map results back to file order.
    chunks : list
        Current chunk sizes of `a` (one entry per dask chunk).
    binary_chunks : int
        Total number of recursive binary splits to perform.
    partition_level : int
        Current recursion depth (callers pass 0).
    client : dask.distributed.Client or None
        Distributed client, or None for local scheduling.

    Returns
    -------
    (split_points, a, b, v, f, p) where the arrays are partially sorted so
    that each final chunk covers one uv partition, and split_points records
    the partition boundary sizes.
    """
    split_points = np.array([])
    # Persist to compute DAG up to this point on workers. This improves the performance of
    # in-place sorting six-fold.
    if client:
        a = client.persist(a)
        b = client.persist(b)
        v = client.persist(v)
        f = client.persist(f)
        p = client.persist(p)
    else:
        a = a.persist()
        b = b.persist()
        v = v.persist()
        f = f.persist()
        p = p.persist()
    a_min, a_max = da.min(a), da.max(a)
    v = da.squeeze(v)
    if v.ndim > 1:
        ncols = v.shape[1]
    else:
        ncols = 1
    # print("Columns: ", ncols, v.ndim, v.shape, v.dtype, nb.typeof(v[:10].compute()))
    a = a.to_delayed()
    b = b.to_delayed()
    v = v.to_delayed()
    f = f.to_delayed()
    p = p.to_delayed()
    # Compute the median-of-medians heuristic (not the proper definition of MoM but we only need an approximate pivot)
    # umed = [da_median(a_)[0] for a_ in a]
    # med_depth: how many reduction passes shrink the smallest chunk to
    # roughly `resid` elements (each pass divides length by sublist_length).
    resid = 50
    sublist_length=11
    min_nrow = np.min(chunks)
    med_depth = np.int32(np.floor((np.log(min_nrow/resid)/np.log(sublist_length))))
    umed = [apply_median_func(a_, med_depth) for a_ in a]
    # umeds = [len(u.compute()) for u in umed]
    if client:
        pivot = combine_median(umed)
        pivot = client.compute(pivot).result()
    else:
        pivot = combine_median(umed).compute()
    # Nudge the pivot off the extremes so neither partition can be empty.
    if pivot == a_max:
        pivot-=0.5
    if pivot == a_min:
        pivot+=0.5
    if ncols > 1:
        results = [partition_permutation_multi(a_, b_, v_[0], f_, p_, pivot) for a_, b_, v_, f_, p_ in zip(a, b, v, f, p)]
    else:
        results = [partition_permutation(a_, b_, v_, f_, p_, pivot) for a_, b_, v_, f_, p_ in zip(a, b, v, f, p)]
    print(f"Partition Level {partition_level}, med_depth: {med_depth}, pivot: {pivot}")
    # Each result is (split_point, a, b, v, f, p) for one chunk; note
    # dask.persist wraps the list in a one-element tuple, hence results[0].
    if client:
        results = client.persist(results)
        sp0 = [r[0].compute() for r in results]
        a = [r[1] for r in results]
        b = [r[2] for r in results]
        v = [r[3] for r in results]
        f = [r[4] for r in results]
        p = [r[5] for r in results]
    else:
        results = dask.persist(results)
        sp0 = [r[0].compute() for r in results[0]]
        a = [r[1] for r in results[0]]
        b = [r[2] for r in results[0]]
        v = [r[3] for r in results[0]]
        f = [r[4] for r in results[0]]
        p = [r[5] for r in results[0]]
    partition_level+=1
    if partition_level < binary_chunks:
        # Split each chunk of data in to two partitions as computed above then recurse on each partition.
        a1 = da.concatenate([da.from_delayed(x_[0:s_], ((s_),), dtype=np.int32) for x_, s_, c_ in zip(a, sp0, chunks)])
        b1 = da.concatenate([da.from_delayed(x_[0:s_], ((s_),), dtype=np.int32) for x_, s_, c_ in zip(b, sp0, chunks)])
        f1 = da.concatenate([da.from_delayed(x_[0:s_], ((s_),), dtype=np.int64) for x_, s_, c_ in zip(f, sp0, chunks)])
        p1 = da.concatenate([da.from_delayed(x_[0:s_], ((s_),), dtype=np.int64) for x_, s_, c_ in zip(p, sp0, chunks)])
        a2 = da.concatenate([da.from_delayed(x_[s_:c_], ((c_-s_),), dtype=np.int32) for x_, s_, c_ in zip(a, sp0, chunks)])
        b2 = da.concatenate([da.from_delayed(x_[s_:c_], ((c_-s_),), dtype=np.int32) for x_, s_, c_ in zip(b, sp0, chunks)])
        f2 = da.concatenate([da.from_delayed(x_[s_:c_], ((c_-s_),), dtype=np.int64) for x_, s_, c_ in zip(f, sp0, chunks)])
        p2 = da.concatenate([da.from_delayed(x_[s_:c_], ((c_-s_),), dtype=np.int64) for x_, s_, c_ in zip(p, sp0, chunks)])
        # Requires a different format for array shape for 1d and 2d arrays
        if ncols > 1:
            v1 = da.concatenate([da.from_delayed(x_[0:s_], ((s_,ncols)), dtype=np.float32) for x_, s_, c_ in zip(v, sp0, chunks)])
            v2 = da.concatenate([da.from_delayed(x_[s_:c_], ((c_-s_,ncols)), dtype=np.float32) for x_, s_, c_ in zip(v, sp0, chunks)])
        else:
            v1 = da.concatenate([da.from_delayed(x_[0:s_], ((s_),), dtype=np.float32) for x_, s_, c_ in zip(v, sp0, chunks)])
            v2 = da.concatenate([da.from_delayed(x_[s_:c_], ((c_-s_),), dtype=np.float32) for x_, s_, c_ in zip(v, sp0, chunks)])
        # Compute chunk size here as delayed objects don't have metadata
        chunks1 = list(a1.chunks[0])
        chunks2 = list(a2.chunks[0])
        # Do recursion step on each partition.  Note a and b are swapped so
        # the next level partitions on the other bin axis.
        sp1, b1_, a1_, v1_, f1_, p1_ = dask_partition_sort(b1, a1, v1, f1, p1, chunks1, binary_chunks, partition_level, client=client)
        sp2, b2_, a2_, v2_, f2_, p2_ = dask_partition_sort(b2, a2, v2, f2, p2, chunks2, binary_chunks, partition_level, client=client)
        # Combine the partially sorted partitions into the original array shape
        a = da.concatenate([a1_, a2_])
        b = da.concatenate([b1_, b2_])
        v = da.concatenate([v1_, v2_])
        f = da.concatenate([f1_, f2_])
        p = da.concatenate([p1_, p2_])
        split_points = np.concatenate((sp1, sp2))
    else:
        # Break out of the recursion and combine the partitions into a partially sorted array.
        # Each array is rebuilt as two single-chunk halves so chunk boundaries
        # line up exactly with the final uv partitions.
        a1 = da.concatenate([da.from_delayed(x_[:sp_], ((sp_),), dtype=np.int32) for x_, sp_, ch_ in zip(a, sp0, chunks)])
        a1 = a1.rechunk(len(a1))
        a2 = da.concatenate([da.from_delayed(x_[sp_:ch_], ((ch_ - sp_),), dtype=np.int32) for x_, sp_, ch_ in zip(a, sp0, chunks)])
        a2 = a2.rechunk(len(a2))
        if (len(a1) == 0) or (len(a2) == 0):
            raise Exception(f"Partition level {binary_chunks} resulted in one or more zero-length partitions. Please choose a smaller value for 'partition_level' or increase the number of UV bins.")
        a = da.concatenate([a1, a2])
        b1 = da.concatenate([da.from_delayed(x_[:sp_], ((sp_),), dtype=np.int32) for x_, sp_, ch_ in zip(b, sp0, chunks)])
        b1 = b1.rechunk(len(b1))
        b2 = da.concatenate([da.from_delayed(x_[sp_:ch_], ((ch_ - sp_),), dtype=np.int32) for x_, sp_, ch_ in zip(b, sp0, chunks)])
        b2 = b2.rechunk(len(b2))
        b = da.concatenate([b1, b2])
        f1 = da.concatenate([da.from_delayed(x_[:sp_], ((sp_),), dtype=np.int64) for x_, sp_, ch_ in zip(f, sp0, chunks)])
        f1 = f1.rechunk(len(f1))
        f2 = da.concatenate([da.from_delayed(x_[sp_:ch_], ((ch_ - sp_),), dtype=np.int64) for x_, sp_, ch_ in zip(f, sp0, chunks)])
        f2 = f2.rechunk(len(f2))
        f = da.concatenate([f1, f2])
        p1 = da.concatenate([da.from_delayed(x_[:sp_], ((sp_),), dtype=np.int64) for x_, sp_, ch_ in zip(p, sp0, chunks)])
        p1 = p1.rechunk(len(p1))
        p2 = da.concatenate([da.from_delayed(x_[sp_:ch_], ((ch_ - sp_),), dtype=np.int64) for x_, sp_, ch_ in zip(p, sp0, chunks)])
        p2 = p2.rechunk(len(p2))
        p = da.concatenate([p1, p2])
        if ncols > 1:
            v1 = da.concatenate([da.from_delayed(x_[:sp_], ((sp_),ncols), dtype=np.float32) for x_, sp_, ch_ in zip(v, sp0, chunks)])
            v1 = v1.rechunk(len(v1))
            v2 = da.concatenate([da.from_delayed(x_[sp_:ch_], ((ch_ - sp_),ncols), dtype=np.float32) for x_, sp_, ch_ in zip(v, sp0, chunks)])
            v2 = v2.rechunk(len(v2))
        else:
            v1 = da.concatenate([da.from_delayed(x_[:sp_], ((sp_),), dtype=np.float32) for x_, sp_, ch_ in zip(v, sp0, chunks)])
            v1 = v1.rechunk(len(v1))
            v2 = da.concatenate([da.from_delayed(x_[sp_:ch_], ((ch_ - sp_),), dtype=np.float32) for x_, sp_, ch_ in zip(v, sp0, chunks)])
            v2 = v2.rechunk(len(v2))
        v = da.concatenate([v1, v2])
        # The split points are indicies for the array position of the partitions, use later to define dask chunk with unique (u,v) bin ranges
        # for concurrent statistical calculations.
        split_points = np.concatenate((a1.chunks, a2.chunks), axis=None)
    return split_points, a, b, v, f, p
def compute_median_grid(
        ds_ind,
        uvbins,
        partition_level=4,
        stokes='I',
        client=None
        ):
    """Compute the median value for every UV bin using a parallel partition sort.

    Parameters
    ----------
    ds_ind : xarray.Dataset
        Dataset with pre-computed ``U_bins``/``V_bins`` coordinates, a
        ``newrow`` dimension, and the data/flag columns read by
        ``process_stokes_options``.
    uvbins : object
        UV binning specification (not referenced in this body; kept for API
        symmetry with the other gridding entry points).
    partition_level : int
        Recursion depth passed to ``dask_partition_sort``; controls how many
        independent partitions the data is split into.
    stokes : str
        Stokes product to grid (e.g. 'I', 'Q', 'U', 'V').
    client : dask.distributed.Client or None
        If given, the delayed graph is executed on this client; otherwise it
        is computed locally.

    Returns
    -------
    median_grid
        Combined per-bin median grid produced by
        ``groupby_partition.combine_function_partitions``.
    """
    print("Load in-memory data for sort.")
    ubins = ds_ind.U_bins.data
    vbins = ds_ind.V_bins.data
    vals, flags = process_stokes_options(ds_ind, stokes, True)
    # Record the chunk sizes of the bin arrays so the partition sort can
    # split/recombine on the same boundaries.
    chunks = list(ubins.chunks[0])
    # The array 'p' is used to map flags back to the original file order
    p = da.arange(len(ds_ind.newrow), dtype=np.int64, chunks=ubins.chunks)
    print("Compute parallel partitions and do partial sort.")
    split_points, ubins_part, vbins_part, vals_part, flags_part, p_part = \
        dask_partition_sort(ubins,
                            vbins,
                            vals,
                            flags,
                            p,
                            chunks,
                            partition_level,
                            0,
                            client=client
                            )
    print("Preparing dask delayed...")
    # Stack bins plus the row map into an (nrow, 3) array chunked on the
    # partition boundaries, then drop to delayed objects per partition.
    dd_bins = da.stack([ubins_part, vbins_part, p_part]).T
    dd_bins = dd_bins.rechunk((ubins_part.chunks[0], 3))
    dd_bins = dd_bins.to_delayed()
    dd_vals = vals_part.to_delayed()
    dd_flags = flags_part.to_delayed()
    # NOTE(review): vdim is taken from the *bins* array but selects the
    # multi-column *values* path below — confirm this should not be
    # vals_part.ndim instead.
    vdim = vbins_part.ndim
    del ubins, vbins, vals, p
    print("Compute UV map and median grid.")
    # group_bins_sort = [dask.delayed(groupby_partition.sort_bins)(part[0][0], part[1]) for part in zip(dd_bins, dd_vals)]
    if vdim > 1:
        group_bins_sort = [dask.delayed(groupby_partition.sort_bins_multi)(
            part[0][0],
            part[1][0],
            part[2]) for part in zip(dd_bins, dd_vals, dd_flags)]
    else:
        group_bins_sort = [dask.delayed(groupby_partition.sort_bins)(
            part[0][0],
            part[1],
            part[2]
        ) for part in zip(dd_bins, dd_vals, dd_flags)]
    # Per partition: group sorted rows into bins, take the median per bin,
    # then combine all per-partition grids into a single grid.
    group_chunks = [dask.delayed(groupby_partition.create_bin_groups_sort)(c[0], c[1], c[2]) for c in group_bins_sort]
    function_chunks = [dask.delayed(groupby_partition.apply_grid_median)(c[1], c[2]) for c in group_chunks]
    median_grid = dask.delayed(groupby_partition.combine_function_partitions)(function_chunks)
    if client:
        median_grid = client.compute(median_grid).result()
    else:
        median_grid = median_grid.compute()
    return median_grid
def check_exising_flags(ds_ind, stokes='I', client=None):
    """Check the existing flags in the input Measurement Set and print the
    fraction of rows that are already flagged.

    Parameters
    ----------
    ds_ind : xarray.Dataset
        Dataset for the input MS file; must provide ``FLAG`` and the
        ``Measurement Set`` attribute used in the report.
    stokes : str
        Stokes product whose correlations are inspected: 'I'/'Q' use the
        outer correlation columns, 'U'/'V' the inner ones, 'A' combines all.
    client : dask.distributed.Client or None
        If given, the flag locations are computed on this client.

    Raises
    ------
    ValueError
        If `stokes` is not one of the implemented options.
    """
    # I/Q and U/V read the same correlation pairs, so the four original
    # branches collapse to two.
    if stokes in ('I', 'Q'):
        flags = ds_ind.FLAG.data[:, 0] | ds_ind.FLAG.data[:, -1]
    elif stokes in ('U', 'V'):
        flags = ds_ind.FLAG.data[:, 1] | ds_ind.FLAG.data[:, 2]
    elif stokes == 'A':
        # BUG FIX: np.bool was removed in NumPy 1.24; the builtin bool is the
        # correct (and backward-compatible) dtype here.
        flags = da.sum(ds_ind.FLAG.data, axis=1, dtype=bool)
    else:
        raise ValueError(f"check_existing_flags: the stokes argument, \
'{stokes}', is not currently implemented, please select another value.")
    flag_loc = da.where(flags)
    if client is not None:
        # NOTE(review): this branch computes the whole da.where tuple while
        # the local branch takes element [0] first — confirm the asymmetry.
        flag_loc = client.compute(flag_loc)
    else:
        flag_loc = flag_loc[0].compute()
    nflags = len(flag_loc)
    nrows = len(flags)
    # "alrady" typo fixed in the report message.
    print(f"Rows already flagged: {(100*nflags/nrows):.1f}% ({nflags}/{nrows}), "
          f"in file \"{ds_ind.attrs['Measurement Set']}\".")
def map_amplitude_grid(
        ds_ind,
        data_columns,
        stokes='I',
        chunk_size:int=10**6,
        return_index:bool=False
        ):
    """
    Map concurrent dask functions onto an Xarray dataset with pre-computed
    grid indices.

    Parameters
    ----------
    ds_ind : xarray.Dataset
        An xarray dataset imported from a measurement set. It must contain
        coordinates U_bins and V_bins, and relevant data and position
        variables.
    data_columns : list
        Components for Stokes terms to be used to compute amplitude. Depends
        on dataset.
    stokes : str
        Stokes product: 'I' sums the two correlations, 'Q' differences them.
    chunk_size : int
        The chunk size for computing split-apply functions in dask. Default is
        ``10**6``.
    return_index : bool
        Determines the return data type. If True, returns 2-D grids of
        per-bin index lists, value lists and flag lists.

    Returns
    -------
    If ``return_index`` is True: ``(index_groups, value_groups, flag_groups)``
    — three 2-D uv-grids of per-bin lists. Otherwise:
    ``(median_grid, std_grid)`` — two *delayed* per-bin reduction grids.

    Raises
    ------
    ValueError
        If `stokes` is not 'I' or 'Q'.
    """
    # Get dask arrays of UV-bins, flags and visibilities from the dataset.
    dd_ubins = ds_ind.U_bins.data
    dd_vbins = ds_ind.V_bins.data
    dd_flgs = (ds_ind.FLAG[:,data_columns[0]].data |
               ds_ind.FLAG[:,data_columns[1]].data)
    if stokes == 'I':
        dd_vals = (np.absolute(ds_ind.DATA[:,data_columns[0]].data +
                                ds_ind.DATA[:,data_columns[1]].data))
    elif stokes == 'Q':
        dd_vals = (np.absolute(ds_ind.DATA[:,data_columns[0]].data -
                                ds_ind.DATA[:,data_columns[1]].data))
    else:
        # BUG FIX: any other value previously fell through and crashed later
        # with a NameError on dd_vals; fail fast with a clear message instead.
        raise ValueError(f"map_amplitude_grid: the stokes argument, "
                         f"'{stokes}', is not currently implemented, please select 'I' or 'Q'.")
    # Combine U and V bins into one dask array
    dd_bins = da.stack([dd_ubins, dd_vbins]).T
    # Apply uniform chunks to all dask arrays
    dd_bins = dd_bins.rechunk([chunk_size, 2])
    dd_vals = dd_vals.rechunk([chunk_size, 1])
    dd_flgs = dd_flgs.rechunk([chunk_size, 1])
    # Convert to delayed data structures
    bin_partitions = dd_bins.to_delayed()
    val_partitions = dd_vals.to_delayed()
    flg_partitions = dd_flgs.to_delayed()
    # Compute per-bin indices for each chunk; init_index offsets row indices
    # so they refer to rows of the original (unchunked) dataset.
    group_chunks = [dask.delayed(groupby_apply.group_bin_flagval_wrap)(
        part[0][0],
        part[1],
        part[2],
        init_index=(chunk_size*kth)
    ) for kth, part in enumerate(zip(bin_partitions,
                                     val_partitions,
                                     flg_partitions)
                                 )
    ]
    groups = dask.delayed(groupby_apply.combine_group_flagval)(group_chunks)
    if return_index:
        # Compute the grid from above without doing the apply step
        groups = groups.compute()
        index_groups, value_groups, flag_groups = groups[0], groups[1], groups[2]
        return index_groups, value_groups, flag_groups
    else:
        # BUG FIX: the original referenced an undefined name 'value_groups_'.
        # Index the delayed result instead (dask.delayed objects support
        # getitem), keeping the whole computation lazy.
        value_groups_ = groups[1]
        median_grid = dask.delayed(groupby_apply.apply_to_groups)(value_groups_, np.median)
        std_grid = dask.delayed(groupby_apply.apply_to_groups)(value_groups_, np.std)
        return median_grid, std_grid
def bin_rms(a):
    """Return the (unnormalised) RMS of `a` about its median.

    Computed as ``sqrt(sum((a - median(a))**2))``; note the sum of squared
    deviations is *not* divided by the number of samples.
    """
    deviations = a - np.median(a)
    return np.sqrt((deviations ** 2).sum())
def bin_rms_grid(ds_ind, flag_list):
    """ Create a two dimensional UV-grid with the RMS value for each bin, after
    removing flags in a provided list of flags.

    Parameters
    ----------
    ds_ind : xarray.dataset
        Dataset for input MS file.
    flag_list : array of ints
        Indices for flagged rows in the dataset.

    Returns
    -------
    rms_grid : array of shape (ubins, vbins)
        A two-dimensional array with the RMS value for the values in each UV bin.
    """
    # Expand the sparse flag index list into a dense boolean mask over rows.
    flags = np.zeros((len(ds_ind.newrow)), dtype=bool)
    flags[flag_list] = True
    # Use Calculated Flags Column
    ubins = ds_ind.U_bins.data
    vbins = ds_ind.V_bins.data
    # Stokes-I-style amplitude from the outer correlation columns.
    vals = (da.absolute(ds_ind.DATA[:,0].data + ds_ind.DATA[:,-1].data))
    print("Processing RMS Grid with ", np.sum(1*flags), len(flag_list), "flags.")
    # 'p' maps sorted rows back to their original file order.
    p = da.arange(len(ds_ind.newrow), dtype=np.int64, chunks=ubins.chunks)
    dd_bins = da.stack([ubins, vbins, p]).T
    dd_bins = dd_bins.rechunk((ubins.chunks[0], 3))
    # Materialize bins and values, then sort/group them per UV bin and apply
    # the RMS reduction to each bin's values.
    bins = dd_bins.compute()
    vals = vals.compute()
    bins_sort, vals_sort, null_flags = groupby_partition.sort_bins(bins, vals, flags)
    print(len(vals_sort))
    uv_ind, values, grid_row_map, null_flags = groupby_partition.create_bin_groups_sort(bins_sort,
                                                                                       vals_sort,
                                                                                       null_flags)
    print(len(values))
    rms_grid = groupby_partition.apply_grid_function(values, grid_row_map, bin_rms)
    return rms_grid
| [
"numpy.absolute",
"dask.array.sum",
"numpy.sum",
"dask.array.squeeze",
"numba.njit",
"logging.getLevelName",
"numpy.arange",
"dask.array.max",
"logging.FileHandler",
"dask.array.from_delayed",
"numpy.median",
"logging.StreamHandler",
"numpy.min",
"dask.array.where",
"numpy.concatenate",
... | [((827, 846), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (844, 846), False, 'import logging\n'), ((8513, 8532), 'numba.njit', 'nb.njit', ([], {'nogil': '(True)'}), '(nogil=True)\n', (8520, 8532), True, 'import numba as nb\n'), ((8938, 8957), 'numba.njit', 'nb.njit', ([], {'nogil': '(True)'}), '(nogil=True)\n', (8945, 8957), True, 'import numba as nb\n'), ((9590, 9618), 'dask.delayed', 'dask.delayed', (['combine_median'], {}), '(combine_median)\n', (9602, 9618), False, 'import dask\n'), ((9644, 9697), 'dask.delayed', 'dask.delayed', (['groupby_partition.partition_permutation'], {}), '(groupby_partition.partition_permutation)\n', (9656, 9697), False, 'import dask\n'), ((9728, 9787), 'dask.delayed', 'dask.delayed', (['groupby_partition.partition_permutation_multi'], {}), '(groupby_partition.partition_permutation_multi)\n', (9740, 9787), False, 'import dask\n'), ((9802, 9825), 'dask.delayed', 'dask.delayed', (['da_median'], {}), '(da_median)\n', (9814, 9825), False, 'import dask\n'), ((9846, 9877), 'dask.delayed', 'dask.delayed', (['apply_median_func'], {}), '(apply_median_func)\n', (9858, 9877), False, 'import dask\n'), ((9237, 9253), 'numpy.arange', 'np.arange', (['depth'], {}), '(depth)\n', (9246, 9253), True, 'import numpy as np\n'), ((9342, 9354), 'numpy.median', 'np.median', (['a'], {}), '(a)\n', (9351, 9354), True, 'import numpy as np\n'), ((9480, 9500), 'numpy.concatenate', 'np.concatenate', (['meds'], {}), '(meds)\n', (9494, 9500), True, 'import numpy as np\n'), ((9551, 9571), 'numpy.median', 'np.median', (['umed_list'], {}), '(umed_list)\n', (9560, 9571), True, 'import numpy as np\n'), ((10009, 10021), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10017, 10021), True, 'import numpy as np\n'), ((10497, 10510), 'dask.array.squeeze', 'da.squeeze', (['v'], {}), '(v)\n', (10507, 10510), True, 'import dask.array as da\n'), ((11013, 11027), 'numpy.min', 'np.min', (['chunks'], {}), '(chunks)\n', (11019, 11027), True, 'import numpy as 
np\n'), ((21306, 21329), 'dask.array.where', 'da.where', (['(flags == True)'], {}), '(flags == True)\n', (21314, 21329), True, 'import dask.array as da\n'), ((25570, 25582), 'numpy.median', 'np.median', (['a'], {}), '(a)\n', (25579, 25582), True, 'import numpy as np\n'), ((26367, 26428), 'dask.array.absolute', 'da.absolute', (['(ds_ind.DATA[:, 0].data + ds_ind.DATA[:, -1].data)'], {}), '(ds_ind.DATA[:, 0].data + ds_ind.DATA[:, -1].data)\n', (26378, 26428), True, 'import dask.array as da\n'), ((476, 513), 'logging.getLevelName', 'logging.getLevelName', (['logging.WARNING'], {}), '(logging.WARNING)\n', (496, 513), False, 'import logging\n'), ((577, 612), 'logging.getLevelName', 'logging.getLevelName', (['logging.ERROR'], {}), '(logging.ERROR)\n', (597, 612), False, 'import logging\n'), ((5056, 5098), 'dask.array.stack', 'da.stack', (['[ubins_part, vbins_part, p_part]'], {}), '([ubins_part, vbins_part, p_part])\n', (5064, 5098), True, 'import dask.array as da\n'), ((6559, 6618), 'dask.delayed', 'dask.delayed', (['groupby_partition.combine_function_partitions'], {}), '(groupby_partition.combine_function_partitions)\n', (6571, 6618), False, 'import dask\n'), ((6882, 6930), 'dask.delayed', 'dask.delayed', (['annulus_stats.compute_annulus_bins'], {}), '(annulus_stats.compute_annulus_bins)\n', (6894, 6930), False, 'import dask\n'), ((7072, 7114), 'dask.delayed', 'dask.delayed', (['annulus_stats.process_annuli'], {}), '(annulus_stats.process_annuli)\n', (7084, 7114), False, 'import dask\n'), ((7859, 7914), 'dask.delayed', 'dask.delayed', (['groupby_partition.combine_annulus_results'], {}), '(groupby_partition.combine_annulus_results)\n', (7871, 7914), False, 'import dask\n'), ((8592, 8608), 'numpy.where', 'np.where', (['(v == 0)'], {}), '(v == 0)\n', (8600, 8608), True, 'import numpy as np\n'), ((8618, 8634), 'numpy.where', 'np.where', (['(v != 0)'], {}), '(v != 0)\n', (8626, 8634), True, 'import numpy as np\n'), ((8817, 8833), 'numpy.where', 'np.where', (['(v != 0)'], {}), 
'(v != 0)\n', (8825, 8833), True, 'import numpy as np\n'), ((8843, 8859), 'numpy.where', 'np.where', (['(v != 0)'], {}), '(v != 0)\n', (8851, 8859), True, 'import numpy as np\n'), ((8869, 8885), 'numpy.where', 'np.where', (['(v != 0)'], {}), '(v != 0)\n', (8877, 8885), True, 'import numpy as np\n'), ((9387, 9398), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (9395, 9398), True, 'import numpy as np\n'), ((10467, 10476), 'dask.array.min', 'da.min', (['a'], {}), '(a)\n', (10473, 10476), True, 'import dask.array as da\n'), ((10478, 10487), 'dask.array.max', 'da.max', (['a'], {}), '(a)\n', (10484, 10487), True, 'import dask.array as da\n'), ((12135, 12156), 'dask.persist', 'dask.persist', (['results'], {}), '(results)\n', (12147, 12156), False, 'import dask\n'), ((14826, 14852), 'dask.array.concatenate', 'da.concatenate', (['[a1_, a2_]'], {}), '([a1_, a2_])\n', (14840, 14852), True, 'import dask.array as da\n'), ((14865, 14891), 'dask.array.concatenate', 'da.concatenate', (['[b1_, b2_]'], {}), '([b1_, b2_])\n', (14879, 14891), True, 'import dask.array as da\n'), ((14904, 14930), 'dask.array.concatenate', 'da.concatenate', (['[v1_, v2_]'], {}), '([v1_, v2_])\n', (14918, 14930), True, 'import dask.array as da\n'), ((14943, 14969), 'dask.array.concatenate', 'da.concatenate', (['[f1_, f2_]'], {}), '([f1_, f2_])\n', (14957, 14969), True, 'import dask.array as da\n'), ((14982, 15008), 'dask.array.concatenate', 'da.concatenate', (['[p1_, p2_]'], {}), '([p1_, p2_])\n', (14996, 15008), True, 'import dask.array as da\n'), ((15033, 15059), 'numpy.concatenate', 'np.concatenate', (['(sp1, sp2)'], {}), '((sp1, sp2))\n', (15047, 15059), True, 'import numpy as np\n'), ((15753, 15777), 'dask.array.concatenate', 'da.concatenate', (['[a1, a2]'], {}), '([a1, a2])\n', (15767, 15777), True, 'import dask.array as da\n'), ((16121, 16145), 'dask.array.concatenate', 'da.concatenate', (['[b1, b2]'], {}), '([b1, b2])\n', (16135, 16145), True, 'import dask.array as da\n'), ((16481, 16505), 
'dask.array.concatenate', 'da.concatenate', (['[f1, f2]'], {}), '([f1, f2])\n', (16495, 16505), True, 'import dask.array as da\n'), ((16841, 16865), 'dask.array.concatenate', 'da.concatenate', (['[p1, p2]'], {}), '([p1, p2])\n', (16855, 16865), True, 'import dask.array as da\n'), ((17620, 17644), 'dask.array.concatenate', 'da.concatenate', (['[v1, v2]'], {}), '([v1, v2])\n', (17634, 17644), True, 'import dask.array as da\n'), ((17871, 17920), 'numpy.concatenate', 'np.concatenate', (['(a1.chunks, a2.chunks)'], {'axis': 'None'}), '((a1.chunks, a2.chunks), axis=None)\n', (17885, 17920), True, 'import numpy as np\n'), ((19063, 19105), 'dask.array.stack', 'da.stack', (['[ubins_part, vbins_part, p_part]'], {}), '([ubins_part, vbins_part, p_part])\n', (19071, 19105), True, 'import dask.array as da\n'), ((20282, 20341), 'dask.delayed', 'dask.delayed', (['groupby_partition.combine_function_partitions'], {}), '(groupby_partition.combine_function_partitions)\n', (20294, 20341), False, 'import dask\n'), ((23209, 23301), 'numpy.absolute', 'np.absolute', (['(ds_ind.DATA[:, data_columns[0]].data + ds_ind.DATA[:, data_columns[1]].data)'], {}), '(ds_ind.DATA[:, data_columns[0]].data + ds_ind.DATA[:,\n data_columns[1]].data)\n', (23220, 23301), True, 'import numpy as np\n'), ((23552, 23582), 'dask.array.stack', 'da.stack', (['[dd_ubins, dd_vbins]'], {}), '([dd_ubins, dd_vbins])\n', (23560, 23582), True, 'import dask.array as da\n'), ((24589, 24638), 'dask.delayed', 'dask.delayed', (['groupby_apply.combine_group_flagval'], {}), '(groupby_apply.combine_group_flagval)\n', (24601, 24638), False, 'import dask\n'), ((25601, 25622), 'numpy.sum', 'np.sum', (['((a - mu) ** 2)'], {}), '((a - mu) ** 2)\n', (25607, 25622), True, 'import numpy as np\n'), ((26472, 26489), 'numpy.sum', 'np.sum', (['(1 * flags)'], {}), '(1 * flags)\n', (26478, 26489), True, 'import numpy as np\n'), ((26614, 26641), 'dask.array.stack', 'da.stack', (['[ubins, vbins, p]'], {}), '([ubins, vbins, p])\n', (26622, 26641), 
True, 'import dask.array as da\n'), ((740, 775), 'logging.FileHandler', 'logging.FileHandler', (['"""gridflag.log"""'], {}), "('gridflag.log')\n", (759, 775), False, 'import logging\n'), ((785, 808), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (806, 808), False, 'import logging\n'), ((6276, 6330), 'dask.delayed', 'dask.delayed', (['groupby_partition.create_bin_groups_sort'], {}), '(groupby_partition.create_bin_groups_sort)\n', (6288, 6330), False, 'import dask\n'), ((6427, 6476), 'dask.delayed', 'dask.delayed', (['groupby_partition.apply_grid_median'], {}), '(groupby_partition.apply_grid_median)\n', (6439, 6476), False, 'import dask\n'), ((9073, 9085), 'numpy.median', 'np.median', (['k'], {}), '(k)\n', (9082, 9085), True, 'import numpy as np\n'), ((20056, 20110), 'dask.delayed', 'dask.delayed', (['groupby_partition.create_bin_groups_sort'], {}), '(groupby_partition.create_bin_groups_sort)\n', (20068, 20110), False, 'import dask\n'), ((20179, 20228), 'dask.delayed', 'dask.delayed', (['groupby_partition.apply_grid_median'], {}), '(groupby_partition.apply_grid_median)\n', (20191, 20228), False, 'import dask\n'), ((23370, 23462), 'numpy.absolute', 'np.absolute', (['(ds_ind.DATA[:, data_columns[0]].data - ds_ind.DATA[:, data_columns[1]].data)'], {}), '(ds_ind.DATA[:, data_columns[0]].data - ds_ind.DATA[:,\n data_columns[1]].data)\n', (23381, 23462), True, 'import numpy as np\n'), ((24034, 24084), 'dask.delayed', 'dask.delayed', (['groupby_apply.group_bin_flagval_wrap'], {}), '(groupby_apply.group_bin_flagval_wrap)\n', (24046, 24084), False, 'import dask\n'), ((25260, 25303), 'dask.delayed', 'dask.delayed', (['groupby_apply.apply_to_groups'], {}), '(groupby_apply.apply_to_groups)\n', (25272, 25303), False, 'import dask\n'), ((25349, 25392), 'dask.delayed', 'dask.delayed', (['groupby_apply.apply_to_groups'], {}), '(groupby_apply.apply_to_groups)\n', (25361, 25392), False, 'import dask\n'), ((7355, 7399), 'dask.delayed', 'dask.delayed', 
(['annulus_stats.flag_one_annulus'], {}), '(annulus_stats.flag_one_annulus)\n', (7367, 7399), False, 'import dask\n'), ((7622, 7666), 'dask.delayed', 'dask.delayed', (['annulus_stats.flag_one_annulus'], {}), '(annulus_stats.flag_one_annulus)\n', (7634, 7666), False, 'import dask\n'), ((11064, 11088), 'numpy.log', 'np.log', (['(min_nrow / resid)'], {}), '(min_nrow / resid)\n', (11070, 11088), True, 'import numpy as np\n'), ((11087, 11109), 'numpy.log', 'np.log', (['sublist_length'], {}), '(sublist_length)\n', (11093, 11109), True, 'import numpy as np\n'), ((12628, 12676), 'dask.array.from_delayed', 'da.from_delayed', (['x_[0:s_]', '(s_,)'], {'dtype': 'np.int32'}), '(x_[0:s_], (s_,), dtype=np.int32)\n', (12643, 12676), True, 'import dask.array as da\n'), ((12751, 12799), 'dask.array.from_delayed', 'da.from_delayed', (['x_[0:s_]', '(s_,)'], {'dtype': 'np.int32'}), '(x_[0:s_], (s_,), dtype=np.int32)\n', (12766, 12799), True, 'import dask.array as da\n'), ((12874, 12922), 'dask.array.from_delayed', 'da.from_delayed', (['x_[0:s_]', '(s_,)'], {'dtype': 'np.int64'}), '(x_[0:s_], (s_,), dtype=np.int64)\n', (12889, 12922), True, 'import dask.array as da\n'), ((12997, 13045), 'dask.array.from_delayed', 'da.from_delayed', (['x_[0:s_]', '(s_,)'], {'dtype': 'np.int64'}), '(x_[0:s_], (s_,), dtype=np.int64)\n', (13012, 13045), True, 'import dask.array as da\n'), ((13121, 13175), 'dask.array.from_delayed', 'da.from_delayed', (['x_[s_:c_]', '(c_ - s_,)'], {'dtype': 'np.int32'}), '(x_[s_:c_], (c_ - s_,), dtype=np.int32)\n', (13136, 13175), True, 'import dask.array as da\n'), ((13253, 13307), 'dask.array.from_delayed', 'da.from_delayed', (['x_[s_:c_]', '(c_ - s_,)'], {'dtype': 'np.int32'}), '(x_[s_:c_], (c_ - s_,), dtype=np.int32)\n', (13268, 13307), True, 'import dask.array as da\n'), ((13377, 13431), 'dask.array.from_delayed', 'da.from_delayed', (['x_[s_:c_]', '(c_ - s_,)'], {'dtype': 'np.int64'}), '(x_[s_:c_], (c_ - s_,), dtype=np.int64)\n', (13392, 13431), True, 'import dask.array 
as da\n'), ((13501, 13555), 'dask.array.from_delayed', 'da.from_delayed', (['x_[s_:c_]', '(c_ - s_,)'], {'dtype': 'np.int64'}), '(x_[s_:c_], (c_ - s_,), dtype=np.int64)\n', (13516, 13555), True, 'import dask.array as da\n'), ((15203, 15252), 'dask.array.from_delayed', 'da.from_delayed', (['x_[:sp_]', '(sp_,)'], {'dtype': 'np.int32'}), '(x_[:sp_], (sp_,), dtype=np.int32)\n', (15218, 15252), True, 'import dask.array as da\n'), ((15359, 15417), 'dask.array.from_delayed', 'da.from_delayed', (['x_[sp_:ch_]', '(ch_ - sp_,)'], {'dtype': 'np.int32'}), '(x_[sp_:ch_], (ch_ - sp_,), dtype=np.int32)\n', (15374, 15417), True, 'import dask.array as da\n'), ((15816, 15865), 'dask.array.from_delayed', 'da.from_delayed', (['x_[:sp_]', '(sp_,)'], {'dtype': 'np.int32'}), '(x_[:sp_], (sp_,), dtype=np.int32)\n', (15831, 15865), True, 'import dask.array as da\n'), ((15972, 16030), 'dask.array.from_delayed', 'da.from_delayed', (['x_[sp_:ch_]', '(ch_ - sp_,)'], {'dtype': 'np.int32'}), '(x_[sp_:ch_], (ch_ - sp_,), dtype=np.int32)\n', (15987, 16030), True, 'import dask.array as da\n'), ((16176, 16225), 'dask.array.from_delayed', 'da.from_delayed', (['x_[:sp_]', '(sp_,)'], {'dtype': 'np.int64'}), '(x_[:sp_], (sp_,), dtype=np.int64)\n', (16191, 16225), True, 'import dask.array as da\n'), ((16332, 16390), 'dask.array.from_delayed', 'da.from_delayed', (['x_[sp_:ch_]', '(ch_ - sp_,)'], {'dtype': 'np.int64'}), '(x_[sp_:ch_], (ch_ - sp_,), dtype=np.int64)\n', (16347, 16390), True, 'import dask.array as da\n'), ((16536, 16585), 'dask.array.from_delayed', 'da.from_delayed', (['x_[:sp_]', '(sp_,)'], {'dtype': 'np.int64'}), '(x_[:sp_], (sp_,), dtype=np.int64)\n', (16551, 16585), True, 'import dask.array as da\n'), ((16692, 16750), 'dask.array.from_delayed', 'da.from_delayed', (['x_[sp_:ch_]', '(ch_ - sp_,)'], {'dtype': 'np.int64'}), '(x_[sp_:ch_], (ch_ - sp_,), dtype=np.int64)\n', (16707, 16750), True, 'import dask.array as da\n'), ((19552, 19599), 'dask.delayed', 'dask.delayed', 
(['groupby_partition.sort_bins_multi'], {}), '(groupby_partition.sort_bins_multi)\n', (19564, 19599), False, 'import dask\n'), ((19802, 19843), 'dask.delayed', 'dask.delayed', (['groupby_partition.sort_bins'], {}), '(groupby_partition.sort_bins)\n', (19814, 19843), False, 'import dask\n'), ((5379, 5426), 'dask.delayed', 'dask.delayed', (['groupby_partition.sort_bins_multi'], {}), '(groupby_partition.sort_bins_multi)\n', (5391, 5426), False, 'import dask\n'), ((5627, 5668), 'dask.delayed', 'dask.delayed', (['groupby_partition.sort_bins'], {}), '(groupby_partition.sort_bins)\n', (5639, 5668), False, 'import dask\n'), ((5884, 5931), 'dask.delayed', 'dask.delayed', (['groupby_partition.sort_bins_multi'], {}), '(groupby_partition.sort_bins_multi)\n', (5896, 5931), False, 'import dask\n'), ((6096, 6137), 'dask.delayed', 'dask.delayed', (['groupby_partition.sort_bins'], {}), '(groupby_partition.sort_bins)\n', (6108, 6137), False, 'import dask\n'), ((13727, 13783), 'dask.array.from_delayed', 'da.from_delayed', (['x_[0:s_]', '(s_, ncols)'], {'dtype': 'np.float32'}), '(x_[0:s_], (s_, ncols), dtype=np.float32)\n', (13742, 13783), True, 'import dask.array as da\n'), ((13859, 13921), 'dask.array.from_delayed', 'da.from_delayed', (['x_[s_:c_]', '(c_ - s_, ncols)'], {'dtype': 'np.float32'}), '(x_[s_:c_], (c_ - s_, ncols), dtype=np.float32)\n', (13874, 13921), True, 'import dask.array as da\n'), ((14008, 14058), 'dask.array.from_delayed', 'da.from_delayed', (['x_[0:s_]', '(s_,)'], {'dtype': 'np.float32'}), '(x_[0:s_], (s_,), dtype=np.float32)\n', (14023, 14058), True, 'import dask.array as da\n'), ((14135, 14191), 'dask.array.from_delayed', 'da.from_delayed', (['x_[s_:c_]', '(c_ - s_,)'], {'dtype': 'np.float32'}), '(x_[s_:c_], (c_ - s_,), dtype=np.float32)\n', (14150, 14191), True, 'import dask.array as da\n'), ((16922, 16979), 'dask.array.from_delayed', 'da.from_delayed', (['x_[:sp_]', '(sp_, ncols)'], {'dtype': 'np.float32'}), '(x_[:sp_], (sp_, ncols), dtype=np.float32)\n', 
(16937, 16979), True, 'import dask.array as da\n'), ((17093, 17159), 'dask.array.from_delayed', 'da.from_delayed', (['x_[sp_:ch_]', '(ch_ - sp_, ncols)'], {'dtype': 'np.float32'}), '(x_[sp_:ch_], (ch_ - sp_, ncols), dtype=np.float32)\n', (17108, 17159), True, 'import dask.array as da\n'), ((17287, 17338), 'dask.array.from_delayed', 'da.from_delayed', (['x_[:sp_]', '(sp_,)'], {'dtype': 'np.float32'}), '(x_[:sp_], (sp_,), dtype=np.float32)\n', (17302, 17338), True, 'import dask.array as da\n'), ((17453, 17513), 'dask.array.from_delayed', 'da.from_delayed', (['x_[sp_:ch_]', '(ch_ - sp_,)'], {'dtype': 'np.float32'}), '(x_[sp_:ch_], (ch_ - sp_,), dtype=np.float32)\n', (17468, 17513), True, 'import dask.array as da\n'), ((21075, 21122), 'dask.array.sum', 'da.sum', (['ds_ind.FLAG.data'], {'axis': '(1)', 'dtype': 'np.bool'}), '(ds_ind.FLAG.data, axis=1, dtype=np.bool)\n', (21081, 21122), True, 'import dask.array as da\n')] |
##################################################################################
# #
# Michael: I think that this is the file to evaluate whether a rhythm is pyloric #
# It does not exactly follow Prinz et al.'s definition of a pyloric rhythm. It #
# simply checks whether there are bursts in the data --> not NaN.               #
# #
##################################################################################
import sys
import numpy as np
#import dill
#from sortedcontainers import SortedList
import sys
import numpy as np
#import dill
import os
from parameters import ParameterSet
from copy import deepcopy
import time
def merge_datasets(filedir, dataset=None, outfile_name=None, adhere_prior=False,
                   prior=None, exclude_NaN=False, enforce_pyloric=False, verbose=False):
    """
    This function takes the dataset provided in the variable dataset and extends it by the files in filedir.
    It adds all variables where the phasegap is well defined and prints the number of pyloric samples.

    :param filedir: string to folder, e.g. '../results/samples/samples_13D_new'
    :param dataset: string to dataset, e.g. '../results/samples/pyloricsamples_13D.npz'
    :param outfile_name: string, name of file to write to, e.g. 'pyloricsamples_13D.npz'
    :param adhere_prior: bool, if True, samples are deleted if they come from a region outside of the prior
    :param prior: prior object with .lower and .upper bounds; required if adhere_prior is True
    :param exclude_NaN: bool, if True, drop samples whose 5th summary statistic is NaN
    :param enforce_pyloric: bool, with exclude_NaN, keep only samples flagged pyloric
    :param verbose: bool
    :return: saves the extended dataset (summ_stats, seed, and params) as .npz file
    """
    assert adhere_prior==False or prior is not None, 'You have to give a prior if you want to adhere its boundaries'
    # Gather sample files, skipping hidden files and the readme.
    filenames = [f for f in os.listdir(filedir) if f[0] != '.' and f != 'readme.txt']
    picked_params = []
    picked_stats = []
    picked_seeds = []
    last_len = 0
    # Samples are spread over multiple files to reduce the risk of losing the
    # entire dataset to a single corrupted file; loop over all of them.
    for fname in filenames:
        data = np.load("{}/{}".format(filedir, fname))
        summstats = data['data']
        counter_pyloric = 0
        param_data_exp = np.exp(data['params'])
        param_data = data['params']
        seed_data = data['seeds']
        for idx, current_summ_stat in enumerate(summstats):
            current_params_exp = param_data_exp[idx]
            current_params = param_data[idx]
            current_seeds = seed_data[idx]
            if current_summ_stat[-1] > 0.0:
                counter_pyloric += 1
            # Decide whether this sample passes the NaN / pyloric criteria.
            if exclude_NaN:
                keep = (not enforce_pyloric and not np.isnan(current_summ_stat[4])) \
                    or (enforce_pyloric and current_summ_stat[-1] == 1)
            else:
                keep = True
            # BUG FIX: params_are_bounded returns a *list* of booleans, which
            # is truthy even when it is [False]; reduce with all() so the
            # prior check actually filters samples.
            if keep and (not adhere_prior or all(params_are_bounded(current_params_exp, prior))):
                picked_params.append(current_params)
                picked_stats.append(current_summ_stat[:-1])  # drop last one which simply indicates pyloric-ness.
                picked_seeds.append(current_seeds)
        if verbose:
            print("File "+fname+" contained {} samples".format(len(summstats)))
            print("of which {} go into the dataset".format(len(picked_params) - last_len))
            print("of which {} are pyloric".format(counter_pyloric))
        last_len = len(picked_params)
    if dataset is not None:
        data = np.load(dataset)
        picked_params_gt = data['params']
        picked_stats_gt = data['stats']
        picked_seeds_gt = data['seeds']
        picked_seeds = np.concatenate((picked_seeds_gt, np.asarray(picked_seeds)))
        picked_stats = np.concatenate((picked_stats_gt, np.asarray(picked_stats)))
        # NOTE(review): exp() is applied only on this merge path, so without a
        # base `dataset` the parameters are saved in log space — confirm this
        # asymmetry is intended.
        picked_params = np.concatenate((picked_params_gt, np.exp(np.asarray(picked_params))))
        print('Successfully merged datasets')
    np.savez_compressed(outfile_name, seeds=picked_seeds, params=picked_params,
                        stats=picked_stats)
def find_pyloric_like(filename, outfile_name, num_stds=2.0):
    """Filter a stored sample set down to pyloric-like entries.

    Loads `filename` (an .npz archive with 'seeds', 'stats' and 'params'),
    keeps every sample whose summary statistics pass check_ss within
    `num_stds` experimental standard deviations, and writes the survivors to
    `outfile_name` with np.savez_compressed.
    """
    data = np.load(filename)  # samples_dir = results/samples/
    kept_seeds, kept_stats, kept_params = [], [], []
    for seed, stat, param in zip(data['seeds'], data['stats'], data['params']):
        if check_ss(stat, num_stds=num_stds):
            kept_seeds.append(seed)
            kept_stats.append(stat)
            kept_params.append(param)
    np.savez_compressed(outfile_name, seeds=kept_seeds, params=kept_params,
                        stats=kept_stats)
"""
def check_ss(summstats):
if summstats[0] < 952.0 or summstats[0] > 2067.0: return False
if summstats[1] < 317.0 or summstats[1] > 847.0: return False
if summstats[2] < 172.0 or summstats[2] > 625.0: return False
if summstats[3] < 230.0 or summstats[3] > 830.0: return False
if summstats[4] < 4.0 or summstats[4] > 439.0: return False
if summstats[5] < -181.0 or summstats[5] > 59.0: return False
if summstats[6] < 464.0 or summstats[6] > 1142.0: return False
if summstats[7] < 709.0 or summstats[7] > 1572.0: return False
if summstats[8] < 0.305 or summstats[8] > 0.464: return False
if summstats[9] < 0.146 or summstats[9] > 0.383: return False
if summstats[10] < 0.240 or summstats[10] > 0.456: return False
if summstats[11] < 0.018 or summstats[11] > 0.278: return False
if summstats[12] < -0.108 or summstats[12] > 0.029: return False
if summstats[13] < 0.426 or summstats[13] > 0.640: return False
if summstats[14] < 0.638 or summstats[14] > 0.877: return False
return True
"""
def check_ss(summstats, num_stds=2.0):
    """Return True if every summary statistic lies within `num_stds` standard
    deviations of the corresponding experimental mean.

    Only as many statistics as are provided are checked (zip truncates), and
    NaN statistics pass the per-element test, mirroring the original
    comparison-based rejection logic.
    """
    experimental_means = np.asarray([1509, 582, 399, 530, 221, -61, 803, 1141, 0.385, 0.264, 0.348, 0.148, -0.040, 0.533, 0.758])
    experimental_stds = np.asarray( [279, 133, 113, 150, 109, 60, 169, 216, 0.040, 0.059, 0.054, 0.065, 0.034, 0.054, 0.060])
    # Reject only on a definite out-of-bounds comparison; a NaN statistic
    # makes both comparisons False and therefore passes, as before.
    return all(
        not (stat < mean - num_stds * std or stat > mean + num_stds * std)
        for stat, mean, std in zip(summstats, experimental_means, experimental_stds)
    )
def merge_samples(filedir, name='params'):
    """
    Since sampling from MAFs requires rejection sampling, we do this externally.
    This function then merges the externally created files into a single list.

    :param filedir: string to folder, e.g. '../results/samples/samples_13D_new'
    :param name: key of the array to collect from each .npz file
    :return: all_conds: list of samples
    """
    # Skip hidden files and the readme; everything else is a sample file.
    visible = [f for f in os.listdir(filedir) if f[0] != '.' and f != 'readme.txt']
    all_conds = []
    for fname in visible:
        data = np.load("{}/{}".format(filedir, fname))  # samples_dir = results/samples/
        all_conds.extend(data[name])
    return all_conds
def params_are_bounded(conductances, prior, normalized=False):
    """Check which parameter vectors lie strictly inside the prior bounds.

    :param conductances: numpy array; a single parameter vector (1-D) or a
        stack of vectors (2-D)
    :param prior: object exposing elementwise `.lower` and `.upper` bounds
    :param normalized: if False, all entries except the last seven are
        log-transformed before the comparison
    :return: list of bools, one entry per parameter vector
    """
    if conductances.ndim == 1:
        conductances = [conductances]
    results = []
    for sample in conductances:
        # Work on a copy so the caller's array is never mutated.
        candidate = deepcopy(sample)
        if not normalized:
            candidate[:-7] = np.log(candidate[:-7])
        inside = bool(np.all(prior.lower < candidate) and np.all(prior.upper > candidate))
        results.append(inside)
    return results
def single_params_are_bounded(conductances, prior, normalized=False):
    """Check which parameter vectors lie strictly inside the prior bounds.

    This function was a byte-for-byte duplicate of ``params_are_bounded``;
    it now delegates to it so there is a single implementation to maintain.
    The signature and return value (list of bools) are unchanged.

    :param conductances: numpy array; a single parameter vector (1-D) or a
        stack of vectors (2-D)
    :param prior: object exposing elementwise `.lower` and `.upper` bounds
    :param normalized: if False, all entries except the last seven are
        log-transformed before the comparison
    :return: list of bools, one entry per parameter vector
    """
    return params_are_bounded(conductances, prior, normalized=normalized)
def Kaans_function():
    """
    Legacy collection script driven by sys.argv; was used as a base for the
    function merge_datasets and can be discarded.

    Reads dill-pickled (params, summstats) entries from the files named on
    the command line, keeps samples whose relevant summary statistic is not
    NaN ("strict" mode checks the last statistic instead of the fifth), and
    saves the survivors with np.savez_compressed.
    """
    strict = False
    if sys.argv[1] == "strict":
        strict = True
        filenames = sys.argv[2:]
        print("Strict run")
    else:
        filenames = sys.argv[1:]
    picked_params = []
    picked_stats = []

    class EntryWrapper:
        # Wraps one sample so a sorted container can order samples by squared
        # distance of the first four summary statistics to a target.
        def __init__(self, params, summstats, target):
            self.params = params
            self.summstats = summstats[:4]
            self.dist = np.sum((self.summstats - target) ** 2)

        def __lt__(self, other):
            # NaN distances sort last.
            return not np.isnan(self.dist) and self.dist < other.dist

    counter = 0
    last_len = 0
    # Samples are spread over multiple files (reduces the risk of losing the
    # entire dataset to one corrupted file); loop over all of them.
    for fname in filenames:
        # BUG FIX: the file handle was never closed; use a context manager.
        with open(fname, "rb") as f:
            counter_local = counter + 0
            print("Opened file {}".format(fname))
            while True:
                try:
                    # NOTE(review): the 'dill' import is commented out at the
                    # top of this module — this call crashes unless it is
                    # re-enabled.
                    entry = dill.load(f)
                except EOFError:
                    break
                counter += 1
                params = entry[0]
                summstats = entry[1]
                assert len(summstats) == 1
                summstats = summstats[0]
                if (not strict and not np.isnan(summstats[4])) or (strict and not np.isnan(summstats[-1])):
                    picked_params.append(params)
                    picked_stats.append(summstats)
        print("File contained {} samples".format(counter - counter_local))
        print("of which {} pyloric".format(len(picked_params) - last_len))
        last_len = len(picked_params)
    print("went through {} elements".format(counter))
    ofname = "pyloricsamples"
    if strict:
        ofname = "pyloricsamples_strict"
    picked_params, picked_stats = np.array(picked_params), np.array(picked_stats)
    np.savez_compressed(ofname, params=picked_params, stats=picked_stats)
    # BUG FIX: 'picks' was an undefined name; report the collected count.
    print("Collected {}".format(len(picked_params)))
| [
"numpy.load",
"copy.deepcopy",
"numpy.sum",
"numpy.log",
"numpy.asarray",
"numpy.isnan",
"numpy.savez_compressed",
"numpy.array",
"numpy.exp",
"os.listdir",
"numpy.all"
] | [((1845, 1864), 'os.listdir', 'os.listdir', (['filedir'], {}), '(filedir)\n', (1855, 1864), False, 'import os\n'), ((4790, 4889), 'numpy.savez_compressed', 'np.savez_compressed', (['outfile_name'], {'seeds': 'picked_seeds', 'params': 'picked_params', 'stats': 'picked_stats'}), '(outfile_name, seeds=picked_seeds, params=picked_params,\n stats=picked_stats)\n', (4809, 4889), True, 'import numpy as np\n'), ((4984, 5001), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (4991, 5001), True, 'import numpy as np\n'), ((5503, 5602), 'numpy.savez_compressed', 'np.savez_compressed', (['outfile_name'], {'seeds': 'picked_seeds', 'params': 'picked_params', 'stats': 'picked_stats'}), '(outfile_name, seeds=picked_seeds, params=picked_params,\n stats=picked_stats)\n', (5522, 5602), True, 'import numpy as np\n'), ((6773, 6881), 'numpy.asarray', 'np.asarray', (['[1509, 582, 399, 530, 221, -61, 803, 1141, 0.385, 0.264, 0.348, 0.148, -\n 0.04, 0.533, 0.758]'], {}), '([1509, 582, 399, 530, 221, -61, 803, 1141, 0.385, 0.264, 0.348, \n 0.148, -0.04, 0.533, 0.758])\n', (6783, 6881), True, 'import numpy as np\n'), ((6902, 7005), 'numpy.asarray', 'np.asarray', (['[279, 133, 113, 150, 109, 60, 169, 216, 0.04, 0.059, 0.054, 0.065, 0.034, \n 0.054, 0.06]'], {}), '([279, 133, 113, 150, 109, 60, 169, 216, 0.04, 0.059, 0.054, \n 0.065, 0.034, 0.054, 0.06])\n', (6912, 7005), True, 'import numpy as np\n'), ((7536, 7555), 'os.listdir', 'os.listdir', (['filedir'], {}), '(filedir)\n', (7546, 7555), False, 'import os\n'), ((10782, 10851), 'numpy.savez_compressed', 'np.savez_compressed', (['ofname'], {'params': 'picked_params', 'stats': 'picked_stats'}), '(ofname, params=picked_params, stats=picked_stats)\n', (10801, 10851), True, 'import numpy as np\n'), ((2636, 2658), 'numpy.exp', 'np.exp', (["data['params']"], {}), "(data['params'])\n", (2642, 2658), True, 'import numpy as np\n'), ((4288, 4304), 'numpy.load', 'np.load', (['dataset'], {}), '(dataset)\n', (4295, 4304), True, 'import 
numpy as np\n'), ((8139, 8153), 'copy.deepcopy', 'deepcopy', (['cond'], {}), '(cond)\n', (8147, 8153), False, 'from copy import deepcopy\n'), ((8576, 8590), 'copy.deepcopy', 'deepcopy', (['cond'], {}), '(cond)\n', (8584, 8590), False, 'from copy import deepcopy\n'), ((10729, 10752), 'numpy.array', 'np.array', (['picked_params'], {}), '(picked_params)\n', (10737, 10752), True, 'import numpy as np\n'), ((10754, 10776), 'numpy.array', 'np.array', (['picked_stats'], {}), '(picked_stats)\n', (10762, 10776), True, 'import numpy as np\n'), ((8206, 8224), 'numpy.log', 'np.log', (['conds[:-7]'], {}), '(conds[:-7])\n', (8212, 8224), True, 'import numpy as np\n'), ((8236, 8263), 'numpy.all', 'np.all', (['(prior.lower < conds)'], {}), '(prior.lower < conds)\n', (8242, 8263), True, 'import numpy as np\n'), ((8268, 8295), 'numpy.all', 'np.all', (['(prior.upper > conds)'], {}), '(prior.upper > conds)\n', (8274, 8295), True, 'import numpy as np\n'), ((8643, 8661), 'numpy.log', 'np.log', (['conds[:-7]'], {}), '(conds[:-7])\n', (8649, 8661), True, 'import numpy as np\n'), ((8673, 8700), 'numpy.all', 'np.all', (['(prior.lower < conds)'], {}), '(prior.lower < conds)\n', (8679, 8700), True, 'import numpy as np\n'), ((8705, 8732), 'numpy.all', 'np.all', (['(prior.upper > conds)'], {}), '(prior.upper > conds)\n', (8711, 8732), True, 'import numpy as np\n'), ((9330, 9368), 'numpy.sum', 'np.sum', (['((self.summstats - target) ** 2)'], {}), '((self.summstats - target) ** 2)\n', (9336, 9368), True, 'import numpy as np\n'), ((4535, 4559), 'numpy.asarray', 'np.asarray', (['picked_seeds'], {}), '(picked_seeds)\n', (4545, 4559), True, 'import numpy as np\n'), ((4618, 4642), 'numpy.asarray', 'np.asarray', (['picked_stats'], {}), '(picked_stats)\n', (4628, 4642), True, 'import numpy as np\n'), ((4710, 4735), 'numpy.asarray', 'np.asarray', (['picked_params'], {}), '(picked_params)\n', (4720, 4735), True, 'import numpy as np\n'), ((9426, 9445), 'numpy.isnan', 'np.isnan', (['self.dist'], {}), 
'(self.dist)\n', (9434, 9445), True, 'import numpy as np\n'), ((10202, 10224), 'numpy.isnan', 'np.isnan', (['summstats[4]'], {}), '(summstats[4])\n', (10210, 10224), True, 'import numpy as np\n'), ((10245, 10268), 'numpy.isnan', 'np.isnan', (['summstats[-1]'], {}), '(summstats[-1])\n', (10253, 10268), True, 'import numpy as np\n'), ((3101, 3131), 'numpy.isnan', 'np.isnan', (['current_summ_stat[4]'], {}), '(current_summ_stat[4])\n', (3109, 3131), True, 'import numpy as np\n')] |
from typing import Tuple
import numpy as np
import torch
from torch import FloatTensor
from torch.utils.data.dataset import Dataset
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, ToTensor, Normalize
from .settings import DATA_ROOT
from IPython import embed
# Preprocessing pipeline for MNIST: convert PIL image to tensor, then
# normalise with the (0.1307, 0.3081) mean/std constants.
MNIST_TRANSFORM = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
class MNISTSummation(Dataset):
    """Dataset of variable-length bags of MNIST images.

    Each item is a pre-drawn bag of between ``min_len`` and ``max_len`` MNIST
    indices that all carry one randomly chosen digit label. ``__getitem__``
    returns two equally sized stacks of images of that same digit plus the
    digit itself as a label tensor.
    """

    def __init__(self, min_len: int, max_len: int, dataset_len: int, train: bool = True, transform: Compose = None):
        # min_len / max_len: inclusive bounds on the bag size.
        # dataset_len: number of bags this dataset exposes (not the MNIST size).
        self.min_len = min_len
        self.max_len = max_len
        self.dataset_len = dataset_len
        self.train = train
        self.transform = transform
        self.mnist = MNIST(DATA_ROOT, train=self.train, transform=self.transform, download=True)
        # print(self.mnist.__dict__)
        mnist_len = self.mnist.__len__()
        mnist_items_range = np.arange(0, mnist_len)
        # Draw a random bag size (uniform in [min_len, max_len]) for every bag.
        items_len_range = np.arange(self.min_len, self.max_len + 1)
        items_len = np.random.choice(items_len_range, size=self.dataset_len, replace=True)
        self.mnist_items = []
        for i in range(self.dataset_len):
            # Pick one digit class, then sample the bag (with replacement)
            # only from MNIST indices carrying that label.
            label = np.random.choice(np.arange(0,10))
            idx = self.mnist.targets == label
            # print(mnist_items_range[idx])
            # self.mnist_items.append(np.random.choice(mnist_items_range, size=items_len[i], replace=True))
            self.mnist_items.append(np.random.choice(mnist_items_range[idx], size=items_len[i], replace=True))

    def __len__(self) -> int:
        # Number of bags, not the number of MNIST images.
        return self.dataset_len

    def __getitem__(self, item: int) -> Tuple[FloatTensor, FloatTensor]:
        """Return (images1, images2, label): two same-sized image stacks of one
        digit, and that digit as a LongTensor."""
        mnist_items = self.mnist_items[item]
        # the_sum = 0
        images1 = []
        for mi in mnist_items:
            img, target = self.mnist.__getitem__(mi)
            # the_sum += target
            images1.append(img)
        # NOTE: `target` deliberately leaks out of the loop — it is the label of
        # the last image, and all images in a bag share one label (see __init__).
        mnist_items_range = np.arange(0, self.mnist.__len__())
        idx = self.mnist.targets == target
        # Draw a fresh, equally sized bag of the same digit for the second view.
        mnist_items = np.random.choice(mnist_items_range[idx], size=len(mnist_items), replace=True)
        images2 = []
        for mi in mnist_items:
            img, target = self.mnist.__getitem__(mi)
            # the_sum += target
            images2.append(img)
        # return torch.stack(images, dim=0), torch.FloatTensor([the_sum])
        # print(len(images1),len(images2))
        return torch.stack(images1, dim=0), torch.stack(images2, dim=0), torch.LongTensor([target])
| [
"torch.stack",
"torch.LongTensor",
"numpy.arange",
"numpy.random.choice",
"torchvision.transforms.Normalize",
"torchvision.datasets.MNIST",
"torchvision.transforms.ToTensor"
] | [((337, 347), 'torchvision.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (345, 347), False, 'from torchvision.transforms import Compose, ToTensor, Normalize\n'), ((349, 380), 'torchvision.transforms.Normalize', 'Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (358, 380), False, 'from torchvision.transforms import Compose, ToTensor, Normalize\n'), ((729, 804), 'torchvision.datasets.MNIST', 'MNIST', (['DATA_ROOT'], {'train': 'self.train', 'transform': 'self.transform', 'download': '(True)'}), '(DATA_ROOT, train=self.train, transform=self.transform, download=True)\n', (734, 804), False, 'from torchvision.datasets import MNIST\n'), ((914, 937), 'numpy.arange', 'np.arange', (['(0)', 'mnist_len'], {}), '(0, mnist_len)\n', (923, 937), True, 'import numpy as np\n'), ((967, 1008), 'numpy.arange', 'np.arange', (['self.min_len', '(self.max_len + 1)'], {}), '(self.min_len, self.max_len + 1)\n', (976, 1008), True, 'import numpy as np\n'), ((1030, 1100), 'numpy.random.choice', 'np.random.choice', (['items_len_range'], {'size': 'self.dataset_len', 'replace': '(True)'}), '(items_len_range, size=self.dataset_len, replace=True)\n', (1046, 1100), True, 'import numpy as np\n'), ((2454, 2481), 'torch.stack', 'torch.stack', (['images1'], {'dim': '(0)'}), '(images1, dim=0)\n', (2465, 2481), False, 'import torch\n'), ((2483, 2510), 'torch.stack', 'torch.stack', (['images2'], {'dim': '(0)'}), '(images2, dim=0)\n', (2494, 2510), False, 'import torch\n'), ((2512, 2538), 'torch.LongTensor', 'torch.LongTensor', (['[target]'], {}), '([target])\n', (2528, 2538), False, 'import torch\n'), ((1213, 1229), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (1222, 1229), True, 'import numpy as np\n'), ((1468, 1541), 'numpy.random.choice', 'np.random.choice', (['mnist_items_range[idx]'], {'size': 'items_len[i]', 'replace': '(True)'}), '(mnist_items_range[idx], size=items_len[i], replace=True)\n', (1484, 1541), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from pyml.metrics.classification import precision_score
class test_classification(unittest.TestCase):
    """Unit tests for pyml.metrics.classification."""

    def test_precision_score(self):
        """precision_score should equal the fraction of matching labels."""
        predictions = np.array([1, 2, 3, 4, 5, 6, 3, 1])
        truth = np.array([1, 2, 3, 4, 5, 6, 4, 1])
        # By default assertAlmostEqual requires agreement to 7 decimal places;
        # exactly one of the eight labels disagrees, so the score is 7/8.
        expected = 7 / 8
        self.assertAlmostEqual(precision_score(truth, predictions), expected)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"numpy.array",
"pyml.metrics.classification.precision_score"
] | [((390, 405), 'unittest.main', 'unittest.main', ([], {}), '()\n', (403, 405), False, 'import unittest\n'), ((193, 227), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 3, 1]'], {}), '([1, 2, 3, 4, 5, 6, 3, 1])\n', (201, 227), True, 'import numpy as np\n'), ((238, 272), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 4, 1]'], {}), '([1, 2, 3, 4, 5, 6, 4, 1])\n', (246, 272), True, 'import numpy as np\n'), ((322, 353), 'pyml.metrics.classification.precision_score', 'precision_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (337, 353), False, 'from pyml.metrics.classification import precision_score\n')] |
import math
import numpy as np
from SPARQLWrapper import SPARQLWrapper, JSON, POST
from rdflib import Graph
from rdflib import URIRef
from excut.kg.utils import data_formating
from excut.utils.logging import logger
from excut.kg.utils.data_formating import entity_full_url, relation_full_url
from excut.kg.kg_triples_source import TriplesSource, FileTriplesSource
from tqdm import tqdm
# sys.path.append(os.path.abspath(os.path.join('..', '*')))
class Indexer():
    """
    Index the KG in either a sparql engine or in memory. This is required for rule learning.

    With store='remote' (alias 'SPARQLUpdateStore') triples are pushed to a
    SPARQL endpoint via INSERT DATA queries; any other store keeps them in an
    in-memory rdflib Graph.
    """

    def __init__(self, store='remote', endpoint=None, identifier=None, graph=None, batch_size=100,
                 remove_invalid_ids=True):
        # store: 'remote'/'SPARQLUpdateStore' targets a SPARQL endpoint,
        #        anything else falls back to rdflib's in-memory 'default' store.
        # endpoint: SPARQL endpoint URL (remote store only).
        # identifier: named-graph URI the triples are indexed under.
        # graph: optionally reuse an existing rdflib Graph object.
        # batch_size: number of triples inserted per chunk.
        # remove_invalid_ids: drop triples with invalid ids before a remote
        #        insert (see insert_sparql).
        self.remove_invalid_ids = remove_invalid_ids
        self.batch_size = batch_size
        self.store = 'SPARQLUpdateStore' if store == 'remote' or store == 'SPARQLUpdateStore' else 'default'
        self.endpoint = endpoint
        self.identifier = identifier
        self.graph = graph

    def index_triples(self, triples_source: TriplesSource, prefix='', safe_urls=False, drop_old=False):
        """Index all triples of `triples_source`, optionally dropping the old
        graph first; returns the rdflib Graph (may be None for remote stores)."""
        if drop_old:
            logger.info("Drop %s " % self.identifier)
            self.drop()
        if self.store != 'SPARQLUpdateStore' and not self.graph:
            self.graph = Graph(store=self.store, identifier=self.identifier)
        # print(self.graph.store)
        # if self.store == 'SPARQLUpdateStore':
        #     self.graph.open(self.endpoint)
        # self._index(triples_source, prefix, safe_urls)
        self._index_np(triples_source) # , prefix, safe_urls)
        return self.graph

    def _index_np(self, triples_source, prefix='', safe_urls=False):
        """Insert the source's triples in batches of `batch_size`."""
        logger.info("Start indexing " + triples_source.get_name())
        data = triples_source.as_numpy_array()
        data_size = triples_source.size()
        number_splits = math.ceil(data_size / self.batch_size)
        logger.info("data size %i" % data_size)
        logger.info("chunks %i" % number_splits)
        # ch=0
        chunks = np.array_split(data, number_splits)
        for chunk in tqdm(chunks):
            if self.store == 'SPARQLUpdateStore':
                self.insert_sparql(chunk)
            else:
                self.insert_memory(chunk)
        logger.info("Done indexing " + triples_source.get_name())

    def drop(self):
        """Drop the indexed graph: DROP query for remote stores (only if the
        graph exists), a fresh empty Graph for the in-memory store."""
        if self.store == 'SPARQLUpdateStore':
            if self.graph_exists():
                return self._drop_sparql()
        else:
            self.graph = Graph(store=self.store, identifier=self.identifier)
            return True
        return True

    def insert_memory(self, triples):
        """Add (s, p, o) string triples to the in-memory graph as URIRefs."""
        chunk_context = [(URIRef(s), URIRef(p), URIRef(o), self.graph) for s, p, o in triples]
        self.graph.addN(chunk_context)
        return True

    def insert_sparql(self, triples):
        """INSERT DATA the triples into the remote named graph, filtering out
        invalid ids first when `remove_invalid_ids` is set."""
        triples_filtered = filter(lambda a: data_formating.valid_id_triple(a),
                                  triples) if self.remove_invalid_ids else triples
        query = 'INSERT DATA into <%s> {%s}' % (
            self.identifier, '\n'.join(map(data_formating.sparql_repr, triples_filtered)))
        # print(query)
        sparql = SPARQLWrapper(self.endpoint)
        sparql.setMethod(POST)
        sparql.setReturnFormat(JSON)
        sparql.setQuery(query)
        results = sparql.query().convert()
        return results

    def graph_exists(self):
        """ASK whether the remote named graph holds any triple; always False
        for the in-memory store."""
        if self.store == 'SPARQLUpdateStore':
            query = 'ASK WHERE { GRAPH <%s> { ?s ?p ?o } }' % self.identifier
            sparql = SPARQLWrapper(self.endpoint)
            sparql.setReturnFormat(JSON)
            sparql.setQuery(query)
            results = sparql.query().convert()
            return results['boolean']
        else:
            return False

    def _drop_sparql(self):
        """DROP the remote graph; True if triples were removed, False if there
        was nothing to do, otherwise raise with the endpoint's message."""
        query = 'DROP SILENT GRAPH <%s>' % self.identifier
        sparql = SPARQLWrapper(self.endpoint)
        sparql.setMethod(POST)
        sparql.setReturnFormat(JSON)
        sparql.setQuery(query)
        results = sparql.query().convert()
        # print(results)
        # NOTE(review): the 'callret-0' result shape looks endpoint-specific —
        # confirm against the SPARQL server actually in use.
        result = results['results']['bindings'][0]['callret-0']['value']
        if 'triples were removed' in result:
            return True
        elif 'nothing to do' in result:
            return False
        raise Exception('Problem Dropping the graph using: %s Message from sparql : \"%s\"' % (query, result))
if __name__ == '__main__':
    # Ad-hoc smoke test: index a file-based triples source into a named graph
    # on a SPARQL endpoint, count the triples, and re-check graph existence.
    # Endpoint URLs and file paths are hard-coded to the development machines.
    # labels_indexer=Indexer(host='http://badr:8890/sparql',identifier='http://yago-encoded.org')
    # labels_indexer.index_kg_from_tsv('/GW/D5data-11/gadelrab/yago2018/yagoFacts.ttl','http://yago.org/')
    indexer = Indexer(endpoint='http://tracy:8890/sparql', identifier='http://test-graph.org')
    print(indexer.graph_exists())
    indexer.index_triples(
        FileTriplesSource('/home/gadelrab/ExDEC/data/20k_kemans_it1.nt', prefix='http://test.org/', safe_urls=True),
        drop_old=True)
    # Count the triples now held by the local graph object.
    c = 0
    for t in indexer.graph.triples((None, None, None)):
        c += 1
    print(c)
    print(indexer.graph_exists())
    # print(labels_indexer.drop())
| [
"tqdm.tqdm",
"rdflib.Graph",
"math.ceil",
"excut.utils.logging.logger.info",
"excut.kg.kg_triples_source.FileTriplesSource",
"rdflib.URIRef",
"SPARQLWrapper.SPARQLWrapper",
"numpy.array_split",
"excut.kg.utils.data_formating.valid_id_triple"
] | [((1894, 1932), 'math.ceil', 'math.ceil', (['(data_size / self.batch_size)'], {}), '(data_size / self.batch_size)\n', (1903, 1932), False, 'import math\n'), ((1941, 1980), 'excut.utils.logging.logger.info', 'logger.info', (["('data size %i' % data_size)"], {}), "('data size %i' % data_size)\n", (1952, 1980), False, 'from excut.utils.logging import logger\n'), ((1989, 2029), 'excut.utils.logging.logger.info', 'logger.info', (["('chunks %i' % number_splits)"], {}), "('chunks %i' % number_splits)\n", (2000, 2029), False, 'from excut.utils.logging import logger\n'), ((2063, 2098), 'numpy.array_split', 'np.array_split', (['data', 'number_splits'], {}), '(data, number_splits)\n', (2077, 2098), True, 'import numpy as np\n'), ((2120, 2132), 'tqdm.tqdm', 'tqdm', (['chunks'], {}), '(chunks)\n', (2124, 2132), False, 'from tqdm import tqdm\n'), ((3204, 3232), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['self.endpoint'], {}), '(self.endpoint)\n', (3217, 3232), False, 'from SPARQLWrapper import SPARQLWrapper, JSON, POST\n'), ((3908, 3936), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['self.endpoint'], {}), '(self.endpoint)\n', (3921, 3936), False, 'from SPARQLWrapper import SPARQLWrapper, JSON, POST\n'), ((4821, 4933), 'excut.kg.kg_triples_source.FileTriplesSource', 'FileTriplesSource', (['"""/home/gadelrab/ExDEC/data/20k_kemans_it1.nt"""'], {'prefix': '"""http://test.org/"""', 'safe_urls': '(True)'}), "('/home/gadelrab/ExDEC/data/20k_kemans_it1.nt', prefix=\n 'http://test.org/', safe_urls=True)\n", (4838, 4933), False, 'from excut.kg.kg_triples_source import TriplesSource, FileTriplesSource\n'), ((1154, 1195), 'excut.utils.logging.logger.info', 'logger.info', (["('Drop %s ' % self.identifier)"], {}), "('Drop %s ' % self.identifier)\n", (1165, 1195), False, 'from excut.utils.logging import logger\n'), ((1311, 1362), 'rdflib.Graph', 'Graph', ([], {'store': 'self.store', 'identifier': 'self.identifier'}), '(store=self.store, identifier=self.identifier)\n', (1316, 
1362), False, 'from rdflib import Graph\n'), ((2538, 2589), 'rdflib.Graph', 'Graph', ([], {'store': 'self.store', 'identifier': 'self.identifier'}), '(store=self.store, identifier=self.identifier)\n', (2543, 2589), False, 'from rdflib import Graph\n'), ((3574, 3602), 'SPARQLWrapper.SPARQLWrapper', 'SPARQLWrapper', (['self.endpoint'], {}), '(self.endpoint)\n', (3587, 3602), False, 'from SPARQLWrapper import SPARQLWrapper, JSON, POST\n'), ((2699, 2708), 'rdflib.URIRef', 'URIRef', (['s'], {}), '(s)\n', (2705, 2708), False, 'from rdflib import URIRef\n'), ((2710, 2719), 'rdflib.URIRef', 'URIRef', (['p'], {}), '(p)\n', (2716, 2719), False, 'from rdflib import URIRef\n'), ((2721, 2730), 'rdflib.URIRef', 'URIRef', (['o'], {}), '(o)\n', (2727, 2730), False, 'from rdflib import URIRef\n'), ((2910, 2943), 'excut.kg.utils.data_formating.valid_id_triple', 'data_formating.valid_id_triple', (['a'], {}), '(a)\n', (2940, 2943), False, 'from excut.kg.utils import data_formating\n')] |
from bs4 import BeautifulSoup
import requests
import re
import os
import bz2
import sys
from tqdm import tqdm
import numpy as np
import pandas as pd
import h5py
import time
import shutil
import zipfile
from .hapi import db_begin, fetch, abundance, moleculeName, isotopologueName
import excalibur.ExoMol as ExoMol
import excalibur.HITRAN as HITRAN
def download_ExoMol_file(url, f, l_folder):
    '''
    Download a file from ExoMol and decompress it if needed.
    Parameters
    ----------
    url : String
        The URL of a given ExoMol file.
    f : String
        The filename of the resulting downloaded file.
    l_folder : String
        Local folder the downloaded (and decompressed) file is written to.
        .trans files are additionally converted to HDF5.
    Returns
    -------
    None.
    '''
    if f.endswith('bz2') == True: # If the file ends in .bz2 we need to read and decompress it
        # Check if the file was already downloaded
        if (os.path.splitext(os.path.splitext(f)[0])[0] + '.h5') in os.listdir(l_folder):
            print("This file is already downloaded. Moving on.")
            return
        # Create directory location to prepare for reading compressed file
        compressed_file = l_folder + '/' + f
        # Create a decompresser object and a directory location for the decompressed file
        decompressor = bz2.BZ2Decompressor()
        decompressed_file = l_folder + '/' + os.path.splitext(f)[0] #Keep the file name but get rid of the .bz2 extension to make it .trans
        # Download file from the given URL in chunks and then decompress that chunk immediately
        with requests.get(url, stream=True) as request:
            if f.find("trans") != -1: # Only want to include the progress bar for .trans downloads
                with open(compressed_file, 'wb') as file, open(decompressed_file, 'wb') as output_file, tqdm(total = int(request.headers.get('content-length', 0)), unit = 'iB', unit_scale = True) as pbar:
                    for chunk in request.iter_content(chunk_size = 1024 * 1024):
                        file.write(chunk)
                        pbar.update(len(chunk))
                        output_file.write(decompressor.decompress(chunk))
                print("Converting this .trans file to HDF to save storage space...")
                convert_to_hdf(file = decompressed_file, database = 'ExoMol')
            else:
                with open(compressed_file, 'wb') as file, open(decompressed_file, 'wb') as output_file:
                    for chunk in request.iter_content(chunk_size = 1024 * 1024):
                        file.write(chunk)
                        output_file.write(decompressor.decompress(chunk))
        # Delete the compressed file
        os.remove(compressed_file)
    else: # If the file is not compressed we just need to read it in
        # Broadening files are always stored under fixed local names.
        if 'air' in f:
            input_file = l_folder + '/air.broad'
        elif 'self' in f:
            input_file = l_folder + '/self.broad'
        else:
            input_file = l_folder + '/' + f
        with requests.get(url, stream=True) as request:
            with open(input_file, 'wb') as file:
                for chunk in request.iter_content(chunk_size = 1024 * 1024):
                    file.write(chunk)
def download_HITRAN_line_list(mol_ID, iso_ID, folder, nu_min = 1, nu_max = 100000):
    """
    Download line list using the fetch() function already in HITRAN
    Parameters
    ----------
    mol_ID : int
        HITRAN molecule ID.
    iso_ID : int
        HITRAN isotopologue ID.
    folder : String
        Local folder the fetched line-list files are written to.
    nu_min : int, optional
        Lower bound of the wavenumber range passed to fetch(). The default is 1.
    nu_max : int, optional
        Upper bound of the wavenumber range passed to fetch(). The default is 100000.
    Returns
    -------
    None.
    """
    # db_begin points hapi's local database at `folder`; fetch then downloads
    # the transitions for this molecule/isotopologue in [nu_min, nu_max].
    db_begin(folder)
    fetch(moleculeName(mol_ID), mol_ID, iso_ID, nu_min, nu_max)
def HITEMP_table():
    """
    Recreate the table found on the HITEMP main page in order to simplify later processes.
    Scrapes https://hitran.org/hitemp/, rebuilds the molecule table as a
    DataFrame, manually appends N2O, and attaches the download links.
    Returns
    -------
    hitemp : pandas.DataFrame
        One row per HITEMP molecule, with the online table's columns plus a
        'Download' column holding absolute line-list URLs.
    """
    url = 'https://hitran.org/hitemp/'
    web_content = requests.get(url).text
    soup = BeautifulSoup(web_content, "lxml")
    table = soup.find('table')
    n_rows = 0
    n_columns = 0
    column_names = []
    # iterate through 'tr' (table row) tags
    for row in table.find_all('tr'):
        td_tags = row.find_all('td') # find all 'td' (table data) tags
        if len(td_tags) > 0:
            n_rows += 1
            if n_columns == 0: # Use the number of td tags in the first row to set the first column
                n_columns = len(td_tags)
            # Handle column names
            th_tags = row.find_all('th')
            if len(th_tags) > 0 and len(column_names) == 0: # This loop adds all column headers of the table to 'column_names'
                for th in th_tags:
                    column_names.append(th.get_text())
    hitemp = pd.DataFrame(columns = column_names, index= range(0,n_rows)) # Create a DataFrame to store the table
    row_marker = 0
    for row in table.find_all('tr'): # This loop populates our DataFrame with the same data as the table online
        column_marker = 0
        columns = row.find_all('td')
        for column in columns:
            hitemp.iat[row_marker,column_marker] = column.get_text()
            column_marker += 1
        if len(columns) > 0:
            row_marker += 1
    hitemp = hitemp[:-1] # Get rid of last row
    hitemp.rename(columns = {'Iso counta':'Iso Count'}, inplace = True) # Rename a column header
    hitemp.loc[len(hitemp)] = ['4', 'N2O', 'Nitrous Oxide', '5', '3626425', '0', '12899', '2019', ''] # Manually add N2O molecule to table
    hitemp.loc[:, 'ID'] = pd.to_numeric(hitemp['ID']) # This line and the next convert all values in 'ID' and 'Iso Count' column to floats
    hitemp.loc[:, 'Iso Count'] = pd.to_numeric(hitemp['Iso Count'])
    hitemp.sort_values(by = 'ID', inplace = True)
    hitemp.reset_index(drop = True, inplace = True)
    counter = 0
    for tag in table.find_all('a'): # Populate the 'Download' columns with the url links to the line lists
        hitemp.loc[counter, 'Download'] = 'https://hitran.org' + tag.get('href')
        counter += 1
    return hitemp
def download_HITEMP_line_list(mol_ID, iso_ID, out_folder):
    """
    Download a line list(s) from the HITEMP database.
    The download link is looked up in HITEMP_table(); either a single .bz2
    .par file, or a page of .zip archives, is downloaded. Every resulting
    .par file is converted to HDF5.
    Parameters
    ----------
    mol_ID : int
        HITEMP/HITRAN molecule ID.
    iso_ID : int
        Isotopologue ID of the molecule.
    out_folder : String
        Local folder (path prefix) the downloaded files are written to.
    Returns
    -------
    None.
    """
    table = HITEMP_table()
    row = table.loc[table['ID'] == mol_ID]
    download_link = row.loc[row.index.values[0], 'Download']
    if download_link.endswith('.bz2'):
        # Create directory location to prepare for reading compressed file
        compressed_file = out_folder + moleculeName(mol_ID) + '.par.bz2'
        # Create a decompresser object and a directory location for the decompressed file
        decompressor = bz2.BZ2Decompressor()
        decompressed_file = out_folder + moleculeName(mol_ID) + '.par' #Keep the file name but get rid of the .bz2 extension to make it .par
        # Download file from the given URL in chunks and then decompress that chunk immediately
        with requests.get(download_link, stream=True) as request:
            with open(compressed_file, 'wb') as file, open(decompressed_file, 'wb') as output_file, tqdm(total = int(request.headers.get('content-length', 0)), unit = 'iB', unit_scale = True) as pbar:
                for chunk in request.iter_content(chunk_size = 1024 * 1024):
                    file.write(chunk)
                    pbar.update(len(chunk))
                    output_file.write(decompressor.decompress(chunk))
        # Convert line list to hdf5 file format
        print("Converting this .par file to HDF to save storage space...")
        convert_to_hdf(file = decompressed_file, mol_ID = mol_ID,
                       iso_ID = iso_ID, database = 'HITEMP')
        # Delete the compressed file
        os.remove(compressed_file)
    else:
        # 'download_link' will take us to another site containing one or more '.zip' files that need to be downloaded
        new_url = download_link
        web_content = requests.get(new_url).text
        soup = BeautifulSoup(web_content, "lxml")
        links = []
        fnames = []
        for a in soup.find_all('a'): # Parse the webpage to find all 'href' tags that end with '.zip'
            if a.get('href').endswith('.zip'):
                links.append(new_url + a.get('href'))
                fnames.append(a.get('href'))
        num_links = len(links)
        counter = 0
        for link in links:
            print("\nDownloading .zip file", counter + 1, "of", num_links)
            fname = fnames[counter]
            compressed_file = out_folder + fname
            # Download compressed version of file
            with requests.get(link, stream = True) as request:
                with open(compressed_file, 'wb') as file, tqdm(total = int(request.headers.get('content-length', 0)), unit = 'iB', unit_scale = True) as pbar:
                    for chunk in request.iter_content(chunk_size = 1024 * 1024):
                        file.write(chunk)
                        pbar.update(len(chunk))
            # Decompress the file
            with zipfile.ZipFile(compressed_file, 'r', allowZip64 = True) as file:
                print("Decompressing this file...")
                file.extractall(out_folder)
            counter += 1
            os.remove(compressed_file)
        counter = 0
        for file in os.listdir(out_folder): # Convert all downloaded files to HDF5
            if file.endswith('.par'):
                print("\nConverting .par file", counter + 1, "of", num_links, "to HDF to save storage space.")
                convert_to_hdf(file = (out_folder + file), mol_ID = mol_ID,
                               iso_ID = iso_ID, database = 'HITEMP')
                counter += 1
def convert_to_hdf(file = '', mol_ID = '', iso_ID = '', alkali = False,
                   database = '', **kwargs):
    """
    Convert a downloaded line-list file to HDF5 format; the original text
    file is deleted afterwards for the ExoMol and HITRAN/HITEMP branches.
    Parameters
    ----------
    file : String
        Path of the file to convert (.trans for ExoMol, .par for
        HITRAN/HITEMP, or a whitespace-delimited VALD transition file).
    mol_ID : int
        HITRAN molecule ID (HITRAN/HITEMP branch only).
    iso_ID : int
        HITRAN isotopologue ID (HITRAN/HITEMP branch only).
    alkali : bool
        VALD branch only: alkali species carry two extra orbital angular
        momentum columns that shift the damping columns.
    database : String
        One of 'ExoMol', 'HITRAN', 'HITEMP', 'VALD'.
    Returns
    -------
    None.
    """
    start_time = time.time()
    if (database == 'ExoMol'):
        # ExoMol .trans: upper state ID, lower state ID, Einstein A coefficient.
        trans_file = pd.read_csv(file, delim_whitespace = True, header=None, usecols = [0,1,2])
        upper_state = np.array(trans_file[0])
        lower_state = np.array(trans_file[1])
        log_Einstein_A = np.log10(np.array(trans_file[2]))
        hdf_file_path = os.path.splitext(file)[0] + '.h5'
        with h5py.File(hdf_file_path, 'w') as hdf:
            hdf.create_dataset('Upper State', data = upper_state, dtype = 'u4') #store as 32-bit unsigned int
            hdf.create_dataset('Lower State', data = lower_state, dtype = 'u4') #store as 32-bit unsigned int
            hdf.create_dataset('Log Einstein A', data = log_Einstein_A, dtype = 'f4') #store as 32-bit float
        os.remove(file)
    elif (database in ['HITRAN', 'HITEMP']):
        # Different HITRAN formats for different molecules leads us to read in .par files w/ different field widths
        if mol_ID in {1, 3, 9, 12, 20, 21, 25, 29, 31, 32, 35, 37, 38}: # Group 1
            field_lengths = [2, 1, 12, 10, 10, 5, 5, 10, 4, 8, 15, 15, 15, 3, 3, 3, 5, 1, 6, 12, 1, 7, 7]
            J_col = 13
        elif mol_ID in {10, 33}: # Group 1 - Handle HO2 and NO2 J_cols seperately, since HITRAN provides N instead of J
            field_lengths = [2, 1, 12, 10, 10, 5, 5, 10, 4, 8, 15, 15, 15, 3, 3, 3, 5, 1, 6, 12, 1, 7, 7]
            J_col = 13
            Sym_col = 17
        elif mol_ID in {2, 4, 5, 14, 15, 16, 17, 19, 22, 23, 26, 36}: # Group 2
            field_lengths = [2, 1, 12, 10, 10, 5, 5, 10, 4, 8, 15, 15, 15, 5, 1, 3, 1, 5, 6, 12, 1, 7, 7]
            J_col = 15
        elif (mol_ID == 6 and iso_ID in {1, 2}) or mol_ID == 30: # Group 3
            field_lengths = [2, 1, 12, 10, 10, 5, 5, 10, 4, 8, 15, 15, 15, 2, 3, 2, 3, 5, 6, 12, 1, 7, 7]
            J_col = 14
        elif mol_ID in {11, 24, 27, 28, 39} or (mol_ID == 6 and iso_ID in {3, 4}): # Group 4
            field_lengths = [2, 1, 12, 10, 10, 5, 5, 10, 4, 8, 15, 15, 15, 3, 3, 2, 2, 1, 4, 6, 12, 1, 7, 7]
            J_col = 13
        elif mol_ID == 7: # Group 5
            field_lengths = [2, 1, 12, 10, 10, 5, 5, 10, 4, 8, 15, 15, 15, 1, 1, 3, 1, 3, 5, 1, 6, 12, 1, 7, 7]
            J_col = 17
        elif mol_ID in {8, 18}: # Group 6
            field_lengths = [2, 1, 12, 10, 10, 5, 5, 10, 4, 8, 15, 15, 15, 3, 1, 5, 1, 5, 6, 12, 1, 7, 7]
            J_col = 15
        # OH has slightly different field widths between HITRAN and HITEMP
        if (database == 'HITRAN'):
            if mol_ID in {13}: # Group 6 - OH
                field_lengths = [2, 1, 12, 10, 10, 5, 5, 10, 4, 8, 15, 15, 15, 3, 5, 2, 5, 6, 12, 1, 7, 7]
                J_col = 14
        elif (database == 'HITEMP'):
            if mol_ID in {13}: # Group 6 - OH
                field_lengths = [2, 1, 12, 10, 10, 5, 5, 10, 4, 8, 15, 15, 15, 3, 1, 5, 1, 5, 6, 12, 1, 7, 7]
                J_col = 15
        trans_file = pd.read_fwf(file, widths=field_lengths, header=None)
        if (database == 'HITEMP'):
            trans_file = trans_file.query('@trans_file[1] == @iso_ID') # filter by the requested isotopologue ID
        # Get only the necessary columns from the .par file
        nu_0 = np.array(trans_file[2])
        # Line intensity is rescaled by the isotopologue abundance before taking log10.
        log_S_ref = np.log10(np.array(trans_file[3]) / abundance(mol_ID, iso_ID))
        gamma_L_0_air = np.array(trans_file[5]) / 1.01325 # Convert from cm^-1 / atm -> cm^-1 / bar
        E_lower = np.array(trans_file[7])
        n_L_air = np.array(trans_file[8])
        J_lower = np.array(trans_file[J_col])
        if mol_ID in {10, 33}: # Handle creation of NO2 and HO2 J_lower columns, as the given value is N on HITRAN not J
            Sym = np.array(trans_file[Sym_col])
            for i in range(len(J_lower)):
                if Sym[i] == '+':
                    J_lower[i] += 1/2
                else:
                    J_lower[i] -= 1/2
        hdf_file_path = os.path.splitext(file)[0] + '.h5'
        # Write the data to our HDF5 file
        with h5py.File(hdf_file_path, 'w') as hdf:
            hdf.create_dataset('Transition Wavenumber', data = nu_0, dtype = 'f4') #store as 32-bit float
            hdf.create_dataset('Log Line Intensity', data = log_S_ref, dtype = 'f4')
            hdf.create_dataset('Lower State E', data = E_lower, dtype = 'f4')
            hdf.create_dataset('Lower State J', data = J_lower, dtype = 'f4')
            hdf.create_dataset('Air Broadened Width', data = gamma_L_0_air, dtype = 'f4')
            hdf.create_dataset('Temperature Dependence of Air Broadening', data = n_L_air, dtype = 'f4')
        os.remove(file)
    elif (database == 'VALD'):
        trans_file = pd.read_csv(file, delim_whitespace = True, header=None, skiprows = 1)
        nu_0 = np.array(trans_file[0])
        log_gf = np.array(trans_file[1])
        E_low = np.array(trans_file[2])
        E_up = np.array(trans_file[3])
        J_low = np.array(trans_file[4])
        J_up = np.array(trans_file[5])
        # Account for the differences in columns depending on if the species is an alkali metal
        if (alkali == True):
            l_low = np.array(trans_file[6])
            l_up = np.array(trans_file[7])
            log_gamma_nat = np.array(trans_file[8])
            log_gamma_vdw = np.array(trans_file[9])
        else:
            log_gamma_nat = np.array(trans_file[6])
            log_gamma_vdw = np.array(trans_file[7])
        hdf_file_path = os.path.splitext(file)[0] + '.h5'
        with h5py.File(hdf_file_path, 'w') as hdf:
            hdf.create_dataset('nu', data = nu_0, dtype = 'f8') #store as 64-bit float
            hdf.create_dataset('Log gf', data = log_gf, dtype = 'f8') #store as 64-bit float
            hdf.create_dataset('E lower', data = E_low, dtype = 'f8') #store as 64-bit float
            hdf.create_dataset('E upper', data = E_up, dtype = 'f8') #store as 64-bit float
            hdf.create_dataset('J lower', data = J_low, dtype = 'f8') #store as 64-bit float
            hdf.create_dataset('J upper', data = J_up, dtype = 'f8') #store as 64-bit float
            hdf.create_dataset('Log gamma nat', data = log_gamma_nat, dtype = 'f8') #store as 64-bit float
            hdf.create_dataset('Log gamma vdw', data = log_gamma_vdw, dtype = 'f8') #store as 64-bit float
            if (alkali == True):
                hdf.create_dataset('l lower', data = l_low, dtype = 'f8') #store as 64-bit float
                hdf.create_dataset('l upper', data = l_up, dtype = 'f8') #store as 64-bit float
    print("This .trans file took", round(time.time() - start_time, 1), "seconds to reformat to HDF.")
def create_directories(molecule = '', isotopologue = '', line_list = '', database = '',
                       mol_ID = '', iso_ID = '', ionization_state = 1, VALD_data_dir = '',
                       **kwargs):
    '''
    Create new folders on the local machine to store the relevant line list data.

    Parameters
    ----------
    molecule : String, optional
        Molecule name (ExoMol and VALD). The default is ''.
    isotopologue : String, optional
        Isotopologue name (ExoMol only). The default is ''.
    line_list : String, optional
        Name of the line list (ExoMol only). The default is ''.
    database : String, optional
        One of 'ExoMol', 'HITRAN', 'HITEMP', or 'VALD'. The default is ''.
    mol_ID : int, optional
        HITRAN molecule ID (HITRAN/HITEMP only). The default is ''.
    iso_ID : int, optional
        HITRAN isotopologue ID (HITRAN/HITEMP only). The default is ''.
    ionization_state : int, optional
        Ionization state (VALD only). The default is 1.
    VALD_data_dir : String, optional
        Directory holding the packaged VALD line list files (VALD only).
    **kwargs : dict
        Unused; accepted for call-site flexibility.

    Returns
    -------
    line_list_folder : String
        Path of the folder in which the line list files will be stored.
    '''
    input_folder = './input'
    os.makedirs(input_folder, exist_ok = True)
    if (database == 'ExoMol'):
        molecule_folder = input_folder + '/' + molecule + ' ~ (' + isotopologue + ')'
        line_list_folder = molecule_folder + '/' + database + '/' + line_list + '/'
        # makedirs also creates the intermediate molecule and database folders
        os.makedirs(line_list_folder, exist_ok = True)
    elif (database in ['HITRAN', 'HITEMP']):
        molecule_folder = input_folder + '/' + moleculeName(mol_ID) + ' ~ '
        iso_name = isotopologueName(mol_ID, iso_ID)  # Need to format the isotopologue name to match ExoMol formatting
        # 'H' not followed by lower case letter needs to become '(1H)'
        iso_name = re.sub('H(?![a-z])', '(1H)', iso_name)
        # Number of that atom needs to be enclosed by parentheses ... so '(1H)2' becomes '(1H2)'
        matches = re.findall('[)][0-9]{1}', iso_name)
        for match in matches:
            number = re.findall('[0-9]{1}', match)
            iso_name = re.sub('[)][0-9]{1}', number[0] + ')', iso_name)
        # replace all ')(' with '-'
        iso_name = iso_name.replace(')(', '-')
        molecule_folder += iso_name
        line_list_folder = molecule_folder + '/' + database + '/'
        os.makedirs(molecule_folder, exist_ok = True)
        # If we don't remove an existing HITRAN folder, we encounter a Lonely Header exception from hapi.py
        if (database == 'HITRAN'):
            if os.path.exists(line_list_folder):
                shutil.rmtree(line_list_folder)
            os.mkdir(line_list_folder)
        else:
            os.makedirs(line_list_folder, exist_ok = True)
    elif (database == 'VALD'):
        roman_num = 'I' * ionization_state  # ionization state as a Roman numeral
        fname = molecule + '_' + roman_num + '.h5'
        molecule_folder = input_folder + '/' + molecule + ' ~ (' + roman_num + ')'
        line_list_folder = molecule_folder + '/' + database + '/'
        os.makedirs(line_list_folder, exist_ok = True)
        # Copy the VALD line list file to the newly created folder
        shutil.copy(VALD_data_dir + fname, line_list_folder + '/')
    return line_list_folder
def calc_num_ExoMol_trans(html_tags):
    """
    Count how many .trans files the line list contains.

    Parameters
    ----------
    html_tags : iterable
        Anchor tags (anything exposing a ``.get('href')`` method) scraped
        from the ExoMol line list webpage.

    Returns
    -------
    int
        Number of tags whose href contains the substring 'trans'.
    """
    # Substring membership is the idiomatic (and clearer) form of .find(...) != -1
    return sum(1 for tag in html_tags if 'trans' in tag.get('href'))
def create_ExoMol_tag_array(url, broad_URL):
    """
    Build the combined list of html tags holding the URLs we will later download.

    Parameters:
        url (string): ExoMol URL of the webpage listing the line list files
        broad_URL (string): ExoMol URL of the webpage listing the broadening files
    """
    # Fetch both webpages as text
    line_list_html = requests.get(url).text
    broadening_html = requests.get(broad_URL).text
    # Parse each page with lxml
    line_list_soup = BeautifulSoup(line_list_html, "lxml")
    broadening_soup = BeautifulSoup(broadening_html, "lxml")
    # Collect anchors by file extension (the extension appears in each tag's href);
    # order is preserved: broadening, then partition function, states, transitions.
    combined_tags = broadening_soup.find_all('a', href = re.compile("[.]broad"))
    for extension in ("[.]pf", "[.]states", "[.]trans"):
        combined_tags += line_list_soup.find_all('a', href = re.compile(extension))
    return combined_tags
def iterate_ExoMol_tags(tags, host, l_folder, line_list):
    """
    Download the file referenced by each html tag's href.

    Parameters
    ----------
    tags : iterable
        Anchor tags produced by create_ExoMol_tag_array().
    host : String
        Host name prepended to each href to form the full URL.
    l_folder : String
        Local folder the downloaded files are written to.
    line_list : String
        Name of the line list (used only for progress messages).

    Returns
    -------
    None.
    """
    num_trans = calc_num_ExoMol_trans(tags)
    trans_counter = 0
    for tag in tags:
        # Full URL = host name + href
        url = host + tag.get('href')
        # The file name is whatever follows the last '__' separator in the URL
        sep_positions = [m.start() for m in re.finditer('__', url)]
        filename = url[sep_positions[-1] + 2:]
        if filename.find('trans') != - 1:
            trans_counter += 1
            if trans_counter == 1:
                print("Fetched the broadening coefficients, partition functions, and energy levels.")
                print("Now downloading the", line_list, "line list...")
            print("\nDownloading .trans file", trans_counter, "of", num_trans)
        # Download each line list
        download_ExoMol_file(url, filename, l_folder)
def find_input_dir(input_dir, database, molecule, isotope, ionization_state, linelist):
    """
    Find the directory on a user's machine that contains the data needed to create a cross-section

    Parameters
    ----------
    input_dir : String
        'Prefix' of the directory containing the line list files. If the files were downloaded
        using our Download_Line_List.py script, input_dir will end in '/input'
    database : String
        Database the line list was downloaded from ('exomol', 'hitran', 'hitemp', or 'vald').
    molecule : String
        Molecule for which the cross-section is created.
    isotope : String
        Isotopologue of the molecule, or 'default' to auto-resolve.
    ionization_state : int
        Ionization state (VALD only).
    linelist : String
        Line list that is being used, or 'default' to auto-resolve. HITRAN/HITEMP/VALD used as
        the line list name for these databases respectively. ExoMol has its own named line lists.

    Returns
    -------
    input_directory : String
        Path of the local folder holding the requested line list data.
        Exits the interpreter with a diagnostic message if no matching folder exists.
    """
    # BUG FIX: ion_roman must be defined whenever database == 'vald', not only
    # when isotope == 'default' -- otherwise `tag = ion_roman` below raised a
    # NameError for VALD calls with an explicit isotope.
    ion_roman = ''
    if database == 'vald':
        ion_roman = 'I' * ionization_state
    if isotope == 'default':
        if database == 'exomol':
            isotope = ExoMol.get_default_iso(molecule)
        if database in ['hitran', 'hitemp']:
            molecule_dict = HITRAN.create_id_dict()
            mol_id = molecule_dict.get(molecule)
            isotope = isotopologueName(mol_id, 1)
            isotope = HITRAN.replace_iso_name(isotope)
    if linelist == 'default':
        if database == 'exomol':
            temp_isotope = re.sub('[(]|[)]', '', isotope)
            linelist = ExoMol.get_default_linelist(molecule, temp_isotope)
        if database == 'hitran':
            linelist = 'HITRAN'
        if database == 'hitemp':
            linelist = 'HITEMP'
        if database == 'vald':
            linelist = 'VALD'
    # VALD folders are tagged by ionization state; the rest by isotopologue
    if database == 'vald':
        tag = ion_roman
    else:
        tag = isotope
    if (database == 'exomol'):
        input_directory = (input_dir + molecule + ' ~ (' + tag + ')/' +
                           'ExoMol' + '/' + linelist + '/')
    else:
        input_directory = (input_dir + molecule + ' ~ (' + tag + ')/' +
                           linelist + '/')
    if os.path.exists(input_directory):
        return input_directory
    else:
        print("You don't seem to have a local folder with the parameters you entered.\n")
        if not os.path.exists(input_dir + '/'):
            print("----- You entered an invalid input directory into the cross_section() function. Please try again. -----")
            sys.exit(0)
        elif not os.path.exists(input_dir + '/' + molecule + ' ~ (' + tag + ')/'):
            print("----- There was an error with the molecule + isotope you entered. Here are the available options: -----\n")
            for folder in os.listdir(input_dir + '/'):
                if not folder.startswith('.'):
                    print(folder)
            sys.exit(0)
        else:
            print("There was an error with the line list. These are the linelists available: \n")
            for folder in os.listdir(input_dir + '/' + molecule + ' ~ (' + tag + ')/'):
                if not folder.startswith('.'):
                    print(folder)
            sys.exit(0)
def parse_directory(directory, database):
    """
    Determine which linelist and isotopologue this directory contains data for
    (assumes the data was downloaded using our script).

    Parameters
    ----------
    directory : String
        Local directory containing the line list file[s], broadening data, and partition function
    database : String
        Database name; 'exomol' directories nest the line list one level deeper.

    Returns
    -------
    linelist : String
        Line list which the cross-section is being calculated for.
    isotopologue : String
        Isotopologue which the cross-section is being calculated for.
    """
    abs_path = os.path.abspath(directory)
    linelist = os.path.basename(abs_path)
    # ExoMol layout: .../<molecule ~ (iso)>/ExoMol/<linelist>, so climb two
    # levels to reach the molecule folder; other databases need only one.
    levels_up = 2 if database == 'exomol' else 1
    molecule_dir = abs_path
    for _ in range(levels_up):
        molecule_dir = os.path.dirname(molecule_dir)
    molecule = os.path.basename(molecule_dir)
    # Folder names look like '<molecule> ~ (<isotope>)'; keep the isotope part
    isotopologue = re.sub('.+[ ~]', '', molecule)
    return linelist, isotopologue
| [
"os.mkdir",
"os.remove",
"pandas.read_csv",
"re.finditer",
"shutil.rmtree",
"excalibur.HITRAN.create_id_dict",
"shutil.copy",
"os.path.abspath",
"os.path.dirname",
"os.path.exists",
"re.findall",
"requests.get",
"excalibur.ExoMol.get_default_linelist",
"re.sub",
"h5py.File",
"excalibur... | [((4231, 4265), 'bs4.BeautifulSoup', 'BeautifulSoup', (['web_content', '"""lxml"""'], {}), "(web_content, 'lxml')\n", (4244, 4265), False, 'from bs4 import BeautifulSoup\n'), ((5878, 5905), 'pandas.to_numeric', 'pd.to_numeric', (["hitemp['ID']"], {}), "(hitemp['ID'])\n", (5891, 5905), True, 'import pandas as pd\n'), ((6025, 6059), 'pandas.to_numeric', 'pd.to_numeric', (["hitemp['Iso Count']"], {}), "(hitemp['Iso Count'])\n", (6038, 6059), True, 'import pandas as pd\n'), ((10701, 10712), 'time.time', 'time.time', ([], {}), '()\n', (10710, 10712), False, 'import time\n'), ((22276, 22310), 'bs4.BeautifulSoup', 'BeautifulSoup', (['web_content', '"""lxml"""'], {}), "(web_content, 'lxml')\n", (22289, 22310), False, 'from bs4 import BeautifulSoup\n'), ((22323, 22364), 'bs4.BeautifulSoup', 'BeautifulSoup', (['broadening_content', '"""lxml"""'], {}), "(broadening_content, 'lxml')\n", (22336, 22364), False, 'from bs4 import BeautifulSoup\n'), ((26399, 26430), 'os.path.exists', 'os.path.exists', (['input_directory'], {}), '(input_directory)\n', (26413, 26430), False, 'import os\n'), ((28039, 28065), 'os.path.abspath', 'os.path.abspath', (['directory'], {}), '(directory)\n', (28054, 28065), False, 'import os\n'), ((28081, 28113), 'os.path.basename', 'os.path.basename', (['directory_name'], {}), '(directory_name)\n', (28097, 28113), False, 'import os\n'), ((28426, 28458), 'os.path.basename', 'os.path.basename', (['directory_name'], {}), '(directory_name)\n', (28442, 28458), False, 'import os\n'), ((28684, 28715), 're.sub', 're.sub', (['""".+[ ~]"""', '""""""', 'molecule'], {}), "('.+[ ~]', '', molecule)\n", (28690, 28715), False, 'import re\n'), ((1293, 1314), 'bz2.BZ2Decompressor', 'bz2.BZ2Decompressor', ([], {}), '()\n', (1312, 1314), False, 'import bz2\n'), ((2772, 2798), 'os.remove', 'os.remove', (['compressed_file'], {}), '(compressed_file)\n', (2781, 2798), False, 'import os\n'), ((4192, 4209), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', 
(4204, 4209), False, 'import requests\n'), ((7200, 7221), 'bz2.BZ2Decompressor', 'bz2.BZ2Decompressor', ([], {}), '()\n', (7219, 7221), False, 'import bz2\n'), ((8296, 8322), 'os.remove', 'os.remove', (['compressed_file'], {}), '(compressed_file)\n', (8305, 8322), False, 'import os\n'), ((8574, 8608), 'bs4.BeautifulSoup', 'BeautifulSoup', (['web_content', '"""lxml"""'], {}), "(web_content, 'lxml')\n", (8587, 8608), False, 'from bs4 import BeautifulSoup\n'), ((10000, 10022), 'os.listdir', 'os.listdir', (['out_folder'], {}), '(out_folder)\n', (10010, 10022), False, 'import os\n'), ((10775, 10847), 'pandas.read_csv', 'pd.read_csv', (['file'], {'delim_whitespace': '(True)', 'header': 'None', 'usecols': '[0, 1, 2]'}), '(file, delim_whitespace=True, header=None, usecols=[0, 1, 2])\n', (10786, 10847), True, 'import pandas as pd\n'), ((10881, 10904), 'numpy.array', 'np.array', (['trans_file[0]'], {}), '(trans_file[0])\n', (10889, 10904), True, 'import numpy as np\n'), ((10927, 10950), 'numpy.array', 'np.array', (['trans_file[1]'], {}), '(trans_file[1])\n', (10935, 10950), True, 'import numpy as np\n'), ((11490, 11505), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (11499, 11505), False, 'import os\n'), ((18561, 18589), 'os.path.exists', 'os.path.exists', (['input_folder'], {}), '(input_folder)\n', (18575, 18589), False, 'import os\n'), ((18608, 18630), 'os.mkdir', 'os.mkdir', (['input_folder'], {}), '(input_folder)\n', (18616, 18630), False, 'import os\n'), ((22158, 22175), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (22170, 22175), False, 'import requests\n'), ((22206, 22229), 'requests.get', 'requests.get', (['broad_URL'], {}), '(broad_URL)\n', (22218, 22229), False, 'import requests\n'), ((23517, 23539), 're.finditer', 're.finditer', (['"""__"""', 'url'], {}), "('__', url)\n", (23528, 23539), False, 'import re\n'), ((28251, 28282), 'os.path.dirname', 'os.path.dirname', (['directory_name'], {}), '(directory_name)\n', (28266, 28282), False, 'import 
os\n'), ((28308, 28339), 'os.path.dirname', 'os.path.dirname', (['directory_name'], {}), '(directory_name)\n', (28323, 28339), False, 'import os\n'), ((28378, 28409), 'os.path.dirname', 'os.path.dirname', (['directory_name'], {}), '(directory_name)\n', (28393, 28409), False, 'import os\n'), ((936, 956), 'os.listdir', 'os.listdir', (['l_folder'], {}), '(l_folder)\n', (946, 956), False, 'import os\n'), ((1573, 1603), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (1585, 1603), False, 'import requests\n'), ((3126, 3156), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (3138, 3156), False, 'import requests\n'), ((7486, 7526), 'requests.get', 'requests.get', (['download_link'], {'stream': '(True)'}), '(download_link, stream=True)\n', (7498, 7526), False, 'import requests\n'), ((8527, 8548), 'requests.get', 'requests.get', (['new_url'], {}), '(new_url)\n', (8539, 8548), False, 'import requests\n'), ((9916, 9942), 'os.remove', 'os.remove', (['compressed_file'], {}), '(compressed_file)\n', (9925, 9942), False, 'import os\n'), ((10985, 11008), 'numpy.array', 'np.array', (['trans_file[2]'], {}), '(trans_file[2])\n', (10993, 11008), True, 'import numpy as np\n'), ((11102, 11131), 'h5py.File', 'h5py.File', (['hdf_file_path', '"""w"""'], {}), "(hdf_file_path, 'w')\n", (11111, 11131), False, 'import h5py\n'), ((13780, 13832), 'pandas.read_fwf', 'pd.read_fwf', (['file'], {'widths': 'field_lengths', 'header': 'None'}), '(file, widths=field_lengths, header=None)\n', (13791, 13832), True, 'import pandas as pd\n'), ((14078, 14101), 'numpy.array', 'np.array', (['trans_file[2]'], {}), '(trans_file[2])\n', (14086, 14101), True, 'import numpy as np\n'), ((14304, 14327), 'numpy.array', 'np.array', (['trans_file[7]'], {}), '(trans_file[7])\n', (14312, 14327), True, 'import numpy as np\n'), ((14346, 14369), 'numpy.array', 'np.array', (['trans_file[8]'], {}), '(trans_file[8])\n', (14354, 14369), True, 'import 
numpy as np\n'), ((14388, 14415), 'numpy.array', 'np.array', (['trans_file[J_col]'], {}), '(trans_file[J_col])\n', (14396, 14415), True, 'import numpy as np\n'), ((15513, 15528), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (15522, 15528), False, 'import os\n'), ((18933, 18964), 'os.path.exists', 'os.path.exists', (['molecule_folder'], {}), '(molecule_folder)\n', (18947, 18964), False, 'import os\n'), ((18987, 19012), 'os.mkdir', 'os.mkdir', (['molecule_folder'], {}), '(molecule_folder)\n', (18995, 19012), False, 'import os\n'), ((19037, 19068), 'os.path.exists', 'os.path.exists', (['database_folder'], {}), '(database_folder)\n', (19051, 19068), False, 'import os\n'), ((19091, 19116), 'os.mkdir', 'os.mkdir', (['database_folder'], {}), '(database_folder)\n', (19099, 19116), False, 'import os\n'), ((19141, 19173), 'os.path.exists', 'os.path.exists', (['line_list_folder'], {}), '(line_list_folder)\n', (19155, 19173), False, 'import os\n'), ((19196, 19222), 'os.mkdir', 'os.mkdir', (['line_list_folder'], {}), '(line_list_folder)\n', (19204, 19222), False, 'import os\n'), ((19586, 19624), 're.sub', 're.sub', (['"""H(?![a-z])"""', '"""(1H)"""', 'iso_name'], {}), "('H(?![a-z])', '(1H)', iso_name)\n", (19592, 19624), False, 'import re\n'), ((19749, 19784), 're.findall', 're.findall', (['"""[)][0-9]{1}"""', 'iso_name'], {}), "('[)][0-9]{1}', iso_name)\n", (19759, 19784), False, 'import re\n'), ((22496, 22518), 're.compile', 're.compile', (['"""[.]broad"""'], {}), "('[.]broad')\n", (22506, 22518), False, 'import re\n'), ((22560, 22579), 're.compile', 're.compile', (['"""[.]pf"""'], {}), "('[.]pf')\n", (22570, 22579), False, 'import re\n'), ((22625, 22648), 're.compile', 're.compile', (['"""[.]states"""'], {}), "('[.]states')\n", (22635, 22648), False, 'import re\n'), ((22693, 22715), 're.compile', 're.compile', (['"""[.]trans"""'], {}), "('[.]trans')\n", (22703, 22715), False, 'import re\n'), ((25187, 25219), 'excalibur.ExoMol.get_default_iso', 
'ExoMol.get_default_iso', (['molecule'], {}), '(molecule)\n', (25209, 25219), True, 'import excalibur.ExoMol as ExoMol\n'), ((25293, 25316), 'excalibur.HITRAN.create_id_dict', 'HITRAN.create_id_dict', ([], {}), '()\n', (25314, 25316), True, 'import excalibur.HITRAN as HITRAN\n'), ((25438, 25470), 'excalibur.HITRAN.replace_iso_name', 'HITRAN.replace_iso_name', (['isotope'], {}), '(isotope)\n', (25461, 25470), True, 'import excalibur.HITRAN as HITRAN\n'), ((25692, 25722), 're.sub', 're.sub', (['"""[(]|[)]"""', '""""""', 'isotope'], {}), "('[(]|[)]', '', isotope)\n", (25698, 25722), False, 'import re\n'), ((25746, 25797), 'excalibur.ExoMol.get_default_linelist', 'ExoMol.get_default_linelist', (['molecule', 'temp_isotope'], {}), '(molecule, temp_isotope)\n', (25773, 25797), True, 'import excalibur.ExoMol as ExoMol\n'), ((26593, 26624), 'os.path.exists', 'os.path.exists', (["(input_dir + '/')"], {}), "(input_dir + '/')\n", (26607, 26624), False, 'import os\n'), ((26763, 26774), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (26771, 26774), False, 'import sys\n'), ((1360, 1379), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (1376, 1379), False, 'import os\n'), ((9247, 9278), 'requests.get', 'requests.get', (['link'], {'stream': '(True)'}), '(link, stream=True)\n', (9259, 9278), False, 'import requests\n'), ((9700, 9754), 'zipfile.ZipFile', 'zipfile.ZipFile', (['compressed_file', '"""r"""'], {'allowZip64': '(True)'}), "(compressed_file, 'r', allowZip64=True)\n", (9715, 9754), False, 'import zipfile\n'), ((11046, 11068), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (11062, 11068), False, 'import os\n'), ((14208, 14231), 'numpy.array', 'np.array', (['trans_file[5]'], {}), '(trans_file[5])\n', (14216, 14231), True, 'import numpy as np\n'), ((14565, 14594), 'numpy.array', 'np.array', (['trans_file[Sym_col]'], {}), '(trans_file[Sym_col])\n', (14573, 14594), True, 'import numpy as np\n'), ((14900, 14929), 'h5py.File', 'h5py.File', 
(['hdf_file_path', '"""w"""'], {}), "(hdf_file_path, 'w')\n", (14909, 14929), False, 'import h5py\n'), ((15591, 15656), 'pandas.read_csv', 'pd.read_csv', (['file'], {'delim_whitespace': '(True)', 'header': 'None', 'skiprows': '(1)'}), '(file, delim_whitespace=True, header=None, skiprows=1)\n', (15602, 15656), True, 'import pandas as pd\n'), ((15685, 15708), 'numpy.array', 'np.array', (['trans_file[0]'], {}), '(trans_file[0])\n', (15693, 15708), True, 'import numpy as np\n'), ((15726, 15749), 'numpy.array', 'np.array', (['trans_file[1]'], {}), '(trans_file[1])\n', (15734, 15749), True, 'import numpy as np\n'), ((15766, 15789), 'numpy.array', 'np.array', (['trans_file[2]'], {}), '(trans_file[2])\n', (15774, 15789), True, 'import numpy as np\n'), ((15805, 15828), 'numpy.array', 'np.array', (['trans_file[3]'], {}), '(trans_file[3])\n', (15813, 15828), True, 'import numpy as np\n'), ((15845, 15868), 'numpy.array', 'np.array', (['trans_file[4]'], {}), '(trans_file[4])\n', (15853, 15868), True, 'import numpy as np\n'), ((15884, 15907), 'numpy.array', 'np.array', (['trans_file[5]'], {}), '(trans_file[5])\n', (15892, 15907), True, 'import numpy as np\n'), ((17582, 17593), 'time.time', 'time.time', ([], {}), '()\n', (17591, 17593), False, 'import time\n'), ((19836, 19865), 're.findall', 're.findall', (['"""[0-9]{1}"""', 'match'], {}), "('[0-9]{1}', match)\n", (19846, 19865), False, 'import re\n'), ((19889, 19937), 're.sub', 're.sub', (['"""[)][0-9]{1}"""', "(number[0] + ')')", 'iso_name'], {}), "('[)][0-9]{1}', number[0] + ')', iso_name)\n", (19895, 19937), False, 'import re\n'), ((20170, 20201), 'os.path.exists', 'os.path.exists', (['molecule_folder'], {}), '(molecule_folder)\n', (20184, 20201), False, 'import os\n'), ((20224, 20249), 'os.mkdir', 'os.mkdir', (['molecule_folder'], {}), '(molecule_folder)\n', (20232, 20249), False, 'import os\n'), ((20421, 20453), 'os.path.exists', 'os.path.exists', (['line_list_folder'], {}), '(line_list_folder)\n', (20435, 20453), False, 
'import os\n'), ((20518, 20544), 'os.mkdir', 'os.mkdir', (['line_list_folder'], {}), '(line_list_folder)\n', (20526, 20544), False, 'import os\n'), ((21334, 21392), 'shutil.copy', 'shutil.copy', (['(VALD_data_dir + fname)', "(line_list_folder + '/')"], {}), "(VALD_data_dir + fname, line_list_folder + '/')\n", (21345, 21392), False, 'import shutil\n'), ((26801, 26867), 'os.path.exists', 'os.path.exists', (["(input_dir + '/' + molecule + ' ~ (' + tag + ')/')"], {}), "(input_dir + '/' + molecule + ' ~ (' + tag + ')/')\n", (26815, 26867), False, 'import os\n'), ((27022, 27049), 'os.listdir', 'os.listdir', (["(input_dir + '/')"], {}), "(input_dir + '/')\n", (27032, 27049), False, 'import os\n'), ((27144, 27155), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (27152, 27155), False, 'import sys\n'), ((27303, 27365), 'os.listdir', 'os.listdir', (["(input_dir + '/' + molecule + ' ~ (' + tag + ')/')"], {}), "(input_dir + '/' + molecule + ' ~ (' + tag + ')/')\n", (27313, 27365), False, 'import os\n'), ((27460, 27471), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (27468, 27471), False, 'import sys\n'), ((14131, 14154), 'numpy.array', 'np.array', (['trans_file[3]'], {}), '(trans_file[3])\n', (14139, 14154), True, 'import numpy as np\n'), ((14802, 14824), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (14818, 14824), False, 'import os\n'), ((16064, 16087), 'numpy.array', 'np.array', (['trans_file[6]'], {}), '(trans_file[6])\n', (16072, 16087), True, 'import numpy as np\n'), ((16107, 16130), 'numpy.array', 'np.array', (['trans_file[7]'], {}), '(trans_file[7])\n', (16115, 16130), True, 'import numpy as np\n'), ((16159, 16182), 'numpy.array', 'np.array', (['trans_file[8]'], {}), '(trans_file[8])\n', (16167, 16182), True, 'import numpy as np\n'), ((16211, 16234), 'numpy.array', 'np.array', (['trans_file[9]'], {}), '(trans_file[9])\n', (16219, 16234), True, 'import numpy as np\n'), ((16290, 16313), 'numpy.array', 'np.array', (['trans_file[6]'], {}), 
'(trans_file[6])\n', (16298, 16313), True, 'import numpy as np\n'), ((16342, 16365), 'numpy.array', 'np.array', (['trans_file[7]'], {}), '(trans_file[7])\n', (16350, 16365), True, 'import numpy as np\n'), ((16459, 16488), 'h5py.File', 'h5py.File', (['hdf_file_path', '"""w"""'], {}), "(hdf_file_path, 'w')\n", (16468, 16488), False, 'import h5py\n'), ((20474, 20505), 'shutil.rmtree', 'shutil.rmtree', (['line_list_folder'], {}), '(line_list_folder)\n', (20487, 20505), False, 'import shutil\n'), ((20587, 20619), 'os.path.exists', 'os.path.exists', (['line_list_folder'], {}), '(line_list_folder)\n', (20601, 20619), False, 'import os\n'), ((20646, 20672), 'os.mkdir', 'os.mkdir', (['line_list_folder'], {}), '(line_list_folder)\n', (20654, 20672), False, 'import os\n'), ((21056, 21087), 'os.path.exists', 'os.path.exists', (['molecule_folder'], {}), '(molecule_folder)\n', (21070, 21087), False, 'import os\n'), ((21110, 21135), 'os.mkdir', 'os.mkdir', (['molecule_folder'], {}), '(molecule_folder)\n', (21118, 21135), False, 'import os\n'), ((21160, 21192), 'os.path.exists', 'os.path.exists', (['line_list_folder'], {}), '(line_list_folder)\n', (21174, 21192), False, 'import os\n'), ((21219, 21245), 'os.mkdir', 'os.mkdir', (['line_list_folder'], {}), '(line_list_folder)\n', (21227, 21245), False, 'import os\n'), ((16403, 16425), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (16419, 16425), False, 'import os\n'), ((897, 916), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (913, 916), False, 'import os\n')] |
# Copyright (C) 2014-2021 Syntrogi Inc dba Intheon. All rights reserved.
import sys
import logging
import json
from typing import Tuple, List
import numpy as np
import pandas as pd
from qtpy import QtCore
from qtpy import QtGui
from qtpy import QtWidgets
from stream_viewer.buffers import StreamDataBuffer, MergeLastOnlyBuffer, TimeSeriesBuffer
from stream_viewer.data.data_source import IDataSource
logger = logging.getLogger(__name__)
class RendererFormatData(QtCore.QObject):
chan_states_changed = QtCore.Signal(QtCore.QObject)
# Use these class variables in subclass renderers to help indicate capabilities.
COMPAT_ICONTROL = [] # Which control panels work with this renderer?
# Class variable used for saving/restoring settings from ini. Extend this in subclass.
gui_kwargs = {'upper_limit': float, 'lower_limit': float, 'highpass_cutoff': float}
def __init__(self,
key: str = None,
lower_limit: float = -1.0,
upper_limit: float = 1.0,
highpass_cutoff: float = 0.0,
frozen: bool = False,
flush_on_unfreeze: bool = True,
**kwargs):
"""
A data-parent-class for a renderer. This class outlines the interface for managing data streams,
retrieving and formatting data, and passing it off to visualization methods (supported by a
display-parent-class). This is effectively an abstract base class, but we cannot make it explicitly ABC due to
conflicts with its inheritance of QtCore.QObject that is required for the signals/slots mechanism.
Concrete classes are assumed to have a `reset_renderer(self, reset_channel_labels: bool)` method.
This can be provided by cooperative inheritance of a stream_viewer.renderers.display class.
Args:
key: A unique value, used by controlling application for comparison/lookup.
lower_limit: The data lower limit. The upper limit is controlled by upper_limit.
upper_limit: 1 unit of the renderer axis corresponds to this many units in the data.
highpass_cutoff: Corner frequency for highpass filter (8th order Butter). Set to 0 to disable.
frozen: indicates whether the renderer should start intentionally frozen - if True then the timer
won't start until my_renderer.unfreeze() is called.
flush_on_unfreeze: Whether the data source should be flushed when transitioning from frozen to unfrozen.
Set this to False to allow the data source to accumulate data; this should only be used with
renderers and data sources that can pass data quickly.
**kwargs:
"""
self._frozen = frozen
self._key = key
self._data_sources: List[IDataSource] = []
self._buffers: List[StreamDataBuffer] = []
self._chan_states = pd.DataFrame(columns=['name', 'src', 'unit', 'type', 'pos'])
self._sep_chan_states: List[pd.DataFrame] = [] # Convenience list for faster data sorting.
self._lower_limit = lower_limit
self._upper_limit = upper_limit
self._highpass_cutoff = highpass_cutoff
self._flush_on_unfreeze = flush_on_unfreeze
super().__init__(**kwargs) # parent=kwargs.pop('parent', None))
def add_source(self, data_source: IDataSource):
data_source.highpass_cutoff = self._highpass_cutoff
self._data_sources.append(data_source)
data_source.state_changed.connect(self.handle_source_changed) # Listen for later changes
self.handle_source_changed(data_source) # Manual handling immediately.
@QtCore.Slot(IDataSource)
def handle_source_changed(self, data_source) -> None:
"""
Called immediately by add_source but also when a source emits a `state_changed` signal.
Iterates through all the sources and rebuilds chan_states.
Setting chan_states will trigger `self.reset`
Args:
data_source (IDataSource):
"""
new_chan_states = []
for src in self._data_sources:
cfg = src.data_stats
src_key = src.identifier
for ch_state in cfg['chan_states']:
ch_state['src'] = src_key
new_chan_states.extend(cfg['chan_states'])
self.chan_states = new_chan_states # Triggers chan_states_changed.emit and self.reset()
def reset(self, reset_channel_labels=True):
self.reset_buffers()
self.reset_renderer(reset_channel_labels=reset_channel_labels) # Assumes stream_viewer.renderers.display mix-in
if len(self.chan_states) > 0:
# Keep a list of separated chan_states --> useful in `fetch_data`
self._sep_chan_states = []
for src_ix, src in enumerate(self._data_sources):
self._sep_chan_states.append(self.chan_states.loc[self.chan_states['src'] == src.identifier])
# Restart the renderer timer. This might be the first call to actually start the visualization.
if not self.frozen:
self.restart_timer()
    def reset_buffers(self):
        # Abstract hook: concrete renderers must clear/rebuild their data buffers here.
        raise NotImplementedError("Subclass must implement reset_buffers")
def remove_source(self, data_source: IDataSource):
self._data_sources = [_ for _ in self._data_sources if _ is not data_source]
src_id = data_source.identifier
if src_id in self.chan_states['src']:
try:
data_source.state_changed.disconnect(self.handle_source_changed)
except TypeError:
pass
self.handle_source_changed(data_source)
def freeze(self) -> None:
"""
Stops the renderer timer and puts the data sources into monitor mode
(the latter causes them to auto-flush their data regularly so their remotes
don't fill up on data).
Returns: None
"""
self.stop_timer() # Implemented in render lib mix-in
if self._flush_on_unfreeze:
for src in self._data_sources:
if hasattr(src, 'monitor_mode'):
src.monitor_mode = True # Flush samples. Only matters with auto_timer --> update_requested
src.start_auto_timer() # Will fetch samples without being asked.
self._frozen = True
def unfreeze(self) -> None:
"""
Resumes the renderer timer and prevents the data source from flushing data from the remote.
Returns: None
"""
self._frozen = False
if self._flush_on_unfreeze:
for src in self._data_sources:
if hasattr(src, 'monitor_mode'):
src.monitor_mode = False # Only matters with auto_timer --> update_requested, not fetch_data
src.update_requested() # To clear the stream queue one last time.
src.stop_auto_timer()
# reset: reset_buffers -> reset_renderer -> if we have chan_states then restart_timer
self.reset()
    @property
    def frozen(self):
        # True while rendering is intentionally paused (see freeze()/unfreeze()).
        return self._frozen
def save_settings(self, settings='orphan.ini'):
from pathlib import Path
import vispy.color.color_array
if isinstance(settings, str):
settings = Path(settings)
if isinstance(settings, Path):
if not settings.exists():
home_dir = Path(QtCore.QStandardPaths.writableLocation(QtCore.QStandardPaths.HomeLocation))
settings = home_dir / '.stream_viewer' / settings.name
settings = QtCore.QSettings(str(settings), QtCore.QSettings.IniFormat)
settings.beginGroup(self.key)
settings.setValue("renderer", self.__class__.__name__)
settings.beginGroup("data_sources")
for ix, src in enumerate(self._data_sources):
settings.beginGroup(str(ix))
settings.setValue("class", src.__class__.__name__)
settings.setValue("identifier", src.identifier)
settings.endGroup()
settings.endGroup()
for attr_name in self.gui_kwargs.keys():
val = getattr(self, attr_name)
if isinstance(val, vispy.color.color_array.Color):
val = str(val.hex)
settings.setValue(attr_name, val)
settings.endGroup()
return settings
def zoom(self, scale_fac=1.1):
new_range = np.abs(self.upper_limit - self.lower_limit) / scale_fac
if self.lower_limit == 0:
self.upper_limit = new_range
elif self.upper_limit == 0:
self.lower_limit = -new_range
else:
midpoint = (self.lower_limit + self.upper_limit) / 2
self._lower_limit = midpoint - (0.5 * new_range) # Don't trigger reset.
self.upper_limit = midpoint + (0.5 * new_range)
@property
def key(self):
    """Unique identifier for this renderer.

    Returns the explicitly-set key if one exists, otherwise derives
    '<ClassName>|<first stream name>' from the first data source.
    """
    if self._key is not None:
        return self._key
    derived = self.__class__.__name__
    if self._data_sources:
        first = json.loads(self._data_sources[0].identifier)
        derived = derived + '|' + first['name']
    return derived
# ------------------------------ #
# Properties exposed via Widgets #
# ------------------------------ #
# Only include properties common to all renderers and managed by IControlPanel
# Properties specific to individual renderers go in those renderer subclasses.
@property
def chan_states(self):
    """Per-channel state table (pandas DataFrame)."""
    return self._chan_states
@chan_states.setter
def chan_states(self, value: List[dict]):
    """Replace the channel-state table, notify listeners, and rebuild the view."""
    if value:
        self._chan_states = pd.DataFrame(value)
    else:
        # Keep the expected column layout even when there are no channels.
        self._chan_states = pd.DataFrame(columns=['name', 'src', 'unit', 'type', 'pos'])
    self.chan_states_changed.emit(self)
    self.reset(reset_channel_labels=True)
@QtCore.Slot(QtWidgets.QTreeWidgetItem, int)
def chantree_itemChanged(self, item, column):
    """Sync a channel's visibility flag when its tree item is (un)checked."""
    label = item.text(column)
    checked = item.checkState(column) > 0
    states = self.chan_states
    # NOTE(review): `states` aliases the live DataFrame; assigning through the
    # property afterwards re-emits change notifications and rebuilds the view.
    states.loc[self.chan_states['name'] == label, 'vis'] = checked
    self.chan_states = states
@property
def lower_limit(self):
    """Lower bound of the displayed value range."""
    return self._lower_limit
@lower_limit.setter
def lower_limit(self, value):
    # Clamp so the lower bound always stays strictly below the upper bound.
    self._lower_limit = min(value, self.upper_limit - sys.float_info.epsilon)
    self.reset_renderer(reset_channel_labels=False)
@QtCore.Slot(float)
def lower_limit_valueChanged(self, new_limit_val):
    """Qt slot: forward a GUI value change to the `lower_limit` property."""
    self.lower_limit = new_limit_val
@property
def upper_limit(self):
    """Upper bound of the displayed value range."""
    return self._upper_limit
@upper_limit.setter
def upper_limit(self, value):
    # Clamp so the upper bound always stays strictly above the lower bound.
    self._upper_limit = max(value, self.lower_limit + sys.float_info.epsilon)
    self.reset_renderer(reset_channel_labels=False)
@QtCore.Slot(float)
def upper_limit_valueChanged(self, value):
    """Qt slot: forward a GUI value change to the `upper_limit` property."""
    self.upper_limit = value
@property
def highpass_cutoff(self):
    """High-pass filter cutoff applied to the data sources."""
    return self._highpass_cutoff
@highpass_cutoff.setter
def highpass_cutoff(self, value):
    # Clamp to non-negative, then propagate the cutoff to every data source.
    self._highpass_cutoff = max(value, 0)
    for source in self._data_sources:
        source.highpass_cutoff = self._highpass_cutoff
@QtCore.Slot(float)
def highpass_cutoff_valueChanged(self, value):
    """Qt slot: forward a GUI value change to the `highpass_cutoff` property."""
    self.highpass_cutoff = value
class RendererBufferData(RendererFormatData):
    # Accepted values for `auto_scale` (compared case-insensitively in fetch_data).
    autoscale_modes = ["None", "By-Channel", "By-Stream"]
    # GUI-exposed settable properties: the parent's plus this class's additions.
    gui_kwargs = dict(RendererFormatData.gui_kwargs, plot_mode=str, duration=float, auto_scale=str)

    def __init__(self,
                 plot_mode: str = "Scrolling",
                 duration: float = 2.0,
                 auto_scale: str = "none",
                 **kwargs,
                 ):
        """
        RendererBufferData uses a different buffer for each stream. Its `fetch_data` method returns a tuple of
        data and timestamp lists, with each list having a ndarray for each stream. Thus the renderer's
        `update_visualization` method, which receives the output of `fetch_data`, must be able to handle these lists.
        Optionally the returned data can be auto-scaled by the min,max of the data in the buffer by-channel or by-stream.
        Args:
            plot_mode: 'Sweep', or 'Scrolling'
            duration: Plotted duration on screen (in seconds)
            auto_scale: Options for auto-scaling data. Valid values are "none", "by-channel", "by-stream".
                If not "none", data values will be scaled so [min, max] is mapped [0, 1].
                "by-channel" scales each channel independently, "by-stream" scales all channels by the global min, max.
            **kwargs:
        """
        # Set private attributes first: super().__init__ may trigger reset_buffers,
        # which reads plot_mode and _duration.
        self._plot_mode = plot_mode
        self._duration = duration
        self._auto_scale = auto_scale
        super().__init__(**kwargs)

    def reset_buffers(self):
        """Allocate one TimeSeriesBuffer per data source, sized to the visible channels."""
        self._buffers = []
        for src_ix, src in enumerate(self._data_sources):
            src_stats = src.data_stats
            _buffer = TimeSeriesBuffer(mode=self.plot_mode, srate=src_stats['srate'], duration=self._duration,
                                       indicate_write_index=True)
            # Channels belonging to this source; count only the visible ones when
            # a 'vis' column exists.
            this_chans = self.chan_states.loc[self.chan_states['src'] == src.identifier]
            n_chans = this_chans['vis'].sum() if 'vis' in this_chans else len(this_chans)
            _buffer.reset(n_chans)
            self._buffers.append(_buffer)

    def fetch_data(self) -> Tuple[List[Tuple[np.ndarray, np.ndarray]],
                                  List[Tuple[np.ndarray, np.ndarray]]]:
        """Drain each source into its buffer and return per-stream data/timestamps.

        Returns:
            (collect_data, collect_timestamps) with one entry per source:
            collect_data[i] is (data, markers) and collect_timestamps[i] is
            (data_ts, mrk_ts). Non-marker data is optionally rescaled to [0, 1].
        """
        # Pre-fill with empty arrays so sources that produced nothing still
        # have a well-typed placeholder.
        collect_data = [(np.array([[]], dtype=_._data.dtype), np.array([], dtype=object)) for _ in self._buffers]
        collect_timestamps = [(np.array([]), np.array([])) for _ in self._buffers]
        for src_ix, src in enumerate(self._data_sources):
            data, timestamps = src.fetch_data()
            if data.size == 0:
                continue
            chan_states = self._sep_chan_states[src_ix]
            self._buffers[src_ix].update(data, timestamps, chan_states)
            data, timestamps = self._buffers[src_ix].contents  # (.data, .markers), (.data_ts, .mrk_ts)
            # Optional auto-scaling, but only on non-marker data.
            if self._auto_scale.lower() != 'none' and np.any(data[0]) and not np.any(data[1]):
                _data = data[0]
                if self._auto_scale.lower() == 'by-channel':
                    # Per-channel min/max along the time axis.
                    _min = np.nanmin(_data, axis=1, keepdims=True)
                    _max = np.nanmax(_data, axis=1, keepdims=True)
                else:  # 'by-stream' -- we do not have a 'global' or 'all' that crosses streams.
                    # Broadcast the global min/max to one value per channel row.
                    _min = np.nanmin(_data) + np.zeros((_data.shape[0], 1), dtype=_data.dtype)
                    _max = np.nanmax(_data) + np.zeros((_data.shape[0], 1), dtype=_data.dtype)
                _range = _max - _min
                # Guard against flat channels: scaling by ~zero range would blow up.
                b_valid_range = _range.flatten() > np.finfo(np.float32).eps
                coef = np.zeros_like(_range)
                coef[b_valid_range] = (1 - 0) / _range[b_valid_range]
                _data = _data - _min  # Don't use -=; we want a copy here.
                np.multiply(_data, coef, out=_data)
                # Channels with no usable range are pinned to mid-scale.
                _data[~b_valid_range] = 0.5
                data = (_data, data[1])
                # np.add(dat, 0, out=dat)
            collect_timestamps[src_ix] = tuple(timestamps)
            collect_data[src_ix] = tuple(data)
        return collect_data, collect_timestamps

    @property
    def duration(self):
        # Plotted duration on screen, in seconds.
        return self._duration

    @duration.setter
    def duration(self, value):
        self._duration = value
        # Buffers are sized from duration, so a full reset is required.
        self.reset(reset_channel_labels=False)

    @QtCore.Slot(float)
    def duration_valueChanged(self, value):
        self.duration = value

    @property
    def plot_mode(self):
        # 'Sweep' or 'Scrolling'; forwarded to each TimeSeriesBuffer.
        return self._plot_mode

    @plot_mode.setter
    def plot_mode(self, value):
        self._plot_mode = value
        self.reset(reset_channel_labels=False)

    @QtCore.Slot(str)
    def mode_currentTextChanged(self, new_mode):
        self.plot_mode = new_mode

    @property
    def auto_scale(self):
        # One of autoscale_modes; compared case-insensitively in fetch_data.
        return self._auto_scale

    @auto_scale.setter
    def auto_scale(self, value):
        self._auto_scale = value

    @QtCore.Slot(str)
    def auto_scale_currentTextChanged(self, value):
        self.auto_scale = value
class RendererDataTimeSeries(RendererBufferData):
    plot_modes = None
    COMPAT_ICONTROL = ['TimeSeriesControl']
    gui_kwargs = dict(RendererBufferData.gui_kwargs, marker_scale=float, font_size=int)

    def __init__(self,
                 plot_mode: str = 'Sweep',
                 marker_scale: float = 1.0,
                 font_size: int = 10,
                 **kwargs,
                 ):
        """Buffer-backed renderer that can additionally draw marker streams.

        Args:
            marker_scale: By default a marker fills the range of a single
                channel. Increase this to extend into other channels, or
                negate it to flip direction.
            font_size: Font size used for marker text.
            **kwargs: Forwarded to RendererBufferData.
        """
        self._marker_scale = marker_scale
        self._font_size = font_size
        super().__init__(plot_mode=plot_mode, **kwargs)

    def fetch_data(self) -> Tuple[List[Tuple[np.ndarray, np.ndarray]],
                                  List[Tuple[np.ndarray, np.ndarray]]]:
        """Fetch buffered data, then map marker values onto the plotted range."""
        collect_data, collect_timestamps = super().fetch_data()
        for src_ix, _src in enumerate(self._data_sources):
            # Each entry is ((data, markers), (data_ts, mrk_ts)).
            stream = collect_data[src_ix]
            if not np.any(stream[1]):
                continue  # Not a marker stream; leave untouched.
            # Marker stream: rescale stream[0] to fill
            # (self.lower_limit, self.upper_limit * self.marker_scale).
            scaled = list(stream)
            scaled[0] = self.marker_scale * scaled[0]
            scaled[0] = scaled[0] * (self.upper_limit - self.lower_limit) + self.lower_limit
            collect_data[src_ix] = tuple(scaled)
        return collect_data, collect_timestamps

    @property
    def marker_scale(self):
        """Vertical scale applied to future markers (no reset required)."""
        return self._marker_scale

    @marker_scale.setter
    def marker_scale(self, value):
        self._marker_scale = value  # Only applies to markers fetched after this point.

    @QtCore.Slot(float)
    def marker_scale_valueChanged(self, value):
        self.marker_scale = value

    @property
    def font_size(self):
        """Font size for marker/channel labels."""
        return self._font_size

    @font_size.setter
    def font_size(self, value):
        self._font_size = value
        self.reset(reset_channel_labels=True)  # Reset required to rebuild channel labels.

    @QtCore.Slot(int)
    def font_size_valueChanged(self, value):
        self.font_size = value
class RendererMergeDataSources(RendererBufferData):
    """
    Combines all streams into a single merge buffer. Only the most recent
    sample of each stream is kept, which sidesteps the problem of differing
    sample rates; this makes the class a suitable parent for "right now"
    snapshot renderers (bar, polar/radar, cortex mesh).

    The per-stream buffers from RendererBufferData are kept (rather than a
    leaner structure) so the auto-scaling feature keeps working.
    """

    def reset_buffers(self):
        super().reset_buffers()
        # A single merge buffer; the sample rate is irrelevant here.
        self._merge_buffer = [MergeLastOnlyBuffer()]
        if 'vis' in self.chan_states:
            n_chans = self.chan_states['vis'].sum()
        else:
            n_chans = len(self.chan_states)
        self._merge_buffer[0].reset(n_chans)  # Pass all channel states at once.

    def fetch_data(self) -> Tuple[np.ndarray, np.ndarray]:
        """Merge the latest sample of every stream and return the combined contents."""
        collect_data, collect_timestamps = super().fetch_data()
        for src_ix, src in enumerate(self._data_sources):
            # Each entry is ((data, markers), (data_ts, mrk_ts)); merge only the data part.
            self._merge_buffer[0].update(collect_data[src_ix][0], collect_timestamps[src_ix][0],
                                         self._chan_states, source_id=src.identifier)
        return self._merge_buffer[0].contents
| [
"pandas.DataFrame",
"numpy.zeros_like",
"numpy.abs",
"json.loads",
"numpy.multiply",
"numpy.nanmax",
"numpy.zeros",
"stream_viewer.buffers.TimeSeriesBuffer",
"numpy.nanmin",
"stream_viewer.buffers.MergeLastOnlyBuffer",
"numpy.any",
"numpy.finfo",
"pathlib.Path",
"numpy.array",
"qtpy.QtCo... | [((413, 440), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (430, 440), False, 'import logging\n'), ((512, 541), 'qtpy.QtCore.Signal', 'QtCore.Signal', (['QtCore.QObject'], {}), '(QtCore.QObject)\n', (525, 541), False, 'from qtpy import QtCore\n'), ((3699, 3723), 'qtpy.QtCore.Slot', 'QtCore.Slot', (['IDataSource'], {}), '(IDataSource)\n', (3710, 3723), False, 'from qtpy import QtCore\n'), ((9875, 9918), 'qtpy.QtCore.Slot', 'QtCore.Slot', (['QtWidgets.QTreeWidgetItem', 'int'], {}), '(QtWidgets.QTreeWidgetItem, int)\n', (9886, 9918), False, 'from qtpy import QtCore\n'), ((10506, 10524), 'qtpy.QtCore.Slot', 'QtCore.Slot', (['float'], {}), '(float)\n', (10517, 10524), False, 'from qtpy import QtCore\n'), ((10899, 10917), 'qtpy.QtCore.Slot', 'QtCore.Slot', (['float'], {}), '(float)\n', (10910, 10917), False, 'from qtpy import QtCore\n'), ((11295, 11313), 'qtpy.QtCore.Slot', 'QtCore.Slot', (['float'], {}), '(float)\n', (11306, 11313), False, 'from qtpy import QtCore\n'), ((15736, 15754), 'qtpy.QtCore.Slot', 'QtCore.Slot', (['float'], {}), '(float)\n', (15747, 15754), False, 'from qtpy import QtCore\n'), ((16040, 16056), 'qtpy.QtCore.Slot', 'QtCore.Slot', (['str'], {}), '(str)\n', (16051, 16056), False, 'from qtpy import QtCore\n'), ((16309, 16325), 'qtpy.QtCore.Slot', 'QtCore.Slot', (['str'], {}), '(str)\n', (16320, 16325), False, 'from qtpy import QtCore\n'), ((18456, 18474), 'qtpy.QtCore.Slot', 'QtCore.Slot', (['float'], {}), '(float)\n', (18467, 18474), False, 'from qtpy import QtCore\n'), ((18816, 18832), 'qtpy.QtCore.Slot', 'QtCore.Slot', (['int'], {}), '(int)\n', (18827, 18832), False, 'from qtpy import QtCore\n'), ((2941, 3001), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['name', 'src', 'unit', 'type', 'pos']"}), "(columns=['name', 'src', 'unit', 'type', 'pos'])\n", (2953, 3001), True, 'import pandas as pd\n'), ((7311, 7325), 'pathlib.Path', 'Path', (['settings'], {}), '(settings)\n', (7315, 7325), False, 
'from pathlib import Path\n'), ((8432, 8475), 'numpy.abs', 'np.abs', (['(self.upper_limit - self.lower_limit)'], {}), '(self.upper_limit - self.lower_limit)\n', (8438, 8475), True, 'import numpy as np\n'), ((9060, 9104), 'json.loads', 'json.loads', (['self._data_sources[0].identifier'], {}), '(self._data_sources[0].identifier)\n', (9070, 9104), False, 'import json\n'), ((9652, 9671), 'pandas.DataFrame', 'pd.DataFrame', (['value'], {}), '(value)\n', (9664, 9671), True, 'import pandas as pd\n'), ((9718, 9778), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['name', 'src', 'unit', 'type', 'pos']"}), "(columns=['name', 'src', 'unit', 'type', 'pos'])\n", (9730, 9778), True, 'import pandas as pd\n'), ((13070, 13190), 'stream_viewer.buffers.TimeSeriesBuffer', 'TimeSeriesBuffer', ([], {'mode': 'self.plot_mode', 'srate': "src_stats['srate']", 'duration': 'self._duration', 'indicate_write_index': '(True)'}), "(mode=self.plot_mode, srate=src_stats['srate'], duration=\n self._duration, indicate_write_index=True)\n", (13086, 13190), False, 'from stream_viewer.buffers import StreamDataBuffer, MergeLastOnlyBuffer, TimeSeriesBuffer\n'), ((17771, 17786), 'numpy.any', 'np.any', (['data[1]'], {}), '(data[1])\n', (17777, 17786), True, 'import numpy as np\n'), ((19697, 19718), 'stream_viewer.buffers.MergeLastOnlyBuffer', 'MergeLastOnlyBuffer', ([], {}), '()\n', (19716, 19718), False, 'from stream_viewer.buffers import StreamDataBuffer, MergeLastOnlyBuffer, TimeSeriesBuffer\n'), ((13650, 13685), 'numpy.array', 'np.array', (['[[]]'], {'dtype': '_._data.dtype'}), '([[]], dtype=_._data.dtype)\n', (13658, 13685), True, 'import numpy as np\n'), ((13687, 13713), 'numpy.array', 'np.array', (['[]'], {'dtype': 'object'}), '([], dtype=object)\n', (13695, 13713), True, 'import numpy as np\n'), ((13770, 13782), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13778, 13782), True, 'import numpy as np\n'), ((13784, 13796), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13792, 13796), 
True, 'import numpy as np\n'), ((14337, 14352), 'numpy.any', 'np.any', (['data[0]'], {}), '(data[0])\n', (14343, 14352), True, 'import numpy as np\n'), ((15029, 15050), 'numpy.zeros_like', 'np.zeros_like', (['_range'], {}), '(_range)\n', (15042, 15050), True, 'import numpy as np\n'), ((15212, 15247), 'numpy.multiply', 'np.multiply', (['_data', 'coef'], {'out': '_data'}), '(_data, coef, out=_data)\n', (15223, 15247), True, 'import numpy as np\n'), ((7435, 7509), 'qtpy.QtCore.QStandardPaths.writableLocation', 'QtCore.QStandardPaths.writableLocation', (['QtCore.QStandardPaths.HomeLocation'], {}), '(QtCore.QStandardPaths.HomeLocation)\n', (7473, 7509), False, 'from qtpy import QtCore\n'), ((14361, 14376), 'numpy.any', 'np.any', (['data[1]'], {}), '(data[1])\n', (14367, 14376), True, 'import numpy as np\n'), ((14498, 14537), 'numpy.nanmin', 'np.nanmin', (['_data'], {'axis': '(1)', 'keepdims': '(True)'}), '(_data, axis=1, keepdims=True)\n', (14507, 14537), True, 'import numpy as np\n'), ((14565, 14604), 'numpy.nanmax', 'np.nanmax', (['_data'], {'axis': '(1)', 'keepdims': '(True)'}), '(_data, axis=1, keepdims=True)\n', (14574, 14604), True, 'import numpy as np\n'), ((14729, 14745), 'numpy.nanmin', 'np.nanmin', (['_data'], {}), '(_data)\n', (14738, 14745), True, 'import numpy as np\n'), ((14748, 14796), 'numpy.zeros', 'np.zeros', (['(_data.shape[0], 1)'], {'dtype': '_data.dtype'}), '((_data.shape[0], 1), dtype=_data.dtype)\n', (14756, 14796), True, 'import numpy as np\n'), ((14824, 14840), 'numpy.nanmax', 'np.nanmax', (['_data'], {}), '(_data)\n', (14833, 14840), True, 'import numpy as np\n'), ((14843, 14891), 'numpy.zeros', 'np.zeros', (['(_data.shape[0], 1)'], {'dtype': '_data.dtype'}), '((_data.shape[0], 1), dtype=_data.dtype)\n', (14851, 14891), True, 'import numpy as np\n'), ((14981, 15001), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (14989, 15001), True, 'import numpy as np\n')] |
"""
Classes for importing LAMMPS atom trajectory into a xarray dataset. The trajectory class also has a dataset with ellipsoid vectors.
traj_options contains all the inputs for generating the trajectory class.
Usage: traj_opt = t.trajectory_options(path = "./trajectories/",
file_pattern = "sma.dump.gz",
exclude_types = [1],
monomer_types = 3)
traj = t.trajectory(traj_opt)
traj.save_trajectory(path = "./save/")
Accessing data:
traj.vectors.cm
traj.vectors.coord
traj.atoms.xu
Requires modules: xarray
numpy
glob
re
pandas
gzip
Requires scripts: compute_op
io_local
TODO: * save options
      * optimize the reading-file -> dataset conversion process
* more verbosity
* test/remove trim_to_types method
* add nb_atoms to the dataset
"""
import xarray as xr
import numpy as np
from glob import glob
import re
import pandas as pd
import gzip as gz
import compute_op as cop
import io_local as io
class trajectory():
    """Generates trajectory
    Args:
        - options (trajectory_options): Contains options for reading the trajectory files
    Attributes:
        - Arguments
        - atoms (xarray.Dataset): Contains the trajectory information of the atoms as written in the LAMMPS files.
        - vectors (xarray.Dataset): Contains the trajectory of the vectors of the monomers.
    """

    def __init__(self, options):
        """Creates the class. Creates the atoms and vectors Datasets.
        Args:
            - options (trajectory_options): Contains the options for reading the trajectory files
        """
        self.options = options
        if self.options.restore:
            # Restore previously-saved netCDF datasets instead of re-parsing dumps.
            self.restore_trajectory()
        else:
            self.atoms = self.read_traj()
            self.vectors = self.add_vectors()
            self.vectors = cop.qmatrix(self.vectors)
        # v_traj = self.trim_to_types(self.full_traj, self.options.monomer_types['atom'])  ## needs testing
        # delimiters = self.trim_to_types(self.full_traj, self.options.monomer_types['del'])  ## needs testing

    def read_traj(self):
        """Creates the atoms dataset by reading the LAMMPS trajectory files.
        Options:
            - file_list (list of strings): Contains the files that will be read and converted to a Dataset
            - exclude_types (list of ints): Atoms with type that matches any in this list will not be imported
        Returns:
            - xarray.Dataset: Trajectory of the atoms contained in the files, with coordinates
              ts (timestep), id (atom id) and comp (x, y, z components where needed).
        """
        bounds = {'x': [], 'y': [], 'z': []}
        bounds_array = []
        comp = ['x', 'y', 'z']
        nb_atoms = None
        step = None
        timestep = []
        data = []
        for filename in self.options.file_list:
            if re.search('.gz', filename):
                # Transparently handle gzip-compressed dump files.
                f = gz.open(filename, 'rt')
            else:
                f = open(filename, 'rt')
            file = f.readlines()
            f.close()
            cnt = 0  # line counter
            # Scan for the header line listing the per-atom properties.
            while not re.search('ITEM: ATOMS', str(file[cnt])):
                cnt += 1
            coordinates = file[cnt].strip().split()[2:]  # variable names; skip 'ITEM:' and 'ATOMS' tokens
            for i, line in enumerate(file):
                if line.strip() == 'ITEM: TIMESTEP':
                    step = int(file[i + 1])
                elif line.strip() == 'ITEM: NUMBER OF ATOMS':
                    nb_atoms = int(file[i + 1])
                elif line.strip() == 'ITEM: BOX BOUNDS pp pp pp':
                    bounds['x'] = [float(val) for val in file[i + 1].strip().split()]
                    bounds['y'] = [float(val) for val in file[i + 2].strip().split()]
                    bounds['z'] = [float(val) for val in file[i + 3].strip().split()]
                elif re.search('ITEM: ATOMS', str(line)):
                    # BUG FIX: the slice used to end at i+nb_atoms-1, silently
                    # dropping the last two atoms of every frame. The nb_atoms
                    # atom lines span file[i+1] .. file[i+nb_atoms] inclusive.
                    data.append(self.lines_to_df(file[i + 1:i + 1 + nb_atoms], coordinates))
                    timestep.append(step)  # list with all the timesteps
            # Box length along each axis (hi - lo) for this file.
            bounds_list = []
            for axis in bounds:
                bounds_list.append(bounds[axis][1] - bounds[axis][0])
            bounds_array.append(bounds_list)
        data = self.dfs_to_ds(data, timestep)
        # NOTE(review): one bounds row is stored per file and aligned against
        # data.ts; confirm this matches one-timestep-per-file trajectories.
        data['bounds'] = xr.DataArray(bounds_array, coords=[data.ts, comp], dims=['ts', 'comp'])
        return data

    def lines_to_df(self, lines, column_names):
        """Imports the atom-property lines of a single timestep into a dataframe.
        Args:
            - lines (list of strings): One atom per line.
            - column_names (list of strings): Names of the atom properties.
        Options:
            - exclude_types (list of ints): Atoms whose type appears in this list are skipped.
        Returns:
            pandas.DataFrame: One column per property, indexed and sorted by atom id.
        """
        data = []
        type_id = None
        # Locate the column holding the atom type.
        for i, name in enumerate(column_names):
            if name == 'type':
                type_id = i
        # BUG FIX: the previous implementation appended a line once for every
        # excluded type it did NOT match, which duplicated rows and re-included
        # atoms of excluded types whenever more than one type was excluded.
        if self.options.exclude_types:
            excluded = {int(j) for j in self.options.exclude_types}
        else:
            excluded = set()
        for raw in lines:
            line = raw.strip().split()
            if excluded and int(line[type_id]) in excluded:
                continue
            data.append(line)
        df = pd.DataFrame(data, columns=column_names)
        df = df.apply(pd.to_numeric)
        df = df.set_index(['id'])
        df = df.sort_index()
        return df

    def dfs_to_ds(self, dfs, list_ids):
        """Turns a list of dataframes into a dataset.
        Args:
            - dfs (list of dataframes): Dataframes to convert to a dataset.
            - list_ids (list of ints): Coordinate (timestep) of each item in dfs.
        Returns:
            Dataset: Variables match the dataframes' columns; coordinates match
            list_ids and the dataframes' index.
        """
        ds = []
        for i, df in enumerate(dfs):
            ds.append(xr.Dataset())
            for j in df:
                ds[i][j] = df[j].to_xarray()
            # Tag each per-frame dataset with its timestep coordinate.
            ds[i].update({'ts': ('ts', [list_ids[i]])})
        return xr.concat(ds, dim='ts')

    def trim_to_types(self, traj, types):
        """Removes all but those types.
        Need to check if relevant.
        TODO: Need to test many subsequent trims
        """
        traj = traj.where(traj.type.isin(types), drop=True)
        return traj

    def add_vectors(self):
        """Creates a Dataset containing vector information based on the atoms dataset.
        Options:
            - monomer_types
        Returns:
            - xarray.Dataset: Vector trajectories for specified monomer(s); same coordinates
              as the atoms dataset.
        TODO:
            - streamline the vector-extremity determination to match a general behavior,
              not just the ellipsoid+pseudoatom template
        """
        data = self.atoms  # just because it's shorter
        mono_type = self.options.monomer_types
        droppers = []
        coords = ['x', 'y', 'z']
        # Collect the variables that are not unwrapped positions so they can be dropped.
        for i in data.data_vars:
            if i not in ['xu', 'yu', 'zu']:
                droppers.append(i)
        vectors = []
        for i in range(len(data.id)):
            if int(data.isel(id=i).type[0]) == mono_type:  # type doesn't change during simulation
                # Since ids are ordered, the atoms immediately before and after the
                # ellipsoid are the pseudo-atoms used as the vector extremities.
                atom_0 = data.drop_vars(droppers).isel(id=i - 1)
                atom_1 = data.drop_vars(droppers).isel(id=i + 1)
                # We take advantage of the arithmetic properties of datasets.
                v = atom_1 - atom_0
                norm = np.sqrt(v.xu**2 + v.yu**2 + v.zu**2)
                v = v / norm
                v['coord'] = xr.DataArray(np.transpose([v.xu, v.yu, v.zu]), coords=[v.ts, coords], dims=['ts', 'comp'])
                v['norm'] = norm
                alpha = np.arccos(v.xu)
                beta = np.arccos(v.yu)
                gamma = np.arccos(v.zu)
                v['angle'] = xr.DataArray(np.transpose([alpha, beta, gamma]), coords=[v.ts, coords], dims=['ts', 'comp'])
                x_cm = data.isel(id=i).xu
                y_cm = data.isel(id=i).yu
                z_cm = data.isel(id=i).zu
                v['cm'] = xr.DataArray(np.transpose([x_cm, y_cm, z_cm]), coords=[v.ts, coords], dims=['ts', 'comp'])
                v = v.drop_vars(['xu', 'yu', 'zu'])
                v.update({'id': ('id', [i])})
                vectors.append(v)
        return xr.concat(vectors, dim='id')

    def save_trajectory(self, path='save/'):
        """Saves the trajectory class in nc (netCDF) files. Much faster to restore than re-reading.
        Args:
            - path (str, optional): Directory where the trajectory will be saved. Defaults to 'save/'.
        TODO:
            - Allow custom naming of files; integrate with traj_options; save options too.
        """
        # Narrowed from a bare `except:`: only a missing dataset attribute means
        # "nothing to save"; real I/O errors now propagate instead of being hidden.
        try:
            print('saving atoms...')
            io.save_xarray(self.atoms, path, 'atoms_io')
        except AttributeError:
            print('No trajectory for atoms; not saving dataset...')
        try:
            print('saving vectors...')
            io.save_xarray(self.vectors, path, 'vectors_io')
        except AttributeError:
            print('No trajectory for vectors; not saving dataset...')

    def restore_trajectory(self, include='all'):
        """Restores a saved trajectory from netCDF files to regenerate the trajectory class.
        Args:
            - include (str, optional): Which saved datasets to restore
              ('all', 'atoms', 'vectors' or 'options'). Defaults to 'all'.
        TODO:
            - Allow custom naming / pattern recognition of saved files.
            - Integrate option restoration.
        """
        path = self.options.path
        DO_ATOMS = False
        DO_VECTORS = False
        DO_OPTIONS = False
        if include == 'all':
            DO_ATOMS = True
            DO_VECTORS = True
            DO_OPTIONS = True
        elif include == 'atoms':
            DO_ATOMS = True
        elif include == 'vectors':
            DO_VECTORS = True
        elif include == 'options':
            DO_OPTIONS = True
        else:
            print('argument for include :', include, 'is not recognized!')
        if DO_ATOMS:
            self.atoms = io.read_xarray(path, name='atoms_io.nc')
        if DO_VECTORS:
            self.vectors = io.read_xarray(path, name='vectors_io.nc')
class trajectory_options():
    """Generates options for the trajectory. See __init__ for the option descriptions.
    Attributes:
        - Arguments (path, exclude_types, monomer_types, restore)
        - file_list ([str]): Files matching file_pattern in path; these are the files
          read when building the trajectory class (only set when restore is False).
    TODO:
        - monomer_types is rough and needs work for a more general approach
    """

    def __init__(self, path="./", file_pattern="ellipsoid.*", exclude_types=None, monomer_types=None, restore=False):
        """Creates the class. Initializes the argument attributes and creates file_list.
        Args:
            - path (str, optional): Directory of the trajectory files (LAMMPS dumps or saved
              netCDF). Must end with "/". Defaults to "./".
            - file_pattern (str, optional): Unix-style glob pattern for trajectory files.
              Only used when restore is False. Defaults to "ellipsoid.*".
            - exclude_types ([int], optional): Types matching this list are not imported.
              Defaults to None.
            - monomer_types ([int], optional): Types corresponding to the monomer ellipsoid;
              vector extremities are the particles one before and one after this type in an
              id-ordered dataset. Defaults to None.
            - restore (bool, optional): If True, the trajectory class is generated by
              restoring datasets from netCDF files. Defaults to False.
        """
        self.path = path
        self.exclude_types = exclude_types
        self.monomer_types = monomer_types
        self.restore = restore
        if not self.restore:
            self.create_file_list(path, file_pattern)

    def create_file_list(self, path, file_pattern):
        """Creates the file list from the files matching file_pattern in path.
        Args:
            - path (str): Directory of the trajectory files. Must end with "/".
            - file_pattern (str): Unix-style glob pattern.
        Raises:
            - FileNotFoundError: If no file matches. FileNotFoundError is a subclass of
              OSError (a.k.a. EnvironmentError), so existing callers catching
              EnvironmentError still work.
        """
        self.file_list = glob(path + file_pattern)
        print("Found", len(self.file_list), "matching", file_pattern, "in", path, "...")
        if len(self.file_list) == 0:
            # Previously a bare `raise EnvironmentError`; give callers a message.
            raise FileNotFoundError(
                "No files matching {0!r} found in {1!r}".format(file_pattern, path))
| [
"pandas.DataFrame",
"compute_op.qmatrix",
"gzip.open",
"numpy.transpose",
"xarray.concat",
"io_local.read_xarray",
"xarray.Dataset",
"xarray.DataArray",
"glob.glob",
"io_local.save_xarray",
"numpy.arccos",
"re.search",
"numpy.sqrt"
] | [((5137, 5208), 'xarray.DataArray', 'xr.DataArray', (['bounds_array'], {'coords': '[data.ts, comp]', 'dims': "['ts', 'comp']"}), "(bounds_array, coords=[data.ts, comp], dims=['ts', 'comp'])\n", (5149, 5208), True, 'import xarray as xr\n'), ((6485, 6525), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'column_names'}), '(data, columns=column_names)\n', (6497, 6525), True, 'import pandas as pd\n'), ((7347, 7370), 'xarray.concat', 'xr.concat', (['ds'], {'dim': '"""ts"""'}), "(ds, dim='ts')\n", (7356, 7370), True, 'import xarray as xr\n'), ((9907, 9935), 'xarray.concat', 'xr.concat', (['vectors'], {'dim': '"""id"""'}), "(vectors, dim='id')\n", (9916, 9935), True, 'import xarray as xr\n'), ((15713, 15738), 'glob.glob', 'glob', (['(path + file_pattern)'], {}), '(path + file_pattern)\n', (15717, 15738), False, 'from glob import glob\n'), ((2025, 2050), 'compute_op.qmatrix', 'cop.qmatrix', (['self.vectors'], {}), '(self.vectors)\n', (2036, 2050), True, 'import compute_op as cop\n'), ((3210, 3236), 're.search', 're.search', (['""".gz"""', 'filename'], {}), "('.gz', filename)\n", (3219, 3236), False, 'import re\n'), ((10639, 10683), 'io_local.save_xarray', 'io.save_xarray', (['self.atoms', 'path', '"""atoms_io"""'], {}), "(self.atoms, path, 'atoms_io')\n", (10653, 10683), True, 'import io_local as io\n'), ((10833, 10881), 'io_local.save_xarray', 'io.save_xarray', (['self.vectors', 'path', '"""vectors_io"""'], {}), "(self.vectors, path, 'vectors_io')\n", (10847, 10881), True, 'import io_local as io\n'), ((12134, 12174), 'io_local.read_xarray', 'io.read_xarray', (['path'], {'name': '"""atoms_io.nc"""'}), "(path, name='atoms_io.nc')\n", (12148, 12174), True, 'import io_local as io\n'), ((12227, 12269), 'io_local.read_xarray', 'io.read_xarray', (['path'], {'name': '"""vectors_io.nc"""'}), "(path, name='vectors_io.nc')\n", (12241, 12269), True, 'import io_local as io\n'), ((3294, 3317), 'gzip.open', 'gz.open', (['filename', '"""rt"""'], {}), "(filename, 'rt')\n", 
(3301, 3317), True, 'import gzip as gz\n'), ((7188, 7200), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (7198, 7200), True, 'import xarray as xr\n'), ((9024, 9066), 'numpy.sqrt', 'np.sqrt', (['(v.xu ** 2 + v.yu ** 2 + v.zu ** 2)'], {}), '(v.xu ** 2 + v.yu ** 2 + v.zu ** 2)\n', (9031, 9066), True, 'import numpy as np\n'), ((9274, 9289), 'numpy.arccos', 'np.arccos', (['v.xu'], {}), '(v.xu)\n', (9283, 9289), True, 'import numpy as np\n'), ((9313, 9328), 'numpy.arccos', 'np.arccos', (['v.yu'], {}), '(v.yu)\n', (9322, 9328), True, 'import numpy as np\n'), ((9353, 9368), 'numpy.arccos', 'np.arccos', (['v.zu'], {}), '(v.zu)\n', (9362, 9368), True, 'import numpy as np\n'), ((9133, 9165), 'numpy.transpose', 'np.transpose', (['[v.xu, v.yu, v.zu]'], {}), '([v.xu, v.yu, v.zu])\n', (9145, 9165), True, 'import numpy as np\n'), ((9412, 9446), 'numpy.transpose', 'np.transpose', (['[alpha, beta, gamma]'], {}), '([alpha, beta, gamma])\n', (9424, 9446), True, 'import numpy as np\n'), ((9670, 9702), 'numpy.transpose', 'np.transpose', (['[x_cm, y_cm, z_cm]'], {}), '([x_cm, y_cm, z_cm])\n', (9682, 9702), True, 'import numpy as np\n')] |
import csv
import logging
import sys

import matplotlib as plt
# Rebind plt to pyplot: this module calls plt.subplots(), which does not exist
# on the bare matplotlib package. The line above is kept for compatibility.
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon as PlotPolygon
import numpy as np
import yaml

import polygon_tools as poly
def robot_builder(robot):
    """Instantiate a robot from a spec dict of the form
    {type: <class name in this module>, parameters: {...}}.

    Example: {type: RobotArm2D, parameters: {base_position: [5.0, 5.0], link_lengths: [2.1, 2.1]}}
    resolves to robot_tools.RobotArm2D(base_position=[5.0, 5.0], link_lengths=[2.1, 2.1]).
    Note the robot type must be implemented in this module.
    """
    robot_cls = getattr(sys.modules[__name__], robot['type'])
    return robot_cls(**robot['parameters'])
def workspace_builder(workspace):
    """Instantiate a workspace from a spec dict of the form
    {type: <class name in this module>, parameters: {...}}.

    Example: {type: Workspace2D, parameters: {limits: [[0, 1.0], [0, 1.0]], obstacles: []}}
    resolves to robot_tools.Workspace2D(limits=[[0, 1.0], [0, 1.0]], obstacles=[]).
    Note the workspace type must be implemented in this module.
    """
    workspace_cls = getattr(sys.modules[__name__], workspace['type'])
    return workspace_cls(**workspace['parameters'])
class Workspace2D(object):
    """A rectangular 2D workspace populated with polygonal obstacles.

    Obstacles are built from spec dicts naming a class in poly_tools
    (e.g. Polygon, Rectangle) plus its constructor parameters.
    """

    def __init__(self, limits=[[0,1.0],[0,1.0]], obstacles=[]):
        """
        :param limits: [[xmin, xmax], [ymin, ymax]] workspace bounds
        :param obstacles: list of {type: <poly class>, parameters: {...}} dicts
        """
        self.limits = np.array(limits)
        assert self.limits.shape == (2,2), 'Currently only implemented for 2D workspaces'
        self.obstacles = []
        for ob in obstacles:
            # Each obstacle must be a Polygon or derived class (e.g. Rectangle) from poly_tools
            self.obstacles.append(getattr(poly, ob['type'])(**ob['parameters']))

    def in_collision_point(self, point):
        """Return True if the (x, y) point lies inside any obstacle."""
        p = poly.Point(*point)
        for o in self.obstacles:
            if o.point_inside(p):
                return True
        return False

    def in_collision_poly(self, polygon):
        """Return True if the polygon intersects any obstacle."""
        for o in self.obstacles:
            if polygon.intersect(o):
                return True
        return False

    def plot(self, hax=None, cmap=cm.viridis):
        """Draw the workspace bounds and obstacles on a matplotlib axis.

        :param hax: existing axis to draw into (a new figure is created if None)
        :param cmap: colormap used to colour the obstacles
        """
        if hax is None:
            # BUGFIX: the module imports matplotlib (not matplotlib.pyplot) as
            # plt, so plt.subplots() would raise AttributeError. Import pyplot
            # locally to create the figure.
            import matplotlib.pyplot as pyplot
            f, hax = pyplot.subplots(1)
        h_obs = [PlotPolygon(o, zorder=1) for o in self.obstacles]
        c_obs = PatchCollection(h_obs, cmap=cmap)
        # Colors must be assigned through set_array (setting them on the
        # individual Polygon patches has no effect inside a PatchCollection)
        c_obs.set_array(np.linspace(0, 1.0, len(self.obstacles) + 1)[1:])
        hax.add_collection(c_obs)
        hax.set_aspect('equal')
        hax.set_xlabel(r'$x$')
        hax.set_ylabel(r'$y$')
        hax.set_xlim(self.limits[0])
        hax.set_ylim(self.limits[1])
class Robot2D(object):
    """A rigid 2D robot described by a polygonal footprint, a position and a heading."""
    _c_poly = None  # cached footprint polygon in world coordinates

    def __init__(self, pos=[0.0, 0.0], heading=0.0, footprint=[(0.0, 0.0)], footprint_file=None):
        """
        :param pos: [x, y] world position
        :param heading: orientation in radians
        :param footprint: list of (x, y) body-frame vertices (ignored when footprint_file is given)
        :param footprint_file: optional CSV file with one "x,y" vertex per row
        """
        self.R = np.eye(2)
        if footprint_file is not None:
            with open(footprint_file, mode='r') as fh:
                csv_reader = csv.reader(fh)
                footprint = []
                for row_num, row in enumerate(csv_reader):
                    # BUGFIX: the old message formatted len(row)+1 (always "3")
                    # instead of the number of the offending row
                    assert len(row) == 2, 'Row {0} does not have 2 elements'.format(row_num + 1)
                    footprint.append([float(row[0]), float(row[1])])
            logging.info('Loaded robot footprint file {0} with {1} points'.format(footprint_file, len(footprint)))
        self.position = poly.Point(pos[0], pos[1])
        self.footprint = poly.PointList(footprint)
        self.heading = heading
        self._set_heading_transformation()
        self._update_poly()

    def _set_heading_transformation(self):
        # 2D rotation matrix for the current heading
        ct, st = np.cos(self.heading), np.sin(self.heading)
        self.R = np.array([[ct, -st], [st, ct]])

    def set_heading(self, heading):
        """Set the heading (radians) and refresh the cached world polygon."""
        self.heading = heading
        self._set_heading_transformation()
        self._update_poly()

    def set_position(self, pos):
        """Set the world position and refresh the cached world polygon."""
        self.position = pos
        self._update_poly()

    def set_footprint(self, footprint):
        """Replace the body-frame footprint and refresh the cached world polygon."""
        self.footprint = footprint
        self._update_poly()

    def _update_poly(self):
        # Rotate every footprint vertex into the world frame and translate by position
        self._c_poly = poly.Polygon([poly.Point(*(np.matmul(self.R, p) + self.position)) for p in self.footprint])

    def get_current_polygon(self):
        """Return the footprint polygon in world coordinates."""
        return self._c_poly
class RobotArm2D(object):
    """A planar serial-link arm with a fixed base, n links and n relative joint angles."""
    _spine_pts = None  # list of poly.Point joint positions, base first, end effector last
    def __init__(self, base_position=[0.0, 0.0], link_lengths=[1.0, 1.0], link_angles=[0.0, 0.0]):
        # Assume arm angles are relative (can be summed)
        self._base_position = poly.Point(base_position[0], base_position[1])
        assert len(link_lengths) == len(link_angles)
        self._link_lengths = np.array(link_lengths)
        self._link_angles = np.array(link_angles)
        # One rotation matrix per link (absolute orientation, filled in below)
        self._R = [np.eye(2) for i in self._link_angles]
        self._set_rotation_transforms()
    def set_link_angles(self, link_angles):
        """Set the relative joint angles (radians) and recompute the arm geometry."""
        self._link_angles = np.array(link_angles)
        self._set_rotation_transforms()
    def _set_rotation_transforms(self):
        # Cumulative sum turns relative joint angles into absolute link orientations
        sum_angles = self._link_angles.cumsum()
        for i, theta in enumerate(sum_angles):
            ct, st = np.cos(theta), np.sin(theta)
            self._R[i] = np.array([[ct, -st], [st, ct]])
        self._set_spine_points()
    def get_current_polygon(self):
        # Run backwards through the points to make a polygon
        return poly.Polygon(self._spine_pts + self._spine_pts[-2:0:-1])
    def _set_spine_points(self):
        # Forward kinematics: each joint is the previous joint plus the rotated link vector
        self._spine_pts = [self._base_position]
        for R, ll in zip(self._R, self._link_lengths):
            self._spine_pts.append(poly.Point(*(np.matmul(R, [ll, 0])+self._spine_pts[-1])))
    def get_spine_points(self):
        """Return the joint positions as two lists ([x...], [y...])."""
        return [p.x for p in self._spine_pts], [p.y for p in self._spine_pts]
    def get_end_effector_position(self):
        """Return the end-effector position (last spine point)."""
        return self._spine_pts[-1]
    def end_effector_path(self, config_path):
        """Map a sequence of joint configurations to end-effector positions.

        The current configuration is restored before returning.
        """
        c_pose = self._link_angles.copy()
        ee_path = []
        for pose in config_path:
            self.set_link_angles(pose)
            ee_path.append(self.get_end_effector_position())
        self.set_link_angles(c_pose)
        return np.array(ee_path)
class PlanningProblem(object):
    """Loads a workspace and a robot from a YAML world file and can rasterise
    the robot's 2-joint configuration space against the workspace obstacles."""
    def __init__(self, world_file):
        # Load world
        with open(world_file, 'r') as fh:
            world = yaml.safe_load(fh)
        self.workspace = workspace_builder(world['workspace'])
        # Note that the robot type must be implemented in the robot_tools module, so the example robot:
        # {type: RobotArm2D, parameters: {base_position: [5.0, 5.0], link_lengths: [2.1, 2.1]}
        # would call as constructor: robot_tools.RobotArm2D(base_position=[5.0, 5.0], link_lengths=[2.1, 2.1])
        self.robot = robot_builder(world['robot'])
    def construct_config_space(self, nx=101):
        # Rasterise the (theta1, theta2) torus on an nx-by-nx grid; each cell holds
        # 0 for free space or (obstacle index + 1) for the first obstacle hit.
        # TODO: This should be more general (number of dimensions, wraparound etc. in the robot class)
        theta1, theta2 = np.linspace(0, 2.0 * np.pi, nx), np.linspace(0, 2.0 * np.pi, nx)
        v = np.zeros((len(theta1), len(theta2)), dtype=int)
        for i, t1 in enumerate(theta1):
            for j, t2 in enumerate(theta2):
                self.robot.set_link_angles([t1, t2])
                in_obs = 0
                fp = self.robot.get_current_polygon()
                for o_num, o in enumerate(self.workspace.obstacles):
                    if fp.intersect(o):
                        in_obs = o_num + 1
                        break
                v[i, j] = in_obs
return [theta1, theta2], v | [
"polygon_tools.PointList",
"polygon_tools.Polygon",
"csv.reader",
"polygon_tools.Point",
"matplotlib.patches.Polygon",
"numpy.sin",
"numpy.array",
"yaml.safe_load",
"numpy.cos",
"numpy.linspace",
"matplotlib.collections.PatchCollection",
"numpy.eye",
"numpy.matmul",
"matplotlib.subplots"
] | [((1163, 1179), 'numpy.array', 'np.array', (['limits'], {}), '(limits)\n', (1171, 1179), True, 'import numpy as np\n'), ((1562, 1580), 'polygon_tools.Point', 'poly.Point', (['*point'], {}), '(*point)\n', (1572, 1580), True, 'import polygon_tools as poly\n'), ((2201, 2234), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['h_obs'], {'cmap': 'cmap'}), '(h_obs, cmap=cmap)\n', (2216, 2234), False, 'from matplotlib.collections import PatchCollection\n'), ((2746, 2755), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (2752, 2755), True, 'import numpy as np\n'), ((3270, 3296), 'polygon_tools.Point', 'poly.Point', (['pos[0]', 'pos[1]'], {}), '(pos[0], pos[1])\n', (3280, 3296), True, 'import polygon_tools as poly\n'), ((3322, 3347), 'polygon_tools.PointList', 'poly.PointList', (['footprint'], {}), '(footprint)\n', (3336, 3347), True, 'import polygon_tools as poly\n'), ((3572, 3603), 'numpy.array', 'np.array', (['[[ct, -st], [st, ct]]'], {}), '([[ct, -st], [st, ct]])\n', (3580, 3603), True, 'import numpy as np\n'), ((4381, 4427), 'polygon_tools.Point', 'poly.Point', (['base_position[0]', 'base_position[1]'], {}), '(base_position[0], base_position[1])\n', (4391, 4427), True, 'import polygon_tools as poly\n'), ((4511, 4533), 'numpy.array', 'np.array', (['link_lengths'], {}), '(link_lengths)\n', (4519, 4533), True, 'import numpy as np\n'), ((4562, 4583), 'numpy.array', 'np.array', (['link_angles'], {}), '(link_angles)\n', (4570, 4583), True, 'import numpy as np\n'), ((4755, 4776), 'numpy.array', 'np.array', (['link_angles'], {}), '(link_angles)\n', (4763, 4776), True, 'import numpy as np\n'), ((5205, 5261), 'polygon_tools.Polygon', 'poly.Polygon', (['(self._spine_pts + self._spine_pts[-2:0:-1])'], {}), '(self._spine_pts + self._spine_pts[-2:0:-1])\n', (5217, 5261), True, 'import polygon_tools as poly\n'), ((5975, 5992), 'numpy.array', 'np.array', (['ee_path'], {}), '(ee_path)\n', (5983, 5992), True, 'import numpy as np\n'), ((2066, 2081), 'matplotlib.subplots', 
'plt.subplots', (['(1)'], {}), '(1)\n', (2078, 2081), True, 'import matplotlib as plt\n'), ((3512, 3532), 'numpy.cos', 'np.cos', (['self.heading'], {}), '(self.heading)\n', (3518, 3532), True, 'import numpy as np\n'), ((3534, 3554), 'numpy.sin', 'np.sin', (['self.heading'], {}), '(self.heading)\n', (3540, 3554), True, 'import numpy as np\n'), ((4604, 4613), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (4610, 4613), True, 'import numpy as np\n'), ((5028, 5059), 'numpy.array', 'np.array', (['[[ct, -st], [st, ct]]'], {}), '([[ct, -st], [st, ct]])\n', (5036, 5059), True, 'import numpy as np\n'), ((6147, 6165), 'yaml.safe_load', 'yaml.safe_load', (['fh'], {}), '(fh)\n', (6161, 6165), False, 'import yaml\n'), ((6768, 6799), 'numpy.linspace', 'np.linspace', (['(0)', '(2.0 * np.pi)', 'nx'], {}), '(0, 2.0 * np.pi, nx)\n', (6779, 6799), True, 'import numpy as np\n'), ((6801, 6832), 'numpy.linspace', 'np.linspace', (['(0)', '(2.0 * np.pi)', 'nx'], {}), '(0, 2.0 * np.pi, nx)\n', (6812, 6832), True, 'import numpy as np\n'), ((2159, 2183), 'matplotlib.patches.Polygon', 'PlotPolygon', (['o'], {'zorder': '(1)'}), '(o, zorder=1)\n', (2170, 2183), True, 'from matplotlib.patches import Polygon as PlotPolygon\n'), ((2880, 2894), 'csv.reader', 'csv.reader', (['fh'], {}), '(fh)\n', (2890, 2894), False, 'import csv\n'), ((4974, 4987), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4980, 4987), True, 'import numpy as np\n'), ((4989, 5002), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4995, 5002), True, 'import numpy as np\n'), ((4016, 4036), 'numpy.matmul', 'np.matmul', (['self.R', 'p'], {}), '(self.R, p)\n', (4025, 4036), True, 'import numpy as np\n'), ((5447, 5468), 'numpy.matmul', 'np.matmul', (['R', '[ll, 0]'], {}), '(R, [ll, 0])\n', (5456, 5468), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#Source : https://github.com/myleott/mnist_png
## ----------- TO DO IN FUTURE ---------------
#1. make the path recognition OS independent
#2. do a file-exists check before execution
## ----------- INTRODUCTION ---------------
# this script assumes that the following files are in the same location as the script.
# So your folder / directory should look something like this:
#
# --- mnist_jpg.py
# --- train-images.idx3-ubyte
# --- train-labels.idx1-ubyte
# --- t10k-images.idx3-ubyte
# --- t10k-labels.idx1-ubyte
# --- output
# ------------------- USAGE --------------------------
# > python scriptname output
#
# On Windows - The results will be saved in the same folder as the argument passed. So if you pass a different folder name instead of Output in the argument, the script will create that folder and save the results in that.
# On Linux and Mac - not tested yet
# Make sure the MNIST file names as the same as the ones mentioned above
# Make sure you have the dependencies installed
import os
import struct
import sys
import numpy
from array import array
from os import path
from PIL import Image #imported from pillow
# funtion to read the MNIST dataset
def read(dataset):
    """Load one MNIST split from the idx files next to this script.

    :param dataset: "training" or "testing"
    :return: (labels, images, size, rows, cols) where labels/images are
             flat array.array buffers and size is the number of images
    :raises ValueError: if dataset is neither "training" nor "testing"
    """
    # BUGFIX: the original compared strings with "is" (identity), which only
    # worked by accident of CPython string interning; use equality instead.
    if dataset == "training":
        fname_img = "train-images.idx3-ubyte"
        fname_lbl = "train-labels.idx1-ubyte"
    elif dataset == "testing":
        fname_img = "t10k-images.idx3-ubyte"
        fname_lbl = "t10k-labels.idx1-ubyte"
    else:
        raise ValueError("dataset must be 'testing' or 'training'")

    # "with" guarantees the files are closed even if parsing fails
    with open(fname_lbl, 'rb') as flbl:
        magic_nr, size = struct.unpack(">II", flbl.read(8))
        lbl = array("b", flbl.read())

    with open(fname_img, 'rb') as fimg:
        magic_nr, size, rows, cols = struct.unpack(">IIII", fimg.read(16))
        img = array("B", fimg.read())

    return lbl, img, size, rows, cols
# funtion to extract and the MNIST dataset
def write_dataset(labels, data, size, rows, cols, output_dir):
    """Write each MNIST image to output_dir/<label>/<index>.jpg.

    :param labels: per-image labels (0-9)
    :param data: flat pixel buffer, rows*cols bytes per image
    :param size: number of images (unused; len(labels) drives the loop)
    :param rows: image height in pixels
    :param cols: image width in pixels
    :param output_dir: root directory; one subdirectory per digit is created
    """
    output_dirs = [
        path.join(output_dir, str(i))
        for i in range(10)
    ]
    for d in output_dirs:  # renamed from "dir" to avoid shadowing the builtin
        if not path.exists(d):
            os.makedirs(d)

    # write data
    for (i, label) in enumerate(labels):
        output_filename = path.join(output_dirs[label], str(i) + ".jpg")
        print("writing " + output_filename)
        # Slice the i-th image out of the flat buffer, one row at a time
        data_i = [
            data[(i * rows * cols + j * cols):(i * rows * cols + (j + 1) * cols)]
            for j in range(rows)
        ]
        # uint8 so PIL interprets the array as an 8-bit grayscale image
        data_array = numpy.asarray(data_i, dtype=numpy.uint8)
        # BUGFIX: the original opened output_filename with open(..., "wb")
        # but never used the handle, leaving the file open while PIL wrote
        # to the same path; save directly instead.
        Image.fromarray(data_array).save(output_filename)
if __name__ == "__main__":
    # Require exactly one CLI argument: the directory the JPEGs are written to
    if len(sys.argv) != 2:
        print("usage: {0} <output_path>".format(sys.argv[0]))
        print("Please create a folder named Output in the same location as the script")
        sys.exit()
    output_path = sys.argv[1]
    # Convert both splits; images land under <output_path>/<split>/<label>/
    for dataset in ["training", "testing"]:
        labels, data, size, rows, cols = read(dataset)
        write_dataset(labels, data, size, rows, cols,
path.join(output_path, dataset)) | [
"os.makedirs",
"numpy.asarray",
"os.path.exists",
"PIL.Image.fromarray",
"os.path.join",
"sys.exit"
] | [((2903, 2913), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2911, 2913), False, 'import sys\n'), ((2136, 2152), 'os.path.exists', 'path.exists', (['dir'], {}), '(dir)\n', (2147, 2152), False, 'from os import path\n'), ((2166, 2182), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (2177, 2182), False, 'import os\n'), ((2582, 2603), 'numpy.asarray', 'numpy.asarray', (['data_i'], {}), '(data_i)\n', (2595, 2603), False, 'import numpy\n'), ((2623, 2650), 'PIL.Image.fromarray', 'Image.fromarray', (['data_array'], {}), '(data_array)\n', (2638, 2650), False, 'from PIL import Image\n'), ((3121, 3152), 'os.path.join', 'path.join', (['output_path', 'dataset'], {}), '(output_path, dataset)\n', (3130, 3152), False, 'from os import path\n')] |
import numpy as np
import pandas as pd
import yaml
def anomaly_score_example(source: np.array, reconstructed: np.array):
    """
    Score how anomalous the reconstruction error is.

    Per dimension, the score is a high percentile (module global
    ``anomaly_score_example_percentage``) of the mean-centred absolute
    reconstruction error. The final score is the harmonic mean of those
    per-dimension distances — over all dimensions when there are at most
    ``anomaly_distance_topn`` of them, otherwise over the top-n largest.

    :param source: original data, shape (n, d)
    :param reconstructed: reconstructed data, shape (n, d)
    :return: scalar anomaly score
    """
    _, d = source.shape
    # Centred absolute reconstruction error; one column per dimension
    residual = np.abs(source - reconstructed)
    residual = residual - np.mean(residual, axis=0)
    d_dis = np.percentile(residual, anomaly_score_example_percentage, axis=0)
    if d <= anomaly_distance_topn:
        # Harmonic mean over every dimension
        return d / np.sum(1 / d_dis)
    # Harmonic mean over only the top-n largest per-dimension distances
    largest = 1 / np.sort(d_dis)[-anomaly_distance_topn:]
    return anomaly_distance_topn / np.sum(largest)
def p_normalize(x: np.array):
    """
    Min-max normalise ``x``, padding the lower bound by 5% so the smallest
    value maps to a small positive number rather than exactly 0.

    :param x: data array
    :return: normalised array
    """
    pad = 0.05
    hi, lo = np.max(x), np.min(x)
    lo = lo * (1 - pad)
    return (x - lo) / (hi - lo)
def lesinn_score(incoming_data: np.array, historical_data: np.array):
    """
    Sampling confidence from the LeSiNN outlier score: the reciprocal of the
    outlier score is min-max normalised, so denser (less outlying) points get
    higher confidence.

    :param incoming_data: matrix shape=(n, d), n samples, d dimensions
    :param historical_data: matrix shape=(m, d), m time steps, d dimensions
    :return: confidence scores, shape=(n,)
    """
    from algorithm.lesinn import online_lesinn
    outlier_scores = online_lesinn(
        incoming_data, historical_data, random_state=random_state, t=lesinn_t,
        phi=lesinn_phi
    )
    return p_normalize(1 / outlier_scores)
def moving_average_score(
        incoming_data: np.array, historical_data: np.array
):
    """
    Sampling confidence from the moving-average deviation: points close to
    the moving average (small deviation) get confidence near 1, large
    deviations are pushed towards 0 by the 1/(1 + deviation) transform
    before min-max normalisation.

    :param incoming_data: matrix shape=(n, d), n samples, d dimensions
    :param historical_data: matrix shape=(m, d), m time steps, d dimensions
    :return: confidence scores, shape=(n,)
    """
    from algorithm.moving_average import online_moving_average
    deviation = online_moving_average(
        incoming_data,
        historical_data,
        moving_average_window,
        moving_average_stride
    )
    return p_normalize(1 / (1 + deviation))
def read_config(config: dict):
    """
    Initialise the module-level detector parameters from a config dictionary.

    All values are written into module globals so the scoring callbacks
    (anomaly_score_example, lesinn_score, moving_average_score) and run()
    can read them directly.

    :param config: config dictionary, please refer to detector-config.yml
    :return: None
    """
    global random_state, \
        workers, \
        lesinn_t, lesinn_phi, \
        moving_average_window, moving_average_stride, \
        rec_window, rec_stride, \
        det_window, det_stride, \
        anomaly_scoring, \
        sample_score_method, \
        cluster_threshold, \
        sample_rate, \
        latest_windows, \
        scale, rho, sigma, \
        retry_limit, \
        without_grouping, without_localize_sampling, \
        data_path, rb, re, cb, ce, header, rec_windows_per_cycle, \
        label_path, save_path, \
        anomaly_score_example_percentage, anomaly_distance_topn
    # Optional per-method tuning of the sample scoring functions
    if 'sample_score_method' in config.keys():
        sample_score_config = config['sample_score_method']
        if 'lesinn' in sample_score_config.keys():
            lesinn_t = int(sample_score_config['lesinn']['t'])
            lesinn_phi = int(sample_score_config['lesinn']['phi'])
        if 'moving_average' in sample_score_config.keys():
            moving_average_window = \
                int(sample_score_config['moving_average']['window'])
            moving_average_stride = \
                int(sample_score_config['moving_average']['stride'])
    # Optional tuning of the anomaly scoring function
    if 'anomaly_scoring' in config.keys():
        anomaly_scoring_config = config['anomaly_scoring']
        if 'anomaly_score_example' in anomaly_scoring_config.keys():
            anomaly_score_example_percentage = \
                int(anomaly_scoring_config['anomaly_score_example']
                    ['percentage'])
            if anomaly_score_example_percentage > 100 \
                    or anomaly_score_example_percentage < 0:
                raise Exception('percentage must be between 0 and 100')
            anomaly_distance_topn = \
                int(anomaly_scoring_config['anomaly_score_example']['topn'])
    if 'global' in config.keys():
        global_config = config['global']
        if 'random_state' in global_config.keys():
            global random_state
            random_state = int(global_config['random_state'])
    # Data layout: reconstruction/detection windowing and the input slice
    data_config = config['data']
    if 'reconstruct' in data_config.keys():
        data_rec_config = data_config['reconstruct']
        rec_window = int(data_rec_config['window'])
        rec_stride = int(data_rec_config['stride'])
    if 'detect' in data_config.keys():
        data_det_config = data_config['detect']
        det_window = int(data_det_config['window'])
        det_stride = int(data_det_config['stride'])
    data_path = data_config['path']
    label_path = data_config['label_path']
    save_path = data_config['save_path']
    header = data_config['header']
    rb, re = data_config['row_begin'], data_config['row_end']
    cb, ce = data_config['col_begin'], data_config['col_end']
    rec_windows_per_cycle = data_config['rec_windows_per_cycle']
    # Map the configured function names onto the actual callables;
    # unknown names fail fast rather than silently falling back.
    detector_config = config['detector_arguments']
    if detector_config['anomaly_scoring'] == 'anomaly_score_example':
        anomaly_scoring = anomaly_score_example
    else:
        raise Exception(
            'unknown config[detector][anomaly_scoring]: %s',
            detector_config['anomaly_scoring']
        )
    if detector_config['sample_score_method'] == 'lesinn_score':
        sample_score_method = lesinn_score
    elif detector_config['sample_score_method'] == 'moving_average_score':
        sample_score_method = moving_average_score
    else:
        raise Exception(
            'unknown config[detector][sample_score_method]: %s',
            detector_config['sample_score_method']
        )
    # Remaining scalar arguments for CSAnomalyDetector
    workers = int(detector_config['workers'])
    cluster_threshold = float(detector_config['cluster_threshold'])
    sample_rate = float(detector_config['sample_rate'])
    latest_windows = int(detector_config['latest_windows'])
    scale = float(detector_config['scale'])
    rho = float(detector_config['rho'])
    sigma = float(detector_config['sigma'])
    retry_limit = int(detector_config['retry_limit'])
    without_grouping = detector_config['without_grouping']
    without_localize_sampling = bool(
        detector_config['without_localize_sampling']
    )
def run(data: pd.DataFrame):
    """
    Reconstruct the input series with CSAnomalyDetector, score it, persist
    the reconstruction and scores, and print precision/recall/F1 against
    the ground-truth labels. Relies on the module globals set by read_config().

    :param data: input time series, one column per dimension
    """
    n, d = data.shape
    if n < rec_window * rec_windows_per_cycle:
        raise Exception('data point count less than 1 cycle')
    data = data.values
    # Normalise each dimension independently
    for i in range(d):
        data[:, i] = normalization(data[:, i])
    print(
        'expected samples per sample unit (recommended >10):',
        np.sqrt(2 * np.pi) * rho * scale * sigma * rec_window
    )
    detector = CSAnomalyDetector(
        workers=workers,
        cluster_threshold=cluster_threshold,
        sample_rate=sample_rate,
        sample_score_method=sample_score_method,
        distance=anomaly_scoring,
        scale=scale,
        rho=rho,
        sigma=sigma,
        random_state=random_state,
        retry_limit=retry_limit,
        without_grouping=without_grouping,
        without_localize_sampling=without_localize_sampling
    )
    rec, retries = detector.reconstruct(
        data, rec_window, rec_windows_per_cycle, rec_stride
    )
    score = detector.predict(
        data, rec, det_window, det_stride
    )
    print('retries:', retries)
    # Persist reconstruction and per-point anomaly scores next to save_path
    np.savetxt(save_path + '_rec.txt', rec, '%.6f', ',')
    np.savetxt(save_path + '_score.txt', score, '%.6f', ',')
    label = np.loadtxt(label_path, int, delimiter=',')
    # # Online choosing threshold
    # proba = sliding_anomaly_predict(score)
    # precision, recall, f1score = evaluate_result(proba, label[rb:re])
    # Best F-score
    precision, recall, f1score, _ = evaluation(label[rb:re], score)
    print('Precision: ', precision)
    print('Recall: ', recall)
    print('F1_score: ', f1score)
if __name__ == '__main__':
    # CLI entry point: parse the config path, load the YAML config, then
    # run the detector over the configured slice of the input CSV.
    import argparse
    import sys
    import os
    sys.path.append(os.path.abspath('..'))
    from detector import CSAnomalyDetector
    from utils import normalization
    from utils.metrics import sliding_anomaly_predict, evaluate_result, evaluation
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', type=str, help='config path')
    args = parser.parse_args()
    config = 'detector-config.yml'
    if args.config:
        config = args.config
    with open(config, 'r', encoding='utf8') as file:
        # BUGFIX: yaml.load() without a Loader is deprecated (and an error in
        # PyYAML >= 6) and can execute arbitrary tags; safe_load only builds
        # plain Python objects, which is all a config file needs.
        config_dict = yaml.safe_load(file)
    read_config(config_dict)
    run(pd.read_csv(data_path, header=header).iloc[rb:re, cb:ce])
| [
"yaml.load",
"numpy.abs",
"argparse.ArgumentParser",
"numpy.sum",
"pandas.read_csv",
"numpy.argsort",
"numpy.mean",
"algorithm.moving_average.online_moving_average",
"os.path.abspath",
"utils.normalization",
"numpy.savetxt",
"detector.CSAnomalyDetector",
"numpy.max",
"numpy.loadtxt",
"nu... | [((294, 308), 'numpy.zeros', 'np.zeros', (['(d,)'], {}), '((d,))\n', (302, 308), True, 'import numpy as np\n'), ((6669, 7024), 'detector.CSAnomalyDetector', 'CSAnomalyDetector', ([], {'workers': 'workers', 'cluster_threshold': 'cluster_threshold', 'sample_rate': 'sample_rate', 'sample_score_method': 'sample_score_method', 'distance': 'anomaly_scoring', 'scale': 'scale', 'rho': 'rho', 'sigma': 'sigma', 'random_state': 'random_state', 'retry_limit': 'retry_limit', 'without_grouping': 'without_grouping', 'without_localize_sampling': 'without_localize_sampling'}), '(workers=workers, cluster_threshold=cluster_threshold,\n sample_rate=sample_rate, sample_score_method=sample_score_method,\n distance=anomaly_scoring, scale=scale, rho=rho, sigma=sigma,\n random_state=random_state, retry_limit=retry_limit, without_grouping=\n without_grouping, without_localize_sampling=without_localize_sampling)\n', (6686, 7024), False, 'from detector import CSAnomalyDetector\n'), ((7330, 7382), 'numpy.savetxt', 'np.savetxt', (["(save_path + '_rec.txt')", 'rec', '"""%.6f"""', '""","""'], {}), "(save_path + '_rec.txt', rec, '%.6f', ',')\n", (7340, 7382), True, 'import numpy as np\n'), ((7387, 7443), 'numpy.savetxt', 'np.savetxt', (["(save_path + '_score.txt')", 'score', '"""%.6f"""', '""","""'], {}), "(save_path + '_score.txt', score, '%.6f', ',')\n", (7397, 7443), True, 'import numpy as np\n'), ((7456, 7498), 'numpy.loadtxt', 'np.loadtxt', (['label_path', 'int'], {'delimiter': '""","""'}), "(label_path, int, delimiter=',')\n", (7466, 7498), True, 'import numpy as np\n'), ((7712, 7743), 'utils.metrics.evaluation', 'evaluation', (['label[rb:re]', 'score'], {}), '(label[rb:re], score)\n', (7722, 7743), False, 'from utils.metrics import sliding_anomaly_predict, evaluate_result, evaluation\n'), ((8143, 8168), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8166, 8168), False, 'import argparse\n'), ((346, 388), 'numpy.abs', 'np.abs', (['(source[:, i] - 
reconstructed[:, i])'], {}), '(source[:, i] - reconstructed[:, i])\n', (352, 388), True, 'import numpy as np\n'), ((441, 493), 'numpy.percentile', 'np.percentile', (['dis', 'anomaly_score_example_percentage'], {}), '(dis, anomaly_score_example_percentage)\n', (454, 493), True, 'import numpy as np\n'), ((670, 682), 'numpy.sum', 'np.sum', (['topn'], {}), '(topn)\n', (676, 682), True, 'import numpy as np\n'), ((815, 824), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (821, 824), True, 'import numpy as np\n'), ((826, 835), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (832, 835), True, 'import numpy as np\n'), ((6484, 6509), 'utils.normalization', 'normalization', (['data[:, i]'], {}), '(data[:, i])\n', (6497, 6509), False, 'from utils import normalization\n'), ((7943, 7964), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (7958, 7964), False, 'import os\n'), ((8431, 8446), 'yaml.load', 'yaml.load', (['file'], {}), '(file)\n', (8440, 8446), False, 'import yaml\n'), ((409, 421), 'numpy.mean', 'np.mean', (['dis'], {}), '(dis)\n', (416, 421), True, 'import numpy as np\n'), ((548, 565), 'numpy.sum', 'np.sum', (['(1 / d_dis)'], {}), '(1 / d_dis)\n', (554, 565), True, 'import numpy as np\n'), ((1282, 1387), 'algorithm.lesinn.online_lesinn', 'online_lesinn', (['incoming_data', 'historical_data'], {'random_state': 'random_state', 't': 'lesinn_t', 'phi': 'lesinn_phi'}), '(incoming_data, historical_data, random_state=random_state, t=\n lesinn_t, phi=lesinn_phi)\n', (1295, 1387), False, 'from algorithm.lesinn import online_lesinn\n'), ((587, 604), 'numpy.argsort', 'np.argsort', (['d_dis'], {}), '(d_dis)\n', (597, 604), True, 'import numpy as np\n'), ((1827, 1930), 'algorithm.moving_average.online_moving_average', 'online_moving_average', (['incoming_data', 'historical_data', 'moving_average_window', 'moving_average_stride'], {}), '(incoming_data, historical_data, moving_average_window,\n moving_average_stride)\n', (1848, 1930), False, 'from 
algorithm.moving_average import online_moving_average\n'), ((8489, 8526), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'header': 'header'}), '(data_path, header=header)\n', (8500, 8526), True, 'import pandas as pd\n'), ((6593, 6611), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (6600, 6611), True, 'import numpy as np\n')] |
from vidmapy.kurucz import parameters
import numpy as np
def test_default_parameters():
    """Default Parameters should carry the solar reference values."""
    param = parameters.Parameters()
    assert param.teff == 5777
    assert param.logg == 4.44
    assert param.metallicity == 0.0
    assert param.microturbulence == 2.0
def test_chemical_composition():
    """Default chemical composition is addressable by element symbol."""
    param = parameters.Parameters()
    assert param.chemical_composition["H"] == 0.9204
    assert param.chemical_composition["He"] == 0.07834
    assert param.chemical_composition["C"] == -3.52
    assert param.chemical_composition["N"] == -4.12
    assert param.chemical_composition["O"] == -3.21
    assert param.chemical_composition["Si"] == -4.49
def test_change_chemical_composition():
    """Writing an abundance by symbol or atomic number updates both views."""
    param = parameters.Parameters()
    # "C" and atomic number 6 refer to the same entry
    param.chemical_composition["C"] = -3.40
    assert param.chemical_composition["C"] == -3.40
    assert param.chemical_composition[6] == -3.40
    # Same aliasing the other way round, via atomic number 7 / "N"
    param.chemical_composition[7] = -3.8
    assert param.chemical_composition["N"] == -3.8
    assert param.chemical_composition[7] == -3.8
def test_iterate_composition():
    """Iterating a composition yields keys usable for lookup; element 100 maps to None."""
    param = parameters.Parameters()
    x = {k: param.chemical_composition[k] for k in param.chemical_composition}
    assert x[1] == 0.9204
    # "is None" is the idiomatic (and PEP 8 mandated) identity check; "== None"
    # can be fooled by a custom __eq__.
    assert x[100] is None
def test_update_composition_from_dict():
    """update_chemical_composition should apply every entry of the dict."""
    param = parameters.Parameters()
    new_composition = {3: -11.92, 4: -13.64, 5: -9.10, 6: -4.52, 7: -3.12, 8: -3.21}
    param.update_chemical_composition(new_composition)
    # BUGFIX: np.alltrue was deprecated and removed in NumPy 2.0; np.all is
    # the supported spelling with identical semantics.
    assert np.all([new_composition[k] == param.chemical_composition[k] for k in new_composition])
def test_set_paramters():
    """All four scalar parameters are plain read/write attributes.

    NOTE(review): "paramters" is a typo, kept so pytest keeps collecting the
    same test id.
    """
    param = parameters.Parameters()
    param.teff = 3000.
    assert param.teff == 3000.
    param.logg = 4.0
    assert param.logg == 4.0
    param.metallicity = 1.0
    assert param.metallicity == 1.0
    param.microturbulence = 3.
    assert param.microturbulence == 3.0
def test_equals():
    """Equality covers the scalar parameters and the chemical composition."""
    param1 = parameters.Parameters()
    param2 = parameters.Parameters()
    param3 = parameters.Parameters()
    param4 = parameters.Parameters()
    # Two fresh defaults compare equal
    assert param1 == param2
    # Changing a scalar parameter breaks equality
    param3.teff = 10000.
    assert param1 != param3
    # Changing the composition alone also breaks equality
    param4.chemical_composition["H"] = 1
    assert param1 != param4
| [
"vidmapy.kurucz.parameters.Parameters",
"numpy.alltrue"
] | [((101, 124), 'vidmapy.kurucz.parameters.Parameters', 'parameters.Parameters', ([], {}), '()\n', (122, 124), False, 'from vidmapy.kurucz import parameters\n'), ((308, 331), 'vidmapy.kurucz.parameters.Parameters', 'parameters.Parameters', ([], {}), '()\n', (329, 331), False, 'from vidmapy.kurucz import parameters\n'), ((703, 726), 'vidmapy.kurucz.parameters.Parameters', 'parameters.Parameters', ([], {}), '()\n', (724, 726), False, 'from vidmapy.kurucz import parameters\n'), ((1070, 1093), 'vidmapy.kurucz.parameters.Parameters', 'parameters.Parameters', ([], {}), '()\n', (1091, 1093), False, 'from vidmapy.kurucz import parameters\n'), ((1279, 1302), 'vidmapy.kurucz.parameters.Parameters', 'parameters.Parameters', ([], {}), '()\n', (1300, 1302), False, 'from vidmapy.kurucz import parameters\n'), ((1463, 1559), 'numpy.alltrue', 'np.alltrue', (['[(new_composition[k] == param.chemical_composition[k]) for k in new_composition\n ]'], {}), '([(new_composition[k] == param.chemical_composition[k]) for k in\n new_composition])\n', (1473, 1559), True, 'import numpy as np\n'), ((1593, 1616), 'vidmapy.kurucz.parameters.Parameters', 'parameters.Parameters', ([], {}), '()\n', (1614, 1616), False, 'from vidmapy.kurucz import parameters\n'), ((1893, 1916), 'vidmapy.kurucz.parameters.Parameters', 'parameters.Parameters', ([], {}), '()\n', (1914, 1916), False, 'from vidmapy.kurucz import parameters\n'), ((1930, 1953), 'vidmapy.kurucz.parameters.Parameters', 'parameters.Parameters', ([], {}), '()\n', (1951, 1953), False, 'from vidmapy.kurucz import parameters\n'), ((1967, 1990), 'vidmapy.kurucz.parameters.Parameters', 'parameters.Parameters', ([], {}), '()\n', (1988, 1990), False, 'from vidmapy.kurucz import parameters\n'), ((2004, 2027), 'vidmapy.kurucz.parameters.Parameters', 'parameters.Parameters', ([], {}), '()\n', (2025, 2027), False, 'from vidmapy.kurucz import parameters\n')] |
import numpy as np
def generate_sine_wave(freq, duration, amp=0.5, phase_offset=0, sample_rate=44100):
    """Synthesise a float32 sine tone of ``duration`` seconds at ``freq`` Hz.

    Three extra cycles are generated and then trimmed (split between the head
    and the tail according to ``phase_offset``, a fraction of one cycle), so
    the returned clip starts at the requested phase.

    :param freq: tone frequency in Hz
    :param duration: clip length in seconds
    :param amp: peak amplitude
    :param phase_offset: phase shift as a fraction of one cycle (0..1)
    :param sample_rate: samples per second
    :return: float32 array of len sample_rate * duration samples
    """
    cycles_per_sample = freq / sample_rate
    samples_per_cycle = sample_rate / freq
    # Pad with three extra cycles; the surplus is sliced away below
    total_samples = (sample_rate * duration) + (3 * samples_per_cycle)
    wave = np.sin(2 * np.pi * np.arange(total_samples) * cycles_per_sample)
    wave *= amp
    head = int(samples_per_cycle + (samples_per_cycle * phase_offset))
    tail = int(samples_per_cycle + (samples_per_cycle * (1 - phase_offset)))
    return wave.astype(np.float32)[head:-tail]
| [
"numpy.arange"
] | [((300, 322), 'numpy.arange', 'np.arange', (['num_samples'], {}), '(num_samples)\n', (309, 322), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import dct
def hann_window(N):
    """
    Return the N-point Hann window 0.5*(1 - cos(2*pi*n/N)) for n = 0..N-1
    """
    samples = np.arange(N)
    return 0.5 * (1 - np.cos(2 * np.pi * samples / N))

def specgram(x, win_length, hop_length, win_fn = hann_window):
    """
    Compute the non-redundant STFT magnitudes

    Parameters
    ----------
    x: ndarray(N)
        Full audio clip of N samples
    win_length: int
        Window length to use in STFT
    hop_length: int
        Hop length to use in STFT
    win_fn: int -> ndarray(N)
        Window function

    Returns
    -------
    ndarray(floor(win_length/2)+1, nwin)
        Real, nonnegative FFT amplitudes; one column per time window
    """
    n_windows = int(np.ceil((len(x) - win_length) / hop_length)) + 1
    n_freqs = int(np.floor(win_length / 2)) + 1
    # Rows are frequency bins, columns are windows moving forward in time
    S = np.zeros((n_freqs, n_windows))
    for col in range(n_windows):
        # Pull out (and zero-pad, if the clip runs out) the audio chunk
        chunk = x[hop_length * col:hop_length * col + win_length]
        if len(chunk) < win_length:
            chunk = np.concatenate((chunk, np.zeros(win_length - len(chunk))))
        # Taper, transform, and keep only the non-redundant amplitudes
        tapered = win_fn(win_length) * chunk
        S[:, col] = np.abs(np.fft.fft(tapered))[0:n_freqs]
    return S
def get_mel_spectrogram(K, win_length, sr, min_freq, max_freq, n_bins):
    """
    Build a mel matrix of triangular filters; multiplying a linearly-spaced
    spectrogram on the left by it yields a mel-spaced spectrogram.

    Parameters
    ----------
    K: int
        Number of linear frequency bins
    win_length: int
        Window length used for the STFT
    sr: int
        The sample rate used to generate the spectrogram
    min_freq: int
        The center of the minimum mel bin, in hz
    max_freq: int
        The center of the maximum mel bin, in hz
    n_bins: int
        The number of mel bins to use

    Returns
    -------
    ndarray(n_bins, K)
        One triangular filter per row
    """
    # Log-spaced band edges, converted from hz to linear bin indices
    centers = np.logspace(np.log10(min_freq), np.log10(max_freq), n_bins + 2) * win_length / sr
    centers = np.array(np.round(centers), dtype=int)
    Mel = np.zeros((n_bins, K))
    for row in range(n_bins):
        left = centers[row]
        center = centers[row + 1]
        # Degenerate (rounded-together) edges are widened to at least one bin
        if center == left:
            center = left + 1
        right = centers[row + 2]
        if right <= center:
            right = center + 1
        Mel[row, left:center] = np.linspace(0, 1, center - left)
        Mel[row, center:right] = np.linspace(1, 0, right - center)
    return Mel
def get_mfcc(x, sr, win_length=2048, hop_length=512, min_freq=80, max_freq=8000, n_bins=100, n_coeffs=20, amin=1e-5):
    """
    Compute Mel-frequency cepstral coefficients of an audio clip.

    Parameters
    ----------
    x: ndarray(N)
        Audio samples
    sr: int
        Sample rate
    win_length: int
        Window length to use in STFT
    hop_length: int
        Hop length to use in STFT
    min_freq: float
        Minimum frequency, in hz, to use in mel-spaced bins
    max_freq: float
        Maximum frequency, in hz, to use in mel-spaced bins
    n_bins: int
        Number of bins to take between min_freq and max_freq
    n_coeffs: int
        Number of DCT coefficients to use in the summary
    amin: float
        Minimum threshold for integrated energy

    Returns
    -------
    ndarray(n_coeffs, nwin)
        The first n_coeffs cepstral coefficients per analysis window
    """
    # Power spectrogram (specgram returns magnitudes, so square them).
    S = specgram(x, win_length, hop_length)
    S = np.abs(S)**2
    # BUGFIX: the mel matrix was previously built as
    # get_mel_spectrogram(S.shape[0], sr, 80, 8000, 40) -- one argument
    # short of the 6-parameter signature (win_length missing), which
    # raises a TypeError, and it ignored min_freq/max_freq/n_bins.
    Mel = get_mel_spectrogram(S.shape[0], win_length, sr,
                              min_freq, max_freq, n_bins)
    mfcc = Mel.dot(S)
    # Floor tiny energies before the log to avoid -inf.
    mfcc[mfcc < amin] = amin
    mfcc = np.log10(mfcc)
    # DCT along the frequency axis decorrelates the log-mel bands.
    mfcc = dct(mfcc, axis=0)
    return mfcc[0:n_coeffs, :]
| [
"numpy.abs",
"numpy.ceil",
"numpy.fft.fft",
"numpy.floor",
"scipy.fftpack.dct",
"numpy.zeros",
"numpy.arange",
"numpy.linspace",
"numpy.log10",
"numpy.round"
] | [((924, 943), 'numpy.zeros', 'np.zeros', (['(K, nwin)'], {}), '((K, nwin))\n', (932, 943), True, 'import numpy as np\n'), ((2316, 2337), 'numpy.zeros', 'np.zeros', (['(n_bins, K)'], {}), '((n_bins, K))\n', (2324, 2337), True, 'import numpy as np\n'), ((3616, 3630), 'numpy.log10', 'np.log10', (['mfcc'], {}), '(mfcc)\n', (3624, 3630), True, 'import numpy as np\n'), ((3642, 3659), 'scipy.fftpack.dct', 'dct', (['mfcc'], {'axis': '(0)'}), '(mfcc, axis=0)\n', (3645, 3659), False, 'from scipy.fftpack import dct\n'), ((2279, 2293), 'numpy.round', 'np.round', (['bins'], {}), '(bins)\n', (2287, 2293), True, 'import numpy as np\n'), ((2531, 2542), 'numpy.zeros', 'np.zeros', (['K'], {}), '(K)\n', (2539, 2542), True, 'import numpy as np\n'), ((2564, 2590), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(i2 - i1)'], {}), '(0, 1, i2 - i1)\n', (2575, 2590), True, 'import numpy as np\n'), ((2610, 2636), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', '(i3 - i2)'], {}), '(1, 0, i3 - i2)\n', (2621, 2636), True, 'import numpy as np\n'), ((3481, 3490), 'numpy.abs', 'np.abs', (['S'], {}), '(S)\n', (3487, 3490), True, 'import numpy as np\n'), ((712, 750), 'numpy.ceil', 'np.ceil', (['((N - win_length) / hop_length)'], {}), '((N - win_length) / hop_length)\n', (719, 750), True, 'import numpy as np\n'), ((762, 786), 'numpy.floor', 'np.floor', (['(win_length / 2)'], {}), '(win_length / 2)\n', (770, 786), True, 'import numpy as np\n'), ((1451, 1465), 'numpy.fft.fft', 'np.fft.fft', (['xj'], {}), '(xj)\n', (1461, 1465), True, 'import numpy as np\n'), ((2195, 2213), 'numpy.log10', 'np.log10', (['min_freq'], {}), '(min_freq)\n', (2203, 2213), True, 'import numpy as np\n'), ((2215, 2233), 'numpy.log10', 'np.log10', (['max_freq'], {}), '(max_freq)\n', (2223, 2233), True, 'import numpy as np\n'), ((201, 213), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (210, 213), True, 'import numpy as np\n')] |
from nub import timeit
import iris
from iris.time import PartialDateTime
# Create 5 years of hourly data => ~45k data points
# Similar to: http://hydromet-thredds.princeton.edu:9000/thredds/dodsC/MonitoringStations/butler.nc
def setup():
    """Build an iris Cube of ~5 years of hourly random samples on a time axis."""
    from iris.coords import DimCoord
    from iris.cube import Cube
    import numpy as np
    # Hourly stamps (seconds) covering roughly 2011-03-01 .. 2016-05-06.
    stamps = np.arange(1299002400, 1462554000, 3600, dtype='f8')
    time_coord = DimCoord(stamps, 'time', units='seconds since 1970-01-01')
    samples = np.random.random(len(stamps))
    return Cube(samples, dim_coords_and_dims=[(time_coord, 0)])
# Benchmark two time-constrained extractions from the hourly cube.
if __name__ == '__main__':
    cube = setup()
    iris.FUTURE.cell_datetime_objects = True

    with timeit('Extract month'):
        start = PartialDateTime(year=2016, month=1)
        end = PartialDateTime(year=2016, month=2)
        in_month = iris.Constraint(time=lambda cell: start <= cell < end)
        a_month = cube.extract(in_month)

    with timeit('Extract week'):
        start = PartialDateTime(year=2015, month=7, day=6)
        end = PartialDateTime(year=2015, month=7, day=13)
        in_week = iris.Constraint(time=lambda cell: start <= cell < end)
        a_week = cube.extract(in_week)
| [
"iris.time.PartialDateTime",
"iris.Constraint",
"iris.coords.DimCoord",
"numpy.arange",
"iris.cube.Cube",
"nub.timeit"
] | [((344, 395), 'numpy.arange', 'np.arange', (['(1299002400)', '(1462554000)', '(3600)'], {'dtype': '"""f8"""'}), "(1299002400, 1462554000, 3600, dtype='f8')\n", (353, 395), True, 'import numpy as np\n'), ((407, 464), 'iris.coords.DimCoord', 'DimCoord', (['times', '"""time"""'], {'units': '"""seconds since 1970-01-01"""'}), "(times, 'time', units='seconds since 1970-01-01')\n", (415, 464), False, 'from iris.coords import DimCoord\n'), ((516, 559), 'iris.cube.Cube', 'Cube', (['data'], {'dim_coords_and_dims': '[(time, 0)]'}), '(data, dim_coords_and_dims=[(time, 0)])\n', (520, 559), False, 'from iris.cube import Cube\n'), ((680, 703), 'nub.timeit', 'timeit', (['"""Extract month"""'], {}), "('Extract month')\n", (686, 703), False, 'from nub import timeit\n'), ((721, 756), 'iris.time.PartialDateTime', 'PartialDateTime', ([], {'year': '(2016)', 'month': '(1)'}), '(year=2016, month=1)\n', (736, 756), False, 'from iris.time import PartialDateTime\n'), ((771, 806), 'iris.time.PartialDateTime', 'PartialDateTime', ([], {'year': '(2016)', 'month': '(2)'}), '(year=2016, month=2)\n', (786, 806), False, 'from iris.time import PartialDateTime\n'), ((823, 877), 'iris.Constraint', 'iris.Constraint', ([], {'time': '(lambda cell: start <= cell < end)'}), '(time=lambda cell: start <= cell < end)\n', (838, 877), False, 'import iris\n'), ((926, 948), 'nub.timeit', 'timeit', (['"""Extract week"""'], {}), "('Extract week')\n", (932, 948), False, 'from nub import timeit\n'), ((966, 1008), 'iris.time.PartialDateTime', 'PartialDateTime', ([], {'year': '(2015)', 'month': '(7)', 'day': '(6)'}), '(year=2015, month=7, day=6)\n', (981, 1008), False, 'from iris.time import PartialDateTime\n'), ((1023, 1066), 'iris.time.PartialDateTime', 'PartialDateTime', ([], {'year': '(2015)', 'month': '(7)', 'day': '(13)'}), '(year=2015, month=7, day=13)\n', (1038, 1066), False, 'from iris.time import PartialDateTime\n'), ((1082, 1136), 'iris.Constraint', 'iris.Constraint', ([], {'time': '(lambda cell: start 
<= cell < end)'}), '(time=lambda cell: start <= cell < end)\n', (1097, 1136), False, 'import iris\n')] |
#!python
__author__ = 'ashwin'
import pycuda.driver as drv
import pycuda.tools
import pycuda.autoinit
from pycuda.compiler import SourceModule
import numpy as np
import scipy.misc as scm
import matplotlib.pyplot as p
mod = SourceModule \
(
"""
#include<stdio.h>
#define INDEX(a, b) a*256+b
__global__ void rgb2gray(float *dest,float *r_img, float *g_img, float *b_img)
{
unsigned int idx = threadIdx.x+(blockIdx.x*(blockDim.x*blockDim.y));
unsigned int a = idx/256;
unsigned int b = idx%256;
dest[INDEX(a, b)] = (0.299*r_img[INDEX(a, b)]+0.587*g_img[INDEX(a, b)]+0.114*b_img[INDEX(a, b)]);
}
"""
)
# Load the source image as float32 and split it into flattened,
# column-major (Fortran-order) colour planes to match INDEX() in the kernel.
a = scm.imread('Lenna.png').astype(np.float32)
print(a)
r_img, g_img, b_img = (a[:, :, c].reshape(65536, order='F') for c in range(3))
# The result buffer aliases the red plane; drv.Out copies the kernel
# output back into it after the launch completes.
dest = r_img
print(dest)
rgb2gray = mod.get_function("rgb2gray")
rgb2gray(drv.Out(dest), drv.In(r_img), drv.In(g_img), drv.In(b_img),
         block=(1024, 1, 1), grid=(64, 1, 1))
dest = np.reshape(dest, (256, 256), order='F')
p.imshow(dest)
p.show()
| [
"pycuda.compiler.SourceModule",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"pycuda.driver.In",
"numpy.reshape",
"pycuda.driver.Out",
"scipy.misc.imread"
] | [((227, 617), 'pycuda.compiler.SourceModule', 'SourceModule', (['"""\n#include<stdio.h>\n#define INDEX(a, b) a*256+b\n\n__global__ void rgb2gray(float *dest,float *r_img, float *g_img, float *b_img)\n{\n\nunsigned int idx = threadIdx.x+(blockIdx.x*(blockDim.x*blockDim.y));\n\n unsigned int a = idx/256;\n unsigned int b = idx%256;\n\ndest[INDEX(a, b)] = (0.299*r_img[INDEX(a, b)]+0.587*g_img[INDEX(a, b)]+0.114*b_img[INDEX(a, b)]);\n\n\n}\n\n"""'], {}), '(\n """\n#include<stdio.h>\n#define INDEX(a, b) a*256+b\n\n__global__ void rgb2gray(float *dest,float *r_img, float *g_img, float *b_img)\n{\n\nunsigned int idx = threadIdx.x+(blockIdx.x*(blockDim.x*blockDim.y));\n\n unsigned int a = idx/256;\n unsigned int b = idx%256;\n\ndest[INDEX(a, b)] = (0.299*r_img[INDEX(a, b)]+0.587*g_img[INDEX(a, b)]+0.114*b_img[INDEX(a, b)]);\n\n\n}\n\n"""\n )\n', (239, 617), False, 'from pycuda.compiler import SourceModule\n'), ((994, 1033), 'numpy.reshape', 'np.reshape', (['dest', '(256, 256)'], {'order': '"""F"""'}), "(dest, (256, 256), order='F')\n", (1004, 1033), True, 'import numpy as np\n'), ((1034, 1048), 'matplotlib.pyplot.imshow', 'p.imshow', (['dest'], {}), '(dest)\n', (1042, 1048), True, 'import matplotlib.pyplot as p\n'), ((1049, 1057), 'matplotlib.pyplot.show', 'p.show', ([], {}), '()\n', (1055, 1057), True, 'import matplotlib.pyplot as p\n'), ((893, 906), 'pycuda.driver.Out', 'drv.Out', (['dest'], {}), '(dest)\n', (900, 906), True, 'import pycuda.driver as drv\n'), ((908, 921), 'pycuda.driver.In', 'drv.In', (['r_img'], {}), '(r_img)\n', (914, 921), True, 'import pycuda.driver as drv\n'), ((923, 936), 'pycuda.driver.In', 'drv.In', (['g_img'], {}), '(g_img)\n', (929, 936), True, 'import pycuda.driver as drv\n'), ((937, 950), 'pycuda.driver.In', 'drv.In', (['b_img'], {}), '(b_img)\n', (943, 950), True, 'import pycuda.driver as drv\n'), ((634, 657), 'scipy.misc.imread', 'scm.imread', (['"""Lenna.png"""'], {}), "('Lenna.png')\n", (644, 657), True, 'import scipy.misc as scm\n')] 
|
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
# Load CIFAR-10 and scale pixel intensities into [0, 1].
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0

class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

# Small conv-net: three conv blocks followed by a dense classifier head
# that emits raw logits for the ten classes.
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10),
])

# from_logits=True because the final Dense layer has no softmax.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

history = model.fit(train_images, train_labels, epochs=10,
                    validation_data=(test_images, test_labels))

# Learning curves: train vs. validation accuracy per epoch.
plt.figure()
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')

test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print(test_acc)

# Per-class accuracy on the test set.
import numpy as np

saidas = model.predict(test_images)
labels_out = np.argmax(saidas, axis=1)
pcts = []
for classe in range(0, 10):
    rows_for_class = np.where(test_labels == classe)[0]
    correct_rows = np.where(labels_out[rows_for_class] == classe)[0]
    fraction_correct = len(correct_rows) / len(rows_for_class)
    pcts.append(fraction_correct * 100)
print('Porcentagens')
for i in range(0,10):
print('%s -> %.2f %%' %(class_names[i],pcts[i])) | [
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"numpy.argmax",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.legend",
"tensorflow.keras.datasets.cifar10.load_... | [((171, 199), 'tensorflow.keras.datasets.cifar10.load_data', 'datasets.cifar10.load_data', ([], {}), '()\n', (197, 199), False, 'from tensorflow.keras import datasets, layers, models\n'), ((791, 810), 'tensorflow.keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (808, 810), False, 'from tensorflow.keras import datasets, layers, models\n'), ((1489, 1501), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1499, 1501), True, 'import matplotlib.pyplot as plt\n'), ((1502, 1557), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['accuracy']"], {'label': '"""accuracy"""'}), "(history.history['accuracy'], label='accuracy')\n", (1510, 1557), True, 'import matplotlib.pyplot as plt\n'), ((1558, 1621), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_accuracy']"], {'label': '"""val_accuracy"""'}), "(history.history['val_accuracy'], label='val_accuracy')\n", (1566, 1621), True, 'import matplotlib.pyplot as plt\n'), ((1624, 1643), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (1634, 1643), True, 'import matplotlib.pyplot as plt\n'), ((1644, 1666), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (1654, 1666), True, 'import matplotlib.pyplot as plt\n'), ((1667, 1685), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.5, 1]'], {}), '([0.5, 1])\n', (1675, 1685), True, 'import matplotlib.pyplot as plt\n'), ((1686, 1715), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (1696, 1715), True, 'import matplotlib.pyplot as plt\n'), ((1909, 1934), 'numpy.argmax', 'np.argmax', (['saidas'], {'axis': '(1)'}), '(saidas, axis=1)\n', (1918, 1934), True, 'import numpy as np\n'), ((821, 890), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'input_shape': '(32, 32, 3)'}), "(32, (3, 3), activation='relu', input_shape=(32, 32, 3))\n", (834, 
890), False, 'from tensorflow.keras import datasets, layers, models\n'), ((902, 929), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (921, 929), False, 'from tensorflow.keras import datasets, layers, models\n'), ((941, 985), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (954, 985), False, 'from tensorflow.keras import datasets, layers, models\n'), ((997, 1024), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (1016, 1024), False, 'from tensorflow.keras import datasets, layers, models\n'), ((1036, 1080), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (1049, 1080), False, 'from tensorflow.keras import datasets, layers, models\n'), ((1092, 1108), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (1106, 1108), False, 'from tensorflow.keras import datasets, layers, models\n'), ((1120, 1155), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (1132, 1155), False, 'from tensorflow.keras import datasets, layers, models\n'), ((1167, 1183), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {}), '(10)\n', (1179, 1183), False, 'from tensorflow.keras import datasets, layers, models\n'), ((1262, 1325), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (1307, 1325), True, 'import tensorflow as tf\n'), ((1986, 2017), 'numpy.where', 'np.where', (['(test_labels == classe)'], {}), '(test_labels == classe)\n', (1994, 2017), True, 'import numpy as np\n'), ((2036, 2075), 'numpy.where', 'np.where', (['(labels_out[indices] == classe)'], {}), '(labels_out[indices] == classe)\n', (2044, 2075), True, 
'import numpy as np\n')] |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pptx import Presentation
from pptx.util import Inches
from datetime import datetime
from skimage import color
import glob
import cv2
import numpy as np
from io import BytesIO
from tqdm.autonotebook import tqdm
import PIL.Image as Image
# +
# Metadata rendered on the title slide of the generated deck.
title = "Epi/stroma segmentation"
author = "<NAME>"
comments = "data and code taken from blog andrewjanowczyk.com "
date = datetime.today()
pptxfname = "epistroma_results.pptx"
# Only images that have an associated mask get a slide, so enumerate masks.
mask_files = glob.glob('./masks/*.png')
# +
# Build the presentation shell: square 10x10-inch slides.
prs = Presentation()
prs.slide_width = Inches(10)
prs.slide_height = Inches(10)

cover_layout = prs.slide_layouts[1]
slide = prs.slides.add_slide(cover_layout)

# The first slide carries the run metadata (title plus date/author/comments).
slide.placeholders[0].text = title
meta_frame = slide.placeholders[1].text_frame
meta_frame.text = f'Date: {date}\n'
meta_frame.text += f"Author: {author}\n"
meta_frame.text += f"Comments: {comments}\n"
# -
#wrapper function to add an image as a byte stream to a slide
#note that this is in place of having to save output directly to disk, and can be used in dynamic settings as well
def addimagetoslide(slide, img, left, top, height, width, resize=.5):
    """Shrink *img* and attach it to *slide* as a PNG picture.

    The image is streamed through an in-memory buffer instead of being
    written to disk, and downscaled first so the final pptx stays small.
    """
    shrunk = cv2.resize(img, None, fx=resize, fy=resize,
                        interpolation=cv2.INTER_CUBIC)
    with BytesIO() as stream:
        Image.fromarray(shrunk).save(stream, format="PNG")
        slide.shapes.add_picture(stream, left, top, height, width)
#helper function to blend two images
def blend2Images(img, mask):
    """Fuse *img* and *mask* into one RGB uint8 image.

    The (grayscale) image drives the green channel while the mask drives
    red and blue, so mask-only pixels show magenta and agreement shows
    white/green. RGB inputs are converted to grayscale first.
    """
    if img.ndim == 3:
        img = color.rgb2gray(img)
    if mask.ndim == 3:
        mask = color.rgb2gray(mask)
    # Promote to float with an explicit channel axis; boolean arrays
    # cannot be scaled directly.
    g = img[:, :, None] * 1.0
    rb = mask[:, :, None] * 1.0
    fused = np.concatenate((rb, g, rb), 2) * 255
    return fused.astype('uint8')
# +
# One slide per mask: original / mask / model output / fused overlay in the
# four quadrants, plus a textbox of simple agreement metrics.
for mask_fname in tqdm(mask_files):
    blank_slide_layout = prs.slide_layouts[0]
    slide = prs.slides.add_slide(blank_slide_layout)
    # Derive the sibling image and model-output paths from the mask path.
    orig_fname=mask_fname.replace("./masks","./imgs").replace("_mask.png",".tif")
    output_fname=mask_fname.replace("./masks","./output").replace("_mask.png","_class.png")
    # Top-left: original image (OpenCV loads BGR, so convert to RGB).
    img = cv2.cvtColor(cv2.imread(orig_fname),cv2.COLOR_BGR2RGB)
    addimagetoslide(slide, img, Inches(0),Inches(0),Inches(5),Inches(5))
    # Top-right: ground-truth mask.
    mask = cv2.cvtColor(cv2.imread(mask_fname),cv2.COLOR_BGR2RGB)
    addimagetoslide(slide, mask, Inches(5),Inches(0),Inches(5),Inches(5))
    # Bottom-right: model output.
    output = cv2.cvtColor(cv2.imread(output_fname),cv2.COLOR_BGR2RGB)
    addimagetoslide(slide, output, Inches(5),Inches(5),Inches(5),Inches(5))
    # Bottom-left: fused output/mask overlay for quick visual diffing.
    addimagetoslide(slide,blend2Images(output,mask), Inches(0),Inches(5),Inches(5),Inches(5))
    # Pixel-level TP/FP/TN/FN rates.
    # NOTE(review): any non-zero value counts as positive, which assumes
    # binary masks/outputs; rates are computed over all RGB channels.
    txBox = slide.shapes.add_textbox(Inches(10), Inches(0),Inches(4),Inches(4) )
    tf = txBox.text_frame
    tf.text = f"{orig_fname}\n"
    tf.text += f"Overall Pixel Agreement: {(output==mask).mean():.4f}\n"
    tf.text += f"True Positive Rate: {(mask[output>0]>0).sum()/(output>0).sum():.4f}\n"
    tf.text += f"False Positive Rate: {(mask[output==0]>0).sum()/(output==0).sum():.4f}\n"
    tf.text += f"True Negative Rate: {(mask[output==0]==0).sum()/(output==0).sum():.4f}\n"
    tf.text += f"False Negative Rate: {(mask[output>0]==0).sum()/(output>0).sum():.4f}\n"
# Nothing is on disk until save() -- write the finished deck once, at the end.
prs.save(pptxfname)
| [
"io.BytesIO",
"skimage.color.rgb2gray",
"datetime.datetime.today",
"numpy.concatenate",
"pptx.Presentation",
"tqdm.autonotebook.tqdm",
"cv2.imread",
"pptx.util.Inches",
"glob.glob",
"PIL.Image.fromarray",
"cv2.resize"
] | [((639, 655), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (653, 655), False, 'from datetime import datetime\n'), ((872, 898), 'glob.glob', 'glob.glob', (['"""./masks/*.png"""'], {}), "('./masks/*.png')\n", (881, 898), False, 'import glob\n'), ((932, 946), 'pptx.Presentation', 'Presentation', ([], {}), '()\n', (944, 946), False, 'from pptx import Presentation\n'), ((965, 975), 'pptx.util.Inches', 'Inches', (['(10)'], {}), '(10)\n', (971, 975), False, 'from pptx.util import Inches\n'), ((995, 1005), 'pptx.util.Inches', 'Inches', (['(10)'], {}), '(10)\n', (1001, 1005), False, 'from pptx.util import Inches\n'), ((2334, 2350), 'tqdm.autonotebook.tqdm', 'tqdm', (['mask_files'], {}), '(mask_files)\n', (2338, 2350), False, 'from tqdm.autonotebook import tqdm\n'), ((1569, 1643), 'cv2.resize', 'cv2.resize', (['img', 'None'], {'fx': 'resize', 'fy': 'resize', 'interpolation': 'cv2.INTER_CUBIC'}), '(img, None, fx=resize, fy=resize, interpolation=cv2.INTER_CUBIC)\n', (1579, 1643), False, 'import cv2\n'), ((1785, 1794), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1792, 1794), False, 'from io import BytesIO\n'), ((2058, 2077), 'skimage.color.rgb2gray', 'color.rgb2gray', (['img'], {}), '(img)\n', (2072, 2077), False, 'from skimage import color\n'), ((2118, 2138), 'skimage.color.rgb2gray', 'color.rgb2gray', (['mask'], {}), '(mask)\n', (2132, 2138), False, 'from skimage import color\n'), ((2236, 2272), 'numpy.concatenate', 'np.concatenate', (['(mask, img, mask)', '(2)'], {}), '((mask, img, mask), 2)\n', (2250, 2272), True, 'import numpy as np\n'), ((2825, 2847), 'cv2.imread', 'cv2.imread', (['orig_fname'], {}), '(orig_fname)\n', (2835, 2847), False, 'import cv2\n'), ((2899, 2908), 'pptx.util.Inches', 'Inches', (['(0)'], {}), '(0)\n', (2905, 2908), False, 'from pptx.util import Inches\n'), ((2909, 2918), 'pptx.util.Inches', 'Inches', (['(0)'], {}), '(0)\n', (2915, 2918), False, 'from pptx.util import Inches\n'), ((2919, 2928), 'pptx.util.Inches', 'Inches', 
(['(5)'], {}), '(5)\n', (2925, 2928), False, 'from pptx.util import Inches\n'), ((2929, 2938), 'pptx.util.Inches', 'Inches', (['(5)'], {}), '(5)\n', (2935, 2938), False, 'from pptx.util import Inches\n'), ((3010, 3032), 'cv2.imread', 'cv2.imread', (['mask_fname'], {}), '(mask_fname)\n', (3020, 3032), False, 'import cv2\n'), ((3085, 3094), 'pptx.util.Inches', 'Inches', (['(5)'], {}), '(5)\n', (3091, 3094), False, 'from pptx.util import Inches\n'), ((3095, 3104), 'pptx.util.Inches', 'Inches', (['(0)'], {}), '(0)\n', (3101, 3104), False, 'from pptx.util import Inches\n'), ((3105, 3114), 'pptx.util.Inches', 'Inches', (['(5)'], {}), '(5)\n', (3111, 3114), False, 'from pptx.util import Inches\n'), ((3115, 3124), 'pptx.util.Inches', 'Inches', (['(5)'], {}), '(5)\n', (3121, 3124), False, 'from pptx.util import Inches\n'), ((3200, 3224), 'cv2.imread', 'cv2.imread', (['output_fname'], {}), '(output_fname)\n', (3210, 3224), False, 'import cv2\n'), ((3279, 3288), 'pptx.util.Inches', 'Inches', (['(5)'], {}), '(5)\n', (3285, 3288), False, 'from pptx.util import Inches\n'), ((3289, 3298), 'pptx.util.Inches', 'Inches', (['(5)'], {}), '(5)\n', (3295, 3298), False, 'from pptx.util import Inches\n'), ((3299, 3308), 'pptx.util.Inches', 'Inches', (['(5)'], {}), '(5)\n', (3305, 3308), False, 'from pptx.util import Inches\n'), ((3309, 3318), 'pptx.util.Inches', 'Inches', (['(5)'], {}), '(5)\n', (3315, 3318), False, 'from pptx.util import Inches\n'), ((3419, 3428), 'pptx.util.Inches', 'Inches', (['(0)'], {}), '(0)\n', (3425, 3428), False, 'from pptx.util import Inches\n'), ((3429, 3438), 'pptx.util.Inches', 'Inches', (['(5)'], {}), '(5)\n', (3435, 3438), False, 'from pptx.util import Inches\n'), ((3439, 3448), 'pptx.util.Inches', 'Inches', (['(5)'], {}), '(5)\n', (3445, 3448), False, 'from pptx.util import Inches\n'), ((3449, 3458), 'pptx.util.Inches', 'Inches', (['(5)'], {}), '(5)\n', (3455, 3458), False, 'from pptx.util import Inches\n'), ((3623, 3633), 'pptx.util.Inches', 'Inches', 
(['(10)'], {}), '(10)\n', (3629, 3633), False, 'from pptx.util import Inches\n'), ((3635, 3644), 'pptx.util.Inches', 'Inches', (['(0)'], {}), '(0)\n', (3641, 3644), False, 'from pptx.util import Inches\n'), ((3645, 3654), 'pptx.util.Inches', 'Inches', (['(4)'], {}), '(4)\n', (3651, 3654), False, 'from pptx.util import Inches\n'), ((3655, 3664), 'pptx.util.Inches', 'Inches', (['(4)'], {}), '(4)\n', (3661, 3664), False, 'from pptx.util import Inches\n'), ((1799, 1819), 'PIL.Image.fromarray', 'Image.fromarray', (['res'], {}), '(res)\n', (1814, 1819), True, 'import PIL.Image as Image\n')] |
#!/usr/bin/env python3
import time
import numpy as np
from collections import defaultdict, deque
import logging
import shapely.ops
from threading import Thread
#from line_profiler import LineProfiler
import pyqtgraph.opengl as gl
from phonebot.core.common.logger import get_default_logger
from phonebot.core.common.math.utils import anorm, alerp
from phonebot.core.common.math.transform import Rotation, Position, Transform
from phonebot.core.common.config import PhonebotSettings
from phonebot.core.common.queue_listener import QueueListener
from phonebot.core.frame_graph.phonebot_graph import PhonebotGraph
from phonebot.core.frame_graph.graph_utils import get_graph_geometries, solve_knee_angle, solve_inverse_kinematics
from phonebot.core.kinematics.workspace import get_workspace
from phonebot.core.controls.controllers.base_rotation_controller import BaseRotationController
from phonebot.vis.viewer.phonebot_viewer import PhonebotViewer
from phonebot.vis.viewer._pyqtgraph.pyqtgraph_handlers import (
LineStripHandler)
from phonebot.vis.viewer.viewer_base import HandleHelper
logger = get_default_logger(logging.WARN)
class KeyControlListener:
    """Simple listener remapping keys to actions controlling orientation."""

    def __init__(self):
        # Per-key (pitch, roll) increments; 'x' is the quit sentinel.
        self.key_map_ = {
            'w': (0.01, 0.0),
            'a': (0.0, -0.01),
            's': (-0.01, 0.0),
            'd': (0.0, 0.01),
            'q': (0.01, -0.01),
            'e': (0.01, 0.01),
            'z': (-0.01, -0.01),
            'c': (-0.01, 0.01),
            'x': 'quit',
        }
        self.quit_ = False
        self.reset()

    def on_key(self, key: str):
        """Accumulate the offset (or set the quit flag) mapped to *key*."""
        # Key codes may arrive as ints; normalize to a lowercase character.
        if isinstance(key, int):
            key = chr(key).lower()
        action = self.key_map_.get(key)
        if action is None:
            return
        if action == 'quit':
            self.quit_ = True
            self.updated_ = True
            return
        self.offsets_ += action
        self.updated_ = True

    def reset(self):
        """Zero out the accumulated offsets and clear the update flag."""
        self.offsets_ = np.zeros(2)
        self.updated_ = False

    def has_data(self):
        """Whether at least one relevant user input was provided."""
        return self.updated_

    def get(self):
        """Return (accumulated offsets, quit flag) and reset the state."""
        snapshot = self.offsets_.copy()
        self.reset()
        return snapshot, self.quit_
def main():
    """Run the interactive phonebot base-rotation demo.

    Builds the frame graph and viewer, drives leg joints from a
    BaseRotationController, and lets the user tilt the ground plane with
    the keyboard (w/a/s/d/q/e/z/c; x quits).
    """
    config = PhonebotSettings()
    graph = PhonebotGraph(config)
    viewer = PhonebotViewer()
    handler = HandleHelper(viewer)
    # One workspace and one foot-trajectory line strip per leg.
    for leg_prefix in config.order:
        viewer.register('{}_workspace'.format(leg_prefix), LineStripHandler)
        viewer.register('{}_trajectory'.format(leg_prefix), LineStripHandler)
    key_listener = KeyControlListener()
    viewer.on_key(lambda data: key_listener.on_key(data[0]))
    acceleration = 1.0
    max_iter = np.inf
    # Arbitrary stamp.
    stamp = time.time() * acceleration
    workspace = get_workspace()
    # Initialize all legs to the nominal hip/knee angles.
    for leg_prefix in config.order:
        leg_origin = '{}_leg_origin'.format(leg_prefix)
        hip_joint_a = '{}_hip_joint_a'.format(leg_prefix)
        hip_joint_b = '{}_hip_joint_b'.format(leg_prefix)
        knee_joint_a = '{}_knee_joint_a'.format(leg_prefix)
        knee_joint_b = '{}_knee_joint_b'.format(leg_prefix)
        foot_a = '{}_foot_a'.format(leg_prefix)
        foot_b = '{}_foot_b'.format(leg_prefix)
        graph.get_edge(knee_joint_a, hip_joint_a).update(
            stamp, config.nominal_hip_angle)
        graph.get_edge(foot_a, knee_joint_a).update(
            stamp, config.nominal_knee_angle)
        graph.get_edge(knee_joint_b, hip_joint_b).update(
            stamp, config.nominal_hip_angle)
        graph.get_edge(foot_b, knee_joint_b).update(
            stamp, config.nominal_knee_angle)
    # Obtain the actuated (knee->hip) joint edges in controller order.
    joint_edges = []
    for leg_prefix in config.order:
        for leg_suffix in 'ab':
            knee = '{}_knee_joint_{}'.format(leg_prefix, leg_suffix)
            hip = '{}_hip_joint_{}'.format(leg_prefix, leg_suffix)
            joint_edge = graph.get_edge(knee, hip)
            joint_edges.append(joint_edge)
    controller = BaseRotationController(graph)
    plane_visuals = None
    plane_rotation = None
    roll = 0.0
    pitch = 0.0
    # Retry until the controller accepts an upward-facing plane.
    while plane_visuals is None:
        # plane_rotation = Rotation.from_euler([0.1,0.1,0])
        plane_rotation = Rotation.from_euler([roll, pitch,
                                              0.0]).to_quaternion()
        plane_normal = plane_rotation.rotate(Position([0, 0, 1]))
        if(plane_normal.dot([0, 0, 1]) < 0):
            continue
        plane_visuals = controller.update(plane_rotation)
    # plane_position = Position(controller.distance_ * controller.normal_)
    plane_position = Position(-plane_rotation.inverse().rotate(
        controller.distance_ * controller.normal_))
    graph.get_edge('body', 'local').update(
        0.0, Transform(plane_position, plane_rotation))
    for edge in graph.edges:
        logger.debug(repr(edge))
    # rotation = Rotation.from_euler([0.2,0.0,0.0])
    # logger.warn(rotation.to_euler())
    # logger.warn(rotation.rotate(Position([0,0,1])))
    # plane_visuals = controller.update(rotation)
    # Rolling per-leg history of endpoint positions (last 128 samples).
    foot_positions = defaultdict(lambda: deque(maxlen=128))
    leg_colors = {k: np.random.uniform(size=3) for k in config.order}
    iteration_count = 0
    # NOTE(review): iteration_count is never incremented, so the max_iter
    # bound only matters if it is made finite AND the counter is updated.
    while True:
        if iteration_count > max_iter:
            break
        stamp = time.time() * acceleration
        logger.info('current stamp : {}'.format(stamp))
        commands = controller.control(stamp)
        # Apply accumulated keyboard input to the plane orientation.
        if key_listener.has_data():
            (dp, dr), quit = key_listener.get()
            if quit:
                break
            pitch += dp
            roll += dr
            plane_rotation = Rotation.from_euler([roll, pitch,
                                                  0.0]).to_quaternion()
            plane_visuals = controller.update(plane_rotation)
            plane_position = Position(-plane_rotation.inverse().rotate(
                controller.distance_ * controller.normal_))
            graph.get_edge('body', 'local').update(
                0.0, Transform(plane_position, plane_rotation))
        # Actuation. Currently, the exact joint value is written
        # ("perfect" joint), smoothed toward the command with alerp.
        for joint_edge, joint_command in zip(joint_edges, commands):
            joint_edge.update(stamp, anorm(
                alerp(joint_edge.angle, joint_command, 0.5)))
        # Update passive (knee) joints to stay kinematically consistent.
        for leg_prefix in config.order:
            knee_angle_a, knee_angle_b = solve_knee_angle(
                graph, leg_prefix, stamp, config=config)
            foot_a = '{}_foot_a'.format(leg_prefix)
            foot_b = '{}_foot_b'.format(leg_prefix)
            knee_joint_a = '{}_knee_joint_a'.format(leg_prefix)
            knee_joint_b = '{}_knee_joint_b'.format(leg_prefix)
            knee_edge_a = graph.get_edge(foot_a, knee_joint_a)
            knee_edge_b = graph.get_edge(foot_b, knee_joint_b)
            knee_edge_a.update(stamp, alerp(
                knee_edge_a.angle, knee_angle_a, 0.5))
            knee_edge_b.update(stamp, alerp(
                knee_edge_b.angle, knee_angle_b, 0.5))
        # Record endpoint positions for the trajectory visualization.
        for leg_prefix in config.order:
            foot_joint = '{}_foot_a'.format(leg_prefix)
            foot_positions[leg_prefix].append(graph.get_transform(
                foot_joint, 'local', stamp).position)
        # TODO: Update local frame view based on foot_positions.?
        graph.get_edge('body', 'local').update(
            0.0, Transform(plane_position, plane_rotation))
        # Compute workspace visualization data in the local frame.
        workspace_points = {}
        for leg_prefix in config.order:
            local_from_leg = graph.get_transform(
                '{}_leg_origin'.format(leg_prefix), 'local', stamp)
            workspace_points[leg_prefix] = local_from_leg * Position(workspace)
        # Plane-trajectory segments per leg, plus a magenta segment from the
        # current endpoint to its nearest point on the reference path.
        plane_lines = []
        plane_colors = []
        for leg_prefix in config.order:
            leg_origin = '{}_leg_origin'.format(leg_prefix)
            foot_joint = '{}_foot_a'.format(leg_prefix)
            body_from_leg = graph.get_transform(leg_origin,
                                                'body', stamp)
            for ps in np.reshape(plane_visuals[leg_prefix], (-1, 2, 3)):
                p_start, p_end = [Position(p) for p in ps]
                plane_lines.append(
                    [body_from_leg * p_start, body_from_leg * p_end])
                plane_colors.extend(
                    [leg_colors[leg_prefix], leg_colors[leg_prefix]])
            ep = graph.get_transform(foot_joint, 'body', stamp).position
            epp = body_from_leg.inverse() * ep
            refs = controller.cache_[leg_prefix]
            cur = shapely.geometry.Point(epp.x, epp.y)
            # test
            source, target = shapely.ops.nearest_points(refs, cur)
            source = Position([source.x, source.y, 0.0])
            target = Position([target.x, target.y, 0.0])
            plane_lines.append(
                [body_from_leg * source, body_from_leg * target])
            plane_colors.extend([[1, 0, 1], [1, 0, 1]])
        # Send data to asynchronous viewer.
        with handler.collect():
            poses, edges = get_graph_geometries(
                graph, stamp, target_frame='local', tol=np.inf)
            handler.poses(poses=poses)
            handler.edges(poses=poses, edges=edges)
            local_from_body = graph.get_transform('body', 'local', stamp)
            plane_lines = np.float32(plane_lines)
            plane_colors = np.float32(plane_colors)
            extra_lines = np.concatenate(
                [plane_lines], axis=0)
            shape = extra_lines.shape
            # Transform all segment endpoints into the local frame at once.
            extra_lines = local_from_body * \
                Position(extra_lines.reshape(-1, 3))
            extra_lines = np.reshape(extra_lines, shape)
            extra_colors = np.concatenate(
                [plane_colors], axis=0)
            handler.line(pos=extra_lines, color=extra_colors)
            # Add workspace visualization.
            for leg_prefix in config.order:
                tag = '{}_workspace'.format(leg_prefix)
                handler[tag](
                    pos=workspace_points[leg_prefix],
                    color=(1., 1., 0., 1.))
            # Add endpoint trajectory.
            for leg_prefix in config.order:
                tag = '{}_trajectory'.format(leg_prefix)
                handler[tag](
                    pos=np.asarray(foot_positions[leg_prefix]),
                    color=(0., 1., 1., 1.))
if __name__ == '__main__':
main()
| [
"phonebot.core.common.math.transform.Position",
"phonebot.core.common.config.PhonebotSettings",
"phonebot.vis.viewer.viewer_base.HandleHelper",
"phonebot.core.common.math.transform.Rotation.from_euler",
"phonebot.core.controls.controllers.base_rotation_controller.BaseRotationController",
"collections.dequ... | [((1103, 1135), 'phonebot.core.common.logger.get_default_logger', 'get_default_logger', (['logging.WARN'], {}), '(logging.WARN)\n', (1121, 1135), False, 'from phonebot.core.common.logger import get_default_logger\n'), ((2471, 2489), 'phonebot.core.common.config.PhonebotSettings', 'PhonebotSettings', ([], {}), '()\n', (2487, 2489), False, 'from phonebot.core.common.config import PhonebotSettings\n'), ((2502, 2523), 'phonebot.core.frame_graph.phonebot_graph.PhonebotGraph', 'PhonebotGraph', (['config'], {}), '(config)\n', (2515, 2523), False, 'from phonebot.core.frame_graph.phonebot_graph import PhonebotGraph\n'), ((2537, 2553), 'phonebot.vis.viewer.phonebot_viewer.PhonebotViewer', 'PhonebotViewer', ([], {}), '()\n', (2551, 2553), False, 'from phonebot.vis.viewer.phonebot_viewer import PhonebotViewer\n'), ((2568, 2588), 'phonebot.vis.viewer.viewer_base.HandleHelper', 'HandleHelper', (['viewer'], {}), '(viewer)\n', (2580, 2588), False, 'from phonebot.vis.viewer.viewer_base import HandleHelper\n'), ((3008, 3023), 'phonebot.core.kinematics.workspace.get_workspace', 'get_workspace', ([], {}), '()\n', (3021, 3023), False, 'from phonebot.core.kinematics.workspace import get_workspace\n'), ((4264, 4293), 'phonebot.core.controls.controllers.base_rotation_controller.BaseRotationController', 'BaseRotationController', (['graph'], {}), '(graph)\n', (4286, 4293), False, 'from phonebot.core.controls.controllers.base_rotation_controller import BaseRotationController\n'), ((2141, 2152), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2149, 2152), True, 'import numpy as np\n'), ((2964, 2975), 'time.time', 'time.time', ([], {}), '()\n', (2973, 2975), False, 'import time\n'), ((5038, 5079), 'phonebot.core.common.math.transform.Transform', 'Transform', (['plane_position', 'plane_rotation'], {}), '(plane_position, plane_rotation)\n', (5047, 5079), False, 'from phonebot.core.common.math.transform import Rotation, Position, Transform\n'), ((5422, 5447), 
'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(3)'}), '(size=3)\n', (5439, 5447), True, 'import numpy as np\n'), ((4644, 4663), 'phonebot.core.common.math.transform.Position', 'Position', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (4652, 4663), False, 'from phonebot.core.common.math.transform import Rotation, Position, Transform\n'), ((5382, 5399), 'collections.deque', 'deque', ([], {'maxlen': '(128)'}), '(maxlen=128)\n', (5387, 5399), False, 'from collections import defaultdict, deque\n'), ((5585, 5596), 'time.time', 'time.time', ([], {}), '()\n', (5594, 5596), False, 'import time\n'), ((6769, 6826), 'phonebot.core.frame_graph.graph_utils.solve_knee_angle', 'solve_knee_angle', (['graph', 'leg_prefix', 'stamp'], {'config': 'config'}), '(graph, leg_prefix, stamp, config=config)\n', (6785, 6826), False, 'from phonebot.core.frame_graph.graph_utils import get_graph_geometries, solve_knee_angle, solve_inverse_kinematics\n'), ((7791, 7832), 'phonebot.core.common.math.transform.Transform', 'Transform', (['plane_position', 'plane_rotation'], {}), '(plane_position, plane_rotation)\n', (7800, 7832), False, 'from phonebot.core.common.math.transform import Rotation, Position, Transform\n'), ((8505, 8554), 'numpy.reshape', 'np.reshape', (['plane_visuals[leg_prefix]', '(-1, 2, 3)'], {}), '(plane_visuals[leg_prefix], (-1, 2, 3))\n', (8515, 8554), True, 'import numpy as np\n'), ((9162, 9197), 'phonebot.core.common.math.transform.Position', 'Position', (['[source.x, source.y, 0.0]'], {}), '([source.x, source.y, 0.0])\n', (9170, 9197), False, 'from phonebot.core.common.math.transform import Rotation, Position, Transform\n'), ((9219, 9254), 'phonebot.core.common.math.transform.Position', 'Position', (['[target.x, target.y, 0.0]'], {}), '([target.x, target.y, 0.0])\n', (9227, 9254), False, 'from phonebot.core.common.math.transform import Rotation, Position, Transform\n'), ((9513, 9581), 'phonebot.core.frame_graph.graph_utils.get_graph_geometries', 'get_graph_geometries', 
(['graph', 'stamp'], {'target_frame': '"""local"""', 'tol': 'np.inf'}), "(graph, stamp, target_frame='local', tol=np.inf)\n", (9533, 9581), False, 'from phonebot.core.frame_graph.graph_utils import get_graph_geometries, solve_knee_angle, solve_inverse_kinematics\n'), ((9792, 9815), 'numpy.float32', 'np.float32', (['plane_lines'], {}), '(plane_lines)\n', (9802, 9815), True, 'import numpy as np\n'), ((9843, 9867), 'numpy.float32', 'np.float32', (['plane_colors'], {}), '(plane_colors)\n', (9853, 9867), True, 'import numpy as np\n'), ((9895, 9932), 'numpy.concatenate', 'np.concatenate', (['[plane_lines]'], {'axis': '(0)'}), '([plane_lines], axis=0)\n', (9909, 9932), True, 'import numpy as np\n'), ((10114, 10144), 'numpy.reshape', 'np.reshape', (['extra_lines', 'shape'], {}), '(extra_lines, shape)\n', (10124, 10144), True, 'import numpy as np\n'), ((10172, 10210), 'numpy.concatenate', 'np.concatenate', (['[plane_colors]'], {'axis': '(0)'}), '([plane_colors], axis=0)\n', (10186, 10210), True, 'import numpy as np\n'), ((4497, 4536), 'phonebot.core.common.math.transform.Rotation.from_euler', 'Rotation.from_euler', (['[roll, pitch, 0.0]'], {}), '([roll, pitch, 0.0])\n', (4516, 4536), False, 'from phonebot.core.common.math.transform import Rotation, Position, Transform\n'), ((6314, 6355), 'phonebot.core.common.math.transform.Transform', 'Transform', (['plane_position', 'plane_rotation'], {}), '(plane_position, plane_rotation)\n', (6323, 6355), False, 'from phonebot.core.common.math.transform import Rotation, Position, Transform\n'), ((7243, 7286), 'phonebot.core.common.math.utils.alerp', 'alerp', (['knee_edge_a.angle', 'knee_angle_a', '(0.5)'], {}), '(knee_edge_a.angle, knee_angle_a, 0.5)\n', (7248, 7286), False, 'from phonebot.core.common.math.utils import anorm, alerp\n'), ((7343, 7386), 'phonebot.core.common.math.utils.alerp', 'alerp', (['knee_edge_b.angle', 'knee_angle_b', '(0.5)'], {}), '(knee_edge_b.angle, knee_angle_b, 0.5)\n', (7348, 7386), False, 'from 
phonebot.core.common.math.utils import anorm, alerp\n'), ((8131, 8150), 'phonebot.core.common.math.transform.Position', 'Position', (['workspace'], {}), '(workspace)\n', (8139, 8150), False, 'from phonebot.core.common.math.transform import Rotation, Position, Transform\n'), ((5941, 5980), 'phonebot.core.common.math.transform.Rotation.from_euler', 'Rotation.from_euler', (['[roll, pitch, 0.0]'], {}), '([roll, pitch, 0.0])\n', (5960, 5980), False, 'from phonebot.core.common.math.transform import Rotation, Position, Transform\n'), ((6583, 6626), 'phonebot.core.common.math.utils.alerp', 'alerp', (['joint_edge.angle', 'joint_command', '(0.5)'], {}), '(joint_edge.angle, joint_command, 0.5)\n', (6588, 6626), False, 'from phonebot.core.common.math.utils import anorm, alerp\n'), ((8590, 8601), 'phonebot.core.common.math.transform.Position', 'Position', (['p'], {}), '(p)\n', (8598, 8601), False, 'from phonebot.core.common.math.transform import Rotation, Position, Transform\n'), ((10758, 10796), 'numpy.asarray', 'np.asarray', (['foot_positions[leg_prefix]'], {}), '(foot_positions[leg_prefix])\n', (10768, 10796), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
from config.config import SemSegMRIConfig
import numpy as np
from torchio import Image, ImagesDataset, SubjectsDataset
import torchio
from config.augm import train_transform
from tabulate import tabulate
def TorchIODataLoader_train(image_val, label_val):
    """Build a TorchIO training DataLoader from paired image/label paths.

    :param image_val: sequence of file paths to the intensity (t1) volumes
    :param label_val: sequence of file paths to the matching label volumes
    :return: torch DataLoader yielding one augmented subject per batch
    """
    subject_list = list()
    # Pair each image with its label volume; the unused enumerate index
    # from the original version has been dropped.
    for image_path, label_path in zip(image_val, label_val):
        subject = torchio.Subject(
            t1=Image(type=torchio.INTENSITY, path=image_path),
            label=Image(type=torchio.LABEL, path=label_path),
        )
        subject_list.append(subject)
    # train_transform applies the configured augmentations on the fly.
    subjects_dataset = SubjectsDataset(subject_list, transform=train_transform)
    train_data = torch.utils.data.DataLoader(subjects_dataset, batch_size=1,
                                             shuffle=True, num_workers=0)
    return train_data
def _log_invfreq_weights(train_data):
    """Scan a loader and return [log(1/f0), log(1/f1), log(1/f2), n_voxels].

    f_c is the voxel frequency of class c over the whole loader; the last
    entry is the total voxel count (used only for reporting).
    """
    counts = [0, 0, 0]
    for data in train_data:
        # Label tensor -> numpy, drop the batch dimension, flatten voxels.
        labels = data['label']['data'].data.cpu().numpy()[0, :, :, :].flatten()
        for c in range(3):
            counts[c] += (labels == c).sum()
    total = counts[0] + counts[1] + counts[2]
    # log of the inverse class frequency, as in the original computation.
    return [np.log(total / counts[c]) for c in range(3)] + [total]


def calc_weight(kf):
    """Compute per-fold class weights for the cross-validation splits.

    For every training split of ``kf`` (and once for the full training set,
    appended as the final row) the log-inverse-frequency weight of each of
    the three label classes is computed. A summary table is printed via
    ``stamp_weights`` before the voxel-count column is dropped.

    :param kf: an sklearn-style splitter with a ``split`` method
    :return: list of [w0, w1, w2] weight lists, one per fold plus the total
    """
    weights = list()
    for train_index, _val_index in kf.split(SemSegMRIConfig.train_images):
        train_images_np = np.array(SemSegMRIConfig.train_images)
        train_labels_np = np.array(SemSegMRIConfig.train_labels)
        train_data = TorchIODataLoader_train(list(train_images_np[train_index]),
                                                list(train_labels_np[train_index]))
        weights.append(_log_invfreq_weights(train_data))
    # Final row: weights over the entire training set.
    train_data = TorchIODataLoader_train(SemSegMRIConfig.train_images,
                                            SemSegMRIConfig.train_labels)
    weights.append(_log_invfreq_weights(train_data))
    stamp_weights(weights)
    # Drop the trailing voxel-count entry before returning the weights.
    for weight in weights:
        weight.pop()
    return weights
def stamp_weights(weights):
    """Print a table of per-fold class weights; the last row is the total."""
    table_rows = list()
    last_row = len(weights) - 1
    for i, row in enumerate(weights):
        # The final entry aggregates all folds and gets its own label.
        label = 'TOTAL FOLD ' if i == last_row else 'FOLD ' + i.__str__()
        table_rows.append([label, row[0], row[1], row[2], row[3]])
    print(tabulate(table_rows, headers=['FOLD', 'WEIGHT_0', 'WEIGHT_1', 'WEIGHT_2', 'N_VOXEL']))
| [
"numpy.log",
"torch.utils.data.DataLoader",
"torchio.Image",
"torchio.SubjectsDataset",
"tabulate.tabulate",
"numpy.array"
] | [((673, 729), 'torchio.SubjectsDataset', 'SubjectsDataset', (['subject_list'], {'transform': 'train_transform'}), '(subject_list, transform=train_transform)\n', (688, 729), False, 'from torchio import Image, ImagesDataset, SubjectsDataset\n'), ((747, 839), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['subjects_dataset'], {'batch_size': '(1)', 'shuffle': '(True)', 'num_workers': '(0)'}), '(subjects_dataset, batch_size=1, shuffle=True,\n num_workers=0)\n', (774, 839), False, 'import torch\n'), ((3100, 3121), 'numpy.log', 'np.log', (['in_frequency0'], {}), '(in_frequency0)\n', (3106, 3121), True, 'import numpy as np\n'), ((3123, 3144), 'numpy.log', 'np.log', (['in_frequency1'], {}), '(in_frequency1)\n', (3129, 3144), True, 'import numpy as np\n'), ((3146, 3167), 'numpy.log', 'np.log', (['in_frequency2'], {}), '(in_frequency2)\n', (3152, 3167), True, 'import numpy as np\n'), ((3737, 3828), 'tabulate.tabulate', 'tabulate', (['final_matrix'], {'headers': "['FOLD', 'WEIGHT_0', 'WEIGHT_1', 'WEIGHT_2', 'N_VOXEL']"}), "(final_matrix, headers=['FOLD', 'WEIGHT_0', 'WEIGHT_1', 'WEIGHT_2',\n 'N_VOXEL'])\n", (3745, 3828), False, 'from tabulate import tabulate\n'), ((1127, 1165), 'numpy.array', 'np.array', (['SemSegMRIConfig.train_images'], {}), '(SemSegMRIConfig.train_images)\n', (1135, 1165), True, 'import numpy as np\n'), ((1167, 1205), 'numpy.array', 'np.array', (['SemSegMRIConfig.train_labels'], {}), '(SemSegMRIConfig.train_labels)\n', (1175, 1205), True, 'import numpy as np\n'), ((2161, 2182), 'numpy.log', 'np.log', (['in_frequency0'], {}), '(in_frequency0)\n', (2167, 2182), True, 'import numpy as np\n'), ((2184, 2205), 'numpy.log', 'np.log', (['in_frequency1'], {}), '(in_frequency1)\n', (2190, 2205), True, 'import numpy as np\n'), ((2207, 2228), 'numpy.log', 'np.log', (['in_frequency2'], {}), '(in_frequency2)\n', (2213, 2228), True, 'import numpy as np\n'), ((497, 543), 'torchio.Image', 'Image', ([], {'type': 'torchio.INTENSITY', 'path': 
'image_path'}), '(type=torchio.INTENSITY, path=image_path)\n', (502, 543), False, 'from torchio import Image, ImagesDataset, SubjectsDataset\n'), ((563, 605), 'torchio.Image', 'Image', ([], {'type': 'torchio.LABEL', 'path': 'label_path'}), '(type=torchio.LABEL, path=label_path)\n', (568, 605), False, 'from torchio import Image, ImagesDataset, SubjectsDataset\n')] |
make_plots = 0
import numpy as np
import os,sys
import matplotlib.pyplot as pl
import matplotlib.cm as plcm
import dinterp
import time
# Interactive plotting is disabled by default (make_plots = 0 above).
if make_plots:
    pl.close("all")
# Wall-clock timer for the offline (model construction) stage.
time0 = time.time()
n = 14
print("FOM dof = 2 ** {:02d}".format(n))
# Precomputed solution snapshots and parameter samples from a prior run.
snapshot_fname = "_output/sol_snapshots.npy"
mu_fname = "_output/mu_snapshots.npy"
S = np.load(snapshot_fname)
mu = np.load(mu_fname)
N = S.shape[0]
# Build the displacement-interpolation model from the snapshot matrix.
dip0 = dinterp.DIP()
dip0.set_data(S)
dip0.compute_pieces()
dip0.compute_transport()
dip0.compute_lr_model(tol=5.0e-3) # compute SVD
# collect local snapshots: every 15th snapshot column, at 5 offsets
ind_list = []
mu_ind_list = []
for j in range(5):
    ind_list.append(np.arange(j,S.shape[1],15))
    mu_ind_list.append(np.arange(mu.shape[1]))
local_ind = np.vstack(ind_list).T.flatten()
mu_ind = np.vstack(mu_ind_list).T.flatten()
# Cell-centered index grid on [0, N], rescaled to coordinates on (0, 2).
xhi = np.linspace(0.5,N-0.5,N)
xh = (xhi / N)*2.0
def cs(mu,x=xh):
    """Evaluate 1.5 + mu[0]*sin(mu[1]*x) + 0.5*cos(mu[2]*x) on the grid x.

    ``x`` defaults to the module-level grid ``xh``; ``mu`` is indexed as
    (amplitude, sine frequency, cosine frequency).
    """
    amp, k_sin, k_cos = mu[0], mu[1], mu[2]
    return 1.5 + amp * np.sin(k_sin * x) + 0.5 * np.cos(k_cos * x)
# Assemble one local snapshot matrix Z per nonzero signature entry; each
# column is the cumulative sum of a snapshot difference piece (first
# cumulative value dropped).
Z_list = []
for k in range(len(dip0._signature[0])):
    if (dip0._signature[0][k] != 0):
        Z = []
        for l,j in enumerate(local_ind):
            df = dip0._get_snapshot_dpiece(j,k,nz_index=False)
            Z.append(np.cumsum(df)[1:])
        Z = np.array(Z).T
        Z_list.append(Z)
# include some modifications to the mix
if make_plots:
    # .PLOT. the source terms
    # NOTE(review): this plots the Z left over from the last loop iteration,
    # and ax1 gets a title but nothing is ever drawn on it.
    f = pl.figure(figsize=(10,6))
    ax0 = f.add_subplot(2,1,1)
    ax0.plot(Z)
    ax0.set_title(r"local snapshots $u$")
    ax1 = f.add_subplot(2,1,2,sharex=ax0)
    ax1.set_title(r"source term $\psi(u)$")
    f.tight_layout()
    f.show()
# translates (for derivatives)
# NOTE(review): Python 2 only -- in Python 3 `range(...) + list` raises
# TypeError; this would need list(range(1,N)) + [N-1].
ip = range(1,N) + [N-1]
im = [0] + range(0,N-1)
for k,Z in enumerate(Z_list):
    # NOTE(review): shifts_list is rebuilt on every k and never read
    # afterwards -- this loop currently has no observable effect.
    shifts_list = []
    for l in range(Z_list[k].shape[1]):
        cx = cs(mu[:,mu_ind[l]])
        Zl = Z_list[k][:,l]
        # Repeatedly shifted copies of cx*Zl in both directions.
        # NOTE(review): the last pair has five `ip` shifts but only four
        # `im` shifts -- possibly unintended asymmetry.
        shifts_list.append((\
            cx*Zl,cx*Zl,\
            cx*Zl[ip],cx*Zl[im],\
            cx*Zl[ip][ip],cx*Zl[im][im],\
            cx*Zl[ip][ip][ip],cx*Zl[im][im][im],\
            cx*Zl[ip][ip][ip][ip],cx*Zl[im][im][im][im],\
            cx*Zl[ip][ip][ip][ip][ip],cx*Zl[im][im][im][im]))
# Truncated SVD of each local snapshot matrix; keep a fixed number of
# left/right singular vectors per piece.
U_list = []
V_list = []
for k,Z in enumerate(Z_list):
    U,s,V = np.linalg.svd(Z,full_matrices=False)
    # Two alternative truncation rules; the `if 0:`/`if 1:` pattern keeps
    # the threshold rule available while the fixed-size rule is active.
    if 0:
        # pick basis by thresholding
        j0 = (s/s[0] > 1.0e-4)
    if 1:
        # pick fixed no. of basis
        j0 = np.arange(7)
    U0 = U[:,j0]
    V0 = V[j0,:]
    U_list.append(U0)
    V_list.append(V0)
# Concatenate the per-piece bases into a single global basis U0 / V0.
U0 = np.hstack(U_list)
V0 = np.hstack(V_list)
if make_plots:
    # .PLOT. local snapshots (Z, s, j0 here are from the last loop pass)
    f = pl.figure(figsize=(10,3))
    ax = f.add_subplot(1,1,1)
    ax.plot(Z)
    f.show()
    # .PLOT. singvals for local
    f = pl.figure(figsize=(10,3))
    ax = f.add_subplot(1,1,1)
    ax.semilogy(s/s[0],'-rs')
    ax.semilogy(s[j0]/s[0],'-bs')
    f.show()
# DEIM interpolation-point selection; P0 columns index rows of U0 below.
_,P0 = dinterp.deim(U0)
if make_plots:
    # .PLOT. DEIM basis and ipts
    cm0 = plcm.get_cmap("plasma")
    f = pl.figure(figsize=(10,3))
    ax = f.add_subplot(1,1,1)
    ax.plot(U0)
    ii = np.arange(U0.shape[0])
    for j in range(P0.shape[1]):
        color0 = cm0(float(j)/P0.shape[1])
        ii0 = ii[P0[:,j]]
        ax.plot([ii0,ii0],[0.0,U0[P0[:,j],j]],"-r.",color=color0)
    f.show()
def fd1(v):
    """Return the first difference of ``v`` padded with a zero at each end.

    The result has length ``len(v) + 1``; interior entry ``i`` equals
    ``v[i] - v[i-1]`` and the two boundary entries are zero.
    """
    interior = np.diff(v)
    padded = np.zeros(interior.shape[0] + 2)
    padded[1:-1] = interior
    return padded
# set up domain (cell centers xhi and cell interfaces xi, in index units)
xhi = np.linspace(0.5,N-0.5,N)
xi = np.linspace(0.0,N,N+1)
# set up derivatives: PTU rows are the DEIM basis sampled at the
# interpolation points; shifted variants give one-sided differences.
PTU = np.vstack([U0[P0[:,j],:] for j in range(P0.shape[1])])
PTU2 = np.vstack([U0[P0[:,j],:]**2 for j in range(P0.shape[1])])
# NOTE(review): Python 2 only -- `range(...) + list` raises in Python 3.
ip = range(1,N) + [N-1]
sU0 = U0[ip,:]
Pp1TU = np.vstack([sU0[P0[:,j],:] for j in range(P0.shape[1])])
im = [0] + range(0,N-1)
sU0 = U0[im,:]
Pm1TU = np.vstack([sU0[P0[:,j],:] for j in range(P0.shape[1])])
# Forward and backward difference operators at the interpolation points.
PTDpU = (Pp1TU - PTU)
PTDmU = (PTU - Pm1TU)
if make_plots:
    # .PLOT. taking derivatives
    # NOTE(review): `sharex=ax` below refers to the `ax` created in the
    # earlier make_plots block (likely intended to be ax0).
    k0 = 0
    cp0 = np.linalg.solve(PTU,PTDpU[:,k0])
    cm0 = np.linalg.solve(PTU,PTDmU[:,k0])
    dvp0 = np.dot(U0,cp0)
    dvm0 = np.dot(U0,cm0)
    f = pl.figure(figsize=(10,5))
    ax0 = f.add_subplot(2,1,1)
    ax0.plot(xhi,dvp0)
    ax0.plot(xhi,dvm0)
    ax0.plot(xi,fd1(U0[:,k0]**2),"k--")
    ax0.set_title("7")
    ax1 = f.add_subplot(2,1,2,sharex=ax)
    ax1.plot(xhi,U0[:,k0]**2)
    ax1.set_title(r"$u^2$")
    f.tight_layout()
    f.show()
if make_plots:
    # .PLOT. taking derivatives (IC)
    # Project the initial snapshot onto the DEIM basis and compare the
    # interpolated one-sided derivatives against a direct finite difference.
    k0 = 0
    u0 = Z[:,0]
    uval = np.dot(P0.T,u0)
    coeff0 = np.linalg.solve(PTU,uval)
    coeff1 = np.linalg.solve(PTU,uval**2)
    cp0 = np.linalg.solve(PTU,np.dot(PTDpU,coeff1))
    cm0 = np.linalg.solve(PTU,np.dot(PTDmU,coeff1))
    dvp0 = np.dot(U0,cp0)
    dvm0 = np.dot(U0,cm0)
    f = pl.figure(figsize=(10,5))
    ax0 = f.add_subplot(2,1,1)
    ax0.plot(xhi,dvp0)
    ax0.plot(xhi,dvm0)
    ax0.plot(xi,fd1(u0**2),"k--")
    ax0.set_title(r"$(u^2)_x$")
    ax1 = f.add_subplot(2,1,2,sharex=ax)
    ax1.plot(xhi,u0**2)
    ax1.set_title(r"$u^2$")
    f.tight_layout()
    f.show()
# Attach the reduced model pieces to the DIP object and persist it.
dip0._deim_modes = U0
dip0._deim_coords = V0
dip0._deim_ipts = P0
dip0.compute_dinterp_vectors()
dip0.save()
# Record the offline-stage wall-clock time.
time1 = time.time()
offline_time = np.zeros(1)
offline_time[0] = time1 - time0
np.savetxt('offline_time.txt', offline_time)
| [
"dinterp.deim",
"numpy.load",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.figure",
"numpy.linalg.svd",
"numpy.arange",
"numpy.sin",
"numpy.linalg.solve",
"matplotlib.pyplot.close",
"numpy.savetxt",
"numpy.cumsum",
"numpy.linspace",
"numpy.hstack",
"numpy.cos",
"numpy.dot",
"numpy.vsta... | [((183, 194), 'time.time', 'time.time', ([], {}), '()\n', (192, 194), False, 'import time\n'), ((334, 357), 'numpy.load', 'np.load', (['snapshot_fname'], {}), '(snapshot_fname)\n', (341, 357), True, 'import numpy as np\n'), ((363, 380), 'numpy.load', 'np.load', (['mu_fname'], {}), '(mu_fname)\n', (370, 380), True, 'import numpy as np\n'), ((405, 418), 'dinterp.DIP', 'dinterp.DIP', ([], {}), '()\n', (416, 418), False, 'import dinterp\n'), ((805, 833), 'numpy.linspace', 'np.linspace', (['(0.5)', '(N - 0.5)', 'N'], {}), '(0.5, N - 0.5, N)\n', (816, 833), True, 'import numpy as np\n'), ((2481, 2498), 'numpy.hstack', 'np.hstack', (['U_list'], {}), '(U_list)\n', (2490, 2498), True, 'import numpy as np\n'), ((2504, 2521), 'numpy.hstack', 'np.hstack', (['V_list'], {}), '(V_list)\n', (2513, 2521), True, 'import numpy as np\n'), ((2845, 2861), 'dinterp.deim', 'dinterp.deim', (['U0'], {}), '(U0)\n', (2857, 2861), False, 'import dinterp\n'), ((3346, 3374), 'numpy.linspace', 'np.linspace', (['(0.5)', '(N - 0.5)', 'N'], {}), '(0.5, N - 0.5, N)\n', (3357, 3374), True, 'import numpy as np\n'), ((3377, 3403), 'numpy.linspace', 'np.linspace', (['(0.0)', 'N', '(N + 1)'], {}), '(0.0, N, N + 1)\n', (3388, 3403), True, 'import numpy as np\n'), ((5085, 5096), 'time.time', 'time.time', ([], {}), '()\n', (5094, 5096), False, 'import time\n'), ((5113, 5124), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (5121, 5124), True, 'import numpy as np\n'), ((5158, 5202), 'numpy.savetxt', 'np.savetxt', (['"""offline_time.txt"""', 'offline_time'], {}), "('offline_time.txt', offline_time)\n", (5168, 5202), True, 'import numpy as np\n'), ((158, 173), 'matplotlib.pyplot.close', 'pl.close', (['"""all"""'], {}), "('all')\n", (166, 173), True, 'import matplotlib.pyplot as pl\n'), ((1388, 1414), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (1397, 1414), True, 'import matplotlib.pyplot as pl\n'), ((2212, 2249), 'numpy.linalg.svd', 
'np.linalg.svd', (['Z'], {'full_matrices': '(False)'}), '(Z, full_matrices=False)\n', (2225, 2249), True, 'import numpy as np\n'), ((2575, 2601), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (2584, 2601), True, 'import matplotlib.pyplot as pl\n'), ((2704, 2730), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (2713, 2730), True, 'import matplotlib.pyplot as pl\n'), ((2921, 2944), 'matplotlib.cm.get_cmap', 'plcm.get_cmap', (['"""plasma"""'], {}), "('plasma')\n", (2934, 2944), True, 'import matplotlib.cm as plcm\n'), ((2953, 2979), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(10, 3)'}), '(figsize=(10, 3))\n', (2962, 2979), True, 'import matplotlib.pyplot as pl\n'), ((3034, 3056), 'numpy.arange', 'np.arange', (['U0.shape[0]'], {}), '(U0.shape[0])\n', (3043, 3056), True, 'import numpy as np\n'), ((3260, 3284), 'numpy.zeros', 'np.zeros', (['(v.shape[0] + 1)'], {}), '(v.shape[0] + 1)\n', (3268, 3284), True, 'import numpy as np\n'), ((3298, 3308), 'numpy.diff', 'np.diff', (['v'], {}), '(v)\n', (3305, 3308), True, 'import numpy as np\n'), ((3870, 3904), 'numpy.linalg.solve', 'np.linalg.solve', (['PTU', 'PTDpU[:, k0]'], {}), '(PTU, PTDpU[:, k0])\n', (3885, 3904), True, 'import numpy as np\n'), ((3913, 3947), 'numpy.linalg.solve', 'np.linalg.solve', (['PTU', 'PTDmU[:, k0]'], {}), '(PTU, PTDmU[:, k0])\n', (3928, 3947), True, 'import numpy as np\n'), ((3957, 3972), 'numpy.dot', 'np.dot', (['U0', 'cp0'], {}), '(U0, cp0)\n', (3963, 3972), True, 'import numpy as np\n'), ((3983, 3998), 'numpy.dot', 'np.dot', (['U0', 'cm0'], {}), '(U0, cm0)\n', (3989, 3998), True, 'import numpy as np\n'), ((4007, 4033), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (4016, 4033), True, 'import matplotlib.pyplot as pl\n'), ((4402, 4418), 'numpy.dot', 'np.dot', (['P0.T', 'u0'], {}), '(P0.T, u0)\n', (4408, 4418), True, 'import numpy as np\n'), 
((4431, 4457), 'numpy.linalg.solve', 'np.linalg.solve', (['PTU', 'uval'], {}), '(PTU, uval)\n', (4446, 4457), True, 'import numpy as np\n'), ((4470, 4501), 'numpy.linalg.solve', 'np.linalg.solve', (['PTU', '(uval ** 2)'], {}), '(PTU, uval ** 2)\n', (4485, 4501), True, 'import numpy as np\n'), ((4615, 4630), 'numpy.dot', 'np.dot', (['U0', 'cp0'], {}), '(U0, cp0)\n', (4621, 4630), True, 'import numpy as np\n'), ((4641, 4656), 'numpy.dot', 'np.dot', (['U0', 'cm0'], {}), '(U0, cm0)\n', (4647, 4656), True, 'import numpy as np\n'), ((4665, 4691), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (4674, 4691), True, 'import matplotlib.pyplot as pl\n'), ((634, 662), 'numpy.arange', 'np.arange', (['j', 'S.shape[1]', '(15)'], {}), '(j, S.shape[1], 15)\n', (643, 662), True, 'import numpy as np\n'), ((685, 707), 'numpy.arange', 'np.arange', (['mu.shape[1]'], {}), '(mu.shape[1])\n', (694, 707), True, 'import numpy as np\n'), ((2384, 2396), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (2393, 2396), True, 'import numpy as np\n'), ((4530, 4551), 'numpy.dot', 'np.dot', (['PTDpU', 'coeff1'], {}), '(PTDpU, coeff1)\n', (4536, 4551), True, 'import numpy as np\n'), ((4582, 4603), 'numpy.dot', 'np.dot', (['PTDmU', 'coeff1'], {}), '(PTDmU, coeff1)\n', (4588, 4603), True, 'import numpy as np\n'), ((722, 741), 'numpy.vstack', 'np.vstack', (['ind_list'], {}), '(ind_list)\n', (731, 741), True, 'import numpy as np\n'), ((763, 785), 'numpy.vstack', 'np.vstack', (['mu_ind_list'], {}), '(mu_ind_list)\n', (772, 785), True, 'import numpy as np\n'), ((961, 976), 'numpy.cos', 'np.cos', (['(mu3 * x)'], {}), '(mu3 * x)\n', (967, 976), True, 'import numpy as np\n'), ((1254, 1265), 'numpy.array', 'np.array', (['Z'], {}), '(Z)\n', (1262, 1265), True, 'import numpy as np\n'), ((941, 956), 'numpy.sin', 'np.sin', (['(mu2 * x)'], {}), '(mu2 * x)\n', (947, 956), True, 'import numpy as np\n'), ((1222, 1235), 'numpy.cumsum', 'np.cumsum', (['df'], {}), '(df)\n', 
(1231, 1235), True, 'import numpy as np\n')] |
import numpy as np
from scipy.stats import t
from typing import Tuple
def corrected_resampled_t_statistic(x: np.ndarray, n: int, n1: int, n2: int, alpha: float = 0.05) -> Tuple[float, Tuple[float, float]]:
    """
    Nadeau and Bengio (2003), Bouckaert and Frank (2004)
    Corrected resampled t-statistic

    The correction factor (1/n + n2/n1) inflates the variance estimate to
    account for the overlap between training sets across resamples.

    :param x: vector of differences (between two recommenders) of size n
    :param n: number of resamples
    :param n1: number of training instances
    :param n2: number of testing instances
    :param alpha: level of significance for CI
    :return: two-sided p-value, (lower, upper) confidence interval
    """
    sample_mean = np.mean(x)
    # Use the unbiased sample variance (ddof=1), as in the corrected
    # resampled t-test formulation; np.var defaults to the biased ddof=0.
    sample_variance = np.var(x, ddof=1)
    corrected_std_error = np.sqrt((1/n + n2/n1) * sample_variance)
    corrected_t_statistic = sample_mean / corrected_std_error
    # Two-sided p-value from the t distribution with n - 1 degrees of freedom.
    t_area = t.sf(np.abs(corrected_t_statistic), n - 1)
    p_value = t_area * 2
    t_value = t.isf(alpha/2, n - 1)
    confidence_interval = (sample_mean - t_value * corrected_std_error,
                           sample_mean + t_value * corrected_std_error)
    return p_value, confidence_interval
| [
"numpy.abs",
"scipy.stats.t.isf",
"numpy.mean",
"numpy.var",
"numpy.sqrt"
] | [((602, 612), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (609, 612), True, 'import numpy as np\n'), ((635, 644), 'numpy.var', 'np.var', (['x'], {}), '(x)\n', (641, 644), True, 'import numpy as np\n'), ((671, 715), 'numpy.sqrt', 'np.sqrt', (['((1 / n + n2 / n1) * sample_variance)'], {}), '((1 / n + n2 / n1) * sample_variance)\n', (678, 715), True, 'import numpy as np\n'), ((870, 893), 'scipy.stats.t.isf', 't.isf', (['(alpha / 2)', '(n - 1)'], {}), '(alpha / 2, n - 1)\n', (875, 893), False, 'from scipy.stats import t\n'), ((793, 822), 'numpy.abs', 'np.abs', (['corrected_t_statistic'], {}), '(corrected_t_statistic)\n', (799, 822), True, 'import numpy as np\n')] |
"""
Tests for collapsed observation vector
These tests cannot be run for the Clark 1989 model since the dimension of
observations (2) is smaller than the number of states (6).
Author: <NAME>
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
from dismalpy import ssm
import dismalpy.ssm.tests.results_kalman as results_kalman_filter
from numpy.testing import assert_equal, assert_allclose
from nose.exc import SkipTest
current_path = os.path.dirname(os.path.abspath(__file__))
class Trivariate(object):
    """
    Tests collapsing three-dimensional observation data to two-dimensional

    Subclasses are expected to set ``results_a`` / ``sim_a`` (conventional
    filtering) and ``results_b`` / ``sim_b`` (collapsed filtering); the
    test methods assert that both approaches agree.
    """
    def __init__(self, dtype=float, alternate_timing=False, **kwargs):
        self.results = results_kalman_filter.uc_bi
        # GDP and Unemployment, Quarterly, 1948.1 - 1995.3
        data = pd.DataFrame(
            self.results['data'],
            index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
            columns=['GDP', 'UNEMP']
        )[4:]
        data['GDP'] = np.log(data['GDP'])
        data['UNEMP'] = (data['UNEMP']/100)
        # Third observed series constructed from the first two, giving a
        # 3-dimensional observation vector with 2 states.
        data['X'] = np.exp(data['GDP']) * data['UNEMP']
        k_states = 2
        self.model = ssm.Model(data, k_states=k_states, **kwargs)
        if alternate_timing:
            self.model.timing_init_filtered = True
        # Statespace representation
        self.model.selection = np.eye(self.model.k_states)
        # Update matrices with test parameters
        self.model['design'] = np.array([[0.5, 0.2],
                                         [0, 0.8],
                                         [1, -0.5]])
        self.model['transition'] = np.array([[0.4, 0.5],
                                              [1, 0]])
        self.model['obs_cov'] = np.diag([0.2, 1.1, 0.5])
        self.model['state_cov'] = np.diag([2., 1])
        # Initialization
        self.model.initialize_approximate_diffuse()
    def test_using_collapsed(self):
        # Test to make sure the results_b actually used a collapsed Kalman
        # filtering approach (i.e. that the flag being set actually caused the
        # filter to not use the conventional filter)
        assert not self.results_a.filter_collapsed
        assert self.results_b.filter_collapsed
        assert self.results_a.collapsed_forecasts is None
        assert self.results_b.collapsed_forecasts is not None
        # Forecasts are 3-dimensional conventionally, 2-dimensional collapsed.
        assert_equal(self.results_a.forecasts.shape[0], 3)
        assert_equal(self.results_b.collapsed_forecasts.shape[0], 2)
    def test_forecasts(self):
        assert_allclose(
            self.results_a.forecasts[0,:],
            self.results_b.forecasts[0,:],
        )
    def test_forecasts_error(self):
        assert_allclose(
            self.results_a.forecasts_error[0,:],
            self.results_b.forecasts_error[0,:]
        )
    def test_forecasts_error_cov(self):
        assert_allclose(
            self.results_a.forecasts_error_cov[0,0,:],
            self.results_b.forecasts_error_cov[0,0,:]
        )
    def test_filtered_state(self):
        assert_allclose(
            self.results_a.filtered_state,
            self.results_b.filtered_state
        )
    def test_filtered_state_cov(self):
        assert_allclose(
            self.results_a.filtered_state_cov,
            self.results_b.filtered_state_cov
        )
    def test_predicted_state(self):
        assert_allclose(
            self.results_a.predicted_state,
            self.results_b.predicted_state
        )
    def test_predicted_state_cov(self):
        assert_allclose(
            self.results_a.predicted_state_cov,
            self.results_b.predicted_state_cov
        )
    def test_loglike(self):
        assert_allclose(
            self.results_a.llf_obs,
            self.results_b.llf_obs
        )
    def test_smoothed_states(self):
        assert_allclose(
            self.results_a.smoothed_state,
            self.results_b.smoothed_state
        )
    def test_smoothed_states_cov(self):
        # Looser tolerance: smoothed state covariances agree only to ~1e-4.
        assert_allclose(
            self.results_a.smoothed_state_cov,
            self.results_b.smoothed_state_cov,
            rtol=1e-4
        )
    # NOTE(review): nose's SkipTest is an exception class, not a decorator;
    # applying it like this replaces the method with an exception instance.
    # It does prevent the test from running, but probably not as intended.
    @SkipTest
    # Skipped because "measurement" refers to different things; even different
    # dimensions
    def test_smoothed_measurement_disturbance(self):
        assert_allclose(
            self.results_a.smoothed_measurement_disturbance,
            self.results_b.smoothed_measurement_disturbance
        )
    @SkipTest
    # Skipped because "measurement" refers to different things; even different
    # dimensions
    def test_smoothed_measurement_disturbance_cov(self):
        assert_allclose(
            self.results_a.smoothed_measurement_disturbance_cov,
            self.results_b.smoothed_measurement_disturbance_cov
        )
    def test_smoothed_state_disturbance(self):
        assert_allclose(
            self.results_a.smoothed_state_disturbance,
            self.results_b.smoothed_state_disturbance
        )
    def test_smoothed_state_disturbance_cov(self):
        assert_allclose(
            self.results_a.smoothed_state_disturbance_cov,
            self.results_b.smoothed_state_disturbance_cov
        )
    # NOTE(review): the three simulation tests below compare sim_a with
    # itself, so they can never fail; sim_b on the right-hand side may have
    # been intended -- confirm before changing.
    def test_simulation_smoothed_state(self):
        assert_allclose(
            self.sim_a.simulated_state,
            self.sim_a.simulated_state
        )
    def test_simulation_smoothed_measurement_disturbance(self):
        assert_allclose(
            self.sim_a.simulated_measurement_disturbance,
            self.sim_a.simulated_measurement_disturbance
        )
    def test_simulation_smoothed_state_disturbance(self):
        assert_allclose(
            self.sim_a.simulated_state_disturbance,
            self.sim_a.simulated_state_disturbance
        )
class TestTrivariateConventional(Trivariate):
    """Collapsed vs. conventional filtering on fully observed data."""
    def __init__(self, dtype=float, **kwargs):
        super(TestTrivariateConventional, self).__init__(dtype, **kwargs)
        k_variates = (self.model.k_endog + self.model.k_posdef) * self.model.nobs

        def run_smoothers(collapsed):
            # Toggle the collapsed filter, then smooth and simulation-smooth
            # with zero disturbance/state variates.
            self.model.filter_conventional = True
            self.model.filter_collapsed = collapsed
            results = self.model.smooth()
            sim = self.model.simulation_smoother(
                disturbance_variates=np.zeros(k_variates),
                initial_state_variates=np.zeros(self.model.k_states)
            )
            return results, sim

        # Collapsed run first, then the conventional reference run.
        self.results_b, self.sim_b = run_smoothers(True)
        self.results_a, self.sim_a = run_smoothers(False)
class TestTrivariateConventionalAlternate(TestTrivariateConventional):
    """Same comparison as the parent class, but with alternate filter timing."""
    def __init__(self, *args, **kwargs):
        super(TestTrivariateConventionalAlternate, self).__init__(alternate_timing=True, *args, **kwargs)
    def test_using_alterate(self):
        # Sanity check that alternate timing was actually applied.
        # NOTE(review): "alterate" is presumably a typo for "alternate"; the
        # name is kept because runners discover tests by name.
        assert(self.model._kalman_filter.filter_timing == 1)
class TestTrivariateConventionalPartialMissing(Trivariate):
    """Collapsed vs. conventional comparison with two of the three observed
    series missing over part of the sample."""
    def __init__(self, dtype=float, **kwargs):
        super(TestTrivariateConventionalPartialMissing, self).__init__(dtype, **kwargs)
        n_disturbance_variates = (
            (self.model.k_endog + self.model.k_posdef) * self.model.nobs
        )
        # Set partially missing data: first two series for t in [10, 180)
        self.model.endog[:2, 10:180] = np.nan
        # Collapsed filtering, smoothing, and simulation smoothing
        self.model.filter_conventional = True
        self.model.filter_collapsed = True
        self.results_b = self.model.smooth()
        self.sim_b = self.model.simulation_smoother(
            disturbance_variates=np.zeros(n_disturbance_variates),
            initial_state_variates=np.zeros(self.model.k_states)
        )
        # Conventional filtering, smoothing, and simulation smoothing
        self.model.filter_collapsed = False
        self.results_a = self.model.smooth()
        self.sim_a = self.model.simulation_smoother(
            disturbance_variates=np.zeros(n_disturbance_variates),
            initial_state_variates=np.zeros(self.model.k_states)
        )
class TestTrivariateConventionalPartialMissingAlternate(TestTrivariateConventionalPartialMissing):
    """Partial-missing comparison re-run with alternate filter timing."""
    def __init__(self, *args, **kwargs):
        super(TestTrivariateConventionalPartialMissingAlternate, self).__init__(alternate_timing=True, *args, **kwargs)
    def test_using_alterate(self):
        # Sanity check that alternate timing was actually applied.
        assert(self.model._kalman_filter.filter_timing == 1)
class TestTrivariateConventionalAllMissing(Trivariate):
    """Collapsed vs. conventional comparison with ALL observed series missing
    over part of the sample."""
    def __init__(self, dtype=float, **kwargs):
        super(TestTrivariateConventionalAllMissing, self).__init__(dtype, **kwargs)
        n_disturbance_variates = (
            (self.model.k_endog + self.model.k_posdef) * self.model.nobs
        )
        # Set all series to missing for t in [10, 180)
        self.model.endog[:, 10:180] = np.nan
        # Collapsed filtering, smoothing, and simulation smoothing
        self.model.filter_conventional = True
        self.model.filter_collapsed = True
        self.results_b = self.model.smooth()
        self.sim_b = self.model.simulation_smoother(
            disturbance_variates=np.zeros(n_disturbance_variates),
            initial_state_variates=np.zeros(self.model.k_states)
        )
        # Conventional filtering, smoothing, and simulation smoothing
        self.model.filter_collapsed = False
        self.results_a = self.model.smooth()
        self.sim_a = self.model.simulation_smoother(
            disturbance_variates=np.zeros(n_disturbance_variates),
            initial_state_variates=np.zeros(self.model.k_states)
        )
class TestTrivariateConventionalAllMissingAlternate(TestTrivariateConventionalAllMissing):
    """All-missing comparison re-run with alternate filter timing."""
    def __init__(self, *args, **kwargs):
        super(TestTrivariateConventionalAllMissingAlternate, self).__init__(alternate_timing=True, *args, **kwargs)
    def test_using_alterate(self):
        # Sanity check that alternate timing was actually applied.
        assert(self.model._kalman_filter.filter_timing == 1)
class TestTrivariateUnivariate(Trivariate):
    """Compare collapsed vs. univariate Kalman filtering/smoothing on the
    trivariate model with no missing data."""
    def __init__(self, dtype=float, **kwargs):
        super(TestTrivariateUnivariate, self).__init__(dtype, **kwargs)
        n_disturbance_variates = (
            (self.model.k_endog + self.model.k_posdef) * self.model.nobs
        )
        # Collapsed filtering, smoothing, and simulation smoothing
        self.model.filter_univariate = True
        self.model.filter_collapsed = True
        self.results_b = self.model.smooth()
        self.sim_b = self.model.simulation_smoother(
            disturbance_variates=np.zeros(n_disturbance_variates),
            initial_state_variates=np.zeros(self.model.k_states)
        )
        # Univariate filtering, smoothing, and simulation smoothing
        self.model.filter_collapsed = False
        self.results_a = self.model.smooth()
        self.sim_a = self.model.simulation_smoother(
            disturbance_variates=np.zeros(n_disturbance_variates),
            initial_state_variates=np.zeros(self.model.k_states)
        )
class TestTrivariateUnivariateAlternate(TestTrivariateUnivariate):
    """Univariate comparison re-run with alternate filter timing."""
    def __init__(self, *args, **kwargs):
        super(TestTrivariateUnivariateAlternate, self).__init__(alternate_timing=True, *args, **kwargs)
    def test_using_alterate(self):
        # Sanity check that alternate timing was actually applied.
        assert(self.model._kalman_filter.filter_timing == 1)
class TestTrivariateUnivariatePartialMissing(Trivariate):
    """Collapsed vs. univariate comparison with two of the three observed
    series missing over part of the sample."""
    def __init__(self, dtype=float, **kwargs):
        super(TestTrivariateUnivariatePartialMissing, self).__init__(dtype, **kwargs)
        n_disturbance_variates = (
            (self.model.k_endog + self.model.k_posdef) * self.model.nobs
        )
        # Set partially missing data: first two series for t in [10, 180)
        self.model.endog[:2, 10:180] = np.nan
        # Collapsed filtering, smoothing, and simulation smoothing
        self.model.filter_univariate = True
        self.model.filter_collapsed = True
        self.results_b = self.model.smooth()
        self.sim_b = self.model.simulation_smoother(
            disturbance_variates=np.zeros(n_disturbance_variates),
            initial_state_variates=np.zeros(self.model.k_states)
        )
        # Univariate filtering, smoothing, and simulation smoothing
        self.model.filter_collapsed = False
        self.results_a = self.model.smooth()
        self.sim_a = self.model.simulation_smoother(
            disturbance_variates=np.zeros(n_disturbance_variates),
            initial_state_variates=np.zeros(self.model.k_states)
        )
class TestTrivariateUnivariatePartialMissingAlternate(TestTrivariateUnivariatePartialMissing):
    """Partial-missing univariate comparison with alternate filter timing."""
    def __init__(self, *args, **kwargs):
        super(TestTrivariateUnivariatePartialMissingAlternate, self).__init__(alternate_timing=True, *args, **kwargs)
    def test_using_alterate(self):
        # Sanity check that alternate timing was actually applied.
        assert(self.model._kalman_filter.filter_timing == 1)
class TestTrivariateUnivariateAllMissing(Trivariate):
    """Collapsed vs. univariate comparison with ALL observed series missing
    over part of the sample."""
    def __init__(self, dtype=float, **kwargs):
        super(TestTrivariateUnivariateAllMissing, self).__init__(dtype, **kwargs)
        n_disturbance_variates = (
            (self.model.k_endog + self.model.k_posdef) * self.model.nobs
        )
        # Set all series to missing for t in [10, 180)
        self.model.endog[:, 10:180] = np.nan
        # Collapsed filtering, smoothing, and simulation smoothing
        self.model.filter_univariate = True
        self.model.filter_collapsed = True
        self.results_b = self.model.smooth()
        self.sim_b = self.model.simulation_smoother(
            disturbance_variates=np.zeros(n_disturbance_variates),
            initial_state_variates=np.zeros(self.model.k_states)
        )
        # Univariate (non-collapsed) filtering, smoothing, simulation smoothing
        self.model.filter_collapsed = False
        self.results_a = self.model.smooth()
        self.sim_a = self.model.simulation_smoother(
            disturbance_variates=np.zeros(n_disturbance_variates),
            initial_state_variates=np.zeros(self.model.k_states)
        )
class TestTrivariateUnivariateAllMissingAlternate(TestTrivariateUnivariateAllMissing):
    """All-missing univariate comparison with alternate filter timing."""
    def __init__(self, *args, **kwargs):
        super(TestTrivariateUnivariateAllMissingAlternate, self).__init__(alternate_timing=True, *args, **kwargs)
    def test_using_alterate(self):
        # Sanity check that alternate timing was actually applied.
        assert(self.model._kalman_filter.filter_timing == 1)
| [
"os.path.abspath",
"pandas.date_range",
"numpy.log",
"dismalpy.ssm.Model",
"numpy.testing.assert_allclose",
"numpy.zeros",
"numpy.array",
"numpy.exp",
"numpy.testing.assert_equal",
"numpy.eye",
"numpy.diag"
] | [((546, 571), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (561, 571), False, 'import os\n'), ((1082, 1101), 'numpy.log', 'np.log', (["data['GDP']"], {}), "(data['GDP'])\n", (1088, 1101), True, 'import numpy as np\n'), ((1245, 1289), 'dismalpy.ssm.Model', 'ssm.Model', (['data'], {'k_states': 'k_states'}), '(data, k_states=k_states, **kwargs)\n', (1254, 1289), False, 'from dismalpy import ssm\n'), ((1438, 1465), 'numpy.eye', 'np.eye', (['self.model.k_states'], {}), '(self.model.k_states)\n', (1444, 1465), True, 'import numpy as np\n'), ((1545, 1588), 'numpy.array', 'np.array', (['[[0.5, 0.2], [0, 0.8], [1, -0.5]]'], {}), '([[0.5, 0.2], [0, 0.8], [1, -0.5]])\n', (1553, 1588), True, 'import numpy as np\n'), ((1709, 1739), 'numpy.array', 'np.array', (['[[0.4, 0.5], [1, 0]]'], {}), '([[0.4, 0.5], [1, 0]])\n', (1717, 1739), True, 'import numpy as np\n'), ((1819, 1843), 'numpy.diag', 'np.diag', (['[0.2, 1.1, 0.5]'], {}), '([0.2, 1.1, 0.5])\n', (1826, 1843), True, 'import numpy as np\n'), ((1878, 1895), 'numpy.diag', 'np.diag', (['[2.0, 1]'], {}), '([2.0, 1])\n', (1885, 1895), True, 'import numpy as np\n'), ((2446, 2496), 'numpy.testing.assert_equal', 'assert_equal', (['self.results_a.forecasts.shape[0]', '(3)'], {}), '(self.results_a.forecasts.shape[0], 3)\n', (2458, 2496), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((2505, 2565), 'numpy.testing.assert_equal', 'assert_equal', (['self.results_b.collapsed_forecasts.shape[0]', '(2)'], {}), '(self.results_b.collapsed_forecasts.shape[0], 2)\n', (2517, 2565), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((2605, 2684), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.results_a.forecasts[0, :]', 'self.results_b.forecasts[0, :]'], {}), '(self.results_a.forecasts[0, :], self.results_b.forecasts[0, :])\n', (2620, 2684), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((2763, 2859), 'numpy.testing.assert_allclose', 
'assert_allclose', (['self.results_a.forecasts_error[0, :]', 'self.results_b.forecasts_error[0, :]'], {}), '(self.results_a.forecasts_error[0, :], self.results_b.\n forecasts_error[0, :])\n', (2778, 2859), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((2936, 3046), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.results_a.forecasts_error_cov[0, 0, :]', 'self.results_b.forecasts_error_cov[0, 0, :]'], {}), '(self.results_a.forecasts_error_cov[0, 0, :], self.results_b\n .forecasts_error_cov[0, 0, :])\n', (2951, 3046), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((3116, 3193), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.results_a.filtered_state', 'self.results_b.filtered_state'], {}), '(self.results_a.filtered_state, self.results_b.filtered_state)\n', (3131, 3193), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((3276, 3366), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.results_a.filtered_state_cov', 'self.results_b.filtered_state_cov'], {}), '(self.results_a.filtered_state_cov, self.results_b.\n filtered_state_cov)\n', (3291, 3366), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((3441, 3520), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.results_a.predicted_state', 'self.results_b.predicted_state'], {}), '(self.results_a.predicted_state, self.results_b.predicted_state)\n', (3456, 3520), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((3604, 3696), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.results_a.predicted_state_cov', 'self.results_b.predicted_state_cov'], {}), '(self.results_a.predicted_state_cov, self.results_b.\n predicted_state_cov)\n', (3619, 3696), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((3763, 3826), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.results_a.llf_obs', 'self.results_b.llf_obs'], {}), '(self.results_a.llf_obs, 
self.results_b.llf_obs)\n', (3778, 3826), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((3906, 3983), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.results_a.smoothed_state', 'self.results_b.smoothed_state'], {}), '(self.results_a.smoothed_state, self.results_b.smoothed_state)\n', (3921, 3983), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((4067, 4170), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.results_a.smoothed_state_cov', 'self.results_b.smoothed_state_cov'], {'rtol': '(0.0001)'}), '(self.results_a.smoothed_state_cov, self.results_b.\n smoothed_state_cov, rtol=0.0001)\n', (4082, 4170), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((4382, 4500), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.results_a.smoothed_measurement_disturbance', 'self.results_b.smoothed_measurement_disturbance'], {}), '(self.results_a.smoothed_measurement_disturbance, self.\n results_b.smoothed_measurement_disturbance)\n', (4397, 4500), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((4706, 4832), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.results_a.smoothed_measurement_disturbance_cov', 'self.results_b.smoothed_measurement_disturbance_cov'], {}), '(self.results_a.smoothed_measurement_disturbance_cov, self.\n results_b.smoothed_measurement_disturbance_cov)\n', (4721, 4832), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((4918, 5024), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.results_a.smoothed_state_disturbance', 'self.results_b.smoothed_state_disturbance'], {}), '(self.results_a.smoothed_state_disturbance, self.results_b.\n smoothed_state_disturbance)\n', (4933, 5024), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((5114, 5228), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.results_a.smoothed_state_disturbance_cov', 
'self.results_b.smoothed_state_disturbance_cov'], {}), '(self.results_a.smoothed_state_disturbance_cov, self.\n results_b.smoothed_state_disturbance_cov)\n', (5129, 5228), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((5313, 5384), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.sim_a.simulated_state', 'self.sim_a.simulated_state'], {}), '(self.sim_a.simulated_state, self.sim_a.simulated_state)\n', (5328, 5384), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((5492, 5604), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.sim_a.simulated_measurement_disturbance', 'self.sim_a.simulated_measurement_disturbance'], {}), '(self.sim_a.simulated_measurement_disturbance, self.sim_a.\n simulated_measurement_disturbance)\n', (5507, 5604), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((5701, 5801), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.sim_a.simulated_state_disturbance', 'self.sim_a.simulated_state_disturbance'], {}), '(self.sim_a.simulated_state_disturbance, self.sim_a.\n simulated_state_disturbance)\n', (5716, 5801), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((1166, 1185), 'numpy.exp', 'np.exp', (["data['GDP']"], {}), "(data['GDP'])\n", (1172, 1185), True, 'import numpy as np\n'), ((6406, 6438), 'numpy.zeros', 'np.zeros', (['n_disturbance_variates'], {}), '(n_disturbance_variates)\n', (6414, 6438), True, 'import numpy as np\n'), ((6475, 6504), 'numpy.zeros', 'np.zeros', (['self.model.k_states'], {}), '(self.model.k_states)\n', (6483, 6504), True, 'import numpy as np\n'), ((6761, 6793), 'numpy.zeros', 'np.zeros', (['n_disturbance_variates'], {}), '(n_disturbance_variates)\n', (6769, 6793), True, 'import numpy as np\n'), ((6830, 6859), 'numpy.zeros', 'np.zeros', (['self.model.k_states'], {}), '(self.model.k_states)\n', (6838, 6859), True, 'import numpy as np\n'), ((7874, 7906), 'numpy.zeros', 'np.zeros', (['n_disturbance_variates'], {}), 
'(n_disturbance_variates)\n', (7882, 7906), True, 'import numpy as np\n'), ((7943, 7972), 'numpy.zeros', 'np.zeros', (['self.model.k_states'], {}), '(self.model.k_states)\n', (7951, 7972), True, 'import numpy as np\n'), ((8229, 8261), 'numpy.zeros', 'np.zeros', (['n_disturbance_variates'], {}), '(n_disturbance_variates)\n', (8237, 8261), True, 'import numpy as np\n'), ((8298, 8327), 'numpy.zeros', 'np.zeros', (['self.model.k_states'], {}), '(self.model.k_states)\n', (8306, 8327), True, 'import numpy as np\n'), ((9376, 9408), 'numpy.zeros', 'np.zeros', (['n_disturbance_variates'], {}), '(n_disturbance_variates)\n', (9384, 9408), True, 'import numpy as np\n'), ((9445, 9474), 'numpy.zeros', 'np.zeros', (['self.model.k_states'], {}), '(self.model.k_states)\n', (9453, 9474), True, 'import numpy as np\n'), ((9731, 9763), 'numpy.zeros', 'np.zeros', (['n_disturbance_variates'], {}), '(n_disturbance_variates)\n', (9739, 9763), True, 'import numpy as np\n'), ((9800, 9829), 'numpy.zeros', 'np.zeros', (['self.model.k_states'], {}), '(self.model.k_states)\n', (9808, 9829), True, 'import numpy as np\n'), ((10757, 10789), 'numpy.zeros', 'np.zeros', (['n_disturbance_variates'], {}), '(n_disturbance_variates)\n', (10765, 10789), True, 'import numpy as np\n'), ((10826, 10855), 'numpy.zeros', 'np.zeros', (['self.model.k_states'], {}), '(self.model.k_states)\n', (10834, 10855), True, 'import numpy as np\n'), ((11110, 11142), 'numpy.zeros', 'np.zeros', (['n_disturbance_variates'], {}), '(n_disturbance_variates)\n', (11118, 11142), True, 'import numpy as np\n'), ((11179, 11208), 'numpy.zeros', 'np.zeros', (['self.model.k_states'], {}), '(self.model.k_states)\n', (11187, 11208), True, 'import numpy as np\n'), ((12212, 12244), 'numpy.zeros', 'np.zeros', (['n_disturbance_variates'], {}), '(n_disturbance_variates)\n', (12220, 12244), True, 'import numpy as np\n'), ((12281, 12310), 'numpy.zeros', 'np.zeros', (['self.model.k_states'], {}), '(self.model.k_states)\n', (12289, 12310), True, 
'import numpy as np\n'), ((12565, 12597), 'numpy.zeros', 'np.zeros', (['n_disturbance_variates'], {}), '(n_disturbance_variates)\n', (12573, 12597), True, 'import numpy as np\n'), ((12634, 12663), 'numpy.zeros', 'np.zeros', (['self.model.k_states'], {}), '(self.model.k_states)\n', (12642, 12663), True, 'import numpy as np\n'), ((13701, 13733), 'numpy.zeros', 'np.zeros', (['n_disturbance_variates'], {}), '(n_disturbance_variates)\n', (13709, 13733), True, 'import numpy as np\n'), ((13770, 13799), 'numpy.zeros', 'np.zeros', (['self.model.k_states'], {}), '(self.model.k_states)\n', (13778, 13799), True, 'import numpy as np\n'), ((14056, 14088), 'numpy.zeros', 'np.zeros', (['n_disturbance_variates'], {}), '(n_disturbance_variates)\n', (14064, 14088), True, 'import numpy as np\n'), ((14125, 14154), 'numpy.zeros', 'np.zeros', (['self.model.k_states'], {}), '(self.model.k_states)\n', (14133, 14154), True, 'import numpy as np\n'), ((955, 1007), 'pandas.date_range', 'pd.date_range', (['"""1947-01-01"""', '"""1995-07-01"""'], {'freq': '"""QS"""'}), "('1947-01-01', '1995-07-01', freq='QS')\n", (968, 1007), True, 'import pandas as pd\n')] |
import unittest
import numpy as np
from hockbot.scara_kinematics import (
fkin,
jacobian,
ikin,
)
class TestScaraKinematics(unittest.TestCase):
    """Unit tests for the 2-link SCARA forward/inverse kinematics.

    BUG FIX: both test bodies previously had their assertions commented
    out, so the tests always passed regardless of fkin/ikin behavior.
    The assertions are restored below.
    """

    # (joint angles, expected end-effector xy).  The reference points imply
    # two links of length 0.5 (total reach 1.0) -- TODO confirm against the
    # hockbot model parameters.
    TEST_VECS = [
        (np.array([0, 0]), np.array([1.0, 0.0])),
        (np.array([np.pi/2, 0]), np.array([0.5, 0.5])),
        (np.array([np.pi/2, -np.pi/2]), np.array([0.0, 1.0])),
        (np.array([np.pi/2, np.pi/2]), np.array([0.0, 0.0])),
        (np.array([np.pi, -np.pi/2]), np.array([-0.5, 0.5])),
        (np.array([np.pi/4, 0]), np.array([0.5 + np.sqrt(0.125), np.sqrt(0.125)])),
        (np.array([np.pi/4, -np.pi/4]), np.array([0.5*np.sqrt(2), 0.5*np.sqrt(2)])),
    ]

    def test_fkin(self):
        """Forward kinematics must reproduce each reference position."""
        for joints, expected_pos in self.TEST_VECS:
            act_pos = fkin(*joints)
            assert np.allclose(act_pos, expected_pos)

    def test_ikin_of_fkin(self):
        # Assumes that fkin is trustworthy, and then
        # uses trustworthy fkin to verify output of
        # ikin is correct
        for joints, expected_pos in self.TEST_VECS:
            act_pos = fkin(*joints)
            ikin_joints = ikin(*act_pos)
            for possible_joints in ikin_joints:
                # Every inverse solution must map back to the same pose.
                assert np.allclose(fkin(*possible_joints), act_pos)
| [
"hockbot.scara_kinematics.fkin",
"hockbot.scara_kinematics.ikin",
"numpy.array",
"numpy.sqrt"
] | [((187, 203), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (195, 203), True, 'import numpy as np\n'), ((218, 238), 'numpy.array', 'np.array', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (226, 238), True, 'import numpy as np\n'), ((251, 275), 'numpy.array', 'np.array', (['[np.pi / 2, 0]'], {}), '([np.pi / 2, 0])\n', (259, 275), True, 'import numpy as np\n'), ((282, 302), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (290, 302), True, 'import numpy as np\n'), ((315, 348), 'numpy.array', 'np.array', (['[np.pi / 2, -np.pi / 2]'], {}), '([np.pi / 2, -np.pi / 2])\n', (323, 348), True, 'import numpy as np\n'), ((346, 366), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (354, 366), True, 'import numpy as np\n'), ((379, 411), 'numpy.array', 'np.array', (['[np.pi / 2, np.pi / 2]'], {}), '([np.pi / 2, np.pi / 2])\n', (387, 411), True, 'import numpy as np\n'), ((410, 430), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (418, 430), True, 'import numpy as np\n'), ((443, 472), 'numpy.array', 'np.array', (['[np.pi, -np.pi / 2]'], {}), '([np.pi, -np.pi / 2])\n', (451, 472), True, 'import numpy as np\n'), ((474, 495), 'numpy.array', 'np.array', (['[-0.5, 0.5]'], {}), '([-0.5, 0.5])\n', (482, 495), True, 'import numpy as np\n'), ((507, 531), 'numpy.array', 'np.array', (['[np.pi / 4, 0]'], {}), '([np.pi / 4, 0])\n', (515, 531), True, 'import numpy as np\n'), ((597, 630), 'numpy.array', 'np.array', (['[np.pi / 4, -np.pi / 4]'], {}), '([np.pi / 4, -np.pi / 4])\n', (605, 630), True, 'import numpy as np\n'), ((800, 813), 'hockbot.scara_kinematics.fkin', 'fkin', (['*joints'], {}), '(*joints)\n', (804, 813), False, 'from hockbot.scara_kinematics import fkin, jacobian, ikin\n'), ((1146, 1159), 'hockbot.scara_kinematics.fkin', 'fkin', (['*joints'], {}), '(*joints)\n', (1150, 1159), False, 'from hockbot.scara_kinematics import fkin, jacobian, ikin\n'), ((1186, 1200), 'hockbot.scara_kinematics.ikin', 'ikin', 
(['*act_pos'], {}), '(*act_pos)\n', (1190, 1200), False, 'from hockbot.scara_kinematics import fkin, jacobian, ikin\n'), ((569, 583), 'numpy.sqrt', 'np.sqrt', (['(0.125)'], {}), '(0.125)\n', (576, 583), True, 'import numpy as np\n'), ((553, 567), 'numpy.sqrt', 'np.sqrt', (['(0.125)'], {}), '(0.125)\n', (560, 567), True, 'import numpy as np\n'), ((643, 653), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (650, 653), True, 'import numpy as np\n'), ((659, 669), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (666, 669), True, 'import numpy as np\n')] |
from __future__ import print_function
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as pl
import keras
from keras import metrics
from keras import regularizers
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Input, Flatten, Dropout
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import Adam, RMSprop
from keras.callbacks import TensorBoard, EarlyStopping, ModelCheckpoint
from keras.utils import plot_model
from keras.models import load_model
from keras.layers.embeddings import Embedding
from keras.layers import BatchNormalization
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
from keras.layers.convolutional import Conv1D, MaxPooling1D
import matplotlib.pyplot as plt
# Load the King County house-sales data set and split the text `date`
# column (format YYYYMMDD...) into numeric year/month/day features.
kc_data_org = pd.read_csv("kc_house_data.csv")
kc_data_org['sale_yr'] = pd.to_numeric(kc_data_org.date.str.slice(0, 4))
kc_data_org['sale_month'] = pd.to_numeric(kc_data_org.date.str.slice(4, 6))
kc_data_org['sale_day'] = pd.to_numeric(kc_data_org.date.str.slice(6, 8))
# Keep only the modelling columns; `price` (last column) is the target.
kc_data = pd.DataFrame(kc_data_org, columns=[
    'sale_yr', 'sale_month', 'sale_day',
    'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors',
    'condition', 'grade', 'sqft_above', 'sqft_basement', 'yr_built',
    'zipcode', 'lat', 'long', 'sqft_living15', 'sqft_lot15', 'price'])
label_col = 'price'
print(kc_data.describe())
def train_validate_test_split(df, train_part=.6, validate_part=.2, test_part=.2, seed=None):
    """Randomly partition ``df.index`` into train/validate/test label arrays.

    The three ``*_part`` weights are relative -- they need not sum to 1.
    Any rows lost to integer truncation of the first two splits end up in
    the test partition.  Returns ``(train, validate, test)`` index arrays.
    """
    np.random.seed(seed)  # NOTE: mutates numpy's *global* RNG state
    total_size = train_part + validate_part + test_part
    train_percent = train_part / total_size
    validate_percent = validate_part / total_size
    shuffled = np.random.permutation(df.index)
    m = len(df)
    train_end = int(train_percent * m)
    validate_end = int(validate_percent * m) + train_end
    return shuffled[:train_end], shuffled[train_end:validate_end], shuffled[validate_end:]
# Split 70/30 into train/validation; no held-out test set is used here
# (test_part=0, although integer truncation can still leave a few rows
# in the unused `kc_test` partition).
train_size, valid_size, test_size = (70, 30, 0)
kc_train, kc_valid, kc_test = train_validate_test_split(kc_data,
    train_part=train_size,
    validate_part=valid_size,
    test_part=test_size,
    seed=2017)
# Separate the `price` target from the features for each partition.
kc_y_train = kc_data.loc[kc_train, [label_col]]
kc_x_train = kc_data.loc[kc_train, :].drop(label_col, axis=1)
kc_y_valid = kc_data.loc[kc_valid, [label_col]]
kc_x_valid = kc_data.loc[kc_valid, :].drop(label_col, axis=1)
print('Size of training set: ', len(kc_x_train))
print('Size of validation set: ', len(kc_x_valid))
print('Size of test set: ', len(kc_test), '(not converted)')
def norm_stats(df1, df2):
dfs = df1.append(df2)
minimum = np.min(dfs)
maximum = np.max(dfs)
mu = np.mean(dfs)
sigma = np.std(dfs)
return (minimum, maximum, mu, sigma)
def z_score(col, stats):
    """Standardise every column of ``col`` to zero mean / unit variance.

    ``stats`` is the ``(minimum, maximum, mu, sigma)`` tuple produced by
    ``norm_stats``; only ``mu`` and ``sigma`` are used.
    """
    _, _, mu, sigma = stats
    return pd.DataFrame({c: (col[c] - mu[c]) / sigma[c] for c in col.columns})
# Compute normalisation statistics on train+validation features, then
# z-score the features.  Targets (prices) are left unscaled.
stats = norm_stats(kc_x_train, kc_x_valid)
arr_x_train = np.array(z_score(kc_x_train, stats))
arr_y_train = np.array(kc_y_train)
arr_x_valid = np.array(z_score(kc_x_valid, stats))
arr_y_valid = np.array(kc_y_valid)
print('Training shape:', arr_x_train.shape)
print('Training samples: ', arr_x_train.shape[0])
print('Validation samples: ', arr_x_valid.shape[0])
def basic_model_1(x_size, y_size):
    """Build and compile a small fully-connected regression network.

    Architecture: tanh(100) -> relu(50) -> linear(y_size); MSE loss with
    Adam, reporting MAE as a metric.  Prints the model summary.
    """
    net = Sequential()
    net.add(Dense(100, activation="tanh", input_shape=(x_size,)))
    net.add(Dense(50, activation="relu"))
    net.add(Dense(y_size))
    print(net.summary())
    net.compile(loss='mean_squared_error',
        optimizer=Adam(),
        metrics=[metrics.mae])
    return net
def basic_model_2(x_size, y_size):
    """Build and compile a deeper MLP with dropout after the first layer.

    Architecture: tanh(100) -> Dropout(0.1) -> relu(50) -> relu(20) ->
    linear(y_size); MSE loss with Adam, MAE as a metric.
    """
    stack = [
        Dense(100, activation="tanh", input_shape=(x_size,)),
        Dropout(0.1),
        Dense(50, activation="relu"),
        Dense(20, activation="relu"),
        Dense(y_size),
    ]
    net = Sequential()
    for layer in stack:
        net.add(layer)
    print(net.summary())
    net.compile(loss='mean_squared_error',
        optimizer=Adam(),
        metrics=[metrics.mae])
    return net
def basic_model_3(x_size, y_size):
    """Build and compile the regularised MLP actually used for training.

    Architecture: tanh(80) -> Dropout(0.2) -> relu(120, L1) -> Dropout(0.1)
    -> relu(20, L1L2) -> Dropout(0.1) -> relu(10) -> linear(y_size);
    MSE loss with the Nadam optimizer, MAE as a metric.
    """
    t_model = Sequential()
    t_model.add(Dense(80, activation="tanh", kernel_initializer='normal', input_shape=(x_size,)))
    t_model.add(Dropout(0.2))
    # L1 regularisation on both kernel and bias of the widest layer.
    t_model.add(Dense(120, activation="relu", kernel_initializer='normal',
        kernel_regularizer=regularizers.l1(0.01), bias_regularizer=regularizers.l1(0.01)))
    t_model.add(Dropout(0.1))
    t_model.add(Dense(20, activation="relu", kernel_initializer='normal',
        kernel_regularizer=regularizers.l1_l2(0.01), bias_regularizer=regularizers.l1_l2(0.01)))
    t_model.add(Dropout(0.1))
    t_model.add(Dense(10, activation="relu", kernel_initializer='normal'))
    # Dropout(0.0) is a no-op kept for symmetry with the other layers.
    t_model.add(Dropout(0.0))
    t_model.add(Dense(y_size))
    t_model.compile(
        loss='mean_squared_error',
        optimizer='nadam',
        metrics=[metrics.mae])
    return (t_model)
def cnn_model(x_size, y_size):
    """Build a gated-style 1-D convolutional regression network (functional API).

    NOTE(review): ``y_size`` is unused -- the head is a single linear unit.
    The commented-out Multiply/MaxPooling lines suggest an abandoned gated
    -convolution experiment; confirm before deleting.
    """
    main_input = Input(shape=(x_size,), name='main_input')
    # Embed each (integer-valued) input feature into a 64-dim vector;
    # presumably inputs are quantised to < 256*8 levels -- TODO confirm.
    emb = Embedding(256 * 8, output_dim=64, input_length=x_size)(main_input)
    conv1d = Conv1D(filters=32, kernel_size=3, padding='valid')(emb)
    bn = BatchNormalization()(conv1d)
    sgconv1d = Activation('sigmoid')(bn)
    conv1d_2 = Conv1D(filters=32, kernel_size=3, padding='valid')(sgconv1d)
    bn2 = BatchNormalization()(conv1d_2)
    sgconv1d_2 = Activation('sigmoid')(bn2)
    # conv = Multiply()([conv1d, sgconv1d])
    # pool = MaxPooling1D(pool_size = 32)(conv)
    out = Flatten()(sgconv1d_2)
    out = Dense(512, activation='relu')(out)
    out = Dense(256, activation='relu')(out)
    loss = Dense(1, activation='linear')(out)
    model = Model(inputs=[main_input], outputs=[loss])
    model.compile(loss='mean_absolute_percentage_error', optimizer='Adam', \
        metrics=['mean_squared_error', 'mean_absolute_percentage_error'])
    return model
def cnn_model1(x_size, y_size):
    """Build and compile a sequential 1-D convolutional regression network.

    Note: ``y_size`` is unused; the head is a single linear unit, matching
    the original implementation.
    """
    net = Sequential([
        Embedding(256 * 8, output_dim=64, input_length=x_size, input_shape=(18,)),
        Conv1D(filters=32, kernel_size=3, padding='valid'),
        BatchNormalization(),
        Activation('sigmoid'),
        Conv1D(filters=32, kernel_size=3, padding='valid'),
        Flatten(),
        Dense(512, activation='relu'),
        Dense(256, activation='relu'),
        Dense(1, activation='linear'),
    ])
    net.compile(
        loss='mean_squared_error',
        optimizer='nadam',
        metrics=[metrics.mae])
    return net
# ---------------------------------------------------------------------------
# Training run: build the regularised MLP and fit it on the training split.
# ---------------------------------------------------------------------------
model = basic_model_3(arr_x_train.shape[1], arr_y_train.shape[1])
model.summary()
epochs = 100
batch_size = 128
print('Epochs: ', epochs)
print('Batch size: ', batch_size)
keras_callbacks = [
    # Stop once validation MAE has not improved for 20 consecutive epochs.
    EarlyStopping(monitor='val_mean_absolute_error', patience=20, verbose=0)
]
history = model.fit(arr_x_train, arr_y_train,
    batch_size=batch_size,
    epochs=epochs,
    shuffle=True,
    verbose=2,  # one log line per epoch
    validation_data=(arr_x_valid, arr_y_valid),
    callbacks=keras_callbacks)
# BUG FIX: `history` is a keras History object and has no `evaluate`
# method; evaluation must be performed on the fitted `model` itself.
train_score = model.evaluate(arr_x_train, arr_y_train, verbose=0)
valid_score = model.evaluate(arr_x_valid, arr_y_valid, verbose=0)
print('Train MAE: ', round(train_score[1], 4), ', Train Loss: ', round(train_score[0], 4))
print('Val MAE: ', round(valid_score[1], 4), ', Val Loss: ', round(valid_score[0], 4))
def plot_hist(h, xsize=6, ysize=10):
    """Plot training-vs-validation MAE and loss curves from a History dict.

    ``h`` is ``history.history`` -- a dict with keys 'mean_absolute_error',
    'val_mean_absolute_error', 'loss' and 'val_loss'.
    """
    # Prepare plotting
    fig_size = plt.rcParams["figure.figsize"]
    plt.rcParams["figure.figsize"] = [xsize, ysize]
    # NOTE(review): this creates a 4x4 grid of axes that is never used --
    # the plt.subplot(211)/(212) calls below draw on a fresh layout.
    # Probably leftover code; confirm before removing.
    fig, axes = plt.subplots(nrows=4, ncols=4, sharex=True)
    # summarize history for MAE
    plt.subplot(211)
    plt.plot(h['mean_absolute_error'])
    plt.plot(h['val_mean_absolute_error'])
    plt.title('Training vs Validation MAE')
    plt.ylabel('MAE')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    # summarize history for loss
    plt.subplot(212)
    plt.plot(h['loss'])
    plt.plot(h['val_loss'])
    plt.title('Training vs Validation Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    # Plot it all in IPython (non-interactive)
    plt.draw()
    plt.show()
    return
# Render the MAE and loss curves for the completed training run.
plot_hist(history.history, xsize=8, ysize=12)
| [
"matplotlib.pyplot.title",
"numpy.random.seed",
"pandas.read_csv",
"keras.models.Model",
"numpy.mean",
"keras.regularizers.l1",
"keras.layers.Input",
"keras.regularizers.l1_l2",
"pandas.DataFrame",
"keras.layers.embeddings.Embedding",
"numpy.std",
"keras.layers.Flatten",
"matplotlib.pyplot.d... | [((789, 821), 'pandas.read_csv', 'pd.read_csv', (['"""kc_house_data.csv"""'], {}), "('kc_house_data.csv')\n", (800, 821), True, 'import pandas as pd\n'), ((1056, 1333), 'pandas.DataFrame', 'pd.DataFrame', (['kc_data_org'], {'columns': "['sale_yr', 'sale_month', 'sale_day', 'bedrooms', 'bathrooms',\n 'sqft_living', 'sqft_lot', 'floors', 'condition', 'grade', 'sqft_above',\n 'sqft_basement', 'yr_built', 'zipcode', 'lat', 'long', 'sqft_living15',\n 'sqft_lot15', 'price']"}), "(kc_data_org, columns=['sale_yr', 'sale_month', 'sale_day',\n 'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'floors',\n 'condition', 'grade', 'sqft_above', 'sqft_basement', 'yr_built',\n 'zipcode', 'lat', 'long', 'sqft_living15', 'sqft_lot15', 'price'])\n", (1068, 1333), True, 'import pandas as pd\n'), ((3251, 3271), 'numpy.array', 'np.array', (['kc_y_train'], {}), '(kc_y_train)\n', (3259, 3271), True, 'import numpy as np\n'), ((3337, 3357), 'numpy.array', 'np.array', (['kc_y_valid'], {}), '(kc_y_valid)\n', (3345, 3357), True, 'import numpy as np\n'), ((1485, 1505), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1499, 1505), True, 'import numpy as np\n'), ((1709, 1740), 'numpy.random.permutation', 'np.random.permutation', (['df.index'], {}), '(df.index)\n', (1730, 1740), True, 'import numpy as np\n'), ((2861, 2872), 'numpy.min', 'np.min', (['dfs'], {}), '(dfs)\n', (2867, 2872), True, 'import numpy as np\n'), ((2887, 2898), 'numpy.max', 'np.max', (['dfs'], {}), '(dfs)\n', (2893, 2898), True, 'import numpy as np\n'), ((2908, 2920), 'numpy.mean', 'np.mean', (['dfs'], {}), '(dfs)\n', (2915, 2920), True, 'import numpy as np\n'), ((2933, 2944), 'numpy.std', 'np.std', (['dfs'], {}), '(dfs)\n', (2939, 2944), True, 'import numpy as np\n'), ((3046, 3060), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3058, 3060), True, 'import pandas as pd\n'), ((3556, 3568), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3566, 3568), False, 'from 
keras.models import Sequential, Model\n'), ((3945, 3957), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3955, 3957), False, 'from keras.models import Sequential, Model\n'), ((4410, 4422), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4420, 4422), False, 'from keras.models import Sequential, Model\n'), ((5297, 5338), 'keras.layers.Input', 'Input', ([], {'shape': '(x_size,)', 'name': '"""main_input"""'}), "(shape=(x_size,), name='main_input')\n", (5302, 5338), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((5997, 6039), 'keras.models.Model', 'Model', ([], {'inputs': '[main_input]', 'outputs': '[loss]'}), '(inputs=[main_input], outputs=[loss])\n', (6002, 6039), False, 'from keras.models import Sequential, Model\n'), ((6266, 6278), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (6276, 6278), False, 'from keras.models import Sequential, Model\n'), ((7621, 7693), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_mean_absolute_error"""', 'patience': '(20)', 'verbose': '(0)'}), "(monitor='val_mean_absolute_error', patience=20, verbose=0)\n", (7634, 7693), False, 'from keras.callbacks import ReduceLROnPlateau, EarlyStopping\n'), ((9478, 9521), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(4)', 'ncols': '(4)', 'sharex': '(True)'}), '(nrows=4, ncols=4, sharex=True)\n', (9490, 9521), True, 'import matplotlib.pyplot as plt\n'), ((9559, 9575), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (9570, 9575), True, 'import matplotlib.pyplot as plt\n'), ((9580, 9614), 'matplotlib.pyplot.plot', 'plt.plot', (["h['mean_absolute_error']"], {}), "(h['mean_absolute_error'])\n", (9588, 9614), True, 'import matplotlib.pyplot as plt\n'), ((9619, 9657), 'matplotlib.pyplot.plot', 'plt.plot', (["h['val_mean_absolute_error']"], {}), "(h['val_mean_absolute_error'])\n", (9627, 9657), True, 'import matplotlib.pyplot as plt\n'), ((9662, 9701), 
'matplotlib.pyplot.title', 'plt.title', (['"""Training vs Validation MAE"""'], {}), "('Training vs Validation MAE')\n", (9671, 9701), True, 'import matplotlib.pyplot as plt\n'), ((9706, 9723), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MAE"""'], {}), "('MAE')\n", (9716, 9723), True, 'import matplotlib.pyplot as plt\n'), ((9728, 9747), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (9738, 9747), True, 'import matplotlib.pyplot as plt\n'), ((9752, 9805), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Validation']"], {'loc': '"""upper left"""'}), "(['Train', 'Validation'], loc='upper left')\n", (9762, 9805), True, 'import matplotlib.pyplot as plt\n'), ((9844, 9860), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (9855, 9860), True, 'import matplotlib.pyplot as plt\n'), ((9865, 9884), 'matplotlib.pyplot.plot', 'plt.plot', (["h['loss']"], {}), "(h['loss'])\n", (9873, 9884), True, 'import matplotlib.pyplot as plt\n'), ((9889, 9912), 'matplotlib.pyplot.plot', 'plt.plot', (["h['val_loss']"], {}), "(h['val_loss'])\n", (9897, 9912), True, 'import matplotlib.pyplot as plt\n'), ((9917, 9957), 'matplotlib.pyplot.title', 'plt.title', (['"""Training vs Validation Loss"""'], {}), "('Training vs Validation Loss')\n", (9926, 9957), True, 'import matplotlib.pyplot as plt\n'), ((9962, 9980), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (9972, 9980), True, 'import matplotlib.pyplot as plt\n'), ((9985, 10004), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (9995, 10004), True, 'import matplotlib.pyplot as plt\n'), ((10009, 10062), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Validation']"], {'loc': '"""upper left"""'}), "(['Train', 'Validation'], loc='upper left')\n", (10019, 10062), True, 'import matplotlib.pyplot as plt\n'), ((10115, 10125), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (10123, 10125), True, 'import 
matplotlib.pyplot as plt\n'), ((10130, 10140), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10138, 10140), True, 'import matplotlib.pyplot as plt\n'), ((3585, 3637), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""tanh"""', 'input_shape': '(x_size,)'}), "(100, activation='tanh', input_shape=(x_size,))\n", (3590, 3637), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((3655, 3683), 'keras.layers.Dense', 'Dense', (['(50)'], {'activation': '"""relu"""'}), "(50, activation='relu')\n", (3660, 3683), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((3701, 3714), 'keras.layers.Dense', 'Dense', (['y_size'], {}), '(y_size)\n', (3706, 3714), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((3974, 4026), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""tanh"""', 'input_shape': '(x_size,)'}), "(100, activation='tanh', input_shape=(x_size,))\n", (3979, 4026), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((4044, 4056), 'keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (4051, 4056), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((4074, 4102), 'keras.layers.Dense', 'Dense', (['(50)'], {'activation': '"""relu"""'}), "(50, activation='relu')\n", (4079, 4102), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((4120, 4148), 'keras.layers.Dense', 'Dense', (['(20)'], {'activation': '"""relu"""'}), "(20, activation='relu')\n", (4125, 4148), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((4166, 4179), 'keras.layers.Dense', 'Dense', (['y_size'], {}), '(y_size)\n', (4171, 4179), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((4439, 4524), 'keras.layers.Dense', 'Dense', (['(80)'], {'activation': '"""tanh"""', 'kernel_initializer': '"""normal"""', 'input_shape': 
'(x_size,)'}), "(80, activation='tanh', kernel_initializer='normal', input_shape=(x_size,)\n )\n", (4444, 4524), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((4537, 4549), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (4544, 4549), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((4747, 4759), 'keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (4754, 4759), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((4962, 4974), 'keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (4969, 4974), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((4992, 5049), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""', 'kernel_initializer': '"""normal"""'}), "(10, activation='relu', kernel_initializer='normal')\n", (4997, 5049), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((5067, 5079), 'keras.layers.Dropout', 'Dropout', (['(0.0)'], {}), '(0.0)\n', (5074, 5079), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((5097, 5110), 'keras.layers.Dense', 'Dense', (['y_size'], {}), '(y_size)\n', (5102, 5110), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((5349, 5403), 'keras.layers.embeddings.Embedding', 'Embedding', (['(256 * 8)'], {'output_dim': '(64)', 'input_length': 'x_size'}), '(256 * 8, output_dim=64, input_length=x_size)\n', (5358, 5403), False, 'from keras.layers.embeddings import Embedding\n'), ((5429, 5479), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'padding': '"""valid"""'}), "(filters=32, kernel_size=3, padding='valid')\n", (5435, 5479), False, 'from keras.layers.convolutional import Conv1D, MaxPooling1D\n'), ((5494, 5514), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5512, 5514), False, 'from keras.layers 
import BatchNormalization\n'), ((5538, 5559), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (5548, 5559), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((5579, 5629), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'padding': '"""valid"""'}), "(filters=32, kernel_size=3, padding='valid')\n", (5585, 5629), False, 'from keras.layers.convolutional import Conv1D, MaxPooling1D\n'), ((5650, 5670), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5668, 5670), False, 'from keras.layers import BatchNormalization\n'), ((5698, 5719), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (5708, 5719), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((5827, 5836), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5834, 5836), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((5859, 5888), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (5864, 5888), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((5904, 5933), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (5909, 5933), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((5950, 5979), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (5955, 5979), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((6295, 6368), 'keras.layers.embeddings.Embedding', 'Embedding', (['(256 * 8)'], {'output_dim': '(64)', 'input_length': 'x_size', 'input_shape': '(18,)'}), '(256 * 8, output_dim=64, input_length=x_size, input_shape=(18,))\n', (6304, 6368), False, 'from keras.layers.embeddings import Embedding\n'), ((6386, 6436), 
'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'padding': '"""valid"""'}), "(filters=32, kernel_size=3, padding='valid')\n", (6392, 6436), False, 'from keras.layers.convolutional import Conv1D, MaxPooling1D\n'), ((6454, 6474), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (6472, 6474), False, 'from keras.layers import BatchNormalization\n'), ((6492, 6513), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (6502, 6513), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((6531, 6581), 'keras.layers.convolutional.Conv1D', 'Conv1D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'padding': '"""valid"""'}), "(filters=32, kernel_size=3, padding='valid')\n", (6537, 6581), False, 'from keras.layers.convolutional import Conv1D, MaxPooling1D\n'), ((6599, 6608), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6606, 6608), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((6626, 6655), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (6631, 6655), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((6673, 6702), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (6678, 6702), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((6720, 6749), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (6725, 6749), False, 'from keras.layers import Dense, Activation, Input, Flatten, Dropout\n'), ((3822, 3828), 'keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (3826, 3828), False, 'from keras.optimizers import Adam, RMSprop\n'), ((4287, 4293), 'keras.optimizers.Adam', 'Adam', ([], {}), '()\n', (4291, 4293), False, 'from keras.optimizers import Adam, RMSprop\n'), ((4667, 4688), 
'keras.regularizers.l1', 'regularizers.l1', (['(0.01)'], {}), '(0.01)\n', (4682, 4688), False, 'from keras import regularizers\n'), ((4707, 4728), 'keras.regularizers.l1', 'regularizers.l1', (['(0.01)'], {}), '(0.01)\n', (4722, 4728), False, 'from keras import regularizers\n'), ((4876, 4900), 'keras.regularizers.l1_l2', 'regularizers.l1_l2', (['(0.01)'], {}), '(0.01)\n', (4894, 4900), False, 'from keras import regularizers\n'), ((4919, 4943), 'keras.regularizers.l1_l2', 'regularizers.l1_l2', (['(0.01)'], {}), '(0.01)\n', (4937, 4943), False, 'from keras import regularizers\n')] |
import os.path
from typing import Callable
from scipy.io import loadmat, savemat
import numpy as np
import nibabel as nib
import torch
import argparse
import time
import matplotlib.pyplot as plt
from torch.fft import ifftn, ifftshift, fftn, fftshift
from utils.common import set_env
from utils.proj_utils import phase2in, load_model
from config import last_model, THR_01, THR_02
# 2*pi*42.58 — presumably the 1H gyromagnetic ratio (42.58 MHz/T) in angular
# form; used below to normalize phase by TE*B0*gyro. TODO confirm units.
gyro = 2 * 3.14159265 * 42.58
def infer_Kirby_data(device, epoch, model, loss_func, thr):
    """Run inference on every orientation of the Kirby Sub001 dataset.

    Args:
        device: torch device the model and tensors live on.
        epoch: checkpoint epoch number (embedded into output file names).
        model: trained network producing a k-space susceptibility estimate.
        loss_func: callable returning (loss, _, _) for (label, y_hat, input, thr).
        thr: dipole-kernel threshold, forwarded to preprocessing and file names.

    Returns:
        Timestamp taken right after the last forward pass, so the caller can
        report pure inference duration.
    """
    print('  Kirby_data')
    # Fallback timestamp in case no 'ori*' folders are found below.
    end_time = time.time()
    root_fdr = '../data/Kirby_data'
    sub = 'Sub001'
    B0, TE = 7, 0.23  # field strength [T] and echo time — units assumed, confirm
    mat = loadmat(f'{root_fdr}/{sub}/cosmos.mat')
    susc = mat['susc']
    mask = torch.from_numpy(mat['mask'])
    oris = [i for i in os.listdir(f'{root_fdr}/{sub}') if 'ori' in i]
    # idiom fix: the enumerate() index in the original loop was never used
    for ori in oris:
        print(f'    {ori}:', end='')
        phs = nib.load(f'{root_fdr}/{sub}/{ori}/{sub}_{ori}_phase.nii.gz')
        # Normalize raw phase by TE * B0 * gyro to get a field-shift map.
        phs = np.array(phs.dataobj) / (TE * B0 * gyro)
        with open(f'{root_fdr}/{sub}/{ori}/{sub}_{ori}.txt') as f:
            s = f.read()
        ori_vec = np.array(s.split('\n'), dtype=np.float32)
        # idiom fix: these were f-strings with no placeholders
        mat_fdr = '../data/predict/Kirby'
        img_fdr = '../data/images/Kirby'
        mat_file: Callable[[float], str] = lambda loss: f'{sub}_{ori}_t{thr}_e{epoch}_{loss}.mat'
        img_file: Callable[[float], str] = lambda loss: f'{sub}_{ori}_t{thr}_e{epoch}_{loss}.png'
        end_time = do_infer(phs, susc, mask, ori_vec, device, model,
                            loss_func, thr, mat_fdr, img_fdr, mat_file, img_file)
    return end_time
def do_infer(phs, susc, mask, ori_vec, device, model, loss_func, thr, mat_fdr, img_fdr, mat_file, img_file):
    """Forward one phase volume through the model, then save outputs.

    Writes a .mat file containing the prediction ('kQSM'), ground truth
    ('label'), brain mask ('mask') and a TKD baseline ('TKD'), plus a
    comparison .png. Returns the timestamp taken right after the forward
    pass so the caller can measure pure inference time.
    """
    x_input, y_label, mask = format_data(phs, susc, mask, ori_vec, device, thr)
    y_hat = model(x_input)
    # Timestamp before loss/plot/save so only the forward pass is timed.
    end_time = time.time()
    t_loss, _, _ = loss_func(y_label, y_hat, x_input, thr)
    t_loss = round(t_loss.item(), 4)
    print(f'  loss={t_loss}')
    y_hat = to_susc(y_hat, mask)
    # TKD baseline: input channels 0/1 hold the real/imag parts of k-space.
    k_tkd = torch.complex(x_input[0, 0], x_input[0, 1])
    tkd = ifftn(ifftshift(k_tkd))
    # Signed magnitude (|x| * sign(Re x)) restricted to the brain mask.
    tkd = torch.sign(torch.real(tkd)) * torch.abs(tkd) * mask
    if not os.path.exists(mat_fdr):
        os.mkdir(mat_fdr)
    savemat(f'{mat_fdr}/{mat_file(t_loss)}', mdict={'kQSM': y_hat[0].detach().numpy(),
                                                    'label': susc, 'mask': mask.numpy(),
                                                    'TKD': tkd.detach().numpy()})
    if not os.path.exists(img_fdr):
        os.mkdir(img_fdr)
    print_img([tkd.detach().numpy(), y_hat[0].detach().numpy(), susc],
              ['TKD', 'k-QSM', 'Label'],
              f'{img_fdr}/{img_file(t_loss)}')
    return end_time
def to_susc(y_hat, mask):
    """Convert the network's two-channel k-space output into a masked map.

    Channel 0 carries the real part and channel 1 the imaginary part; the
    pair is combined, inverse-FFT'd back to image space, and reduced to a
    signed magnitude (|x| * sign(Re x)) inside the mask.
    """
    k_space = torch.complex(y_hat[:, 0, :, :, :], y_hat[:, 1, :, :, :])
    img = torch.fft.ifftn(torch.fft.ifftshift(k_space))
    signed_magnitude = torch.sign(torch.real(img)) * torch.abs(img)
    return signed_magnitude * mask
def print_img(data_arr: list, title_arr: list, to_file: str):
    """Save a comparison figure: one column per volume, three slice rows.

    Every volume in ``data_arr`` is cut through its centre along the axial,
    coronal and sagittal axes and shown in gray scale with a fixed
    [-0.1, 0.1] intensity window; ``title_arr`` labels the columns.
    """
    n_rows, n_cols = 3, len(data_arr)
    cx, cy, cz = np.array(data_arr[0].shape) // 2
    fig = plt.figure(1)
    for col_idx, volume in enumerate(data_arr):
        # Row 1: axial slice, titled with the volume name.
        plt.subplot(n_rows, n_cols, 1 + col_idx)
        plt.xticks([]), plt.yticks([]), plt.ylabel('Axial')
        plt.title(title_arr[col_idx])
        plt.imshow(volume[:, :, cz].T, vmax=0.1, vmin=-0.1, cmap='gray')
        # Row 2: coronal slice, flipped so superior is up.
        plt.subplot(n_rows, n_cols, 1 + col_idx + n_cols)
        plt.xticks([]), plt.yticks([]), plt.ylabel('Coronal')
        plt.imshow(np.flipud(volume[:, cy, :].T), vmax=0.1, vmin=-0.1, cmap='gray')
        # Row 3: sagittal slice, flipped the same way.
        plt.subplot(n_rows, n_cols, 1 + col_idx + 2 * n_cols)
        plt.xticks([]), plt.yticks([]), plt.ylabel('Sagittal')
        plt.imshow(np.flipud(volume[cx, :, :].T), vmax=0.1, vmin=-0.1, cmap='gray')
    plt.savefig(to_file)
    plt.close(fig)
def format_data(X, Y, mask, ori_vec, device, thr):
    """Prepare one (input, label, mask) triple for the network.

    Numpy volumes get batch and channel axes prepended and are moved to
    ``device``; the label is transformed into k-space and split into
    separate real/imaginary channels.
    """
    def to_device_tensor(arr):
        # Prepend (batch, channel) axes and place the tensor on the device.
        return torch.from_numpy(np.expand_dims(arr, [0, 1])).to(device)

    phase = to_device_tensor(X)
    susc = to_device_tensor(Y)
    angles = to_device_tensor(ori_vec)
    k_label = fftshift(fftn(susc))
    y_label = torch.cat([torch.real(k_label), torch.imag(k_label)], dim=1)
    return phase2in(phase, angles, thr), y_label, mask.to(device)
def main(args):
    """Entry point: configure the device, restore the model, time inference."""
    print('---------------------------------------------------')
    device = set_env(args.device)
    print('Loading model ...')
    # load_model also returns training state (last two values), unused here.
    epoch, model, loss_func, _, _ = load_model(device, args.model(args.threshold))
    print('[Done]Model loaded\n')
    print('Inferencing ...')
    t_start = time.time()
    model.eval()  # inference mode: freeze dropout / batch-norm statistics
    t_end = infer_Kirby_data(device, epoch, model, loss_func, args.threshold)
    print(f'Inferenced, duration: {round(t_end - t_start, 3)}s\n')
def get_args():
    """Build and parse the command-line interface for the inference script."""
    cli = argparse.ArgumentParser()
    cli.add_argument('-d', '--device', type=str, default='cpu')
    cli.add_argument('-m', '--model', type=str, default=last_model)
    cli.add_argument('-t', '--threshold', type=float, default=THR_01,
                     choices=[THR_01, THR_02])
    return cli.parse_args()


if __name__ == '__main__':
    main(get_args())
| [
"matplotlib.pyplot.title",
"utils.proj_utils.phase2in",
"argparse.ArgumentParser",
"scipy.io.loadmat",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.close",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"utils.common.set_env",
"torch.real",
"matplotlib.pyplot.xticks",
"torch.complex... | [((514, 525), 'time.time', 'time.time', ([], {}), '()\n', (523, 525), False, 'import time\n'), ((613, 652), 'scipy.io.loadmat', 'loadmat', (['f"""{root_fdr}/{sub}/cosmos.mat"""'], {}), "(f'{root_fdr}/{sub}/cosmos.mat')\n", (620, 652), False, 'from scipy.io import loadmat, savemat\n'), ((687, 716), 'torch.from_numpy', 'torch.from_numpy', (["mat['mask']"], {}), "(mat['mask'])\n", (703, 716), False, 'import torch\n'), ((1833, 1844), 'time.time', 'time.time', ([], {}), '()\n', (1842, 1844), False, 'import time\n'), ((2018, 2061), 'torch.complex', 'torch.complex', (['x_input[0, 0]', 'x_input[0, 1]'], {}), '(x_input[0, 0], x_input[0, 1])\n', (2031, 2061), False, 'import torch\n'), ((2761, 2818), 'torch.complex', 'torch.complex', (['y_hat[:, 0, :, :, :]', 'y_hat[:, 1, :, :, :]'], {}), '(y_hat[:, 0, :, :, :], y_hat[:, 1, :, :, :])\n', (2774, 2818), False, 'import torch\n'), ((3096, 3109), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3106, 3109), True, 'import matplotlib.pyplot as plt\n'), ((3739, 3759), 'matplotlib.pyplot.savefig', 'plt.savefig', (['to_file'], {}), '(to_file)\n', (3750, 3759), True, 'import matplotlib.pyplot as plt\n'), ((3764, 3778), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3773, 3778), True, 'import matplotlib.pyplot as plt\n'), ((4318, 4338), 'utils.common.set_env', 'set_env', (['args.device'], {}), '(args.device)\n', (4325, 4338), False, 'from utils.common import set_env\n'), ((4535, 4546), 'time.time', 'time.time', ([], {}), '()\n', (4544, 4546), False, 'import time\n'), ((4749, 4774), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4772, 4774), False, 'import argparse\n'), ((873, 933), 'nibabel.load', 'nib.load', (['f"""{root_fdr}/{sub}/{ori}/{sub}_{ori}_phase.nii.gz"""'], {}), "(f'{root_fdr}/{sub}/{ori}/{sub}_{ori}_phase.nii.gz')\n", (881, 933), True, 'import nibabel as nib\n'), ((2078, 2094), 'torch.fft.ifftshift', 'ifftshift', (['k_tkd'], {}), '(k_tkd)\n', 
(2087, 2094), False, 'from torch.fft import ifftn, ifftshift, fftn, fftshift\n'), ((2847, 2873), 'torch.fft.ifftshift', 'torch.fft.ifftshift', (['y_hat'], {}), '(y_hat)\n', (2866, 2873), False, 'import torch\n'), ((3052, 3079), 'numpy.array', 'np.array', (['data_arr[0].shape'], {}), '(data_arr[0].shape)\n', (3060, 3079), True, 'import numpy as np\n'), ((3143, 3171), 'matplotlib.pyplot.subplot', 'plt.subplot', (['row', 'col', '(1 + i)'], {}), '(row, col, 1 + i)\n', (3154, 3171), True, 'import matplotlib.pyplot as plt\n'), ((3240, 3263), 'matplotlib.pyplot.title', 'plt.title', (['title_arr[i]'], {}), '(title_arr[i])\n', (3249, 3263), True, 'import matplotlib.pyplot as plt\n'), ((3272, 3340), 'matplotlib.pyplot.imshow', 'plt.imshow', (['data_arr[i][:, :, z].T'], {'vmax': '(0.1)', 'vmin': '(-0.1)', 'cmap': '"""gray"""'}), "(data_arr[i][:, :, z].T, vmax=0.1, vmin=-0.1, cmap='gray')\n", (3282, 3340), True, 'import matplotlib.pyplot as plt\n'), ((3350, 3384), 'matplotlib.pyplot.subplot', 'plt.subplot', (['row', 'col', '(1 + i + col)'], {}), '(row, col, 1 + i + col)\n', (3361, 3384), True, 'import matplotlib.pyplot as plt\n'), ((3544, 3582), 'matplotlib.pyplot.subplot', 'plt.subplot', (['row', 'col', '(1 + i + 2 * col)'], {}), '(row, col, 1 + i + 2 * col)\n', (3555, 3582), True, 'import matplotlib.pyplot as plt\n'), ((3982, 3989), 'torch.fft.fftn', 'fftn', (['Y'], {}), '(Y)\n', (3986, 3989), False, 'from torch.fft import ifftn, ifftshift, fftn, fftshift\n'), ((4181, 4206), 'utils.proj_utils.phase2in', 'phase2in', (['X', 'ori_vec', 'thr'], {}), '(X, ori_vec, thr)\n', (4189, 4206), False, 'from utils.proj_utils import phase2in, load_model\n'), ((948, 969), 'numpy.array', 'np.array', (['phs.dataobj'], {}), '(phs.dataobj)\n', (956, 969), True, 'import numpy as np\n'), ((2136, 2150), 'torch.abs', 'torch.abs', (['tkd'], {}), '(tkd)\n', (2145, 2150), False, 'import torch\n'), ((2886, 2902), 'torch.abs', 'torch.abs', (['y_hat'], {}), '(y_hat)\n', (2895, 2902), False, 'import 
torch\n'), ((3180, 3194), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (3190, 3194), True, 'import matplotlib.pyplot as plt\n'), ((3196, 3210), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (3206, 3210), True, 'import matplotlib.pyplot as plt\n'), ((3212, 3231), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Axial"""'], {}), "('Axial')\n", (3222, 3231), True, 'import matplotlib.pyplot as plt\n'), ((3393, 3407), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (3403, 3407), True, 'import matplotlib.pyplot as plt\n'), ((3409, 3423), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (3419, 3423), True, 'import matplotlib.pyplot as plt\n'), ((3425, 3446), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Coronal"""'], {}), "('Coronal')\n", (3435, 3446), True, 'import matplotlib.pyplot as plt\n'), ((3466, 3499), 'numpy.flipud', 'np.flipud', (['data_arr[i][:, y, :].T'], {}), '(data_arr[i][:, y, :].T)\n', (3475, 3499), True, 'import numpy as np\n'), ((3591, 3605), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (3601, 3605), True, 'import matplotlib.pyplot as plt\n'), ((3607, 3621), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (3617, 3621), True, 'import matplotlib.pyplot as plt\n'), ((3623, 3645), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sagittal"""'], {}), "('Sagittal')\n", (3633, 3645), True, 'import matplotlib.pyplot as plt\n'), ((3665, 3698), 'numpy.flipud', 'np.flipud', (['data_arr[i][x, :, :].T'], {}), '(data_arr[i][x, :, :].T)\n', (3674, 3698), True, 'import numpy as np\n'), ((4016, 4035), 'torch.real', 'torch.real', (['y_label'], {}), '(y_label)\n', (4026, 4035), False, 'import torch\n'), ((4037, 4056), 'torch.imag', 'torch.imag', (['y_label'], {}), '(y_label)\n', (4047, 4056), False, 'import torch\n'), ((2117, 2132), 'torch.real', 'torch.real', (['tkd'], {}), '(tkd)\n', (2127, 2132), False, 'import torch\n'), ((2916, 2933), 'torch.real', 
'torch.real', (['y_hat'], {}), '(y_hat)\n', (2926, 2933), False, 'import torch\n'), ((3857, 3882), 'numpy.expand_dims', 'np.expand_dims', (['X', '[0, 1]'], {}), '(X, [0, 1])\n', (3871, 3882), True, 'import numpy as np\n'), ((3920, 3945), 'numpy.expand_dims', 'np.expand_dims', (['Y', '[0, 1]'], {}), '(Y, [0, 1])\n', (3934, 3945), True, 'import numpy as np\n'), ((4125, 4156), 'numpy.expand_dims', 'np.expand_dims', (['ori_vec', '[0, 1]'], {}), '(ori_vec, [0, 1])\n', (4139, 4156), True, 'import numpy as np\n')] |
def ivcurve(mechanism_name, i_type, vmin=-100, vmax=100, deltav=1, transient_time=50, test_time=50, rs=1, vinit=-665):
    """
    Returns the (peak) current-voltage relationship for an ion channel.

    Args:
        mechanism_name = name of the mechanism (e.g. hh)
        i_type = which current to monitor (e.g. ik, ina)
        vmin = minimum voltage step to test
        vmax = maximum voltage step to test
        deltav = increment of voltage
        transient_time = how long to ignore for initial conditions to stabilize (ms)
        test_time = duration of the voltage clamp tests (ms)
        rs = resistance of voltage clamp in MOhm
        vinit = initialization voltage

    Returns:
        i = iterable of peak currents (in mA/cm^2)
        v = iterable of corresponding test voltages

    Note:
        The initialization potential (vinit) may affect the result. For example, consider
        the Hodgkin-Huxley sodium channel; a large fraction are inactivated at rest. Using a
        strongly hyperpolarizing vinit will uninactivate many channels, leading to more
        current.
    """
    # NOTE(review): the default vinit=-665 mV looks like a typo for -66.5 or
    # -65 mV (a physiological resting potential) — confirm before relying on it.
    from neuron import h
    import numpy
    h.load_file('stdrun.hoc')  # loads NEURON's standard run system (finitialize/continuerun)
    sec = h.Section()
    sec.insert(mechanism_name)
    sec.L = 1
    sec.diam = 1
    # Single-electrode voltage clamp: hold at vinit for transient_time, then
    # step to the test voltage (amp2, set inside the loop) for test_time.
    seclamp = h.SEClamp(sec(0.5))
    seclamp.amp1 = vinit
    seclamp.dur1 = transient_time
    seclamp.dur2 = test_time
    seclamp.rs = rs
    # Record the requested membrane current at the middle of the section.
    i_record = h.Vector()
    i_record.record(sec(0.5).__getattribute__('_ref_' + i_type))
    result_i = []
    # numpy.arange excludes vmax itself from the sweep.
    result_v = numpy.arange(vmin, vmax, deltav)
    for test_v in result_v:
        seclamp.amp2 = test_v
        h.finitialize(vinit)
        h.continuerun(transient_time)
        # Samples recorded so far belong to the settling period; skip them.
        num_transient_points = len(i_record)
        h.continuerun(test_time + transient_time)
        i_record2 = i_record.as_numpy()[num_transient_points:]
        # Peak deviation from the pre-step baseline, keeping its sign, then
        # shifted back so the returned value is an absolute current.
        baseline_i = i_record2[0]
        i_record_shift = i_record2 - baseline_i
        max_i = max(i_record_shift)
        min_i = min(i_record_shift)
        peak_i = max_i if abs(max_i) > abs(min_i) else min_i
        peak_i += baseline_i
        result_i.append(peak_i)
    return result_i, result_v
if __name__ == '__main__':
    # Demo: plot the peak K+ and Na+ current-voltage curves of the HH model.
    from matplotlib import pyplot
    import numpy
    from neuron import h
    h.CVode().active(1)
    for current_name, extra_kwargs in (('ik', {}), ('ina', {'vinit': -100})):
        peak_i, v = ivcurve('hh', current_name, **extra_kwargs)
        pyplot.plot(v, peak_i, label=current_name)
    pyplot.xlabel('v (mV)')
    pyplot.ylabel('current (mA/cm^2)')
    pyplot.legend()
    pyplot.show()
| [
"neuron.h.continuerun",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"neuron.h.Vector",
"matplotlib.pyplot.legend",
"neuron.h.finitialize",
"numpy.arange",
"neuron.h.CVode",
"matplotlib.pyplot.ylabel",
"neuron.h.Section",
"matplotlib.pyplot.xlabel",
"neuron.h.load_file"
] | [((1154, 1179), 'neuron.h.load_file', 'h.load_file', (['"""stdrun.hoc"""'], {}), "('stdrun.hoc')\n", (1165, 1179), False, 'from neuron import h\n'), ((1190, 1201), 'neuron.h.Section', 'h.Section', ([], {}), '()\n', (1199, 1201), False, 'from neuron import h\n'), ((1422, 1432), 'neuron.h.Vector', 'h.Vector', ([], {}), '()\n', (1430, 1432), False, 'from neuron import h\n'), ((1531, 1563), 'numpy.arange', 'numpy.arange', (['vmin', 'vmax', 'deltav'], {}), '(vmin, vmax, deltav)\n', (1543, 1563), False, 'import numpy\n'), ((2317, 2347), 'matplotlib.pyplot.plot', 'pyplot.plot', (['v', 'ik'], {'label': '"""ik"""'}), "(v, ik, label='ik')\n", (2328, 2347), False, 'from matplotlib import pyplot\n'), ((2398, 2430), 'matplotlib.pyplot.plot', 'pyplot.plot', (['v', 'ina'], {'label': '"""ina"""'}), "(v, ina, label='ina')\n", (2409, 2430), False, 'from matplotlib import pyplot\n'), ((2435, 2458), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""v (mV)"""'], {}), "('v (mV)')\n", (2448, 2458), False, 'from matplotlib import pyplot\n'), ((2463, 2497), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""current (mA/cm^2)"""'], {}), "('current (mA/cm^2)')\n", (2476, 2497), False, 'from matplotlib import pyplot\n'), ((2502, 2517), 'matplotlib.pyplot.legend', 'pyplot.legend', ([], {}), '()\n', (2515, 2517), False, 'from matplotlib import pyplot\n'), ((2522, 2535), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (2533, 2535), False, 'from matplotlib import pyplot\n'), ((1630, 1650), 'neuron.h.finitialize', 'h.finitialize', (['vinit'], {}), '(vinit)\n', (1643, 1650), False, 'from neuron import h\n'), ((1659, 1688), 'neuron.h.continuerun', 'h.continuerun', (['transient_time'], {}), '(transient_time)\n', (1672, 1688), False, 'from neuron import h\n'), ((1742, 1783), 'neuron.h.continuerun', 'h.continuerun', (['(test_time + transient_time)'], {}), '(test_time + transient_time)\n', (1755, 1783), False, 'from neuron import h\n'), ((2261, 2270), 'neuron.h.CVode', 'h.CVode', ([], 
{}), '()\n', (2268, 2270), False, 'from neuron import h\n')] |
# pylint: disable=missing-docstring, protected-access, unused-argument, too-many-arguments, too-many-statements
# pylint: disable=too-many-locals, bad-continuation
# pydocstyle: disable=missing-docstring
from collections import defaultdict
from io import StringIO
from unittest import TestCase, main
from unittest import mock
from unittest.mock import patch, mock_open, MagicMock, file_spec
import numpy as np
import VirClass.VirClass.load as load
class LoadUnitTests(TestCase):
def test_one_hot(self):
# tests: 1x list, 1x np.array, n < number_of_classes, n = number_of_classes, n > number_of_classes
x = [0, 1, 3, 2, 0]
x_1 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [1, 0, 0, 0]])
x_2 = np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 1, 0, 0],
[1, 0, 0, 0, 0]])
number_of_classes = max(x) + 1
self.assertRaisesRegex(AssertionError, "Cannot create numpy array; number of classes must be bigger than max "
"number of list.", load.one_hot, x, number_of_classes - 1)
np.testing.assert_array_equal(load.one_hot(x, number_of_classes), x_1)
np.testing.assert_array_equal(load.one_hot(x, number_of_classes + 1), x_2)
np.testing.assert_array_equal(load.one_hot(np.array(x), number_of_classes), x_1)
def test_seq_to_bits(self):
vec = "ATCGYM"
test_atcgym = [1, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0,
0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 1]
test_atcg = [1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1,
1, 1, 1, 1,
1, 1, 1, 1]
dict_1 = {"A": [1, 1, 0], "G": [1, 0, 0], "T": [1, 1, 1]}
test_dict_1 = [1, 1, 0,
1, 1, 1,
1, 1, 1,
1, 0, 0,
1, 1, 1,
1, 1, 1]
dict_2 = {"T": [1, 0], "C": [0, 1]}
test_dict_2 = [1, 1,
1, 0,
0, 1,
1, 1,
1, 1,
1, 1]
self.assertRaisesRegex(AssertionError, "Number of unique nucleotides and transmission dictionary not present.",
load.seq_to_bits, vec, None, None)
res = load.seq_to_bits(vec, "ATCGYM", None)
self.assertEqual(res, test_atcgym)
self.assertEqual(len(res) % 6, 0) # we have 6 unique nucleotides - len % 6 must be 0
res = load.seq_to_bits(vec, "ATCG", None)
self.assertEqual(res, test_atcg)
self.assertEqual(len(res) % 4, 0)
res = load.seq_to_bits(vec, None, dict_1)
self.assertEqual(res, test_dict_1)
self.assertEqual(len(res) % 3, 0)
res = load.seq_to_bits(vec, "AT", dict_1)
self.assertEqual(res, test_dict_1)
self.assertEqual(len(res) % 3, 0)
res = load.seq_to_bits(vec, None, dict_2)
self.assertEqual(res, test_dict_2)
self.assertEqual(len(res) % 2, 0)
res = load.seq_to_bits(vec, "CTGM", dict_2)
self.assertEqual(res, test_dict_2)
self.assertEqual(len(res) % 2, 0)
    @patch('VirClass.VirClass.load.os.path.isfile')
    @patch('VirClass.VirClass.load.load_seqs_from_ncbi')
    def test_load_from_file_fasta(self, arg1, arg2):
        """load_from_file_fasta: cache-hit (read gzip) and cache-miss (fetch + write) paths."""
        # --- Path 1: the fasta file exists, so it is read from disk. ---
        load.os.path.isfile.return_value = True
        temp = defaultdict(list)
        temp['1004345262'] = \
            'TGTTGCGTTAACAACAAACCAACCTCCGACCCAAAACAAAGATGAAAATAAAAGATGCCACCCAAACGCCGACTAGTGGACAGCCCAGAAGATATGGAAGAAA' \
            'GATGCCACCCAAACGCCGACTAGTGGACAGCCCAGAAGATATGGAAGACGAGGGACCCTCTGACCGACCAACTCACCTACCCAAACTCCCAGGAACC'
        # Expected (sequences, taxonomy) tuple: ID -> sequence, ID -> lineage.
        res_tuple = (
            temp,
            {
                '1004345262':
                    'Viruses;ssRNA viruses;ssRNA negative-strand viruses;Mononegavirales;Bornaviridae;Bornavirus'
            }
        )
        # Two-line fasta record whose concatenated sequence equals temp's value.
        read_data = \
            '>1004345262 Viruses;ssRNA viruses;ssRNA negative-strand viruses;Mononegavirales;Bornaviridae;Bornavirus' \
            '\nTGTTGCGTTAACAACAAACCAACCTCCGACCCAAAACAAAGATGAAAATAAAAGATGCCACCCAAACGCCGACTAGTGGACAGCCCAGAAGATATGGAAG\n' \
            'AAAGATGCCACCCAAACGCCGACTAGTGGACAGCCCAGAAGATATGGAAGACGAGGGACCCTCTGACCGACCAACTCACCTACCCAAACTCCCAGGAACC\n'
        # Mocking technique for gzip.open context managers:
        # https://www.biostars.org/p/190067/
        with patch('VirClass.VirClass.load.gzip.open') as mocked_open:
            handle = MagicMock(spec=file_spec)
            handle.__enter__.return_value = StringIO(read_data)
            mocked_open.return_value = handle
            res = load.load_from_file_fasta('bla.bla')
            # Read branch must open the cache file in text-read mode.
            mocked_open.assert_called_once_with('bla.bla', 'rt')
            self.assertEqual(res, res_tuple)
        # --- Path 2: no cached file; data comes from NCBI and is written out. ---
        load.os.path.isfile.return_value = False
        load.load_seqs_from_ncbi.return_value = res_tuple
        with patch('VirClass.VirClass.load.gzip.open', mock_open(), create=True) as mocked_open:
            res = load.load_from_file_fasta('bla.bla')
            # Write branch must open the cache file in text-write mode.
            mocked_open.assert_called_once_with('bla.bla', 'wt')
            self.assertEqual(res, res_tuple)
    # @patch('VirClass.VirClass.load.seq_to_bits')
    def test_dataset_from_id(self):
        """Exercise load.dataset_from_id over several read lengths and sampling ratios.

        NOTE(review): every ``assertTrue(res, expected)`` below treats its second
        argument as a failure *message*, not an expected value, so these checks
        only verify that ``res`` is truthy.  ``assertEqual`` is presumably what
        was intended — confirm the expected fixtures before tightening.
        """
        # one-hot transmission dictionary and two synthetic 200-nt sequences
        dict_1 = {"A": [1, 0, 0, 0], "T": [0, 1, 0, 0], "C": [0, 0, 1, 0], "G": [0, 0, 0, 1]}
        temp_data = defaultdict(list)
        temp_data['1004345262'] = \
            'TGTTGCGTTAACAACAAACCAACCTCCGACCCAAAACAAAGATGAAAATAAAAGATGCCACCCAAACGCCGACTAGTGGACAGCCCAGAAGATATGGAAGAAA' \
            'GATGCCACCCAAACGCCGACTAGTGGACAGCCCAGAAGATATGGAAGACGAGGGACCCTCTGACCGACCAACTCACCTACCCAAACTCCCAGGAACC'
        temp_data['10043452'] = \
            'GATGCCACCCAAACGCCGACTAGTGGACAGCCCAGAAGATATGGAAGACGAGGGACCCTCTGACCGACCAACTCACCTACCCAAACTCCCAGGAACC' \
            'TGTTGCGTTAACAACAAACCAACCTCCGACCCAAAACAAAGATGAAAATAAAAGATGCCACCCAAACGCCGACTAGTGGACAGCCCAGAAGATATGGA'
        temp_tax = {'10043452': 0, '1004345262': 1}
        ids = ['1004345262', '10043452']
        # test1: read length 100, sample everything (sampling ratio 1.0)
        expected_x = [
            [0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0,
             1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
             0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
             0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,
             1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1,
             1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
             0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0,
             0, 0, 0, 1],
            [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1,
             0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0,
             0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0,
             0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
             0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0,
             0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
             1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0,
             0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0,
             0, 0, 1, 0],
            [0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1,
             1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0,
             0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1,
             1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,
             0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0,
             0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1,
             0, 1, 0, 0]]
        expected_y = [1, 1, 0]
        res = load.dataset_from_id(temp_data, temp_tax, ids, 100, 1.0, dict_1)
        self.assertTrue(res, (expected_x, expected_y))
        # test2: empty data/labels/ids -> empty dataset
        res = load.dataset_from_id(defaultdict(list), {}, [], 100, 0.5, dict_1)
        self.assertTrue(res, ([], []))
        # test3: read length 100, sampling ratio 0.2 keeps a subset of reads
        expected_x2 = [
            [0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0,
             1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
             0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
             0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,
             1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1,
             1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
             0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0,
             0, 0, 0, 1],
            [0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1,
             1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0,
             0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1,
             1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,
             0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0,
             0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1,
             0, 1, 0, 0]]
        expected_y2 = [1, 0]
        res = load.dataset_from_id(temp_data, temp_tax, ids, 100, 0.2, dict_1)
        self.assertTrue(res, (expected_x2, expected_y2))
        # test4: shorter read length (20) with sampling ratio 0.5 -> many short reads
        expected_x20 = [
            [0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0,
             1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
             0, 0, 1, 0, 0, 0, 1, 0],
            [0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0,
             1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0,
             0, 0, 1, 0, 1, 0, 0, 0],
            [0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0,
             1, 0, 0, 0, 0, 0, 0, 1],
            [0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1,
             1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1,
             1, 0, 0, 0, 1, 0, 0, 0],
            [0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1,
             1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0,
             0, 0, 1, 0, 0, 0, 1, 0],
            [0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1,
             1, 0, 0, 0, 0, 0, 1, 0],
            [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0,
             0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             0, 1, 0, 0, 0, 0, 1, 0],
            [0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
             0, 0, 0, 1, 0, 1, 0, 0],
            [0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1,
             1, 0, 0, 0, 0, 1, 0, 0],
            [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
             0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 0, 0, 0, 1]]
        expected_y20 = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
        res = load.dataset_from_id(temp_data, temp_tax, ids, 20, 0.5, dict_1)
        self.assertTrue(res, (expected_x20, expected_y20))
        # argument validation: sampling ratio out of [0, 1] and missing translation table
        self.assertRaisesRegex(AssertionError, "Sampling size is in wrong range - it must be between 0.0 and 1.0.",
                               load.dataset_from_id, temp_data, temp_tax, ids, 20, 20, dict_1)
        self.assertRaisesRegex(AssertionError, "Both transmission dictionary and unique nucleotides cannot be empty.",
                               load.dataset_from_id, temp_data, temp_tax, ids, 20, 0.5, None)
@patch('VirClass.VirClass.load.pickle.load')
def test_load_dataset(self, mock_pickle_load):
m_file = mock_open()
with patch('VirClass.VirClass.load.gzip.open', m_file):
load.load_dataset('bla.bla')
self.assertEqual(mock_pickle_load.call_count, 1)
self.assertTrue(m_file.called)
m_file.assert_called_once_with('bla.bla', 'rt')
@patch('VirClass.VirClass.load.pickle.dump')
def test_save_dataset(self, mock_pickle_dump):
m_file = mock_open()
with patch('VirClass.VirClass.load.gzip.open', m_file):
load.save_dataset('bla.bla', {'test_key': 'test_val'})
mock_pickle_dump.assert_called_once_with({'test_key': 'test_val'}, mock.ANY)
self.assertTrue(m_file.called)
m_file.assert_called_once_with('bla.bla', 'wt')
def test_build_dataset_ids(self):
oids = ['1006610892', '1021076629', '1023464444', '1028356461', '1028356384', '1006160387', '10086561',
'1016776533', '1005739119', '10140926', '10313991', '1007626122', '1021076583', '10257473',
'1021076642', '1004345262', '1002160105', '1023176908', '1007626112', '1024325226']
res = load.build_dataset_ids(oids=oids, test=0.2, seed=0)
self.assertTrue(isinstance(res, dict))
self.assertEqual(len(res.keys()), 4)
self.assertCountEqual(res.keys(), ['tr_ids', 'te_ids', 'trtr_ids', 'trte_ids'])
self.assertEqual(len(res['tr_ids'] + res['te_ids']), len(oids))
self.assertCountEqual(res['tr_ids'] + res['te_ids'], oids)
self.assertTrue(set(res['tr_ids']).isdisjoint(res['te_ids']))
self.assertEqual(len(res['trtr_ids'] + res['trte_ids']), len(res['tr_ids']))
self.assertCountEqual(res['trtr_ids'] + res['trte_ids'], res['tr_ids'])
self.assertTrue(set(res['trtr_ids']).isdisjoint(res['trte_ids']))
self.assertRaisesRegex(ValueError, "test_size=1.000000 should be smaller than 1.0 or be an integer",
load.build_dataset_ids, oids, 1.0, 0)
datasets2 = {'tr_ids': [], 'te_ids': [], 'trte_ids': [], 'trtr_ids': []}
res = load.build_dataset_ids([], test=0.2, seed=0)
self.assertTrue(isinstance(res, dict))
self.assertDictEqual(res, datasets2)
res = load.build_dataset_ids(oids, test=0.0, seed=0)
self.assertTrue(isinstance(res, dict))
self.assertEqual(len(res.keys()), 4)
self.assertCountEqual(res.keys(), ['tr_ids', 'te_ids', 'trtr_ids', 'trte_ids'])
self.assertEqual(len(res['tr_ids'] + res['te_ids']), len(oids))
self.assertCountEqual(res['tr_ids'] + res['te_ids'], oids)
self.assertTrue(set(res['tr_ids']).isdisjoint(res['te_ids']))
self.assertEqual(len(res['te_ids']), 0)
self.assertEqual(len(res['tr_ids']), len(oids))
self.assertEqual(len(res['trtr_ids'] + res['trte_ids']), len(res['tr_ids']))
self.assertCountEqual(res['trtr_ids'] + res['trte_ids'], res['tr_ids'])
self.assertTrue(set(res['trtr_ids']).isdisjoint(res['trte_ids']))
self.assertRaisesRegex(ValueError, "test_size=1.000000 should be smaller than 1.0 or be an integer",
load.build_dataset_ids, oids, 1.0, 0)
def test_classes_to_numerical(self):
temp = defaultdict(list)
temp['1004345262'] = \
'TGTTGCGTTAACAACAAACCAACCTCCGACCCAAAACAAAGATGAAAATAAAAGATGCCACCCAAACGCCGACTAGTGGACAGCCCAGAAGATATGGAAGAAA' \
'GATGCCACCCAAACGCCGACTAGTGGACAGCCCAGAAGATATGGAAGACGAGGGACCCTCTGACCGACCAACTCACCTACCCAAACTCCCAGGAACC'
temp['10043452'] = \
'GATGCCACCCAAACGCCGACTAGTGGACAGCCCAGAAGATATGGAAGACGAGGGACCCTCTGACCGACCAACTCACCTACCCAAACTCCCAGGAACC' \
'TGTTGCGTTAACAACAAACCAACCTCCGACCCAAAACAAAGATGAAAATAAAAGATGCCACCCAAACGCCGACTAGTGGACAGCCCAGAAGATATGGA'
temp['1023464444'] = \
'AAACACAACAGGGCCTCAAGCCTGTCGCAAAAAGAACAGGTAACAACGACAGGAACGTGGCGGACGAGATACAGACCGGCACGTAAACCCAACCGACAC' \
'ATCCAATATGGTACCCCTCATTGAACCACATAACACAACACAGGCCGCAACTCCGAATACGCATGACAATCACCAAGAATGGGCAAGCTCAATCGCAGCACTCATG'
temp['1028356461'] = \
'CCAATCCCGACCGGAATGGAGGTCCTGACAGGGTACTAAACCCAGTGTAGCGCCCACACGCAATCAGAACAAGACAAAAGCCCCCTAAACCCCACTCCGAAAA' \
'GCGGACAAAAATCCAACCTCATACAAACAAACAAGGGCTAGATGCCAACAGGGACTGCCATCCAATGAGAATGTCCATAGGAGTCGAAACAAAGCCA'
temp['1028356384'] = \
'GAAGCCACCAGAAAGATAAGTGAAACAGTACACGAGCCCTAAACACAACGAATCTTCATAATAACCACCCGACTAAGCGACAAAACCACAGGAACCGACCC' \
'AGACGAAAGCACCGACCAGTGATCACAACTCTTTCGAGGTCACACCCGGTACTACGTAAGTGCCACCATCGCAGCTAAGAGGGCACGCA'
labels = {'1004345262':
'Viruses;ssRNA viruses;ssRNA negative-strand viruses;Mononegavirales;Bornaviridae;Bornavirus',
'10043452': 'Viruses;ssRNA viruses;ssRNA positive-strand viruses;ViralesA;ViridaeB;VirusC',
'1023464444': 'Viruses;ssDNA viruses;ssDNA negative-strand viruses;ViralesA;ViridaeB;VirusC',
'1028356461': 'Viruses;ssDNA viruses;ssDNA negative-strand viruses;ViridaeB;VirusC',
'1028356384': 'Viruses;ssDNA viruses;ssDNA negative-strand viruses;ViridaeB;VirusC'}
res_temp = defaultdict(int)
res_temp[0] = 195.0
res_temp[1] = 200.0
res_temp[2] = 205.0
res_temp[2] = 205.0
res_temp[2] = 195.0
res_expect = ({'10043452': 0, '1004345262': 1, '1023464444': 2, '1028356461': 3, '1028356384': 4}, res_temp)
res = load.classes_to_numerical(temp, labels)
self.assertTrue(res, res_expect)
# try with empty
res = load.classes_to_numerical(defaultdict(list), {})
self.assertTrue(res, ({}, defaultdict(int)))
    @patch('VirClass.VirClass.load.load_from_file_fasta')
    @patch('VirClass.VirClass.load.classes_to_numerical')
    @patch('VirClass.VirClass.load.build_dataset_ids')
    @patch('VirClass.VirClass.load.one_hot')
    @patch('VirClass.VirClass.load.os.path.join')
    @patch('VirClass.VirClass.load.dataset_from_id')
    @patch('VirClass.VirClass.load.pickle.dump')
    @patch('VirClass.VirClass.load.load_dataset')
    def test_load_data(self, load_dataset_mock, pickle_mock, dataset_mock, os_mock, one_hot_mock, arg2, arg3, arg4):
        """End-to-end test of load.load_data with every collaborator mocked out.

        Covers argument validation, the dataset-building path (load_dataset
        raising IOError forces a rebuild), and both onehot=False/True variants.
        NOTE(review): the ordered side_effect lists below must match the exact
        call order inside load.load_data — re-sequencing them would break the test.
        """
        # argument validation: bad test/sample ranges, unsupported suffix, missing translation table
        self.assertRaisesRegex(AssertionError, "Test size is in wrong range - it must be between 0.0 and 1.0.",
                               load.load_data, filename='a.fasta.gz', test=1.0)
        self.assertRaisesRegex(AssertionError, "Test size is in wrong range - it must be between 0.0 and 1.0.",
                               load.load_data, filename='a.fasta.gz', test=-1.0)
        self.assertRaisesRegex(AssertionError, "Sampling size is in wrong range - it must be between 0.0 and 1.0.",
                               load.load_data, filename='a.fasta.gz', sample=2.0)
        self.assertRaisesRegex(AssertionError, "Sampling size is in wrong range - it must be between 0.0 and 1.0.",
                               load.load_data, filename='a.fasta.gz', sample=-1.0)
        self.assertRaisesRegex(AssertionError, "Currently supported suffixes is '.fasta.gz'.",
                               load.load_data, filename='a.txt')
        self.assertRaisesRegex(AssertionError, "Both transmission dictionary and unique nucleotides cannot be empty.",
                               load.load_data, filename='a.fasta.gz')
        # synthetic sequence data keyed by NCBI-style ids
        temp = defaultdict(list)
        temp['1004345262'] = \
            'TGTTGCGTTAACAACAAACCAACCTCCGACCCAAAACAAAGATGAAAATAAAAGATGCCACCCAAACGCCGACTAGTGGACAGCCCAGAAGATATGGAAGAAA' \
            'GATGCCACCCAAACGCCGACTAGTGGACAGCCCAGAAGATATGGAAGACGAGGGACCCTCTGACCGACCAACTCACCTACCCAAACTCCCAGGAACC'
        temp['10043452'] = \
            'GATGCCACCCAAACGCCGACTAGTGGACAGCCCAGAAGATATGGAAGACGAGGGACCCTCTGACCGACCAACTCACCTACCCAAACTCCCAGGAACC' \
            'TGTTGCGTTAACAACAAACCAACCTCCGACCCAAAACAAAGATGAAAATAAAAGATGCCACCCAAACGCCGACTAGTGGACAGCCCAGAAGATATGGA'
        temp['1023464444'] = \
            'AAACACAACAGGGCCTCAAGCCTGTCGCAAAAAGAACAGGTAACAACGACAGGAACGTGGCGGACGAGATACAGACCGGCACGTAAACCCAACCGACAC' \
            'ATCCAATATGGTACCCCTCATTGAACCACATAACACAACACAGGCCGCAACTCCGAATACGCATGACAATCACCAAGAATGGGCAAGCTCAATCGCAGCACTCATG'
        temp['1028356461'] = \
            'CCAATCCCGACCGGAATGGAGGTCCTGACAGGGTACTAAACCCAGTGTAGCGCCCACACGCAATCAGAACAAGACAAAAGCCCCCTAAACCCCACTCCGAAAA' \
            'GCGGACAAAAATCCAACCTCATACAAACAAACAAGGGCTAGATGCCAACAGGGACTGCCATCCAATGAGAATGTCCATAGGAGTCGAAACAAAGCCA'
        temp['1028356384'] = \
            'GAAGCCACCAGAAAGATAAGTGAAACAGTACACGAGCCCTAAACACAACGAATCTTCATAATAACCACCCGACTAAGCGACAAAACCACAGGAACCGACCC' \
            'AGACGAAAGCACCGACCAGTGATCACAACTCTTTCGAGGTCACACCCGGTACTACGTAAGTGCCACCATCGCAGCTAAGAGGGCACGCA'
        # data/labels key mismatch must be rejected
        labels_assert = {'1004345262':
                         'Viruses;ssRNA viruses;ssRNA negative-strand viruses;Mononegavirales;Bornaviridae;Bornavirus'}
        load.load_from_file_fasta.return_value = (temp, labels_assert)
        self.assertRaisesRegex(AssertionError,
                               "When loading from fasta keys in data dictionary and labels dictionary must be same.",
                               load.load_data, filename='a.fasta.gz', unique_nuc='ATCG')
        labels = {'1023464444': 'Viruses;ssDNA viruses;ssDNA negative-strand viruses;ViralesA;ViridaeB;VirusC',
                  '1028356461': 'Viruses;ssDNA viruses;ssDNA negative-strand viruses;ViridaeB;VirusC',
                  '10043452': 'Viruses;ssRNA viruses;ssRNA positive-strand viruses;ViralesA;ViridaeB;VirusC',
                  '1028356384': 'Viruses;ssDNA viruses;ssDNA negative-strand viruses;ViridaeB;VirusC',
                  '1004345262':
                      'Viruses;ssRNA viruses;ssRNA negative-strand viruses;Mononegavirales;Bornaviridae;Bornavirus'}
        load.load_from_file_fasta.return_value = (temp, labels)
        # canned return values for the mocked collaborators
        res_temp = defaultdict(int)
        res_temp[0] = 205.0
        res_temp[1] = 200.0
        res_temp[2] = 190.0
        res_temp[3] = 197.5
        classes_to_numerical_expected = ({'1023464444': 0, '1028356461': 1, '10043452': 2, '1028356384': 1,
                                          '1004345262': 3}, res_temp)
        load.classes_to_numerical.return_value = classes_to_numerical_expected
        load.build_dataset_ids.return_value = {'tr_ids': ['1004345262', '10043452', '1028356461', '1028356384'],
                                               'te_ids': ['1023464444'],
                                               'trtr_ids': ['10043452', '1028356461', '1004345262'],
                                               'trte_ids': ['1028356384']}
        trans_dict = {"A": [1, 0, 0, 0], "T": [0, 1, 0, 0], "C": [0, 0, 1, 0], "G": [0, 0, 0, 1]}
        # expected split datasets as one-hot encoded reads
        dataset_expected = {'teX': [
            [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1,
             0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0,
             0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0,
             1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0,
             1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1,
             1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0,
             0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0,
             1, 0, 0, 0]], 'teY': [0], 'trX': [
            [0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1,
             1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0,
             0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1,
             1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,
             0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0,
             0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1,
             0, 1, 0, 0],
            [0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1,
             1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1,
             0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1,
             1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0,
             0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1,
             0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0,
             0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0,
             0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0,
             0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0,
             0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1,
             1, 0, 0, 0],
            [0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0,
             1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
             0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
             0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,
             1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1,
             1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
             0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0,
             0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0,
             0, 0, 0, 1]], 'trY': [3, 2, 1], 'trteX': [
            [0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0,
             1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0,
             0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1,
             0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0,
             0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0,
             0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0,
             1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0,
             0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0,
             1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0,
             0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0,
             0, 0, 1, 0]], 'trteY': [1]}
        # call order inside load_data: test split, train split, train-test split
        dataset_mock.side_effect = [(dataset_expected['teX'], dataset_expected['teY']),
                                    (dataset_expected['trX'], dataset_expected['trY']),
                                    (dataset_expected['trteX'], dataset_expected['trteY'])]
        os_mock.side_effect = ['dummy', 'dummy', 'a-trX.fasta.gz', 'a-teX.fasta.gz', 'a-trY.fasta.gz', 'a-teY.fasta.gz',
                               'a-trteX.fasta.gz', 'a-trteY.fasta.gz']
        # load_dataset raising IOError forces load_data to rebuild and persist every split
        load_dataset_mock.side_effect = IOError()
        m_file = mock_open()
        with patch('VirClass.VirClass.load.gzip.open', m_file):
            res = load.load_data(filename='a.fasta.gz', trans_dict=trans_dict, onehot=False)
        # one gzip open + one pickle dump per persisted split (trX/teX/trY/teY/trteX/trteY)
        self.assertEqual(m_file.call_count, 6)
        self.assertTrue(isinstance(res, tuple))
        self.assertEqual(pickle_mock.call_count, 6)
        self.assertDictEqual(res[-1], res_temp)
        self.assertTrue(isinstance(res[-2], int))
        for idx, dataset_name in enumerate(['trX', 'teX', 'trY', 'teY', 'trteX', 'trteY']):
            m_file.assert_any_call('a-' + dataset_name + '.fasta.gz', 'wt')
            pickle_mock.any_call(dataset_expected[dataset_name], mock.ANY)
            np.testing.assert_array_equal(res[idx], np.asarray(dataset_expected[dataset_name]))
        # second run: same pipeline with onehot=True -> labels come back one-hot encoded
        dataset_mock.side_effect = [(dataset_expected['teX'], dataset_expected['teY']),
                                    (dataset_expected['trX'], dataset_expected['trY']),
                                    (dataset_expected['trteX'], dataset_expected['trteY'])]
        os_mock.side_effect = ['dummy', 'dummy', 'a-trX.fasta.gz', 'a-teX.fasta.gz', 'a-trY.fasta.gz', 'a-teY.fasta.gz',
                               'a-trteX.fasta.gz', 'a-trteY.fasta.gz']
        m_file.reset_mock()
        pickle_mock.reset_mock()
        one_hot_mock.side_effect = (np.array([[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]]),
                                    np.array([[1, 0, 0, 0]]),
                                    np.array([[0, 1, 0, 0]]))
        dataset_expected['trY'] = np.array([[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]])
        dataset_expected['teY'] = np.array([[1, 0, 0, 0]])
        dataset_expected['trteY'] = np.array([[0, 1, 0, 0]])
        with patch('VirClass.VirClass.load.gzip.open', m_file):
            res = load.load_data(filename='a.fasta.gz', trans_dict=trans_dict, onehot=True)
        self.assertEqual(m_file.call_count, 6)
        self.assertTrue(isinstance(res, tuple))
        self.assertEqual(pickle_mock.call_count, 6)
        self.assertDictEqual(res[-1], res_temp)
        self.assertTrue(isinstance(res[-2], int))
        for idx, dataset_name in enumerate(['trX', 'teX', 'trY', 'teY', 'trteX', 'trteY']):
            m_file.assert_any_call('a-' + dataset_name + '.fasta.gz', 'wt')
            pickle_mock.any_call(dataset_expected[dataset_name], mock.ANY)
            np.testing.assert_array_equal(res[idx], np.asarray(dataset_expected[dataset_name]))
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    main()
| [
"unittest.main",
"VirClass.VirClass.load.one_hot",
"io.StringIO",
"VirClass.VirClass.load.load_dataset",
"unittest.mock.MagicMock",
"VirClass.VirClass.load.save_dataset",
"numpy.asarray",
"VirClass.VirClass.load.load_from_file_fasta",
"VirClass.VirClass.load.load_data",
"unittest.mock.patch",
"c... | [((3427, 3473), 'unittest.mock.patch', 'patch', (['"""VirClass.VirClass.load.os.path.isfile"""'], {}), "('VirClass.VirClass.load.os.path.isfile')\n", (3432, 3473), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((3479, 3530), 'unittest.mock.patch', 'patch', (['"""VirClass.VirClass.load.load_seqs_from_ncbi"""'], {}), "('VirClass.VirClass.load.load_seqs_from_ncbi')\n", (3484, 3530), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((16995, 17038), 'unittest.mock.patch', 'patch', (['"""VirClass.VirClass.load.pickle.load"""'], {}), "('VirClass.VirClass.load.pickle.load')\n", (17000, 17038), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((17394, 17437), 'unittest.mock.patch', 'patch', (['"""VirClass.VirClass.load.pickle.dump"""'], {}), "('VirClass.VirClass.load.pickle.dump')\n", (17399, 17437), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((22769, 22821), 'unittest.mock.patch', 'patch', (['"""VirClass.VirClass.load.load_from_file_fasta"""'], {}), "('VirClass.VirClass.load.load_from_file_fasta')\n", (22774, 22821), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((22827, 22879), 'unittest.mock.patch', 'patch', (['"""VirClass.VirClass.load.classes_to_numerical"""'], {}), "('VirClass.VirClass.load.classes_to_numerical')\n", (22832, 22879), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((22885, 22934), 'unittest.mock.patch', 'patch', (['"""VirClass.VirClass.load.build_dataset_ids"""'], {}), "('VirClass.VirClass.load.build_dataset_ids')\n", (22890, 22934), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((22940, 22979), 'unittest.mock.patch', 'patch', (['"""VirClass.VirClass.load.one_hot"""'], {}), "('VirClass.VirClass.load.one_hot')\n", (22945, 22979), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((22985, 23029), 
'unittest.mock.patch', 'patch', (['"""VirClass.VirClass.load.os.path.join"""'], {}), "('VirClass.VirClass.load.os.path.join')\n", (22990, 23029), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((23035, 23082), 'unittest.mock.patch', 'patch', (['"""VirClass.VirClass.load.dataset_from_id"""'], {}), "('VirClass.VirClass.load.dataset_from_id')\n", (23040, 23082), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((23088, 23131), 'unittest.mock.patch', 'patch', (['"""VirClass.VirClass.load.pickle.dump"""'], {}), "('VirClass.VirClass.load.pickle.dump')\n", (23093, 23131), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((23137, 23181), 'unittest.mock.patch', 'patch', (['"""VirClass.VirClass.load.load_dataset"""'], {}), "('VirClass.VirClass.load.load_dataset')\n", (23142, 23181), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((37868, 37874), 'unittest.main', 'main', ([], {}), '()\n', (37872, 37874), False, 'from unittest import TestCase, main\n'), ((660, 745), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [1, 0, 0, 0]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [1, 0, 0, 0]]\n )\n', (668, 745), True, 'import numpy as np\n'), ((755, 855), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 1, 0, 0], [1, 0,\n 0, 0, 0]]'], {}), '([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 1, 0, 0\n ], [1, 0, 0, 0, 0]])\n', (763, 855), True, 'import numpy as np\n'), ((2566, 2603), 'VirClass.VirClass.load.seq_to_bits', 'load.seq_to_bits', (['vec', '"""ATCGYM"""', 'None'], {}), "(vec, 'ATCGYM', None)\n", (2582, 2603), True, 'import VirClass.VirClass.load as load\n'), ((2756, 2791), 'VirClass.VirClass.load.seq_to_bits', 'load.seq_to_bits', (['vec', '"""ATCG"""', 'None'], {}), "(vec, 'ATCG', None)\n", (2772, 2791), True, 'import VirClass.VirClass.load as 
load\n'), ((2890, 2925), 'VirClass.VirClass.load.seq_to_bits', 'load.seq_to_bits', (['vec', 'None', 'dict_1'], {}), '(vec, None, dict_1)\n', (2906, 2925), True, 'import VirClass.VirClass.load as load\n'), ((3026, 3061), 'VirClass.VirClass.load.seq_to_bits', 'load.seq_to_bits', (['vec', '"""AT"""', 'dict_1'], {}), "(vec, 'AT', dict_1)\n", (3042, 3061), True, 'import VirClass.VirClass.load as load\n'), ((3162, 3197), 'VirClass.VirClass.load.seq_to_bits', 'load.seq_to_bits', (['vec', 'None', 'dict_2'], {}), '(vec, None, dict_2)\n', (3178, 3197), True, 'import VirClass.VirClass.load as load\n'), ((3298, 3335), 'VirClass.VirClass.load.seq_to_bits', 'load.seq_to_bits', (['vec', '"""CTGM"""', 'dict_2'], {}), "(vec, 'CTGM', dict_2)\n", (3314, 3335), True, 'import VirClass.VirClass.load as load\n'), ((3648, 3665), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3659, 3665), False, 'from collections import defaultdict\n'), ((5559, 5576), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5570, 5576), False, 'from collections import defaultdict\n'), ((10355, 10419), 'VirClass.VirClass.load.dataset_from_id', 'load.dataset_from_id', (['temp_data', 'temp_tax', 'ids', '(100)', '(1.0)', 'dict_1'], {}), '(temp_data, temp_tax, ids, 100, 1.0, dict_1)\n', (10375, 10419), True, 'import VirClass.VirClass.load as load\n'), ((13409, 13473), 'VirClass.VirClass.load.dataset_from_id', 'load.dataset_from_id', (['temp_data', 'temp_tax', 'ids', '(100)', '(0.2)', 'dict_1'], {}), '(temp_data, temp_tax, ids, 100, 0.2, dict_1)\n', (13429, 13473), True, 'import VirClass.VirClass.load as load\n'), ((16441, 16504), 'VirClass.VirClass.load.dataset_from_id', 'load.dataset_from_id', (['temp_data', 'temp_tax', 'ids', '(20)', '(0.5)', 'dict_1'], {}), '(temp_data, temp_tax, ids, 20, 0.5, dict_1)\n', (16461, 16504), True, 'import VirClass.VirClass.load as load\n'), ((17107, 17118), 'unittest.mock.mock_open', 'mock_open', ([], {}), '()\n', (17116, 17118), False, 
'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((17506, 17517), 'unittest.mock.mock_open', 'mock_open', ([], {}), '()\n', (17515, 17517), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((18214, 18265), 'VirClass.VirClass.load.build_dataset_ids', 'load.build_dataset_ids', ([], {'oids': 'oids', 'test': '(0.2)', 'seed': '(0)'}), '(oids=oids, test=0.2, seed=0)\n', (18236, 18265), True, 'import VirClass.VirClass.load as load\n'), ((19169, 19213), 'VirClass.VirClass.load.build_dataset_ids', 'load.build_dataset_ids', (['[]'], {'test': '(0.2)', 'seed': '(0)'}), '([], test=0.2, seed=0)\n', (19191, 19213), True, 'import VirClass.VirClass.load as load\n'), ((19321, 19367), 'VirClass.VirClass.load.build_dataset_ids', 'load.build_dataset_ids', (['oids'], {'test': '(0.0)', 'seed': '(0)'}), '(oids, test=0.0, seed=0)\n', (19343, 19367), True, 'import VirClass.VirClass.load as load\n'), ((20336, 20353), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (20347, 20353), False, 'from collections import defaultdict\n'), ((22251, 22267), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (22262, 22267), False, 'from collections import defaultdict\n'), ((22540, 22579), 'VirClass.VirClass.load.classes_to_numerical', 'load.classes_to_numerical', (['temp', 'labels'], {}), '(temp, labels)\n', (22565, 22579), True, 'import VirClass.VirClass.load as load\n'), ((24446, 24463), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (24457, 24463), False, 'from collections import defaultdict\n'), ((26912, 26928), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (26923, 26928), False, 'from collections import defaultdict\n'), ((35326, 35337), 'unittest.mock.mock_open', 'mock_open', ([], {}), '()\n', (35335, 35337), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((36886, 36938), 'numpy.array', 'np.array', (['[[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 
0, 0]]'], {}), '([[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]])\n', (36894, 36938), True, 'import numpy as np\n'), ((36973, 36997), 'numpy.array', 'np.array', (['[[1, 0, 0, 0]]'], {}), '([[1, 0, 0, 0]])\n', (36981, 36997), True, 'import numpy as np\n'), ((37034, 37058), 'numpy.array', 'np.array', (['[[0, 1, 0, 0]]'], {}), '([[0, 1, 0, 0]])\n', (37042, 37058), True, 'import numpy as np\n'), ((1177, 1211), 'VirClass.VirClass.load.one_hot', 'load.one_hot', (['x', 'number_of_classes'], {}), '(x, number_of_classes)\n', (1189, 1211), True, 'import VirClass.VirClass.load as load\n'), ((1256, 1294), 'VirClass.VirClass.load.one_hot', 'load.one_hot', (['x', '(number_of_classes + 1)'], {}), '(x, number_of_classes + 1)\n', (1268, 1294), True, 'import VirClass.VirClass.load as load\n'), ((4592, 4633), 'unittest.mock.patch', 'patch', (['"""VirClass.VirClass.load.gzip.open"""'], {}), "('VirClass.VirClass.load.gzip.open')\n", (4597, 4633), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((4671, 4696), 'unittest.mock.MagicMock', 'MagicMock', ([], {'spec': 'file_spec'}), '(spec=file_spec)\n', (4680, 4696), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((4741, 4760), 'io.StringIO', 'StringIO', (['read_data'], {}), '(read_data)\n', (4749, 4760), False, 'from io import StringIO\n'), ((4825, 4861), 'VirClass.VirClass.load.load_from_file_fasta', 'load.load_from_file_fasta', (['"""bla.bla"""'], {}), "('bla.bla')\n", (4850, 4861), True, 'import VirClass.VirClass.load as load\n'), ((5195, 5231), 'VirClass.VirClass.load.load_from_file_fasta', 'load.load_from_file_fasta', (['"""bla.bla"""'], {}), "('bla.bla')\n", (5220, 5231), True, 'import VirClass.VirClass.load as load\n'), ((10527, 10544), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10538, 10544), False, 'from collections import defaultdict\n'), ((17132, 17181), 'unittest.mock.patch', 'patch', (['"""VirClass.VirClass.load.gzip.open"""', 'm_file'], {}), 
"('VirClass.VirClass.load.gzip.open', m_file)\n", (17137, 17181), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((17195, 17223), 'VirClass.VirClass.load.load_dataset', 'load.load_dataset', (['"""bla.bla"""'], {}), "('bla.bla')\n", (17212, 17223), True, 'import VirClass.VirClass.load as load\n'), ((17531, 17580), 'unittest.mock.patch', 'patch', (['"""VirClass.VirClass.load.gzip.open"""', 'm_file'], {}), "('VirClass.VirClass.load.gzip.open', m_file)\n", (17536, 17580), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((17594, 17648), 'VirClass.VirClass.load.save_dataset', 'load.save_dataset', (['"""bla.bla"""', "{'test_key': 'test_val'}"], {}), "('bla.bla', {'test_key': 'test_val'})\n", (17611, 17648), True, 'import VirClass.VirClass.load as load\n'), ((22687, 22704), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (22698, 22704), False, 'from collections import defaultdict\n'), ((35351, 35400), 'unittest.mock.patch', 'patch', (['"""VirClass.VirClass.load.gzip.open"""', 'm_file'], {}), "('VirClass.VirClass.load.gzip.open', m_file)\n", (35356, 35400), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((35420, 35494), 'VirClass.VirClass.load.load_data', 'load.load_data', ([], {'filename': '"""a.fasta.gz"""', 'trans_dict': 'trans_dict', 'onehot': '(False)'}), "(filename='a.fasta.gz', trans_dict=trans_dict, onehot=False)\n", (35434, 35494), True, 'import VirClass.VirClass.load as load\n'), ((36674, 36726), 'numpy.array', 'np.array', (['[[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]]'], {}), '([[0, 0, 0, 1], [0, 0, 1, 0], [0, 1, 0, 0]])\n', (36682, 36726), True, 'import numpy as np\n'), ((36764, 36788), 'numpy.array', 'np.array', (['[[1, 0, 0, 0]]'], {}), '([[1, 0, 0, 0]])\n', (36772, 36788), True, 'import numpy as np\n'), ((36826, 36850), 'numpy.array', 'np.array', (['[[0, 1, 0, 0]]'], {}), '([[0, 1, 0, 0]])\n', (36834, 36850), True, 'import numpy as np\n'), ((37072, 
37121), 'unittest.mock.patch', 'patch', (['"""VirClass.VirClass.load.gzip.open"""', 'm_file'], {}), "('VirClass.VirClass.load.gzip.open', m_file)\n", (37077, 37121), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((37141, 37214), 'VirClass.VirClass.load.load_data', 'load.load_data', ([], {'filename': '"""a.fasta.gz"""', 'trans_dict': 'trans_dict', 'onehot': '(True)'}), "(filename='a.fasta.gz', trans_dict=trans_dict, onehot=True)\n", (37155, 37214), True, 'import VirClass.VirClass.load as load\n'), ((1352, 1363), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1360, 1363), True, 'import numpy as np\n'), ((5135, 5146), 'unittest.mock.mock_open', 'mock_open', ([], {}), '()\n', (5144, 5146), False, 'from unittest.mock import patch, mock_open, MagicMock, file_spec\n'), ((22744, 22760), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (22755, 22760), False, 'from collections import defaultdict\n'), ((36071, 36113), 'numpy.asarray', 'np.asarray', (['dataset_expected[dataset_name]'], {}), '(dataset_expected[dataset_name])\n', (36081, 36113), True, 'import numpy as np\n'), ((37791, 37833), 'numpy.asarray', 'np.asarray', (['dataset_expected[dataset_name]'], {}), '(dataset_expected[dataset_name])\n', (37801, 37833), True, 'import numpy as np\n')] |
import numpy as np
import cv2
#### define variables
display = True # display frames while executing
save_format = 'gray' # how to save frames, options are:
# gray_norm - gray-scaled frame with normalized brightness and contrast
# gray - gray-scaled frame (use only green channel)
# color - colored frame
# color_all - complete colored frame, except the top overlay
overlay_top = 120 # number of top pixels to cut the overlay
out_W, out_H = 1024, 768 # output keyframe width and height
overlap = 0.05 # percent one keyframe should overlap another
time_offset = 1 # set offset in seconds
blur_thr = 1500 # below this value frame is considered too bluried
blur_skip = 8 # skip n consecutive frames when blur is detected
# maximum time allowed for new keyframe.
# if this time is reached, a frame is always grabbed
elapsed_max = 60 # in seconds
#### Define a function to detect overlay
"""
check_overlay must be a function that input the frame and output a True/False value,
indicating that must be skipped (True) or processed (False)
You can change this example below that I used in my videos
You have to select a feature in the overlay that can easily be compared, and check if it match
values that you previously saved or that you know (eg. all values in one area should be 0)
If you don't have overlays in your videos, you can simply put
def check_overlay(img):
return False
"""
# Reference overlay fingerprints previously captured from known video feeds.
sony1_overlay = np.load('overlay/sony1_overlay.npy')
sony2_overlay = np.load('overlay/sony2_overlay.npy')
sony3_overlay = np.load('overlay/sony3_overlay.npy')
feed_overlay = np.load('overlay/feed_overlay.npy')
def check_overlay(img):
    """Return True when the frame matches a known on-screen overlay (skip it)."""
    # Vertical-edge fingerprint of the first patch (green channel only).
    patch_a = img[126:134, 40:140, 1]
    edges_a = cv2.Sobel(patch_a, cv2.CV_16S, 0, 1, ksize=3)
    diff_sony1 = np.abs(edges_a - sony1_overlay).sum()
    diff_sony3 = np.abs(edges_a - sony3_overlay).sum()
    # Raw-pixel comparison of the second patch.
    patch_b = img[578:588, 460:500, 1]
    diff_sony2 = np.abs(patch_b - sony2_overlay).sum()
    diff_feed = np.abs(patch_b - feed_overlay).sum()
    # Thresholds determined empirically for each overlay template.
    return (diff_sony1 < 30000 or diff_sony3 < 30000
            or diff_sony2 < 10 or diff_feed < 10)
#### advanced settings that you can change as well
# values to normalize image by 127 mean and variance below
norm_var = 1200 # variance to normalize image
min_contrast = 0.7 # minimum allowed contrast
max_contrast = 2.0 # maximum allowed contrast
points_to_LK = 300 # minimum points to use LK. below that will use Farneback
key_move_thr = 150 # number of pixels the edge of one key frame can move compared to the last
downsize_interpolate = 8 # downscale frame by value to calculate brightest spot
# Parameters for ShiTomasi corner detection
feature_params = dict(maxCorners = 800, # How many pts. to locate
                      qualityLevel = 0.1, # b/w 0 & 1, min. quality below which everyone is rejected
                      minDistance = 20, # Min Euclidean distance b/w corners detected
                      blockSize = 5) # Size of an average block for computing a derivative covariation matrix over each pixel neighborhood
# Parameters for lucas kanade optical flow
lk_params = dict(winSize = (10,10), # size of the search window at each pyramid level
                 maxLevel = 2, # 0, pyramids are not used (single level), if set to 1, two levels are used, and so on
                 criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# same as above, with a bigger window size after blurred images
lk_params_blur = lk_params.copy()
lk_params_blur.update(winSize = (50,50),
                      maxLevel = 3)
# params to farneback (dense flow detector)
downsize_flow = 3 # use only X part of center of the window to calculate flow. higher means smaller window
percent_flow = 0.2 # n percent of highest and lowest will be ignored to measure flow
rectify_flow_max = 2.0 # max value to multiply flow when image is blurrier. when frame is blurrier, flow tends to be underestimated
farneback_params = dict(pyr_scale = 0.5,
                        levels = 2,
                        winsize = 10,
                        iterations = 3,
                        poly_n = 5,
                        poly_sigma = 1.0,
                        flags = 0)
# same as above, with more pyramid levels after blurred images
farneback_params_blur = farneback_params.copy()
farneback_params_blur.update(levels = 4)
#### define variables to display output during runtime
window_size = 15 # time window to display the graph, in seconds
# Parameters for text display over the image
text_params = dict(org = (20,150),
                   fontFace = cv2.FONT_HERSHEY_SIMPLEX,
                   fontScale = 1.5,
                   color = (0,255,0),
                   thickness = 2)
text_params_sub = text_params.copy()
text_params_sub.update(org = (20,200))
# Parameters to resize images when displayed
resize_params = dict(dsize = (0,0),
                     fx = 0.5, fy = 0.5,
                     interpolation = cv2.INTER_AREA)
| [
"numpy.load",
"numpy.abs",
"cv2.Sobel"
] | [((1406, 1442), 'numpy.load', 'np.load', (['"""overlay/sony1_overlay.npy"""'], {}), "('overlay/sony1_overlay.npy')\n", (1413, 1442), True, 'import numpy as np\n'), ((1459, 1495), 'numpy.load', 'np.load', (['"""overlay/sony2_overlay.npy"""'], {}), "('overlay/sony2_overlay.npy')\n", (1466, 1495), True, 'import numpy as np\n'), ((1512, 1548), 'numpy.load', 'np.load', (['"""overlay/sony3_overlay.npy"""'], {}), "('overlay/sony3_overlay.npy')\n", (1519, 1548), True, 'import numpy as np\n'), ((1564, 1599), 'numpy.load', 'np.load', (['"""overlay/feed_overlay.npy"""'], {}), "('overlay/feed_overlay.npy')\n", (1571, 1599), True, 'import numpy as np\n'), ((1690, 1742), 'cv2.Sobel', 'cv2.Sobel', (['overlay_check1', 'cv2.CV_16S', '(0)', '(1)'], {'ksize': '(3)'}), '(overlay_check1, cv2.CV_16S, 0, 1, ksize=3)\n', (1699, 1742), False, 'import cv2\n'), ((1766, 1803), 'numpy.abs', 'np.abs', (['(overlay_sobel - sony1_overlay)'], {}), '(overlay_sobel - sony1_overlay)\n', (1772, 1803), True, 'import numpy as np\n'), ((1828, 1865), 'numpy.abs', 'np.abs', (['(overlay_sobel - sony3_overlay)'], {}), '(overlay_sobel - sony3_overlay)\n', (1834, 1865), True, 'import numpy as np\n'), ((1937, 1975), 'numpy.abs', 'np.abs', (['(overlay_check2 - sony2_overlay)'], {}), '(overlay_check2 - sony2_overlay)\n', (1943, 1975), True, 'import numpy as np\n'), ((2000, 2037), 'numpy.abs', 'np.abs', (['(overlay_check2 - feed_overlay)'], {}), '(overlay_check2 - feed_overlay)\n', (2006, 2037), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
tenxtools.significant_test
~~~~~~~~~~~~~~~~~~~~~~~~~~
@Copyright: (c) 2018-08 by <NAME> (<EMAIL>).
@License: LICENSE_NAME, see LICENSE for more details.
"""
from scipy import stats
from skidmarks import wald_wolfowitz
from statsmodels.stats.multitest import fdrcorrection
import numpy as np
np.random.seed(12345678)
def fdr(pvals):
    """Benjamini-Hochberg FDR correction of p-values at alpha=0.05.

    Returns the (rejected, corrected_pvalues) pair produced by
    statsmodels' `fdrcorrection`.
    """
    rejected, qvals = fdrcorrection(pvals, alpha=0.05, method='indep', is_sorted=False)
    return rejected, qvals
def exponent_distribution(x):
    """Kolmogorov-Smirnov test of the null hypothesis that `x` is
    sampled from an exponential distribution.

    Returns (reject_null_at_0.05, p_value), or (None, None) when the
    sample has fewer than three observations.
    """
    if len(x) <= 2:
        return None, None
    fitted_params = stats.expon.fit(x)
    _statistic, pvalue = stats.kstest(x, 'expon', args=fitted_params)
    return pvalue < 0.05, pvalue
def multinomial_distribution(x):
    """Chi-square goodness-of-fit test of the null hypothesis that `x`
    is drawn from a multinomial distribution.

    Expected frequencies are uniform (the mean of the observed
    frequencies, scipy's default). Returns (reject_null_at_0.05, p_value).
    """
    _chisq, pvalue = stats.chisquare(x)
    return pvalue < 0.05, pvalue
def wald_wolfowitz_test(x):
    """Wald-Wolfowitz runs test.

    Null hypothesis: the sample is an alternating sequence
    (010101010101). Returns (reject_null, p_value, runs_ratio) where
    runs_ratio is the number of runs over the sample length; returns
    (None, None, None) for constant or too-short inputs.
    """
    # Degenerate inputs: a single distinct value or fewer than 3 samples.
    if len(set(x)) == 1:
        return None, None, None
    if len(x) <= 2:
        return None, None, None
    pvalue, _z, n_runs, _sd, _mean = wald_wolfowitz(x).values()
    runs_ratio = int(n_runs) / float(len(x))
    return pvalue < 0.05, pvalue, runs_ratio
def rank_sum_test(x1, x2):
    """Wilcoxon rank-sum test between two independent samples.

    statistic : float
        The test statistic under the large-sample approximation that the
        rank sum statistic is normally distributed (discarded here).
    Returns (reject_null_at_0.05, two_sided_pvalue).
    """
    _statistic, pvalue = stats.ranksums(x1, x2)
    return pvalue < 0.05, pvalue
def t_test(x1, x2, equal_var=True, ind=True):
    """Two-sample t-test.

    When `ind` is True an independent-samples test is run (Welch's when
    `equal_var` is False); otherwise a paired test is run.
    Returns (reject_null_at_0.05, p_value).
    """
    if ind:
        result = stats.ttest_ind(x1, x2, equal_var=equal_var)
    else:
        result = stats.ttest_rel(x1, x2)
    _statistic, pvalue = result
    return pvalue < 0.05, pvalue
def anova_test(x1, x2):
    """One-way ANOVA between two groups; returns (reject_null_at_0.05, p_value)."""
    _f_statistic, pvalue = stats.f_oneway(x1, x2)
    return pvalue < 0.05, pvalue
| [
"scipy.stats.kstest",
"numpy.random.seed",
"scipy.stats.ttest_rel",
"scipy.stats.expon.fit",
"scipy.stats.ttest_ind",
"scipy.stats.f_oneway",
"statsmodels.stats.multitest.fdrcorrection",
"scipy.stats.ranksums",
"skidmarks.wald_wolfowitz",
"scipy.stats.chisquare"
] | [((359, 383), 'numpy.random.seed', 'np.random.seed', (['(12345678)'], {}), '(12345678)\n', (373, 383), True, 'import numpy as np\n'), ((412, 477), 'statsmodels.stats.multitest.fdrcorrection', 'fdrcorrection', (['pvals'], {'alpha': '(0.05)', 'method': '"""indep"""', 'is_sorted': '(False)'}), "(pvals, alpha=0.05, method='indep', is_sorted=False)\n", (425, 477), False, 'from statsmodels.stats.multitest import fdrcorrection\n'), ((639, 657), 'scipy.stats.expon.fit', 'stats.expon.fit', (['x'], {}), '(x)\n', (654, 657), False, 'from scipy import stats\n'), ((669, 705), 'scipy.stats.kstest', 'stats.kstest', (['x', '"""expon"""'], {'args': 'param'}), "(x, 'expon', args=param)\n", (681, 705), False, 'from scipy import stats\n'), ((949, 967), 'scipy.stats.chisquare', 'stats.chisquare', (['x'], {}), '(x)\n', (964, 967), False, 'from scipy import stats\n'), ((1573, 1595), 'scipy.stats.ranksums', 'stats.ranksums', (['x1', 'x2'], {}), '(x1, x2)\n', (1587, 1595), False, 'from scipy import stats\n'), ((1851, 1873), 'scipy.stats.f_oneway', 'stats.f_oneway', (['x1', 'x2'], {}), '(x1, x2)\n', (1865, 1873), False, 'from scipy import stats\n'), ((1696, 1740), 'scipy.stats.ttest_ind', 'stats.ttest_ind', (['x1', 'x2'], {'equal_var': 'equal_var'}), '(x1, x2, equal_var=equal_var)\n', (1711, 1740), False, 'from scipy import stats\n'), ((1766, 1789), 'scipy.stats.ttest_rel', 'stats.ttest_rel', (['x1', 'x2'], {}), '(x1, x2)\n', (1781, 1789), False, 'from scipy import stats\n'), ((1248, 1265), 'skidmarks.wald_wolfowitz', 'wald_wolfowitz', (['x'], {}), '(x)\n', (1262, 1265), False, 'from skidmarks import wald_wolfowitz\n')] |
from __future__ import division
import os
import shutil
import sys
import tempfile
import warnings
from distutils.spawn import find_executable
from subprocess import Popen, PIPE

import numpy as np

from mbuild.compound import Compound
from mbuild.exceptions import MBuildError
from mbuild.box import Box
from mbuild import clone
__all__ = ['fill_box', 'fill_region', 'solvate']
# Absolute path to the packmol executable, or None when it is not installed.
PACKMOL = find_executable('packmol')
# PACKMOL input-script templates; the {} placeholders are filled with
# str.format. Coordinates are in angstroms (packmol's native unit).
PACKMOL_HEADER = """
tolerance {0:.16f}
filetype pdb
output {1}
seed {2}
"""
# Fixes a single solute at the given center position.
PACKMOL_SOLUTE = """
structure {0}
number 1
center
fixed {1:.3f} {2:.3f} {3:.3f} 0. 0. 0.
end structure
"""
# Packs {1} copies of a structure inside the given box extents.
PACKMOL_BOX = """
structure {0}
number {1:d}
inside box {2:.3f} {3:.3f} {4:.3f} {5:.3f} {6:.3f} {7:.3f}
end structure
"""
def fill_box(compound, n_compounds=None, box=None, density=None, overlap=0.2,
             seed=12345, edge=0.2, compound_ratio=None,
             aspect_ratio=None, temp_file=None):
    """Fill a box with a compound using packmol.
    Two arguments of `n_compounds, box, and density` must be specified.
    If `n_compounds` and `box` are not None, the specified number of
    n_compounds will be inserted into a box of the specified size.
    If `n_compounds` and `density` are not None, the corresponding box
    size will be calculated internally. In this case, `n_compounds`
    must be an int and not a list of int.
    If `box` and `density` are not None, the corresponding number of
    compounds will be calculated internally.
    For the cases in which `box` is not specified but generated internally,
    the default behavior is to calculate a cubic box. Optionally,
    `aspect_ratio` can be passed to generate a non-cubic box.
    Parameters
    ----------
    compound : mb.Compound or list of mb.Compound
        Compound or list of compounds to be put in box.
    n_compounds : int or list of int
        Number of compounds to be put in box.
    box : mb.Box
        Box to be filled by compounds.
    density : float, units kg/m^3, default=None
        Target density for the system in macroscale units. If not None, one of
        `n_compounds` or `box`, but not both, must be specified.
    overlap : float, units nm, default=0.2
        Minimum separation between atoms of different molecules.
    seed : int, default=12345
        Random seed to be passed to PACKMOL.
    edge : float, units nm, default=0.2
        Buffer at the edge of the box to not place molecules. This is necessary
        in some systems because PACKMOL does not account for periodic boundary
        conditions in its optimization.
    compound_ratio : list, default=None
        Ratio of number of each compound to be put in box. Only used in the
        case of `density` and `box` having been specified, `n_compounds` not
        specified, and more than one `compound`.
    aspect_ratio : list of float
        If a non-cubic box is desired, the ratio of box lengths in the x, y,
        and z directions.
    temp_file : str, default=None
        File name to write PACKMOL's raw output to.
    Returns
    -------
    filled : mb.Compound

    Raises
    ------
    ValueError
        If the combination of sizing arguments is invalid or list
        arguments have mismatched lengths.
    """
    _check_packmol(PACKMOL)
    # Exactly two of the three sizing arguments must be given.
    arg_count = 3 - [n_compounds, box, density].count(None)
    if arg_count != 2:
        msg = ("Exactly 2 of `n_compounds`, `box`, and `density` "
               "must be specified. {} were given.".format(arg_count))
        raise ValueError(msg)
    if box is not None:
        box = _validate_box(box)
    # Normalize scalar arguments into one-element lists.
    if not isinstance(compound, (list, set)):
        compound = [compound]
    if n_compounds is not None and not isinstance(n_compounds, (list, set)):
        n_compounds = [n_compounds]
    if compound is not None and n_compounds is not None:
        if len(compound) != len(n_compounds):
            msg = ("`compound` and `n_compounds` must be of equal length.")
            raise ValueError(msg)
    if density is not None:
        # Derive the missing quantity (box size or molecule counts) from density.
        if box is None and n_compounds is not None:
            total_mass = np.sum([n*np.sum([a.mass for a in c.to_parmed().atoms])
                                    for c, n in zip(compound, n_compounds)])
            # Conversion from (amu/(kg/m^3))**(1/3) to nm
            L = (total_mass/density)**(1/3)*1.1841763
            if aspect_ratio is None:
                box = _validate_box(Box(3*[L]))
            else:
                # Scale so the box volume still matches the target density.
                L *= np.prod(aspect_ratio) ** (-1/3)
                box = _validate_box(Box([val*L for val in aspect_ratio]))
        if n_compounds is None and box is not None:
            if len(compound) == 1:
                compound_mass = np.sum([a.mass for a in compound[0].to_parmed().atoms])
                # Conversion from kg/m^3 / amu * nm^3 to dimensionless units
                n_compounds = [int(density/compound_mass*np.prod(box.lengths)*.60224)]
            else:
                if compound_ratio is None:
                    # BUGFIX: message previously read "Determing ... requires`compound_ratio`"
                    # (typo and missing space).
                    msg = ("Determining `n_compounds` from `density` and `box` "
                           "for systems with more than one compound type "
                           "requires `compound_ratio`")
                    raise ValueError(msg)
                if len(compound) != len(compound_ratio):
                    msg = ("Length of `compound_ratio` must equal length of "
                           "`compound`")
                    raise ValueError(msg)
                # Mass of one "prototype" set of molecules in the given ratio.
                prototype_mass = 0
                for c, r in zip(compound, compound_ratio):
                    prototype_mass += r * np.sum([a.mass for a in c.to_parmed().atoms])
                # Conversion from kg/m^3 / amu * nm^3 to dimensionless units
                n_prototypes = int(density/prototype_mass*np.prod(box.lengths)*.60224)
                n_compounds = list()
                for c in compound_ratio:
                    n_compounds.append(int(n_prototypes * c))
    # In angstroms for packmol.
    box_mins = box.mins * 10
    box_maxs = box.maxs * 10
    overlap *= 10
    # Apply edge buffer
    box_maxs -= edge * 10
    # Build the input file for each compound and call packmol.
    filled_pdb = tempfile.mkstemp(suffix='.pdb')[1]
    input_text = PACKMOL_HEADER.format(overlap, filled_pdb, seed)
    for comp, m_compounds in zip(compound, n_compounds):
        m_compounds = int(m_compounds)
        compound_pdb = tempfile.mkstemp(suffix='.pdb')[1]
        comp.save(compound_pdb, overwrite=True)
        input_text += PACKMOL_BOX.format(compound_pdb, m_compounds,
                                          box_mins[0], box_mins[1], box_mins[2],
                                          box_maxs[0], box_maxs[1], box_maxs[2])
    _run_packmol(input_text, filled_pdb, temp_file)
    # Create the topology and update the coordinates.
    filled = Compound()
    for comp, m_compounds in zip(compound, n_compounds):
        for _ in range(m_compounds):
            filled.add(clone(comp))
    filled.update_coordinates(filled_pdb)
    filled.periodicity = np.asarray(box.lengths, dtype=np.float32)
    return filled
def fill_region(compound, n_compounds, region, overlap=0.2,
                seed=12345, edge=0.2, temp_file=None):
    """Fill a region of a box with a compound using packmol.
    Parameters
    ----------
    compound : mb.Compound or list of mb.Compound
        Compound or list of compounds to be put in region.
    n_compounds : int or list of int
        Number of compounds to be put in region.
    region : mb.Box or list of mb.Box
        Region to be filled by compounds.
    overlap : float, units nm, default=0.2
        Minimum separation between atoms of different molecules.
    seed : int, default=12345
        Random seed to be passed to PACKMOL.
    edge : float, units nm, default=0.2
        Buffer at the edge of the region to not place molecules. This is
        necessary in some systems because PACKMOL does not account for
        periodic boundary conditions in its optimization.
    temp_file : str, default=None
        File name to write PACKMOL's raw output to.
    Returns
    -------
    filled : mb.Compound
    If using mulitple regions and compounds, the nth value in each list are used in order.
    For example, if the third compound will be put in the third region using the third value in n_compounds.
    """
    _check_packmol(PACKMOL)
    # Normalize scalar arguments into one-element lists.
    if not isinstance(compound, (list, set)):
        compound = [compound]
    if not isinstance(n_compounds, (list, set)):
        n_compounds = [n_compounds]
    if compound is not None and n_compounds is not None:
        if len(compound) != len(n_compounds):
            msg = ("`compound` and `n_compounds` must be of equal length.")
            raise ValueError(msg)
    # See if region is a single region or list
    if isinstance(region, Box): # Cannot iterate over boxes
        region = [region]
    elif not any(isinstance(reg, (list, set, Box)) for reg in region):
        # A bare [xmin, ymin, zmin, ...] sequence: wrap so there is one region.
        region = [region]
    region = [_validate_box(reg) for reg in region]
    # In angstroms for packmol.
    overlap *= 10
    # Build the input file and call packmol.
    filled_pdb = tempfile.mkstemp(suffix='.pdb')[1]
    input_text = PACKMOL_HEADER.format(overlap, filled_pdb, seed)
    # One PACKMOL "structure" section per (compound, count, region) triple.
    for comp, m_compounds, reg in zip(compound, n_compounds, region):
        m_compounds = int(m_compounds)
        compound_pdb = tempfile.mkstemp(suffix='.pdb')[1]
        comp.save(compound_pdb, overwrite=True)
        reg_mins = reg.mins * 10
        reg_maxs = reg.maxs * 10
        reg_maxs -= edge * 10 # Apply edge buffer
        input_text += PACKMOL_BOX.format(compound_pdb, m_compounds,
                                          reg_mins[0], reg_mins[1], reg_mins[2],
                                          reg_maxs[0], reg_maxs[1], reg_maxs[2])
    _run_packmol(input_text, filled_pdb, temp_file)
    # Create the topology and update the coordinates.
    filled = Compound()
    for comp, m_compounds in zip(compound, n_compounds):
        for _ in range(m_compounds):
            filled.add(clone(comp))
    filled.update_coordinates(filled_pdb)
    return filled
def solvate(solute, solvent, n_solvent, box, overlap=0.2,
            seed=12345, edge=0.2, temp_file=None):
    """Solvate a compound in a box of solvent using packmol.
    Parameters
    ----------
    solute : mb.Compound
        Compound to be placed in a box and solvated.
    solvent : mb.Compound
        Compound to solvate the box.
    n_solvent : int
        Number of solvents to be put in box.
    box : mb.Box
        Box to be filled by compounds.
    overlap : float, units nm, default=0.2
        Minimum separation between atoms of different molecules.
    seed : int, default=12345
        Random seed to be passed to PACKMOL.
    edge : float, units nm, default=0.2
        Buffer at the edge of the box to not place molecules. This is necessary
        in some systems because PACKMOL does not account for periodic boundary
        conditions in its optimization.
    temp_file : str, default=None
        File name to write PACKMOL's raw output to.
    Returns
    -------
    solvated : mb.Compound

    Raises
    ------
    ValueError
        If `solvent` and `n_solvent` have mismatched lengths.
    """
    _check_packmol(PACKMOL)
    box = _validate_box(box)
    # Normalize scalar arguments into one-element lists.
    if not isinstance(solvent, (list, set)):
        solvent = [solvent]
    if not isinstance(n_solvent, (list, set)):
        n_solvent = [n_solvent]
    if len(solvent) != len(n_solvent):
        # BUGFIX: the message previously named `n_solvent` twice.
        msg = ("`solvent` and `n_solvent` must be of equal length.")
        raise ValueError(msg)
    # In angstroms for packmol.
    box_mins = box.mins * 10
    box_maxs = box.maxs * 10
    overlap *= 10
    # Solute is pinned at the geometric center of the (un-buffered) box.
    center_solute = (box_maxs + box_mins) / 2
    # Apply edge buffer
    box_maxs -= edge * 10
    # Build the input file for each compound and call packmol.
    solvated_pdb = tempfile.mkstemp(suffix='.pdb')[1]
    solute_pdb = tempfile.mkstemp(suffix='.pdb')[1]
    solute.save(solute_pdb, overwrite=True)
    input_text = (PACKMOL_HEADER.format(overlap, solvated_pdb, seed) +
                  PACKMOL_SOLUTE.format(solute_pdb, *center_solute))
    for solv, m_solvent in zip(solvent, n_solvent):
        m_solvent = int(m_solvent)
        solvent_pdb = tempfile.mkstemp(suffix='.pdb')[1]
        solv.save(solvent_pdb, overwrite=True)
        input_text += PACKMOL_BOX.format(solvent_pdb, m_solvent,
                                          box_mins[0], box_mins[1], box_mins[2],
                                          box_maxs[0], box_maxs[1], box_maxs[2])
    _run_packmol(input_text, solvated_pdb, temp_file)
    # Create the topology and update the coordinates.
    solvated = Compound()
    solvated.add(solute)
    for solv, m_solvent in zip(solvent, n_solvent):
        for _ in range(m_solvent):
            solvated.add(clone(solv))
    solvated.update_coordinates(solvated_pdb)
    return solvated
def _validate_box(box):
    """Normalize `box` into an mb.Box instance.

    Accepts an mb.Box (returned unchanged), a 3-sequence of box lengths,
    or a 6-sequence of mins followed by maxs. Anything else raises
    MBuildError.
    """
    if isinstance(box, (list, tuple)):
        n_values = len(box)
        if n_values == 3:
            box = Box(lengths=box)
        elif n_values == 6:
            box = Box(mins=box[:3], maxs=box[3:])
    if isinstance(box, Box):
        return box
    raise MBuildError('Unknown format for `box` parameter. Must pass a'
                      ' list/tuple of length 3 (box lengths) or length'
                      ' 6 (box mins and maxes) or an mbuild.Box object.')
def _packmol_error(out, err):
"""Log packmol output to files. """
with open('log.txt', 'w') as log_file, open('err.txt', 'w') as err_file:
log_file.write(out)
err_file.write(err)
raise RuntimeError("PACKMOL failed. See 'err.txt' and 'log.txt'")
def _run_packmol(input_text, filled_pdb, temp_file):
    """Feed `input_text` to the PACKMOL executable and post-process its output.

    Parameters
    ----------
    input_text : str
        Complete PACKMOL input script.
    filled_pdb : str
        Path where PACKMOL writes the packed structure.
    temp_file : str or None
        If given, a copy of the packed pdb is also saved at this path.

    Raises
    ------
    RuntimeError
        If PACKMOL reports an error (via `_packmol_error`).
    """
    proc = Popen(PACKMOL, stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
    out, err = proc.communicate(input=input_text)
    if 'WITHOUT PERFECT PACKING' in out:
        msg = ("Packmol finished with imperfect packing. Using "
               "the .pdb_FORCED file instead. This may not be a "
               "sufficient packing result.")
        warnings.warn(msg)
        # Use shutil.copy instead of shelling out to `cp`: portable
        # (works on Windows) and immune to shell-quoting issues.
        shutil.copy('{0}_FORCED'.format(filled_pdb), filled_pdb)
    if 'ERROR' in out:
        _packmol_error(out, err)
    if temp_file is not None:
        # The old code called os.path.join(temp_file) with a single
        # argument, which is a no-op; copy directly to the target.
        shutil.copy(filled_pdb, temp_file)
def _check_packmol(PACKMOL):
if not PACKMOL:
msg = "Packmol not found."
if sys.platform.startswith("win"):
msg = (msg + " If packmol is already installed, make sure that the "
"packmol.exe is on the path.")
raise IOError(msg)
| [
"mbuild.compound.Compound",
"sys.platform.startswith",
"subprocess.Popen",
"os.path.join",
"tempfile.mkstemp",
"numpy.asarray",
"mbuild.box.Box",
"mbuild.clone",
"distutils.spawn.find_executable",
"warnings.warn",
"mbuild.exceptions.MBuildError",
"numpy.prod"
] | [((378, 404), 'distutils.spawn.find_executable', 'find_executable', (['"""packmol"""'], {}), "('packmol')\n", (393, 404), False, 'from distutils.spawn import find_executable\n'), ((6619, 6629), 'mbuild.compound.Compound', 'Compound', ([], {}), '()\n', (6627, 6629), False, 'from mbuild.compound import Compound\n'), ((6827, 6868), 'numpy.asarray', 'np.asarray', (['box.lengths'], {'dtype': 'np.float32'}), '(box.lengths, dtype=np.float32)\n', (6837, 6868), True, 'import numpy as np\n'), ((9722, 9732), 'mbuild.compound.Compound', 'Compound', ([], {}), '()\n', (9730, 9732), False, 'from mbuild.compound import Compound\n'), ((12387, 12397), 'mbuild.compound.Compound', 'Compound', ([], {}), '()\n', (12395, 12397), False, 'from mbuild.compound import Compound\n'), ((13437, 13514), 'subprocess.Popen', 'Popen', (['PACKMOL'], {'stdin': 'PIPE', 'stdout': 'PIPE', 'stderr': 'PIPE', 'universal_newlines': '(True)'}), '(PACKMOL, stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)\n', (13442, 13514), False, 'from subprocess import Popen, PIPE\n'), ((5994, 6025), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".pdb"""'}), "(suffix='.pdb')\n", (6010, 6025), False, 'import tempfile\n'), ((8942, 8973), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".pdb"""'}), "(suffix='.pdb')\n", (8958, 8973), False, 'import tempfile\n'), ((11602, 11633), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".pdb"""'}), "(suffix='.pdb')\n", (11618, 11633), False, 'import tempfile\n'), ((11654, 11685), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".pdb"""'}), "(suffix='.pdb')\n", (11670, 11685), False, 'import tempfile\n'), ((12866, 13033), 'mbuild.exceptions.MBuildError', 'MBuildError', (['"""Unknown format for `box` parameter. Must pass a list/tuple of length 3 (box lengths) or length 6 (box mins and maxes) or an mbuild.Box object."""'], {}), "(\n 'Unknown format for `box` parameter. 
Must pass a list/tuple of length 3 (box lengths) or length 6 (box mins and maxes) or an mbuild.Box object.'\n )\n", (12877, 13033), False, 'from mbuild.exceptions import MBuildError\n'), ((13791, 13809), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (13804, 13809), False, 'import warnings\n'), ((14127, 14157), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (14150, 14157), False, 'import sys\n'), ((6215, 6246), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".pdb"""'}), "(suffix='.pdb')\n", (6231, 6246), False, 'import tempfile\n'), ((9176, 9207), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".pdb"""'}), "(suffix='.pdb')\n", (9192, 9207), False, 'import tempfile\n'), ((11983, 12014), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".pdb"""'}), "(suffix='.pdb')\n", (11999, 12014), False, 'import tempfile\n'), ((12723, 12739), 'mbuild.box.Box', 'Box', ([], {'lengths': 'box'}), '(lengths=box)\n', (12726, 12739), False, 'from mbuild.box import Box\n'), ((6747, 6758), 'mbuild.clone', 'clone', (['comp'], {}), '(comp)\n', (6752, 6758), False, 'from mbuild import clone\n'), ((9850, 9861), 'mbuild.clone', 'clone', (['comp'], {}), '(comp)\n', (9855, 9861), False, 'from mbuild import clone\n'), ((12535, 12546), 'mbuild.clone', 'clone', (['solv'], {}), '(solv)\n', (12540, 12546), False, 'from mbuild import clone\n'), ((12786, 12817), 'mbuild.box.Box', 'Box', ([], {'mins': 'box[:3]', 'maxs': 'box[3:]'}), '(mins=box[:3], maxs=box[3:])\n', (12789, 12817), False, 'from mbuild.box import Box\n'), ((14005, 14028), 'os.path.join', 'os.path.join', (['temp_file'], {}), '(temp_file)\n', (14017, 14028), False, 'import os\n'), ((4236, 4248), 'mbuild.box.Box', 'Box', (['(3 * [L])'], {}), '(3 * [L])\n', (4239, 4248), False, 'from mbuild.box import Box\n'), ((4287, 4308), 'numpy.prod', 'np.prod', (['aspect_ratio'], {}), '(aspect_ratio)\n', (4294, 4308), True, 'import numpy as np\n'), ((4355, 4395), 
'mbuild.box.Box', 'Box', (['[(val * L) for val in aspect_ratio]'], {}), '([(val * L) for val in aspect_ratio])\n', (4358, 4395), False, 'from mbuild.box import Box\n'), ((5579, 5599), 'numpy.prod', 'np.prod', (['box.lengths'], {}), '(box.lengths)\n', (5586, 5599), True, 'import numpy as np\n'), ((4702, 4722), 'numpy.prod', 'np.prod', (['box.lengths'], {}), '(box.lengths)\n', (4709, 4722), True, 'import numpy as np\n')] |
import numbers
import numpy as np
import time
import pickle
from typing import Optional
from sklearn.base import BaseEstimator
from sklearn.cluster import KMeans
from dl_portfolio.logger import LOGGER
from dl_portfolio.nmf.utils import negative_matrix, positive_matrix, reconstruction_error
EPSILON = 1e-12
class SemiNMF(BaseEstimator):
    """Semi Non-negative Matrix Factorization: approximate X as F.dot(G.T).

    The factor F (transformed data) is unconstrained in sign while the
    components G are initialized from a KMeans clustering of the features and
    kept nonnegative through multiplicative updates.

    Parameters
    ----------
    n_components : int or None
        Number of components; when None, defaults to the number of features.
    max_iter : int, default 200
        Maximum number of update iterations.
    tol : float, default 1e-6
        Relative decrease of the reconstruction error (checked every 10
        iterations) below which the fit stops early; 0 disables the check.
    random_state : int or None
        Seed forwarded to the KMeans initialization.
    verbose : int, default 0
        If truthy, progress is logged.
    loss : str, default "mse"
        Loss name forwarded to `reconstruction_error`.
    shuffle : bool, default False
        Stored but not used by this implementation.
    """

    def __init__(self, n_components, max_iter=200, tol=1e-6, random_state=None, verbose=0, loss="mse", shuffle=False):
        self.n_components = n_components
        self.tol = tol
        self.max_iter = max_iter
        self.random_state = random_state
        self.verbose = verbose
        self.shuffle = shuffle
        self._is_fitted = False
        self.components = None  # G matrix (n_features x n_components) after fit
        self.loss = loss

    def _check_params(self, X):
        """Validate hyper-parameters and resolve `_n_components` from X."""
        # n_components
        self._n_components = self.n_components
        if self._n_components is None:
            self._n_components = X.shape[1]
        if (
            not isinstance(self._n_components, numbers.Integral)
            or self._n_components <= 0
        ):
            raise ValueError(
                "Number of components must be a positive integer; got "
                f"(n_components={self._n_components!r})"
            )
        # max_iter
        if not isinstance(self.max_iter, numbers.Integral) or self.max_iter < 0:
            raise ValueError(
                "Maximum number of iterations must be a positive "
                f"integer; got (max_iter={self.max_iter!r})"
            )
        # tol
        if not isinstance(self.tol, numbers.Number) or self.tol < 0:
            raise ValueError(
                "Tolerance for stopping criteria must be positive; got "
                f"(tol={self.tol!r})"
            )
        return self

    def fit(self, X, verbose: Optional[int] = None):
        """Fit the factorization to X and return self (sklearn convention)."""
        X = X.astype(np.float32)
        if verbose is not None:
            self.verbose = verbose
        start_time = time.time()
        self._check_params(X)
        # Initialize G and F
        G = self._initilize_g(X)
        F = self._update_f(X, G)
        # used for the convergence criterion
        error_at_init = reconstruction_error(X, F, G, loss=self.loss)
        previous_error = error_at_init
        for n_iter in range(self.max_iter):
            # Update G
            G = self._update_g(X, G, F)
            # Update F
            F = self._update_f(X, G)
            if n_iter == self.max_iter - 1:
                if self.verbose:
                    LOGGER.info('Reached max iteration number, stopping')
            if self.tol > 0 and n_iter % 10 == 0:
                error = reconstruction_error(X, F, G, loss=self.loss)
                if self.verbose:
                    iter_time = time.time()
                    LOGGER.info(
                        "Epoch %02d reached after %.3f seconds, error: %f"
                        % (n_iter, iter_time - start_time, error)
                    )
                if (previous_error - error) / error_at_init < self.tol:
                    if self.verbose:
                        LOGGER.info(f"Converged at iteration: {n_iter} with tolerance: {self.tol}")
                    break
                previous_error = error
        self.components = G
        self._is_fitted = True
        # bug fix: return self so calls can be chained, as sklearn estimators do
        return self

    def transform(self, X):
        """Project X onto the fitted components and return the factor F."""
        assert self._is_fitted, "You must fit the model first"
        G = self.components.copy()
        F = X.dot(G.dot(np.linalg.inv(G.T.dot(G))))
        return F

    def inverse_transform(self, F):
        """Reconstruct the data from its factor representation F."""
        assert self._is_fitted, "You must fit the model first"
        return np.dot(F, self.components.T)

    def _initilize_g(self, X):
        """Initialize G from KMeans cluster memberships (plus a 0.2 offset)."""
        d = X.shape[-1]
        G = np.zeros((d, self._n_components))
        kmeans = KMeans(n_clusters=self._n_components, random_state=self.random_state).fit(X.T)
        for i in range(d):
            G[i, kmeans.labels_[i]] = 1
        # add constant
        G += 0.2
        return G

    def evaluate(self, X):
        """Return the reconstruction error of X under the fitted model."""
        F = self.transform(X)
        return reconstruction_error(X, F, self.components, loss=self.loss)

    def save(self, path):
        """Pickle the fitted estimator to `path` (extension 'p' or 'pkl')."""
        assert self._is_fitted, "Fit the model before dumping it"
        assert path.split('.')[-1] in ["p", "pkl"], f"Extension must be 'p' or 'pkl', not: {path.split('.')[-1]}"
        # bug fix: use a context manager so the file handle is always closed
        with open(path, "wb") as fh:
            pickle.dump(self, fh)

    @staticmethod
    def _update_f(X, G):
        """Closed-form update of F: F = X G (G.T G)^-1."""
        return X.dot(G.dot(np.linalg.inv(G.T.dot(G))))

    @staticmethod
    def _update_g(X, G, F):
        """Multiplicative update of G (stays nonnegative for nonnegative G)."""
        F_TF_minus = negative_matrix(F.T.dot(F))
        F_TF_plus = positive_matrix(F.T.dot(F))
        X_TF_minus = negative_matrix(X.T.dot(F))
        X_TF_plus = positive_matrix(X.T.dot(F))
        numerator = X_TF_plus + G.dot(F_TF_minus)
        denominator = X_TF_minus + G.dot(F_TF_plus)
        # TODO: Handle denominator has 0
        denominator += EPSILON
        assert (denominator != 0).all(), "Division by 0"
        # if not (denominator != 0).all():
        #     denominator[:,:] = np.nan
        return G * np.sqrt(numerator / denominator)
| [
"dl_portfolio.nmf.utils.reconstruction_error",
"dl_portfolio.logger.LOGGER.info",
"sklearn.cluster.KMeans",
"numpy.zeros",
"time.time",
"numpy.dot",
"numpy.sqrt"
] | [((1954, 1965), 'time.time', 'time.time', ([], {}), '()\n', (1963, 1965), False, 'import time\n'), ((2161, 2206), 'dl_portfolio.nmf.utils.reconstruction_error', 'reconstruction_error', (['X', 'F', 'G'], {'loss': 'self.loss'}), '(X, F, G, loss=self.loss)\n', (2181, 2206), False, 'from dl_portfolio.nmf.utils import negative_matrix, positive_matrix, reconstruction_error\n'), ((3607, 3635), 'numpy.dot', 'np.dot', (['F', 'self.components.T'], {}), '(F, self.components.T)\n', (3613, 3635), True, 'import numpy as np\n'), ((3704, 3737), 'numpy.zeros', 'np.zeros', (['(d, self._n_components)'], {}), '((d, self._n_components))\n', (3712, 3737), True, 'import numpy as np\n'), ((4031, 4090), 'dl_portfolio.nmf.utils.reconstruction_error', 'reconstruction_error', (['X', 'F', 'self.components'], {'loss': 'self.loss'}), '(X, F, self.components, loss=self.loss)\n', (4051, 4090), False, 'from dl_portfolio.nmf.utils import negative_matrix, positive_matrix, reconstruction_error\n'), ((5019, 5051), 'numpy.sqrt', 'np.sqrt', (['(numerator / denominator)'], {}), '(numerator / denominator)\n', (5026, 5051), True, 'import numpy as np\n'), ((2641, 2686), 'dl_portfolio.nmf.utils.reconstruction_error', 'reconstruction_error', (['X', 'F', 'G'], {'loss': 'self.loss'}), '(X, F, G, loss=self.loss)\n', (2661, 2686), False, 'from dl_portfolio.nmf.utils import negative_matrix, positive_matrix, reconstruction_error\n'), ((3755, 3824), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'self._n_components', 'random_state': 'self.random_state'}), '(n_clusters=self._n_components, random_state=self.random_state)\n', (3761, 3824), False, 'from sklearn.cluster import KMeans\n'), ((2512, 2565), 'dl_portfolio.logger.LOGGER.info', 'LOGGER.info', (['"""Reached max iteration number, stopping"""'], {}), "('Reached max iteration number, stopping')\n", (2523, 2565), False, 'from dl_portfolio.logger import LOGGER\n'), ((2753, 2764), 'time.time', 'time.time', ([], {}), '()\n', (2762, 2764), False, 'import 
time\n'), ((2785, 2895), 'dl_portfolio.logger.LOGGER.info', 'LOGGER.info', (["('Epoch %02d reached after %.3f seconds, error: %f' % (n_iter, iter_time -\n start_time, error))"], {}), "('Epoch %02d reached after %.3f seconds, error: %f' % (n_iter, \n iter_time - start_time, error))\n", (2796, 2895), False, 'from dl_portfolio.logger import LOGGER\n'), ((3095, 3170), 'dl_portfolio.logger.LOGGER.info', 'LOGGER.info', (['f"""Converged at iteration: {n_iter} with tolerance: {self.tol}"""'], {}), "(f'Converged at iteration: {n_iter} with tolerance: {self.tol}')\n", (3106, 3170), False, 'from dl_portfolio.logger import LOGGER\n')] |
"""Adds dilution of precision (DOP) to dataset
Description:
------------
Dilution of precision calculation is based on estimated covariance matrix of unknowns. GDOP, PDOP, HDOP, VDOP and TDOP
is added to dataset.
TODO: Check if the calculation of HDOP and VDOP is correct.
"""
# External library imports
import numpy as np
# Midgard imports
from midgard.dev import plugins
# Where imports
from where.lib import config
from where.lib import log
# Name of section in configuration
_SECTION = "_".join(__name__.split(".")[-1:])
@plugins.register
def gnss_dop_cov(dset: "Dataset") -> None:
    """Adds dilution of precision (DOP) to dataset

    GDOP, PDOP, HDOP, VDOP and TDOP are derived from the estimated covariance
    matrix of the unknowns, scaled by the estimated variance factor, and are
    written to (or updated in) the given dataset.

    Args:
        dset: A Dataset containing model data.
    """
    # PDOP
    pdop = (
        np.sqrt(dset.estimate_cov_site_pos_xx + dset.estimate_cov_site_pos_yy + dset.estimate_cov_site_pos_zz)
        / dset.estimate_variance_factor
    )
    if "pdop" in dset.fields:
        dset["pdop"][:] = pdop
        log.debug(f"{_SECTION}: Update pdop field in Dataset.")
    else:
        dset.add_float("pdop", val=pdop)
    # GDOP
    gdop = np.sqrt(
        dset.estimate_cov_site_pos_xx
        + dset.estimate_cov_site_pos_yy
        + dset.estimate_cov_site_pos_zz
        + dset.estimate_cov_rcv_clock_tt
    ) / (dset.estimate_variance_factor)
    if "gdop" in dset.fields:
        dset["gdop"][:] = gdop  # bug fix: previously assigned pdop here
        log.debug(f"{_SECTION}: Update gdop field in Dataset.")
    else:
        dset.add_float("gdop", val=gdop)
    # TDOP
    tdop = np.sqrt(dset.estimate_cov_rcv_clock_tt) / dset.estimate_variance_factor
    if "tdop" in dset.fields:
        dset["tdop"][:] = tdop  # bug fix: previously assigned pdop here
        log.debug(f"{_SECTION}: Update tdop field in Dataset.")
    else:
        dset.add_float("tdop", val=tdop)
    # HDOP and VDOP
    #
    # Epochwise estimation or over whole time period
    dop_xyz = True  # hard-coded switch: True -> simple XYZ-based HDOP/VDOP
    if dop_xyz:
        # HDOP (xyz)
        hdop = np.sqrt(dset.estimate_cov_site_pos_xx + dset.estimate_cov_site_pos_yy) / dset.estimate_variance_factor
        # VDOP
        vdop = np.sqrt(dset.estimate_cov_site_pos_zz) / dset.estimate_variance_factor
    else:
        if config.tech.estimate_epochwise.bool:
            hdop = np.zeros(dset.num_obs)
            vdop = np.zeros(dset.num_obs)
            for epoch in sorted(set(dset.time.gps.mjd)):
                # build the symmetric XYZ covariance matrix for this epoch and
                # rotate it with the site's _enu2itrs matrix before reading out
                # the horizontal/vertical components
                cov_xyz = np.zeros((3, 3))
                idx = dset.time.gps.mjd == epoch
                cov_xyz[0][0] = dset.estimate_cov_site_pos_xx[idx][0]
                cov_xyz[0][1] = dset.estimate_cov_site_pos_xy[idx][0]
                cov_xyz[1][0] = dset.estimate_cov_site_pos_xy[idx][0]
                cov_xyz[0][2] = dset.estimate_cov_site_pos_xz[idx][0]
                cov_xyz[2][0] = dset.estimate_cov_site_pos_xz[idx][0]
                cov_xyz[1][1] = dset.estimate_cov_site_pos_yy[idx][0]
                cov_xyz[1][2] = dset.estimate_cov_site_pos_yz[idx][0]
                cov_xyz[2][1] = dset.estimate_cov_site_pos_yz[idx][0]
                cov_xyz[2][2] = dset.estimate_cov_site_pos_zz[idx][0]
                R = dset.site_pos._enu2itrs[idx][0]
                sigma0 = dset.estimate_variance_factor[idx][0]
                q_enu = R.T @ (cov_xyz / sigma0) @ R
                hdop[idx] = np.sqrt(q_enu[0][0] + q_enu[1][1])
                vdop[idx] = np.sqrt(q_enu[2][2])
        else:
            # single covariance matrix for the whole time period
            cov_xyz = np.zeros((3, 3))
            cov_xyz[0][0] = dset.estimate_cov_site_pos_xx[0]
            cov_xyz[0][1] = dset.estimate_cov_site_pos_xy[0]
            cov_xyz[1][0] = dset.estimate_cov_site_pos_xy[0]
            cov_xyz[0][2] = dset.estimate_cov_site_pos_xz[0]
            cov_xyz[2][0] = dset.estimate_cov_site_pos_xz[0]
            cov_xyz[1][1] = dset.estimate_cov_site_pos_yy[0]
            cov_xyz[1][2] = dset.estimate_cov_site_pos_yz[0]
            cov_xyz[2][1] = dset.estimate_cov_site_pos_yz[0]
            cov_xyz[2][2] = dset.estimate_cov_site_pos_zz[0]
            R = dset.site_pos._enu2itrs[0]
            sigma0 = dset.estimate_variance_factor[0]
            q_enu = R.T @ (cov_xyz / sigma0) @ R
            hdop = np.repeat(np.sqrt(q_enu[0][0] + q_enu[1][1]), dset.num_obs)
            vdop = np.repeat(np.sqrt(q_enu[2][2]), dset.num_obs)
    if "hdop" in dset.fields:
        dset["hdop"][:] = hdop
        log.debug(f"{_SECTION}: Update hdop field in Dataset.")
    else:
        dset.add_float("hdop", val=hdop)
    if "vdop" in dset.fields:
        dset["vdop"][:] = vdop
        log.debug(f"{_SECTION}: Update vdop field in Dataset.")
    else:
        dset.add_float("vdop", val=vdop)
| [
"where.lib.log.debug",
"numpy.zeros",
"numpy.sqrt"
] | [((748, 854), 'numpy.sqrt', 'np.sqrt', (['(dset.estimate_cov_site_pos_xx + dset.estimate_cov_site_pos_yy + dset.\n estimate_cov_site_pos_zz)'], {}), '(dset.estimate_cov_site_pos_xx + dset.estimate_cov_site_pos_yy +\n dset.estimate_cov_site_pos_zz)\n', (755, 854), True, 'import numpy as np\n'), ((967, 1022), 'where.lib.log.debug', 'log.debug', (['f"""{_SECTION}: Update pdop field in Dataset."""'], {}), "(f'{_SECTION}: Update pdop field in Dataset.')\n", (976, 1022), False, 'from where.lib import log\n'), ((1098, 1237), 'numpy.sqrt', 'np.sqrt', (['(dset.estimate_cov_site_pos_xx + dset.estimate_cov_site_pos_yy + dset.\n estimate_cov_site_pos_zz + dset.estimate_cov_rcv_clock_tt)'], {}), '(dset.estimate_cov_site_pos_xx + dset.estimate_cov_site_pos_yy +\n dset.estimate_cov_site_pos_zz + dset.estimate_cov_rcv_clock_tt)\n', (1105, 1237), True, 'import numpy as np\n'), ((1376, 1431), 'where.lib.log.debug', 'log.debug', (['f"""{_SECTION}: Update gdop field in Dataset."""'], {}), "(f'{_SECTION}: Update gdop field in Dataset.')\n", (1385, 1431), False, 'from where.lib import log\n'), ((1507, 1546), 'numpy.sqrt', 'np.sqrt', (['dset.estimate_cov_rcv_clock_tt'], {}), '(dset.estimate_cov_rcv_clock_tt)\n', (1514, 1546), True, 'import numpy as np\n'), ((1649, 1704), 'where.lib.log.debug', 'log.debug', (['f"""{_SECTION}: Update tdop field in Dataset."""'], {}), "(f'{_SECTION}: Update tdop field in Dataset.')\n", (1658, 1704), False, 'from where.lib import log\n'), ((4283, 4338), 'where.lib.log.debug', 'log.debug', (['f"""{_SECTION}: Update hdop field in Dataset."""'], {}), "(f'{_SECTION}: Update hdop field in Dataset.')\n", (4292, 4338), False, 'from where.lib import log\n'), ((4461, 4516), 'where.lib.log.debug', 'log.debug', (['f"""{_SECTION}: Update vdop field in Dataset."""'], {}), "(f'{_SECTION}: Update vdop field in Dataset.')\n", (4470, 4516), False, 'from where.lib import log\n'), ((1909, 1979), 'numpy.sqrt', 'np.sqrt', (['(dset.estimate_cov_site_pos_xx + 
dset.estimate_cov_site_pos_yy)'], {}), '(dset.estimate_cov_site_pos_xx + dset.estimate_cov_site_pos_yy)\n', (1916, 1979), True, 'import numpy as np\n'), ((2043, 2081), 'numpy.sqrt', 'np.sqrt', (['dset.estimate_cov_site_pos_zz'], {}), '(dset.estimate_cov_site_pos_zz)\n', (2050, 2081), True, 'import numpy as np\n'), ((2192, 2214), 'numpy.zeros', 'np.zeros', (['dset.num_obs'], {}), '(dset.num_obs)\n', (2200, 2214), True, 'import numpy as np\n'), ((2234, 2256), 'numpy.zeros', 'np.zeros', (['dset.num_obs'], {}), '(dset.num_obs)\n', (2242, 2256), True, 'import numpy as np\n'), ((3356, 3372), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (3364, 3372), True, 'import numpy as np\n'), ((2341, 2357), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (2349, 2357), True, 'import numpy as np\n'), ((3235, 3269), 'numpy.sqrt', 'np.sqrt', (['(q_enu[0][0] + q_enu[1][1])'], {}), '(q_enu[0][0] + q_enu[1][1])\n', (3242, 3269), True, 'import numpy as np\n'), ((3298, 3318), 'numpy.sqrt', 'np.sqrt', (['q_enu[2][2]'], {}), '(q_enu[2][2])\n', (3305, 3318), True, 'import numpy as np\n'), ((4098, 4132), 'numpy.sqrt', 'np.sqrt', (['(q_enu[0][0] + q_enu[1][1])'], {}), '(q_enu[0][0] + q_enu[1][1])\n', (4105, 4132), True, 'import numpy as np\n'), ((4177, 4197), 'numpy.sqrt', 'np.sqrt', (['q_enu[2][2]'], {}), '(q_enu[2][2])\n', (4184, 4197), True, 'import numpy as np\n')] |
from numpy.polynomial.polynomial import polypow
from time import time
def probability(dice_number, sides, target):
    """Return the probability, rounded to 4 decimals, that `dice_number`
    fair dice with `sides` faces sum exactly to `target`.

    The coefficient of x**target in (x + x**2 + ... + x**sides) ** dice_number
    counts the ways to roll that total.
    """
    # bug fix: a negative target previously wrapped around via Python's
    # negative indexing and returned a wrong nonzero probability; totals below
    # dice_number or above dice_number * sides are impossible
    if target < dice_number or target > dice_number * sides:
        return 0
    powers = [0] + [1] * sides
    poly = polypow(powers, dice_number)
    return round(poly[target] / sides ** dice_number, 4)
if __name__ == '__main__':
    # quick smoke run: print the result and how long it took to compute
    start = time()
    print(probability(10, 10, 60))
    print(time() - start)
| [
"numpy.polynomial.polynomial.polypow",
"time.time"
] | [((159, 187), 'numpy.polynomial.polynomial.polypow', 'polypow', (['powers', 'dice_number'], {}), '(powers, dice_number)\n', (166, 187), False, 'from numpy.polynomial.polynomial import polypow\n'), ((335, 341), 'time.time', 'time', ([], {}), '()\n', (339, 341), False, 'from time import time\n'), ((387, 393), 'time.time', 'time', ([], {}), '()\n', (391, 393), False, 'from time import time\n')] |
# -*- coding:UTF-8 -*-
# ---------------------------------------------------#
# Aim of the program:
# Create plots to compare groups of models
# FIG. 3 in Planton et al. 2020: Evaluating climate models with the CLIVAR 2020 ENSO metrics package. BAMS
# It uses the first available member of each model or all members of each model and averages them
# Updated json files (needed to create this plot) can be downloaded from the page "Summary statistics in Interactive
# Portrait Plots" at https://cmec.llnl.gov/results/enso/
# ---------------------------------------------------#
# ---------------------------------------------------#
# Import the right packages
# ---------------------------------------------------#
from __future__ import print_function
from copy import deepcopy
from numpy import array as NUMPYarray
from numpy import mean as NUMPYmean
from numpy import moveaxis as NUMPYmoveaxis
from numpy.ma import masked_invalid as NUMPYma__masked_invalid
from numpy.ma import masked_where as NUMPYma__masked_where
from os.path import join as OSpath__join
# set of functions to find cmip/obs files and save a json file
# to be adapted/changed by users depending on their environments
from driver_tools_lib import get_metric_values, get_mod_mem_json
# ENSO_metrics functions
from EnsoPlots.EnsoPlotTemplate import plot_projects_comparison
from EnsoPlots.EnsoPlotToolsLib import bootstrap, sort_metrics
# ---------------------------------------------------#
# Arguments
# ---------------------------------------------------#
# metric collections to plot
list_metric_collections = ["ENSO_perf", "ENSO_proc", "ENSO_tel"]
# CMIP experiment
experiment = "historical"
# project to use, here both CMIP5 and CMIP6 models will be used
list_projects = ["CMIP6", "CMIP5"]
# True to use the set of metric in the BAMS paper
# More metric have been computed and tested but not kept
reduced_set = True # False #
# False to projects defined in 'list_project'
# If set to True, all projects defined in 'list_project' will be used as one and will be compared to a given selection
# of models (see 'my_project' and 'my_selection')
big_ensemble = False # True #
# marker colors
# first color is for the first plotted group, second for the reference group
if big_ensemble is False:
    colors = ["r", "dodgerblue"]
else:
    colors = ["orange", "forestgreen"]
# True to use the first available member only
# If set to False, all members will be used and the metric values computed for all members of each model will be
# averaged
first_member = True # False #
# If 'big_ensemble' is set to True, 'BAMS_teleconnection' will be compared to 'CMIP', all projects defined in
# 'list_project' used as one
my_project = ["BAMS_teleconnection", "CMIP"]
# Definition of selection to use if 'big_ensemble' is set to True
my_selection = {
    "BAMS_teleconnection": [
        'CESM2', 'CESM2-FV2', 'CESM2-WACCM', 'CESM2-WACCM-FV2', 'CMCC-CM', 'CNRM-CM5', 'CNRM-CM5-2', 'EC-Earth3',
        'EC-Earth3-Veg', 'FGOALS-f3-L', 'FGOALS-s2', 'GFDL-CM4', 'GFDL-ESM4', 'MIROC-ES2L', 'MIROC6', 'NESM3',
        'NorESM2-MM']}
# List of additional observations
# the reading part is very 'ad hoc', do not change the obs!
list_obs = ["20CRv2", "NCEP2", "ERA-Interim"]
# computation version, 'v20200427' is provided with the package
version = "v20200427"
# json files
# paths of the json files holding metric values, keyed by project and metric collection
dict_json = {
    "CMIP5": {
        "ENSO_perf": "share/EnsoMetrics/cmip5_historical_ENSO_perf_" + version + "_allModels_allRuns.json",
        "ENSO_proc": "share/EnsoMetrics/cmip5_historical_ENSO_proc_" + version + "_allModels_allRuns.json",
        "ENSO_tel": "share/EnsoMetrics/cmip5_historical_ENSO_tel_" + version + "_allModels_allRuns.json"},
    "CMIP6": {
        "ENSO_perf": "share/EnsoMetrics/cmip6_historical_ENSO_perf_" + version + "_allModels_allRuns.json",
        "ENSO_proc": "share/EnsoMetrics/cmip6_historical_ENSO_proc_" + version + "_allModels_allRuns.json",
        "ENSO_tel": "share/EnsoMetrics/cmip6_historical_ENSO_tel_" + version + "_allModels_allRuns.json"},
    "obs2obs": {
        "ENSO_perf": "share/EnsoMetrics/obs2obs_ENSO_perf_" + version + ".json",
        "ENSO_proc": "share/EnsoMetrics/obs2obs_ENSO_proc_" + version + ".json",
        "ENSO_tel": "share/EnsoMetrics/obs2obs_ENSO_tel_" + version + ".json"}}
# figure name
path_out = ""
# the output figure name encodes collections, version, projects and member handling
figure_name = "metrics_intercomparison_" + str(len(list_metric_collections)) + "metric_collections_" + version
if len(list_projects) == 1:
    figure_name += "_" + str(list_projects[0])
else:
    figure_name += "_" + str(len(list_projects)) + "cmip"
if big_ensemble is False:
    figure_name += "_" + list_projects[1] + "_vs_" + list_projects[0]
else:
    figure_name += "_" + my_project[1] + "_vs_" + my_project[0]
if first_member is True:
    figure_name += "_first_member"
else:
    figure_name += "_members_averaged"
if reduced_set is False:
    figure_name += "_all_metrics"
figure_name = OSpath__join(path_out, figure_name)
# ---------------------------------------------------#
# ---------------------------------------------------#
# Functions
# ---------------------------------------------------#
def common_save(dict_in, dict_out=None):
    """Merge per-model metric dictionaries of `dict_in` into `dict_out`.

    Models missing from `dict_out` are added as-is; for models already
    present, individual metric entries are copied over.

    Args:
        dict_in: {model: {metric: value}} dictionary to merge in.
        dict_out: accumulator dictionary; a new one is created when None.
            (bug fix: was a mutable default argument `{}`, which Python
            shares across calls, silently accumulating state)

    Returns:
        The updated `dict_out`.
    """
    if dict_out is None:
        dict_out = {}
    for mod in dict_in.keys():
        if mod not in dict_out:
            dict_out[mod] = dict_in[mod]
        else:
            for met in dict_in[mod].keys():
                dict_out[mod][met] = dict_in[mod][met]
    return dict_out
# ---------------------------------------------------#
# ---------------------------------------------------#
# Main
# ---------------------------------------------------#
# get members by model by project from json file
# only metrics from models/members chosen here will be used
# all metrics from models/members chosen here will be used (ensures that if a model/member is not available for one or
# several metric collections, the corresponding line will still be created in the portraitplot)
model_by_proj = get_mod_mem_json(list_projects, list_metric_collections, dict_json, first_only=first_member)
# read json file
# dict_met[group][model][metric] = metric value
dict_met = dict()
if big_ensemble is False:
    # keep each project as its own group
    for proj in list_projects:
        dict_mc = dict()
        for mc in list_metric_collections:
            dict1 = get_metric_values(proj, mc, dict_json, model_by_proj, reduced_set=reduced_set)
            # save in common dictionary
            dict_mc = common_save(dict1, dict_out=dict_mc)
        dict_met[proj] = dict_mc
        del dict_mc
else:
    # merge all projects into a single 'CMIP' group
    dict_mc = dict()
    for proj in list_projects:
        for mc in list_metric_collections:
            dict1 = get_metric_values(proj, mc, dict_json, model_by_proj, reduced_set=reduced_set)
            # save in common dictionary
            dict_mc = common_save(dict1, dict_out=dict_mc)
    dict_met["CMIP"] = dict_mc
    # put the selected models in a separate key
    # NOTE(review): the bare except below treats any error as "key missing"
    for mod in my_selection[my_project[0]]:
        try: dict_met[my_project[0]]
        except: dict_met[my_project[0]] = {mod: dict_met["CMIP"][mod]}
        else: dict_met[my_project[0]][mod] = dict_met["CMIP"][mod]
    del dict_mc
# ---------------------------------------------------#
# Plot
# ---------------------------------------------------#
# NOTE(review): "if ' ':" is always truthy -- it looks like a manual on/off
# switch left in the code; confirm before removing
if ' ':
    # collect every metric name present in any group/model
    list_metrics = list()
    for k1 in dict_met.keys():
        for k2 in dict_met[k1].keys():
            list_metrics += dict_met[k1][k2].keys()
    list_metrics = sort_metrics(list(set(list_metrics)))
    opposed_groups = deepcopy(list_projects) if big_ensemble is False else deepcopy(my_project)
    # mean metric evaluation
    # 1e20 is used as a missing-value sentinel in the metric values
    tab_bst, tab_val = list(), list()
    for met in list_metrics:
        tab_tmp = list()
        for grp in opposed_groups:
            tab = list()
            for mod in dict_met[grp].keys():
                if met in dict_met[grp][mod].keys():
                    if dict_met[grp][mod][met] is not None and dict_met[grp][mod][met] != 1e20:
                        tab.append(dict_met[grp][mod][met])
            tab = NUMPYarray(tab)
            tab_tmp.append(NUMPYma__masked_invalid(tab).compressed())
            del tab
        tab1, tab2 = list(), list()
        for ii in range(len(tab_tmp)):
            tab1.append(float(NUMPYmean(tab_tmp[ii])))
            # NOTE(review): "nbr = nbr =" is a duplicated assignment (typo,
            # harmless); each group is bootstrapped with the sample size of
            # the other group
            nbr = nbr = len(tab_tmp[1]) if ii==0 else len(tab_tmp[0])
            bst = bootstrap(tab_tmp[ii], nech=nbr)
            tab2.append(bst)
            del bst, nbr
        tab_bst.append(tab2)
        tab_val.append(tab1)
    # reshape and normalize both arrays by the second group's mean values
    tab_bst = NUMPYmoveaxis(NUMPYarray(tab_bst), 0, 1)
    tab_bst = NUMPYma__masked_where(tab_bst == 1e20, tab_bst)
    tab_val = NUMPYmoveaxis(NUMPYarray(tab_val), 0, -1)
    tmp = NUMPYmoveaxis(NUMPYarray([tab_val[1], tab_val[1]]), 0, 1)
    tab_bst = tab_bst / tmp
    tab_val = tab_val / tab_val[1]
    # plot project comparison
    plot_projects_comparison(tab_val, figure_name, xticklabel=list_metrics, yticklabel=opposed_groups[1].upper(),
                             colors=colors, tab_bst=tab_bst, legend=opposed_groups, chigh=True, cfram=True)
    del list_metrics, opposed_groups, tab_bst, tab_val, tmp
| [
"driver_tools_lib.get_mod_mem_json",
"driver_tools_lib.get_metric_values",
"copy.deepcopy",
"EnsoPlots.EnsoPlotToolsLib.bootstrap",
"numpy.ma.masked_where",
"numpy.ma.masked_invalid",
"numpy.mean",
"numpy.array",
"os.path.join"
] | [((4842, 4877), 'os.path.join', 'OSpath__join', (['path_out', 'figure_name'], {}), '(path_out, figure_name)\n', (4854, 4877), True, 'from os.path import join as OSpath__join\n'), ((5849, 5945), 'driver_tools_lib.get_mod_mem_json', 'get_mod_mem_json', (['list_projects', 'list_metric_collections', 'dict_json'], {'first_only': 'first_member'}), '(list_projects, list_metric_collections, dict_json,\n first_only=first_member)\n', (5865, 5945), False, 'from driver_tools_lib import get_metric_values, get_mod_mem_json\n'), ((8390, 8438), 'numpy.ma.masked_where', 'NUMPYma__masked_where', (['(tab_bst == 1e+20)', 'tab_bst'], {}), '(tab_bst == 1e+20, tab_bst)\n', (8411, 8438), True, 'from numpy.ma import masked_where as NUMPYma__masked_where\n'), ((7324, 7347), 'copy.deepcopy', 'deepcopy', (['list_projects'], {}), '(list_projects)\n', (7332, 7347), False, 'from copy import deepcopy\n'), ((7378, 7398), 'copy.deepcopy', 'deepcopy', (['my_project'], {}), '(my_project)\n', (7386, 7398), False, 'from copy import deepcopy\n'), ((8349, 8368), 'numpy.array', 'NUMPYarray', (['tab_bst'], {}), '(tab_bst)\n', (8359, 8368), True, 'from numpy import array as NUMPYarray\n'), ((8466, 8485), 'numpy.array', 'NUMPYarray', (['tab_val'], {}), '(tab_val)\n', (8476, 8485), True, 'from numpy import array as NUMPYarray\n'), ((8518, 8554), 'numpy.array', 'NUMPYarray', (['[tab_val[1], tab_val[1]]'], {}), '([tab_val[1], tab_val[1]])\n', (8528, 8554), True, 'from numpy import array as NUMPYarray\n'), ((6122, 6200), 'driver_tools_lib.get_metric_values', 'get_metric_values', (['proj', 'mc', 'dict_json', 'model_by_proj'], {'reduced_set': 'reduced_set'}), '(proj, mc, dict_json, model_by_proj, reduced_set=reduced_set)\n', (6139, 6200), False, 'from driver_tools_lib import get_metric_values, get_mod_mem_json\n'), ((6474, 6552), 'driver_tools_lib.get_metric_values', 'get_metric_values', (['proj', 'mc', 'dict_json', 'model_by_proj'], {'reduced_set': 'reduced_set'}), '(proj, mc, dict_json, model_by_proj, 
reduced_set=reduced_set)\n', (6491, 6552), False, 'from driver_tools_lib import get_metric_values, get_mod_mem_json\n'), ((7852, 7867), 'numpy.array', 'NUMPYarray', (['tab'], {}), '(tab)\n', (7862, 7867), True, 'from numpy import array as NUMPYarray\n'), ((8176, 8208), 'EnsoPlots.EnsoPlotToolsLib.bootstrap', 'bootstrap', (['tab_tmp[ii]'], {'nech': 'nbr'}), '(tab_tmp[ii], nech=nbr)\n', (8185, 8208), False, 'from EnsoPlots.EnsoPlotToolsLib import bootstrap, sort_metrics\n'), ((8063, 8085), 'numpy.mean', 'NUMPYmean', (['tab_tmp[ii]'], {}), '(tab_tmp[ii])\n', (8072, 8085), True, 'from numpy import mean as NUMPYmean\n'), ((7895, 7923), 'numpy.ma.masked_invalid', 'NUMPYma__masked_invalid', (['tab'], {}), '(tab)\n', (7918, 7923), True, 'from numpy.ma import masked_invalid as NUMPYma__masked_invalid\n')] |
# Adapted from score written by wkentaro
# https://github.com/wkentaro/pytorch-fcn/blob/master/torchfcn/utils.py
import numpy as np
class runningScore(object):
    """Accumulates a confusion matrix over batches and derives
    segmentation scores (overall/mean accuracy, mean IoU, fwavacc)."""

    def __init__(self, n_classes):
        self.n_classes = n_classes
        self.confusion_matrix = np.zeros((n_classes, n_classes))

    def _fast_hist(self, label_true, label_pred, n_class):
        # Count (true, pred) label pairs, ignoring out-of-range true labels.
        valid = (label_true >= 0) & (label_true < n_class)
        combined = n_class * label_true[valid].astype(int) + label_pred[valid]
        return np.bincount(combined, minlength=n_class ** 2).reshape(n_class, n_class)

    def update(self, label_trues, label_preds):
        """Fold a batch of (ground truth, prediction) maps into the matrix."""
        for truth, pred in zip(label_trues, label_preds):
            self.confusion_matrix += self._fast_hist(truth.flatten(), pred.flatten(), self.n_classes)

    def get_scores(self):
        """Returns accuracy score evaluation result.
            - overall accuracy
            - mean accuracy
            - mean IU
            - fwavacc
        """
        hist = self.confusion_matrix
        diag = np.diag(hist)
        acc = diag.sum() / hist.sum()
        acc_cls = np.nanmean(diag / hist.sum(axis=1))
        iu = diag / (hist.sum(axis=1) + hist.sum(axis=0) - diag)
        mean_iu = np.nanmean(iu)
        freq = hist.sum(axis=1) / hist.sum()
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
        cls_iu = dict(zip(range(self.n_classes), iu))
        return (
            {
                "Overall Acc: \t": acc,
                "Mean Acc : \t": acc_cls,
                "FreqW Acc : \t": fwavacc,
                "Mean IoU : \t": mean_iu,
            },
            cls_iu,
        )

    def reset(self):
        """Zero the accumulated confusion matrix."""
        self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))
class averageMeter(object):
    """Tracks the latest value together with a running sum, count and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
class heightacc(object):
    """Accumulates height-regression errors batch by batch and reports
    aggregate R2, RMSE, MAE and signed error (R2 cannot be accumulated
    directly, so its ingredients are tracked and combined at the end)."""

    def __init__(self):
        self.reset()

    def reset(self):
        # sample-weighted running sums; divided by self.count in getacc()
        self.mse = 0
        self.se = 0
        self.mae = 0
        self.count = 0
        self.yrefmean = 0
        self.ypref2 = 0

    def update(self, ypred, yref, num):
        """Fold in the errors of a batch of `num` samples."""
        diff = ypred - yref
        self.se += np.mean(diff) * num
        self.mae += np.mean(np.abs(diff)) * num
        self.mse += np.mean(diff ** 2) * num
        self.yrefmean += np.mean(yref) * num
        self.ypref2 += np.mean(yref ** 2) * num
        self.count += num

    def getacc(self):
        """Return (r2, rmse, mae, se) over everything accumulated so far."""
        se = self.se / self.count
        mae = self.mae / self.count
        mse = self.mse / self.count
        rmse = np.sqrt(mse)
        mean_ref = self.yrefmean / self.count
        mean_ref2 = self.ypref2 / self.count
        # R2 = 1 - MSE / Var(yref)
        r2 = 1 - mse / (mean_ref2 - mean_ref ** 2)
        return r2, rmse, mae, se
| [
"numpy.abs",
"numpy.zeros",
"numpy.mean",
"numpy.nanmean",
"numpy.diag",
"numpy.sqrt"
] | [((265, 297), 'numpy.zeros', 'np.zeros', (['(n_classes, n_classes)'], {}), '((n_classes, n_classes))\n', (273, 297), True, 'import numpy as np\n'), ((1141, 1160), 'numpy.nanmean', 'np.nanmean', (['acc_cls'], {}), '(acc_cls)\n', (1151, 1160), True, 'import numpy as np\n'), ((1262, 1276), 'numpy.nanmean', 'np.nanmean', (['iu'], {}), '(iu)\n', (1272, 1276), True, 'import numpy as np\n'), ((1730, 1772), 'numpy.zeros', 'np.zeros', (['(self.n_classes, self.n_classes)'], {}), '((self.n_classes, self.n_classes))\n', (1738, 1772), True, 'import numpy as np\n'), ((3023, 3035), 'numpy.sqrt', 'np.sqrt', (['mse'], {}), '(mse)\n', (3030, 3035), True, 'import numpy as np\n'), ((1090, 1103), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (1097, 1103), True, 'import numpy as np\n'), ((1174, 1187), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (1181, 1187), True, 'import numpy as np\n'), ((2536, 2557), 'numpy.mean', 'np.mean', (['(ypred - yref)'], {}), '(ypred - yref)\n', (2543, 2557), True, 'import numpy as np\n'), ((2634, 2662), 'numpy.mean', 'np.mean', (['((ypred - yref) ** 2)'], {}), '((ypred - yref) ** 2)\n', (2641, 2662), True, 'import numpy as np\n'), ((2758, 2771), 'numpy.mean', 'np.mean', (['yref'], {}), '(yref)\n', (2765, 2771), True, 'import numpy as np\n'), ((2800, 2818), 'numpy.mean', 'np.mean', (['(yref ** 2)'], {}), '(yref ** 2)\n', (2807, 2818), True, 'import numpy as np\n'), ((1229, 1242), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (1236, 1242), True, 'import numpy as np\n'), ((2589, 2609), 'numpy.abs', 'np.abs', (['(ypred - yref)'], {}), '(ypred - yref)\n', (2595, 2609), True, 'import numpy as np\n'), ((1039, 1052), 'numpy.diag', 'np.diag', (['hist'], {}), '(hist)\n', (1046, 1052), True, 'import numpy as np\n')] |
import cv2
import numpy as np
class OpencvBruteForceMatcher(object):
    """Brute-force descriptor matching backed by cv2.BFMatcher."""

    name = 'opencv_brute_force_matcher'

    # supported distance norms, keyed by a short name
    distances = {
        'l2': cv2.NORM_L2,
        'hamming': cv2.NORM_HAMMING,
    }

    def __init__(self, distance='l2'):
        self._matcher = cv2.BFMatcher(self.distances[distance])

    @staticmethod
    def _check_descriptors(descs):
        # descriptors must be a 2-D numpy array (num_features x descriptor_dim)
        assert isinstance(descs, np.ndarray), type(descs)
        assert len(descs.shape) == 2, descs.shape

    def match(self, descs1, descs2):
        """Compute brute force matches between two sets of descriptors.
        """
        self._check_descriptors(descs1)
        self._check_descriptors(descs2)
        return self._matcher.match(descs1, descs2)

    def match_putative(self, descs1, descs2, knn=2, threshold_ratio=0.7):
        """Compute putatives matches betweem two sets of descriptors.
        """
        self._check_descriptors(descs1)
        self._check_descriptors(descs2)
        knn_matches = self._matcher.knnMatch(descs1, descs2, k=knn)
        # apply Lowe's ratio test: keep a match only when it is clearly
        # better than its runner-up
        return [m for m, n in knn_matches if m.distance < threshold_ratio * n.distance]

    def convert_opencv_matches_to_numpy(self, matches):
        """Returns a np.ndarray array with points indices correspondences
        with the shape of Nx2 which each N feature is a vector containing
        the keypoints id [id_ref, id_dst].
        """
        assert isinstance(matches, list), type(matches)
        for match in matches:
            assert isinstance(match, cv2.DMatch), type(match)
        return np.asarray([[m.queryIdx, m.trainIdx] for m in matches])
"numpy.asarray",
"cv2.BFMatcher"
] | [((278, 317), 'cv2.BFMatcher', 'cv2.BFMatcher', (['self.distances[distance]'], {}), '(self.distances[distance])\n', (291, 317), False, 'import cv2\n'), ((1937, 1964), 'numpy.asarray', 'np.asarray', (['correspondences'], {}), '(correspondences)\n', (1947, 1964), True, 'import numpy as np\n')] |
import numpy as np
from utils import *
class RandomPlayer():
def __init__(self, game):
self.game = game
def play(self, board):
a = np.random.randint(self.game.getActionSize())
valids = self.game.getValidMoves(board, 1)
while valids[a]!=1:
a = np.random.randint(self.game.getActionSize())
return a
class HumanTKPlayer():
def __init__(self, game):
self.game = game
def play(self, board):
valid = self.game.getValidMoves(board, 1)
print("valid?\t" + '\t'.join(map(int_to_bool_string, valid)))
print("number\t" + '\t'.join(map(str, range(len(valid)))))
print("Enter any number of valid action, marked " + int_to_bool_string(1) + ":")
print("\n")
action_number = None
while True:
a = input()
try:
action_number = int(a)
except ValueError:
print(red("Error: Not a number"))
continue
if action_number < 0 or action_number > len(valid):
print(str(action_number) + ' is out of range')
continue
if valid[action_number] == 0:
print(str(action_number) + ' is invalid action')
continue
print("Selected action is: " + green(action_number))
break
return action_number
import random
from tk.TKGame import Board
class HeuristicPlayer():
INVALID_ACTION_REWARD = -1
def random_argmax(self, array):
MAX_DIFF = 1
arg_max = np.argmax(array)
max_value = array[arg_max]
max_value_ids = [arg_max,arg_max,arg_max]
for idx, value in enumerate(array):
if value != self.INVALID_ACTION_REWARD and max_value - value <= MAX_DIFF:
max_value_ids.append(idx)
return random.choice(max_value_ids)
def play(self, encoded_state):
board = Board()
board.set_encoded_state(encoded_state)
player = 1
validMoves = board.get_legal_moves(player)
current_score = board.get_players_scores()[player]
rewards = []
for action, valid in enumerate(validMoves):
if valid == 1:
next_board = Board()
next_board.set_encoded_state(encoded_state)
next_board.execute_move(action, player)
next_score = next_board.get_players_scores()[player]
reward = next_score - current_score
rewards.append(reward)
else:
rewards.append(self.INVALID_ACTION_REWARD) # invalid action
validRewards = map(lambda x: x if x != -1 else 0,rewards)
if sum(validRewards) == 0:
validPieces = board.get_pieces()
for action,valid in enumerate(validMoves):
if valid == 0:
validPieces[action] = 0
return np.argmax(validPieces)
else:
action = self.random_argmax(rewards)
return action
def int_to_bool_string(int):
return "\033[32mYES\033[0m" if int > 0 else "\033[31mNO\033[0m" | [
"tk.TKGame.Board",
"random.choice",
"numpy.argmax"
] | [((1577, 1593), 'numpy.argmax', 'np.argmax', (['array'], {}), '(array)\n', (1586, 1593), True, 'import numpy as np\n'), ((1869, 1897), 'random.choice', 'random.choice', (['max_value_ids'], {}), '(max_value_ids)\n', (1882, 1897), False, 'import random\n'), ((1952, 1959), 'tk.TKGame.Board', 'Board', ([], {}), '()\n', (1957, 1959), False, 'from tk.TKGame import Board\n'), ((2942, 2964), 'numpy.argmax', 'np.argmax', (['validPieces'], {}), '(validPieces)\n', (2951, 2964), True, 'import numpy as np\n'), ((2266, 2273), 'tk.TKGame.Board', 'Board', ([], {}), '()\n', (2271, 2273), False, 'from tk.TKGame import Board\n')] |
from __future__ import absolute_import, division, print_function
import logging
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
level=logging.INFO,
stream=sys.stderr)
import json
import os
if os.path.isfile("../../code/config.json"):
with open("../../code/config.json", "r") as f:
config = json.load(f)
config['data_dir'] = '../' + config['data_dir']
config['julia_pkg_dir'] = '../' + config['julia_pkg_dir']
config['models_dir'] = '../' + config['models_dir']
else:
logging.error("Please run setup.py in this directory before running any .ipynb's.")
_ARGS_LENGTH = 7
if len(sys.argv) == _ARGS_LENGTH:
os.environ["CUDA_VISIBLE_DEVICES"] = sys.argv[1]
else:
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from glob import glob
import tensorflow as tf
import time
import shutil
import cPickle as pickle
from collections import OrderedDict, defaultdict
from tensorflow.python.client import device_lib
logging.info(device_lib.list_local_devices())
sys.path.append("../../code")
sys.path.append("..")
from utils.bags import *
from utils.learning import *
from multimodal_generative_model import *
from st_graph import *
from data_utils import *
from stg_node import *
from experiment_details import get_output_ckpts_dir_name
if len(sys.argv) == _ARGS_LENGTH:
NUM_DATAFILES = int(sys.argv[2])
ROWS_TO_EXTRACT = sys.argv[3]
EDGE_RADIUS = float(sys.argv[4])
EDGE_STATE_COMBINE_METHOD = sys.argv[5]
EDGE_INFLUENCE_COMBINE_METHOD = sys.argv[6]
else:
from experiment_details import NUM_DATAFILES, ROWS_TO_EXTRACT, EDGE_RADIUS, EDGE_STATE_COMBINE_METHOD, EDGE_INFLUENCE_COMBINE_METHOD
logging.info('NUM_DATAFILES = %d' % NUM_DATAFILES)
logging.info('ROWS_TO_EXTRACT = %s' % str(ROWS_TO_EXTRACT))
def memory(prefix=''):
import psutil
pid = os.getpid()
py = psutil.Process(pid)
memoryUse = py.memory_info()[0]/2.**30 # memory use in GB...I think
logging.info(prefix + ' memory use: ' + str(memoryUse))
class Runner(object):
def setup(self):
logging.info(config)
model_dir = os.path.join(config["models_dir"], get_output_ckpts_dir_name(NUM_DATAFILES,
ROWS_TO_EXTRACT,
EDGE_RADIUS,
EDGE_STATE_COMBINE_METHOD,
EDGE_INFLUENCE_COMBINE_METHOD))
#logging.warn('Deleting %s!' % model_dir)
#shutil.rmtree(model_dir, ignore_errors=True)
sc = tf.ConfigProto(device_count={'GPU': 1},
allow_soft_placement=True)
rc = tf.estimator.RunConfig().replace(session_config=sc,
model_dir=model_dir,
save_summary_steps=10,
save_checkpoints_steps=10,
log_step_count_steps=10,
keep_checkpoint_max=None,
tf_random_seed=None)
# required due to a bug in tf.contrib.learn.Experiment.train_and_evaluate
rc.environment = None
self.rc = rc
self.model_dir = model_dir
logging.info(model_dir)
logging.info('[setup] Done!')
def load_data_and_define_model(self):
self.robot_node = robot_node = STGNode('Al Horford', 'HomeC')
positions_map_path = os.path.join(config['data_dir'], "positions_map.pkl")
pos_dict_path = os.path.join(config['data_dir'], "pos_dict_%d_files_%s_rows.pkl" % (NUM_DATAFILES, str(ROWS_TO_EXTRACT)))
with open(pos_dict_path, 'rb') as f:
pos_dict = pickle.load(f)
STG = SpatioTemporalGraphCVAE(pos_dict, robot_node,
edge_radius=EDGE_RADIUS,
edge_state_combine_method=EDGE_STATE_COMBINE_METHOD,
edge_influence_combine_method=EDGE_INFLUENCE_COMBINE_METHOD)
train_data_dict_path = os.path.join(config['data_dir'], "train_data_dict_%d_files_%s_rows.pkl" % (NUM_DATAFILES, str(ROWS_TO_EXTRACT)))
with open(train_data_dict_path, 'rb') as f:
train_data_dict = pickle.load(f)
hps.add_hparam("nodes_standardization", train_data_dict["nodes_standardization"])
hps.add_hparam("extras_standardization", {"mean": train_data_dict["extras_mean"],
"std": train_data_dict["extras_std"]})
hps.add_hparam("labels_standardization", train_data_dict["labels_standardization"])
hps.add_hparam("pred_indices", train_data_dict["pred_indices"])
eval_data_dict_path = os.path.join(config['data_dir'], "eval_data_dict_%d_files_%s_rows.pkl" % (NUM_DATAFILES, str(ROWS_TO_EXTRACT)))
with open(eval_data_dict_path, 'rb') as f:
eval_data_dict = pickle.load(f)
train_input_function = tf.estimator.inputs.numpy_input_fn(train_data_dict["input_dict"],
y = train_data_dict["labels"],
batch_size = hps.batch_size,
num_epochs = None,
shuffle = True)
# Need all possible nodes to have been seen by the STG above, does
# that mean we feed in the all_files pos_dict in order to create
# the nodes ahead of time?
token_eval_node = None
token_eval_label_node = None
for node in eval_data_dict["input_dict"]:
if isinstance(node, STGNode):
token_eval_node = node
token_eval_label_node = convert_to_label_node(node)
break
for node in train_data_dict["input_dict"]:
if isinstance(node, STGNode):
if node not in eval_data_dict["input_dict"]:
eval_data_dict["input_dict"][node] = np.zeros_like(eval_data_dict["input_dict"][token_eval_node])
eval_data_dict["labels"][convert_to_label_node(node)] = np.zeros_like(eval_data_dict["labels"][token_eval_label_node])
eval_input_function = tf.estimator.inputs.numpy_input_fn(eval_data_dict["input_dict"],
y = eval_data_dict["labels"],
batch_size = 4,
num_epochs = 1,
shuffle = False)
self.STG = STG
self.hps = hps
self.train_input_function = train_input_function
self.eval_input_function = eval_input_function
self.train_data_dict = train_data_dict
logging.info('[load_data_and_define_model] Done!')
def setup_model(self):
train_input_function = self.train_input_function
eval_input_function = self.eval_input_function
laneswap_model = self.STG
self.nn = nn = tf.estimator.Estimator(laneswap_model.model_fn, params=self.hps,
config=self.rc, model_dir=self.model_dir)
self.experiment = tf.contrib.learn.Experiment(nn, train_input_function, eval_input_function,
eval_steps=None)
logging.info('[setup_model] Done!')
# @profile
def train_and_evaluate(self):
logging.info('[train_and_evaluate] Started!')
self.experiment.train()
logging.info('[train_and_evaluate] Done!')
def print_num_params(self, level=2):
nn = self.nn
variable_names = nn.get_variable_names()
if level == 0:
# Total number of parameters
num_params = np.sum([np.prod(nn.get_variable_value(var_name).shape) for var_name in variable_names]).astype(int)
logging.info("Total number of parameters: {:,}".format(num_params))
else:
node_type_params = defaultdict(int)
for variable_name in variable_names:
key = '/'.join(variable_name.split('/')[:level])
node_type_params[key] += np.prod(nn.get_variable_value(variable_name).shape).astype(int)
for (key, value) in node_type_params.iteritems():
logging.info("{}: {:,}".format(key, value))
logging.info("-"*40)
# @profile
def save_model(self):
nodes = [node for node in self.train_data_dict["input_dict"] if isinstance(node, STGNode)]
state_dim = self.train_data_dict["input_dict"][nodes[0]].shape[2]
extras_dim = self.train_data_dict["input_dict"]["extras"].shape[2]
ph = self.hps.prediction_horizon
with tf.Graph().as_default():
input_dict = {"extras": tf.placeholder(tf.float32, shape=[1, None, extras_dim], name="extras"),
"sample_ct": tf.placeholder(tf.int32, shape=[1], name="sample_ct"),
"traj_lengths": tf.placeholder(tf.int32, shape=[1], name="traj_lengths")}
for node in nodes:
input_dict[str(node)] = tf.placeholder(tf.float32, shape=[1, None, state_dim], name=str(node))
input_dict[str(self.robot_node) + "_future"] = tf.placeholder(tf.float32,
shape=[None, ph, state_dim],
name=str(self.robot_node) + "_future")
serving_input_receiver_fn = tf.estimator.export.build_raw_serving_input_receiver_fn(input_dict)
save_path = self.nn.export_savedmodel(config["models_dir"], serving_input_receiver_fn)
logging.info('[save_model] Done! Saved to %s' % save_path)
def run(self):
self.setup()
self.load_data_and_define_model()
self.setup_model()
self.train_and_evaluate()
self.print_num_params(level=0)
self.print_num_params(level=1)
self.print_num_params(level=2)
self.save_model()
def main():
runner = Runner()
runner.run()
if __name__ == "__main__":
main()
| [
"tensorflow.estimator.export.build_raw_serving_input_receiver_fn",
"cPickle.load",
"collections.defaultdict",
"tensorflow.ConfigProto",
"os.path.isfile",
"tensorflow.estimator.Estimator",
"experiment_details.get_output_ckpts_dir_name",
"os.path.join",
"sys.path.append",
"logging.error",
"numpy.z... | [((91, 234), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s: %(message)s"""', 'datefmt': '"""%m/%d/%Y %I:%M:%S %p"""', 'level': 'logging.INFO', 'stream': 'sys.stderr'}), "(format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO, stream=sys.stderr)\n", (110, 234), False, 'import logging\n'), ((319, 359), 'os.path.isfile', 'os.path.isfile', (['"""../../code/config.json"""'], {}), "('../../code/config.json')\n", (333, 359), False, 'import os\n'), ((1188, 1217), 'sys.path.append', 'sys.path.append', (['"""../../code"""'], {}), "('../../code')\n", (1203, 1217), False, 'import sys\n'), ((1218, 1239), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (1233, 1239), False, 'import sys\n'), ((1843, 1893), 'logging.info', 'logging.info', (["('NUM_DATAFILES = %d' % NUM_DATAFILES)"], {}), "('NUM_DATAFILES = %d' % NUM_DATAFILES)\n", (1855, 1893), False, 'import logging\n'), ((634, 722), 'logging.error', 'logging.error', (['"""Please run setup.py in this directory before running any .ipynb\'s."""'], {}), '(\n "Please run setup.py in this directory before running any .ipynb\'s.")\n', (647, 722), False, 'import logging\n'), ((1154, 1185), 'tensorflow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', ([], {}), '()\n', (1183, 1185), False, 'from tensorflow.python.client import device_lib\n'), ((2007, 2018), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2016, 2018), False, 'import os\n'), ((2028, 2047), 'psutil.Process', 'psutil.Process', (['pid'], {}), '(pid)\n', (2042, 2047), False, 'import psutil\n'), ((429, 441), 'json.load', 'json.load', (['f'], {}), '(f)\n', (438, 441), False, 'import json\n'), ((2243, 2263), 'logging.info', 'logging.info', (['config'], {}), '(config)\n', (2255, 2263), False, 'import logging\n'), ((2903, 2969), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'GPU': 1}", 
'allow_soft_placement': '(True)'}), "(device_count={'GPU': 1}, allow_soft_placement=True)\n", (2917, 2969), True, 'import tensorflow as tf\n'), ((3687, 3710), 'logging.info', 'logging.info', (['model_dir'], {}), '(model_dir)\n', (3699, 3710), False, 'import logging\n'), ((3719, 3748), 'logging.info', 'logging.info', (['"""[setup] Done!"""'], {}), "('[setup] Done!')\n", (3731, 3748), False, 'import logging\n'), ((3892, 3945), 'os.path.join', 'os.path.join', (["config['data_dir']", '"""positions_map.pkl"""'], {}), "(config['data_dir'], 'positions_map.pkl')\n", (3904, 3945), False, 'import os\n'), ((5429, 5590), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', (["train_data_dict['input_dict']"], {'y': "train_data_dict['labels']", 'batch_size': 'hps.batch_size', 'num_epochs': 'None', 'shuffle': '(True)'}), "(train_data_dict['input_dict'], y=\n train_data_dict['labels'], batch_size=hps.batch_size, num_epochs=None,\n shuffle=True)\n", (5463, 5590), True, 'import tensorflow as tf\n'), ((6772, 6912), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', (["eval_data_dict['input_dict']"], {'y': "eval_data_dict['labels']", 'batch_size': '(4)', 'num_epochs': '(1)', 'shuffle': '(False)'}), "(eval_data_dict['input_dict'], y=\n eval_data_dict['labels'], batch_size=4, num_epochs=1, shuffle=False)\n", (6806, 6912), True, 'import tensorflow as tf\n'), ((7407, 7457), 'logging.info', 'logging.info', (['"""[load_data_and_define_model] Done!"""'], {}), "('[load_data_and_define_model] Done!')\n", (7419, 7457), False, 'import logging\n'), ((7665, 7776), 'tensorflow.estimator.Estimator', 'tf.estimator.Estimator', (['laneswap_model.model_fn'], {'params': 'self.hps', 'config': 'self.rc', 'model_dir': 'self.model_dir'}), '(laneswap_model.model_fn, params=self.hps, config=\n self.rc, model_dir=self.model_dir)\n', (7687, 7776), True, 'import tensorflow as tf\n'), ((7845, 7940), 'tensorflow.contrib.learn.Experiment', 
'tf.contrib.learn.Experiment', (['nn', 'train_input_function', 'eval_input_function'], {'eval_steps': 'None'}), '(nn, train_input_function, eval_input_function,\n eval_steps=None)\n', (7872, 7940), True, 'import tensorflow as tf\n'), ((7999, 8034), 'logging.info', 'logging.info', (['"""[setup_model] Done!"""'], {}), "('[setup_model] Done!')\n", (8011, 8034), False, 'import logging\n'), ((8106, 8151), 'logging.info', 'logging.info', (['"""[train_and_evaluate] Started!"""'], {}), "('[train_and_evaluate] Started!')\n", (8118, 8151), False, 'import logging\n'), ((8192, 8234), 'logging.info', 'logging.info', (['"""[train_and_evaluate] Done!"""'], {}), "('[train_and_evaluate] Done!')\n", (8204, 8234), False, 'import logging\n'), ((9052, 9074), 'logging.info', 'logging.info', (["('-' * 40)"], {}), "('-' * 40)\n", (9064, 9074), False, 'import logging\n'), ((10478, 10536), 'logging.info', 'logging.info', (["('[save_model] Done! Saved to %s' % save_path)"], {}), "('[save_model] Done! Saved to %s' % save_path)\n", (10490, 10536), False, 'import logging\n'), ((2320, 2452), 'experiment_details.get_output_ckpts_dir_name', 'get_output_ckpts_dir_name', (['NUM_DATAFILES', 'ROWS_TO_EXTRACT', 'EDGE_RADIUS', 'EDGE_STATE_COMBINE_METHOD', 'EDGE_INFLUENCE_COMBINE_METHOD'], {}), '(NUM_DATAFILES, ROWS_TO_EXTRACT, EDGE_RADIUS,\n EDGE_STATE_COMBINE_METHOD, EDGE_INFLUENCE_COMBINE_METHOD)\n', (2345, 2452), False, 'from experiment_details import get_output_ckpts_dir_name\n'), ((4145, 4159), 'cPickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4156, 4159), True, 'import cPickle as pickle\n'), ((4702, 4716), 'cPickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4713, 4716), True, 'import cPickle as pickle\n'), ((5374, 5388), 'cPickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5385, 5388), True, 'import cPickle as pickle\n'), ((8684, 8700), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (8695, 8700), False, 'from collections import OrderedDict, defaultdict\n'), ((10294, 
10361), 'tensorflow.estimator.export.build_raw_serving_input_receiver_fn', 'tf.estimator.export.build_raw_serving_input_receiver_fn', (['input_dict'], {}), '(input_dict)\n', (10349, 10361), True, 'import tensorflow as tf\n'), ((3012, 3036), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {}), '()\n', (3034, 3036), True, 'import tensorflow as tf\n'), ((9493, 9563), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[1, None, extras_dim]', 'name': '"""extras"""'}), "(tf.float32, shape=[1, None, extras_dim], name='extras')\n", (9507, 9563), True, 'import tensorflow as tf\n'), ((9604, 9657), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[1]', 'name': '"""sample_ct"""'}), "(tf.int32, shape=[1], name='sample_ct')\n", (9618, 9657), True, 'import tensorflow as tf\n'), ((9701, 9757), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[1]', 'name': '"""traj_lengths"""'}), "(tf.int32, shape=[1], name='traj_lengths')\n", (9715, 9757), True, 'import tensorflow as tf\n'), ((6541, 6601), 'numpy.zeros_like', 'np.zeros_like', (["eval_data_dict['input_dict'][token_eval_node]"], {}), "(eval_data_dict['input_dict'][token_eval_node])\n", (6554, 6601), True, 'import numpy as np\n'), ((6678, 6740), 'numpy.zeros_like', 'np.zeros_like', (["eval_data_dict['labels'][token_eval_label_node]"], {}), "(eval_data_dict['labels'][token_eval_label_node])\n", (6691, 6740), True, 'import numpy as np\n'), ((9432, 9442), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (9440, 9442), True, 'import tensorflow as tf\n')] |
import time
import numpy as np
class CalcTime(object):
def __init__(self, print_every_toc=True, between_toc=False):
self.__print_every_toc = print_every_toc
self.__between_toc = between_toc
self.__num = 0
self.__nameList = []
self.__start_dic = {}
self.__time_dic = {}
def refresh():
self.__num = 0
self.__nameList = []
self.__start_dic = {}
self.__time_dic = {}
def tic(self, name=None):
if name is None:
name = self.__num
self.__nameList.append(name)
self.__num += 1
else:
if name not in self.__nameList:
self.__nameList.append(name)
self.__start_dic[name] = time.time()
def toc(self, name=None):
tmp = time.time()
if name is None:
name = self.__nameList.pop()
else:
if name in self.__start_dic:
if name in self.__nameList:
self.__nameList.remove(name)
else:
raise('Warning: No tic() matched')
tmp -= self.__start_dic[name]
last = 0
if name in self.__time_dic:
last = self.__time_dic[name][-1]
self.__time_dic[name] = np.append(self.__time_dic[name], tmp)
else:
self.__time_dic[name] = np.array([tmp])
if self.__print_every_toc:
if self.__between_toc:
print('{} time: {:.4f}s'.format(name,tmp-last))
else:
print('{} time: {:.4f}s'.format(name,tmp))
return tmp
def show(self,delta_time=False):
np.set_printoptions(threshold=np.nan)
for name in self.__time_dic:
if len(self.__time_dic[name]) == 1:
print('{}\t time : {}s'.format(name, np.round(self.__time_dic[name][0],4)))
else:
if delta_time:
delta_time = self.__time_dic[name].copy()
delta_time[1:] -= self.__time_dic[name][:-1]
print('{}\t Total: {}s, Mean: {}s \t delta_times: {}s'.format(name,
np.round(np.sum(delta_time), 4), np.round(np.mean(delta_time), 4), np.round(delta_time, 4),))
else:
print('{}\t cumul_times: {}s'.format(name, np.round(self.__time_dic[name], 4),))
if __name__ == "__main__":
ct = CalcTime(False)
ct.tic()
ct.toc(0)
time.sleep(0.1)
ct.toc(0)
time.sleep(0.2)
ct.toc(0)
ct.show()
ct.show(True)
| [
"numpy.set_printoptions",
"numpy.sum",
"time.time",
"time.sleep",
"numpy.append",
"numpy.mean",
"numpy.array",
"numpy.round"
] | [((2495, 2510), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2505, 2510), False, 'import time\n'), ((2529, 2544), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (2539, 2544), False, 'import time\n'), ((748, 759), 'time.time', 'time.time', ([], {}), '()\n', (757, 759), False, 'import time\n'), ((805, 816), 'time.time', 'time.time', ([], {}), '()\n', (814, 816), False, 'import time\n'), ((1665, 1702), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.nan'}), '(threshold=np.nan)\n', (1684, 1702), True, 'import numpy as np\n'), ((1272, 1309), 'numpy.append', 'np.append', (['self.__time_dic[name]', 'tmp'], {}), '(self.__time_dic[name], tmp)\n', (1281, 1309), True, 'import numpy as np\n'), ((1360, 1375), 'numpy.array', 'np.array', (['[tmp]'], {}), '([tmp])\n', (1368, 1375), True, 'import numpy as np\n'), ((1841, 1878), 'numpy.round', 'np.round', (['self.__time_dic[name][0]', '(4)'], {}), '(self.__time_dic[name][0], 4)\n', (1849, 1878), True, 'import numpy as np\n'), ((2239, 2262), 'numpy.round', 'np.round', (['delta_time', '(4)'], {}), '(delta_time, 4)\n', (2247, 2262), True, 'import numpy as np\n'), ((2351, 2385), 'numpy.round', 'np.round', (['self.__time_dic[name]', '(4)'], {}), '(self.__time_dic[name], 4)\n', (2359, 2385), True, 'import numpy as np\n'), ((2181, 2199), 'numpy.sum', 'np.sum', (['delta_time'], {}), '(delta_time)\n', (2187, 2199), True, 'import numpy as np\n'), ((2214, 2233), 'numpy.mean', 'np.mean', (['delta_time'], {}), '(delta_time)\n', (2221, 2233), True, 'import numpy as np\n')] |
"""
Implementation of Moving Least Squares transformation.
Powered by molesq, an optional dependency.
"""
from typing import Optional
import numpy as np
from molesq.transform import Transformer as _Transformer
from ..base import Transform
from ..util import SpaceRef
class MovingLeastSquares(Transform):
def __init__(
self,
source_control_points: np.ndarray,
target_control_points: np.ndarray,
*,
source_space: Optional[SpaceRef] = None,
target_space: Optional[SpaceRef] = None
):
"""Non-rigid transforms powered by molesq package.
Parameters
----------
source_control_points : np.ndarray
NxD array of control point coordinates in the source space.
target_control_points : np.ndarray
NxD array of coordinates of the corresponding control points
in the target (deformed) space.
source_space : Optional[SpaceRef]
target_space : Optional[SpaceRef]
"""
super().__init__(source_space=source_space, target_space=target_space)
self._transformer = _Transformer(
np.asarray(source_control_points),
np.asarray(target_control_points),
)
self.ndim = {self._transformer.control_points.shape[1]}
def __call__(self, coords: np.ndarray) -> np.ndarray:
coords = self._validate_coords(coords)
return self._transformer.transform(coords)
def __invert__(self) -> Transform:
return MovingLeastSquares(
self._transformer.deformed_control_points,
self._transformer.control_points,
source_space=self.target_space,
target_space=self.source_space,
)
| [
"numpy.asarray"
] | [((1144, 1177), 'numpy.asarray', 'np.asarray', (['source_control_points'], {}), '(source_control_points)\n', (1154, 1177), True, 'import numpy as np\n'), ((1191, 1224), 'numpy.asarray', 'np.asarray', (['target_control_points'], {}), '(target_control_points)\n', (1201, 1224), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.