code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
To generate images with a pre-trained rcGAN model.
This is a mix of the image quilting algorithm over GAN generated patches.
@author: <NAME>
"""
import os
import sys
import heapq
import argparse
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.utils.data
import torch.backends.cudnn as cudnn
from concurrent import futures
from itertools import product
# size of z latent vector (i.e. size of generator input)
NZ = 100
# quilting tile size in pixels; tiles overlap by half a block
BLOCK_SIZE = 64
OVERLAP_SIZE = int(BLOCK_SIZE/2)
# size of feature maps in generator
NGF = 64
# size of feature maps in discriminator (unused in this script)
NDF = 64
# Number of channels in the training images. RGB+Label
NC = 3
# command-line interface; opt is read by load_generator_models/initialise/__main__
parser = argparse.ArgumentParser()
parser.add_argument('--inputImagePath', required=True,
                    help='Input image path.')
parser.add_argument('--outputFolder', required=True,
                    help='Output folder path.')
parser.add_argument('--modelPath', required=True,
                    help='rcGAN generator model path.')
parser.add_argument('--numberOfTiles', required=True, type=int, nargs='+',
                    default=[5, 5],
                    help=('output image width in tiles of 64x64,'
                          'change it to change generated image size.'))
parser.add_argument('--n', required=True, type=int,
                    default=5, help='Number of images to generate.')
opt = parser.parse_args()
class Generator(nn.Module):
    """rcGAN generator: maps a flattened 64x64x3 conditioning patch plus a
    latent vector z to a 64x64 RGB tile in [-1, 1] (final Tanh)."""

    def __init__(self):
        super(Generator, self).__init__()
        # flattened size of the 64x64 RGB conditioning patch
        self.sz = 64*64*3
        self.createLayers()

    def createLayers(self):
        """Build the transposed-conv stack. Attribute names l1..l5 and the
        Sequential layout are kept so saved state dicts still load."""
        def up_stage(c_in, c_out, kernel, stride, pad):
            # ConvTranspose -> BatchNorm -> ReLU upsampling stage
            return nn.Sequential(
                nn.ConvTranspose2d(c_in, c_out, kernel, stride, pad, bias=False),
                nn.BatchNorm2d(c_out),
                nn.ReLU(True))
        self.l1 = up_stage(self.sz+NZ, NGF * 8, 4, 1, 0)
        self.l2 = up_stage(NGF * 8, NGF * 4, 4, 2, 1)
        self.l3 = up_stage(NGF * 4, NGF * 2, 4, 2, 1)
        self.l4 = up_stage(NGF * 2, NGF, 4, 2, 1)
        # 76 input channels = NGF (64) + the condition reshaped to 12x32x32
        self.l5 = nn.Sequential(
            nn.ConvTranspose2d(76, NC, 4, 2, 1, bias=False),
            nn.Tanh())

    def forward(self, condition, z):
        """Run the generator; condition is re-injected before the last stage."""
        flat = condition.reshape(condition.shape[0], self.sz, 1, 1)
        out = torch.cat([flat, z], 1)
        for stage in (self.l1, self.l2, self.l3, self.l4):
            out = stage(out)
        # condition as 12 channels of 32x32 (sz // 1024 == 12), concatenated
        # with the 64-channel feature map -> 76 channels for l5
        maps = condition.reshape(condition.shape[0], self.sz//1024, 32, 32)
        return self.l5(torch.cat([maps, out], 1))
class PatternsDataset():
    """Minimal torch-style dataset over a list of Pattern objects.

    Each item is the pattern's pixel data cast to float32, optionally run
    through ``transform``, paired with a dummy label 0 (labels are unused).
    """
    def __init__(self, patterns, transform=None):
        # patterns: sequence of objects exposing a ``.data`` ndarray
        self.patterns = patterns
        self.count = len(self.patterns)
        self.transform = transform
    def __getitem__(self, index):
        data = self.patterns[index].data.astype(np.float32)
        # Bug fix: the original called self.transform unconditionally, which
        # raised TypeError whenever the default transform=None was used.
        if self.transform is not None:
            data = self.transform(data)
        return (data, 0)
    def __len__(self):
        return self.count
class Pattern:
    """Hashable wrapper for an image patch so patches can be stored in a set."""
    def __init__(self, data):
        # own a private float32 copy of the pixels
        self.data = data.copy().astype(np.float32)
    def __eq__(self, other):
        # element-wise equality over the whole patch
        return (self.data == other.data).all()
    def __hash__(self):
        # hash the raw pixel bytes; identical patches hash identically
        return hash(self.data.tobytes())
class CreatePattern:
    """Picklable callable for ProcessPoolExecutor.map: extracts the NxN
    patch of ``sample`` anchored at (i, j), optionally adding a mirrored
    and/or a rotated variant.  Returns (worker pid, set of Patterns)."""

    def __init__(self, sample, N, ref=False, rot=False):
        self.sample = sample
        self.ref = ref      # also emit the left-right mirrored patch
        self.rot = rot      # also emit the 90-degree rotated patch
        self.N = N

    def __call__(self, t):
        row, col = t
        # mode='raise' errors out instead of wrapping on out-of-range indices
        patch = Pattern(self.sample.
                        take(range(row, row+self.N), mode='raise', axis=0).
                        take(range(col, col+self.N), mode='raise', axis=1))
        variants = {patch}
        if self.ref:
            variants.add(Pattern(np.fliplr(patch.data)))
        if self.rot:
            variants.add(Pattern(np.rot90(patch.data)))
        return os.getpid(), variants
# define the possible tiles orientations
class Orientation:
    """Relative placement of two tiles whose overlap is being stitched."""
    RIGHT_LEFT = 1  # new tile sits to the right of the existing one
    BOTTOM_TOP = 2  # new tile sits below the existing one
    BOTH = 3        # interior tile: overlaps both above and to the left
class Minimum_Cost_Path:
    """Minimum-error boundary cut through the overlap of two blocks.

    On construction computes the per-pixel L2 error map over the overlap
    region, fills a dynamic-programming cost matrix, and stores
    ``self.path``: one cut column index per row of the overlap.
    """
    def __init__(self, blk1, blk2, overlap_size, orientation):
        # blocks must be square and identically sized
        assert blk1.shape == blk2.shape
        assert blk1.shape[0] == blk2.shape[1]
        # get the overlap regions
        block_size = blk1.shape[0]
        # calculate L2 error for the overlap region
        self.L2_error = self.calc_L2_error(blk1, blk2, block_size,
                                           overlap_size, orientation)
        # calculate the minimum cost matrix
        self.cost = np.zeros(self.L2_error.shape, dtype=np.float32)
        self.calc_cost()
        # now calculate the minimum cost path
        self.path = self.minimum_cost_path()
    def get_cost_at(self, i, j):
        """Cost at (i, j), or ``sys.maxsize`` when (i, j) is disallowed.

        NOTE(review): columns 0 and cols-1 are also treated as unreachable
        (``j <= 0`` / ``j >= cols-1``), which keeps diagonal moves away from
        the overlap edges -- confirm this off-by-one-looking bound is
        intentional.
        """
        if i < 0 or i >= self.cost.shape[0] or \
           j <= 0 or \
           j >= self.cost.shape[1]-1:
            return sys.maxsize
        return self.cost[i][j]
    def get_costs(self, i, j):
        """Costs of the three candidate predecessors of (i, j) in row i-1."""
        x = self.get_cost_at(i-1, j-1)
        y = self.cost[i-1][j]
        z = self.get_cost_at(i-1, j+1)
        return x, y, z
    def min_index(self, i, j):
        """Column (j-1, j or j+1) of the cheapest predecessor in row i-1."""
        x, y, z = self.get_costs(i, j)
        if (x < y):
            return j-1 if (x < z) else j+1
        else:
            return j if (y < z) else j+1
    def minimum_cost_path(self):
        """Backtrack the cheapest cut bottom-up, then reverse to top-down."""
        rows, _ = self.cost.shape
        # start from the cheapest cell of the last row
        p = [np.argmin(self.cost[rows-1, :])]
        for i in range(rows-1, 0, -1):
            j = p[-1]
            # get the index of smaller cost
            p.append(self.min_index(i, j))
        p.reverse()
        return p
    def calc_cost(self):
        """Fill the DP cost matrix row by row."""
        # we don't need to calculate the first row: it is the raw L2 error
        self.cost[0, :] = self.L2_error[0, :]
        rows, cols = self.cost.shape
        for i in range(1, rows):
            for j in range(cols):
                x, y, z = self.get_costs(i, j)
                self.cost[i][j] = min(x, y, z) + self.L2_error[i][j]
    @staticmethod
    def get_overlap(blk1, blk2, block_size, overlap_size, orientation):
        """Extract the two overlapping strips (first 3 channels only).

        Vertical overlaps are transposed so the DP always runs top-down.
        ``block_size`` is unused.  Passing ``Orientation.BOTH`` would leave
        ov1/ov2 unbound (callers only pass RIGHT_LEFT or BOTTOM_TOP).
        """
        if orientation == Orientation.RIGHT_LEFT:
            ov1 = blk1[:, -overlap_size:, :3] # right
            ov2 = blk2[:, :overlap_size, : 3] # left
        elif orientation == Orientation.BOTTOM_TOP:
            # bottom
            ov1 = np.transpose(blk1[-overlap_size:, :, : 3], (1, 0, 2))
            # top
            ov2 = np.transpose(blk2[:overlap_size, :, : 3], (1, 0, 2))
        assert ov1.shape == ov2.shape
        return ov1, ov2
    @staticmethod
    def calc_L2_error(blk1, blk2, block_size, overlap_size, orientation):
        """Per-pixel squared-error map between the two overlap strips."""
        ov1, ov2 = Minimum_Cost_Path.get_overlap(blk1, blk2, block_size,
                                                 overlap_size, orientation)
        L2_error = np.sum((ov1-ov2)**2, axis=2)
        assert (L2_error >= 0).all() == True
        return L2_error
class Image_Quilting:
    """Image quilting over GAN-generated candidate patches.

    Builds the output texture tile by tile: each new block is sampled from
    the rcGAN generator conditioned on the already-placed neighbours
    (or, with 1% probability, taken from the real sample patterns) and
    stitched in along a minimum-error boundary cut.
    """
    def __init__(self, source_image, generator_model, block_size,
                 overlap_size, number_of_tiles_in_output):
        # the generator was trained on inputs scaled to [-1, 1]
        self.source_image = self.normalizeBetweenMinus1and1(source_image)
        self.generator_model = generator_model
        self.block_size = block_size
        self.overlap_size = overlap_size
        self.number_of_tiles_in_output = number_of_tiles_in_output
        # output size in pixels: the two border tiles contribute
        # (block - overlap) each, interior tiles (block - 2*overlap),
        # plus one shared overlap per tile boundary
        self.image_size = [0, 0]
        self.image_size[0] = (2*(block_size-overlap_size)) + \
            ((number_of_tiles_in_output[0]-2)*(block_size-2*overlap_size)) + \
            ((number_of_tiles_in_output[0]-1)*overlap_size)
        self.image_size[1] = (2*(block_size-overlap_size)) + \
            ((number_of_tiles_in_output[1]-2)*(block_size-2*overlap_size)) + \
            ((number_of_tiles_in_output[1]-1)*overlap_size)
        self.image_channels = source_image.shape[2]
        # all distinct block_size x block_size patches of the sample
        self.patterns = self.patterns_from_sample(self.source_image)
        np.random.shuffle(self.patterns)
    def __save_debug_image(self, title, img, image_name=None):
        """Show (and optionally save) a [0, 1]-normalised debug image."""
        img = self.normalizeBetween0and1(img)
        plt.margins(0, 0)
        plt.gca().xaxis.set_major_locator(plt.NullLocator())
        plt.gca().yaxis.set_major_locator(plt.NullLocator())
        plt.title(title)
        plt.imshow(img)
        plt.show()
        if image_name is not None:
            plt.imsave(image_name, img)
    @staticmethod
    def normalizeBetweenMinus1and1(_m):
        """Linearly rescale an array into [-1, 1] (input copied, not mutated)."""
        m = _m.copy().astype(np.float32)
        m = -1+2*(m-m.min())/(m.max()-m.min())
        assert m.min() >= -1. and m.min() < 0. and m.max() > 0. and m.max() <= 1.
        return m
    @staticmethod
    def normalizeBetween0and1(_m):
        """Linearly rescale an array into [0, 1] (input copied, not mutated)."""
        m = _m.copy().astype(np.float32)
        m = (m-m.min()) / (m.max()-m.min())
        assert m.min() >= 0. and m.min() <= 1.
        return m
    def patterns_from_sample(self, source_image):
        """Extract every NxN patch of the sample (deduplicated) in parallel."""
        patts = set()
        N = self.block_size
        h, w, _ = source_image.shape
        with futures.ProcessPoolExecutor() as pool:
            createPattern = CreatePattern(source_image, N)
            for _, toappend in pool.map(createPattern,
                                        product(range(0, h-N), range(0, w-N)),
                                        chunksize=w):
                patts.update(toappend)
        return list(patts)
    def join_horizontal_blocks(self, blk1, blk2, path, debug=False):
        """Stitch blk2 to the right of blk1 along the per-row cut ``path``.

        In debug mode the seam is painted with a marker pixel G.
        """
        G = [[0, -1., -1.]] # red pixel
        sl1 = blk1[:, -self.overlap_size:]
        sl2 = blk2[:, :self.overlap_size]
        a = path[0]
        # NOTE: join_row closes over ``a``; reassigning ``a`` in the loop
        # below changes the cut column used for each row
        if debug:
            join_row = lambda i: np.concatenate((sl1[i, :max(0, a-1)], G,
                                                 sl2[i, max(a, 1):]))
        else:
            join_row = lambda i: np.concatenate((sl1[i, :a], sl2[i, a:]))
        c = join_row(0)
        res = np.zeros((self.block_size, self.overlap_size,
                        self.image_channels), dtype=np.float32)
        res[0, :] = c
        for i in range(1, self.block_size):
            a = path[i]
            c = join_row(i)
            res[i, :] = c
        if debug:
            self.__save_debug_image('Join Horizontal',
                                    np.hstack((res,
                                               blk2[:, self.overlap_size:])))
        # blended overlap followed by the non-overlapping part of blk2
        return np.hstack((res, blk2[:, self.overlap_size:]))
    def join_vertical_blocks(self, blk1, blk2, path, debug=False):
        """Stitch blk2 below blk1 along the per-column cut ``path``."""
        G = [[0, -1., -1.]] # red pixel
        sl1 = blk1[-self.overlap_size:, :]
        sl2 = blk2[:self.overlap_size, :]
        a = path[0]
        # join_col closes over ``a``, reassigned per column in the loop below
        if debug:
            join_col = lambda i: np.concatenate((sl1[: max(0, a-1),i], G,
                                                 sl2[max(a, 1):, i]))
        else:
            join_col = lambda i: np.concatenate((sl1[:a, i], sl2[a:, i]))
        c = join_col(0)
        res = np.zeros((self.overlap_size, self.block_size,
                        self.image_channels), dtype=np.float32)
        res[:, 0] = c
        for i in range(1, self.block_size):
            a = path[i]
            c = join_col(i)
            res[:, i] = c
        if debug:
            self.__save_debug_image('Join Vertical',
                                    np.vstack((res, blk2[self.overlap_size:, :])))
        return np.vstack((res, blk2[self.overlap_size:, :]))
    def get_random_pattern(self):
        """Crop a random seed block from the source image.

        NOTE(review): the offset range is hard-coded to [0, 50); assumes the
        source is at least 50+block_size pixels in each dimension -- confirm.
        """
        y, x = np.random.randint(0, 50, 2)
        blk = self.source_image[y:y+self.block_size, x:x+self.block_size, :]
        return blk
    def old_get_random_pattern(self):
        """Previous strategy: pick a random pre-extracted pattern (unused)."""
        i = np.random.randint(0, high=len(self.patterns))
        return self.patterns[i].data
    def get_gen_block(self, condition, z):
        """Run the generator and return HWC numpy patches on the CPU."""
        x = self.generator_model(condition, z)
        y = np.array(x.to("cpu").permute([0, 2, 3, 1]).detach().numpy())
        return y
    def get_GAN_genetared_samples(self, condition, N):
        """Sample N generator outputs conditioned on ``condition``.

        (Method name typo 'genetared' kept for interface compatibility.)
        """
        z = torch.randn(N, NZ, 1, 1, device=device)
        blk = torch.from_numpy(np.transpose(np.array([condition]),
                                            (0, 3, 1, 2))).to(device)
        b = blk.repeat(N, 1, 1, 1)
        if self.debug:
            self.condition = condition.copy()
            self.__save_debug_image('Condition', self.condition)
        return [Pattern(b) for b in self.get_gen_block(b, z)]
    def get_samples(self, blks, orientation):
        """Candidate patches for the next tile.

        With 99% probability: build a conditioning block from the already
        placed neighbours and sample the GAN.  Otherwise: draw real
        patterns from the sample without replacement.
        """
        # 1% probability of selecting a real patch.
        if np.random.rand() < .99:
            N = 100
            if orientation != Orientation.BOTH:
                # single neighbour: copy its overlap strip into the condition
                blk = np.zeros((self.block_size, self.block_size,
                                self.image_channels), dtype=np.float32)
                if orientation == Orientation.BOTTOM_TOP:
                    blk[:self.overlap_size, :] = blks[0][-self.overlap_size:, :]
                else:
                    blk[:, :self.overlap_size] = blks[0][:, -self.overlap_size:]
            else:
                # interior tile: compose top / left / corner neighbours on a
                # 2x-sized canvas, then crop the conditioning window
                tmp = np.zeros((self.block_size*2, self.block_size*2,
                                self.image_channels), dtype=np.float32)
                tmp[:self.block_size, :self.block_size] = blks[2]
                tmp[self.overlap_size:self.block_size+self.overlap_size,
                    :self.block_size] = blks[1]
                tmp[:self.block_size,
                    self.overlap_size:self.block_size+self.overlap_size] = blks[0]
                blk = tmp[self.overlap_size:self.block_size+self.overlap_size,
                          self.overlap_size:self.block_size+self.overlap_size]
            return self.get_GAN_genetared_samples(blk, N)
        else:
            return np.random.choice(self.patterns,
                                    size=self.sample_size, replace=False)
    def get_best(self, blks, orientation):
        """Pick the candidate patch with the lowest overlap L2 error.

        Keeps the top pq_N (=1) candidates in a max-heap keyed on -error.
        """
        pq = []
        pq_N = 1
        heapq.heapify(pq)
        samples = self.get_samples(blks, orientation)
        for patt in samples:
            blk = patt.data
            if orientation != Orientation.BOTH:
                l2 = Minimum_Cost_Path.calc_L2_error(blks[0], blk,
                                                     self.block_size,
                                                     self.overlap_size,
                                                     orientation)
                err = l2.sum()
            else:
                # interior tile: sum the error against both neighbours
                l2u = Minimum_Cost_Path.calc_L2_error(blks[0], blk,
                                                      self.block_size,
                                                      self.overlap_size,
                                                      Orientation.BOTTOM_TOP)
                l2l = Minimum_Cost_Path.calc_L2_error(blks[1], blk,
                                                      self.block_size,
                                                      self.overlap_size,
                                                      Orientation.RIGHT_LEFT)
                err = l2u.sum() + l2l.sum()
            pqe = (-err, blk)
            if len(pq) < pq_N:
                heapq.heappush(pq, pqe)
            else:
                try:
                    heapq.heappushpop(pq, pqe)
                except ValueError:
                    # skip errors related to duplicate values (equal -err keys
                    # make heapq compare the ndarray payloads, which raises);
                    # bare ``None`` is a deliberate no-op here
                    None
        idx = np.random.choice(len(pq), 1)[0]
        if self.debug:
            self.best = pq[idx][1].copy()
            self.__save_debug_image('Best', self.best)
        return pq[idx][1]
    def add_block(self, blk, y, x):
        """Write block ``blk`` into the output at tile coordinates (y, x)."""
        dx = max(0, x*(self.block_size-self.overlap_size))
        dy = max(0, y*(self.block_size-self.overlap_size))
        self.output_image[dy:dy+self.block_size,
                          dx:dx+self.block_size, :] = blk.copy()
    def get_block(self, y, x):
        """Read a copy of the block at tile coordinates (y, x)."""
        dx = max(0, x*(self.block_size-self.overlap_size))
        dy = max(0, y*(self.block_size-self.overlap_size))
        return self.output_image[dy:dy+self.block_size,
                                 dx:dx+self.block_size, :].copy()
    def generate(self, sample_size=1, debug=False, show_progress=False):
        """Quilt a full output image tile by tile and return it in [0, 1].

        sample_size: fraction of self.patterns drawn when the (1% chance)
        real-pattern path is taken.
        """
        self.debug = debug
        self.sample_size = int(np.ceil(len(self.patterns)*sample_size))
        self.output_image = np.zeros((self.image_size[0], self.image_size[1],
                                      self.image_channels), dtype=np.float32)
        for i in range(self.number_of_tiles_in_output[0]):
            self.row = i
            for j in range(self.number_of_tiles_in_output[1]):
                self.col = j
                if show_progress:
                    print('\rProgress : (%d,%d) ' % (i+1, j+1),
                          end='', flush=True)
                if i == 0 and j == 0:
                    # seed tile: a random crop of the real sample
                    self.output_image[:self.block_size, :self.block_size] = \
                        self.get_random_pattern()
                elif i == 0 and j > 0:
                    blk1 = self.get_block(0, j-1) # left neighbour
                    blk2 = self.get_best((blk1,), Orientation.RIGHT_LEFT)
                    mcp = Minimum_Cost_Path(blk1, blk2, self.overlap_size,
                                            Orientation.RIGHT_LEFT)
                    out = self.join_horizontal_blocks(blk1, blk2, mcp.path)
                    self.add_block(out, i, j)
                    ##################
                    if self.debug:
                        out = self.join_horizontal_blocks(blk1, blk2, mcp.path,
                                                          debug=True)
                        pad = np.ones((blk1.shape[0], 2, 3))
                        img = np.hstack((self.condition, pad,
                                         blk2[:, :, :3], pad,
                                         out))
                        self.__save_debug_image('Column', img,
                                                os.path.join(opt.outputFolder,
                                                             'join_{}_col_{}.png').
                                                format(i, j))
                elif i > 0 and j == 0:
                    blk1 = self.get_block(i-1, 0) # neighbour above
                    blk2 = self.get_best((blk1,), Orientation.BOTTOM_TOP)
                    mcp = Minimum_Cost_Path(blk1, blk2, self.overlap_size,
                                            Orientation.BOTTOM_TOP)
                    out = self.join_vertical_blocks(blk1, blk2, mcp.path)
                    self.add_block(out, i, j)
                    ##################
                    if self.debug:
                        out = self.join_vertical_blocks(blk1, blk2, mcp.path,
                                                        debug=True)
                        pad = np.ones((blk1.shape[0], 2, 3))
                        img = np.hstack((self.condition, pad,
                                         blk2[:, :, :3], pad,
                                         out))
                        self.__save_debug_image('Row', img,
                                                os.path.join(opt.outputFolder,
                                                             'join_{}_col_{}.png').
                                                format(i, j))
                elif i > 0 and j > 0:
                    blk1 = self.get_block(i-1, j) # up
                    blk2 = self.get_block(i, j-1) # left
                    blk3 = self.get_block(i-1, j-1) # corner
                    blk4 = self.get_best((blk1, blk2, blk3), Orientation.BOTH)
                    mcp1 = Minimum_Cost_Path(blk1, blk4, self.overlap_size,
                                             Orientation.BOTTOM_TOP)
                    mcp2 = Minimum_Cost_Path(blk2, blk4, self.overlap_size,
                                             Orientation.RIGHT_LEFT)
                    assert mcp1 != mcp2
                    out1 = self.join_vertical_blocks(blk1, blk4, mcp1.path)
                    out2 = self.join_horizontal_blocks(blk2, out1, mcp2.path)
                    # NOTE(review): no-op comparison below -- presumably meant
                    # ``assert out1.shape == out2.shape``; left unchanged
                    out1.shape == out2.shape
                    self.add_block(out2, i, j)
                    ##################
                    if self.debug:
                        out1 = self.join_vertical_blocks(blk1, blk4,
                                                         mcp1.path, debug=True)
                        out2 = self.join_horizontal_blocks(blk2, out1,
                                                           mcp2.path, debug=True)
                        pad = np.ones((blk1.shape[0], 2, 3))
                        img = np.hstack((self.condition, pad,
                                         blk4, pad,
                                         out2, pad))
                        self.__save_debug_image('Corner', img,
                                                os.path.join(opt.outputFolder,
                                                             'join_{}_col_{}.png').
                                                format(i, j))
                if self.debug:
                    self.__save_debug_image('Result',
                                            self.output_image[:, :, :3],
                                            os.path.join(opt.outputFolder,
                                                         'row_{}_col_{}.png').
                                            format(i, j))
        return self.normalizeBetween0and1(self.output_image)
def load_source_image(source_path):
    """Load an image from disk, normalised into [0, 1] floats.

    8-bit formats arrive as 0..255 and are rescaled; float formats (e.g.
    PNG via matplotlib) are already in [0, 1] and pass through.
    """
    image = plt.imread(source_path)
    if np.max(image) > 1.:
        image = Image_Quilting.normalizeBetween0and1(image)
    print('img.shape={}, img.dtype={}, img.max={}, img.min={}'.format(
        image.shape, image.dtype, image.max(), image.min()))
    assert image.min() >= 0. and image.max() <= 1.
    return image
def initialise_torch():
    """Select the compute device and enable cudnn autotuning.

    Sets the module-level globals ``device`` (cuda:0 when available,
    otherwise cpu) and ``ngpu`` (visible GPU count).
    """
    global device, ngpu
    ngpu = torch.cuda.device_count()
    print('ngpus=%d' % (ngpu))
    print('torch.cuda.is_available=%d' % (torch.cuda.is_available()))
    if torch.cuda.is_available():
        print('torch.version.cuda=%s' % (torch.version.cuda))
    # Decide which device we want to run on
    use_gpu = torch.cuda.is_available() and ngpu > 0
    device = torch.device("cuda:0" if use_gpu else "cpu")
    print(device)
    # let cudnn benchmark conv algorithms for fixed-size inputs
    cudnn.benchmark = True
def load_model(model_path, model_class):
    """Instantiate ``model_class``, load its weights and switch to eval mode.

    Uses the global ``device`` (set by initialise_torch) both to place the
    model and as map_location for the checkpoint.
    """
    # Create the generator
    model = model_class().to(device)
    # Handle multi-gpu if desired
    state = torch.load(model_path, map_location=device)
    model.load_state_dict(state)
    model.eval()
    return model
def load_generator_models():
    """Load the trained rcGAN generator from the CLI-provided checkpoint."""
    return load_model(opt.modelPath, model_class=Generator)
def initialise():
    """Set up torch, load the generator and the conditioning source image.

    Populates globals used by the __main__ block.  ``uniqueLabels`` and
    ``linearSpace`` are declared global but never assigned here.
    """
    global netG, source_image, uniqueLabels, linearSpace
    initialise_torch()
    netG = load_generator_models()
    source_image = load_source_image(opt.inputImagePath)
if __name__ == '__main__':
    initialise()
    # build the quilter with CLI-selected output size (in tiles)
    iq = Image_Quilting(source_image, netG, BLOCK_SIZE, OVERLAP_SIZE,
                        (opt.numberOfTiles[0], opt.numberOfTiles[1]))
    print('Number of patterns = {}'.format(len(iq.patterns)))
    # generate and save each requested output image
    for i in range(opt.n):
        output_image = iq.generate(show_progress=True)
        plt.axis("off")
        plt.imsave(os.path.join(opt.outputFolder, '{}_iq_{}x{}.png').
                   format(i, opt.numberOfTiles[0], opt.numberOfTiles[1]),
                   output_image)
        print()
    plt.close()
| [
"matplotlib.pyplot.title",
"numpy.sum",
"argparse.ArgumentParser",
"heapq.heappush",
"heapq.heappushpop",
"matplotlib.pyplot.margins",
"concurrent.futures.ProcessPoolExecutor",
"torch.cat",
"torch.randn",
"torch.cuda.device_count",
"numpy.argmin",
"numpy.ones",
"numpy.random.randint",
"num... | [((693, 718), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (716, 718), False, 'import argparse\n'), ((21233, 21256), 'matplotlib.pyplot.imread', 'plt.imread', (['source_path'], {}), '(source_path)\n', (21243, 21256), True, 'import matplotlib.pyplot as plt\n'), ((21590, 21615), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (21613, 21615), False, 'import torch\n'), ((21724, 21749), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (21747, 21749), False, 'import torch\n'), ((2501, 2521), 'torch.cat', 'torch.cat', (['[c, z]', '(1)'], {}), '([c, z], 1)\n', (2510, 2521), False, 'import torch\n'), ((2713, 2735), 'torch.cat', 'torch.cat', (['[c, out]', '(1)'], {}), '([c, out], 1)\n', (2722, 2735), False, 'import torch\n'), ((4620, 4667), 'numpy.zeros', 'np.zeros', (['self.L2_error.shape'], {'dtype': 'np.float32'}), '(self.L2_error.shape, dtype=np.float32)\n', (4628, 4667), True, 'import numpy as np\n'), ((6797, 6829), 'numpy.sum', 'np.sum', (['((ov1 - ov2) ** 2)'], {'axis': '(2)'}), '((ov1 - ov2) ** 2, axis=2)\n', (6803, 6829), True, 'import numpy as np\n'), ((7878, 7910), 'numpy.random.shuffle', 'np.random.shuffle', (['self.patterns'], {}), '(self.patterns)\n', (7895, 7910), True, 'import numpy as np\n'), ((8029, 8046), 'matplotlib.pyplot.margins', 'plt.margins', (['(0)', '(0)'], {}), '(0, 0)\n', (8040, 8046), True, 'import matplotlib.pyplot as plt\n'), ((8177, 8193), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (8186, 8193), True, 'import matplotlib.pyplot as plt\n'), ((8202, 8217), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (8212, 8217), True, 'import matplotlib.pyplot as plt\n'), ((8226, 8236), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8234, 8236), True, 'import matplotlib.pyplot as plt\n'), ((9768, 9858), 'numpy.zeros', 'np.zeros', (['(self.block_size, self.overlap_size, self.image_channels)'], {'dtype': 'np.float32'}), 
'((self.block_size, self.overlap_size, self.image_channels), dtype=\n np.float32)\n', (9776, 9858), True, 'import numpy as np\n'), ((10240, 10285), 'numpy.hstack', 'np.hstack', (['(res, blk2[:, self.overlap_size:])'], {}), '((res, blk2[:, self.overlap_size:]))\n', (10249, 10285), True, 'import numpy as np\n'), ((10788, 10878), 'numpy.zeros', 'np.zeros', (['(self.overlap_size, self.block_size, self.image_channels)'], {'dtype': 'np.float32'}), '((self.overlap_size, self.block_size, self.image_channels), dtype=\n np.float32)\n', (10796, 10878), True, 'import numpy as np\n'), ((11211, 11256), 'numpy.vstack', 'np.vstack', (['(res, blk2[self.overlap_size:, :])'], {}), '((res, blk2[self.overlap_size:, :]))\n', (11220, 11256), True, 'import numpy as np\n'), ((11307, 11334), 'numpy.random.randint', 'np.random.randint', (['(0)', '(50)', '(2)'], {}), '(0, 50, 2)\n', (11324, 11334), True, 'import numpy as np\n'), ((11814, 11853), 'torch.randn', 'torch.randn', (['N', 'NZ', '(1)', '(1)'], {'device': 'device'}), '(N, NZ, 1, 1, device=device)\n', (11825, 11853), False, 'import torch\n'), ((13713, 13730), 'heapq.heapify', 'heapq.heapify', (['pq'], {}), '(pq)\n', (13726, 13730), False, 'import heapq\n'), ((16081, 16174), 'numpy.zeros', 'np.zeros', (['(self.image_size[0], self.image_size[1], self.image_channels)'], {'dtype': 'np.float32'}), '((self.image_size[0], self.image_size[1], self.image_channels),\n dtype=np.float32)\n', (16089, 16174), True, 'import numpy as np\n'), ((21264, 21275), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (21270, 21275), True, 'import numpy as np\n'), ((22238, 22281), 'torch.load', 'torch.load', (['model_path'], {'map_location': 'device'}), '(model_path, map_location=device)\n', (22248, 22281), False, 'import torch\n'), ((22943, 22958), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (22951, 22958), True, 'import matplotlib.pyplot as plt\n'), ((23161, 23172), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (23170, 
23172), True, 'import matplotlib.pyplot as plt\n'), ((1647, 1709), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(self.sz + NZ)', '(NGF * 8)', '(4)', '(1)', '(0)'], {'bias': '(False)'}), '(self.sz + NZ, NGF * 8, 4, 1, 0, bias=False)\n', (1665, 1709), True, 'import torch.nn as nn\n'), ((1721, 1744), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(NGF * 8)'], {}), '(NGF * 8)\n', (1735, 1744), True, 'import torch.nn as nn\n'), ((1758, 1771), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1765, 1771), True, 'import torch.nn as nn\n'), ((1818, 1875), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(NGF * 8)', '(NGF * 4)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(NGF * 8, NGF * 4, 4, 2, 1, bias=False)\n', (1836, 1875), True, 'import torch.nn as nn\n'), ((1889, 1912), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(NGF * 4)'], {}), '(NGF * 4)\n', (1903, 1912), True, 'import torch.nn as nn\n'), ((1926, 1939), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1933, 1939), True, 'import torch.nn as nn\n'), ((1986, 2043), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(NGF * 4)', '(NGF * 2)', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(NGF * 4, NGF * 2, 4, 2, 1, bias=False)\n', (2004, 2043), True, 'import torch.nn as nn\n'), ((2057, 2080), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(NGF * 2)'], {}), '(NGF * 2)\n', (2071, 2080), True, 'import torch.nn as nn\n'), ((2094, 2107), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2101, 2107), True, 'import torch.nn as nn\n'), ((2154, 2207), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(NGF * 2)', 'NGF', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(NGF * 2, NGF, 4, 2, 1, bias=False)\n', (2172, 2207), True, 'import torch.nn as nn\n'), ((2221, 2240), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['NGF'], {}), '(NGF)\n', (2235, 2240), True, 'import torch.nn as nn\n'), ((2254, 2267), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (2261, 2267), True, 'import torch.nn 
as nn\n'), ((2314, 2361), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(76)', 'NC', '(4)', '(2)', '(1)'], {'bias': '(False)'}), '(76, NC, 4, 2, 1, bias=False)\n', (2332, 2361), True, 'import torch.nn as nn\n'), ((2375, 2384), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (2382, 2384), True, 'import torch.nn as nn\n'), ((3992, 4003), 'os.getpid', 'os.getpid', ([], {}), '()\n', (4001, 4003), False, 'import os\n'), ((5429, 5462), 'numpy.argmin', 'np.argmin', (['self.cost[rows - 1, :]'], {}), '(self.cost[rows - 1, :])\n', (5438, 5462), True, 'import numpy as np\n'), ((8089, 8106), 'matplotlib.pyplot.NullLocator', 'plt.NullLocator', ([], {}), '()\n', (8104, 8106), True, 'import matplotlib.pyplot as plt\n'), ((8150, 8167), 'matplotlib.pyplot.NullLocator', 'plt.NullLocator', ([], {}), '()\n', (8165, 8167), True, 'import matplotlib.pyplot as plt\n'), ((8284, 8311), 'matplotlib.pyplot.imsave', 'plt.imsave', (['image_name', 'img'], {}), '(image_name, img)\n', (8294, 8311), True, 'import matplotlib.pyplot as plt\n'), ((8912, 8941), 'concurrent.futures.ProcessPoolExecutor', 'futures.ProcessPoolExecutor', ([], {}), '()\n', (8939, 8941), False, 'from concurrent import futures\n'), ((12332, 12348), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (12346, 12348), True, 'import numpy as np\n'), ((13522, 13591), 'numpy.random.choice', 'np.random.choice', (['self.patterns'], {'size': 'self.sample_size', 'replace': '(False)'}), '(self.patterns, size=self.sample_size, replace=False)\n', (13538, 13591), True, 'import numpy as np\n'), ((21689, 21714), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (21712, 21714), False, 'import torch\n'), ((6331, 6383), 'numpy.transpose', 'np.transpose', (['blk1[-overlap_size:, :, :3]', '(1, 0, 2)'], {}), '(blk1[-overlap_size:, :, :3], (1, 0, 2))\n', (6343, 6383), True, 'import numpy as np\n'), ((6421, 6472), 'numpy.transpose', 'np.transpose', (['blk2[:overlap_size, :, :3]', '(1, 0, 2)'], {}), 
'(blk2[:overlap_size, :, :3], (1, 0, 2))\n', (6433, 6472), True, 'import numpy as np\n'), ((9689, 9729), 'numpy.concatenate', 'np.concatenate', (['(sl1[i, :a], sl2[i, a:])'], {}), '((sl1[i, :a], sl2[i, a:]))\n', (9703, 9729), True, 'import numpy as np\n'), ((10131, 10176), 'numpy.hstack', 'np.hstack', (['(res, blk2[:, self.overlap_size:])'], {}), '((res, blk2[:, self.overlap_size:]))\n', (10140, 10176), True, 'import numpy as np\n'), ((10709, 10749), 'numpy.concatenate', 'np.concatenate', (['(sl1[:a, i], sl2[a:, i])'], {}), '((sl1[:a, i], sl2[a:, i]))\n', (10723, 10749), True, 'import numpy as np\n'), ((11149, 11194), 'numpy.vstack', 'np.vstack', (['(res, blk2[self.overlap_size:, :])'], {}), '((res, blk2[self.overlap_size:, :]))\n', (11158, 11194), True, 'import numpy as np\n'), ((12446, 12534), 'numpy.zeros', 'np.zeros', (['(self.block_size, self.block_size, self.image_channels)'], {'dtype': 'np.float32'}), '((self.block_size, self.block_size, self.image_channels), dtype=np.\n float32)\n', (12454, 12534), True, 'import numpy as np\n'), ((12844, 12939), 'numpy.zeros', 'np.zeros', (['(self.block_size * 2, self.block_size * 2, self.image_channels)'], {'dtype': 'np.float32'}), '((self.block_size * 2, self.block_size * 2, self.image_channels),\n dtype=np.float32)\n', (12852, 12939), True, 'import numpy as np\n'), ((14915, 14938), 'heapq.heappush', 'heapq.heappush', (['pq', 'pqe'], {}), '(pq, pqe)\n', (14929, 14938), False, 'import heapq\n'), ((21922, 21947), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (21945, 21947), False, 'import torch\n'), ((3889, 3906), 'numpy.fliplr', 'np.fliplr', (['t.data'], {}), '(t.data)\n', (3898, 3906), True, 'import numpy as np\n'), ((3958, 3974), 'numpy.rot90', 'np.rot90', (['t.data'], {}), '(t.data)\n', (3966, 3974), True, 'import numpy as np\n'), ((8055, 8064), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8062, 8064), True, 'import matplotlib.pyplot as plt\n'), ((8116, 8125), 'matplotlib.pyplot.gca', 
'plt.gca', ([], {}), '()\n', (8123, 8125), True, 'import matplotlib.pyplot as plt\n'), ((14998, 15024), 'heapq.heappushpop', 'heapq.heappushpop', (['pq', 'pqe'], {}), '(pq, pqe)\n', (15015, 15024), False, 'import heapq\n'), ((22978, 23027), 'os.path.join', 'os.path.join', (['opt.outputFolder', '"""{}_iq_{}x{}.png"""'], {}), "(opt.outputFolder, '{}_iq_{}x{}.png')\n", (22990, 23027), False, 'import os\n'), ((11898, 11919), 'numpy.array', 'np.array', (['[condition]'], {}), '([condition])\n', (11906, 11919), True, 'import numpy as np\n'), ((17384, 17414), 'numpy.ones', 'np.ones', (['(blk1.shape[0], 2, 3)'], {}), '((blk1.shape[0], 2, 3))\n', (17391, 17414), True, 'import numpy as np\n'), ((17445, 17503), 'numpy.hstack', 'np.hstack', (['(self.condition, pad, blk2[:, :, :3], pad, out)'], {}), '((self.condition, pad, blk2[:, :, :3], pad, out))\n', (17454, 17503), True, 'import numpy as np\n'), ((18545, 18575), 'numpy.ones', 'np.ones', (['(blk1.shape[0], 2, 3)'], {}), '((blk1.shape[0], 2, 3))\n', (18552, 18575), True, 'import numpy as np\n'), ((18606, 18664), 'numpy.hstack', 'np.hstack', (['(self.condition, pad, blk2[:, :, :3], pad, out)'], {}), '((self.condition, pad, blk2[:, :, :3], pad, out))\n', (18615, 18664), True, 'import numpy as np\n'), ((20968, 21019), 'os.path.join', 'os.path.join', (['opt.outputFolder', '"""row_{}_col_{}.png"""'], {}), "(opt.outputFolder, 'row_{}_col_{}.png')\n", (20980, 21019), False, 'import os\n'), ((20292, 20322), 'numpy.ones', 'np.ones', (['(blk1.shape[0], 2, 3)'], {}), '((blk1.shape[0], 2, 3))\n', (20299, 20322), True, 'import numpy as np\n'), ((20353, 20407), 'numpy.hstack', 'np.hstack', (['(self.condition, pad, blk4, pad, out2, pad)'], {}), '((self.condition, pad, blk4, pad, out2, pad))\n', (20362, 20407), True, 'import numpy as np\n'), ((17697, 17749), 'os.path.join', 'os.path.join', (['opt.outputFolder', '"""join_{}_col_{}.png"""'], {}), "(opt.outputFolder, 'join_{}_col_{}.png')\n", (17709, 17749), False, 'import os\n'), ((18855, 
18907), 'os.path.join', 'os.path.join', (['opt.outputFolder', '"""join_{}_col_{}.png"""'], {}), "(opt.outputFolder, 'join_{}_col_{}.png')\n", (18867, 18907), False, 'import os\n'), ((20601, 20653), 'os.path.join', 'os.path.join', (['opt.outputFolder', '"""join_{}_col_{}.png"""'], {}), "(opt.outputFolder, 'join_{}_col_{}.png')\n", (20613, 20653), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""Cost matrix computation."""
import numpy as np
from numba import jit
@jit(nopython=True)
def _calc_cumsum_matrix_jit(X, w_list, p_ar, open_begin):
    """Fast implementation by numba.jit.

    Fills the cumulative-cost matrix D for a step-pattern-based alignment
    (DTW-style) over the pair-wise cost matrix X.

    Parameters (as used by this function; shapes inferred from indexing):
    - X: 2D pair-wise cost matrix of shape (len_x, len_y).
    - w_list: (num_cells, 2) int array of (i, j) cells to evaluate, in
      dependency order.
    - p_ar: (num_pattern, max_pattern_len, 3) array; p_ar[p, s] holds
      (row offset, column offset, weight). Index s=0 is the pattern's
      origin cell, s>=1 are the weighted steps.
    - open_begin: when True, a zero row is prepended to X and D so the
      alignment may start at any column, and row indices are shifted by 1.

    Returns D with np.inf in unreachable cells.
    """
    len_x, len_y = X.shape
    # cumsum matrix, initialised to +inf (unreachable)
    D = np.ones((len_x, len_y), dtype=np.float64) * np.inf
    if open_begin:
        X = np.vstack((np.zeros((1, X.shape[1])), X))
        D = np.vstack((np.zeros((1, D.shape[1])), D))
        w_list[:, 0] += 1
    # number of patterns
    num_pattern = p_ar.shape[0]
    # max pattern length
    max_pattern_len = p_ar.shape[1]
    # pattern cost
    pattern_cost = np.zeros(num_pattern, dtype=np.float64)
    # step cost
    step_cost = np.zeros(max_pattern_len, dtype=np.float64)
    # number of cells
    num_cells = w_list.shape[0]
    for cell_idx in range(num_cells):
        i = w_list[cell_idx, 0]
        j = w_list[cell_idx, 1]
        if i == j == 0:
            # origin cell: just the local cost
            D[i, j] = X[0, 0]
            continue
        for pidx in range(num_pattern):
            # calculate local cost for each pattern
            for sidx in range(1, max_pattern_len):
                # calculate step cost of pair-wise cost matrix
                pattern_index = p_ar[pidx, sidx, 0:2]
                ii = int(i + pattern_index[0])
                jj = int(j + pattern_index[1])
                if ii < 0 or jj < 0:
                    # step falls outside the matrix: pattern not applicable
                    step_cost[sidx] = np.inf
                    continue
                else:
                    step_cost[sidx] = X[ii, jj] \
                        * p_ar[pidx, sidx, 2]
            pattern_index = p_ar[pidx, 0, 0:2]
            ii = int(i + pattern_index[0])
            jj = int(j + pattern_index[1])
            if ii < 0 or jj < 0:
                # pattern origin outside the matrix
                pattern_cost[pidx] = np.inf
                continue
            # cumulative cost at the origin plus all weighted steps
            pattern_cost[pidx] = D[ii, jj] \
                + step_cost.sum()
        min_cost = pattern_cost.min()
        if min_cost != np.inf:
            D[i, j] = min_cost
    return D
| [
"numpy.zeros",
"numba.jit",
"numpy.ones"
] | [((100, 118), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (103, 118), False, 'from numba import jit\n'), ((638, 677), 'numpy.zeros', 'np.zeros', (['num_pattern'], {'dtype': 'np.float64'}), '(num_pattern, dtype=np.float64)\n', (646, 677), True, 'import numpy as np\n'), ((710, 753), 'numpy.zeros', 'np.zeros', (['max_pattern_len'], {'dtype': 'np.float64'}), '(max_pattern_len, dtype=np.float64)\n', (718, 753), True, 'import numpy as np\n'), ((276, 317), 'numpy.ones', 'np.ones', (['(len_x, len_y)'], {'dtype': 'np.float64'}), '((len_x, len_y), dtype=np.float64)\n', (283, 317), True, 'import numpy as np\n'), ((370, 395), 'numpy.zeros', 'np.zeros', (['(1, X.shape[1])'], {}), '((1, X.shape[1]))\n', (378, 395), True, 'import numpy as np\n'), ((424, 449), 'numpy.zeros', 'np.zeros', (['(1, D.shape[1])'], {}), '((1, D.shape[1]))\n', (432, 449), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : Feb-01-20 06:51
# @Author : <NAME> (<EMAIL>)
# @Link : https://www.kaggle.com/uysimty/keras-cnn-dog-or-cat-classification
import numpy as np
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator, load_img
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import random
import os
import pickle
import tensorflow as tf
from keras import backend as K
from model import simple_CNN
def auc(y_true, y_pred):
    """Streaming AUC metric for Keras (TF1-style graph mode).

    tf.metrics.auc returns (value_op, update_op); the update op is kept.
    The metric creates TF *local* variables, which are initialized through
    the active Keras session before use.
    # NOTE(review): the initializer runs after the metric op is created,
    # which is the usual Kaggle/TF1 workaround — confirm it behaves as
    # intended on the installed TF version.
    """
    auc = tf.metrics.auc(y_true, y_pred)[1]
    K.get_session().run(tf.local_variables_initializer())
    return auc
# data path
TRAIN_DATA_DIR = "./data/train/"  # training images, named '<class>.<n>.jpg'
MODEL_SAVES_DIR = "./models-simpleCNN/"  # checkpoints are written here
# constants
IF_FAST_RUN = True  # when True, train only 3 epochs (see main())
EPOCHS_OVER_NIGHT = 50  # epoch count for a full training run
IMAGE_WIDTH = IMAGE_HEIGHT = 128  # inputs are resized to 128x128
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3  # RGB
BATCH_SIZE = 15
def main():
    """Train the simple CNN donkey-vs-rabbit classifier.

    Builds the model, optionally resumes from a hard-coded checkpoint,
    reads filenames/labels from TRAIN_DATA_DIR (label = filename prefix),
    splits into train/validation sets, trains with augmentation,
    LR reduction and checkpointing, then saves weights, pickled history
    and a loss/accuracy plot.
    """
    """ Dir """
    if not os.path.exists(MODEL_SAVES_DIR):
        os.mkdir(MODEL_SAVES_DIR)
    """ Create Model """
    model_type = "simpleCNN"
    model = simple_CNN(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS)
    model.summary()
    print(model_type)
    print("Continuing training...")
    # model_ckpt = "model-" + model_type + ".h5"
    # Resume from this specific checkpoint if it exists.
    model_ckpt = os.path.join(MODEL_SAVES_DIR, "model_24-val_acc-0.7852.h5")
    if os.path.isfile(model_ckpt):
        print("loading existed model...")
        model.load_weights(model_ckpt)
    from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
    earlystop = EarlyStopping(patience=10)  # NOTE(review): created but never added to `callbacks` below
    learning_rate_reduction = ReduceLROnPlateau(monitor="val_acc",
                                                patience=2,
                                                verbose=1,
                                                factor=0.5,
                                                min_lr=0.00001)
    filename = "model_{epoch:02d}-val_acc-{val_acc:.4f}.h5"
    checkpoint = ModelCheckpoint(
        filepath=os.path.join(MODEL_SAVES_DIR, filename), monitor="val_acc", verbose=1, period=1)
    callbacks = [learning_rate_reduction, checkpoint]
    """Prepare Data Frame"""
    filenames = os.listdir(TRAIN_DATA_DIR)
    categories = []
    for filename in filenames:
        # Class label is encoded in the filename prefix before the first dot.
        category = filename.split('.')[0]
        if category == 'donkey': # donkey 1
            categories.append(1)
        else: # rabbit 0
            categories.append(0)
    df = pd.DataFrame({
        'filename': filenames,
        'category': categories
    })
    print(df.head())
    print(df.tail())
    # df['category'].value_counts().plot.bar()
    # plt.show()
    """Sample Image"""
    # sample = random.choice(filenames)
    # image = load_img("./data/train/"+sample)
    # plt.imshow(image)
    # plt.show()
    """Prepare data"""
    # flow_from_dataframe with class_mode='categorical' expects string labels.
    df["category"] = df["category"].replace({0: 'rabbit', 1: 'donkey'})
    """ automatically split the data into train and validation sets """
    train_df, validate_df = train_test_split(
        df, test_size=0.20, random_state=42)
    train_df = train_df.reset_index(drop=True)
    validate_df = validate_df.reset_index(drop=True)
    # train_df['category'].value_counts().plot.bar()
    total_train = train_df.shape[0]
    total_validate = validate_df.shape[0]
    """Traning Generator"""
    train_datagen = ImageDataGenerator(
        rotation_range=15,
        rescale=1./255,
        shear_range=0.1,
        zoom_range=0.2,
        horizontal_flip=True,
        width_shift_range=0.1,
        height_shift_range=0.1
    )
    train_generator = train_datagen.flow_from_dataframe(
        train_df,
        TRAIN_DATA_DIR,
        x_col='filename',
        y_col='category',
        target_size=IMAGE_SIZE,
        class_mode='categorical',
        batch_size=BATCH_SIZE,
        shuffle=True
    )
    """Validation Generator"""
    # Validation images are only rescaled, not augmented.
    validation_datagen = ImageDataGenerator(rescale=1./255)
    validation_generator = validation_datagen.flow_from_dataframe(
        validate_df,
        TRAIN_DATA_DIR,
        x_col='filename',
        y_col='category',
        target_size=IMAGE_SIZE,
        class_mode='categorical',
        batch_size=BATCH_SIZE,
        shuffle=True
    )
    """Example Generation"""
    example_df = train_df.sample(n=1).reset_index(drop=True)
    example_generator = train_datagen.flow_from_dataframe(
        example_df,
        TRAIN_DATA_DIR,
        x_col='filename',
        y_col='category',
        target_size=IMAGE_SIZE,
        class_mode='categorical'
    )
    """Example Generation Ploting"""
    # plt.figure(figsize=(12, 12))
    # for i in range(0, 15):
    #     plt.subplot(5, 3, i+1)
    #     for X_batch, Y_batch in example_generator:
    #         image = X_batch[0]
    #         plt.imshow(image)
    #         break
    # plt.tight_layout()
    # plt.show()
    """Fit Model"""
    epochs = 3 if IF_FAST_RUN else EPOCHS_OVER_NIGHT
    # NOTE(review): fit_generator is deprecated in newer Keras releases;
    # kept here for compatibility with the version this script targets.
    history = model.fit_generator(
        train_generator,
        epochs=epochs,
        validation_data=validation_generator,
        validation_steps=total_validate//BATCH_SIZE,
        steps_per_epoch=total_train//BATCH_SIZE,
        callbacks=callbacks
    )
    print("Save history")
    with open('./history', 'wb') as pickle_file:
        pickle.dump(history.history, pickle_file)
    print("Save model...")
    model.save_weights("model-" + model_type + ".h5")
    print("Visualize training...")
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 12))
    ax1.plot(history.history['loss'], color='b', label="Training loss")
    ax1.plot(history.history['val_loss'], color='r', label="validation loss")
    ax1.set_xticks(np.arange(1, epochs, 1))
    ax1.set_yticks(np.arange(0, 1, 0.1))
    ax2.plot(history.history['acc'], color='b', label="Training accuracy")
    ax2.plot(history.history['val_acc'], color='r',
             label="Validation accuracy")
    ax2.set_xticks(np.arange(1, epochs, 1))
    legend = plt.legend(loc='best', shadow=True)
    plt.tight_layout()
    # TODO plot.save
    plt.show()
# Script entry point.
if __name__ == "__main__":
    main()
| [
"keras.preprocessing.image.ImageDataGenerator",
"os.mkdir",
"pickle.dump",
"sklearn.model_selection.train_test_split",
"tensorflow.local_variables_initializer",
"os.path.isfile",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"pandas.DataFrame",
"os.path.exists",
"keras.call... | [((1161, 1214), 'model.simple_CNN', 'simple_CNN', (['IMAGE_WIDTH', 'IMAGE_HEIGHT', 'IMAGE_CHANNELS'], {}), '(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS)\n', (1171, 1214), False, 'from model import simple_CNN\n'), ((1366, 1425), 'os.path.join', 'os.path.join', (['MODEL_SAVES_DIR', '"""model_24-val_acc-0.7852.h5"""'], {}), "(MODEL_SAVES_DIR, 'model_24-val_acc-0.7852.h5')\n", (1378, 1425), False, 'import os\n'), ((1434, 1460), 'os.path.isfile', 'os.path.isfile', (['model_ckpt'], {}), '(model_ckpt)\n', (1448, 1460), False, 'import os\n'), ((1647, 1673), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(10)'}), '(patience=10)\n', (1660, 1673), False, 'from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint\n'), ((1705, 1794), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_acc"""', 'patience': '(2)', 'verbose': '(1)', 'factor': '(0.5)', 'min_lr': '(1e-05)'}), "(monitor='val_acc', patience=2, verbose=1, factor=0.5,\n min_lr=1e-05)\n", (1722, 1794), False, 'from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint\n'), ((2288, 2314), 'os.listdir', 'os.listdir', (['TRAIN_DATA_DIR'], {}), '(TRAIN_DATA_DIR)\n', (2298, 2314), False, 'import os\n'), ((2562, 2623), 'pandas.DataFrame', 'pd.DataFrame', (["{'filename': filenames, 'category': categories}"], {}), "({'filename': filenames, 'category': categories})\n", (2574, 2623), True, 'import pandas as pd\n'), ((3086, 3138), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(df, test_size=0.2, random_state=42)\n', (3102, 3138), False, 'from sklearn.model_selection import train_test_split\n'), ((3442, 3608), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rotation_range': '(15)', 'rescale': '(1.0 / 255)', 'shear_range': '(0.1)', 'zoom_range': '(0.2)', 'horizontal_flip': '(True)', 'width_shift_range': '(0.1)', 
'height_shift_range': '(0.1)'}), '(rotation_range=15, rescale=1.0 / 255, shear_range=0.1,\n zoom_range=0.2, horizontal_flip=True, width_shift_range=0.1,\n height_shift_range=0.1)\n', (3460, 3608), False, 'from keras.preprocessing.image import ImageDataGenerator, load_img\n'), ((4015, 4052), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (4033, 4052), False, 'from keras.preprocessing.image import ImageDataGenerator, load_img\n'), ((5628, 5664), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(12, 12)'}), '(2, 1, figsize=(12, 12))\n', (5640, 5664), True, 'import matplotlib.pyplot as plt\n'), ((6139, 6174), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""', 'shadow': '(True)'}), "(loc='best', shadow=True)\n", (6149, 6174), True, 'import matplotlib.pyplot as plt\n'), ((6180, 6198), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6196, 6198), True, 'import matplotlib.pyplot as plt\n'), ((6228, 6238), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6236, 6238), True, 'import matplotlib.pyplot as plt\n'), ((599, 629), 'tensorflow.metrics.auc', 'tf.metrics.auc', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (613, 629), True, 'import tensorflow as tf\n'), ((658, 690), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (688, 690), True, 'import tensorflow as tf\n'), ((1022, 1053), 'os.path.exists', 'os.path.exists', (['MODEL_SAVES_DIR'], {}), '(MODEL_SAVES_DIR)\n', (1036, 1053), False, 'import os\n'), ((1064, 1089), 'os.mkdir', 'os.mkdir', (['MODEL_SAVES_DIR'], {}), '(MODEL_SAVES_DIR)\n', (1072, 1089), False, 'import os\n'), ((5440, 5481), 'pickle.dump', 'pickle.dump', (['history.history', 'pickle_file'], {}), '(history.history, pickle_file)\n', (5451, 5481), False, 'import pickle\n'), ((5837, 5860), 'numpy.arange', 'np.arange', (['(1)', 'epochs', '(1)'], {}), '(1, 
epochs, 1)\n', (5846, 5860), True, 'import numpy as np\n'), ((5882, 5902), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.1)'], {}), '(0, 1, 0.1)\n', (5891, 5902), True, 'import numpy as np\n'), ((6098, 6121), 'numpy.arange', 'np.arange', (['(1)', 'epochs', '(1)'], {}), '(1, epochs, 1)\n', (6107, 6121), True, 'import numpy as np\n'), ((638, 653), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (651, 653), True, 'from keras import backend as K\n'), ((2103, 2142), 'os.path.join', 'os.path.join', (['MODEL_SAVES_DIR', 'filename'], {}), '(MODEL_SAVES_DIR, filename)\n', (2115, 2142), False, 'import os\n')] |
"""K-prototypes clustering"""
# Author: <NAME> <<EMAIL>>
# License: MIT
from collections import defaultdict
import numpy as np
from .KModes import KModes
def euclidean_dissim(a, b):
    """Squared Euclidean distance between ``b`` and every row of ``a``."""
    diff = a - b
    return np.sum(diff * diff, axis=1)
def move_point_num(point, ipoint, to_clust, from_clust,
                   cl_attr_sum, membership):
    """Reassign a point between clusters, updating the numerical sums.

    Flags ``ipoint`` as a member of ``to_clust`` and removes it from
    ``from_clust``, then shifts the point's attribute values from the
    source cluster's running sums to the target's.
    """
    membership[to_clust, ipoint] = 1
    membership[from_clust, ipoint] = 0
    # Shift this point's contribution between the two clusters' sums.
    for attr_idx in range(len(point)):
        value = point[attr_idx]
        cl_attr_sum[to_clust][attr_idx] += value
        cl_attr_sum[from_clust][attr_idx] -= value
    return cl_attr_sum, membership
def _labels_cost(Xnum, Xcat, centroids, gamma):
    """Assign every point to its closest prototype and total the cost.

    The per-point cost is the squared Euclidean distance of the numerical
    part plus ``gamma`` times the matching dissimilarity of the
    categorical part. Returns (labels, total_cost).
    """
    n_points = Xnum.shape[0]
    labels = np.empty(n_points, dtype='int64')
    total_cost = 0.
    for idx in range(n_points):
        # Combined numerical + gamma-weighted categorical cost per cluster.
        combined = (euclidean_dissim(centroids[0], Xnum[idx])
                    + gamma * KModes.matching_dissim(centroids[1], Xcat[idx]))
        best = np.argmin(combined)
        labels[idx] = best
        total_cost += combined[best]
    return labels, total_cost
def _k_prototypes_iter(Xnum, Xcat, centroids, cl_attr_sum, cl_attr_freq,
                       membership, gamma):
    """Single iteration of the k-prototypes algorithm.

    Reassigns each point to its closest prototype (Euclidean distance on
    the numerical part plus gamma-weighted matching dissimilarity on the
    categorical part), updating the cluster sums/frequencies, the
    membership matrix and the centroids in place.

    Returns the updated centroids and the number of points that moved.
    """
    moves = 0
    for ipoint in range(Xnum.shape[0]):
        clust = np.argmin(
            euclidean_dissim(centroids[0], Xnum[ipoint]) +
            gamma * KModes.matching_dissim(centroids[1], Xcat[ipoint]))
        if membership[clust, ipoint]:
            # Point is already in its right place.
            continue
        # Move point, and update old/new cluster frequencies and centroids.
        moves += 1
        old_clust = np.argwhere(membership[:, ipoint])[0][0]
        cl_attr_sum, membership = move_point_num(
            Xnum[ipoint], ipoint, clust, old_clust, cl_attr_sum,
            membership)
        cl_attr_freq, membership = KModes.move_point_cat(
            Xcat[ipoint], ipoint, clust, old_clust, cl_attr_freq,
            membership)
        # Update new and old centroids by choosing mean for numerical
        # and mode for categorical attributes.
        for iattr in range(len(Xnum[ipoint])):
            for curc in (clust, old_clust):
                if sum(membership[curc, :]):
                    centroids[0][curc, iattr] = \
                        cl_attr_sum[curc, iattr] / sum(membership[curc, :])
                else:
                    centroids[0][curc, iattr] = 0.
        for iattr in range(len(Xcat[ipoint])):
            for curc in (clust, old_clust):
                centroids[1][curc, iattr] = \
                    KModes.get_max_value_key(cl_attr_freq[curc][iattr])
        # In case of an empty cluster, reinitialize with a random point
        # from largest cluster.
        if sum(membership[old_clust, :]) == 0:
            from_clust = membership.sum(axis=1).argmax()
            choices = \
                [ii for ii, ch in enumerate(membership[from_clust, :]) if ch]
            rindx = np.random.choice(choices)
            # BUG FIX: move_point_num returns the updated *numerical* sums;
            # the original assigned them to cl_attr_freq, clobbering the
            # categorical frequency table.
            cl_attr_sum, membership = move_point_num(
                Xnum[rindx], rindx, old_clust, from_clust, cl_attr_sum,
                membership)
            cl_attr_freq, membership = KModes.move_point_cat(
                Xcat[rindx], rindx, old_clust, from_clust, cl_attr_freq,
                membership)
    return centroids, moves
def k_prototypes(X, n_clusters, gamma, init, n_init, max_iter, verbose):
    """Run the k-prototypes algorithm ``n_init`` times, keep the best run.

    X is a pair [Xnum, Xcat] of equally long arrays holding the numerical
    and categorical attributes. ``gamma`` weighs categorical vs. numerical
    dissimilarity; when None it is estimated as 0.5 * Xnum.std().

    Returns (centroids, labels, cost, gamma) of the lowest-cost run.
    """
    assert len(X) == 2, "X should be a list of Xnum and Xcat arrays"
    # List where [0] = numerical part of centroid and
    # [1] = categorical part. Same for centroids.
    Xnum, Xcat = X
    # Convert to numpy arrays, if needed.
    Xnum = np.asanyarray(Xnum)
    Xcat = np.asanyarray(Xcat)
    nnumpoints, nnumattrs = Xnum.shape
    ncatpoints, ncatattrs = Xcat.shape
    assert nnumpoints == ncatpoints,\
        "Uneven number of numerical and categorical points"
    npoints = nnumpoints
    assert n_clusters < npoints, "More clusters than data points?"
    # Estimate a good value for gamma, which determines the weighing of
    # categorical values in clusters (see Huang [1997]).
    if gamma is None:
        gamma = 0.5 * Xnum.std()
    all_centroids = []
    all_labels = []
    all_costs = []
    for init_no in range(n_init):
        # For numerical part of initialization, we don't have a guarantee
        # that there is not an empty cluster, so we need to retry until
        # there is none.
        while True:
            # _____ INIT _____
            if verbose:
                print("Init: initializing centroids")
            if init == 'Huang':
                centroids = KModes.init_huang(Xcat, n_clusters)
            elif init == 'Cao':
                centroids = KModes.init_cao(Xcat, n_clusters)
            elif init == 'random':
                seeds = np.random.choice(range(npoints), n_clusters)
                centroids = Xcat[seeds]
            elif hasattr(init, '__array__'):
                centroids = init
            else:
                raise NotImplementedError
            # Numerical is initialized by drawing from normal distribution,
            # categorical following the k-modes methods.
            meanX = np.mean(Xnum, axis=0)
            stdX = np.std(Xnum, axis=0)
            centroids = [meanX + np.random.randn(n_clusters, nnumattrs) * stdX,
                         centroids]
            if verbose:
                print("Init: initializing clusters")
            membership = np.zeros((n_clusters, npoints), dtype='int64')
            # Keep track of the sum of attribute values per cluster so that we
            # can do k-means on the numerical attributes.
            cl_attr_sum = np.zeros((n_clusters, nnumattrs), dtype='float')
            # cl_attr_freq is a list of lists with dictionaries that contain
            # the frequencies of values per cluster and attribute.
            cl_attr_freq = [[defaultdict(int) for _ in range(ncatattrs)]
                            for _ in range(n_clusters)]
            for ipoint in range(npoints):
                # Initial assignment to clusters
                clust = np.argmin(
                    euclidean_dissim(centroids[0], Xnum[ipoint]) +
                    gamma * KModes.matching_dissim(centroids[1], Xcat[ipoint]))
                membership[clust, ipoint] = 1
                # Count attribute values per cluster.
                for iattr, curattr in enumerate(Xnum[ipoint]):
                    cl_attr_sum[clust, iattr] += curattr
                for iattr, curattr in enumerate(Xcat[ipoint]):
                    cl_attr_freq[clust][iattr][curattr] += 1
            # If no empty clusters, then consider initialization finalized.
            if membership.sum(axis=1).min() > 0:
                break
        # Perform an initial centroid update.
        for ik in range(n_clusters):
            for iattr in range(nnumattrs):
                centroids[0][ik, iattr] = \
                    cl_attr_sum[ik, iattr] / sum(membership[ik, :])
            for iattr in range(ncatattrs):
                centroids[1][ik, iattr] = \
                    KModes.get_max_value_key(cl_attr_freq[ik][iattr])
        # _____ ITERATION _____
        if verbose:
            print("Starting iterations...")
        itr = 0
        converged = False
        cost = np.Inf
        # Iterate until no point moves or the cost stops decreasing.
        while itr <= max_iter and not converged:
            itr += 1
            centroids, moves = _k_prototypes_iter(
                Xnum, Xcat, centroids, cl_attr_sum, cl_attr_freq,
                membership, gamma)
            # All points seen in this iteration
            labels, ncost = \
                _labels_cost(Xnum, Xcat, centroids, gamma)
            converged = (moves == 0) or (ncost >= cost)
            cost = ncost
            if verbose:
                print("Run: {}, iteration: {}/{}, moves: {}, ncost: {}"
                      .format(init_no + 1, itr, max_iter, moves, ncost))
        # Store results of current run.
        all_centroids.append(centroids)
        all_labels.append(labels)
        all_costs.append(cost)
    best = np.argmin(all_costs)
    if n_init > 1 and verbose:
        print("Best run was number {}".format(best + 1))
    # Note: return gamma in case it was automatically determined.
    return all_centroids[best], all_labels[best], all_costs[best], gamma
class KPrototypes(KModes):
    """k-prototypes clustering for mixed numerical/categorical data.

    Parameters
    ----------
    n_clusters : int, default 8
        Number of clusters (and centroids) to produce.
    gamma : float or None, default None
        Relative weight of categorical vs. numerical attributes (Huang
        [1997]). When None it is estimated from the data.
    init : {'Huang', 'Cao', 'random'} or ndarray, default 'Huang'
        Centroid initialization method; an array of shape
        (n_clusters, n_features) is used directly as initial centroids.
    n_init : int, default 10
        Number of independent runs; the lowest-cost run wins.
    max_iter : int, default 100
        Iteration cap for a single run.
    verbose : bool, optional
        Verbosity mode.

    Attributes
    ----------
    cluster_centroids_ : array, [n_clusters, n_features]
        Cluster centroids ([numerical part, categorical part]).
    labels_ :
        Cluster index assigned to each point.
    cost_ : float
        Sum of distances of all points to their cluster centroids.

    Notes
    -----
    See Huang, Z.: Extensions to the k-modes algorithm for clustering
    large data sets with categorical values, Data Mining and Knowledge
    Discovery 2(3), 1998.
    """
    def __init__(self, n_clusters=8, gamma=None, init='Huang', n_init=10,
                 max_iter=100, verbose=0):
        super(KPrototypes, self).__init__(n_clusters, init, n_init, max_iter,
                                          verbose)
        self.gamma = gamma

    def fit(self, X):
        """Compute k-prototypes clustering.

        Parameters
        ----------
        X : list of array-like, shape=[[n_num_samples, n_features],
                                       [n_cat_samples, n_features]]
        """
        # k_prototypes also returns gamma, which may have been estimated
        # automatically from the data when self.gamma was None.
        result = k_prototypes(X, self.n_clusters, self.gamma, self.init,
                              self.n_init, self.max_iter, self.verbose)
        self.cluster_centroids_, self.labels_, self.cost_, self.gamma = result
        return self

    def predict(self, X):
        """Predict the closest cluster for each sample in X.

        Parameters
        ----------
        X : list of array-like, shape=[[n_num_samples, n_features],
                                       [n_cat_samples, n_features]]

        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        assert hasattr(self, 'cluster_centroids_'), "Model not yet fitted."
        labels, _ = _labels_cost(X[0], X[1], self.cluster_centroids_,
                                 self.gamma)
        return labels
| [
"numpy.sum",
"numpy.random.randn",
"numpy.std",
"numpy.empty",
"numpy.asanyarray",
"numpy.zeros",
"numpy.argmin",
"collections.defaultdict",
"numpy.mean",
"numpy.random.choice",
"numpy.argwhere"
] | [((250, 278), 'numpy.sum', 'np.sum', (['((a - b) ** 2)'], {'axis': '(1)'}), '((a - b) ** 2, axis=1)\n', (256, 278), True, 'import numpy as np\n'), ((980, 1012), 'numpy.empty', 'np.empty', (['npoints'], {'dtype': '"""int64"""'}), "(npoints, dtype='int64')\n", (988, 1012), True, 'import numpy as np\n'), ((4144, 4163), 'numpy.asanyarray', 'np.asanyarray', (['Xnum'], {}), '(Xnum)\n', (4157, 4163), True, 'import numpy as np\n'), ((4175, 4194), 'numpy.asanyarray', 'np.asanyarray', (['Xcat'], {}), '(Xcat)\n', (4188, 4194), True, 'import numpy as np\n'), ((8576, 8596), 'numpy.argmin', 'np.argmin', (['all_costs'], {}), '(all_costs)\n', (8585, 8596), True, 'import numpy as np\n'), ((1371, 1391), 'numpy.argmin', 'np.argmin', (['tot_costs'], {}), '(tot_costs)\n', (1380, 1391), True, 'import numpy as np\n'), ((3416, 3441), 'numpy.random.choice', 'np.random.choice', (['choices'], {}), '(choices)\n', (3432, 3441), True, 'import numpy as np\n'), ((5672, 5693), 'numpy.mean', 'np.mean', (['Xnum'], {'axis': '(0)'}), '(Xnum, axis=0)\n', (5679, 5693), True, 'import numpy as np\n'), ((5713, 5733), 'numpy.std', 'np.std', (['Xnum'], {'axis': '(0)'}), '(Xnum, axis=0)\n', (5719, 5733), True, 'import numpy as np\n'), ((5953, 5999), 'numpy.zeros', 'np.zeros', (['(n_clusters, npoints)'], {'dtype': '"""int64"""'}), "((n_clusters, npoints), dtype='int64')\n", (5961, 5999), True, 'import numpy as np\n'), ((6163, 6211), 'numpy.zeros', 'np.zeros', (['(n_clusters, nnumattrs)'], {'dtype': '"""float"""'}), "((n_clusters, nnumattrs), dtype='float')\n", (6171, 6211), True, 'import numpy as np\n'), ((2094, 2128), 'numpy.argwhere', 'np.argwhere', (['membership[:, ipoint]'], {}), '(membership[:, ipoint])\n', (2105, 2128), True, 'import numpy as np\n'), ((6385, 6401), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (6396, 6401), False, 'from collections import defaultdict\n'), ((5767, 5805), 'numpy.random.randn', 'np.random.randn', (['n_clusters', 'nnumattrs'], {}), '(n_clusters, 
nnumattrs)\n', (5782, 5805), True, 'import numpy as np\n')] |
# coding: utf-8
# In[1]:
import numpy as np
# In[2]:
def IOU(box, boxes):
    """Intersection-over-union between one box and an array of boxes.

    Args:
        box: single box [x1, y1, x2, y2]; a trailing confidence value,
            if present, is ignored.
        boxes: all face boxes of the image, shape [n, 4].

    Returns:
        IoU values, shape [n,].
    """
    # Areas, using inclusive pixel coordinates (hence the +1).
    area_box = (box[2] - box[0] + 1) * (box[3] - box[1] + 1)
    areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
    # Corners of the intersection rectangles.
    left = np.maximum(box[0], boxes[:, 0])
    top = np.maximum(box[1], boxes[:, 1])
    right = np.minimum(box[2], boxes[:, 2])
    bottom = np.minimum(box[3], boxes[:, 3])
    # Clamp negative extents (no overlap) to zero.
    inter_w = np.maximum(0, right - left + 1)
    inter_h = np.maximum(0, bottom - top + 1)
    intersection = inter_w * inter_h
    # Epsilon guards against division by zero on degenerate boxes.
    return intersection / (area_box + areas - intersection + 1e-10)
# In[3]:
def read_annotation(base_dir, label_path):
    """Read a WIDER-style face annotation file.

    The file holds repeated records of:
        <relative image path>
        <number of faces>
        <x> <y> <w> <h>          (one line per face)

    Args:
        base_dir: dataset root; image paths are resolved as
            ``base_dir + '/images/' + relative_path``.
        label_path: path to the annotation text file.

    Returns:
        dict with keys 'images' (list of image paths) and 'bboxes'
        (per-image lists of [xmin, ymin, xmax, ymax] boxes).
    """
    images = []
    bboxes = []
    # BUG FIX: the original opened the file and never closed it; use a
    # context manager so the handle is released even on parse errors.
    with open(label_path, 'r') as labelfile:
        while True:
            # An empty read marks end of file.
            imagepath = labelfile.readline().strip('\n')
            if not imagepath:
                break
            images.append(base_dir + '/images/' + imagepath)
            # Number of face boxes for this image.
            nums = labelfile.readline().strip('\n')
            one_image_bboxes = []
            for _ in range(int(nums)):
                bb_info = labelfile.readline().strip('\n').split(' ')
                # Boxes are stored as x, y, width, height; convert to corners.
                xmin, ymin, w, h = (float(v) for v in bb_info[:4])
                one_image_bboxes.append([xmin, ymin, xmin + w, ymin + h])
            bboxes.append(one_image_bboxes)
    return {'images': images, 'bboxes': bboxes}
def convert_to_square(box):
    """Expand each box to a square of side max(w, h), keeping its centre.

    Args:
        box: predicted boxes, shape [n, 5]; the last column (e.g. a
            confidence score) is carried over unchanged.

    Returns:
        Squared boxes, shape [n, 5].
    """
    squared = box.copy()
    height = box[:, 3] - box[:, 1] + 1
    width = box[:, 2] - box[:, 0] + 1
    # Longest side of each box becomes the square's side length.
    side = np.maximum(width, height)
    squared[:, 0] = box[:, 0] + width * 0.5 - side * 0.5
    squared[:, 1] = box[:, 1] + height * 0.5 - side * 0.5
    squared[:, 2] = squared[:, 0] + side - 1
    squared[:, 3] = squared[:, 1] + side - 1
    return squared
| [
"numpy.minimum",
"numpy.maximum"
] | [((399, 430), 'numpy.maximum', 'np.maximum', (['box[0]', 'boxes[:, 0]'], {}), '(box[0], boxes[:, 0])\n', (409, 430), True, 'import numpy as np\n'), ((437, 468), 'numpy.maximum', 'np.maximum', (['box[1]', 'boxes[:, 1]'], {}), '(box[1], boxes[:, 1])\n', (447, 468), True, 'import numpy as np\n'), ((475, 506), 'numpy.minimum', 'np.minimum', (['box[2]', 'boxes[:, 2]'], {}), '(box[2], boxes[:, 2])\n', (485, 506), True, 'import numpy as np\n'), ((513, 544), 'numpy.minimum', 'np.minimum', (['box[3]', 'boxes[:, 3]'], {}), '(box[3], boxes[:, 3])\n', (523, 544), True, 'import numpy as np\n'), ((566, 594), 'numpy.maximum', 'np.maximum', (['(0)', '(xx2 - xx1 + 1)'], {}), '(0, xx2 - xx1 + 1)\n', (576, 594), True, 'import numpy as np\n'), ((596, 624), 'numpy.maximum', 'np.maximum', (['(0)', '(yy2 - yy1 + 1)'], {}), '(0, yy2 - yy1 + 1)\n', (606, 624), True, 'import numpy as np\n'), ((1942, 1958), 'numpy.maximum', 'np.maximum', (['w', 'h'], {}), '(w, h)\n', (1952, 1958), True, 'import numpy as np\n')] |
from individual.case import Case
from linear.stability import Stability
import glob
import configobj
import numpy as np
class Actual:
    """Loader/aggregator for post-processed simulation cases.

    Scans case directories matching ``path_to_source`` for
    ``*.pmor.sharpy`` parameter files and collects, per system
    ('aeroelastic', 'aerodynamic', 'structural'), a SetOfCases of Case
    objects plus helpers to assemble eigenvalue, deflection, force and
    moment arrays across the parameter sweep.
    """
    def __init__(self, path_to_source):
        # Glob pattern (or path) under which the source cases live.
        self.path = path_to_source
        self.systems = ['aeroelastic', 'aerodynamic', 'structural']
        self.cases = dict()
        for sys in self.systems:
            self.cases[sys] = SetOfCases()
        # Convenience aliases to the per-system case sets.
        self.structural = self.cases['structural']
        self.aerodynamic = self.cases['aerodynamic']
        self.aeroelastic = self.cases['aeroelastic']
        self.database = dict()
    def load_bulk_cases(self, *args, replace_dir=None, append=False, **kwargs):
        """Discover and load all cases.

        *args select what to load per case: 'eigs', 'bode', 'deflection',
        'stability', 'beam_modal_analysis', 'forces', 'ss'.
        replace_dir: when not None, rewrites a hard-coded data path prefix.
        append: skip parameter values already present in the case set.
        kwargs: 'rom_library' (case list source) and 'eigs_legacy'
        (legacy eigenvalue file naming).
        """
        if kwargs.get('rom_library'):
            source_cases_name = [entry['path_to_data'] for entry in kwargs['rom_library'].library]
        else:
            source_cases_name = glob.glob(self.path)
        eigs_legacy = kwargs.get('eigs_legacy', True)
        n_loaded_cases = 0
        for source in source_cases_name:
            try:
                param_file = glob.glob(source + '/*.pmor.sharpy')[0]
            except IndexError:
                print('Unable to find source case .pmor.sharpy at {:s}'.format(source))
                continue
            case_info = configobj.ConfigObj(param_file)
            self.param_name = []
            param_value = []
            for k, v in case_info['parameters'].items():
                self.param_name.append(k)
                param_value.append(v)
            if replace_dir is not None:
                path_to_source_case = case_info['sim_info']['path_to_data'].replace('/home/ng213/sharpy_cases/',
                                                                                   '/home/ng213/2TB/')
            else:
                path_to_source_case = case_info['sim_info']['path_to_data']
            for sys in self.systems:
                if param_value in self.cases[sys].parameter_values and append:
                    continue
                case = Case(case_info['parameters'].values(), sys, parameter_name=self.param_name,
                            path_to_data=path_to_source_case, case_info=case_info['parameters'])
                case.name = case_info['sim_info']['case']
                if eigs_legacy:  # asymtotic stability in dev_pmor has an extra setting to save aeroelastic_eigenvalues.dat
                    case.path_to_sys['eigs'] = case.path + '/stability/eigenvalues.dat'
                else:
                    case.path_to_sys['eigs'] = case.path + '/stability/{:s}_eigenvalues.dat'.format(sys)
                case.path_to_sys['freqresp'] = case.path + '/frequencyresponse/{:s}.freqresp.h5'.format(sys)
                case.path_to_sys['ss'] = case.path + '/statespace/{:s}.statespace.dat'.format(sys)
                try:
                    # Second parameter is taken as angle of attack, if present.
                    case.alpha = float(list(case_info['parameters'].items())[1][1])
                except IndexError:
                    pass
                if 'eigs' in args:
                    case.load_eigs()
                if 'bode' in args:
                    case.load_bode()
                case.path_to_sys['WriteVariablesTime'] = case.path + '/WriteVariablesTime/*'
                if 'deflection' in args:
                    case.load_deflection()
                if sys == 'aeroelastic' and 'stability' in args:
                    case.stability = Stability(case.path + '/stability/')
                case.path_to_sys['beam_modal_analysis'] = case.path + '/beam_modal_analysis'
                if 'beam_modal_analysis' in args:
                    case.load_beam_modal_analysis()
                if 'forces' in args:
                    case.load_forces(case.path + '/forces/aeroforces.txt')
                if 'ss' in args:
                    case.load_ss(path=case.path)
                self.cases[sys].add_case(param_value, case, case_info['parameters'])
                n_loaded_cases += 1
        print('Loaded {} cases'.format(n_loaded_cases))
        if n_loaded_cases == 0:
            print(source_cases_name)
    def eigs(self, sys):
        """Stack eigenvalues of all cases of ``sys``.

        Returns (param_array, eigs) as two concatenated arrays; cases
        without eigenvalue data are skipped. Raises FileNotFoundError
        when no case had eigenvalue data at all.
        """
        param_array = []
        eigs = []
        for case in self.cases[sys]:
            try:
                param_array.append(np.ones((case.eigs.shape[0], len(case.parameter_value))) * case.parameter_value)
                eigs.append(case.eigs)
            except TypeError:
                param_array.append(np.ones_like(case.eigs[:, 0]) * case.parameter_value)
            except AttributeError:
                # Case has no eigs attribute loaded.
                continue
        if len(eigs) == 0:
            raise FileNotFoundError('No eigenvalue data was found.')
        return np.concatenate(param_array), np.concatenate(eigs)
    def wing_tip_deflection(self, frame='a', alpha=0, reference_line=np.array([0, 0, 0], dtype=float)):
        """Collect wing-tip deflections across all aeroelastic cases.

        frame: 'a' (body-attached) or 'g' (rotated by ``alpha`` degrees,
        requires sharpy). Returns (sorted param_array, deflections).
        # NOTE(review): ``reference_line`` has a mutable np.array default
        # shared across calls — confirm callees do not modify it.
        """
        param_array = []
        deflection = []
        if frame == 'g':
            try:
                import sharpy.utils.algebra as algebra
            except ModuleNotFoundError:
                raise(ModuleNotFoundError('Please load sharpy'))
            else:
                cga = algebra.quat2rotation(algebra.euler2quat(np.array([0, alpha * np.pi / 180, 0])))
        for case in self.cases['aeroelastic']:
            param_array.append(case.parameter_value)
            if frame == 'a':
                deflection.append(case.get_deflection_at_line(reference_line)[-1, -3:])
            elif frame == 'g':
                deflection.append(cga.dot(case.get_deflection_at_line(reference_line)[-1, -3:]))
        # Sort both outputs by parameter value.
        param_array = np.array(param_array)
        order = np.argsort(param_array)
        param_array = param_array[order]
        deflection = np.array([deflection[ith] for ith in order])
        return param_array, deflection
    def forces(self, frame='g'):
        """Aerodynamic forces of all aeroelastic cases, sorted by parameter.

        frame: 'g' or 'a'; selects which columns of case.aero_forces to use.
        """
        param_array = []
        forces = []
        for case in self.cases['aeroelastic']:
            param_array.append(case.parameter_value)
            if frame == 'g':
                forces.append(case.aero_forces[1:4])
            elif frame == 'a':
                forces.append(case.aero_forces[7:10])
            else:
                raise NameError('Frame can only be A or G')
        param_array = np.array(param_array)
        order = np.argsort(param_array)
        param_array = param_array[order]
        forces = np.vstack(([forces[ith] for ith in order]))
        return param_array, forces
    def moments(self, frame='g'):
        """Aerodynamic moments of all aeroelastic cases, sorted by parameter.

        frame: 'g' or 'a'; selects which columns of case.aero_moments to use.
        """
        param_array = []
        moments = []
        for case in self.cases['aeroelastic']:
            param_array.append(case.parameter_value)
            if frame == 'g':
                moments.append(case.aero_moments[1:4])
            elif frame == 'a':
                moments.append(case.aero_moments[7:10])
            else:
                raise NameError('Frame can only be A or G')
        param_array = np.array(param_array)
        order = np.argsort(param_array)
        param_array = param_array[order]
        moments = np.vstack(([moments[ith] for ith in order]))
        return param_array, moments
class SetIterator:
    """Iterator over the cases held by a SetOfCases-like container."""

    def __init__(self, set_of_cases):
        self._set_cases = set_of_cases
        self._index = 0

    def __next__(self):
        # Exhausted once every stored case has been yielded.
        if self._index >= self._set_cases.n_cases:
            raise StopIteration
        case = self._set_cases(self._index)
        self._index += 1
        return case
class SetOfCases:
    """Container of Case objects, indexed by parameter value and case id."""

    def __init__(self):
        self.cases = list()
        self.parameter_values = list()
        self.id_list = dict()  # case_id -> case
        self.database = dict()  # case_id -> {param_name: float(value)}
        self._n_cases = 0

    def add_case(self, parameter_value, case, param_dict=None):
        """Register a case.

        Args:
            parameter_value: list of parameter values identifying the case.
            case: the Case object to store; gets a 1-based ``case_id``.
            param_dict: mapping of parameter name to value; values are
                stored as floats. Required.

        Raises:
            ValueError: if ``param_dict`` is not provided.
        """
        if param_dict is None:
            # BUG FIX: the original dropped into pdb here and then crashed
            # on param_dict.items(); fail loudly and explicitly instead.
            raise ValueError('add_case() requires a param_dict mapping '
                             'parameter names to values')
        case.case_id = self.n_cases + 1
        self.cases.append(case)
        self.parameter_values.append(parameter_value)
        self.id_list[case.case_id] = case
        self.database[case.case_id] = {k: float(v) for k, v in param_dict.items()}

    def __call__(self, i):
        """Return the i-th stored case."""
        return self.cases[i]

    @property
    def n_cases(self):
        """Number of stored cases (recomputed from the case list)."""
        self.n_cases = len(self.cases)
        return self._n_cases

    @n_cases.setter
    def n_cases(self, number):
        self._n_cases = number

    def __iter__(self):
        return SetIterator(self)

    def find_parameter_value(self, param_value, return_idx=False):
        """Look up a case by its parameter-value list.

        Returns the case, or its index when ``return_idx`` is True.
        Raises ValueError when the value is not present (list.index).
        """
        ind = self.parameter_values.index(param_value)
        if not return_idx:
            return self(ind)
        return ind

    def find_param(self, param_value, return_idx=False):
        """Look up a case by its {name: float value} parameter dict.

        Returns the case (or its id when ``return_idx`` is True);
        returns None when no entry matches.
        """
        for case_id, entry_values in self.database.items():
            if entry_values == param_value:
                if return_idx:
                    return case_id
                try:
                    return self.id_list[case_id]
                except KeyError:
                    # BUG FIX: the original built this message and silently
                    # discarded it (falling through to return None).
                    raise KeyError(
                        f'Unable to find case with {case_id}') from None
        return None
| [
"numpy.ones_like",
"numpy.concatenate",
"configobj.ConfigObj",
"numpy.argsort",
"numpy.array",
"pdb.set_trace",
"linear.stability.Stability",
"glob.glob",
"numpy.vstack"
] | [((4722, 4754), 'numpy.array', 'np.array', (['[0, 0, 0]'], {'dtype': 'float'}), '([0, 0, 0], dtype=float)\n', (4730, 4754), True, 'import numpy as np\n'), ((5499, 5520), 'numpy.array', 'np.array', (['param_array'], {}), '(param_array)\n', (5507, 5520), True, 'import numpy as np\n'), ((5537, 5560), 'numpy.argsort', 'np.argsort', (['param_array'], {}), '(param_array)\n', (5547, 5560), True, 'import numpy as np\n'), ((5623, 5667), 'numpy.array', 'np.array', (['[deflection[ith] for ith in order]'], {}), '([deflection[ith] for ith in order])\n', (5631, 5667), True, 'import numpy as np\n'), ((6155, 6176), 'numpy.array', 'np.array', (['param_array'], {}), '(param_array)\n', (6163, 6176), True, 'import numpy as np\n'), ((6193, 6216), 'numpy.argsort', 'np.argsort', (['param_array'], {}), '(param_array)\n', (6203, 6216), True, 'import numpy as np\n'), ((6275, 6316), 'numpy.vstack', 'np.vstack', (['[forces[ith] for ith in order]'], {}), '([forces[ith] for ith in order])\n', (6284, 6316), True, 'import numpy as np\n'), ((6808, 6829), 'numpy.array', 'np.array', (['param_array'], {}), '(param_array)\n', (6816, 6829), True, 'import numpy as np\n'), ((6846, 6869), 'numpy.argsort', 'np.argsort', (['param_array'], {}), '(param_array)\n', (6856, 6869), True, 'import numpy as np\n'), ((6929, 6971), 'numpy.vstack', 'np.vstack', (['[moments[ith] for ith in order]'], {}), '([moments[ith] for ith in order])\n', (6938, 6971), True, 'import numpy as np\n'), ((839, 859), 'glob.glob', 'glob.glob', (['self.path'], {}), '(self.path)\n', (848, 859), False, 'import glob\n'), ((1239, 1270), 'configobj.ConfigObj', 'configobj.ConfigObj', (['param_file'], {}), '(param_file)\n', (1258, 1270), False, 'import configobj\n'), ((4602, 4629), 'numpy.concatenate', 'np.concatenate', (['param_array'], {}), '(param_array)\n', (4616, 4629), True, 'import numpy as np\n'), ((4631, 4651), 'numpy.concatenate', 'np.concatenate', (['eigs'], {}), '(eigs)\n', (4645, 4651), True, 'import numpy as np\n'), ((7826, 
7841), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (7839, 7841), False, 'import pdb\n'), ((1030, 1066), 'glob.glob', 'glob.glob', (["(source + '/*.pmor.sharpy')"], {}), "(source + '/*.pmor.sharpy')\n", (1039, 1066), False, 'import glob\n'), ((3362, 3398), 'linear.stability.Stability', 'Stability', (["(case.path + '/stability/')"], {}), "(case.path + '/stability/')\n", (3371, 3398), False, 'from linear.stability import Stability\n'), ((5090, 5127), 'numpy.array', 'np.array', (['[0, alpha * np.pi / 180, 0]'], {}), '([0, alpha * np.pi / 180, 0])\n', (5098, 5127), True, 'import numpy as np\n'), ((4376, 4405), 'numpy.ones_like', 'np.ones_like', (['case.eigs[:, 0]'], {}), '(case.eigs[:, 0])\n', (4388, 4405), True, 'import numpy as np\n')] |
import time
import gym
import numpy as np
import concurrent.futures
import os
import sys
from stable_baselines3 import PPO, SAC, TD3
from stable_baselines3.common.vec_env import VecEnv, VecEnvWrapper, DummyVecEnv, SubprocVecEnv, VecNormalize, VecMonitor, VecCheckNan
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.env_checker import check_env
from stable_baselines3.common.noise import NormalActionNoise
from stable_baselines3.common.utils import set_random_seed
from stable_baselines3.common.logger import configure
from functools import reduce
from yaml import scan
# Get ./src/ folder & add it to path so local packages resolve
current_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(current_dir)
# import your drivers here
from pkg.drivers import PureFTG, DisparityExtender, GapFollower
# choose your drivers here (1-4); one driver per simulated agent
drivers = [PureFTG()]
# choose your racetrack here (SILVERSTONE, SILVERSTONE_OBS)
RACETRACK = 'SOCHI'
# Repository root: walk three directory levels up from this file's real location.
root_path = reduce(lambda path, _: os.path.dirname(path), range(3), os.path.dirname(os.path.realpath(__file__)))
# Gym environment package and racetrack map assets, resolved against the repo root.
env_path = os.path.join(root_path, 'gym', 'f110_gym', 'envs')
map_path = os.path.join(root_path, 'pkg', 'src', 'pkg', 'maps')
sys.path.append(env_path)
from f110_env import F110Env
def make_env(rank, seed=0):
    """Return a zero-argument factory that builds one F110Env instance.

    Intended for SubprocVecEnv, which calls the factory inside each worker
    subprocess.

    :param rank: (int) index of the subprocess, used as a seed offset
    :param seed: (int) the initial seed for the RNG
    """
    def _init():
        # Map path, image extension and agent count come from module globals.
        return F110Env(map_path + '/' + RACETRACK, '.png', len(drivers))
    # Seed global RNGs with a per-rank offset before handing out the factory.
    set_random_seed(seed + rank)
    return _init
class GymRunner(object):
    """Training harness: builds a vectorized F1TENTH env and trains PPO on it."""
    def __init__(self, racetrack, drivers):
        # Stored for reference; run() currently reads the module-level
        # RACETRACK / drivers / map_path globals rather than these attributes.
        self.racetrack = racetrack
        self.drivers = drivers
    def run(self):
        """Build the env stack, train PPO in an endless loop, and render rollouts.

        Pipeline: configure logger -> sanity-check a single env -> wrap 8
        subprocess envs in monitoring/NaN-checking -> create PPO -> alternate
        learn/save with a render loop. Note: both loops below are infinite;
        this method never returns.
        """
        tmp_path = "./tmp/sb3_log/"
        # set up logger (stdout + CSV + TensorBoard under ./tmp/sb3_log/)
        new_logger = configure(tmp_path, ["stdout", "csv", "tensorboard"])
        # load map
        # env = gym.make('f110_gym:f110-v0',
        #                map="{}/maps/{}".format(current_dir, RACETRACK),
        #                map_ext=".png", num_agents=len(drivers))
        # print(f'Initializing env with: {map_path + "/" + RACETRACK, ".png", len(drivers)}')
        # One throwaway env only to validate the Gym interface before vectorizing.
        env = F110Env(map_path + '/' + RACETRACK, '.png', len(drivers))
        check_env(env)
        # 8 parallel worker processes; make_env seeds each by rank.
        env = SubprocVecEnv([make_env(i) for i in range(8)])
        #env = DummyVecEnv([make_env(i) for i in range(12)])
        #env = DummyVecEnv([lambda: env])
        #env = VecNormalize(env, norm_reward=False, training=True)
        # Order matters: monitor episode stats first, then trap NaN/inf values.
        env = VecMonitor(env)
        env = VecCheckNan(env, raise_exception=True)
        # noise objects for td3 (only used by the commented-out TD3 setup below)
        n_actions = env.action_space.shape[-1]
        action_noise = NormalActionNoise(mean=np.zeros(n_actions), sigma = 0.05 * np.ones(n_actions))
        # model (alternative SAC/TD3 configurations kept below for reference)
        model = PPO('MlpPolicy', env, n_steps=1024, learning_rate=0.0003, batch_size=128, clip_range=0.2, clip_range_vf=0.2, n_epochs=20, ent_coef=0.05, target_kl= 0.2, use_sde=True, sde_sample_freq=512, verbose=2)
        #model = SAC("MultiInputPolicy", env, verbose=2)
        #model = TD3("MultiInputPolicy", env, buffer_size=200000, learning_starts=10000, gamma=0.98, learning_rate=0.003, action_noise=action_noise, verbose=2)
        #model.learn(total_timesteps=400000, log_interval=1)
        #model = PPO.load("ppo_f1tenth")
        #env = VecNormalize.load('saved_env.pkl',env)
        model.set_env(env)
        model.set_logger(new_logger)
        while True:
            print("done")
            #model.save("ppo_f1tenth")
            #model = PPO.load("ppo_f1tenth")
            #model = SAC.load("sac_f1tenth")
            # Train, checkpoint, then show the current policy; the render loop
            # below never breaks, so in practice only one learn() pass runs.
            model.learn(total_timesteps=10000000, log_interval=1)
            model.save("ppo_f1tenth")
            #env.save('saved_env.pkl')
            obs = env.reset()
            while True:
                action, _states = model.predict(obs, deterministic=True)
                obs, reward, done, info = env.step(action)
                env.render(mode='human_fast')
                if done:
                    obs = env.reset()
        # specify starting positions of each agent
        # poses = np.array([[0. + (i * 0.75), 0. - (i*1.5), np.radians(60)] for i in range(len(drivers))])
        # obs, step_reward, done, info = env.reset(poses=poses)
        # env.render()
        # laptime = 0.0
        # start = time.time()
        # while not done:
        #     actions = []
        #     futures = []
        #     with concurrent.futures.ThreadPoolExecutor() as executor:
        #         for i, driver in enumerate(drivers):
        #             futures.append(executor.submit(driver.process_lidar, obs['scans'][i]))
        #             print(len( obs['scans'][i]))
        #     for future in futures:
        #         speed, steer = future.result()
        #         actions.append([steer, speed])
        #     actions = np.array(actions)
        #     obs, step_reward, done, info = env.step(actions)
        #     laptime += step_reward
        #     env.render(mode='human_fast')
        # print('Sim elapsed time:', laptime, 'Real elapsed time:', time.time() - start)
if __name__ == '__main__':
    # Entry point: run the configured racetrack with the chosen drivers.
    GymRunner(RACETRACK, drivers).run()
| [
"sys.path.append",
"stable_baselines3.PPO",
"stable_baselines3.common.env_checker.check_env",
"os.path.dirname",
"os.path.realpath",
"stable_baselines3.common.logger.configure",
"numpy.zeros",
"numpy.ones",
"stable_baselines3.common.utils.set_random_seed",
"pkg.drivers.PureFTG",
"stable_baseline... | [((706, 734), 'sys.path.append', 'sys.path.append', (['current_dir'], {}), '(current_dir)\n', (721, 734), False, 'import sys\n'), ((1089, 1139), 'os.path.join', 'os.path.join', (['root_path', '"""gym"""', '"""f110_gym"""', '"""envs"""'], {}), "(root_path, 'gym', 'f110_gym', 'envs')\n", (1101, 1139), False, 'import os\n'), ((1151, 1203), 'os.path.join', 'os.path.join', (['root_path', '"""pkg"""', '"""src"""', '"""pkg"""', '"""maps"""'], {}), "(root_path, 'pkg', 'src', 'pkg', 'maps')\n", (1163, 1203), False, 'import os\n'), ((1204, 1229), 'sys.path.append', 'sys.path.append', (['env_path'], {}), '(env_path)\n', (1219, 1229), False, 'import sys\n'), ((679, 704), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (694, 704), False, 'import os\n'), ((872, 881), 'pkg.drivers.PureFTG', 'PureFTG', ([], {}), '()\n', (879, 881), False, 'from pkg.drivers import PureFTG, DisparityExtender, GapFollower\n'), ((1727, 1755), 'stable_baselines3.common.utils.set_random_seed', 'set_random_seed', (['(seed + rank)'], {}), '(seed + rank)\n', (1742, 1755), False, 'from stable_baselines3.common.utils import set_random_seed\n'), ((1000, 1021), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (1015, 1021), False, 'import os\n'), ((1049, 1075), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1065, 1075), False, 'import os\n'), ((2009, 2062), 'stable_baselines3.common.logger.configure', 'configure', (['tmp_path', "['stdout', 'csv', 'tensorboard']"], {}), "(tmp_path, ['stdout', 'csv', 'tensorboard'])\n", (2018, 2062), False, 'from stable_baselines3.common.logger import configure\n'), ((2467, 2481), 'stable_baselines3.common.env_checker.check_env', 'check_env', (['env'], {}), '(env)\n', (2476, 2481), False, 'from stable_baselines3.common.env_checker import check_env\n'), ((2727, 2742), 'stable_baselines3.common.vec_env.VecMonitor', 'VecMonitor', (['env'], {}), '(env)\n', (2737, 2742), False, 'from 
stable_baselines3.common.vec_env import VecEnv, VecEnvWrapper, DummyVecEnv, SubprocVecEnv, VecNormalize, VecMonitor, VecCheckNan\n'), ((2757, 2795), 'stable_baselines3.common.vec_env.VecCheckNan', 'VecCheckNan', (['env'], {'raise_exception': '(True)'}), '(env, raise_exception=True)\n', (2768, 2795), False, 'from stable_baselines3.common.vec_env import VecEnv, VecEnvWrapper, DummyVecEnv, SubprocVecEnv, VecNormalize, VecMonitor, VecCheckNan\n'), ((3012, 3217), 'stable_baselines3.PPO', 'PPO', (['"""MlpPolicy"""', 'env'], {'n_steps': '(1024)', 'learning_rate': '(0.0003)', 'batch_size': '(128)', 'clip_range': '(0.2)', 'clip_range_vf': '(0.2)', 'n_epochs': '(20)', 'ent_coef': '(0.05)', 'target_kl': '(0.2)', 'use_sde': '(True)', 'sde_sample_freq': '(512)', 'verbose': '(2)'}), "('MlpPolicy', env, n_steps=1024, learning_rate=0.0003, batch_size=128,\n clip_range=0.2, clip_range_vf=0.2, n_epochs=20, ent_coef=0.05,\n target_kl=0.2, use_sde=True, sde_sample_freq=512, verbose=2)\n", (3015, 3217), False, 'from stable_baselines3 import PPO, SAC, TD3\n'), ((2922, 2941), 'numpy.zeros', 'np.zeros', (['n_actions'], {}), '(n_actions)\n', (2930, 2941), True, 'import numpy as np\n'), ((2958, 2976), 'numpy.ones', 'np.ones', (['n_actions'], {}), '(n_actions)\n', (2965, 2976), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch import nn
import numpy as np
import math
##########################################################################
##### P A R T S ###############################################
####################################################
class ResBlocks(nn.Module):
    """A sequential stack of `num_blocks` identical residual blocks."""
    def __init__(self, num_blocks, dim, norm, act, pad_type, use_sn=False):
        super(ResBlocks, self).__init__()
        blocks = [ResBlock(dim, norm=norm, act=act, pad_type=pad_type, use_sn=use_sn)
                  for _ in range(num_blocks)]
        self.model = nn.Sequential(*blocks)
    def forward(self, x):
        return self.model(x)
class ResBlock(nn.Module):
    """Two-conv residual block with a damped skip: out = x + 0.1 * F(x)."""
    def __init__(self, dim, norm='in', act='relu', pad_type='zero', use_sn=False):
        super(ResBlock, self).__init__()
        first = Conv2dBlock(dim, dim, 3, 1, 1,
                            norm=norm, act=act,
                            pad_type=pad_type, use_sn=use_sn)
        # Second conv has no activation so the sum stays unactivated.
        second = Conv2dBlock(dim, dim, 3, 1, 1,
                             norm=norm, act='none',
                             pad_type=pad_type, use_sn=use_sn)
        self.model = nn.Sequential(first, second)
    def forward(self, x):
        # Scale the residual branch by 0.1 before adding it back.
        return x + 0.1 * self.model(x)
class ActFirstResBlk(nn.Module):
    """Norm-first residual block with optional 2x average-pool downsampling.

    Output is (shortcut(x) + residual(x)) / sqrt(2); the shortcut gets a 1x1
    conv only when the channel count changes.
    """
    def __init__(self, dim_in, dim_out, downsample=True):
        super(ActFirstResBlk, self).__init__()
        self.norm1 = FRN(dim_in)
        self.norm2 = FRN(dim_in)
        self.conv1 = nn.Conv2d(dim_in, dim_in, 3, 1, 1)
        self.conv2 = nn.Conv2d(dim_in, dim_out, 3, 1, 1)
        self.downsample = downsample
        self.learned_sc = dim_in != dim_out
        if self.learned_sc:
            # 1x1 conv matches channel counts on the shortcut path.
            self.conv1x1 = nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)
    def _shortcut(self, x):
        if self.learned_sc:
            x = self.conv1x1(x)
        if self.downsample:
            x = F.avg_pool2d(x, 2)
        return x
    def _residual(self, x):
        x = self.conv1(self.norm1(x))
        if self.downsample:
            x = F.avg_pool2d(x, 2)
        return self.conv2(self.norm2(x))
    def forward(self, x):
        scale = torch.rsqrt(torch.tensor(2.0))  # 1/sqrt(2), applied to both branches
        return scale * self._shortcut(x) + scale * self._residual(x)
class LinearBlock(nn.Module):
    """Fully-connected layer with optional normalization and activation.

    norm: 'bn' | 'in' | 'none';  act: 'relu' | 'lrelu' | 'tanh' | 'none'.
    With use_sn=True the linear layer is wrapped in spectral normalization.
    """
    def __init__(self, in_dim, out_dim, norm='none', act='relu', use_sn=False):
        super(LinearBlock, self).__init__()
        self.fc = nn.Linear(in_dim, out_dim, bias=True)
        if use_sn:
            self.fc = nn.utils.spectral_norm(self.fc)
        # Normalization over the output features (None disables it).
        if norm == 'bn':
            self.norm = nn.BatchNorm1d(out_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm1d(out_dim)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)
        # Activation (None disables it).
        if act == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif act == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif act == 'tanh':
            self.activation = nn.Tanh()
        elif act == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(act)
    def forward(self, x):
        y = self.fc(x)
        if self.norm is not None:
            y = self.norm(y)
        if self.activation is not None:
            y = self.activation(y)
        return y
class Conv2dBlock(nn.Module):
    """Pad -> Conv2d -> optional norm -> optional activation, as one module.

    pad_type: 'reflect' | 'replicate' | 'zero'
    norm:     'bn' | 'in' | 'adain' | 'none'
    act:      'relu' | 'lrelu' | 'tanh' | 'none'
    With use_sn=True the conv is wrapped in spectral normalization.
    """
    def __init__(self, in_dim, out_dim, ks, st, padding=0,
                 norm='none', act='relu', pad_type='zero',
                 use_bias=True, use_sn=False):
        super(Conv2dBlock, self).__init__()
        self.use_bias = use_bias
        # Explicit padding layer; the conv itself is built without padding.
        pad_layers = {'reflect': nn.ReflectionPad2d,
                      'replicate': nn.ReplicationPad2d,
                      'zero': nn.ZeroPad2d}
        assert pad_type in pad_layers, "Unsupported padding type: {}".format(pad_type)
        self.pad = pad_layers[pad_type](padding)
        # Normalization over out_dim channels (None disables it).
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(out_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm2d(out_dim)
        elif norm == 'adain':
            self.norm = AdaIN2d(out_dim)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)
        # Activation (None disables it).
        if act == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif act == 'lrelu':
            self.activation = nn.LeakyReLU(0.2, inplace=True)
        elif act == 'tanh':
            self.activation = nn.Tanh()
        elif act == 'none':
            self.activation = None
        else:
            assert 0, "Unsupported activation: {}".format(act)
        self.conv = nn.Conv2d(in_dim, out_dim, ks, st, bias=self.use_bias)
        if use_sn:
            self.conv = nn.utils.spectral_norm(self.conv)
    def forward(self, x):
        y = self.conv(self.pad(x))
        if self.norm is not None:
            y = self.norm(y)
        if self.activation is not None:
            y = self.activation(y)
        return y
class FRN(nn.Module):
    """Filter Response Normalization followed by a thresholded activation (TLU).

    Each channel is normalized by its mean squared activation over (H, W),
    scaled/shifted by learned gamma/beta, then lower-bounded by a learned tau.
    """
    def __init__(self, num_features, eps=1e-6):
        super(FRN, self).__init__()
        # Per-channel learned parameters, broadcastable over (N, C, H, W).
        self.tau = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        self.gamma = nn.Parameter(torch.ones(1, num_features, 1, 1))
        self.beta = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        self.eps = eps
    def forward(self, x):
        nu2 = torch.mean(x ** 2, dim=[2, 3], keepdim=True)
        normed = x * torch.rsqrt(nu2 + self.eps)
        return torch.max(self.gamma * normed + self.beta, self.tau)
class AdaIN2d(nn.Module):
    """Adaptive Instance Normalization whose affine parameters are injected
    externally.

    `weight` (std) and `bias` (mean) are plain tensor attributes set from the
    outside (see assign_adain_params in this file) rather than learned here;
    forward() asserts they have been assigned.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=False, track_running_stats=True):
        super(AdaIN2d, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        if self.affine:
            # Learned affine (unused in the default affine=False configuration).
            self.weight = nn.Parameter(torch.Tensor(num_features))
            self.bias = nn.Parameter(torch.Tensor(num_features))
        else:
            # Placeholders; assign_adain_params overwrites these per forward pass.
            self.weight = None
            self.bias = None
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
        else:
            self.register_buffer('running_mean', None)
            self.register_buffer('running_var', None)
    def forward(self, x):
        assert self.weight is not None and self.bias is not None, "AdaIN params are None"
        N, C, H, W = x.size()
        # Running stats are tiled per sample to match the (1, N*C, H*W) layout below.
        running_mean = self.running_mean.repeat(N)
        running_var = self.running_var.repeat(N)
        # Fold batch into channels and run batch_norm in training mode: statistics
        # are then computed per (sample, channel) pair, i.e. instance norm, with
        # the injected weight/bias applied (presumably length N*C after
        # assign_adain_params flattens them — confirm against the caller).
        x_ = x.contiguous().view(1, N * C, H * W)
        normed = F.batch_norm(x_, running_mean, running_var,
                              self.weight, self.bias,
                              True, self.momentum, self.eps)
        return normed.view(N, C, H, W)
    def __repr__(self):
        return self.__class__.__name__ + '(num_features=' + str(self.num_features) + ')'
##############################################################################################
############################# D I S C R I M I N A T O R ############################
#########################################################################
class Discriminator(nn.Module):
    """Discriminator: (image x, domain y) -> (logit out)."""
    def __init__(self, image_size=256, num_domains=2, max_conv_dim=1024):
        super(Discriminator, self).__init__()
        dim_in = 64 if image_size < 256 else 32
        layers = [nn.Conv2d(3, dim_in, 3, 1, 1)]
        # log2(size) - 2 stages: each stage keeps then downsamples, doubling
        # channels up to max_conv_dim.
        repeat_num = int(np.log2(image_size)) - 2
        for _ in range(repeat_num):
            dim_out = min(dim_in * 2, max_conv_dim)
            layers.append(ActFirstResBlk(dim_in, dim_in, downsample=False))
            layers.append(ActFirstResBlk(dim_in, dim_out, downsample=True))
            dim_in = dim_out
        layers.extend([
            nn.LeakyReLU(0.2),
            nn.Conv2d(dim_out, dim_out, 4, 1, 0),
            nn.LeakyReLU(0.2),
            nn.Conv2d(dim_out, num_domains, 1, 1, 0),
        ])
        self.main = nn.Sequential(*layers)
        self.apply(disc_weights_init('kaiming'))
    def forward(self, x, y):
        """
        Inputs:
        - x: images of shape (batch, 3, image_size, image_size).
        - y: domain indices of shape (batch).
        Output:
        - (out, feat): per-sample logit selected by domain y, plus the raw
          feature map from the trunk.
        """
        feat = self.main(x)
        flat = feat.view(feat.size(0), -1)  # (batch, num_domains)
        idx = torch.LongTensor(range(y.size(0))).to(y.device)
        return flat[idx, y], feat
    def _initialize_weights(self, mode='fan_in'):
        # Kaiming init for all conv layers (kept for API parity; __init__
        # applies disc_weights_init instead).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode=mode, nonlinearity='relu')
                if m.bias is not None:
                    m.bias.data.zero_()
def disc_weights_init(init_type='gaussian'):
    """Return an `apply`-compatible initializer for the discriminator.

    init_type: 'gaussian' | 'xavier' | 'kaiming' | 'orthogonal' | 'default'.
    The previous body was a verbatim copy of weights_init (defined later in
    this module); delegate to it so the two initializers cannot drift apart.
    Behavior is unchanged: Conv*/Linear* weights are initialized per the
    chosen scheme and biases are zeroed.
    """
    return weights_init(init_type)
#####################################################################################
###################################### G E N E R A T O R #####################
######################################################################
class Generator(nn.Module):
    """Content-encoder / AdaIN-decoder generator: (content image, style code) -> image."""
    def __init__(self, img_size=128, sty_dim=64, n_res=2, use_sn=False):
        super(Generator, self).__init__()
        print("Init Generator")
        self.nf = 64 if img_size < 256 else 32
        self.nf_mlp = 256
        self.decoder_norm = 'adain'
        self.adaptive_param_getter = get_num_adain_params
        self.adaptive_param_assign = assign_adain_params
        print("GENERATOR NF : ", self.nf)
        # Downsample until a 16x16 (s0) feature map remains.
        s0 = 16
        n_downs = int(np.log2(img_size // s0))
        nf_dec = self.nf * 2 ** n_downs
        self.cnt_encoder = ContentEncoder(self.nf, n_downs, n_res, 'in', 'relu', 'reflect')
        self.decoder = Decoder(nf_dec, sty_dim, n_downs, n_res, self.decoder_norm,
                               self.decoder_norm, 'relu', 'reflect', use_sn=use_sn)
        # The MLP maps a style code to exactly the number of AdaIN params the
        # decoder needs.
        self.mlp = MLP(sty_dim, self.adaptive_param_getter(self.decoder),
                       self.nf_mlp, 3, 'none', 'relu')
        self.apply(weights_init('kaiming'))
    def forward(self, x_src, s_ref):
        """Translate x_src using style code s_ref."""
        return self.decode(self.cnt_encoder(x_src), s_ref)
    def decode(self, cnt, sty):
        # Push style-derived AdaIN (mean, std) params into the decoder, then decode.
        self.adaptive_param_assign(self.mlp(sty), self.decoder)
        return self.decoder(cnt)
    def _initialize_weights(self, mode='fan_in'):
        # Kaiming init for all conv layers (kept for API parity; __init__
        # applies weights_init instead).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode=mode, nonlinearity='relu')
                if m.bias is not None:
                    m.bias.data.zero_()
class Decoder(nn.Module):
    """Residual blocks followed by n_downs upsample+conv stages and a tanh RGB conv.

    sty_dim is unused here; style enters through the AdaIN layers' injected params.
    """
    def __init__(self, nf_dec, sty_dim, n_downs, n_res, res_norm, dec_norm, act, pad, use_sn=False):
        super(Decoder, self).__init__()
        print("Init Decoder")
        nf = nf_dec
        stages = [ResBlocks(n_res, nf, res_norm, act, pad, use_sn=use_sn)]
        for _ in range(n_downs):
            # Each stage doubles spatial size and halves the channel count.
            stages.append(nn.Upsample(scale_factor=2))
            stages.append(Conv2dBlock(nf, nf // 2, 5, 1, 2, norm=dec_norm, act=act,
                                      pad_type=pad, use_sn=use_sn))
            nf //= 2
        # Final conv maps to 3-channel output in [-1, 1] via tanh.
        stages.append(Conv2dBlock(nf, 3, 7, 1, 3, norm='none', act='tanh',
                                  pad_type=pad, use_sn=use_sn))
        self.model = nn.Sequential(*stages)
    def forward(self, x):
        return self.model(x)
class ContentEncoder(nn.Module):
    """Downsampling conv encoder ending in residual blocks; exposes out_dim channels."""
    def __init__(self, nf_cnt, n_downs, n_res, norm, act, pad, use_sn=False):
        super(ContentEncoder, self).__init__()
        print("Init ContentEncoder")
        nf = nf_cnt
        stages = [Conv2dBlock(3, nf, 7, 1, 3, norm=norm, act=act, pad_type=pad, use_sn=use_sn)]
        for _ in range(n_downs):
            # Each stage halves spatial size and doubles the channel count.
            stages.append(Conv2dBlock(nf, 2 * nf, 4, 2, 1, norm=norm, act=act,
                                      pad_type=pad, use_sn=use_sn))
            nf *= 2
        stages.append(ResBlocks(n_res, nf, norm=norm, act=act, pad_type=pad, use_sn=use_sn))
        self.model = nn.Sequential(*stages)
        self.out_dim = nf  # channel count of the encoded representation
    def forward(self, x):
        return self.model(x)
class MLP(nn.Module):
    """num_blocks-layer MLP of LinearBlocks; the final layer has no norm/activation."""
    def __init__(self, nf_in, nf_out, nf_mlp, num_blocks, norm, act, use_sn=False):
        super(MLP, self).__init__()
        nf = nf_mlp
        layers = [LinearBlock(nf_in, nf, norm=norm, act=act, use_sn=use_sn)]
        for _ in range(num_blocks - 2):
            layers.append(LinearBlock(nf, nf, norm=norm, act=act, use_sn=use_sn))
        layers.append(LinearBlock(nf, nf_out, norm='none', act='none', use_sn=use_sn))
        self.model = nn.Sequential(*layers)
    def forward(self, x):
        # Flatten everything past the batch dimension before the first linear layer.
        return self.model(x.view(x.size(0), -1))
def weights_init(init_type='gaussian'):
    """Return an `apply`-compatible initializer for Conv*/Linear* modules.

    init_type: 'gaussian' | 'xavier' | 'kaiming' | 'orthogonal' | 'default'.
    Weights are initialized per the chosen scheme; biases are zeroed.
    """
    def init_fun(m):
        name = m.__class__.__name__
        # Only touch modules whose class name starts with Conv or Linear.
        if not ((name.find('Conv') == 0 or name.find('Linear') == 0)
                and hasattr(m, 'weight')):
            return
        if init_type == 'gaussian':
            init.normal_(m.weight.data, 0.0, 0.02)
        elif init_type == 'xavier':
            init.xavier_normal_(m.weight.data, gain=math.sqrt(2))
        elif init_type == 'kaiming':
            init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
        elif init_type == 'orthogonal':
            init.orthogonal_(m.weight.data, gain=math.sqrt(2))
        elif init_type == 'default':
            pass
        else:
            assert 0, "Unsupported initialization: {}".format(init_type)
        if hasattr(m, 'bias') and m.bias is not None:
            init.constant_(m.bias.data, 0.0)
    return init_fun
def assign_adain_params(adain_params, model):
    """Distribute a flat (batch, n_params) tensor onto every AdaIN2d in `model`.

    Each AdaIN2d layer consumes 2*num_features columns: first the means
    (assigned to .bias), then the stds (assigned to .weight), both flattened;
    the remaining columns are handed to the next AdaIN2d layer.
    """
    remaining = adain_params
    for m in model.modules():
        if m.__class__.__name__ != "AdaIN2d":
            continue
        nf = m.num_features
        m.bias = remaining[:, :nf].contiguous().view(-1)
        m.weight = remaining[:, nf:2 * nf].contiguous().view(-1)
        if remaining.size(1) > 2 * nf:
            remaining = remaining[:, 2 * nf:]
def get_num_adain_params(model):
    """Count the scalar AdaIN parameters `model` needs (2 per feature: mean and std)."""
    return sum(2 * m.num_features
               for m in model.modules()
               if m.__class__.__name__ == "AdaIN2d")
###################################################################################
### GUIDING NET
# VGG layer configurations consumed by make_layers(): integer entries are
# 3x3-conv output channel counts, 'M' inserts a 2x2 max-pool.
# NOTE(review): make_layers() only special-cases 'M'; the trailing 'N' in
# 'vgg19cut' would be treated as a channel count and fail if that config is
# ever used -- confirm intended handling.
cfg = {
    'vgg11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'vgg13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'vgg16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'vgg19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
    'vgg19cut': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'N'],
}
class GuidingNet(nn.Module):
    """VGG-style encoder with two linear heads.

    forward() returns {'cont': style code, 'disc': domain logits}, or only the
    style code when sty=True; moco()/iic() expose the two heads individually.

    NOTE(review): the default config_idx=0 is not a key of `cfg` (keys are
    'vgg11'..'vgg19cut'), so callers must pass a valid key explicitly.
    """
    def __init__(self, img_size=64, output_k=None, config_idx=0):
        """
        Args:
            img_size: unused here; kept for interface compatibility.
            output_k: dict with 'cont'/'disc' head sizes; defaults to
                {'cont': 128, 'disc': 10}. (Was a mutable default argument;
                replaced with a None sentinel — same effective default.)
            config_idx: key into the module-level `cfg` dict.
        """
        super(GuidingNet, self).__init__()
        if output_k is None:
            output_k = {'cont': 128, 'disc': 10}
        # network layers setting (batch norm enabled)
        self.features = make_layers(cfg[config_idx], True)
        self.disc = nn.Linear(512, output_k['disc'])
        self.cont = nn.Linear(512, output_k['cont'])
        self._initialize_weights()
    def forward(self, x, sty=False):
        x = self.features(x)
        x = F.adaptive_avg_pool2d(x, (1, 1))
        flat = x.view(x.size(0), -1)
        cont = self.cont(flat)
        if sty:
            return cont
        disc = self.disc(flat)
        return {'cont': cont, 'disc': disc}
    def _initialize_weights(self):
        # Kaiming for convs, unit-gamma/zero-beta for batchnorms,
        # small-normal for linears; all biases zeroed.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
    def moco(self, x):
        # Continuous (style-code) head only.
        x = self.features(x)
        x = F.adaptive_avg_pool2d(x, (1, 1))
        flat = x.view(x.size(0), -1)
        cont = self.cont(flat)
        return cont
    def iic(self, x):
        # Discrete (domain-logits) head only.
        x = self.features(x)
        x = F.adaptive_avg_pool2d(x, (1, 1))
        flat = x.view(x.size(0), -1)
        disc = self.disc(flat)
        return disc
def make_layers(cfg, batch_norm=False):
    """Build a VGG-style nn.Sequential from a config list.

    Each integer entry adds a 3x3 conv (optionally followed by BatchNorm2d)
    plus ReLU; each 'M' entry adds a 2x2 max-pool. Input is assumed to have
    3 channels.
    """
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(in_channels, v, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(v))
        layers.append(nn.ReLU(inplace=False))
        in_channels = v
    return nn.Sequential(*layers)
| [
"torch.nn.InstanceNorm2d",
"torch.nn.InstanceNorm1d",
"torch.nn.init.constant_",
"torch.ones",
"torch.nn.init.kaiming_normal_",
"torch.nn.ReflectionPad2d",
"torch.nn.functional.avg_pool2d",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.Upsample",
"torch.Tensor",
"torch.nn.Linear",
"torc... | [((19115, 19137), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (19128, 19137), False, 'from torch import nn\n'), ((510, 525), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (523, 525), False, 'from torch import nn\n'), ((682, 708), 'torch.nn.Sequential', 'nn.Sequential', (['*self.model'], {}), '(*self.model)\n', (695, 708), False, 'from torch import nn\n'), ((1804, 1838), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', 'dim_in', '(3)', '(1)', '(1)'], {}), '(dim_in, dim_in, 3, 1, 1)\n', (1813, 1838), False, 'from torch import nn\n'), ((1860, 1895), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', 'dim_out', '(3)', '(1)', '(1)'], {}), '(dim_in, dim_out, 3, 1, 1)\n', (1869, 1895), False, 'from torch import nn\n'), ((2807, 2848), 'torch.nn.Linear', 'nn.Linear', (['in_dim', 'out_dim'], {'bias': 'use_bias'}), '(in_dim, out_dim, bias=use_bias)\n', (2816, 2848), False, 'from torch import nn\n'), ((5378, 5432), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_dim', 'out_dim', 'ks', 'st'], {'bias': 'self.use_bias'}), '(in_dim, out_dim, ks, st, bias=self.use_bias)\n', (5387, 5432), False, 'from torch import nn\n'), ((6164, 6211), 'torch.max', 'torch.max', (['(self.gamma * x + self.beta)', 'self.tau'], {}), '(self.gamma * x + self.beta, self.tau)\n', (6173, 6211), False, 'import torch\n'), ((7418, 7520), 'torch.nn.functional.batch_norm', 'F.batch_norm', (['x_', 'running_mean', 'running_var', 'self.weight', 'self.bias', '(True)', 'self.momentum', 'self.eps'], {}), '(x_, running_mean, running_var, self.weight, self.bias, True,\n self.momentum, self.eps)\n', (7430, 7520), True, 'import torch.nn.functional as F\n'), ((8844, 8866), 'torch.nn.Sequential', 'nn.Sequential', (['*blocks'], {}), '(*blocks)\n', (8857, 8866), False, 'from torch import nn\n'), ((12708, 12723), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (12721, 12723), False, 'from torch import nn\n'), ((13168, 13194), 'torch.nn.Sequential', 'nn.Sequential', (['*self.model'], 
{}), '(*self.model)\n', (13181, 13194), False, 'from torch import nn\n'), ((13491, 13506), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (13504, 13506), False, 'from torch import nn\n'), ((13906, 13932), 'torch.nn.Sequential', 'nn.Sequential', (['*self.model'], {}), '(*self.model)\n', (13919, 13932), False, 'from torch import nn\n'), ((14180, 14195), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (14193, 14195), False, 'from torch import nn\n'), ((14539, 14565), 'torch.nn.Sequential', 'nn.Sequential', (['*self.model'], {}), '(*self.model)\n', (14552, 14565), False, 'from torch import nn\n'), ((17276, 17308), 'torch.nn.Linear', 'nn.Linear', (['(512)', "output_k['disc']"], {}), "(512, output_k['disc'])\n", (17285, 17308), False, 'from torch import nn\n'), ((17329, 17361), 'torch.nn.Linear', 'nn.Linear', (['(512)', "output_k['cont']"], {}), "(512, output_k['cont'])\n", (17338, 17361), False, 'from torch import nn\n'), ((17477, 17509), 'torch.nn.functional.adaptive_avg_pool2d', 'F.adaptive_avg_pool2d', (['x', '(1, 1)'], {}), '(x, (1, 1))\n', (17498, 17509), True, 'import torch.nn.functional as F\n'), ((18322, 18354), 'torch.nn.functional.adaptive_avg_pool2d', 'F.adaptive_avg_pool2d', (['x', '(1, 1)'], {}), '(x, (1, 1))\n', (18343, 18354), True, 'import torch.nn.functional as F\n'), ((18507, 18539), 'torch.nn.functional.adaptive_avg_pool2d', 'F.adaptive_avg_pool2d', (['x', '(1, 1)'], {}), '(x, (1, 1))\n', (18528, 18539), True, 'import torch.nn.functional as F\n'), ((2034, 2081), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_in', 'dim_out', '(1)', '(1)', '(0)'], {'bias': '(False)'}), '(dim_in, dim_out, 1, 1, 0, bias=False)\n', (2043, 2081), False, 'from torch import nn\n'), ((2215, 2233), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (2227, 2233), True, 'import torch.nn.functional as F\n'), ((2376, 2394), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x', '(2)'], {}), '(x, 2)\n', (2388, 2394), True, 'import 
torch.nn.functional as F\n'), ((2890, 2921), 'torch.nn.utils.spectral_norm', 'nn.utils.spectral_norm', (['self.fc'], {}), '(self.fc)\n', (2912, 2921), False, 'from torch import nn\n'), ((3034, 3058), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['norm_dim'], {}), '(norm_dim)\n', (3048, 3058), False, 'from torch import nn\n'), ((3366, 3387), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3373, 3387), False, 'from torch import nn\n'), ((4240, 4267), 'torch.nn.ReflectionPad2d', 'nn.ReflectionPad2d', (['padding'], {}), '(padding)\n', (4258, 4267), False, 'from torch import nn\n'), ((4632, 4656), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['norm_dim'], {}), '(norm_dim)\n', (4646, 4656), False, 'from torch import nn\n'), ((5036, 5057), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5043, 5057), False, 'from torch import nn\n'), ((5476, 5509), 'torch.nn.utils.spectral_norm', 'nn.utils.spectral_norm', (['self.conv'], {}), '(self.conv)\n', (5498, 5509), False, 'from torch import nn\n'), ((5842, 5876), 'torch.zeros', 'torch.zeros', (['(1)', 'num_features', '(1)', '(1)'], {}), '(1, num_features, 1, 1)\n', (5853, 5876), False, 'import torch\n'), ((5912, 5945), 'torch.ones', 'torch.ones', (['(1)', 'num_features', '(1)', '(1)'], {}), '(1, num_features, 1, 1)\n', (5922, 5945), False, 'import torch\n'), ((5980, 6014), 'torch.zeros', 'torch.zeros', (['(1)', 'num_features', '(1)', '(1)'], {}), '(1, num_features, 1, 1)\n', (5991, 6014), False, 'import torch\n'), ((8286, 8315), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', 'dim_in', '(3)', '(1)', '(1)'], {}), '(3, dim_in, 3, 1, 1)\n', (8295, 8315), False, 'from torch import nn\n'), ((8649, 8666), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (8661, 8666), False, 'from torch import nn\n'), ((8687, 8723), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_out', 'dim_out', '(4)', '(1)', '(0)'], {}), '(dim_out, dim_out, 4, 1, 0)\n', (8696, 8723), False, 'from torch import 
nn\n'), ((8744, 8761), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (8756, 8761), False, 'from torch import nn\n'), ((8782, 8822), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim_out', 'num_domains', '(1)', '(1)', '(0)'], {}), '(dim_out, num_domains, 1, 1, 0)\n', (8791, 8822), False, 'from torch import nn\n'), ((11414, 11437), 'numpy.log2', 'np.log2', (['(img_size // s0)'], {}), '(img_size // s0)\n', (11421, 11437), True, 'import numpy as np\n'), ((18842, 18893), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'v'], {'kernel_size': '(3)', 'padding': '(1)'}), '(in_channels, v, kernel_size=3, padding=1)\n', (18851, 18893), False, 'from torch import nn\n'), ((3110, 3137), 'torch.nn.InstanceNorm1d', 'nn.InstanceNorm1d', (['norm_dim'], {}), '(norm_dim)\n', (3127, 3137), False, 'from torch import nn\n'), ((3447, 3478), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (3459, 3478), False, 'from torch import nn\n'), ((4329, 4357), 'torch.nn.ReplicationPad2d', 'nn.ReplicationPad2d', (['padding'], {}), '(padding)\n', (4348, 4357), False, 'from torch import nn\n'), ((4708, 4735), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['norm_dim'], {}), '(norm_dim)\n', (4725, 4735), False, 'from torch import nn\n'), ((5117, 5148), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (5129, 5148), False, 'from torch import nn\n'), ((6626, 6652), 'torch.Tensor', 'torch.Tensor', (['num_features'], {}), '(num_features)\n', (6638, 6652), False, 'import torch\n'), ((6691, 6717), 'torch.Tensor', 'torch.Tensor', (['num_features'], {}), '(num_features)\n', (6703, 6717), False, 'import torch\n'), ((6880, 6905), 'torch.zeros', 'torch.zeros', (['num_features'], {}), '(num_features)\n', (6891, 6905), False, 'import torch\n'), ((6955, 6979), 'torch.ones', 'torch.ones', (['num_features'], {}), '(num_features)\n', (6965, 6979), False, 'import torch\n'), ((8343, 8362), 'numpy.log2', 
'np.log2', (['image_size'], {}), '(image_size)\n', (8350, 8362), True, 'import numpy as np\n'), ((9604, 9669), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': 'mode', 'nonlinearity': '"""relu"""'}), "(m.weight, mode=mode, nonlinearity='relu')\n", (9627, 9669), False, 'from torch import nn\n'), ((10031, 10069), 'torch.nn.init.normal_', 'init.normal_', (['m.weight.data', '(0.0)', '(0.02)'], {}), '(m.weight.data, 0.0, 0.02)\n', (10043, 10069), True, 'import torch.nn.init as init\n'), ((10635, 10667), 'torch.nn.init.constant_', 'init.constant_', (['m.bias.data', '(0.0)'], {}), '(m.bias.data, 0.0)\n', (10649, 10667), True, 'import torch.nn.init as init\n'), ((12322, 12387), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': 'mode', 'nonlinearity': '"""relu"""'}), "(m.weight, mode=mode, nonlinearity='relu')\n", (12345, 12387), False, 'from torch import nn\n'), ((12871, 12898), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)'}), '(scale_factor=2)\n', (12882, 12898), False, 'from torch import nn\n'), ((14919, 14957), 'torch.nn.init.normal_', 'init.normal_', (['m.weight.data', '(0.0)', '(0.02)'], {}), '(m.weight.data, 0.0, 0.02)\n', (14931, 14957), True, 'import torch.nn.init as init\n'), ((15523, 15555), 'torch.nn.init.constant_', 'init.constant_', (['m.bias.data', '(0.0)'], {}), '(m.bias.data, 0.0)\n', (15537, 15555), True, 'import torch.nn.init as init\n'), ((17819, 17889), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_out"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_out', nonlinearity='relu')\n", (17842, 17889), False, 'from torch import nn\n'), ((18768, 18805), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (18780, 18805), False, 'from torch import nn\n'), ((2518, 2535), 'torch.tensor', 'torch.tensor', (['(2.0)'], {}), '(2.0)\n', (2530, 2535), False, 'import 
torch\n'), ((2571, 2588), 'torch.tensor', 'torch.tensor', (['(2.0)'], {}), '(2.0)\n', (2583, 2588), False, 'import torch\n'), ((3537, 3546), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (3544, 3546), False, 'from torch import nn\n'), ((4414, 4435), 'torch.nn.ZeroPad2d', 'nn.ZeroPad2d', (['padding'], {}), '(padding)\n', (4426, 4435), False, 'from torch import nn\n'), ((5207, 5216), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (5214, 5216), False, 'from torch import nn\n'), ((6094, 6138), 'torch.mean', 'torch.mean', (['(x ** 2)'], {'dim': '[2, 3]', 'keepdim': '(True)'}), '(x ** 2, dim=[2, 3], keepdim=True)\n', (6104, 6138), False, 'import torch\n'), ((17949, 17977), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (17966, 17977), False, 'from torch import nn\n'), ((18042, 18072), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (18059, 18072), False, 'from torch import nn\n'), ((18089, 18117), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (18106, 18117), False, 'from torch import nn\n'), ((18956, 18973), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['v'], {}), '(v)\n', (18970, 18973), False, 'from torch import nn\n'), ((18975, 18997), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (18982, 18997), False, 'from torch import nn\n'), ((19052, 19074), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(False)'}), '(inplace=False)\n', (19059, 19074), False, 'from torch import nn\n'), ((10237, 10292), 'torch.nn.init.kaiming_normal_', 'init.kaiming_normal_', (['m.weight.data'], {'a': '(0)', 'mode': '"""fan_in"""'}), "(m.weight.data, a=0, mode='fan_in')\n", (10257, 10292), True, 'import torch.nn.init as init\n'), ((15125, 15180), 'torch.nn.init.kaiming_normal_', 'init.kaiming_normal_', (['m.weight.data'], {'a': '(0)', 'mode': '"""fan_in"""'}), "(m.weight.data, a=0, mode='fan_in')\n", (15145, 15180), True, 'import 
torch.nn.init as init\n'), ((18177, 18211), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight', '(0)', '(0.01)'], {}), '(m.weight, 0, 0.01)\n', (18192, 18211), False, 'from torch import nn\n'), ((18228, 18256), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (18245, 18256), False, 'from torch import nn\n'), ((10166, 10178), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (10175, 10178), False, 'import math\n'), ((15054, 15066), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (15063, 15066), False, 'import math\n'), ((10390, 10402), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (10399, 10402), False, 'import math\n'), ((15278, 15290), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (15287, 15290), False, 'import math\n')] |
"""
Step 3: Download the results, train and evaluate the model, and upload it to S3.
The S3 part requires you to have AWS configured in your engine/conf/config.json file.
It may also be necessary to call GlobalInit() before upload to ensure the config is loaded
"""
import logging
from pathlib import Path
import numpy as np
import pandas as pd
from catboost import sum_models
from metaspace.sm_annotation_utils import SMInstance
from sm.engine.util import GlobalInit
from sm.engine.annotation.fdr import run_fdr_ranking, run_fdr_ranking_labeled
from sm.engine.annotation.scoring_model import (
add_derived_features,
upload_catboost_scoring_model,
save_scoring_model_to_db,
)
from sm.fdr_engineering.train_model import (
get_ranking_data,
get_cv_splits,
cv_train,
get_many_fdr_diagnostics_remote,
train_catboost_model,
)
logger = logging.getLogger(__name__)

# Suffix appended to each source dataset id to locate its training copy.
DST_SUFFIX = '_ml_training'
# NOTE(review): only data_dir.parent ("local") is created below; data_dir itself
# ("local/ml_scoring") is assumed to exist before to_parquet writes into it —
# possibly mkdir was intended on data_dir. Confirm before changing.
data_dir = Path('local/ml_scoring').resolve()  # the "local" subdirectory is .gitignored
data_dir.parent.mkdir(parents=True, exist_ok=True)
# One dataset id per line; the "_ml_training" copies are the ones downloaded.
dataset_ids_file = data_dir / 'dataset_ids.txt'
dataset_ids = [ds_id.strip() for ds_id in dataset_ids_file.open().readlines()]
dst_dataset_ids = [ds_id + DST_SUFFIX for ds_id in dataset_ids]
# Full feature set extracted for ranking; the model may train on a subset.
all_features = [
    'chaos',
    'spatial',
    'spectral',
    # _abserr suffix applies the 1-abs(val) transformation
    'mz_err_abs_abserr',
    'mz_err_rel_abserr',
    # _fdr suffix applies the FDR transformation
    'chaos_fdr',
    'spatial_fdr',
    'spectral_fdr',
    'mz_err_abs_fdr',
    'mz_err_rel_fdr',
]
#%% Download the data or load it from a local cache file
downloaded_data_file = data_dir / 'metrics_df_fdr20.parquet'
# Set True to bypass the parquet cache and re-fetch from the remote instance.
FORCE_REDOWNLOAD = False
if downloaded_data_file.exists() and not FORCE_REDOWNLOAD:
    metrics_df = pd.read_parquet(downloaded_data_file)
    logger.info(f'Loaded {downloaded_data_file}')
else:
    sm_dst = SMInstance(config_path=str(Path.home() / '.metaspace.local'))
    # ds_diags is an iterable to save temp memory
    ds_diags = get_many_fdr_diagnostics_remote(sm_dst, dst_dataset_ids)
    metrics_df = get_ranking_data(ds_diags, all_features)
    metrics_df.to_parquet(downloaded_data_file)
#%% Recalculate FDR fields
def calc_fdr_fields(df, n_decoys=20):
    """Recompute the derived '*_fdr' feature columns for one dataset's rows.

    Splits the rows into targets (``target == 1.0``) and decoys, runs
    ``add_derived_features`` over them, and returns just the FDR-derived
    columns (suffixed with 'g') indexed like the input, ready to be joined
    back onto the metrics dataframe.

    Args:
        df: metrics rows for a single dataset (must contain 'target' and
            'modifier' columns plus the feature columns in ``all_features``).
        n_decoys: total number of decoys sampled per target formula.
            Defaults to 20, preserving the previously hard-coded value
            (this resolves the old FIXME by making it a parameter).

    Returns:
        DataFrame of the '*_fdr' columns with a 'g' suffix appended.
    """
    is_target = df.target == 1.0
    target_df = df[is_target].copy()
    decoy_df = df[~is_target].copy()
    # Decoys sampled per target modifier; target_df already holds exactly the
    # target == 1 rows, so reuse it rather than re-filtering df.
    decoy_sample_size = n_decoys / target_df.modifier.nunique()
    add_derived_features(target_df, decoy_df, decoy_sample_size, all_features)
    fdr_cols = [c for c in target_df.columns if c.endswith('_fdr')]
    return pd.concat([target_df, decoy_df])[fdr_cols].add_suffix('g')
# NOTE: This groups by ds_id instead of group_name when running the FDR ranking. This means all
# adducts are combined into a single ranking
fdr_fields_df = pd.concat([calc_fdr_fields(df) for ds_id, df in metrics_df.groupby('ds_id')])
# Drop any stale FDR columns before joining the freshly computed ones.
train_metrics_df = metrics_df = metrics_df.drop(
    columns=fdr_fields_df.columns, errors='ignore'
).join(fdr_fields_df)
#%% Make a smaller dataset for training, using this opportunity to balance targets & decoys
# (Skip this unless using very expensive loss functions like YetiRank)
# def subsample(df, max_group_size=5000):
#     target = df.target == 1.0
#     target_df = df[target].copy()
#     decoy_df = df[~target].copy()
#     return pd.concat(
#         [
#             target_df.sample(n=min(len(target_df), max(0, max_group_size - len(decoy_df)))),
#             decoy_df.sample(n=min(len(decoy_df), max(0, max_group_size - len(target_df)))),
#         ]
#     )
#
#
# train_metrics_df = pd.concat(
#     [subsample(df) for ds_id, df in metrics_df.groupby('group_name', observed=True)]
# )
#%% Model parameters
# Subset of all_features actually fed to the model (FDR-derived features excluded).
features = [
    'chaos',
    'spatial',
    'spectral',
    'mz_err_abs_abserr',
    'mz_err_rel_abserr',
]
cb_params = {
    # 100 iterations is usually consistent enough for comparing methods, but typically the best
    # for the eval set is around ~600
    'iterations': 1000,
    # Ranking loss functions work best: https://catboost.ai/en/docs/concepts/loss-functions-ranking
    # Be careful about YetiRank - it doesn't support max_pairs and has a tendency to suddenly eat
    # all your RAM
    # CatBoost docs say PairLogitPairwise is better than PairLogit, but I found it was slower and
    # gave worse results
    'loss_function': 'PairLogit:max_pairs=10000',
    'use_best_model': True,
    # Non-FDR features are designed so that higher values are better.
    # Enforce this as a monotonicity constraint to reduce overfitting & improve interpretability.
    # This seems to reduce accuracy, but I feel it's a necessary constraint unless
    # we can find an explanation for why a worse score could increase the likelihood
    # of an annotation.
    'monotone_constraints': {i: 0 if '_fdr' in f else 1 for i, f in enumerate(features)},
    'verbose': True,
    # 'task_type': 'GPU',
}
#%% Evaluate with cross-validation if desired
# 5-fold split over dataset ids so eval datasets are never seen in training.
splits = get_cv_splits(metrics_df.ds_id.unique(), 5)
results = cv_train(train_metrics_df, splits, features, cb_params)
# Sum to make an ensemble model - sometimes it's interesting for debugging
ens_model = sum_models(results.model.to_list())
#%% Make final model from all data
final_params = {
    **cb_params,
    'iterations': 1000,
    'loss_function': 'PairLogit:max_pairs=10000',  # Reduce max_pairs if CatBoost complains
    'use_best_model': False,  # Must be disabled when eval set is None
    # CatBoost quantizes all inputs into bins, and border_count determines their granularity.
    # 254 is the default, higher gives slightly better accuracy but is slower to train
    'border_count': 1024,
}
# Final model trains on ALL datasets (no held-out eval set).
final_model = train_catboost_model(
    metrics_df, metrics_df.ds_id.unique(), None, features, final_params
)
#%% Evaluate the model and print a summary
def eval_model(model, metrics_df):
    """Compare annotation counts at 10%/20% FDR for MSM vs the model's scores.

    For every (dataset, adduct) group, ranks annotations once by the classic
    MSM product (chaos * spatial * spectral) and once by the model's predicted
    score, counts annotations passing the 0.1 and 0.2 FDR thresholds, then
    aggregates the counts per dataset and adds preds/msm ratio columns.

    Returns:
        DataFrame with one row per ds_id: msm_fdr10/20, preds_fdr10/20,
        delta_fdr10/20 (ratio of model-passed to MSM-passed annotations).
    """
    rows = []
    # observed=True skips empty group_name categories, which would otherwise
    # produce empty per-group frames.
    for _, group_df in metrics_df.groupby('group_name', observed=True):
        msm_score = group_df.chaos * group_df.spatial * group_df.spectral
        pred_score = pd.Series(model.predict(group_df[features]), index=group_df.index)
        is_target = group_df.target == 1.0
        fdrs_by_msm = run_fdr_ranking(msm_score[is_target], msm_score[~is_target], 20, True, True)
        fdrs_by_pred = run_fdr_ranking(pred_score[is_target], pred_score[~is_target], 20, True, True)
        row = {'ds_id': group_df.ds_id.iloc[0]}
        for pct, threshold in (('10', 0.1), ('20', 0.2)):
            row[f'msm_fdr{pct}'] = np.count_nonzero(fdrs_by_msm <= threshold)
            row[f'preds_fdr{pct}'] = np.count_nonzero(fdrs_by_pred <= threshold)
        rows.append(row)
    stats = pd.DataFrame(rows).groupby('ds_id').sum().reset_index()
    for pct in ('10', '20'):
        stats[f'delta_fdr{pct}'] = stats[f'preds_fdr{pct}'] / stats[f'msm_fdr{pct}']
    return stats
# Cross-validated stats
# stats_df = pd.concat(
#     [
#         eval_model(model, metrics_df[metrics_df.ds_id.isin(eval_ds_ids)])
#         for (_, eval_ds_ids), model in zip(splits, results.model)
#     ]
# )
# Ensemble model stats
# stats_df = eval_model(ens_model, metrics_df)
# Final model stats (NOTE: This model is trained on the eval set, so this should not be used for
# reporting, only diagnostics)
stats_df = eval_model(final_model, metrics_df)
print(stats_df.delta_fdr10.describe())
# delta_fdr10 < 1 means the model passed fewer annotations than plain MSM.
n_fewer_anns = stats_df.delta_fdr10[stats_df.delta_fdr10 < 1].count()
print(
    f'Datasets with fewer annotations: {n_fewer_anns}/{len(stats_df)}'
    f'={n_fewer_anns / len(stats_df):.2%}'
)
#%% Export raw results for comparison with other implementations
# Drop bulky per-annotation arrays/stats not needed for the comparison export.
export_df = metrics_df.drop(
    columns=[
        'total_iso_ints',
        'min_iso_ints',
        'max_iso_ints',
        'mz_mean',
        'mz_stddev',
        'theo_mz',
        'theo_ints',
        'formula_i',
    ],
    errors='ignore',
)
export_df['pred_score'] = final_model.predict(export_df[features])
# Per-group FDR from the model's scores, using the labeled target/decoy split.
export_df['pred_fdr'] = pd.concat(
    [
        run_fdr_ranking_labeled(grp.pred_score, grp.target == 1.0, 20, True, True)
        for _, grp in export_df.groupby('group_name')[['pred_score', 'target']]
    ]
)
export_df.to_csv('local/ml_scoring/prod_impl.csv', index=False)
#%% Save model to S3
MODEL_NAME = 'v3_default'
# Remove unwanted fields from metrics_df for saving training data
train_data = metrics_df[[*features, 'target', 'group_name', 'formula', 'modifier', 'decoy_i']]
params = upload_catboost_scoring_model(
    final_model,  # '../fdr-models/model-2022-01-05T13-45-26.947188-416b1311.cbm',
    'sm-engine',
    f'scoring_models/{MODEL_NAME}',
    is_public=True,
    train_data=train_data,
)
print(params)
# Update DB with model (if running a local METASPACE environment)
GlobalInit()
save_scoring_model_to_db(MODEL_NAME, 'catboost', params)
| [
"pandas.DataFrame",
"numpy.count_nonzero",
"sm.engine.annotation.scoring_model.save_scoring_model_to_db",
"sm.engine.annotation.scoring_model.upload_catboost_scoring_model",
"pathlib.Path.home",
"sm.engine.annotation.scoring_model.add_derived_features",
"sm.engine.util.GlobalInit",
"sm.engine.annotati... | [((867, 894), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (884, 894), False, 'import logging\n'), ((5072, 5127), 'sm.fdr_engineering.train_model.cv_train', 'cv_train', (['train_metrics_df', 'splits', 'features', 'cb_params'], {}), '(train_metrics_df, splits, features, cb_params)\n', (5080, 5127), False, 'from sm.fdr_engineering.train_model import get_ranking_data, get_cv_splits, cv_train, get_many_fdr_diagnostics_remote, train_catboost_model\n'), ((8713, 8843), 'sm.engine.annotation.scoring_model.upload_catboost_scoring_model', 'upload_catboost_scoring_model', (['final_model', '"""sm-engine"""', 'f"""scoring_models/{MODEL_NAME}"""'], {'is_public': '(True)', 'train_data': 'train_data'}), "(final_model, 'sm-engine',\n f'scoring_models/{MODEL_NAME}', is_public=True, train_data=train_data)\n", (8742, 8843), False, 'from sm.engine.annotation.scoring_model import add_derived_features, upload_catboost_scoring_model, save_scoring_model_to_db\n'), ((9010, 9022), 'sm.engine.util.GlobalInit', 'GlobalInit', ([], {}), '()\n', (9020, 9022), False, 'from sm.engine.util import GlobalInit\n'), ((9023, 9079), 'sm.engine.annotation.scoring_model.save_scoring_model_to_db', 'save_scoring_model_to_db', (['MODEL_NAME', '"""catboost"""', 'params'], {}), "(MODEL_NAME, 'catboost', params)\n", (9047, 9079), False, 'from sm.engine.annotation.scoring_model import add_derived_features, upload_catboost_scoring_model, save_scoring_model_to_db\n'), ((1796, 1833), 'pandas.read_parquet', 'pd.read_parquet', (['downloaded_data_file'], {}), '(downloaded_data_file)\n', (1811, 1833), True, 'import pandas as pd\n'), ((2031, 2087), 'sm.fdr_engineering.train_model.get_many_fdr_diagnostics_remote', 'get_many_fdr_diagnostics_remote', (['sm_dst', 'dst_dataset_ids'], {}), '(sm_dst, dst_dataset_ids)\n', (2062, 2087), False, 'from sm.fdr_engineering.train_model import get_ranking_data, get_cv_splits, cv_train, get_many_fdr_diagnostics_remote, 
train_catboost_model\n'), ((2105, 2145), 'sm.fdr_engineering.train_model.get_ranking_data', 'get_ranking_data', (['ds_diags', 'all_features'], {}), '(ds_diags, all_features)\n', (2121, 2145), False, 'from sm.fdr_engineering.train_model import get_ranking_data, get_cv_splits, cv_train, get_many_fdr_diagnostics_remote, train_catboost_model\n'), ((2486, 2560), 'sm.engine.annotation.scoring_model.add_derived_features', 'add_derived_features', (['target_df', 'decoy_df', 'decoy_sample_size', 'all_features'], {}), '(target_df, decoy_df, decoy_sample_size, all_features)\n', (2506, 2560), False, 'from sm.engine.annotation.scoring_model import add_derived_features, upload_catboost_scoring_model, save_scoring_model_to_db\n'), ((935, 959), 'pathlib.Path', 'Path', (['"""local/ml_scoring"""'], {}), "('local/ml_scoring')\n", (939, 959), False, 'from pathlib import Path\n'), ((6343, 6409), 'sm.engine.annotation.fdr.run_fdr_ranking', 'run_fdr_ranking', (['grp_msm[target]', 'grp_msm[~target]', '(20)', '(True)', '(True)'], {}), '(grp_msm[target], grp_msm[~target], 20, True, True)\n', (6358, 6409), False, 'from sm.engine.annotation.fdr import run_fdr_ranking, run_fdr_ranking_labeled\n'), ((6431, 6501), 'sm.engine.annotation.fdr.run_fdr_ranking', 'run_fdr_ranking', (['grp_preds[target]', 'grp_preds[~target]', '(20)', '(True)', '(True)'], {}), '(grp_preds[target], grp_preds[~target], 20, True, True)\n', (6446, 6501), False, 'from sm.engine.annotation.fdr import run_fdr_ranking, run_fdr_ranking_labeled\n'), ((8266, 8340), 'sm.engine.annotation.fdr.run_fdr_ranking_labeled', 'run_fdr_ranking_labeled', (['grp.pred_score', '(grp.target == 1.0)', '(20)', '(True)', '(True)'], {}), '(grp.pred_score, grp.target == 1.0, 20, True, True)\n', (8289, 8340), False, 'from sm.engine.annotation.fdr import run_fdr_ranking, run_fdr_ranking_labeled\n'), ((2641, 2673), 'pandas.concat', 'pd.concat', (['[target_df, decoy_df]'], {}), '([target_df, decoy_df])\n', (2650, 2673), True, 'import pandas as pd\n'), 
((6657, 6690), 'numpy.count_nonzero', 'np.count_nonzero', (['(msm_fdrs <= 0.1)'], {}), '(msm_fdrs <= 0.1)\n', (6673, 6690), True, 'import numpy as np\n'), ((6723, 6758), 'numpy.count_nonzero', 'np.count_nonzero', (['(preds_fdrs <= 0.1)'], {}), '(preds_fdrs <= 0.1)\n', (6739, 6758), True, 'import numpy as np\n'), ((6789, 6822), 'numpy.count_nonzero', 'np.count_nonzero', (['(msm_fdrs <= 0.2)'], {}), '(msm_fdrs <= 0.2)\n', (6805, 6822), True, 'import numpy as np\n'), ((6855, 6890), 'numpy.count_nonzero', 'np.count_nonzero', (['(preds_fdrs <= 0.2)'], {}), '(preds_fdrs <= 0.2)\n', (6871, 6890), True, 'import numpy as np\n'), ((1930, 1941), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (1939, 1941), False, 'from pathlib import Path\n'), ((6929, 6946), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {}), '(res)\n', (6941, 6946), True, 'import pandas as pd\n')] |
import h5py
import numpy as np
# Structured dtype emulating a "complex float16": paired half-precision real
# and imaginary parts. Numpy has no native complex32 type.
complex32 = np.dtype([('r', np.float16), ('i', np.float16)])


def to_complex32(z: np.ndarray) -> np.ndarray:
    """Pack a complex (or real) array into the structured `complex32` dtype.

    Fix: the parameter was annotated ``np.array``, which is a function, not a
    type — the correct annotation is ``np.ndarray``.

    Args:
        z: numpy array whose ``.real``/``.imag`` parts should be stored.

    Returns:
        An array of the same shape with dtype ``complex32``; both components
        are reduced to float16 precision.
    """
    zf = np.zeros(z.shape, dtype=complex32)
    zf['r'] = z.real
    zf['i'] = z.imag
    return zf
def read_c4_dataset_as_c8(ds: h5py.Dataset, key=np.s_[...]):
    """Read a complex-float16 HDF5 dataset slice as a numpy.complex64 array.

    Reading through the ``complex32`` structured view sidesteps h5py/numpy
    dtype bugs, and performing the float16 -> float32 widening in numpy is
    about 10x faster than letting HDF5 do the conversion.
    """
    # Reading while the dataset is viewed as complex32 avoids the h5py error:
    # TypeError: data type '<c4' not understood
    with ds.astype(complex32):
        raw = ds[key]
    # Matching structured float32 dtype so the widening cast is safe.
    paired_f32 = np.dtype([('r', np.float32), ('i', np.float32)])
    # Widen each half, then reinterpret the (r, i) pairs as native complex64.
    return raw.astype(paired_f32).view(np.complex64)
| [
"numpy.dtype",
"numpy.zeros"
] | [((44, 92), 'numpy.dtype', 'np.dtype', (["[('r', np.float16), ('i', np.float16)]"], {}), "([('r', np.float16), ('i', np.float16)])\n", (52, 92), True, 'import numpy as np\n'), ((135, 169), 'numpy.zeros', 'np.zeros', (['z.shape'], {'dtype': 'complex32'}), '(z.shape, dtype=complex32)\n', (143, 169), True, 'import numpy as np\n'), ((738, 786), 'numpy.dtype', 'np.dtype', (["[('r', np.float32), ('i', np.float32)]"], {}), "([('r', np.float32), ('i', np.float32)])\n", (746, 786), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import mpl_toolkits
# Interactive menu loop: load three housing CSVs and plot the chosen view.
# NOTE(review): uses raw_input(), so this script targets Python 2 — confirm
# before running under Python 3 (where it would need input()).
plt.rcParams['font.size'] = 9.0
arr=[]
data = pd.read_csv("kc_house_data.csv")
data2= pd.read_csv("datafile.csv")
data3=pd.read_csv("houseShortage.csv")
while(1):
 print("Choose the parameters to visualize\n")
 print(" 1-Bedrooms vs count\n 2-Price vs Latitude\n 3-Price vs Square Feet\n 4-Bedroom vs price\n 5-Price vs location(postal code)\n 6-Know your scheme\n 7-tate vs Estimated number of households\n 8-Housing 2012\n key board interrupt-exit\n")
 vch=raw_input("enter the choice")
 # Option 1: bar chart of bedroom-count frequencies.
 if(vch=="1"):
  data['bedrooms'].value_counts().plot(kind='bar')
  plt.title('number of Bedroom')
  plt.xlabel('Bedrooms')
  plt.ylabel('Count')
  sns.despine
  plt.show()
 # Option 2: scatter of price against latitude.
 elif(vch=="2"):
  plt.scatter(data.price,data.lat)
  plt.xlabel("Price")
  plt.ylabel('Latitude')
  plt.title("Latitude vs Price")
  plt.show()
 # Option 3: scatter of price against living area.
 elif(vch=="3"):
  plt.scatter(data.price,data.sqft_living)
  plt.title("Price vs Square Feet")
  plt.show()
 # Option 4: scatter of bedroom count against price.
 elif(vch=="4"):
  plt.scatter(data.bedrooms,data.price)
  plt.title("Bedroom and Price ")
  plt.xlabel("Bedrooms")
  plt.ylabel("Price")
  plt.show()
  sns.despine
  plt.show()
 # Option 5: scatter of price by postal code.
 elif(vch=="5"):
  plt.scatter(data.zipcode,data.price)
  plt.title("Which is the pricey location by zipcode?")
  plt.show()
 # Option 6: scheme start year vs scheme name.
 elif(vch=="6"):
  plt.scatter(data2.Started_in,data2.Name_of_the_Scheme)
  plt.title("Housing schemes in India")
  plt.show()
 # Option 7: grouped bar chart of household estimates per state.
 elif(vch=="7"):
  N = data3.Name_State.count()
  print(N)
  ind = np.arange(N)
  width = 0.22
  plt.bar(ind, data3.Estimated_households_BPL_Urban, width, label='Houses below poverty line')
  plt.bar(ind + width,data3.Number_Katcha_households, width,
    label=' katcha houses')
  plt.ylabel('Hoseholds')
  plt.xlabel('States')
  plt.title('State vs Estimated number of households')
  plt.xticks(ind + width / 2,data3.Name_State )
  plt.legend(loc='best')
  plt.show()
 # Option 8: pie chart of 2012 housing shortage by state.
 elif(vch=="8"):
  labels = data3.Name_State
  sizes = data3.State_wise_Dist_Housing_shortage2012_inMllions
  #explode = (0, 0.1, 0, 0)  # only "explode" the 2nd slice (i.e. 'Hogs')
  fig1, ax1 = plt.subplots()
  ax1.pie(sizes, labels=labels, autopct='%1.1f%%',
        shadow=False, startangle=4,pctdistance=1.1, labeldistance=1.2)
  ax1.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
  plt.title("Housing 2012")
  plt.show()
 # Any other input: leave the loop by exiting the interpreter.
 else:
  exit()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.legend",
"numpy.arange",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots"
] | [((161, 193), 'pandas.read_csv', 'pd.read_csv', (['"""kc_house_data.csv"""'], {}), "('kc_house_data.csv')\n", (172, 193), True, 'import pandas as pd\n'), ((201, 228), 'pandas.read_csv', 'pd.read_csv', (['"""datafile.csv"""'], {}), "('datafile.csv')\n", (212, 228), True, 'import pandas as pd\n'), ((235, 267), 'pandas.read_csv', 'pd.read_csv', (['"""houseShortage.csv"""'], {}), "('houseShortage.csv')\n", (246, 267), True, 'import pandas as pd\n'), ((672, 702), 'matplotlib.pyplot.title', 'plt.title', (['"""number of Bedroom"""'], {}), "('number of Bedroom')\n", (681, 702), True, 'import matplotlib.pyplot as plt\n'), ((705, 727), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Bedrooms"""'], {}), "('Bedrooms')\n", (715, 727), True, 'import matplotlib.pyplot as plt\n'), ((730, 749), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Count"""'], {}), "('Count')\n", (740, 749), True, 'import matplotlib.pyplot as plt\n'), ((766, 776), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (774, 776), True, 'import matplotlib.pyplot as plt\n'), ((796, 829), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data.price', 'data.lat'], {}), '(data.price, data.lat)\n', (807, 829), True, 'import matplotlib.pyplot as plt\n'), ((831, 850), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Price"""'], {}), "('Price')\n", (841, 850), True, 'import matplotlib.pyplot as plt\n'), ((853, 875), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Latitude"""'], {}), "('Latitude')\n", (863, 875), True, 'import matplotlib.pyplot as plt\n'), ((878, 908), 'matplotlib.pyplot.title', 'plt.title', (['"""Latitude vs Price"""'], {}), "('Latitude vs Price')\n", (887, 908), True, 'import matplotlib.pyplot as plt\n'), ((911, 921), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (919, 921), True, 'import matplotlib.pyplot as plt\n'), ((942, 983), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data.price', 'data.sqft_living'], {}), '(data.price, data.sqft_living)\n', (953, 983), True, 'import 
matplotlib.pyplot as plt\n'), ((985, 1018), 'matplotlib.pyplot.title', 'plt.title', (['"""Price vs Square Feet"""'], {}), "('Price vs Square Feet')\n", (994, 1018), True, 'import matplotlib.pyplot as plt\n'), ((1021, 1031), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1029, 1031), True, 'import matplotlib.pyplot as plt\n'), ((1052, 1090), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data.bedrooms', 'data.price'], {}), '(data.bedrooms, data.price)\n', (1063, 1090), True, 'import matplotlib.pyplot as plt\n'), ((1092, 1123), 'matplotlib.pyplot.title', 'plt.title', (['"""Bedroom and Price """'], {}), "('Bedroom and Price ')\n", (1101, 1123), True, 'import matplotlib.pyplot as plt\n'), ((1126, 1148), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Bedrooms"""'], {}), "('Bedrooms')\n", (1136, 1148), True, 'import matplotlib.pyplot as plt\n'), ((1151, 1170), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (1161, 1170), True, 'import matplotlib.pyplot as plt\n'), ((1173, 1183), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1181, 1183), True, 'import matplotlib.pyplot as plt\n'), ((1200, 1210), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1208, 1210), True, 'import matplotlib.pyplot as plt\n'), ((1231, 1268), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data.zipcode', 'data.price'], {}), '(data.zipcode, data.price)\n', (1242, 1268), True, 'import matplotlib.pyplot as plt\n'), ((1270, 1323), 'matplotlib.pyplot.title', 'plt.title', (['"""Which is the pricey location by zipcode?"""'], {}), "('Which is the pricey location by zipcode?')\n", (1279, 1323), True, 'import matplotlib.pyplot as plt\n'), ((1326, 1336), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1334, 1336), True, 'import matplotlib.pyplot as plt\n'), ((1357, 1412), 'matplotlib.pyplot.scatter', 'plt.scatter', (['data2.Started_in', 'data2.Name_of_the_Scheme'], {}), '(data2.Started_in, data2.Name_of_the_Scheme)\n', (1368, 1412), True, 
'import matplotlib.pyplot as plt\n'), ((1415, 1452), 'matplotlib.pyplot.title', 'plt.title', (['"""Housing schemes in India"""'], {}), "('Housing schemes in India')\n", (1424, 1452), True, 'import matplotlib.pyplot as plt\n'), ((1455, 1465), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1463, 1465), True, 'import matplotlib.pyplot as plt\n'), ((1534, 1546), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1543, 1546), True, 'import numpy as np\n'), ((1570, 1667), 'matplotlib.pyplot.bar', 'plt.bar', (['ind', 'data3.Estimated_households_BPL_Urban', 'width'], {'label': '"""Houses below poverty line"""'}), "(ind, data3.Estimated_households_BPL_Urban, width, label=\n 'Houses below poverty line')\n", (1577, 1667), True, 'import matplotlib.pyplot as plt\n'), ((1665, 1753), 'matplotlib.pyplot.bar', 'plt.bar', (['(ind + width)', 'data3.Number_Katcha_households', 'width'], {'label': '""" katcha houses"""'}), "(ind + width, data3.Number_Katcha_households, width, label=\n ' katcha houses')\n", (1672, 1753), True, 'import matplotlib.pyplot as plt\n'), ((1757, 1780), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Hoseholds"""'], {}), "('Hoseholds')\n", (1767, 1780), True, 'import matplotlib.pyplot as plt\n'), ((1783, 1803), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""States"""'], {}), "('States')\n", (1793, 1803), True, 'import matplotlib.pyplot as plt\n'), ((1806, 1858), 'matplotlib.pyplot.title', 'plt.title', (['"""State vs Estimated number of households"""'], {}), "('State vs Estimated number of households')\n", (1815, 1858), True, 'import matplotlib.pyplot as plt\n'), ((1862, 1907), 'matplotlib.pyplot.xticks', 'plt.xticks', (['(ind + width / 2)', 'data3.Name_State'], {}), '(ind + width / 2, data3.Name_State)\n', (1872, 1907), True, 'import matplotlib.pyplot as plt\n'), ((1910, 1932), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (1920, 1932), True, 'import matplotlib.pyplot as plt\n'), ((1935, 1945), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1943, 1945), True, 'import matplotlib.pyplot as plt\n'), ((2144, 2158), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2156, 2158), True, 'import matplotlib.pyplot as plt\n'), ((2366, 2391), 'matplotlib.pyplot.title', 'plt.title', (['"""Housing 2012"""'], {}), "('Housing 2012')\n", (2375, 2391), True, 'import matplotlib.pyplot as plt\n'), ((2394, 2404), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2402, 2404), True, 'import matplotlib.pyplot as plt\n')] |
## ObjectiveFunc.py -- Perform Gradient Estimation and Evaluation for a Given Function
##
## Copyright (C) 2018, IBM Corp
## <NAME> <<EMAIL>>
## <NAME> <<EMAIL>>
## <NAME> <<EMAIL>>
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import numpy as np
import Utils as util
# Fix the global numpy RNG seed so random-perturbation draws are reproducible.
np.random.seed(2018)
class OBJFUNC:
    """Black-box attack objective with zeroth-order gradient estimation.

    Wraps a classifier and a batch of original images; `evaluate` computes an
    L2 + attack loss for a perturbation in arctanh space, and
    `gradient_estimation` estimates its gradient from random finite
    differences (no access to model gradients needed).
    """

    def __init__(self, MGR, model, origImgs, origLabels):
        """Set up losses and the random-direction generator.

        Args:
            MGR: manager object; parSet['const'] weights the attack loss and
                parSet['rv_dist'] selects the perturbation distribution.
            model: wrapped classifier; model.model.predict is called on image
                batches (presumably a Keras-style model — TODO confirm).
            origImgs: original images, scaled to [-0.5, 0.5] — the arctanh
                transform below assumes values in (-1, 1) after doubling.
            origLabels: one-hot labels for origImgs.
        """
        self.const = MGR.parSet['const']
        self.model = model
        self.origImgs = origImgs
        # 1.9999999 keeps arctanh's argument strictly inside (-1, 1).
        self.origImgsAT = np.arctanh(origImgs*1.9999999)
        self.origLabels = origLabels
        # Number of images (one per-image loss term each).
        self.nFunc = origImgs.shape[0]
        # Pixels per image, used to scale the mean-square L2 loss.
        self.imageSize = np.size(origImgs)/self.nFunc
        self.query_count = 0
        # Initialize losses to large sentinels before the first evaluation.
        self.Loss_L2 = 1e10
        self.Loss_Attack = 1e10
        self.Loss_Overall = self.Loss_L2 + self.const*self.Loss_Attack
        if(MGR.parSet['rv_dist'] == 'UnitBall'):
            self.RV_Gen = self.Draw_UnitBall
        elif(MGR.parSet['rv_dist'] == 'UnitSphere'):
            self.RV_Gen = self.Draw_UnitSphere
        else:
            print('Please specify a valid distribution for random perturbation')

    def Draw_UnitBall(self):
        """Draw a random direction: uniform cube sample normalized to unit norm."""
        sample = np.random.uniform(-1.0, 1.0, size=self.origImgs[0].shape)
        return sample/np.linalg.norm(sample.flatten())

    def Draw_UnitSphere(self):
        """Draw a random direction uniformly on the unit sphere (normalized Gaussian)."""
        sample = np.random.normal(0.0, 1.0, size=self.origImgs[0].shape)
        return sample/np.linalg.norm(sample.flatten())

    def evaluate(self, delImgAT, randBatchIdx, addQueryCount = True):
        """Compute Loss_Overall = Loss_L2 + const * Loss_Attack for a perturbation.

        Args:
            delImgAT: perturbation in arctanh (tanh-space) coordinates, one
                image shape, broadcast to all images.
            randBatchIdx: indices of the image mini-batch to score; an empty
                array means "use all images".
            addQueryCount: if True, add the batch size to the model query counter.

        Returns:
            The scalar overall loss (also stored on self).
        """
        if( randBatchIdx.size == 0 ):
            randBatchIdx = np.arange(0, self.nFunc)
        batchSize = randBatchIdx.size
        origLabels_Batched = self.origLabels[randBatchIdx]
        delImgsAT = np.repeat(np.expand_dims(delImgAT, axis=0), self.nFunc, axis=0)
        # Map back from arctanh space into valid image range via tanh/2.
        advImgs = np.tanh(self.origImgsAT + delImgsAT)/2.0
        advImgs_Batched = advImgs[randBatchIdx]
        if(addQueryCount):
            self.query_count += batchSize
        Score_AdvImgs_Batched = self.model.model.predict(advImgs_Batched)
        # Clamp scores at 1e-20 so the logs below stay finite.
        Score_TargetLab = np.maximum(1e-20, np.sum(origLabels_Batched*Score_AdvImgs_Batched, 1))
        # -10000 masks the target class out of the per-row max.
        Score_NonTargetLab = np.maximum(1e-20, np.amax((1-origLabels_Batched)*Score_AdvImgs_Batched - (origLabels_Batched*10000),1))
        # Hinge on log-score margin; np.amax takes the worst image in the batch.
        self.Loss_Attack = np.amax(np.maximum(0.0, -np.log(Score_NonTargetLab) + np.log(Score_TargetLab) ) )
        # Note: L2 is computed over ALL images, not just the mini-batch.
        self.Loss_L2 = self.imageSize * np.mean(np.square(advImgs-self.origImgs)/2.0)
        self.Loss_Overall = self.Loss_L2 + self.const*self.Loss_Attack
        return self.Loss_Overall

    def gradient_estimation(self, delImgAT, mu, q, randBatchIdx = np.array([])):
        """Estimate the loss gradient at delImgAT by random finite differences.

        Averages q one-sided difference quotients along random unit directions
        and scales by the dimension, per standard zeroth-order estimators.
        NOTE: the np.array([]) default is shared across calls but is never
        mutated, so it is safe here.
        """
        f = self.evaluate(delImgAT, randBatchIdx)
        grad_avg = np.zeros(delImgAT.shape)
        for q_idx in range(q):
            u_rand = self.RV_Gen()
            f_perturb = self.evaluate(delImgAT + mu*u_rand, randBatchIdx)
            grad_avg += (f_perturb-f)*u_rand
        return (delImgAT.size/mu)*(grad_avg/q)

    def print_current_loss(self):
        """Print the current overall, L2 and attack loss components."""
        print('Loss_Overall: ', self.Loss_Overall, ' Loss_L2: ', self.Loss_L2, ' Loss_Attack: ', self.Loss_Attack)
| [
"numpy.random.uniform",
"numpy.arctanh",
"numpy.size",
"numpy.random.seed",
"numpy.tanh",
"numpy.sum",
"numpy.log",
"numpy.square",
"numpy.zeros",
"numpy.expand_dims",
"numpy.amax",
"numpy.array",
"numpy.arange",
"numpy.random.normal"
] | [((864, 884), 'numpy.random.seed', 'np.random.seed', (['(2018)'], {}), '(2018)\n', (878, 884), True, 'import numpy as np\n'), ((1097, 1129), 'numpy.arctanh', 'np.arctanh', (['(origImgs * 1.9999999)'], {}), '(origImgs * 1.9999999)\n', (1107, 1129), True, 'import numpy as np\n'), ((1774, 1831), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)'], {'size': 'self.origImgs[0].shape'}), '(-1.0, 1.0, size=self.origImgs[0].shape)\n', (1791, 1831), True, 'import numpy as np\n'), ((1940, 1995), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)'], {'size': 'self.origImgs[0].shape'}), '(0.0, 1.0, size=self.origImgs[0].shape)\n', (1956, 1995), True, 'import numpy as np\n'), ((3270, 3282), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3278, 3282), True, 'import numpy as np\n'), ((3356, 3380), 'numpy.zeros', 'np.zeros', (['delImgAT.shape'], {}), '(delImgAT.shape)\n', (3364, 3380), True, 'import numpy as np\n'), ((1232, 1249), 'numpy.size', 'np.size', (['origImgs'], {}), '(origImgs)\n', (1239, 1249), True, 'import numpy as np\n'), ((2194, 2218), 'numpy.arange', 'np.arange', (['(0)', 'self.nFunc'], {}), '(0, self.nFunc)\n', (2203, 2218), True, 'import numpy as np\n'), ((2351, 2383), 'numpy.expand_dims', 'np.expand_dims', (['delImgAT'], {'axis': '(0)'}), '(delImgAT, axis=0)\n', (2365, 2383), True, 'import numpy as np\n'), ((2424, 2460), 'numpy.tanh', 'np.tanh', (['(self.origImgsAT + delImgsAT)'], {}), '(self.origImgsAT + delImgsAT)\n', (2431, 2460), True, 'import numpy as np\n'), ((2709, 2762), 'numpy.sum', 'np.sum', (['(origLabels_Batched * Score_AdvImgs_Batched)', '(1)'], {}), '(origLabels_Batched * Score_AdvImgs_Batched, 1)\n', (2715, 2762), True, 'import numpy as np\n'), ((2810, 2904), 'numpy.amax', 'np.amax', (['((1 - origLabels_Batched) * Score_AdvImgs_Batched - origLabels_Batched * 10000)', '(1)'], {}), '((1 - origLabels_Batched) * Score_AdvImgs_Batched - \n origLabels_Batched * 10000, 1)\n', (2817, 2904), True, 'import numpy as np\n'), 
((2978, 3001), 'numpy.log', 'np.log', (['Score_TargetLab'], {}), '(Score_TargetLab)\n', (2984, 3001), True, 'import numpy as np\n'), ((3055, 3089), 'numpy.square', 'np.square', (['(advImgs - self.origImgs)'], {}), '(advImgs - self.origImgs)\n', (3064, 3089), True, 'import numpy as np\n'), ((2949, 2975), 'numpy.log', 'np.log', (['Score_NonTargetLab'], {}), '(Score_NonTargetLab)\n', (2955, 2975), True, 'import numpy as np\n')] |
import unittest
import numpy as np
import scipy.signal
import sys
sys.path.append("..")
import features
def zeros(sig_len=1024):
    """Return an all-zero signal of length *sig_len* and its rFFT spectrum."""
    sig = np.zeros(sig_len)
    spectrum = np.fft.rfft(sig)
    return (sig, spectrum)
def ones(sig_len=1024):
    """Return an all-ones signal of length *sig_len* and its rFFT spectrum."""
    sig = np.ones(sig_len)
    return sig, np.fft.rfft(sig)
def sine(periods=1, sig_len=1024):
    """Return a cosine with *periods* full cycles and its rFFT spectrum.

    A cosine is used instead of a true sine so that sine(sig_len/2, sig_len)
    yields alternating 1 and -1 samples instead of all zeros.
    """
    # Sample sig_len+1 points and drop the last so the endpoint is excluded.
    grid = np.linspace(0, periods * 2 * np.pi, sig_len + 1)
    sig = np.cos(grid[:-1])
    return sig, np.fft.rfft(sig)
def square(periods=1, sig_len=1024):
    """Return a square wave with *periods* full cycles and its rFFT spectrum."""
    phase = np.linspace(0, periods * 2 * np.pi, sig_len)
    sig = scipy.signal.square(phase)
    return sig, np.fft.rfft(sig)
def sawtooth(periods=1, sig_len=1024):
    """Return a sawtooth wave with *periods* full cycles and its rFFT spectrum."""
    phase = np.linspace(0, periods * 2 * np.pi, sig_len)
    sig = scipy.signal.sawtooth(phase)
    return sig, np.fft.rfft(sig)
def dirac(sig_len=1024):
    """Return a unit impulse at sample 0 and its rFFT spectrum."""
    sig = np.zeros(sig_len)
    sig[0] = 1
    return sig, np.fft.rfft(sig)
class SignalsTestCase(unittest.TestCase):
    """Sanity checks for the signal generator helpers defined above."""

    def test_zeros_spectrum(self):
        """A zeros spectrum should be zero."""
        _, spec = zeros(sig_len=1024)
        self.assertTrue(np.all(spec == np.zeros(513)))

    def test_ones_spectrum(self):
        """A ones spectrum should be a dirac."""
        _, spec = ones(sig_len=1024)
        expected = np.zeros(513)
        expected[0] = 1024
        self.assertTrue(np.all(spec == expected))

    def test_low_sine_spectrum(self):
        """A single sine wave spectrum should be a dirac at index 1."""
        _, spec = sine(periods=1, sig_len=1024)
        expected = np.zeros(513)
        expected[1] = 512
        self.assertTrue(np.allclose(spec, expected))

    def test_nyquist_sine(self):
        """A nyquist sine should consist of alternating 1 and -1."""
        sig, _ = sine(periods=512, sig_len=1024)
        expected = np.ones(1024)
        expected[1::2] = -1
        self.assertTrue(np.allclose(sig, expected))

    def test_nyquist_sine_spectrum(self):
        """A nyquist sine spectrum should be a dirac at the last index."""
        _, spec = sine(periods=512, sig_len=1024)
        expected = np.zeros(513)
        expected[-1] = 1024
        self.assertTrue(np.allclose(spec, expected))

    def test_dirac_spectrum(self):
        """A dirac spectrum should be ones."""
        _, spec = dirac(sig_len=1024)
        self.assertTrue(np.allclose(spec, np.ones(513)))
class RMSTestCase(unittest.TestCase):
    """Checks for features.rms on canonical test signals."""

    def test_zeros(self):
        """The RMS of zeros should be 0."""
        sig, spec = zeros()
        self.assertEqual(features.rms(sig, spec), 0)

    def test_ones(self):
        """The RMS of ones should be 1."""
        sig, spec = ones()
        self.assertEqual(features.rms(sig, spec), 1)

    def test_square(self):
        """The RMS of a square wave should be 1."""
        sig, spec = square()
        self.assertEqual(features.rms(sig, spec), 1)
class PeakTestCase(unittest.TestCase):
    """Checks for features.peak on canonical test signals."""

    def test_zeros(self):
        """The peak of zeros should be 0."""
        sig, spec = zeros()
        self.assertEqual(features.peak(sig, spec), 0)

    def test_ones(self):
        """The peak of ones should be 1."""
        sig, spec = ones()
        self.assertEqual(features.peak(sig, spec), 1)

    def test_square(self):
        """The peak of a square wave should be 1."""
        sig, spec = square()
        self.assertEqual(features.peak(sig, spec), 1)

    def test_saw(self):
        """The peak of a sawtooth wave should be about 1."""
        sig, spec = sawtooth(10)
        self.assertAlmostEqual(features.peak(sig, spec), 1)
class CrestTestCase(unittest.TestCase):
    """Checks for features.crest_factor on canonical test signals."""

    def test_factor_zeros(self):
        """The crest factor of zeros should be 1."""
        sig, spec = zeros()
        self.assertEqual(features.crest_factor(sig, spec), 1)

    def test_factor_ones(self):
        """The crest factor of ones should be 1."""
        sig, spec = ones()
        self.assertEqual(features.crest_factor(sig, spec), 1)

    def test_factor_rect(self):
        """The crest factor of a square wave should be 1."""
        sig, spec = square()
        self.assertEqual(features.crest_factor(sig, spec), 1)

    def test_factor_dirac(self):
        """The crest factor of a dirac should be sqrt(len(dirac))."""
        # sqrt(1024) == 32 for the default signal length.
        sig, spec = dirac()
        self.assertEqual(features.crest_factor(sig, spec), 32)
class SpectralCentroidTestCase(unittest.TestCase):
    """Checks for features.spectral_centroid on canonical test signals."""

    def test_zeros(self):
        """The spectral centroid of zeros should be 0.5/2."""
        sig, spec = zeros()
        self.assertEqual(features.spectral_centroid(sig, spec), 0.25)

    def test_ones(self):
        """The spectral centroid of ones should be 0."""
        sig, spec = ones()
        self.assertEqual(features.spectral_centroid(sig, spec), 0.0)

    def test_dirac(self):
        """The spectral centroid of a dirac should be 0.5/2."""
        # The magnitude spectrum of a dirac is constant.
        sig, spec = dirac()
        self.assertEqual(features.spectral_centroid(sig, spec), 0.25)

    def test_sine(self):
        """The spectral centroid of a sine wave should be 1/len(sine)."""
        # The spectrum is a single peak at freq[1] == 1/sig_len with height 512.
        sig, spec = sine()
        self.assertAlmostEqual(features.spectral_centroid(sig, spec), 1 / 1024)
class LogSpectralCentroidTestCase(unittest.TestCase):
    """Checks for features.log_spectral_centroid on canonical test signals."""

    def test_zeros(self):
        """The log spectral centroid of zeros should be 3*log(1.5)-1."""
        expected = 3 * np.log(1.5) - 1
        self.assertEqual(features.log_spectral_centroid(*zeros()), expected)

    def test_dirac(self):
        """The log spectral centroid of a dirac should approach 3*log(1.5)-1."""
        # The result gets closer to the analytical value for longer signals.
        expected = 3 * np.log(1.5) - 1
        self.assertAlmostEqual(
            features.log_spectral_centroid(*dirac(4096)), expected, 4)
class SpectralVarianceTestCase(unittest.TestCase):
    """Checks for features.spectral_variance on canonical test signals."""

    def test_zeros(self):
        """The spectral variance of zeros should be 0."""
        sig, spec = zeros()
        self.assertEqual(features.spectral_variance(sig, spec), 0)

    def test_dirac(self):
        """The spectral variance of a dirac should be 0."""
        sig, spec = dirac()
        self.assertEqual(features.spectral_variance(sig, spec), 0)
class SpectralSkewnessTestCase(unittest.TestCase):
    """Checks for features.spectral_skewness on canonical test signals."""

    def test_zeros(self):
        """The spectral skewness of zeros should be 0."""
        sig, spec = zeros()
        self.assertEqual(features.spectral_skewness(sig, spec), 0)

    def test_dirac(self):
        """The spectral skewness of a dirac should be 0."""
        sig, spec = dirac()
        self.assertEqual(features.spectral_skewness(sig, spec), 0)
class SpectralFlatnessTestCase(unittest.TestCase):
    """Checks for features.spectral_flatness on canonical test signals."""

    def test_zeros(self):
        """The spectral flatness of zeros should be 1."""
        sig, spec = zeros()
        self.assertEqual(features.spectral_flatness(sig, spec), 1)

    def test_dirac(self):
        """The spectral flatness of a dirac should be 1."""
        # A dirac has a flat magnitude spectrum.
        sig, spec = dirac()
        self.assertEqual(features.spectral_flatness(sig, spec), 1)

    def test_sine(self):
        """The spectral flatness of a sine should be 0."""
        sig, spec = sine()
        self.assertAlmostEqual(features.spectral_flatness(sig, spec), 0)
class SpectralBrightnessTestCase(unittest.TestCase):
    """Checks for features.spectral_brightness on deterministic and noise signals."""

    def test_zeros(self):
        """The spectral brightness of zeros should be 1."""
        self.assertEqual(features.spectral_brightness(*zeros()), 1)

    def test_dc(self):
        """The spectral brightness of ones should be 0."""
        self.assertEqual(features.spectral_brightness(*ones()), 0)

    def test_nyquist_sine(self):
        """The spectral brightness of a nyquist sine should be 1."""
        self.assertEqual(features.spectral_brightness(*sine(512)), 1)

    def test_high_noise(self):
        """The spectral brightness of highpass noise should be >1."""
        # Construct a spectrum that rises linearly with frequency.
        spectrum = np.linspace(0, 1, 512)
        signal = np.fft.irfft(spectrum)
        self.assertGreater(
            features.spectral_brightness(signal, spectrum), 1)

    def test_white_noise(self):
        """The spectral brightness of white noise should be >1."""
        # NOTE(review): RNG is unseeded, so this test is not strictly
        # deterministic — consider seeding if it ever flakes.
        signal = np.random.randn(1024)
        spectrum = np.fft.rfft(signal)
        self.assertGreater(
            features.spectral_brightness(signal, spectrum), 1)

    def test_low_noise(self):
        """The spectral brightness of lowpass noise should be <1."""
        noise = np.random.randn(1024)
        # NOTE(review): two critical frequencies [0.1, 0.2] describe a
        # band-limited filter; the original comment called it "lowpass".
        b, a = scipy.signal.butter(8, [0.1, 0.2])
        signal = scipy.signal.lfilter(b, a, noise)
        spectrum = np.fft.rfft(signal)
        self.assertLess(
            features.spectral_brightness(signal, spectrum), 1)
class SpectralAbsSlopeMeanTestCase(unittest.TestCase):
    """Checks for features.spectral_abs_slope_mean on canonical test signals."""

    def test_zeros(self):
        """The spectral abs slope mean of zeros should be 0."""
        sig, spec = zeros()
        self.assertEqual(features.spectral_abs_slope_mean(sig, spec), 0)

    def test_dirac(self):
        """The spectral abs slope mean of a dirac should be 0."""
        sig, spec = dirac()
        self.assertEqual(features.spectral_abs_slope_mean(sig, spec), 0)

    def test_sine(self):
        """The spectral abs slope mean of a sine should be 2."""
        sig, spec = sine()
        self.assertEqual(features.spectral_abs_slope_mean(sig, spec), 2)
if __name__ == '__main__':
    # Run the whole unittest suite when this file is executed directly.
    unittest.main()
| [
"sys.path.append",
"unittest.main",
"numpy.fft.rfft",
"features.spectral_brightness",
"numpy.fft.irfft",
"numpy.log",
"numpy.random.randn",
"numpy.allclose",
"numpy.zeros",
"numpy.ones",
"numpy.linspace",
"numpy.all"
] | [((66, 87), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (81, 87), False, 'import sys\n'), ((144, 161), 'numpy.zeros', 'np.zeros', (['sig_len'], {}), '(sig_len)\n', (152, 161), True, 'import numpy as np\n'), ((242, 258), 'numpy.ones', 'np.ones', (['sig_len'], {}), '(sig_len)\n', (249, 258), True, 'import numpy as np\n'), ((8900, 8915), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8913, 8915), False, 'import unittest\n'), ((182, 201), 'numpy.fft.rfft', 'np.fft.rfft', (['signal'], {}), '(signal)\n', (193, 201), True, 'import numpy as np\n'), ((279, 298), 'numpy.fft.rfft', 'np.fft.rfft', (['signal'], {}), '(signal)\n', (290, 298), True, 'import numpy as np\n'), ((540, 559), 'numpy.fft.rfft', 'np.fft.rfft', (['signal'], {}), '(signal)\n', (551, 559), True, 'import numpy as np\n'), ((633, 677), 'numpy.linspace', 'np.linspace', (['(0)', '(periods * 2 * np.pi)', 'sig_len'], {}), '(0, periods * 2 * np.pi, sig_len)\n', (644, 677), True, 'import numpy as np\n'), ((695, 714), 'numpy.fft.rfft', 'np.fft.rfft', (['signal'], {}), '(signal)\n', (706, 714), True, 'import numpy as np\n'), ((792, 836), 'numpy.linspace', 'np.linspace', (['(0)', '(periods * 2 * np.pi)', 'sig_len'], {}), '(0, periods * 2 * np.pi, sig_len)\n', (803, 836), True, 'import numpy as np\n'), ((854, 873), 'numpy.fft.rfft', 'np.fft.rfft', (['signal'], {}), '(signal)\n', (865, 873), True, 'import numpy as np\n'), ((978, 997), 'numpy.fft.rfft', 'np.fft.rfft', (['signal'], {}), '(signal)\n', (989, 997), True, 'import numpy as np\n'), ((1959, 1972), 'numpy.ones', 'np.ones', (['(1024)'], {}), '(1024)\n', (1966, 1972), True, 'import numpy as np\n'), ((2500, 2512), 'numpy.ones', 'np.ones', (['(513)'], {}), '(513)\n', (2507, 2512), True, 'import numpy as np\n'), ((7453, 7475), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(512)'], {}), '(0, 1, 512)\n', (7464, 7475), True, 'import numpy as np\n'), ((7496, 7523), 'numpy.fft.irfft', 'np.fft.irfft', (['fft_test_data'], {}), 
'(fft_test_data)\n', (7508, 7523), True, 'import numpy as np\n'), ((7743, 7764), 'numpy.random.randn', 'np.random.randn', (['(1024)'], {}), '(1024)\n', (7758, 7764), True, 'import numpy as np\n'), ((7789, 7811), 'numpy.fft.rfft', 'np.fft.rfft', (['test_data'], {}), '(test_data)\n', (7800, 7811), True, 'import numpy as np\n'), ((8031, 8052), 'numpy.random.randn', 'np.random.randn', (['(1024)'], {}), '(1024)\n', (8046, 8052), True, 'import numpy as np\n'), ((8202, 8224), 'numpy.fft.rfft', 'np.fft.rfft', (['test_data'], {}), '(test_data)\n', (8213, 8224), True, 'import numpy as np\n'), ((471, 519), 'numpy.linspace', 'np.linspace', (['(0)', '(periods * 2 * np.pi)', '(sig_len + 1)'], {}), '(0, periods * 2 * np.pi, sig_len + 1)\n', (482, 519), True, 'import numpy as np\n'), ((936, 957), 'numpy.zeros', 'np.zeros', (['(sig_len - 1)'], {}), '(sig_len - 1)\n', (944, 957), True, 'import numpy as np\n'), ((1452, 1482), 'numpy.all', 'np.all', (['(fft_signal == expected)'], {}), '(fft_signal == expected)\n', (1458, 1482), True, 'import numpy as np\n'), ((1741, 1774), 'numpy.allclose', 'np.allclose', (['fft_signal', 'expected'], {}), '(fft_signal, expected)\n', (1752, 1774), True, 'import numpy as np\n'), ((2025, 2054), 'numpy.allclose', 'np.allclose', (['signal', 'expected'], {}), '(signal, expected)\n', (2036, 2054), True, 'import numpy as np\n'), ((2314, 2347), 'numpy.allclose', 'np.allclose', (['fft_signal', 'expected'], {}), '(fft_signal, expected)\n', (2325, 2347), True, 'import numpy as np\n'), ((2537, 2570), 'numpy.allclose', 'np.allclose', (['fft_signal', 'expected'], {}), '(fft_signal, expected)\n', (2548, 2570), True, 'import numpy as np\n'), ((7564, 7618), 'features.spectral_brightness', 'features.spectral_brightness', (['test_data', 'fft_test_data'], {}), '(test_data, fft_test_data)\n', (7592, 7618), False, 'import features\n'), ((7852, 7906), 'features.spectral_brightness', 'features.spectral_brightness', (['test_data', 'fft_test_data'], {}), '(test_data, 
fft_test_data)\n', (7880, 7906), False, 'import features\n'), ((8262, 8316), 'features.spectral_brightness', 'features.spectral_brightness', (['test_data', 'fft_test_data'], {}), '(test_data, fft_test_data)\n', (8290, 8316), False, 'import features\n'), ((1410, 1427), 'numpy.zeros', 'np.zeros', (['(513 - 1)'], {}), '(513 - 1)\n', (1418, 1427), True, 'import numpy as np\n'), ((1699, 1716), 'numpy.zeros', 'np.zeros', (['(513 - 2)'], {}), '(513 - 2)\n', (1707, 1716), True, 'import numpy as np\n'), ((2264, 2281), 'numpy.zeros', 'np.zeros', (['(513 - 1)'], {}), '(513 - 1)\n', (2272, 2281), True, 'import numpy as np\n'), ((1219, 1232), 'numpy.zeros', 'np.zeros', (['(513)'], {}), '(513)\n', (1227, 1232), True, 'import numpy as np\n'), ((5264, 5275), 'numpy.log', 'np.log', (['(1.5)'], {}), '(1.5)\n', (5270, 5275), True, 'import numpy as np\n'), ((5553, 5564), 'numpy.log', 'np.log', (['(1.5)'], {}), '(1.5)\n', (5559, 5564), True, 'import numpy as np\n')] |
import numpy as np
import numpy.testing as npt
import pandas as pd
from stumpy import aampi, core, config
import pytest
import naive
# Index patterns (slices / ints / lists of ints) at which non-finite values are
# substituted into the test time series; -1 is translated to "last element".
substitution_locations = [(slice(0, 0), 0, -1, slice(1, 3), [0, 3])]
# The non-finite values substituted at those locations.
substitution_values = [np.nan, np.inf]
def test_aampi_int_input():
    """aampi should reject an integer (non-float) time series with TypeError."""
    int_series = np.arange(10)
    with pytest.raises(TypeError):
        aampi(int_series, 5)
def test_aampi_self_join():
    """Streaming aampi (egress=False) should match the batch naive.aamp result,
    for both ndarray and pandas Series inputs."""
    m = 3
    # NOTE(review): `zone` is never used below — presumably kept for symmetry
    # with the normalized (stumpi) tests; confirm before removing.
    zone = int(np.ceil(m / 4))
    seed = np.random.randint(100000)
    np.random.seed(seed)
    n = 30
    T = np.random.rand(n)
    stream = aampi(T, m, egress=False)
    for i in range(34):
        t = np.random.rand()
        stream.update(t)
    right_P = stream.P_
    right_I = stream.I_
    right_left_P = stream.left_P_
    right_left_I = stream.left_I_
    left = naive.aamp(stream.T_, m)
    left_P = left[:, 0]
    left_I = left[:, 1]
    # Rebuild the left (one-directional) profile from the left indices.
    left_left_P = np.full(left_P.shape, np.inf)
    left_left_I = left[:, 2]
    for i, j in enumerate(left_left_I):
        if j >= 0:
            D = core.mass_absolute(stream.T_[i : i + m], stream.T_[j : j + m])
            left_left_P[i] = D[0]
    naive.replace_inf(left_P)
    naive.replace_inf(left_left_P)
    naive.replace_inf(right_P)
    naive.replace_inf(right_left_P)
    npt.assert_almost_equal(left_P, right_P)
    npt.assert_almost_equal(left_I, right_I)
    npt.assert_almost_equal(left_left_P, right_left_P)
    npt.assert_almost_equal(left_left_I, right_left_I)
    # Repeat with the same seed using a pandas Series; results must match.
    np.random.seed(seed)
    n = 30
    T = np.random.rand(n)
    T = pd.Series(T)
    stream = aampi(T, m, egress=False)
    for i in range(34):
        t = np.random.rand()
        stream.update(t)
    right_P = stream.P_
    right_I = stream.I_
    right_left_P = stream.left_P_
    right_left_I = stream.left_I_
    naive.replace_inf(right_P)
    naive.replace_inf(right_left_P)
    npt.assert_almost_equal(left_P, right_P)
    npt.assert_almost_equal(left_I, right_I)
    npt.assert_almost_equal(left_left_P, right_left_P)
    npt.assert_almost_equal(left_left_I, right_left_I)
def test_aampi_self_join_egress():
    """Streaming aampi with egress=True should match the naive streaming
    reference (naive.aampi_egress) at startup and after every update, for
    both ndarray and pandas Series inputs."""
    m = 3
    # NOTE(review): `zone` is never used below — presumably kept for symmetry
    # with the normalized (stumpi) tests; confirm before removing.
    zone = int(np.ceil(m / 4))
    seed = np.random.randint(100000)
    np.random.seed(seed)
    n = 30
    T = np.random.rand(n)
    left = naive.aampi_egress(T, m)
    left_P = left.P_.copy()
    left_I = left.I_
    left_left_P = left.left_P_.copy()
    left_left_I = left.left_I_
    stream = aampi(T, m, egress=True)
    right_P = stream.P_.copy()
    right_I = stream.I_
    right_left_P = stream.left_P_.copy()
    right_left_I = stream.left_I_
    naive.replace_inf(left_P)
    naive.replace_inf(left_left_P)
    naive.replace_inf(right_P)
    naive.replace_inf(right_left_P)
    npt.assert_almost_equal(left_P, right_P)
    npt.assert_almost_equal(left_I, right_I)
    npt.assert_almost_equal(left_left_P, right_left_P)
    npt.assert_almost_equal(left_left_I, right_left_I)
    # Compare the reference and streaming profiles after each single update.
    for i in range(34):
        t = np.random.rand()
        left.update(t)
        stream.update(t)
        right_P = stream.P_.copy()
        right_I = stream.I_
        right_left_P = stream.left_P_.copy()
        right_left_I = stream.left_I_
        left_P = left.P_.copy()
        left_I = left.I_
        left_left_P = left.left_P_.copy()
        left_left_I = left.left_I_
        naive.replace_inf(left_P)
        naive.replace_inf(left_left_P)
        naive.replace_inf(right_P)
        naive.replace_inf(right_left_P)
        npt.assert_almost_equal(left_P, right_P)
        npt.assert_almost_equal(left_I, right_I)
        npt.assert_almost_equal(left_left_P, right_left_P)
        npt.assert_almost_equal(left_left_I, right_left_I)
    # Repeat with the same seed using a pandas Series input.
    np.random.seed(seed)
    T = np.random.rand(n)
    T = pd.Series(T)
    left = naive.aampi_egress(T, m)
    left_P = left.P_.copy()
    left_I = left.I_
    stream = aampi(T, m, egress=True)
    right_P = stream.P_.copy()
    right_I = stream.I_
    naive.replace_inf(left_P)
    naive.replace_inf(right_P)
    npt.assert_almost_equal(left_P, right_P)
    npt.assert_almost_equal(left_I, right_I)
    for i in range(34):
        t = np.random.rand()
        left.update(t)
        stream.update(t)
        right_P = stream.P_.copy()
        right_I = stream.I_
        right_left_P = stream.left_P_.copy()
        right_left_I = stream.left_I_
        left_P = left.P_.copy()
        left_I = left.I_
        left_left_P = left.left_P_.copy()
        left_left_I = left.left_I_
        naive.replace_inf(left_P)
        naive.replace_inf(left_left_P)
        naive.replace_inf(right_P)
        naive.replace_inf(right_left_P)
        npt.assert_almost_equal(left_P, right_P)
        npt.assert_almost_equal(left_I, right_I)
        npt.assert_almost_equal(left_left_P, right_left_P)
        npt.assert_almost_equal(left_left_I, right_left_I)
@pytest.mark.parametrize("substitute", substitution_values)
@pytest.mark.parametrize("substitution_locations", substitution_locations)
def test_aampi_init_nan_inf_self_join(substitute, substitution_locations):
    """aampi should handle NaN/inf values present in the *initial* time series
    and still match the batch naive.aamp result."""
    m = 3
    # NOTE(review): `zone` is unused here; presumably kept for symmetry with
    # the normalized tests.
    zone = int(np.ceil(m / 4))
    # seed = 58638
    for substitution_location in substitution_locations:
        np.random.seed(seed)
        n = 30
        T = np.random.rand(n)
        if substitution_location == -1:
            substitution_location = T.shape[0] - 1
        T[substitution_location] = substitute
        stream = aampi(T, m, egress=False)
        for i in range(34):
            t = np.random.rand()
            stream.update(t)
        right_P = stream.P_
        right_I = stream.I_
        # Re-apply the substitution before computing the batch reference.
        stream.T_[substitution_location] = substitute
        left = naive.aamp(stream.T_, m)
        left_P = left[:, 0]
        left_I = left[:, 1]
        naive.replace_inf(left_P)
        naive.replace_inf(right_P)
        npt.assert_almost_equal(left_P, right_P)
        npt.assert_almost_equal(left_I, right_I)
        # Repeat with the same seed using a pandas Series input.
        np.random.seed(seed)
        n = 30
        T = np.random.rand(n)
        if substitution_location == -1:
            substitution_location = T.shape[0] - 1
        T[substitution_location] = substitute
        T = pd.Series(T)
        stream = aampi(T, m, egress=False)
        for i in range(34):
            t = np.random.rand()
            stream.update(t)
        right_P = stream.P_
        right_I = stream.I_
        naive.replace_inf(right_P)
        npt.assert_almost_equal(left_P, right_P)
        npt.assert_almost_equal(left_I, right_I)
@pytest.mark.parametrize("substitute", substitution_values)
@pytest.mark.parametrize("substitution_locations", substitution_locations)
def test_aampi_init_nan_inf_self_join_egress(substitute, substitution_locations):
    """aampi with egress=True should handle NaN/inf values present in the
    *initial* time series and match the naive streaming reference."""
    m = 3
    # NOTE(review): `zone` is unused here; presumably kept for symmetry with
    # the normalized tests.
    zone = int(np.ceil(m / 4))
    seed = np.random.randint(100000)
    # seed = 58638
    for substitution_location in substitution_locations:
        np.random.seed(seed)
        n = 30
        T = np.random.rand(n)
        if substitution_location == -1:
            substitution_location = T.shape[0] - 1
        T[substitution_location] = substitute
        left = naive.aampi_egress(T, m)
        left_P = left.P_.copy()
        left_I = left.I_
        left_left_P = left.left_P_.copy()
        left_left_I = left.left_I_
        stream = aampi(T, m, egress=True)
        right_P = stream.P_.copy()
        right_I = stream.I_
        right_left_P = stream.left_P_.copy()
        right_left_I = stream.left_I_
        naive.replace_inf(left_P)
        naive.replace_inf(right_P)
        naive.replace_inf(left_left_P)
        naive.replace_inf(right_left_P)
        npt.assert_almost_equal(left_P, right_P)
        npt.assert_almost_equal(left_I, right_I)
        npt.assert_almost_equal(left_left_P, right_left_P)
        npt.assert_almost_equal(left_left_I, right_left_I)
        for i in range(34):
            t = np.random.rand()
            left.update(t)
            stream.update(t)
            right_P = stream.P_.copy()
            right_I = stream.I_
            right_left_P = stream.left_P_.copy()
            right_left_I = stream.left_I_
            left_P = left.P_.copy()
            left_I = left.I_
            left_left_P = left.left_P_.copy()
            left_left_I = left.left_I_
            naive.replace_inf(left_P)
            naive.replace_inf(left_left_P)
            naive.replace_inf(right_P)
            naive.replace_inf(right_left_P)
            npt.assert_almost_equal(left_P, right_P)
            npt.assert_almost_equal(left_I, right_I)
            npt.assert_almost_equal(left_left_P, right_left_P)
            npt.assert_almost_equal(left_left_I, right_left_I)
        # Repeat with a pandas Series input.
        # NOTE(review): unlike the first pass, no NaN/inf substitution is
        # applied to T here — confirm whether that is intentional.
        np.random.seed(seed)
        n = 30
        T = np.random.rand(n)
        T = pd.Series(T)
        left = naive.aampi_egress(T, m)
        left_P = left.P_.copy()
        left_I = left.I_
        left_left_P = left.left_P_.copy()
        left_left_I = left.left_I_
        stream = aampi(T, m, egress=True)
        right_P = stream.P_.copy()
        right_I = stream.I_
        right_left_P = stream.left_P_.copy()
        right_left_I = stream.left_I_
        npt.assert_almost_equal(left_P, right_P)
        npt.assert_almost_equal(left_I, right_I)
        naive.replace_inf(left_left_P)
        naive.replace_inf(right_left_P)
        for i in range(34):
            t = np.random.rand()
            left.update(t)
            stream.update(t)
            right_P = stream.P_.copy()
            right_I = stream.I_
            right_left_P = stream.left_P_.copy()
            right_left_I = stream.left_I_
            left_P = left.P_.copy()
            left_I = left.I_
            left_left_P = left.left_P_.copy()
            left_left_I = left.left_I_
            naive.replace_inf(left_P)
            naive.replace_inf(left_left_P)
            naive.replace_inf(right_P)
            naive.replace_inf(right_left_P)
            npt.assert_almost_equal(left_P, right_P)
            npt.assert_almost_equal(left_I, right_I)
            npt.assert_almost_equal(left_left_P, right_left_P)
            npt.assert_almost_equal(left_left_I, right_left_I)
@pytest.mark.parametrize("substitute", substitution_values)
@pytest.mark.parametrize("substitution_locations", substitution_locations)
def test_aampi_stream_nan_inf_self_join(substitute, substitution_locations):
    """aampi should handle NaN/inf values arriving in the *streamed* portion
    of the time series and still match the batch naive.aamp result."""
    m = 3
    # NOTE(review): `zone` is unused here; presumably kept for symmetry with
    # the normalized tests.
    zone = int(np.ceil(m / 4))
    seed = np.random.randint(100000)
    for substitution_location in substitution_locations:
        np.random.seed(seed)
        n = 30
        T = np.random.rand(64)
        stream = aampi(T[:n], m, egress=False)
        if substitution_location == -1:
            substitution_location = T[n:].shape[0] - 1
        # T[n:] is a view, so this writes into the streamed tail of T.
        T[n:][substitution_location] = substitute
        for t in T[n:]:
            stream.update(t)
        right_P = stream.P_
        right_I = stream.I_
        stream.T_[n:][substitution_location] = substitute
        left = naive.aamp(stream.T_, m)
        left_P = left[:, 0]
        left_I = left[:, 1]
        naive.replace_inf(left_P)
        naive.replace_inf(right_P)
        npt.assert_almost_equal(left_P, right_P)
        npt.assert_almost_equal(left_I, right_I)
        # Repeat with the same seed using a pandas Series input.
        np.random.seed(seed)
        T = np.random.rand(64)
        stream = aampi(pd.Series(T[:n]), m, egress=False)
        if substitution_location == -1:
            substitution_location = T[n:].shape[0] - 1
        T[n:][substitution_location] = substitute
        for t in T[n:]:
            stream.update(t)
        right_P = stream.P_
        right_I = stream.I_
        naive.replace_inf(right_P)
        npt.assert_almost_equal(left_P, right_P)
        npt.assert_almost_equal(left_I, right_I)
@pytest.mark.parametrize("substitute", substitution_values)
@pytest.mark.parametrize("substitution_locations", substitution_locations)
def test_aampi_stream_nan_inf_self_join_egress(substitute, substitution_locations):
    """aampi with egress=True should handle NaN/inf values arriving in the
    *streamed* portion of the time series and match the naive streaming
    reference after every update."""
    m = 3
    # NOTE(review): `zone` is unused here; presumably kept for symmetry with
    # the normalized tests.
    zone = int(np.ceil(m / 4))
    seed = np.random.randint(100000)
    for substitution_location in substitution_locations:
        np.random.seed(seed)
        n = 30
        T = np.random.rand(64)
        left = naive.aampi_egress(T[:n], m)
        left_P = left.P_.copy()
        left_I = left.I_
        left_left_P = left.left_P_.copy()
        left_left_I = left.left_I_
        stream = aampi(T[:n], m, egress=True)
        right_P = stream.P_.copy()
        right_I = stream.I_
        right_left_P = stream.left_P_.copy()
        right_left_I = stream.left_I_
        naive.replace_inf(left_P)
        naive.replace_inf(right_P)
        naive.replace_inf(left_left_P)
        naive.replace_inf(right_left_P)
        npt.assert_almost_equal(left_P, right_P)
        npt.assert_almost_equal(left_I, right_I)
        npt.assert_almost_equal(left_left_P, right_left_P)
        npt.assert_almost_equal(left_left_I, right_left_I)
        if substitution_location == -1:
            substitution_location = T[n:].shape[0] - 1
        # T[n:] is a view, so this writes into the streamed tail of T.
        T[n:][substitution_location] = substitute
        for t in T[n:]:
            left.update(t)
            stream.update(t)
            right_P = stream.P_.copy()
            right_I = stream.I_
            right_left_P = stream.left_P_.copy()
            right_left_I = stream.left_I_
            left_P = left.P_.copy()
            left_I = left.I_
            left_left_P = left.left_P_.copy()
            left_left_I = left.left_I_
            naive.replace_inf(left_P)
            naive.replace_inf(left_left_P)
            naive.replace_inf(right_P)
            naive.replace_inf(right_left_P)
            npt.assert_almost_equal(left_P, right_P)
            npt.assert_almost_equal(left_I, right_I)
            npt.assert_almost_equal(left_left_P, right_left_P)
            npt.assert_almost_equal(left_left_I, right_left_I)
        # Repeat with the same seed using a pandas Series input.
        np.random.seed(seed)
        T = np.random.rand(64)
        left = naive.aampi_egress(T[:n], m)
        left_P = left.P_.copy()
        left_I = left.I_
        left_left_P = left.left_P_.copy()
        left_left_I = left.left_I_
        stream = aampi(pd.Series(T[:n]), m, egress=True)
        right_P = stream.P_.copy()
        right_I = stream.I_
        right_left_P = stream.left_P_.copy()
        right_left_I = stream.left_I_
        naive.replace_inf(left_P)
        naive.replace_inf(right_P)
        naive.replace_inf(left_left_P)
        naive.replace_inf(right_left_P)
        npt.assert_almost_equal(left_P, right_P)
        npt.assert_almost_equal(left_I, right_I)
        npt.assert_almost_equal(left_left_P, right_left_P)
        npt.assert_almost_equal(left_left_I, right_left_I)
        if substitution_location == -1:
            substitution_location = T[n:].shape[0] - 1
        T[n:][substitution_location] = substitute
        for t in T[n:]:
            left.update(t)
            stream.update(t)
            right_P = stream.P_.copy()
            right_I = stream.I_
            right_left_P = stream.left_P_.copy()
            right_left_I = stream.left_I_
            left_P = left.P_.copy()
            left_I = left.I_
            left_left_P = left.left_P_.copy()
            left_left_I = left.left_I_
            naive.replace_inf(left_P)
            naive.replace_inf(left_left_P)
            naive.replace_inf(right_P)
            naive.replace_inf(right_left_P)
            npt.assert_almost_equal(left_P, right_P)
            npt.assert_almost_equal(left_I, right_I)
            npt.assert_almost_equal(left_left_P, right_left_P)
            npt.assert_almost_equal(left_left_I, right_left_I)
def test_aampi_constant_subsequence_self_join():
    """aampi should match naive.aamp on a series containing constant runs.

    Index assertions are commented out below — presumably because constant
    regions produce ties, making the matching index ambiguous.
    """
    m = 3
    # NOTE(review): `zone` is unused here; presumably kept for symmetry with
    # the normalized tests.
    zone = int(np.ceil(m / 4))
    seed = np.random.randint(100000)
    np.random.seed(seed)
    T = np.concatenate((np.zeros(20, dtype=np.float64), np.ones(10, dtype=np.float64)))
    stream = aampi(T, m, egress=False)
    for i in range(34):
        t = np.random.rand()
        stream.update(t)
    right_P = stream.P_
    right_I = stream.I_
    left = naive.aamp(stream.T_, m)
    left_P = left[:, 0]
    left_I = left[:, 1]
    naive.replace_inf(left_P)
    naive.replace_inf(right_P)
    npt.assert_almost_equal(left_P, right_P)
    # npt.assert_almost_equal(left_I, right_I)
    # Repeat with the same seed using a pandas Series input.
    np.random.seed(seed)
    T = np.concatenate((np.zeros(20, dtype=np.float64), np.ones(10, dtype=np.float64)))
    T = pd.Series(T)
    stream = aampi(T, m, egress=False)
    for i in range(34):
        t = np.random.rand()
        stream.update(t)
    right_P = stream.P_
    right_I = stream.I_
    naive.replace_inf(right_P)
    npt.assert_almost_equal(left_P, right_P)
    # npt.assert_almost_equal(left_I, right_I)
def test_aampi_constant_subsequence_self_join_egress():
    """aampi with egress=True should match the naive streaming reference on a
    series containing constant runs.

    Index assertions are commented out below — presumably because constant
    regions produce ties, making the matching index ambiguous.
    """
    m = 3
    # NOTE(review): `zone` is unused here; presumably kept for symmetry with
    # the normalized tests.
    zone = int(np.ceil(m / 4))
    seed = np.random.randint(100000)
    np.random.seed(seed)
    T = np.concatenate((np.zeros(20, dtype=np.float64), np.ones(10, dtype=np.float64)))
    left = naive.aampi_egress(T, m)
    left_P = left.P_.copy()
    left_I = left.I_
    left_left_P = left.left_P_.copy()
    left_left_I = left.left_I_
    stream = aampi(T, m, egress=True)
    right_P = stream.P_.copy()
    right_I = stream.I_
    right_left_P = stream.left_P_.copy()
    right_left_I = stream.left_I_
    naive.replace_inf(left_P)
    naive.replace_inf(right_P)
    naive.replace_inf(left_left_P)
    naive.replace_inf(right_left_P)
    npt.assert_almost_equal(left_P, right_P)
    # npt.assert_almost_equal(left_I, right_I)
    npt.assert_almost_equal(left_left_P, right_left_P)
    # npt.assert_almost_equal(left_left_I, right_left_I)
    for i in range(34):
        t = np.random.rand()
        left.update(t)
        stream.update(t)
        right_P = stream.P_.copy()
        right_I = stream.I_
        right_left_P = stream.left_P_.copy()
        right_left_I = stream.left_I_
        left_P = left.P_.copy()
        left_I = left.I_
        left_left_P = left.left_P_.copy()
        left_left_I = left.left_I_
        naive.replace_inf(left_P)
        naive.replace_inf(left_left_P)
        naive.replace_inf(right_P)
        naive.replace_inf(right_left_P)
        npt.assert_almost_equal(left_P, right_P)
        # npt.assert_almost_equal(left_I, right_I)
        npt.assert_almost_equal(left_left_P, right_left_P)
        # npt.assert_almost_equal(left_left_I, right_left_I)
    # Repeat with the same seed using a pandas Series input.
    np.random.seed(seed)
    T = np.concatenate((np.zeros(20, dtype=np.float64), np.ones(10, dtype=np.float64)))
    T = pd.Series(T)
    left = naive.aampi_egress(T, m)
    left_P = left.P_.copy()
    left_I = left.I_
    left_left_P = left.left_P_.copy()
    left_left_I = left.left_I_
    stream = aampi(T, m, egress=True)
    right_P = stream.P_.copy()
    right_I = stream.I_
    right_left_P = stream.left_P_.copy()
    right_left_I = stream.left_I_
    naive.replace_inf(left_P)
    naive.replace_inf(right_P)
    naive.replace_inf(left_left_P)
    naive.replace_inf(right_left_P)
    npt.assert_almost_equal(left_P, right_P)
    # npt.assert_almost_equal(left_I, right_I)
    npt.assert_almost_equal(left_left_P, right_left_P)
    # npt.assert_almost_equal(left_left_I, right_left_I)
    for i in range(34):
        t = np.random.rand()
        left.update(t)
        stream.update(t)
        right_P = stream.P_.copy()
        right_I = stream.I_
        right_left_P = stream.left_P_.copy()
        right_left_I = stream.left_I_
        left_P = left.P_.copy()
        left_I = left.I_
        left_left_P = left.left_P_.copy()
        left_left_I = left.left_I_
        naive.replace_inf(left_P)
        naive.replace_inf(left_left_P)
        naive.replace_inf(right_P)
        naive.replace_inf(right_left_P)
        npt.assert_almost_equal(left_P, right_P)
        # npt.assert_almost_equal(left_I, right_I)
        npt.assert_almost_equal(left_left_P, right_left_P)
        # npt.assert_almost_equal(left_left_I, right_left_I)
def test_aampi_identical_subsequence_self_join():
    """aampi should match naive.aamp on a series containing two identical
    (exactly repeated) subsequences.

    Index assertions are commented out below — presumably because exact
    repeats produce distance ties, making the matching index ambiguous.
    """
    m = 3
    # NOTE(review): `zone` is unused here; presumably kept for symmetry with
    # the normalized tests.
    zone = int(np.ceil(m / 4))
    seed = np.random.randint(100000)
    np.random.seed(seed)
    identical = np.random.rand(8)
    T = np.random.rand(20)
    # Plant the same 8-sample pattern at positions 1 and 11.
    T[1 : 1 + identical.shape[0]] = identical
    T[11 : 11 + identical.shape[0]] = identical
    stream = aampi(T, m, egress=False)
    for i in range(34):
        t = np.random.rand()
        stream.update(t)
    right_P = stream.P_
    right_I = stream.I_
    left = naive.aamp(stream.T_, m)
    left_P = left[:, 0]
    left_I = left[:, 1]
    naive.replace_inf(left_P)
    naive.replace_inf(right_P)
    npt.assert_almost_equal(left_P, right_P, decimal=config.STUMPY_TEST_PRECISION)
    # npt.assert_almost_equal(left_I, right_I)
    # Repeat with the same seed using a pandas Series input.
    np.random.seed(seed)
    identical = np.random.rand(8)
    T = np.random.rand(20)
    T[1 : 1 + identical.shape[0]] = identical
    T[11 : 11 + identical.shape[0]] = identical
    T = pd.Series(T)
    stream = aampi(T, m, egress=False)
    for i in range(34):
        t = np.random.rand()
        stream.update(t)
    right_P = stream.P_
    right_I = stream.I_
    naive.replace_inf(right_P)
    npt.assert_almost_equal(left_P, right_P, decimal=config.STUMPY_TEST_PRECISION)
    # npt.assert_almost_equal(left_I, right_I)
def test_aampi_identical_subsequence_self_join_egress():
    """aampi with egress=True should match the naive streaming reference on a
    series containing two identical (exactly repeated) subsequences.

    Index assertions are commented out below — presumably because exact
    repeats produce distance ties, making the matching index ambiguous.
    """
    m = 3
    # NOTE(review): `zone` is unused here; presumably kept for symmetry with
    # the normalized tests.
    zone = int(np.ceil(m / 4))
    seed = np.random.randint(100000)
    np.random.seed(seed)
    identical = np.random.rand(8)
    T = np.random.rand(20)
    # Plant the same 8-sample pattern at positions 1 and 11.
    T[1 : 1 + identical.shape[0]] = identical
    T[11 : 11 + identical.shape[0]] = identical
    left = naive.aampi_egress(T, m)
    left_P = left.P_.copy()
    left_I = left.I_
    left_left_P = left.left_P_.copy()
    left_left_I = left.left_I_
    stream = aampi(T, m, egress=True)
    right_P = stream.P_.copy()
    right_I = stream.I_
    right_left_P = stream.left_P_.copy()
    right_left_I = stream.left_I_
    naive.replace_inf(left_P)
    naive.replace_inf(right_P)
    naive.replace_inf(left_left_P)
    naive.replace_inf(right_left_P)
    npt.assert_almost_equal(left_P, right_P, decimal=config.STUMPY_TEST_PRECISION)
    # npt.assert_almost_equal(left_I, right_I)
    npt.assert_almost_equal(
        left_left_P, right_left_P, decimal=config.STUMPY_TEST_PRECISION
    )
    # npt.assert_almost_equal(left_left_I, right_left_I)
    for i in range(34):
        t = np.random.rand()
        left.update(t)
        stream.update(t)
        right_P = stream.P_.copy()
        right_I = stream.I_
        right_left_P = stream.left_P_.copy()
        right_left_I = stream.left_I_
        left_P = left.P_.copy()
        left_I = left.I_
        left_left_P = left.left_P_.copy()
        left_left_I = left.left_I_
        naive.replace_inf(left_P)
        naive.replace_inf(left_left_P)
        naive.replace_inf(right_P)
        naive.replace_inf(right_left_P)
        npt.assert_almost_equal(left_P, right_P, decimal=config.STUMPY_TEST_PRECISION)
        # npt.assert_almost_equal(left_I, right_I)
        npt.assert_almost_equal(
            left_left_P, right_left_P, decimal=config.STUMPY_TEST_PRECISION
        )
        # npt.assert_almost_equal(left_left_I, right_left_I)
    # Repeat with the same seed using a pandas Series input.
    np.random.seed(seed)
    identical = np.random.rand(8)
    T = np.random.rand(20)
    T[1 : 1 + identical.shape[0]] = identical
    T[11 : 11 + identical.shape[0]] = identical
    T = pd.Series(T)
    left = naive.aampi_egress(T, m)
    left_P = left.P_.copy()
    left_I = left.I_
    left_left_P = left.left_P_.copy()
    left_left_I = left.left_I_
    stream = aampi(T, m, egress=True)
    right_P = stream.P_.copy()
    right_I = stream.I_
    right_left_P = stream.left_P_.copy()
    right_left_I = stream.left_I_
    naive.replace_inf(left_P)
    naive.replace_inf(right_P)
    naive.replace_inf(left_left_P)
    naive.replace_inf(right_left_P)
    npt.assert_almost_equal(left_P, right_P, decimal=config.STUMPY_TEST_PRECISION)
    # npt.assert_almost_equal(left_I, right_I)
    npt.assert_almost_equal(
        left_left_P, right_left_P, decimal=config.STUMPY_TEST_PRECISION
    )
    # npt.assert_almost_equal(left_left_I, right_left_I)
    for i in range(34):
        t = np.random.rand()
        left.update(t)
        stream.update(t)
        right_P = stream.P_.copy()
        right_I = stream.I_
        right_left_P = stream.left_P_.copy()
        right_left_I = stream.left_I_
        left_P = left.P_.copy()
        left_I = left.I_
        left_left_P = left.left_P_.copy()
        left_left_I = left.left_I_
        naive.replace_inf(left_P)
        naive.replace_inf(left_left_P)
        naive.replace_inf(right_P)
        naive.replace_inf(right_left_P)
        npt.assert_almost_equal(left_P, right_P, decimal=config.STUMPY_TEST_PRECISION)
        # npt.assert_almost_equal(left_I, right_I)
        npt.assert_almost_equal(
            left_left_P, right_left_P, decimal=config.STUMPY_TEST_PRECISION
        )
        # npt.assert_almost_equal(left_left_I, right_left_I)
def test_aampi_profile_index_match():
    """After every streaming update, recompute distances from the reported
    profile indices (``I_`` / ``left_I_``) and check that they agree with
    the streaming profile values (``P_`` / ``left_P_``)."""
    T_full = np.random.rand(64)
    m = 3
    T_full_subseq = core.rolling_window(T_full, m)
    warm_start = 8
    T_stream = T_full[:warm_start].copy()
    stream = aampi(T_stream, m, egress=True)
    P = np.full(stream.P_.shape, np.inf)
    left_P = np.full(stream.left_P_.shape, np.inf)

    # `offset` counts how many updates have been applied so far (starts at 0)
    for offset, i in enumerate(range(warm_start, len(T_full))):
        stream.update(T_full[i])

        # Rebuild the full profile from the reported nearest-neighbor indices
        P[:] = np.inf
        valid = np.argwhere(stream.I_ >= 0).flatten()
        P[valid] = naive.distance(
            T_full_subseq[valid + offset + 1],
            T_full_subseq[stream.I_[valid]],
            axis=1,
        )

        # Rebuild the left profile from the reported left indices
        left_P[:] = np.inf
        valid = np.argwhere(stream.left_I_ >= 0).flatten()
        left_P[valid] = naive.distance(
            T_full_subseq[valid + offset + 1],
            T_full_subseq[stream.left_I_[valid]],
            axis=1,
        )

        npt.assert_almost_equal(stream.P_, P)
        npt.assert_almost_equal(stream.left_P_, left_P)
| [
"numpy.random.seed",
"numpy.ones",
"naive.aamp",
"numpy.random.randint",
"numpy.arange",
"pytest.mark.parametrize",
"numpy.full",
"numpy.testing.assert_almost_equal",
"pytest.raises",
"naive.replace_inf",
"numpy.ceil",
"naive.distance",
"naive.aampi_egress",
"pandas.Series",
"numpy.argwh... | [((4750, 4808), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""substitute"""', 'substitution_values'], {}), "('substitute', substitution_values)\n", (4773, 4808), False, 'import pytest\n'), ((4810, 4883), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""substitution_locations"""', 'substitution_locations'], {}), "('substitution_locations', substitution_locations)\n", (4833, 4883), False, 'import pytest\n'), ((6402, 6460), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""substitute"""', 'substitution_values'], {}), "('substitute', substitution_values)\n", (6425, 6460), False, 'import pytest\n'), ((6462, 6535), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""substitution_locations"""', 'substitution_locations'], {}), "('substitution_locations', substitution_locations)\n", (6485, 6535), False, 'import pytest\n'), ((10025, 10083), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""substitute"""', 'substitution_values'], {}), "('substitute', substitution_values)\n", (10048, 10083), False, 'import pytest\n'), ((10085, 10158), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""substitution_locations"""', 'substitution_locations'], {}), "('substitution_locations', substitution_locations)\n", (10108, 10158), False, 'import pytest\n'), ((11588, 11646), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""substitute"""', 'substitution_values'], {}), "('substitute', substitution_values)\n", (11611, 11646), False, 'import pytest\n'), ((11648, 11721), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""substitution_locations"""', 'substitution_locations'], {}), "('substitution_locations', substitution_locations)\n", (11671, 11721), False, 'import pytest\n'), ((422, 447), 'numpy.random.randint', 'np.random.randint', (['(100000)'], {}), '(100000)\n', (439, 447), True, 'import numpy as np\n'), ((452, 472), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (466, 472), 
True, 'import numpy as np\n'), ((493, 510), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (507, 510), True, 'import numpy as np\n'), ((524, 549), 'stumpy.aampi', 'aampi', (['T', 'm'], {'egress': '(False)'}), '(T, m, egress=False)\n', (529, 549), False, 'from stumpy import aampi, core, config\n'), ((757, 781), 'naive.aamp', 'naive.aamp', (['stream.T_', 'm'], {}), '(stream.T_, m)\n', (767, 781), False, 'import naive\n'), ((848, 877), 'numpy.full', 'np.full', (['left_P.shape', 'np.inf'], {}), '(left_P.shape, np.inf)\n', (855, 877), True, 'import numpy as np\n'), ((1084, 1109), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (1101, 1109), False, 'import naive\n'), ((1114, 1144), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (1131, 1144), False, 'import naive\n'), ((1149, 1175), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (1166, 1175), False, 'import naive\n'), ((1180, 1211), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (1197, 1211), False, 'import naive\n'), ((1217, 1257), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (1240, 1257), True, 'import numpy.testing as npt\n'), ((1262, 1302), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_I', 'right_I'], {}), '(left_I, right_I)\n', (1285, 1302), True, 'import numpy.testing as npt\n'), ((1307, 1357), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {}), '(left_left_P, right_left_P)\n', (1330, 1357), True, 'import numpy.testing as npt\n'), ((1362, 1412), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_I', 'right_left_I'], {}), '(left_left_I, right_left_I)\n', (1385, 1412), True, 'import numpy.testing as npt\n'), ((1418, 1438), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1432, 1438), 
True, 'import numpy as np\n'), ((1458, 1475), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (1472, 1475), True, 'import numpy as np\n'), ((1484, 1496), 'pandas.Series', 'pd.Series', (['T'], {}), '(T)\n', (1493, 1496), True, 'import pandas as pd\n'), ((1510, 1535), 'stumpy.aampi', 'aampi', (['T', 'm'], {'egress': '(False)'}), '(T, m, egress=False)\n', (1515, 1535), False, 'from stumpy import aampi, core, config\n'), ((1736, 1762), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (1753, 1762), False, 'import naive\n'), ((1767, 1798), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (1784, 1798), False, 'import naive\n'), ((1804, 1844), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (1827, 1844), True, 'import numpy.testing as npt\n'), ((1849, 1889), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_I', 'right_I'], {}), '(left_I, right_I)\n', (1872, 1889), True, 'import numpy.testing as npt\n'), ((1894, 1944), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {}), '(left_left_P, right_left_P)\n', (1917, 1944), True, 'import numpy.testing as npt\n'), ((1949, 1999), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_I', 'right_left_I'], {}), '(left_left_I, right_left_I)\n', (1972, 1999), True, 'import numpy.testing as npt\n'), ((2090, 2115), 'numpy.random.randint', 'np.random.randint', (['(100000)'], {}), '(100000)\n', (2107, 2115), True, 'import numpy as np\n'), ((2120, 2140), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2134, 2140), True, 'import numpy as np\n'), ((2161, 2178), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (2175, 2178), True, 'import numpy as np\n'), ((2191, 2215), 'naive.aampi_egress', 'naive.aampi_egress', (['T', 'm'], {}), '(T, m)\n', (2209, 2215), False, 'import 
naive\n'), ((2348, 2372), 'stumpy.aampi', 'aampi', (['T', 'm'], {'egress': '(True)'}), '(T, m, egress=True)\n', (2353, 2372), False, 'from stumpy import aampi, core, config\n'), ((2509, 2534), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (2526, 2534), False, 'import naive\n'), ((2539, 2569), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (2556, 2569), False, 'import naive\n'), ((2574, 2600), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (2591, 2600), False, 'import naive\n'), ((2605, 2636), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (2622, 2636), False, 'import naive\n'), ((2642, 2682), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (2665, 2682), True, 'import numpy.testing as npt\n'), ((2687, 2727), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_I', 'right_I'], {}), '(left_I, right_I)\n', (2710, 2727), True, 'import numpy.testing as npt\n'), ((2732, 2782), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {}), '(left_left_P, right_left_P)\n', (2755, 2782), True, 'import numpy.testing as npt\n'), ((2787, 2837), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_I', 'right_left_I'], {}), '(left_left_I, right_left_I)\n', (2810, 2837), True, 'import numpy.testing as npt\n'), ((3594, 3614), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3608, 3614), True, 'import numpy as np\n'), ((3623, 3640), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (3637, 3640), True, 'import numpy as np\n'), ((3649, 3661), 'pandas.Series', 'pd.Series', (['T'], {}), '(T)\n', (3658, 3661), True, 'import pandas as pd\n'), ((3674, 3698), 'naive.aampi_egress', 'naive.aampi_egress', (['T', 'm'], {}), '(T, m)\n', (3692, 3698), False, 'import naive\n'), ((3762, 
3786), 'stumpy.aampi', 'aampi', (['T', 'm'], {'egress': '(True)'}), '(T, m, egress=True)\n', (3767, 3786), False, 'from stumpy import aampi, core, config\n'), ((3848, 3873), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (3865, 3873), False, 'import naive\n'), ((3878, 3904), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (3895, 3904), False, 'import naive\n'), ((3910, 3950), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (3933, 3950), True, 'import numpy.testing as npt\n'), ((3955, 3995), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_I', 'right_I'], {}), '(left_I, right_I)\n', (3978, 3995), True, 'import numpy.testing as npt\n'), ((5012, 5037), 'numpy.random.randint', 'np.random.randint', (['(100000)'], {}), '(100000)\n', (5029, 5037), True, 'import numpy as np\n'), ((6671, 6696), 'numpy.random.randint', 'np.random.randint', (['(100000)'], {}), '(100000)\n', (6688, 6696), True, 'import numpy as np\n'), ((10289, 10314), 'numpy.random.randint', 'np.random.randint', (['(100000)'], {}), '(100000)\n', (10306, 10314), True, 'import numpy as np\n'), ((11859, 11884), 'numpy.random.randint', 'np.random.randint', (['(100000)'], {}), '(100000)\n', (11876, 11884), True, 'import numpy as np\n'), ((15547, 15572), 'numpy.random.randint', 'np.random.randint', (['(100000)'], {}), '(100000)\n', (15564, 15572), True, 'import numpy as np\n'), ((15577, 15597), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (15591, 15597), True, 'import numpy as np\n'), ((15700, 15725), 'stumpy.aampi', 'aampi', (['T', 'm'], {'egress': '(False)'}), '(T, m, egress=False)\n', (15705, 15725), False, 'from stumpy import aampi, core, config\n'), ((15865, 15889), 'naive.aamp', 'naive.aamp', (['stream.T_', 'm'], {}), '(stream.T_, m)\n', (15875, 15889), False, 'import naive\n'), ((15943, 15968), 'naive.replace_inf', 'naive.replace_inf', 
(['left_P'], {}), '(left_P)\n', (15960, 15968), False, 'import naive\n'), ((15973, 15999), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (15990, 15999), False, 'import naive\n'), ((16005, 16045), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (16028, 16045), True, 'import numpy.testing as npt\n'), ((16098, 16118), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (16112, 16118), True, 'import numpy as np\n'), ((16215, 16227), 'pandas.Series', 'pd.Series', (['T'], {}), '(T)\n', (16224, 16227), True, 'import pandas as pd\n'), ((16241, 16266), 'stumpy.aampi', 'aampi', (['T', 'm'], {'egress': '(False)'}), '(T, m, egress=False)\n', (16246, 16266), False, 'from stumpy import aampi, core, config\n'), ((16399, 16425), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (16416, 16425), False, 'import naive\n'), ((16431, 16471), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (16454, 16471), True, 'import numpy.testing as npt\n'), ((16630, 16655), 'numpy.random.randint', 'np.random.randint', (['(100000)'], {}), '(100000)\n', (16647, 16655), True, 'import numpy as np\n'), ((16660, 16680), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (16674, 16680), True, 'import numpy as np\n'), ((16782, 16806), 'naive.aampi_egress', 'naive.aampi_egress', (['T', 'm'], {}), '(T, m)\n', (16800, 16806), False, 'import naive\n'), ((16939, 16963), 'stumpy.aampi', 'aampi', (['T', 'm'], {'egress': '(True)'}), '(T, m, egress=True)\n', (16944, 16963), False, 'from stumpy import aampi, core, config\n'), ((17100, 17125), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (17117, 17125), False, 'import naive\n'), ((17130, 17156), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (17147, 17156), False, 'import naive\n'), ((17161, 
17191), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (17178, 17191), False, 'import naive\n'), ((17196, 17227), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (17213, 17227), False, 'import naive\n'), ((17233, 17273), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (17256, 17273), True, 'import numpy.testing as npt\n'), ((17325, 17375), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {}), '(left_left_P, right_left_P)\n', (17348, 17375), True, 'import numpy.testing as npt\n'), ((18192, 18212), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (18206, 18212), True, 'import numpy as np\n'), ((18309, 18321), 'pandas.Series', 'pd.Series', (['T'], {}), '(T)\n', (18318, 18321), True, 'import pandas as pd\n'), ((18334, 18358), 'naive.aampi_egress', 'naive.aampi_egress', (['T', 'm'], {}), '(T, m)\n', (18352, 18358), False, 'import naive\n'), ((18491, 18515), 'stumpy.aampi', 'aampi', (['T', 'm'], {'egress': '(True)'}), '(T, m, egress=True)\n', (18496, 18515), False, 'from stumpy import aampi, core, config\n'), ((18652, 18677), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (18669, 18677), False, 'import naive\n'), ((18682, 18708), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (18699, 18708), False, 'import naive\n'), ((18713, 18743), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (18730, 18743), False, 'import naive\n'), ((18748, 18779), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (18765, 18779), False, 'import naive\n'), ((18785, 18825), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (18808, 18825), True, 'import numpy.testing as npt\n'), ((18877, 18927), 
'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {}), '(left_left_P, right_left_P)\n', (18900, 18927), True, 'import numpy.testing as npt\n'), ((19844, 19869), 'numpy.random.randint', 'np.random.randint', (['(100000)'], {}), '(100000)\n', (19861, 19869), True, 'import numpy as np\n'), ((19874, 19894), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (19888, 19894), True, 'import numpy as np\n'), ((19912, 19929), 'numpy.random.rand', 'np.random.rand', (['(8)'], {}), '(8)\n', (19926, 19929), True, 'import numpy as np\n'), ((19938, 19956), 'numpy.random.rand', 'np.random.rand', (['(20)'], {}), '(20)\n', (19952, 19956), True, 'import numpy as np\n'), ((20064, 20089), 'stumpy.aampi', 'aampi', (['T', 'm'], {'egress': '(False)'}), '(T, m, egress=False)\n', (20069, 20089), False, 'from stumpy import aampi, core, config\n'), ((20229, 20253), 'naive.aamp', 'naive.aamp', (['stream.T_', 'm'], {}), '(stream.T_, m)\n', (20239, 20253), False, 'import naive\n'), ((20307, 20332), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (20324, 20332), False, 'import naive\n'), ((20337, 20363), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (20354, 20363), False, 'import naive\n'), ((20369, 20447), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {'decimal': 'config.STUMPY_TEST_PRECISION'}), '(left_P, right_P, decimal=config.STUMPY_TEST_PRECISION)\n', (20392, 20447), True, 'import numpy.testing as npt\n'), ((20500, 20520), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (20514, 20520), True, 'import numpy as np\n'), ((20537, 20554), 'numpy.random.rand', 'np.random.rand', (['(8)'], {}), '(8)\n', (20551, 20554), True, 'import numpy as np\n'), ((20563, 20581), 'numpy.random.rand', 'np.random.rand', (['(20)'], {}), '(20)\n', (20577, 20581), True, 'import numpy as np\n'), ((20684, 20696), 'pandas.Series', 'pd.Series', 
(['T'], {}), '(T)\n', (20693, 20696), True, 'import pandas as pd\n'), ((20710, 20735), 'stumpy.aampi', 'aampi', (['T', 'm'], {'egress': '(False)'}), '(T, m, egress=False)\n', (20715, 20735), False, 'from stumpy import aampi, core, config\n'), ((20868, 20894), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (20885, 20894), False, 'import naive\n'), ((20900, 20978), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {'decimal': 'config.STUMPY_TEST_PRECISION'}), '(left_P, right_P, decimal=config.STUMPY_TEST_PRECISION)\n', (20923, 20978), True, 'import numpy.testing as npt\n'), ((21138, 21163), 'numpy.random.randint', 'np.random.randint', (['(100000)'], {}), '(100000)\n', (21155, 21163), True, 'import numpy as np\n'), ((21168, 21188), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (21182, 21188), True, 'import numpy as np\n'), ((21206, 21223), 'numpy.random.rand', 'np.random.rand', (['(8)'], {}), '(8)\n', (21220, 21223), True, 'import numpy as np\n'), ((21232, 21250), 'numpy.random.rand', 'np.random.rand', (['(20)'], {}), '(20)\n', (21246, 21250), True, 'import numpy as np\n'), ((21357, 21381), 'naive.aampi_egress', 'naive.aampi_egress', (['T', 'm'], {}), '(T, m)\n', (21375, 21381), False, 'import naive\n'), ((21514, 21538), 'stumpy.aampi', 'aampi', (['T', 'm'], {'egress': '(True)'}), '(T, m, egress=True)\n', (21519, 21538), False, 'from stumpy import aampi, core, config\n'), ((21675, 21700), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (21692, 21700), False, 'import naive\n'), ((21705, 21731), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (21722, 21731), False, 'import naive\n'), ((21736, 21766), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (21753, 21766), False, 'import naive\n'), ((21771, 21802), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', 
(21788, 21802), False, 'import naive\n'), ((21808, 21886), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {'decimal': 'config.STUMPY_TEST_PRECISION'}), '(left_P, right_P, decimal=config.STUMPY_TEST_PRECISION)\n', (21831, 21886), True, 'import numpy.testing as npt\n'), ((21938, 22031), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {'decimal': 'config.STUMPY_TEST_PRECISION'}), '(left_left_P, right_left_P, decimal=config.\n STUMPY_TEST_PRECISION)\n', (21961, 22031), True, 'import numpy.testing as npt\n'), ((22955, 22975), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (22969, 22975), True, 'import numpy as np\n'), ((22992, 23009), 'numpy.random.rand', 'np.random.rand', (['(8)'], {}), '(8)\n', (23006, 23009), True, 'import numpy as np\n'), ((23018, 23036), 'numpy.random.rand', 'np.random.rand', (['(20)'], {}), '(20)\n', (23032, 23036), True, 'import numpy as np\n'), ((23139, 23151), 'pandas.Series', 'pd.Series', (['T'], {}), '(T)\n', (23148, 23151), True, 'import pandas as pd\n'), ((23164, 23188), 'naive.aampi_egress', 'naive.aampi_egress', (['T', 'm'], {}), '(T, m)\n', (23182, 23188), False, 'import naive\n'), ((23321, 23345), 'stumpy.aampi', 'aampi', (['T', 'm'], {'egress': '(True)'}), '(T, m, egress=True)\n', (23326, 23345), False, 'from stumpy import aampi, core, config\n'), ((23482, 23507), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (23499, 23507), False, 'import naive\n'), ((23512, 23538), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (23529, 23538), False, 'import naive\n'), ((23543, 23573), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (23560, 23573), False, 'import naive\n'), ((23578, 23609), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (23595, 23609), False, 'import naive\n'), ((23615, 23693), 
'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {'decimal': 'config.STUMPY_TEST_PRECISION'}), '(left_P, right_P, decimal=config.STUMPY_TEST_PRECISION)\n', (23638, 23693), True, 'import numpy.testing as npt\n'), ((23745, 23838), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {'decimal': 'config.STUMPY_TEST_PRECISION'}), '(left_left_P, right_left_P, decimal=config.\n STUMPY_TEST_PRECISION)\n', (23768, 23838), True, 'import numpy.testing as npt\n'), ((24810, 24828), 'numpy.random.rand', 'np.random.rand', (['(64)'], {}), '(64)\n', (24824, 24828), True, 'import numpy as np\n'), ((24859, 24889), 'stumpy.core.rolling_window', 'core.rolling_window', (['T_full', 'm'], {}), '(T_full, m)\n', (24878, 24889), False, 'from stumpy import aampi, core, config\n'), ((24965, 24996), 'stumpy.aampi', 'aampi', (['T_stream', 'm'], {'egress': '(True)'}), '(T_stream, m, egress=True)\n', (24970, 24996), False, 'from stumpy import aampi, core, config\n'), ((25005, 25037), 'numpy.full', 'np.full', (['stream.P_.shape', 'np.inf'], {}), '(stream.P_.shape, np.inf)\n', (25012, 25037), True, 'import numpy as np\n'), ((25051, 25088), 'numpy.full', 'np.full', (['stream.left_P_.shape', 'np.inf'], {}), '(stream.left_P_.shape, np.inf)\n', (25058, 25088), True, 'import numpy as np\n'), ((281, 305), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (294, 305), False, 'import pytest\n'), ((394, 408), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (401, 408), True, 'import numpy as np\n'), ((586, 602), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (600, 602), True, 'import numpy as np\n'), ((1572, 1588), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1586, 1588), True, 'import numpy as np\n'), ((2062, 2076), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (2069, 2076), True, 'import numpy as np\n'), ((2875, 2891), 'numpy.random.rand', 'np.random.rand', 
([], {}), '()\n', (2889, 2891), True, 'import numpy as np\n'), ((3232, 3257), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (3249, 3257), False, 'import naive\n'), ((3266, 3296), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (3283, 3296), False, 'import naive\n'), ((3305, 3331), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (3322, 3331), False, 'import naive\n'), ((3340, 3371), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (3357, 3371), False, 'import naive\n'), ((3381, 3421), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (3404, 3421), True, 'import numpy.testing as npt\n'), ((3430, 3470), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_I', 'right_I'], {}), '(left_I, right_I)\n', (3453, 3470), True, 'import numpy.testing as npt\n'), ((3479, 3529), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {}), '(left_left_P, right_left_P)\n', (3502, 3529), True, 'import numpy.testing as npt\n'), ((3538, 3588), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_I', 'right_left_I'], {}), '(left_left_I, right_left_I)\n', (3561, 3588), True, 'import numpy.testing as npt\n'), ((4033, 4049), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4047, 4049), True, 'import numpy as np\n'), ((4390, 4415), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (4407, 4415), False, 'import naive\n'), ((4424, 4454), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (4441, 4454), False, 'import naive\n'), ((4463, 4489), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (4480, 4489), False, 'import naive\n'), ((4498, 4529), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', 
(4515, 4529), False, 'import naive\n'), ((4539, 4579), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (4562, 4579), True, 'import numpy.testing as npt\n'), ((4588, 4628), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_I', 'right_I'], {}), '(left_I, right_I)\n', (4611, 4628), True, 'import numpy.testing as npt\n'), ((4637, 4687), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {}), '(left_left_P, right_left_P)\n', (4660, 4687), True, 'import numpy.testing as npt\n'), ((4696, 4746), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_I', 'right_left_I'], {}), '(left_left_I, right_left_I)\n', (4719, 4746), True, 'import numpy.testing as npt\n'), ((4984, 4998), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (4991, 4998), True, 'import numpy as np\n'), ((5123, 5143), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5137, 5143), True, 'import numpy as np\n'), ((5171, 5188), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (5185, 5188), True, 'import numpy as np\n'), ((5344, 5369), 'stumpy.aampi', 'aampi', (['T', 'm'], {'egress': '(False)'}), '(T, m, egress=False)\n', (5349, 5369), False, 'from stumpy import aampi, core, config\n'), ((5587, 5611), 'naive.aamp', 'naive.aamp', (['stream.T_', 'm'], {}), '(stream.T_, m)\n', (5597, 5611), False, 'import naive\n'), ((5677, 5702), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (5694, 5702), False, 'import naive\n'), ((5711, 5737), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (5728, 5737), False, 'import naive\n'), ((5746, 5786), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (5769, 5786), True, 'import numpy.testing as npt\n'), ((5795, 5835), 'numpy.testing.assert_almost_equal', 
'npt.assert_almost_equal', (['left_I', 'right_I'], {}), '(left_I, right_I)\n', (5818, 5835), True, 'import numpy.testing as npt\n'), ((5845, 5865), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5859, 5865), True, 'import numpy as np\n'), ((5893, 5910), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (5907, 5910), True, 'import numpy as np\n'), ((6061, 6073), 'pandas.Series', 'pd.Series', (['T'], {}), '(T)\n', (6070, 6073), True, 'import pandas as pd\n'), ((6091, 6116), 'stumpy.aampi', 'aampi', (['T', 'm'], {'egress': '(False)'}), '(T, m, egress=False)\n', (6096, 6116), False, 'from stumpy import aampi, core, config\n'), ((6273, 6299), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (6290, 6299), False, 'import naive\n'), ((6309, 6349), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (6332, 6349), True, 'import numpy.testing as npt\n'), ((6358, 6398), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_I', 'right_I'], {}), '(left_I, right_I)\n', (6381, 6398), True, 'import numpy.testing as npt\n'), ((6643, 6657), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (6650, 6657), True, 'import numpy as np\n'), ((6782, 6802), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6796, 6802), True, 'import numpy as np\n'), ((6830, 6847), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (6844, 6847), True, 'import numpy as np\n'), ((7002, 7026), 'naive.aampi_egress', 'naive.aampi_egress', (['T', 'm'], {}), '(T, m)\n', (7020, 7026), False, 'import naive\n'), ((7179, 7203), 'stumpy.aampi', 'aampi', (['T', 'm'], {'egress': '(True)'}), '(T, m, egress=True)\n', (7184, 7203), False, 'from stumpy import aampi, core, config\n'), ((7360, 7385), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (7377, 7385), False, 'import naive\n'), ((7394, 7420), 'naive.replace_inf', 
'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (7411, 7420), False, 'import naive\n'), ((7429, 7459), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (7446, 7459), False, 'import naive\n'), ((7468, 7499), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (7485, 7499), False, 'import naive\n'), ((7509, 7549), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (7532, 7549), True, 'import numpy.testing as npt\n'), ((7558, 7598), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_I', 'right_I'], {}), '(left_I, right_I)\n', (7581, 7598), True, 'import numpy.testing as npt\n'), ((7607, 7657), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {}), '(left_left_P, right_left_P)\n', (7630, 7657), True, 'import numpy.testing as npt\n'), ((7666, 7716), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_I', 'right_left_I'], {}), '(left_left_I, right_left_I)\n', (7689, 7716), True, 'import numpy.testing as npt\n'), ((8557, 8577), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (8571, 8577), True, 'import numpy as np\n'), ((8605, 8622), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (8619, 8622), True, 'import numpy as np\n'), ((8635, 8647), 'pandas.Series', 'pd.Series', (['T'], {}), '(T)\n', (8644, 8647), True, 'import pandas as pd\n'), ((8664, 8688), 'naive.aampi_egress', 'naive.aampi_egress', (['T', 'm'], {}), '(T, m)\n', (8682, 8688), False, 'import naive\n'), ((8841, 8865), 'stumpy.aampi', 'aampi', (['T', 'm'], {'egress': '(True)'}), '(T, m, egress=True)\n', (8846, 8865), False, 'from stumpy import aampi, core, config\n'), ((9022, 9062), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (9045, 9062), True, 'import numpy.testing as npt\n'), 
((9071, 9111), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_I', 'right_I'], {}), '(left_I, right_I)\n', (9094, 9111), True, 'import numpy.testing as npt\n'), ((9120, 9150), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (9137, 9150), False, 'import naive\n'), ((9159, 9190), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (9176, 9190), False, 'import naive\n'), ((10261, 10275), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (10268, 10275), True, 'import numpy as np\n'), ((10381, 10401), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (10395, 10401), True, 'import numpy as np\n'), ((10429, 10447), 'numpy.random.rand', 'np.random.rand', (['(64)'], {}), '(64)\n', (10443, 10447), True, 'import numpy as np\n'), ((10466, 10495), 'stumpy.aampi', 'aampi', (['T[:n]', 'm'], {'egress': '(False)'}), '(T[:n], m, egress=False)\n', (10471, 10495), False, 'from stumpy import aampi, core, config\n'), ((10825, 10849), 'naive.aamp', 'naive.aamp', (['stream.T_', 'm'], {}), '(stream.T_, m)\n', (10835, 10849), False, 'import naive\n'), ((10915, 10940), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (10932, 10940), False, 'import naive\n'), ((10949, 10975), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (10966, 10975), False, 'import naive\n'), ((10985, 11025), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (11008, 11025), True, 'import numpy.testing as npt\n'), ((11034, 11074), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_I', 'right_I'], {}), '(left_I, right_I)\n', (11057, 11074), True, 'import numpy.testing as npt\n'), ((11084, 11104), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (11098, 11104), True, 'import numpy as np\n'), ((11117, 11135), 'numpy.random.rand', 'np.random.rand', 
(['(64)'], {}), '(64)\n', (11131, 11135), True, 'import numpy as np\n'), ((11459, 11485), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (11476, 11485), False, 'import naive\n'), ((11495, 11535), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (11518, 11535), True, 'import numpy.testing as npt\n'), ((11544, 11584), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_I', 'right_I'], {}), '(left_I, right_I)\n', (11567, 11584), True, 'import numpy.testing as npt\n'), ((11831, 11845), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (11838, 11845), True, 'import numpy as np\n'), ((11951, 11971), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (11965, 11971), True, 'import numpy as np\n'), ((11999, 12017), 'numpy.random.rand', 'np.random.rand', (['(64)'], {}), '(64)\n', (12013, 12017), True, 'import numpy as np\n'), ((12034, 12062), 'naive.aampi_egress', 'naive.aampi_egress', (['T[:n]', 'm'], {}), '(T[:n], m)\n', (12052, 12062), False, 'import naive\n'), ((12215, 12243), 'stumpy.aampi', 'aampi', (['T[:n]', 'm'], {'egress': '(True)'}), '(T[:n], m, egress=True)\n', (12220, 12243), False, 'from stumpy import aampi, core, config\n'), ((12400, 12425), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (12417, 12425), False, 'import naive\n'), ((12434, 12460), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (12451, 12460), False, 'import naive\n'), ((12469, 12499), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (12486, 12499), False, 'import naive\n'), ((12508, 12539), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (12525, 12539), False, 'import naive\n'), ((12549, 12589), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (12572, 12589), True, 
'import numpy.testing as npt\n'), ((12598, 12638), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_I', 'right_I'], {}), '(left_I, right_I)\n', (12621, 12638), True, 'import numpy.testing as npt\n'), ((12647, 12697), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {}), '(left_left_P, right_left_P)\n', (12670, 12697), True, 'import numpy.testing as npt\n'), ((12706, 12756), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_I', 'right_left_I'], {}), '(left_left_I, right_left_I)\n', (12729, 12756), True, 'import numpy.testing as npt\n'), ((13704, 13724), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (13718, 13724), True, 'import numpy as np\n'), ((13737, 13755), 'numpy.random.rand', 'np.random.rand', (['(64)'], {}), '(64)\n', (13751, 13755), True, 'import numpy as np\n'), ((13772, 13800), 'naive.aampi_egress', 'naive.aampi_egress', (['T[:n]', 'm'], {}), '(T[:n], m)\n', (13790, 13800), False, 'import naive\n'), ((14149, 14174), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (14166, 14174), False, 'import naive\n'), ((14183, 14209), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (14200, 14209), False, 'import naive\n'), ((14218, 14248), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (14235, 14248), False, 'import naive\n'), ((14257, 14288), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (14274, 14288), False, 'import naive\n'), ((14298, 14338), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (14321, 14338), True, 'import numpy.testing as npt\n'), ((14347, 14387), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_I', 'right_I'], {}), '(left_I, right_I)\n', (14370, 14387), True, 'import numpy.testing as npt\n'), ((14396, 14446), 
'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {}), '(left_left_P, right_left_P)\n', (14419, 14446), True, 'import numpy.testing as npt\n'), ((14455, 14505), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_I', 'right_left_I'], {}), '(left_left_I, right_left_I)\n', (14478, 14505), True, 'import numpy.testing as npt\n'), ((15519, 15533), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (15526, 15533), True, 'import numpy as np\n'), ((15762, 15778), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (15776, 15778), True, 'import numpy as np\n'), ((16303, 16319), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (16317, 16319), True, 'import numpy as np\n'), ((16602, 16616), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (16609, 16616), True, 'import numpy as np\n'), ((17470, 17486), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (17484, 17486), True, 'import numpy as np\n'), ((17826, 17851), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (17843, 17851), False, 'import naive\n'), ((17860, 17890), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (17877, 17890), False, 'import naive\n'), ((17899, 17925), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (17916, 17925), False, 'import naive\n'), ((17934, 17965), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (17951, 17965), False, 'import naive\n'), ((17975, 18015), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (17998, 18015), True, 'import numpy.testing as npt\n'), ((18075, 18125), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {}), '(left_left_P, right_left_P)\n', (18098, 18125), True, 'import numpy.testing as npt\n'), ((19022, 19038), 'numpy.random.rand', 
'np.random.rand', ([], {}), '()\n', (19036, 19038), True, 'import numpy as np\n'), ((19378, 19403), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (19395, 19403), False, 'import naive\n'), ((19412, 19442), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (19429, 19442), False, 'import naive\n'), ((19451, 19477), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (19468, 19477), False, 'import naive\n'), ((19486, 19517), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (19503, 19517), False, 'import naive\n'), ((19527, 19567), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (19550, 19567), True, 'import numpy.testing as npt\n'), ((19627, 19677), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {}), '(left_left_P, right_left_P)\n', (19650, 19677), True, 'import numpy.testing as npt\n'), ((19816, 19830), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (19823, 19830), True, 'import numpy as np\n'), ((20126, 20142), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (20140, 20142), True, 'import numpy as np\n'), ((20772, 20788), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (20786, 20788), True, 'import numpy as np\n'), ((21110, 21124), 'numpy.ceil', 'np.ceil', (['(m / 4)'], {}), '(m / 4)\n', (21117, 21124), True, 'import numpy as np\n'), ((22135, 22151), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (22149, 22151), True, 'import numpy as np\n'), ((22491, 22516), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (22508, 22516), False, 'import naive\n'), ((22525, 22555), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (22542, 22555), False, 'import naive\n'), ((22564, 22590), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), 
'(right_P)\n', (22581, 22590), False, 'import naive\n'), ((22599, 22630), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (22616, 22630), False, 'import naive\n'), ((22640, 22718), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {'decimal': 'config.STUMPY_TEST_PRECISION'}), '(left_P, right_P, decimal=config.STUMPY_TEST_PRECISION)\n', (22663, 22718), True, 'import numpy.testing as npt\n'), ((22778, 22871), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {'decimal': 'config.STUMPY_TEST_PRECISION'}), '(left_left_P, right_left_P, decimal=config.\n STUMPY_TEST_PRECISION)\n', (22801, 22871), True, 'import numpy.testing as npt\n'), ((23942, 23958), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (23956, 23958), True, 'import numpy as np\n'), ((24298, 24323), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (24315, 24323), False, 'import naive\n'), ((24332, 24362), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (24349, 24362), False, 'import naive\n'), ((24371, 24397), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (24388, 24397), False, 'import naive\n'), ((24406, 24437), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (24423, 24437), False, 'import naive\n'), ((24447, 24525), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {'decimal': 'config.STUMPY_TEST_PRECISION'}), '(left_P, right_P, decimal=config.STUMPY_TEST_PRECISION)\n', (24470, 24525), True, 'import numpy.testing as npt\n'), ((24585, 24678), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {'decimal': 'config.STUMPY_TEST_PRECISION'}), '(left_left_P, right_left_P, decimal=config.\n STUMPY_TEST_PRECISION)\n', (24608, 24678), True, 'import numpy.testing as npt\n'), 
((25287, 25372), 'naive.distance', 'naive.distance', (['T_full_subseq[idx + n + 1]', 'T_full_subseq[stream.I_[idx]]'], {'axis': '(1)'}), '(T_full_subseq[idx + n + 1], T_full_subseq[stream.I_[idx]],\n axis=1)\n', (25301, 25372), False, 'import naive\n'), ((25498, 25589), 'naive.distance', 'naive.distance', (['T_full_subseq[idx + n + 1]', 'T_full_subseq[stream.left_I_[idx]]'], {'axis': '(1)'}), '(T_full_subseq[idx + n + 1], T_full_subseq[stream.left_I_[idx\n ]], axis=1)\n', (25512, 25589), False, 'import naive\n'), ((25616, 25653), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['stream.P_', 'P'], {}), '(stream.P_, P)\n', (25639, 25653), True, 'import numpy.testing as npt\n'), ((25662, 25709), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['stream.left_P_', 'left_P'], {}), '(stream.left_P_, left_P)\n', (25685, 25709), True, 'import numpy.testing as npt\n'), ((321, 334), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (330, 334), True, 'import numpy as np\n'), ((982, 1040), 'stumpy.core.mass_absolute', 'core.mass_absolute', (['stream.T_[i:i + m]', 'stream.T_[j:j + m]'], {}), '(stream.T_[i:i + m], stream.T_[j:j + m])\n', (1000, 1040), False, 'from stumpy import aampi, core, config\n'), ((5414, 5430), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5428, 5430), True, 'import numpy as np\n'), ((6161, 6177), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6175, 6177), True, 'import numpy as np\n'), ((7762, 7778), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7776, 7778), True, 'import numpy as np\n'), ((8163, 8188), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (8180, 8188), False, 'import naive\n'), ((8201, 8231), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (8218, 8231), False, 'import naive\n'), ((8244, 8270), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (8261, 8270), False, 'import naive\n'), 
((8283, 8314), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (8300, 8314), False, 'import naive\n'), ((8328, 8368), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (8351, 8368), True, 'import numpy.testing as npt\n'), ((8381, 8421), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_I', 'right_I'], {}), '(left_I, right_I)\n', (8404, 8421), True, 'import numpy.testing as npt\n'), ((8434, 8484), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {}), '(left_left_P, right_left_P)\n', (8457, 8484), True, 'import numpy.testing as npt\n'), ((8497, 8547), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_I', 'right_left_I'], {}), '(left_left_I, right_left_I)\n', (8520, 8547), True, 'import numpy.testing as npt\n'), ((9236, 9252), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9250, 9252), True, 'import numpy as np\n'), ((9637, 9662), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (9654, 9662), False, 'import naive\n'), ((9675, 9705), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (9692, 9705), False, 'import naive\n'), ((9718, 9744), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (9735, 9744), False, 'import naive\n'), ((9757, 9788), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (9774, 9788), False, 'import naive\n'), ((9802, 9842), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (9825, 9842), True, 'import numpy.testing as npt\n'), ((9855, 9895), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_I', 'right_I'], {}), '(left_I, right_I)\n', (9878, 9895), True, 'import numpy.testing as npt\n'), ((9908, 9958), 'numpy.testing.assert_almost_equal', 
'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {}), '(left_left_P, right_left_P)\n', (9931, 9958), True, 'import numpy.testing as npt\n'), ((9971, 10021), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_I', 'right_left_I'], {}), '(left_left_I, right_left_I)\n', (9994, 10021), True, 'import numpy.testing as npt\n'), ((11160, 11176), 'pandas.Series', 'pd.Series', (['T[:n]'], {}), '(T[:n])\n', (11169, 11176), True, 'import pandas as pd\n'), ((13310, 13335), 'naive.replace_inf', 'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (13327, 13335), False, 'import naive\n'), ((13348, 13378), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (13365, 13378), False, 'import naive\n'), ((13391, 13417), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (13408, 13417), False, 'import naive\n'), ((13430, 13461), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (13447, 13461), False, 'import naive\n'), ((13475, 13515), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (13498, 13515), True, 'import numpy.testing as npt\n'), ((13528, 13568), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_I', 'right_I'], {}), '(left_I, right_I)\n', (13551, 13568), True, 'import numpy.testing as npt\n'), ((13581, 13631), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {}), '(left_left_P, right_left_P)\n', (13604, 13631), True, 'import numpy.testing as npt\n'), ((13644, 13694), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_I', 'right_left_I'], {}), '(left_left_I, right_left_I)\n', (13667, 13694), True, 'import numpy.testing as npt\n'), ((13959, 13975), 'pandas.Series', 'pd.Series', (['T[:n]'], {}), '(T[:n])\n', (13968, 13975), True, 'import pandas as pd\n'), ((15058, 15083), 'naive.replace_inf', 
'naive.replace_inf', (['left_P'], {}), '(left_P)\n', (15075, 15083), False, 'import naive\n'), ((15096, 15126), 'naive.replace_inf', 'naive.replace_inf', (['left_left_P'], {}), '(left_left_P)\n', (15113, 15126), False, 'import naive\n'), ((15139, 15165), 'naive.replace_inf', 'naive.replace_inf', (['right_P'], {}), '(right_P)\n', (15156, 15165), False, 'import naive\n'), ((15178, 15209), 'naive.replace_inf', 'naive.replace_inf', (['right_left_P'], {}), '(right_left_P)\n', (15195, 15209), False, 'import naive\n'), ((15223, 15263), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_P', 'right_P'], {}), '(left_P, right_P)\n', (15246, 15263), True, 'import numpy.testing as npt\n'), ((15276, 15316), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_I', 'right_I'], {}), '(left_I, right_I)\n', (15299, 15316), True, 'import numpy.testing as npt\n'), ((15329, 15379), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_P', 'right_left_P'], {}), '(left_left_P, right_left_P)\n', (15352, 15379), True, 'import numpy.testing as npt\n'), ((15392, 15442), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['left_left_I', 'right_left_I'], {}), '(left_left_I, right_left_I)\n', (15415, 15442), True, 'import numpy.testing as npt\n'), ((15623, 15653), 'numpy.zeros', 'np.zeros', (['(20)'], {'dtype': 'np.float64'}), '(20, dtype=np.float64)\n', (15631, 15653), True, 'import numpy as np\n'), ((15655, 15684), 'numpy.ones', 'np.ones', (['(10)'], {'dtype': 'np.float64'}), '(10, dtype=np.float64)\n', (15662, 15684), True, 'import numpy as np\n'), ((16143, 16173), 'numpy.zeros', 'np.zeros', (['(20)'], {'dtype': 'np.float64'}), '(20, dtype=np.float64)\n', (16151, 16173), True, 'import numpy as np\n'), ((16175, 16204), 'numpy.ones', 'np.ones', (['(10)'], {'dtype': 'np.float64'}), '(10, dtype=np.float64)\n', (16182, 16204), True, 'import numpy as np\n'), ((16706, 16736), 'numpy.zeros', 'np.zeros', (['(20)'], {'dtype': 
'np.float64'}), '(20, dtype=np.float64)\n', (16714, 16736), True, 'import numpy as np\n'), ((16738, 16767), 'numpy.ones', 'np.ones', (['(10)'], {'dtype': 'np.float64'}), '(10, dtype=np.float64)\n', (16745, 16767), True, 'import numpy as np\n'), ((18237, 18267), 'numpy.zeros', 'np.zeros', (['(20)'], {'dtype': 'np.float64'}), '(20, dtype=np.float64)\n', (18245, 18267), True, 'import numpy as np\n'), ((18269, 18298), 'numpy.ones', 'np.ones', (['(10)'], {'dtype': 'np.float64'}), '(10, dtype=np.float64)\n', (18276, 18298), True, 'import numpy as np\n'), ((25232, 25259), 'numpy.argwhere', 'np.argwhere', (['(stream.I_ >= 0)'], {}), '(stream.I_ >= 0)\n', (25243, 25259), True, 'import numpy as np\n'), ((25433, 25465), 'numpy.argwhere', 'np.argwhere', (['(stream.left_I_ >= 0)'], {}), '(stream.left_I_ >= 0)\n', (25444, 25465), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from endochrone.utils import lazy_test_runner as ltr
import endochrone.time_series.arima as arima
__author__ = "nickwood"
__copyright__ = "nickwood"
__license__ = "mit"
def test_ar1_model():
    """AR(1) on a linear ramp: slope 1, intercept = step size, zero residuals."""
    series = np.arange(0, 10, 0.2)
    model = arima.ArModel(order=1, calculate_residuals=True)
    # The lag matrix is the series shifted by one step, shaped as a column.
    assert np.all(model.generate_lags(series) == series[:-1, np.newaxis])
    assert np.all(
        model.generate_lags(series, include_last=True) == series[:, np.newaxis])
    assert np.all(model.generate_targets(series) == series[1:])
    model.fit(series)
    # A deterministic ramp is fit exactly: x_t = 1 * x_{t-1} + 0.2.
    assert model.coef_[0] == pytest.approx(1)
    assert model.intercept_ == pytest.approx(0.2)
    assert np.all(model.residuals_ == pytest.approx(0))
    inputs = np.array([4, 7.4, 9])
    expected = np.array([4.2, 7.6, 9.2])
    assert np.all(model.predict(inputs) == pytest.approx(expected))
def test_ar2_model():
    """AR(2) recovers t_2 = t_1 + t_0 (the Fibonacci recurrence)."""
    # Build the first 25 Fibonacci numbers instead of spelling them out.
    fib = [0, 1]
    while len(fib) < 25:
        fib.append(fib[-1] + fib[-2])
    series = np.array(fib)
    model = arima.ArModel(order=2, calculate_residuals=True)
    # Lag matrix: column 0 is lag-1, column 1 is lag-2.
    lagged = model.generate_lags(series)
    assert lagged.shape == (series.size - 2, 2)
    assert np.all(lagged[:, 0] == series[1:-1])
    assert np.all(lagged[:, 1] == series[:-2])
    lagged_last = model.generate_lags(series, include_last=True)
    assert lagged_last.shape == (series.size - 1, 2)
    assert np.all(lagged_last[:, 0] == series[1:])
    assert np.all(lagged_last[:, 1] == series[:-1])
    assert np.all(model.generate_targets(series) == series[2:])
    model.fit(series)
    # Both AR coefficients should be 1 with a (near) zero intercept.
    assert np.all(model.coef_ == pytest.approx(1))
    assert model.intercept_ == pytest.approx(0, abs=0.005)
    assert np.all(model.residuals_ == pytest.approx(0, abs=0.005))
    inputs = np.array([4, 8, 12, 24])
    expected = np.array([12, 20, 36])
    assert np.all(model.predict(inputs) == pytest.approx(expected, abs=0.005))
def test_ma1_model():
    """MA(1): residual recursion, fitted thetas, and prediction behaviour."""
    x = np.array([9, 10, 11, 12, 11, 10, 9, 8])
    MA1 = arima.MaModel(order=1)
    # Residuals for a fixed parameter vector [mean, theta_1].
    residuals = MA1.residuals(x, [np.mean(x), 0.5])
    exp_res = [-1.0, 0.5, 0.75, 1.625, 0.1875, -0.09375, -0.953125, -1.5234375]
    assert np.all(residuals == pytest.approx(exp_res))
    # fit() is expected to report success truthily.
    assert MA1.fit(x)
    exp_thetas = [9.6237, 0.7531]
    assert np.all(MA1.thetas_ == pytest.approx(exp_thetas, abs=0.0001))
    # Predicting from an empty series is invalid; the return value inside the
    # raises-block was previously bound to an unused variable.
    with pytest.raises(ValueError):
        MA1.predict([])
    preds = MA1.predict([10, 11, 9, 10, 12])
    exp = [9.90709153, 10.44676937, 8.534137988, 10.72764068, 10.5819138]
    assert np.all(preds == pytest.approx(exp, abs=0.001))
def test_ma2_model():
    """MA(2): residual recursion, SSE of fitted residuals, thetas, predictions."""
    x = np.array([9, 10, 11, 12, 11, 10, 9, 8])
    MA2 = arima.MaModel(order=2)
    # Residuals for a fixed parameter vector [mean, theta_1, theta_2].
    residuals = MA2.residuals(x, [np.mean(x), 0.5, 0.5])
    exp_res = [-1.0, 0.5, 1.25, 1.125, -0.1875, -0.46875, -0.671875, -1.429688]
    assert np.all(residuals == pytest.approx(exp_res))
    assert MA2.fit(x)
    # Sum of squared residuals after fitting.
    assert np.sum(MA2.residuals_**2) == pytest.approx(1.7175879816717088)
    exp_thetas = [8.86689311, 1.38181157, 1.98175309]
    assert np.all(MA2.thetas_ == pytest.approx(exp_thetas))
    # A single observation is not enough input for an order-2 model; the
    # return value inside the raises-block was previously bound but unused.
    with pytest.raises(ValueError):
        MA2.predict([10])
    preds = MA2.predict([10, 11, 9, 10, 12])
    exp = [11.89642503, 5.988960158, 8.669395113, 21.41805208, 15.46732964]
    assert np.all(preds == pytest.approx(exp, abs=0.001))
ltr()
| [
"endochrone.time_series.arima.MaModel",
"endochrone.time_series.arima.ArModel",
"numpy.sum",
"pytest.raises",
"numpy.mean",
"numpy.array",
"numpy.arange",
"endochrone.utils.lazy_test_runner",
"pytest.approx",
"numpy.all"
] | [((3288, 3293), 'endochrone.utils.lazy_test_runner', 'ltr', ([], {}), '()\n', (3291, 3293), True, 'from endochrone.utils import lazy_test_runner as ltr\n'), ((260, 281), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(0.2)'], {}), '(0, 10, 0.2)\n', (269, 281), True, 'import numpy as np\n'), ((293, 341), 'endochrone.time_series.arima.ArModel', 'arima.ArModel', ([], {'order': '(1)', 'calculate_residuals': '(True)'}), '(order=1, calculate_residuals=True)\n', (306, 341), True, 'import endochrone.time_series.arima as arima\n'), ((714, 735), 'numpy.array', 'np.array', (['[4, 7.4, 9]'], {}), '([4, 7.4, 9])\n', (722, 735), True, 'import numpy as np\n'), ((748, 773), 'numpy.array', 'np.array', (['[4.2, 7.6, 9.2]'], {}), '([4.2, 7.6, 9.2])\n', (756, 773), True, 'import numpy as np\n'), ((913, 1045), 'numpy.array', 'np.array', (['[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, \n 2584, 4181, 6765, 10946, 17711, 28657, 46368]'], {}), '([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987,\n 1597, 2584, 4181, 6765, 10946, 17711, 28657, 46368])\n', (921, 1045), True, 'import numpy as np\n'), ((1071, 1119), 'endochrone.time_series.arima.ArModel', 'arima.ArModel', ([], {'order': '(2)', 'calculate_residuals': '(True)'}), '(order=2, calculate_residuals=True)\n', (1084, 1119), True, 'import endochrone.time_series.arima as arima\n'), ((1212, 1241), 'numpy.all', 'np.all', (['(lags[:, 0] == x[1:-1])'], {}), '(lags[:, 0] == x[1:-1])\n', (1218, 1241), True, 'import numpy as np\n'), ((1253, 1281), 'numpy.all', 'np.all', (['(lags[:, 1] == x[:-2])'], {}), '(lags[:, 1] == x[:-2])\n', (1259, 1281), True, 'import numpy as np\n'), ((1390, 1420), 'numpy.all', 'np.all', (['(lags_wl[:, 0] == x[1:])'], {}), '(lags_wl[:, 0] == x[1:])\n', (1396, 1420), True, 'import numpy as np\n'), ((1432, 1463), 'numpy.all', 'np.all', (['(lags_wl[:, 1] == x[:-1])'], {}), '(lags_wl[:, 1] == x[:-1])\n', (1438, 1463), True, 'import numpy as np\n'), ((1720, 1744), 'numpy.array', 
'np.array', (['[4, 8, 12, 24]'], {}), '([4, 8, 12, 24])\n', (1728, 1744), True, 'import numpy as np\n'), ((1757, 1779), 'numpy.array', 'np.array', (['[12, 20, 36]'], {}), '([12, 20, 36])\n', (1765, 1779), True, 'import numpy as np\n'), ((1889, 1928), 'numpy.array', 'np.array', (['[9, 10, 11, 12, 11, 10, 9, 8]'], {}), '([9, 10, 11, 12, 11, 10, 9, 8])\n', (1897, 1928), True, 'import numpy as np\n'), ((1939, 1961), 'endochrone.time_series.arima.MaModel', 'arima.MaModel', ([], {'order': '(1)'}), '(order=1)\n', (1952, 1961), True, 'import endochrone.time_series.arima as arima\n'), ((2558, 2597), 'numpy.array', 'np.array', (['[9, 10, 11, 12, 11, 10, 9, 8]'], {}), '([9, 10, 11, 12, 11, 10, 9, 8])\n', (2566, 2597), True, 'import numpy as np\n'), ((2608, 2630), 'endochrone.time_series.arima.MaModel', 'arima.MaModel', ([], {'order': '(2)'}), '(order=2)\n', (2621, 2630), True, 'import endochrone.time_series.arima as arima\n'), ((578, 594), 'pytest.approx', 'pytest.approx', (['(1)'], {}), '(1)\n', (591, 594), False, 'import pytest\n'), ((624, 642), 'pytest.approx', 'pytest.approx', (['(0.2)'], {}), '(0.2)\n', (637, 642), False, 'import pytest\n'), ((1149, 1160), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1157, 1160), True, 'import numpy as np\n'), ((1610, 1637), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(0.005)'}), '(0, abs=0.005)\n', (1623, 1637), False, 'import pytest\n'), ((2289, 2314), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2302, 2314), False, 'import pytest\n'), ((2858, 2885), 'numpy.sum', 'np.sum', (['(MA2.residuals_ ** 2)'], {}), '(MA2.residuals_ ** 2)\n', (2864, 2885), True, 'import numpy as np\n'), ((2887, 2920), 'pytest.approx', 'pytest.approx', (['(1.7175879816717088)'], {}), '(1.7175879816717088)\n', (2900, 2920), False, 'import pytest\n'), ((3045, 3070), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3058, 3070), False, 'import pytest\n'), ((679, 695), 'pytest.approx', 
'pytest.approx', (['(0)'], {}), '(0)\n', (692, 695), False, 'import pytest\n'), ((818, 838), 'pytest.approx', 'pytest.approx', (['y_exp'], {}), '(y_exp)\n', (831, 838), False, 'import pytest\n'), ((1563, 1579), 'pytest.approx', 'pytest.approx', (['(1)'], {}), '(1)\n', (1576, 1579), False, 'import pytest\n'), ((1674, 1701), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(0.005)'}), '(0, abs=0.005)\n', (1687, 1701), False, 'import pytest\n'), ((1824, 1855), 'pytest.approx', 'pytest.approx', (['y_exp'], {'abs': '(0.005)'}), '(y_exp, abs=0.005)\n', (1837, 1855), False, 'import pytest\n'), ((1997, 2007), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (2004, 2007), True, 'import numpy as np\n'), ((2126, 2148), 'pytest.approx', 'pytest.approx', (['exp_res'], {}), '(exp_res)\n', (2139, 2148), False, 'import pytest\n'), ((2240, 2277), 'pytest.approx', 'pytest.approx', (['exp_thetas'], {'abs': '(0.0001)'}), '(exp_thetas, abs=0.0001)\n', (2253, 2277), False, 'import pytest\n'), ((2495, 2524), 'pytest.approx', 'pytest.approx', (['exp'], {'abs': '(0.001)'}), '(exp, abs=0.001)\n', (2508, 2524), False, 'import pytest\n'), ((2666, 2676), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (2673, 2676), True, 'import numpy as np\n'), ((2800, 2822), 'pytest.approx', 'pytest.approx', (['exp_res'], {}), '(exp_res)\n', (2813, 2822), False, 'import pytest\n'), ((3008, 3033), 'pytest.approx', 'pytest.approx', (['exp_thetas'], {}), '(exp_thetas)\n', (3021, 3033), False, 'import pytest\n'), ((3255, 3284), 'pytest.approx', 'pytest.approx', (['exp'], {'abs': '(0.001)'}), '(exp, abs=0.001)\n', (3268, 3284), False, 'import pytest\n')] |
"""Tests for the quantized layers."""
import numpy
import pytest
from concrete.quantization import QuantizedArray, QuantizedLinear
# QuantizedLinear unstable with n_bits>23
# and hard to test with numpy.isclose with n_bits < 8
N_BITS_LIST = [20, 16, 8]
@pytest.mark.parametrize(
    "n_bits",
    [pytest.param(n_bits) for n_bits in N_BITS_LIST],
)
@pytest.mark.parametrize(
    "n_examples, n_features, n_neurons",
    [
        pytest.param(50, 3, 4),
        pytest.param(20, 500, 30),
        pytest.param(200, 300, 50),
        pytest.param(10000, 100, 1),
        pytest.param(10, 20, 1),
    ],
)
@pytest.mark.parametrize("is_signed", [pytest.param(True), pytest.param(False)])
def test_quantized_linear(n_examples, n_features, n_neurons, n_bits, is_signed):
    """Check QuantizedLinear against its own calibration output.

    With a large bit width the dequantized output of the quantized linear
    layer should track the calibrated float output; the layer is exercised
    both with and without a bias term.
    """
    inputs = numpy.random.uniform(size=(n_examples, n_features))
    q_inputs = QuantizedArray(n_bits, inputs)
    # Weights have shape (n_features, n_neurons); bias is (1, n_neurons).
    q_weights = QuantizedArray(
        n_bits, numpy.random.uniform(size=(n_features, n_neurons)), is_signed
    )
    q_bias = QuantizedArray(
        n_bits, numpy.random.uniform(size=(1, n_neurons)), is_signed
    )
    # Run the identical check first with a bias, then without one.
    for bias_term in (q_bias, None):
        if bias_term is None:
            q_linear = QuantizedLinear(n_bits, q_weights)
        else:
            q_linear = QuantizedLinear(n_bits, q_weights, bias_term)
        # Calibrate the quantized layer, then compare the dequantized result
        # against the float output recorded during calibration.
        q_linear.calibrate(inputs)
        expected_outputs = q_linear.q_out.values
        actual_output = q_linear(q_inputs).dequant()
        assert numpy.isclose(expected_outputs, actual_output, atol=10 ** -0).all()
| [
"numpy.random.uniform",
"pytest.param",
"numpy.isclose",
"concrete.quantization.QuantizedLinear",
"concrete.quantization.QuantizedArray"
] | [((964, 1015), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': '(n_examples, n_features)'}), '(size=(n_examples, n_features))\n', (984, 1015), False, 'import numpy\n'), ((1031, 1061), 'concrete.quantization.QuantizedArray', 'QuantizedArray', (['n_bits', 'inputs'], {}), '(n_bits, inputs)\n', (1045, 1061), False, 'from concrete.quantization import QuantizedArray, QuantizedLinear\n'), ((1125, 1175), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': '(n_features, n_neurons)'}), '(size=(n_features, n_neurons))\n', (1145, 1175), False, 'import numpy\n'), ((1192, 1234), 'concrete.quantization.QuantizedArray', 'QuantizedArray', (['n_bits', 'weights', 'is_signed'], {}), '(n_bits, weights, is_signed)\n', (1206, 1234), False, 'from concrete.quantization import QuantizedArray, QuantizedLinear\n'), ((1247, 1288), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'size': '(1, n_neurons)'}), '(size=(1, n_neurons))\n', (1267, 1288), False, 'import numpy\n'), ((1302, 1341), 'concrete.quantization.QuantizedArray', 'QuantizedArray', (['n_bits', 'bias', 'is_signed'], {}), '(n_bits, bias, is_signed)\n', (1316, 1341), False, 'from concrete.quantization import QuantizedArray, QuantizedLinear\n'), ((1397, 1439), 'concrete.quantization.QuantizedLinear', 'QuantizedLinear', (['n_bits', 'q_weights', 'q_bias'], {}), '(n_bits, q_weights, q_bias)\n', (1412, 1439), False, 'from concrete.quantization import QuantizedArray, QuantizedLinear\n'), ((1728, 1762), 'concrete.quantization.QuantizedLinear', 'QuantizedLinear', (['n_bits', 'q_weights'], {}), '(n_bits, q_weights)\n', (1743, 1762), False, 'from concrete.quantization import QuantizedArray, QuantizedLinear\n'), ((302, 322), 'pytest.param', 'pytest.param', (['n_bits'], {}), '(n_bits)\n', (314, 322), False, 'import pytest\n'), ((434, 456), 'pytest.param', 'pytest.param', (['(50)', '(3)', '(4)'], {}), '(50, 3, 4)\n', (446, 456), False, 'import pytest\n'), ((466, 491), 'pytest.param', 'pytest.param', (['(20)', 
'(500)', '(30)'], {}), '(20, 500, 30)\n', (478, 491), False, 'import pytest\n'), ((501, 527), 'pytest.param', 'pytest.param', (['(200)', '(300)', '(50)'], {}), '(200, 300, 50)\n', (513, 527), False, 'import pytest\n'), ((537, 564), 'pytest.param', 'pytest.param', (['(10000)', '(100)', '(1)'], {}), '(10000, 100, 1)\n', (549, 564), False, 'import pytest\n'), ((574, 597), 'pytest.param', 'pytest.param', (['(10)', '(20)', '(1)'], {}), '(10, 20, 1)\n', (586, 597), False, 'import pytest\n'), ((647, 665), 'pytest.param', 'pytest.param', (['(True)'], {}), '(True)\n', (659, 665), False, 'import pytest\n'), ((667, 686), 'pytest.param', 'pytest.param', (['(False)'], {}), '(False)\n', (679, 686), False, 'import pytest\n'), ((1615, 1676), 'numpy.isclose', 'numpy.isclose', (['expected_outputs', 'actual_output'], {'atol': '(10 ** -0)'}), '(expected_outputs, actual_output, atol=10 ** -0)\n', (1628, 1676), False, 'import numpy\n'), ((1937, 1998), 'numpy.isclose', 'numpy.isclose', (['expected_outputs', 'actual_output'], {'atol': '(10 ** -0)'}), '(expected_outputs, actual_output, atol=10 ** -0)\n', (1950, 1998), False, 'import numpy\n')] |
import numpy as np
class Random(object):
def __init__(self, min_range, max_range, distribution):
self.__minRange = min_range
self.__maxRange = max_range
self.__distribution = distribution
if self.__distribution == 'normal':
interval = range(self.__minRange, self.__maxRange+1)
self.__mean = np.mean(interval)
self.__std = np.std(interval)
@property
def minRange(self):
return self.__minRange
@property
def maxRange(self):
return self.__maxRange
@property
def distribution(self):
return self.__distribution
def getRandomNumber(self):
num = None
while not num or (num < self.__minRange or num > self.__maxRange):
if self.__distribution == 'uniform':
num = np.random.uniform(self.__minRange, self.__maxRange)
elif self.__distribution == 'normal':
num = np.random.normal(self.__mean, self.__std)
return int(np.rint(num)) | [
"numpy.random.uniform",
"numpy.std",
"numpy.rint",
"numpy.mean",
"numpy.random.normal"
] | [((358, 375), 'numpy.mean', 'np.mean', (['interval'], {}), '(interval)\n', (365, 375), True, 'import numpy as np\n'), ((401, 417), 'numpy.std', 'np.std', (['interval'], {}), '(interval)\n', (407, 417), True, 'import numpy as np\n'), ((1027, 1039), 'numpy.rint', 'np.rint', (['num'], {}), '(num)\n', (1034, 1039), True, 'import numpy as np\n'), ((841, 892), 'numpy.random.uniform', 'np.random.uniform', (['self.__minRange', 'self.__maxRange'], {}), '(self.__minRange, self.__maxRange)\n', (858, 892), True, 'import numpy as np\n'), ((965, 1006), 'numpy.random.normal', 'np.random.normal', (['self.__mean', 'self.__std'], {}), '(self.__mean, self.__std)\n', (981, 1006), True, 'import numpy as np\n')] |
import gym
import random
import numpy as np
import keras
from statistics import mean, median
from collections import Counter
from keras.models import Model, Sequential, load_model
from keras.layers import Input, Dense
LR = 1e-3
env = gym.make('CartPole-v1')
env.reset()
def saveGoodGames(nr=10000):
observations = []
actions = []
minReward = 70
for i in range(nr):
env.reset()
action = env.action_space.sample()
obserVationList = []
actionList = []
score = 0
while True:
observation, reward, done, info = env.step(action)
action = env.action_space.sample()
obserVationList.append(observation)
if action == 1:
actionList.append([0,1] )
elif action == 0:
actionList.append([1,0])
score += reward
if done: break
if score > minReward:
observations.extend(obserVationList)
actions.extend(actionList)
observations = np.array(observations)
actions = np.array(actions)
return observations, actions
def trainModell(modelName, observations=None, actions= None, ):
if observations== None:
observations = np.load('observations.npy')
if actions == None:
actions = np.load('actions.npy')
model = Sequential()
model.add(Dense(64, input_dim=4, activation='relu'))
model.add(Dense(128, activation='relu'))
# model.add(Dense(256, activation='relu'))
# model.add(Dense(256, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(2, activation='sigmoid'))
model.compile(optimizer='adam', loss='categorical_crossentropy')
model.fit(observations, actions, epochs=10)
model.save('{}.h5'.format(modelName))
return model
def playGames( ai,nr, minScore=300):
observations = []
actions = []
scores=0
scores = []
for i in range(nr):
if i%50==0: print ('step {}'.format(i))
env.reset()
action = env.action_space.sample()
obserVationList = []
actionList = []
score=0
while True:
# env.render()
observation, reward, done, info = env.step(action)
action = np.argmax(ai.predict(observation.reshape(1,4)))
obserVationList.append(observation)
if action == 1:
actionList.append([0,1] )
elif action == 0:
actionList.append([1,0])
score += 1
# score += reward
if done: break
# print (score )
scores.append(score)
if score > minScore:
observations.extend(obserVationList)
actions.extend(actionList)
observations = np.array(observations)
actions = np.array(actions)
print ('mean: ', np.mean(scores))
return observations, actions
obs, acts = saveGoodGames()
print ('training 1st modell')
firstModel = trainModell( 'v1',obs, acts)
obs, acts = playGames(firstModel, 1000,400)
print ('training 2nd modell')
secondModel = trainModell('v2',obs, acts)
obs, acts = playGames(secondModel, 1000, 490)
print ('training 3rd modell')
thirdModel = trainModell('v3',obs, acts)
playGames(thirdModel, 100)
| [
"numpy.load",
"gym.make",
"keras.layers.Dense",
"numpy.array",
"numpy.mean",
"keras.models.Sequential"
] | [((237, 260), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (245, 260), False, 'import gym\n'), ((1058, 1080), 'numpy.array', 'np.array', (['observations'], {}), '(observations)\n', (1066, 1080), True, 'import numpy as np\n'), ((1095, 1112), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (1103, 1112), True, 'import numpy as np\n'), ((1371, 1383), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1381, 1383), False, 'from keras.models import Model, Sequential, load_model\n'), ((2832, 2854), 'numpy.array', 'np.array', (['observations'], {}), '(observations)\n', (2840, 2854), True, 'import numpy as np\n'), ((2869, 2886), 'numpy.array', 'np.array', (['actions'], {}), '(actions)\n', (2877, 2886), True, 'import numpy as np\n'), ((1264, 1291), 'numpy.load', 'np.load', (['"""observations.npy"""'], {}), "('observations.npy')\n", (1271, 1291), True, 'import numpy as np\n'), ((1334, 1356), 'numpy.load', 'np.load', (['"""actions.npy"""'], {}), "('actions.npy')\n", (1341, 1356), True, 'import numpy as np\n'), ((1398, 1439), 'keras.layers.Dense', 'Dense', (['(64)'], {'input_dim': '(4)', 'activation': '"""relu"""'}), "(64, input_dim=4, activation='relu')\n", (1403, 1439), False, 'from keras.layers import Input, Dense\n'), ((1455, 1484), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (1460, 1484), False, 'from keras.layers import Input, Dense\n'), ((1595, 1623), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (1600, 1623), False, 'from keras.layers import Input, Dense\n'), ((1640, 1670), 'keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""sigmoid"""'}), "(2, activation='sigmoid')\n", (1645, 1670), False, 'from keras.layers import Input, Dense\n'), ((2908, 2923), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (2915, 2923), True, 'import numpy as np\n')] |
import json
import numpy as np
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, range):
value = list(obj)
return [value[0], value[-1] + 1]
return super().default(obj)
def a(*w, **k):
return np.array(*w, **k)
| [
"numpy.array"
] | [((490, 507), 'numpy.array', 'np.array', (['*w'], {}), '(*w, **k)\n', (498, 507), True, 'import numpy as np\n')] |
# The heat conduction test case is from Smith, "Uncertainty quantification: theory, implementation and applications", 2013.
# In this test case, experimental data are provided.
# We implement the experimental data here and save them in a csv file.
# This script needs to be run when no data_heat_conduction_0.csv file is not present in the case folder.
# Import modules
import numpy as np
import matplotlib.pyplot as plt
import random
import pandas as pd
# Import the model
import heat_conduction
# Nominal parameter
param = [0.95, 0.95, 2.37, 21.29, -18.41, 0.00191]
x = np.arange(10.0, 70.0, 4.0)
model_def = heat_conduction.HeatConduction()
model_def.param = param
model_def.x = x
# Standard deviation
std_param = [0, 0, 0, 0, 0.1450, 1.4482e-5] # Not used here
std_y=0.2504 # 0.2604
array_std_y = np.ones(len(x))
array_std_y *= std_y
# Experimental data provided (see Smith. Tab. 3.2 p. 57, aluminium rod)
y = [96.14, 80.12, 67.66, 57.96, 50.90, 44.84, 39.75, 36.16, 33.31, 31.15, 29.28, 27.88, 27.18, 26.40, 25.86]
df = pd.DataFrame({'x': x, 'T': y, 'std_T': array_std_y})
df.to_csv("heat_conduction_data.csv")
plt.plot(x, model_def.fun_x())
plt.plot(x, y, 'o', color='C0')
plt.show()
| [
"pandas.DataFrame",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.arange",
"heat_conduction.HeatConduction"
] | [((578, 604), 'numpy.arange', 'np.arange', (['(10.0)', '(70.0)', '(4.0)'], {}), '(10.0, 70.0, 4.0)\n', (587, 604), True, 'import numpy as np\n'), ((617, 649), 'heat_conduction.HeatConduction', 'heat_conduction.HeatConduction', ([], {}), '()\n', (647, 649), False, 'import heat_conduction\n'), ((1035, 1087), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'T': y, 'std_T': array_std_y}"], {}), "({'x': x, 'T': y, 'std_T': array_std_y})\n", (1047, 1087), True, 'import pandas as pd\n'), ((1158, 1189), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""o"""'], {'color': '"""C0"""'}), "(x, y, 'o', color='C0')\n", (1166, 1189), True, 'import matplotlib.pyplot as plt\n'), ((1191, 1201), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1199, 1201), True, 'import matplotlib.pyplot as plt\n')] |
import os
import re
import sys
import numpy as np
from constant import *
import torch
#wordVec=np.load(dataPath+"wordVec.npy")
class Dataset:
def __init__(self,Tag):
print(Tag)
self.words=np.load(Tag+"_wordEmb.npy")
self.inMask=np.load(Tag+"_inMask.npy")
self.label=np.load(Tag+"_label.npy")
self.maskL=np.load(Tag+"_maskL.npy")
self.maskR=np.load(Tag+"_maskR.npy")
def batchs(self):
indices=np.random.permutation(np.arange(len(self.words)))
self.words=self.words[indices]
self.inMask=self.inMask[indices]
self.label=self.label[indices]
self.maskL=self.maskL[indices]
self.maskR=self.maskR[indices]
for i in range(0,len(self.words)//BatchSize+1):
L=i*BatchSize
if L>=len(self.words):
break
R=min((i+1)*BatchSize,len(self.words))
yield torch.LongTensor(self.words[L:R]),torch.LongTensor(self.inMask[L:R]),torch.FloatTensor(self.maskL[L:R]),torch.FloatTensor(self.maskR[L:R]),torch.LongTensor(self.label[L:R])
class uDataset:
def __init__(self,Tag):
print(Tag)
self.words=np.load(Tag+"_wordEmb.npy")
self.pos=np.load(Tag+"_inMask.npy")
self.label=np.load(Tag+"_label.npy")
self.maskL=np.load(Tag+"_maskL.npy")
self.maskR=np.load(Tag+"_maskR.npy")
def batchs(self):
indices=np.random.permutation(np.arange(len(self.words)))
self.words=self.words[indices]
self.inMask=self.inMask[indices]
self.label=self.label[indices]
self.maskL=self.maskL[indices]
self.maskR=self.maskR[indices]
for i in range(0,len(self.words)//BatchSize+1):
L=i*BatchSize
if L>=len(self.words):
break
R=min((i+1)*BatchSize,len(self.words))
bwords=self.words[L:R]
bMask=self.inMask[L:R]
bmaskL=self.maskL[L:R]
bmaskR=self.maskR[L:R]
blabel=self.label[L:R]
nidx=np.where(blabel==0)
uidx=np.where(blabel!=0)
yield torch.LongTensor(bwords[nidx]),torch.LongTensor(bMask[nidx]),torch.FloatTensor(bmaskL[nidx]),torch.FloatTensor(bmaskR[nidx]),torch.LongTensor(blabel[nidx]),torch.LongTensor(bwords[uidx]),torch.LongTensor(bMask[uidx]),torch.FloatTensor(bmaskL[uidx]),torch.FloatTensor(bmaskR[uidx]),torch.LongTensor(blabel[uidx])
class joinDataset:
def __init__(self,cTag,uTag):
self.cwords=np.load(cTag+"_wordEmb.npy")
self.cMask=np.load(cTag+"_inMask.npy")
self.clabel=np.load(cTag+"_label.npy")
self.cmaskL=np.load(cTag+"_maskL.npy")
self.cmaskR=np.load(cTag+"_maskR.npy")
#self.cindex=np.load(cTag+"_index.npy")
#self.uindex=np.load(uTag+"_index.npy")
'''
for i in range(0,0):
self.cwords=np.concatenate((self.cwords,self.cwords),axis=0)
self.cpos=np.concatenate((self.cpos,self.cpos),axis=0)
self.cloc=np.concatenate((self.cloc,self.cloc),axis=0)
self.clabel=np.concatenate((self.clabel,self.clabel),axis=0)
self.cmaskL=np.concatenate((self.cmaskL,self.cmaskL),axis=0)
self.cmaskR=np.concatenate((self.cmaskR,self.cmaskR),axis=0)
'''
self.uwords=np.load(uTag+"_wordEmb.npy")
self.uMask=np.load(uTag+"_inMask.npy")
self.ulabel=np.load(uTag+"_label.npy")
self.umaskL=np.load(uTag+"_maskL.npy")
self.umaskR=np.load(uTag+"_maskR.npy")
self.utimes=np.zeros_like(self.ulabel)
self.it_times=len(self.uwords)//BatchSize+1
self.c_BatchSize=len(self.cwords)//self.it_times
def conf_batch(self):
indices=np.random.permutation(np.arange(len(self.cwords)))
print(self.cwords.shape)
print(self.clabel.shape)
print(self.cmaskL.shape)
print(self.cmaskR.shape)
self.cwords=self.cwords[indices]
self.cMask=self.cMask[indices]
self.clabel=self.clabel[indices]
self.cmaskL=self.cmaskL[indices]
self.cmaskR=self.cmaskR[indices]
#self.cindex=self.cindex[indices]
for i in range(0,self.it_times):
L=i*self.c_BatchSize
if L>=len(self.cwords):
break
R=min((i+1)*self.c_BatchSize,len(self.cwords))
yield torch.LongTensor(self.cwords[L:R]),torch.LongTensor(self.cMask[L:R]),torch.FloatTensor(self.cmaskL[L:R]),torch.FloatTensor(self.cmaskR[L:R]),torch.LongTensor(self.clabel[L:R])
def unconf_batch(self):
print("MAXX %d"%(np.max(self.utimes)))
indices=np.random.permutation(np.arange(len(self.uwords)))
self.uwords=self.uwords[indices]
self.uMask=self.uMask[indices]
self.ulabel=self.ulabel[indices]
self.umaskL=self.umaskL[indices]
self.umaskR=self.umaskR[indices]
self.utimes=self.utimes[indices]
#self.uindex=self.uindex[indices]
for i in range(0,self.it_times):
L=i*BatchSize
if L>=len(self.uwords):
break
R=min((i+1)*BatchSize,len(self.uwords))
yield torch.LongTensor(self.uwords[L:R]),torch.LongTensor(self.uMask[L:R]),torch.FloatTensor(self.umaskL[L:R]),torch.FloatTensor(self.umaskR[L:R]),torch.LongTensor(self.ulabel[L:R]),(L,R)
def dump(self,threshold,cTag,uTag):
nc_idx=np.where(self.utimes>=threshold,1,0)
nc_idx2=np.where(self.ulabel!=0,1,0)
tmp=nc_idx*nc_idx2
nc_idx=np.where(tmp==1)
uc_idx=np.where(tmp==0)
print("Transfer Size %d %d"%(nc_idx[0].shape[0],np.max(self.utimes)))
self.cwords=np.append(self.cwords,self.uwords[nc_idx],axis=0)
self.cMask=np.append(self.cMask,self.uMask[nc_idx],axis=0)
self.cmaskL=np.append(self.cmaskL,self.umaskL[nc_idx],axis=0)
self.cmaskR=np.append(self.cmaskR,self.umaskR[nc_idx],axis=0)
self.clabel=np.append(self.clabel,self.ulabel[nc_idx],axis=0)
#self.cindex=np.append(self.cindex,self.uindex[nc_idx],axis=0)
self.uwords=self.uwords[uc_idx]
self.uMask=self.uMask[uc_idx]
self.umaskL=self.umaskL[uc_idx]
self.umaskR=self.umaskR[uc_idx]
self.ulabel=self.ulabel[uc_idx]
#self.uindex=self.uindex[uc_idx]
np.save(cTag+"_wordEmb.npy",self.cwords)
np.save(cTag+"_inMask.npy",self.cMask)
np.save(cTag+"_label.npy",self.clabel)
np.save(cTag+"_maskL.npy",self.cmaskL)
np.save(cTag+"_maskR.npy",self.cmaskR)
#np.save(cTag+"_index.npy",self.cindex)
np.save(uTag+"_wordEmb.npy",self.uwords)
np.save(uTag+"_inMask.npy",self.uMask)
np.save(uTag+"_label.npy",self.ulabel)
np.save(uTag+"_maskL.npy",self.umaskL)
np.save(uTag+"_maskR.npy",self.umaskR)
#np.save(uTag+"_index.npy",self.uindex)
| [
"numpy.load",
"numpy.zeros_like",
"numpy.save",
"torch.LongTensor",
"torch.FloatTensor",
"numpy.append",
"numpy.max",
"numpy.where"
] | [((209, 238), 'numpy.load', 'np.load', (["(Tag + '_wordEmb.npy')"], {}), "(Tag + '_wordEmb.npy')\n", (216, 238), True, 'import numpy as np\n'), ((257, 285), 'numpy.load', 'np.load', (["(Tag + '_inMask.npy')"], {}), "(Tag + '_inMask.npy')\n", (264, 285), True, 'import numpy as np\n'), ((303, 330), 'numpy.load', 'np.load', (["(Tag + '_label.npy')"], {}), "(Tag + '_label.npy')\n", (310, 330), True, 'import numpy as np\n'), ((348, 375), 'numpy.load', 'np.load', (["(Tag + '_maskL.npy')"], {}), "(Tag + '_maskL.npy')\n", (355, 375), True, 'import numpy as np\n'), ((393, 420), 'numpy.load', 'np.load', (["(Tag + '_maskR.npy')"], {}), "(Tag + '_maskR.npy')\n", (400, 420), True, 'import numpy as np\n'), ((1168, 1197), 'numpy.load', 'np.load', (["(Tag + '_wordEmb.npy')"], {}), "(Tag + '_wordEmb.npy')\n", (1175, 1197), True, 'import numpy as np\n'), ((1213, 1241), 'numpy.load', 'np.load', (["(Tag + '_inMask.npy')"], {}), "(Tag + '_inMask.npy')\n", (1220, 1241), True, 'import numpy as np\n'), ((1259, 1286), 'numpy.load', 'np.load', (["(Tag + '_label.npy')"], {}), "(Tag + '_label.npy')\n", (1266, 1286), True, 'import numpy as np\n'), ((1304, 1331), 'numpy.load', 'np.load', (["(Tag + '_maskL.npy')"], {}), "(Tag + '_maskL.npy')\n", (1311, 1331), True, 'import numpy as np\n'), ((1349, 1376), 'numpy.load', 'np.load', (["(Tag + '_maskR.npy')"], {}), "(Tag + '_maskR.npy')\n", (1356, 1376), True, 'import numpy as np\n'), ((2515, 2545), 'numpy.load', 'np.load', (["(cTag + '_wordEmb.npy')"], {}), "(cTag + '_wordEmb.npy')\n", (2522, 2545), True, 'import numpy as np\n'), ((2563, 2592), 'numpy.load', 'np.load', (["(cTag + '_inMask.npy')"], {}), "(cTag + '_inMask.npy')\n", (2570, 2592), True, 'import numpy as np\n'), ((2611, 2639), 'numpy.load', 'np.load', (["(cTag + '_label.npy')"], {}), "(cTag + '_label.npy')\n", (2618, 2639), True, 'import numpy as np\n'), ((2658, 2686), 'numpy.load', 'np.load', (["(cTag + '_maskL.npy')"], {}), "(cTag + '_maskL.npy')\n", (2665, 2686), True, 'import 
numpy as np\n'), ((2705, 2733), 'numpy.load', 'np.load', (["(cTag + '_maskR.npy')"], {}), "(cTag + '_maskR.npy')\n", (2712, 2733), True, 'import numpy as np\n'), ((3328, 3358), 'numpy.load', 'np.load', (["(uTag + '_wordEmb.npy')"], {}), "(uTag + '_wordEmb.npy')\n", (3335, 3358), True, 'import numpy as np\n'), ((3376, 3405), 'numpy.load', 'np.load', (["(uTag + '_inMask.npy')"], {}), "(uTag + '_inMask.npy')\n", (3383, 3405), True, 'import numpy as np\n'), ((3424, 3452), 'numpy.load', 'np.load', (["(uTag + '_label.npy')"], {}), "(uTag + '_label.npy')\n", (3431, 3452), True, 'import numpy as np\n'), ((3471, 3499), 'numpy.load', 'np.load', (["(uTag + '_maskL.npy')"], {}), "(uTag + '_maskL.npy')\n", (3478, 3499), True, 'import numpy as np\n'), ((3518, 3546), 'numpy.load', 'np.load', (["(uTag + '_maskR.npy')"], {}), "(uTag + '_maskR.npy')\n", (3525, 3546), True, 'import numpy as np\n'), ((3565, 3591), 'numpy.zeros_like', 'np.zeros_like', (['self.ulabel'], {}), '(self.ulabel)\n', (3578, 3591), True, 'import numpy as np\n'), ((5416, 5456), 'numpy.where', 'np.where', (['(self.utimes >= threshold)', '(1)', '(0)'], {}), '(self.utimes >= threshold, 1, 0)\n', (5424, 5456), True, 'import numpy as np\n'), ((5469, 5501), 'numpy.where', 'np.where', (['(self.ulabel != 0)', '(1)', '(0)'], {}), '(self.ulabel != 0, 1, 0)\n', (5477, 5501), True, 'import numpy as np\n'), ((5540, 5558), 'numpy.where', 'np.where', (['(tmp == 1)'], {}), '(tmp == 1)\n', (5548, 5558), True, 'import numpy as np\n'), ((5572, 5590), 'numpy.where', 'np.where', (['(tmp == 0)'], {}), '(tmp == 0)\n', (5580, 5590), True, 'import numpy as np\n'), ((5687, 5738), 'numpy.append', 'np.append', (['self.cwords', 'self.uwords[nc_idx]'], {'axis': '(0)'}), '(self.cwords, self.uwords[nc_idx], axis=0)\n', (5696, 5738), True, 'import numpy as np\n'), ((5756, 5805), 'numpy.append', 'np.append', (['self.cMask', 'self.uMask[nc_idx]'], {'axis': '(0)'}), '(self.cMask, self.uMask[nc_idx], axis=0)\n', (5765, 5805), True, 'import numpy as 
np\n'), ((5824, 5875), 'numpy.append', 'np.append', (['self.cmaskL', 'self.umaskL[nc_idx]'], {'axis': '(0)'}), '(self.cmaskL, self.umaskL[nc_idx], axis=0)\n', (5833, 5875), True, 'import numpy as np\n'), ((5894, 5945), 'numpy.append', 'np.append', (['self.cmaskR', 'self.umaskR[nc_idx]'], {'axis': '(0)'}), '(self.cmaskR, self.umaskR[nc_idx], axis=0)\n', (5903, 5945), True, 'import numpy as np\n'), ((5964, 6015), 'numpy.append', 'np.append', (['self.clabel', 'self.ulabel[nc_idx]'], {'axis': '(0)'}), '(self.clabel, self.ulabel[nc_idx], axis=0)\n', (5973, 6015), True, 'import numpy as np\n'), ((6334, 6377), 'numpy.save', 'np.save', (["(cTag + '_wordEmb.npy')", 'self.cwords'], {}), "(cTag + '_wordEmb.npy', self.cwords)\n", (6341, 6377), True, 'import numpy as np\n'), ((6383, 6424), 'numpy.save', 'np.save', (["(cTag + '_inMask.npy')", 'self.cMask'], {}), "(cTag + '_inMask.npy', self.cMask)\n", (6390, 6424), True, 'import numpy as np\n'), ((6430, 6471), 'numpy.save', 'np.save', (["(cTag + '_label.npy')", 'self.clabel'], {}), "(cTag + '_label.npy', self.clabel)\n", (6437, 6471), True, 'import numpy as np\n'), ((6477, 6518), 'numpy.save', 'np.save', (["(cTag + '_maskL.npy')", 'self.cmaskL'], {}), "(cTag + '_maskL.npy', self.cmaskL)\n", (6484, 6518), True, 'import numpy as np\n'), ((6524, 6565), 'numpy.save', 'np.save', (["(cTag + '_maskR.npy')", 'self.cmaskR'], {}), "(cTag + '_maskR.npy', self.cmaskR)\n", (6531, 6565), True, 'import numpy as np\n'), ((6628, 6671), 'numpy.save', 'np.save', (["(uTag + '_wordEmb.npy')", 'self.uwords'], {}), "(uTag + '_wordEmb.npy', self.uwords)\n", (6635, 6671), True, 'import numpy as np\n'), ((6677, 6718), 'numpy.save', 'np.save', (["(uTag + '_inMask.npy')", 'self.uMask'], {}), "(uTag + '_inMask.npy', self.uMask)\n", (6684, 6718), True, 'import numpy as np\n'), ((6724, 6765), 'numpy.save', 'np.save', (["(uTag + '_label.npy')", 'self.ulabel'], {}), "(uTag + '_label.npy', self.ulabel)\n", (6731, 6765), True, 'import numpy as np\n'), ((6771, 
6812), 'numpy.save', 'np.save', (["(uTag + '_maskL.npy')", 'self.umaskL'], {}), "(uTag + '_maskL.npy', self.umaskL)\n", (6778, 6812), True, 'import numpy as np\n'), ((6818, 6859), 'numpy.save', 'np.save', (["(uTag + '_maskR.npy')", 'self.umaskR'], {}), "(uTag + '_maskR.npy', self.umaskR)\n", (6825, 6859), True, 'import numpy as np\n'), ((2042, 2063), 'numpy.where', 'np.where', (['(blabel == 0)'], {}), '(blabel == 0)\n', (2050, 2063), True, 'import numpy as np\n'), ((2079, 2100), 'numpy.where', 'np.where', (['(blabel != 0)'], {}), '(blabel != 0)\n', (2087, 2100), True, 'import numpy as np\n'), ((4609, 4628), 'numpy.max', 'np.max', (['self.utimes'], {}), '(self.utimes)\n', (4615, 4628), True, 'import numpy as np\n'), ((912, 945), 'torch.LongTensor', 'torch.LongTensor', (['self.words[L:R]'], {}), '(self.words[L:R])\n', (928, 945), False, 'import torch\n'), ((946, 980), 'torch.LongTensor', 'torch.LongTensor', (['self.inMask[L:R]'], {}), '(self.inMask[L:R])\n', (962, 980), False, 'import torch\n'), ((981, 1015), 'torch.FloatTensor', 'torch.FloatTensor', (['self.maskL[L:R]'], {}), '(self.maskL[L:R])\n', (998, 1015), False, 'import torch\n'), ((1016, 1050), 'torch.FloatTensor', 'torch.FloatTensor', (['self.maskR[L:R]'], {}), '(self.maskR[L:R])\n', (1033, 1050), False, 'import torch\n'), ((1051, 1084), 'torch.LongTensor', 'torch.LongTensor', (['self.label[L:R]'], {}), '(self.label[L:R])\n', (1067, 1084), False, 'import torch\n'), ((2117, 2147), 'torch.LongTensor', 'torch.LongTensor', (['bwords[nidx]'], {}), '(bwords[nidx])\n', (2133, 2147), False, 'import torch\n'), ((2148, 2177), 'torch.LongTensor', 'torch.LongTensor', (['bMask[nidx]'], {}), '(bMask[nidx])\n', (2164, 2177), False, 'import torch\n'), ((2178, 2209), 'torch.FloatTensor', 'torch.FloatTensor', (['bmaskL[nidx]'], {}), '(bmaskL[nidx])\n', (2195, 2209), False, 'import torch\n'), ((2210, 2241), 'torch.FloatTensor', 'torch.FloatTensor', (['bmaskR[nidx]'], {}), '(bmaskR[nidx])\n', (2227, 2241), False, 'import 
torch\n'), ((2242, 2272), 'torch.LongTensor', 'torch.LongTensor', (['blabel[nidx]'], {}), '(blabel[nidx])\n', (2258, 2272), False, 'import torch\n'), ((2273, 2303), 'torch.LongTensor', 'torch.LongTensor', (['bwords[uidx]'], {}), '(bwords[uidx])\n', (2289, 2303), False, 'import torch\n'), ((2304, 2333), 'torch.LongTensor', 'torch.LongTensor', (['bMask[uidx]'], {}), '(bMask[uidx])\n', (2320, 2333), False, 'import torch\n'), ((2334, 2365), 'torch.FloatTensor', 'torch.FloatTensor', (['bmaskL[uidx]'], {}), '(bmaskL[uidx])\n', (2351, 2365), False, 'import torch\n'), ((2366, 2397), 'torch.FloatTensor', 'torch.FloatTensor', (['bmaskR[uidx]'], {}), '(bmaskR[uidx])\n', (2383, 2397), False, 'import torch\n'), ((2398, 2428), 'torch.LongTensor', 'torch.LongTensor', (['blabel[uidx]'], {}), '(blabel[uidx])\n', (2414, 2428), False, 'import torch\n'), ((4380, 4414), 'torch.LongTensor', 'torch.LongTensor', (['self.cwords[L:R]'], {}), '(self.cwords[L:R])\n', (4396, 4414), False, 'import torch\n'), ((4415, 4448), 'torch.LongTensor', 'torch.LongTensor', (['self.cMask[L:R]'], {}), '(self.cMask[L:R])\n', (4431, 4448), False, 'import torch\n'), ((4449, 4484), 'torch.FloatTensor', 'torch.FloatTensor', (['self.cmaskL[L:R]'], {}), '(self.cmaskL[L:R])\n', (4466, 4484), False, 'import torch\n'), ((4485, 4520), 'torch.FloatTensor', 'torch.FloatTensor', (['self.cmaskR[L:R]'], {}), '(self.cmaskR[L:R])\n', (4502, 4520), False, 'import torch\n'), ((4521, 4555), 'torch.LongTensor', 'torch.LongTensor', (['self.clabel[L:R]'], {}), '(self.clabel[L:R])\n', (4537, 4555), False, 'import torch\n'), ((5179, 5213), 'torch.LongTensor', 'torch.LongTensor', (['self.uwords[L:R]'], {}), '(self.uwords[L:R])\n', (5195, 5213), False, 'import torch\n'), ((5214, 5247), 'torch.LongTensor', 'torch.LongTensor', (['self.uMask[L:R]'], {}), '(self.uMask[L:R])\n', (5230, 5247), False, 'import torch\n'), ((5248, 5283), 'torch.FloatTensor', 'torch.FloatTensor', (['self.umaskL[L:R]'], {}), '(self.umaskL[L:R])\n', (5265, 5283), 
False, 'import torch\n'), ((5284, 5319), 'torch.FloatTensor', 'torch.FloatTensor', (['self.umaskR[L:R]'], {}), '(self.umaskR[L:R])\n', (5301, 5319), False, 'import torch\n'), ((5320, 5354), 'torch.LongTensor', 'torch.LongTensor', (['self.ulabel[L:R]'], {}), '(self.ulabel[L:R])\n', (5336, 5354), False, 'import torch\n'), ((5645, 5664), 'numpy.max', 'np.max', (['self.utimes'], {}), '(self.utimes)\n', (5651, 5664), True, 'import numpy as np\n')] |
import os
import tensorflow as tf
import numpy as np
from tensorflow.contrib.rnn import LSTMCell
from q_learning.q_network import MentorAgent, ExperienceBuffer, update_target_graph, perform_update, process_capture
import gym
import universe
tf.reset_default_graph()
env = gym.make('Pong-v0')
# Network constants
FILTER_DIMS = [[8, 8], [4, 4], [3, 3], [6, 6]]
FILTER_NUMS = [32, 64, 64, 512]
STRIDES = [[4, 4], [2, 2], [1, 1], [1, 1]]
HIDDEN_SIZE = 512
ACTION_NUM = 6 # According to documentation
LEARNING_RATE = 1e-4
BUFFER_SIZE = 1000
# Session constants
BATCH_SIZE = 4
TRACE_LENGTH = 8
UPDATE_FREQ = 5
TAU = 0.99 # Discount factor on target Q-values
START_RAND = 1.0
END_RAND = 0.1
ANN_STEPS = 10000
NUM_EPISODES = 10000
PRE_TRAIN_STEPS = 10000
LOAD_MODEL = False
PATH = os.curdir + '/rdqn/model'
MAX_EPISODE_LENGTH = 50
SUMMARY_LENGTH = 100
SAVING_FREQ = 10000
# Defines cells to be used in the actor and the target network
actor_cell = LSTMCell(num_units=HIDDEN_SIZE, state_is_tuple=True)
target_cell = LSTMCell(num_units=HIDDEN_SIZE, state_is_tuple=True)
# Initialize networks and buffer
actor_qn = MentorAgent(HIDDEN_SIZE, actor_cell, FILTER_DIMS, FILTER_NUMS, STRIDES, 'actor', ACTION_NUM, LEARNING_RATE)
target_qn = \
MentorAgent(HIDDEN_SIZE, target_cell, FILTER_DIMS, FILTER_NUMS, STRIDES, 'target', ACTION_NUM, LEARNING_RATE)
session_buffer = ExperienceBuffer(BUFFER_SIZE)
# Define target_qn update OPs to be used in the session (tf.trainable_variables() operates on the graph)
tvars = tf.trainable_variables()
actor_tvars, target_tvars = tvars[:len(tvars)//2], tvars[len(tvars)//2:]
target_ops = update_target_graph(actor_tvars, target_tvars, TAU)
saver = tf.train.Saver(max_to_keep=5)
# Scheduling e-greedy exploration
epsilon = START_RAND
drop_per_step = (START_RAND - END_RAND) / ANN_STEPS
# Initialize tracking variables
steps_per_episode = list()
total_rewards = list()
total_steps = 0
# Make path for model saving
if not os.path.exists(PATH):
os.makedirs(PATH)
# Start the session
with tf.Session() as sess:
if LOAD_MODEL:
print('Loading model ... ')
checkpoint = tf.train.get_checkpoint_state(PATH)
saver.restore(sess, checkpoint.model_checkpoint_path)
sess.run(tf.global_variables_initializer())
# Set target network equal to the agent network
perform_update(target_ops, sess)
# Manage summaries
merged = tf.summary.merge_all()
training_writer = tf.summary.FileWriter('./train', sess.graph)
# Enter training loop
for i in range(NUM_EPISODES):
# Keep track of episodes and steps completed
print('Episode %d | Total steps taken: %d' % (i, total_steps))
episode_buffer = list()
# Get new observations
env_state = env.reset()
proc_env_state = process_capture(env_state)
done = False
running_reward = 0
step = 0
# Reset RNN hidden state
rnn_state = (np.zeros([1, HIDDEN_SIZE]), np.zeros([1, HIDDEN_SIZE]))
# Enter the Q-Network loop (play until a single game is completed, alternatively uncomment for max_ep_len)
# while step < MAX_EPISODE_LENGTH:
while True:
# step += 1
feed_dict = {actor_qn.scalar_input: proc_env_state, actor_qn.trace_length: 1,
actor_qn.state_in: rnn_state, actor_qn.batch_size: 1}
# Choose action following the e-greedy strategy
if np.random.rand(1) < epsilon or total_steps < PRE_TRAIN_STEPS:
# Take a random action
rnn_state_1 = sess.run(actor_qn.final_state, feed_dict=feed_dict)
action = np.random.randint(0, 3)
else:
# Obtain action from model
action, rnn_state_1 = sess.run([actor_qn.prediction, actor_qn.final_state], feed_dict=feed_dict)
action = action[0]
# Take a step in the environment
env_state_1, reward, done, _ = env.step(action)
proc_env_state_1 = process_capture(env_state_1)
total_steps += 1
# Add interaction to the episode buffer
episode_buffer.append(np.reshape([proc_env_state, action, reward, proc_env_state_1, done], [1, 5]))
# Proceed with exploitation once the exploration phase is concluded
if total_steps > PRE_TRAIN_STEPS:
if epsilon > END_RAND:
epsilon -= drop_per_step
# Update target network
if total_steps % (UPDATE_FREQ * 1000) == 0:
perform_update(target_ops, sess)
# Update agent network
if total_steps % UPDATE_FREQ == 0:
# Reset the RNN hidden state
rnn_state_train = (np.zeros([BATCH_SIZE, HIDDEN_SIZE]), np.zeros([BATCH_SIZE, HIDDEN_SIZE]))
# Get random batch of experiences from the experience buffer
train_batch = session_buffer.sample_experience(BATCH_SIZE, TRACE_LENGTH)
# Perform the Double-DQN update to the target Q-values
# Agent network
q_1 = sess.run(actor_qn.prediction,
feed_dict={actor_qn.scalar_input: (np.vstack(train_batch[:, 3]) / 255.0),
actor_qn.trace_length: TRACE_LENGTH,
actor_qn.state_in: rnn_state_train,
actor_qn.batch_size: BATCH_SIZE})
# Target network
q_2 = sess.run(target_qn.q_out,
feed_dict={target_qn.scalar_input: (np.vstack(train_batch[:, 3]) / 255.0),
target_qn.trace_length: TRACE_LENGTH,
target_qn.state_in: rnn_state_train,
target_qn.batch_size: BATCH_SIZE})
# Exclude final steps in each episode
end_multiplier = np.abs(train_batch[:, 4] - 1)
# Select q-values from target network based on actions predicted by the agent network
double_q = q_2[range(BATCH_SIZE * TRACE_LENGTH), q_1]
# See traget-Q double-DQN update equation
target_q = train_batch[:, 2] + (TAU * double_q * end_multiplier)
# Update agent network with the so obtained target_q values
_ = sess.run(actor_qn.update_model,
feed_dict={actor_qn.scalar_input: (np.vstack(train_batch[:, 0]) / 255.0),
actor_qn.target_q_holder: target_q,
actor_qn.action_holder: train_batch[:, 1],
actor_qn.trace_length: TRACE_LENGTH,
actor_qn.state_in: rnn_state_train,
actor_qn.batch_size: BATCH_SIZE})
# Update environment interaction variables
running_reward += reward
proc_env_state = proc_env_state_1
env_state = env_state_1
rnn_state = rnn_state_1
# Terminate episode once done
if done:
break
# Add episode to the experience buffer
buffer_array = np.array(episode_buffer)
# episode_buffer = zip(buffer_array)
session_buffer.add_experience(buffer_array, TRACE_LENGTH, buffer_array.shape[0])
# Update tracking lists
steps_per_episode.append(step)
total_rewards.append(running_reward)
# Save model periodically
if i % SAVING_FREQ == 0 and i != 0:
saver.save(sess, PATH + '/model-' + str(i) + '.cptk')
print('Model saved after %d steps!' % i)
# Report on the training performance of the actor network
if i % SUMMARY_LENGTH == 0 and i != 0:
print('Episode: %d | Steps taken: %d | Average episodic reward: %.4f | epsilon value: %.4f'
% (i, total_steps, np.mean(total_rewards[-SUMMARY_LENGTH:]), epsilon))
# Save final model
saver.save(sess, PATH + '/model-final' + '.cptk')
| [
"numpy.abs",
"tensorflow.trainable_variables",
"tensorflow.reset_default_graph",
"numpy.random.randint",
"numpy.mean",
"q_learning.q_network.perform_update",
"q_learning.q_network.process_capture",
"os.path.exists",
"q_learning.q_network.MentorAgent",
"tensorflow.summary.FileWriter",
"numpy.resh... | [((243, 267), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (265, 267), True, 'import tensorflow as tf\n'), ((275, 294), 'gym.make', 'gym.make', (['"""Pong-v0"""'], {}), "('Pong-v0')\n", (283, 294), False, 'import gym\n'), ((948, 1000), 'tensorflow.contrib.rnn.LSTMCell', 'LSTMCell', ([], {'num_units': 'HIDDEN_SIZE', 'state_is_tuple': '(True)'}), '(num_units=HIDDEN_SIZE, state_is_tuple=True)\n', (956, 1000), False, 'from tensorflow.contrib.rnn import LSTMCell\n'), ((1015, 1067), 'tensorflow.contrib.rnn.LSTMCell', 'LSTMCell', ([], {'num_units': 'HIDDEN_SIZE', 'state_is_tuple': '(True)'}), '(num_units=HIDDEN_SIZE, state_is_tuple=True)\n', (1023, 1067), False, 'from tensorflow.contrib.rnn import LSTMCell\n'), ((1112, 1223), 'q_learning.q_network.MentorAgent', 'MentorAgent', (['HIDDEN_SIZE', 'actor_cell', 'FILTER_DIMS', 'FILTER_NUMS', 'STRIDES', '"""actor"""', 'ACTION_NUM', 'LEARNING_RATE'], {}), "(HIDDEN_SIZE, actor_cell, FILTER_DIMS, FILTER_NUMS, STRIDES,\n 'actor', ACTION_NUM, LEARNING_RATE)\n", (1123, 1223), False, 'from q_learning.q_network import MentorAgent, ExperienceBuffer, update_target_graph, perform_update, process_capture\n'), ((1238, 1351), 'q_learning.q_network.MentorAgent', 'MentorAgent', (['HIDDEN_SIZE', 'target_cell', 'FILTER_DIMS', 'FILTER_NUMS', 'STRIDES', '"""target"""', 'ACTION_NUM', 'LEARNING_RATE'], {}), "(HIDDEN_SIZE, target_cell, FILTER_DIMS, FILTER_NUMS, STRIDES,\n 'target', ACTION_NUM, LEARNING_RATE)\n", (1249, 1351), False, 'from q_learning.q_network import MentorAgent, ExperienceBuffer, update_target_graph, perform_update, process_capture\n'), ((1365, 1394), 'q_learning.q_network.ExperienceBuffer', 'ExperienceBuffer', (['BUFFER_SIZE'], {}), '(BUFFER_SIZE)\n', (1381, 1394), False, 'from q_learning.q_network import MentorAgent, ExperienceBuffer, update_target_graph, perform_update, process_capture\n'), ((1509, 1533), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', 
(1531, 1533), True, 'import tensorflow as tf\n'), ((1620, 1671), 'q_learning.q_network.update_target_graph', 'update_target_graph', (['actor_tvars', 'target_tvars', 'TAU'], {}), '(actor_tvars, target_tvars, TAU)\n', (1639, 1671), False, 'from q_learning.q_network import MentorAgent, ExperienceBuffer, update_target_graph, perform_update, process_capture\n'), ((1681, 1710), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(5)'}), '(max_to_keep=5)\n', (1695, 1710), True, 'import tensorflow as tf\n'), ((1955, 1975), 'os.path.exists', 'os.path.exists', (['PATH'], {}), '(PATH)\n', (1969, 1975), False, 'import os\n'), ((1981, 1998), 'os.makedirs', 'os.makedirs', (['PATH'], {}), '(PATH)\n', (1992, 1998), False, 'import os\n'), ((2025, 2037), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2035, 2037), True, 'import tensorflow as tf\n'), ((2327, 2359), 'q_learning.q_network.perform_update', 'perform_update', (['target_ops', 'sess'], {}), '(target_ops, sess)\n', (2341, 2359), False, 'from q_learning.q_network import MentorAgent, ExperienceBuffer, update_target_graph, perform_update, process_capture\n'), ((2396, 2418), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (2416, 2418), True, 'import tensorflow as tf\n'), ((2441, 2485), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""./train"""', 'sess.graph'], {}), "('./train', sess.graph)\n", (2462, 2485), True, 'import tensorflow as tf\n'), ((2123, 2158), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['PATH'], {}), '(PATH)\n', (2152, 2158), True, 'import tensorflow as tf\n'), ((2235, 2268), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2266, 2268), True, 'import tensorflow as tf\n'), ((2791, 2817), 'q_learning.q_network.process_capture', 'process_capture', (['env_state'], {}), '(env_state)\n', (2806, 2817), False, 'from q_learning.q_network import MentorAgent, ExperienceBuffer, 
update_target_graph, perform_update, process_capture\n'), ((7467, 7491), 'numpy.array', 'np.array', (['episode_buffer'], {}), '(episode_buffer)\n', (7475, 7491), True, 'import numpy as np\n'), ((2937, 2963), 'numpy.zeros', 'np.zeros', (['[1, HIDDEN_SIZE]'], {}), '([1, HIDDEN_SIZE])\n', (2945, 2963), True, 'import numpy as np\n'), ((2965, 2991), 'numpy.zeros', 'np.zeros', (['[1, HIDDEN_SIZE]'], {}), '([1, HIDDEN_SIZE])\n', (2973, 2991), True, 'import numpy as np\n'), ((4017, 4045), 'q_learning.q_network.process_capture', 'process_capture', (['env_state_1'], {}), '(env_state_1)\n', (4032, 4045), False, 'from q_learning.q_network import MentorAgent, ExperienceBuffer, update_target_graph, perform_update, process_capture\n'), ((3648, 3671), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (3665, 3671), True, 'import numpy as np\n'), ((4161, 4237), 'numpy.reshape', 'np.reshape', (['[proc_env_state, action, reward, proc_env_state_1, done]', '[1, 5]'], {}), '([proc_env_state, action, reward, proc_env_state_1, done], [1, 5])\n', (4171, 4237), True, 'import numpy as np\n'), ((3440, 3457), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (3454, 3457), True, 'import numpy as np\n'), ((4570, 4602), 'q_learning.q_network.perform_update', 'perform_update', (['target_ops', 'sess'], {}), '(target_ops, sess)\n', (4584, 4602), False, 'from q_learning.q_network import MentorAgent, ExperienceBuffer, update_target_graph, perform_update, process_capture\n'), ((6094, 6123), 'numpy.abs', 'np.abs', (['(train_batch[:, 4] - 1)'], {}), '(train_batch[:, 4] - 1)\n', (6100, 6123), True, 'import numpy as np\n'), ((4782, 4817), 'numpy.zeros', 'np.zeros', (['[BATCH_SIZE, HIDDEN_SIZE]'], {}), '([BATCH_SIZE, HIDDEN_SIZE])\n', (4790, 4817), True, 'import numpy as np\n'), ((4819, 4854), 'numpy.zeros', 'np.zeros', (['[BATCH_SIZE, HIDDEN_SIZE]'], {}), '([BATCH_SIZE, HIDDEN_SIZE])\n', (4827, 4854), True, 'import numpy as np\n'), ((8194, 8234), 'numpy.mean', 
'np.mean', (['total_rewards[-SUMMARY_LENGTH:]'], {}), '(total_rewards[-SUMMARY_LENGTH:])\n', (8201, 8234), True, 'import numpy as np\n'), ((5268, 5296), 'numpy.vstack', 'np.vstack', (['train_batch[:, 3]'], {}), '(train_batch[:, 3])\n', (5277, 5296), True, 'import numpy as np\n'), ((5712, 5740), 'numpy.vstack', 'np.vstack', (['train_batch[:, 3]'], {}), '(train_batch[:, 3])\n', (5721, 5740), True, 'import numpy as np\n'), ((6655, 6683), 'numpy.vstack', 'np.vstack', (['train_batch[:, 0]'], {}), '(train_batch[:, 0])\n', (6664, 6683), True, 'import numpy as np\n')] |
import argparse
from itertools import product
import os.path as osp
import numpy as np
import pandas as pd
import scipy.spatial
import scipy.optimize
import tqdm
def x_3(array):
    """Cubic test function: return the element-wise cube of *array*."""
    return array ** 3
def module_x(array):
    """V-shaped test function: absolute deviation from 0.2, i.e. |x - 0.2|."""
    return np.abs(array - 0.2)
def sin(array):
    """Oscillating test function x * sin(1/x); undefined at x == 0."""
    return array * np.sin(1 / array)
def brute_force(func, left, right, epsilon=0.001):
    """Exhaustive grid search for the minimum of *func* on [left, right).

    The interval is sampled with step *epsilon* and every sample is
    evaluated vectorially; the best grid point is reported.

    Returns:
        dict with the minimiser ("min"), its function value ("func_min")
        and the number of evaluations ("iterations").
    """
    grid = np.arange(left, right, epsilon)
    evaluations = func(grid)
    best = np.argmin(evaluations)
    return {
        "min": grid[best],
        "func_min": evaluations[best],
        "iterations": grid.shape[0],
    }
def dichotomy(func, left, right, epsilon=0.001, delta=None):
    """Minimise a unimodal function on [left, right] by dichotomous search.

    Each step places two probes ``delta`` apart around the centre of the
    bracket and discards the half that cannot contain the minimum.

    Args:
        func: unimodal callable of one float argument.
        left, right: end points of the search interval.
        epsilon: stop once the bracket is narrower than this.
        delta: probe separation; defaults to ``epsilon / 2`` (must stay
            below ``epsilon`` for the method to converge).

    Returns:
        dict with the estimated minimiser ("min"), its function value
        ("func_min"), the number of function evaluations ("iterations")
        and the history of brackets ("coords").
    """
    if delta is None:
        delta = epsilon / 2
    coords = [[left, right]]
    iters = 0
    while (right - left) > epsilon:
        x1, x2 = (left + right - delta) / 2, (left + right + delta) / 2
        f_x1, f_x2 = map(func, [x1, x2])
        if f_x1 <= f_x2:
            right = x2
        else:
            left = x1
        iters += 2
        coords.append([left, right])
    # BUGFIX: report the midpoint of the final bracket.  The original
    # expression ``(right - left) / 2 + right`` lies *outside* [left, right].
    midpoint = (left + right) / 2
    return {"min": midpoint, "func_min": func(midpoint), "iterations": iters, "coords": coords}
def golden_section(func, left, right, epsilon=0.001):
    """Minimise a unimodal function on [left, right] by golden-section search.

    Only one new probe is evaluated per iteration; the surviving probe from
    the previous step is reused, which is the classical efficiency gain of
    this method over dichotomy.

    Returns:
        dict with the estimated minimiser ("min"), its function value
        ("func_min"), the number of function evaluations ("iterations")
        and the history of brackets ("coords").
    """
    # (3 - sqrt(5)) / 2 == 1 - 1/phi, the golden-section split ratio.
    delta = (3 - np.sqrt(5)) / 2
    coords = [[left, right]]
    x1, x2 = left + delta * (right - left), right - delta * (right - left)
    f_x1, f_x2 = map(func, [x1, x2])
    iters = 2
    while (right - left) > epsilon:
        if f_x1 <= f_x2:
            # Minimum lies in [left, x2]: reuse x1 as the new upper probe.
            right = x2
            x2 = x1
            f_x2 = f_x1
            calc_x2 = False
        else:
            # Minimum lies in [x1, right]: reuse x2 as the new lower probe.
            left = x1
            x1 = x2
            f_x1 = f_x2
            calc_x2 = True
        coords.append([left, right])
        if calc_x2:
            x2 = right - delta * (right - left)
            f_x2 = func(x2)
        else:
            x1 = left + delta * (right - left)
            f_x1 = func(x1)
        iters += 1
    # BUGFIX: report the midpoint of the final bracket.  The original
    # expression ``(right - left) / 2 + right`` lies *outside* [left, right].
    midpoint = (left + right) / 2
    return {"min": midpoint, "func_min": func(midpoint), "iterations": iters, "coords": coords}
def linear_approx(X, a, b):
    """Linear model: evaluate a*X + b."""
    return a * X + b
def rational_approx(X, a, b):
    """Rational model: evaluate a / (1 + b*X)."""
    denominator = 1 + b * X
    return a / denominator
def loss(func, X, a, b, y_true, a_bounds=(0, 1), b_bounds=(0, 1), apply_bounds=False):
    """Sum of squared errors of func(X, a, b) against *y_true*.

    When *apply_bounds* is set, any (a, b) outside the given boxes is
    penalised with a huge constant.  These artificial bounds exist for the
    Nelder-Mead optimiser, which is unconstrained and otherwise tends to
    find "optimal" solutions outside the region the brute-force search
    covers.
    """
    if apply_bounds:
        a_outside = not (a_bounds[0] <= a <= a_bounds[1])
        b_outside = not (b_bounds[0] <= b <= b_bounds[1])
        if a_outside or b_outside:
            return 10 ** 10
    residuals = func(X, a, b) - y_true
    return np.sum(residuals ** 2)
def brute_force_opt(func, X, y_true, a_bounds=(0, 1), b_bounds=(0, 1), epsilon=0.001):
    """Grid search over (a, b) minimising the squared-error loss.

    Both parameter axes are sampled with step *epsilon* (end point
    included).  Ties are resolved in favour of the first grid point
    encountered, matching a row-major scan over a then b.
    """
    a_grid = np.arange(a_bounds[0], a_bounds[1] + epsilon, epsilon)
    b_grid = np.arange(b_bounds[0], b_bounds[1] + epsilon, epsilon)
    best_loss = 10 ** 10
    best_args = None
    for a, b in product(a_grid, b_grid):
        current = loss(func, X, a, b, y_true)
        if current < best_loss:
            best_loss = current
            best_args = {"a": a, "b": b}
    return {"loss": best_loss, "args": best_args, "iterations": a_grid.shape[0] * b_grid.shape[0]}
def gauss_opt(func, X, y_true, a_bounds=(0, 1), b_bounds=(0, 1), epsilon=0.001):
    """Coordinate-descent (Gauss-Seidel) fit of (a, b) minimising the loss.

    Alternately optimises `a` with `b` fixed and vice versa; each 1-D
    sub-problem is solved with `golden_section`.  Iterates until both the
    step in (a, b)-space and the loss change fall below `epsilon`.

    Returns:
        dict with the final loss, fitted args, total number of 1-D
        evaluations, per-sweep loss history and the (a, b) trajectory.
    """
    # Start from the centre of the parameter box.
    a, b = map(np.mean, [a_bounds, b_bounds])
    a_prev, b_prev = a_bounds[0], b_bounds[0]
    loss_prev = loss(func, X, a, b, y_true)
    min_loss = 10 ** 10
    iters = 1
    loss_values = []
    coords = [[a, b]]
    # NOTE(review): `loss_prev` is never updated inside the loop, so the
    # second stopping test always compares against the *initial* loss --
    # confirm whether it was meant to track the previous sweep's loss.
    while (
            scipy.spatial.distance.euclidean([a_prev, b_prev], [a, b]) > epsilon and np.abs(
            loss_prev - min_loss) > epsilon
    ):
        for opt_var in ["a", "b"]:
            if opt_var == "a":
                # 1-D search over `a` with `b` frozen at its current value.
                aux_func = lambda x: loss(func, X, a=x, b=b, y_true=y_true)
                # NOTE(review): upper limit is b_bounds[1]; a_bounds[1] was
                # probably intended (harmless while both boxes coincide,
                # as in the __main__ driver where both are (-2, 2)).
                opt = golden_section(aux_func, a_bounds[0], b_bounds[1], epsilon=epsilon)
                a_prev = a
                a = opt["min"]
            else:
                # 1-D search over `b` with `a` frozen.
                aux_func = lambda x: loss(func, X, a=a, b=x, y_true=y_true)
                opt = golden_section(aux_func, b_bounds[0], b_bounds[1], epsilon=epsilon)
                b_prev = b
                b = opt["min"]
            iters += opt["iterations"]
            min_loss = opt["func_min"]
        loss_values.append(min_loss)
        coords.append([a, b])
    return {
        "loss": min_loss,
        "args": {"a": a, "b": b},
        "iterations": iters,
        "loss_values": loss_values,
        "coords": coords,
    }
def nelder_mead_opt(func, X, y_true, a_bounds=(0, 1), b_bounds=(0, 1), epsilon=0.001):
    """Fit (a, b) with scipy's Nelder-Mead simplex search.

    The search starts from the centre of the parameter box.  The loss is
    clamped outside the box (apply_bounds=True) because Nelder-Mead itself
    has no constraint handling.
    """
    def objective(params):
        # params is the simplex vertex [a, b].
        return loss(func, X=X, a=params[0], b=params[1], y_true=y_true,
                    a_bounds=a_bounds, b_bounds=b_bounds, apply_bounds=True)

    start = np.asarray([np.mean(a_bounds), np.mean(b_bounds)])
    result = scipy.optimize.minimize(
        objective, x0=start, method="Nelder-Mead",
        options={"xatol": epsilon, "fatol": epsilon},
    )
    return {"loss": result.fun, "args": {"a": result.x[0], "b": result.x[1]}, "iterations": result.nfev}
if __name__ == "__main__":
parser = argparse.ArgumentParser("Gather data for task 2")
parser.add_argument("--output_1d",
default=osp.join(osp.dirname(osp.realpath(__file__)), "..", "data", "task2_1d.csv"),
help="Output file")
parser.add_argument("--output_data_2d",
default=osp.join(osp.dirname(osp.realpath(__file__)), "..", "data", "task2_data_2d.csv"),
help="Output file")
parser.add_argument("--output_2d",
default=osp.join(osp.dirname(osp.realpath(__file__)), "..", "data", "task2_2d.csv"),
help="Output file")
parser.add_argument("--random_state", type=int, default=111, help="Random state for random generator")
args = parser.parse_args()
np.random.seed(args.random_state)
# 1d optimization
data = []
for optimizer in ("brute_force", "dichotomy", "golden_section"):
for func, interval in zip(("x_3", "module_x", "sin"), ([0, 1], [0, 1], [0.1, 1])):
result = eval(optimizer)(eval(func), interval[0], interval[1])
data.append(
{"optimizer": optimizer, "func": func, "min": result["min"], "func_min": result["func_min"],
"iterations": result["iterations"]}
)
data = pd.DataFrame(data)
data.to_csv(args.output_1d, index=False)
# 2d optimization
# Generate data
alpha, beta = np.random.uniform(size=2)
print(f'Alpha: {alpha}, beta: {beta}')
X = np.arange(0, 1.01, 0.01)
deltas = np.random.normal(size=X.shape)
y_clean = alpha * X + beta
y = alpha * X + beta + deltas
opt_data = pd.DataFrame(np.vstack([X, y_clean, y]).T, columns=['X', 'y_clean', 'y'])
opt_data.to_csv(args.output_data_2d, index=False)
# Gather optimization data
data_opt = []
for method, approx_func in tqdm.tqdm(
product(["brute_force_opt", "gauss_opt", "nelder_mead_opt"], ["linear_approx", "rational_approx"]),
total=6,
desc="Optimizing 2D",
):
opt_res = eval(method)(eval(approx_func), X, y, a_bounds=(-2, 2), b_bounds=(-2, 2))
data_opt.append(
{
"method": method,
"approx_func": approx_func,
"loss": opt_res["loss"],
"a": opt_res["args"]["a"],
"b": opt_res["args"]["b"],
"iterations": opt_res["iterations"],
}
)
data_opt = pd.DataFrame(data_opt)
data_opt.to_csv(args.output_2d, index=False)
| [
"pandas.DataFrame",
"numpy.random.uniform",
"numpy.sum",
"numpy.abs",
"argparse.ArgumentParser",
"numpy.random.seed",
"numpy.asarray",
"os.path.realpath",
"numpy.argmin",
"numpy.min",
"numpy.sin",
"numpy.arange",
"numpy.random.normal",
"itertools.product",
"numpy.vstack",
"numpy.sqrt"
... | [((237, 256), 'numpy.abs', 'np.abs', (['(array - 0.2)'], {}), '(array - 0.2)\n', (243, 256), True, 'import numpy as np\n'), ((379, 410), 'numpy.arange', 'np.arange', (['left', 'right', 'epsilon'], {}), '(left, right, epsilon)\n', (388, 410), True, 'import numpy as np\n'), ((450, 464), 'numpy.min', 'np.min', (['values'], {}), '(values)\n', (456, 464), True, 'import numpy as np\n'), ((479, 496), 'numpy.argmin', 'np.argmin', (['values'], {}), '(values)\n', (488, 496), True, 'import numpy as np\n'), ((2662, 2692), 'numpy.sum', 'np.sum', (['((approx - y_true) ** 2)'], {}), '((approx - y_true) ** 2)\n', (2668, 2692), True, 'import numpy as np\n'), ((2797, 2851), 'numpy.arange', 'np.arange', (['a_bounds[0]', '(a_bounds[1] + epsilon)', 'epsilon'], {}), '(a_bounds[0], a_bounds[1] + epsilon, epsilon)\n', (2806, 2851), True, 'import numpy as np\n'), ((2867, 2921), 'numpy.arange', 'np.arange', (['b_bounds[0]', '(b_bounds[1] + epsilon)', 'epsilon'], {}), '(b_bounds[0], b_bounds[1] + epsilon, epsilon)\n', (2876, 2921), True, 'import numpy as np\n'), ((5198, 5247), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Gather data for task 2"""'], {}), "('Gather data for task 2')\n", (5221, 5247), False, 'import argparse\n'), ((5977, 6010), 'numpy.random.seed', 'np.random.seed', (['args.random_state'], {}), '(args.random_state)\n', (5991, 6010), True, 'import numpy as np\n'), ((6495, 6513), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (6507, 6513), True, 'import pandas as pd\n'), ((6620, 6645), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(2)'}), '(size=2)\n', (6637, 6645), True, 'import numpy as np\n'), ((6697, 6721), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', '(0.01)'], {}), '(0, 1.01, 0.01)\n', (6706, 6721), True, 'import numpy as np\n'), ((6735, 6765), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'X.shape'}), '(size=X.shape)\n', (6751, 6765), True, 'import numpy as np\n'), ((7670, 7692), 'pandas.DataFrame', 
'pd.DataFrame', (['data_opt'], {}), '(data_opt)\n', (7682, 7692), True, 'import pandas as pd\n'), ((294, 311), 'numpy.sin', 'np.sin', (['(1 / array)'], {}), '(1 / array)\n', (300, 311), True, 'import numpy as np\n'), ((7079, 7182), 'itertools.product', 'product', (["['brute_force_opt', 'gauss_opt', 'nelder_mead_opt']", "['linear_approx', 'rational_approx']"], {}), "(['brute_force_opt', 'gauss_opt', 'nelder_mead_opt'], [\n 'linear_approx', 'rational_approx'])\n", (7086, 7182), False, 'from itertools import product\n'), ((1253, 1263), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (1260, 1263), True, 'import numpy as np\n'), ((3690, 3718), 'numpy.abs', 'np.abs', (['(loss_prev - min_loss)'], {}), '(loss_prev - min_loss)\n', (3696, 3718), True, 'import numpy as np\n'), ((4956, 4976), 'numpy.asarray', 'np.asarray', (['[a0, b0]'], {}), '([a0, b0])\n', (4966, 4976), True, 'import numpy as np\n'), ((6859, 6885), 'numpy.vstack', 'np.vstack', (['[X, y_clean, y]'], {}), '([X, y_clean, y])\n', (6868, 6885), True, 'import numpy as np\n'), ((5340, 5362), 'os.path.realpath', 'osp.realpath', (['__file__'], {}), '(__file__)\n', (5352, 5362), True, 'import os.path as osp\n'), ((5537, 5559), 'os.path.realpath', 'osp.realpath', (['__file__'], {}), '(__file__)\n', (5549, 5559), True, 'import os.path as osp\n'), ((5734, 5756), 'os.path.realpath', 'osp.realpath', (['__file__'], {}), '(__file__)\n', (5746, 5756), True, 'import os.path as osp\n')] |
from kapteyn import maputils
import numpy
from service import *
# Figure setup; plt, figsize, plotbox, epsilon, doplot and markerpos come
# from the shared `service` module (star-imported above) -- TODO confirm.
fignum = 20
fig = plt.figure(figsize=figsize)
frame = fig.add_axes(plotbox)
# Conic projection parameters: reference latitude theta_a and the two
# standard parallels theta_1/theta_2; eta is half their separation.
theta_a = 45
t1 = 20.0; t2 = 70.0
eta = abs(t1-t2)/2.0
title = r"""Conic perspective projection (COP) with:
$\theta_a=45^\circ$, $\theta_1=20^\circ$ and $\theta_2=70^\circ$. (Cal. fig.24)"""
# Minimal FITS header describing a COP (conic perspective) sky projection;
# PV2_1/PV2_2 carry theta_a and eta respectively.
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
          'CTYPE1' : 'RA---COP',
          'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -5.5,
          'CTYPE2' : 'DEC--COP',
          'CRVAL2' : theta_a, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 5.5,
          'PV2_1' : theta_a, 'PV2_2' : eta
          }
# Meridians every 30 deg (the last one nudged just past 180 to close the
# grid) and parallels every 15 deg.
X = numpy.arange(0,370.0,30.0); X[-1] = 180+epsilon
Y = numpy.arange(-30,90,15.0)  # Diverges at theta_a +- 90.0
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-30,90.0), wxlim=(0,360),
                       startx=X, starty=Y)
# Styling: dashed green border parallel; bold equator and prime meridian.
grat.setp_lineswcs1(-30, linestyle='--', color='g')
grat.setp_lineswcs0(0, lw=2)
grat.setp_lineswcs1(0, lw=2)
# World coordinates at which longitude/latitude labels are drawn.
lon_world = list(range(0,360,30))
lon_world.append(180+epsilon)
lat_world = [-30, 0, 30, 60]
addangle0 = -90
lat_constval = -31
labkwargs0 = {'color':'r', 'va':'center', 'ha':'center'}
labkwargs1 = {'color':'b', 'va':'bottom', 'ha':'left'}
# `doplot` (from service) renders the graticule with the settings shared by
# all figures in this series.
doplot(frame, fignum, annim, grat, title,
       lon_world=lon_world, lat_world=lat_world,
       lat_constval=lat_constval,
       labkwargs0=labkwargs0, labkwargs1=labkwargs1,
       addangle0=addangle0, markerpos=markerpos)
"kapteyn.maputils.FITSimage",
"numpy.arange"
] | [((665, 693), 'numpy.arange', 'numpy.arange', (['(0)', '(370.0)', '(30.0)'], {}), '(0, 370.0, 30.0)\n', (677, 693), False, 'import numpy\n'), ((718, 745), 'numpy.arange', 'numpy.arange', (['(-30)', '(90)', '(15.0)'], {}), '(-30, 90, 15.0)\n', (730, 745), False, 'import numpy\n'), ((779, 820), 'kapteyn.maputils.FITSimage', 'maputils.FITSimage', ([], {'externalheader': 'header'}), '(externalheader=header)\n', (797, 820), False, 'from kapteyn import maputils\n')] |
# This program is for being able to measure model accuracy in the Kaggle Digit
# Recognizer competition. This program only uses the training set, subsetting
# part of the data to be used as a test set. This lets the user get instant
# feedback as to the accuracy of the algorithm and more quickly tweak it for
# improvement.
#
# The basic algorithm is as follows:
# 1. Read in and normalize our data, setting some aside for testing.
# 2. In order to reduce the number of features, Principal Component Analysis
# Decomposition is performed on the training data.
# 3. With a reduced number of features, a Support Vector Classification
# model is trained on the training data.
# 4. The model is then used to predict labels for the test set of data.
#
#
# Information on the Kaggle competition can be found here:
# https://www.kaggle.com/c/digit-recognizer
#
# <NAME> - <EMAIL>
import matplotlib.pyplot as plt
import numpy as np
import pandas
from sklearn import svm, metrics, decomposition
print('Reading in data...')
# Fraction of data to train on, 1-FRACTION will be tested
FRACTION = 9.0 / 10.0
# Read in our training data
train_data_df = pandas.read_csv('train.csv', header = 0)
# Build our labels and data separately
# NOTE(review): DataFrame.ix was removed in pandas 1.0 -- use .iloc here.
train_labels = train_data_df.ix[:,0:1].copy()
train_data = train_data_df.ix[:,1:].copy()
# Display the first few images with labels
# NOTE(review): .as_matrix() was also removed in pandas 1.0 (use .to_numpy()).
display_images = []
display_labels = []
for i in range(0,4):
    display_images.append(train_data.as_matrix()[i,:].reshape(28,28))
    display_labels.append(train_labels.as_matrix()[i,:])
    # Plot images and labels
    plt.subplot(2, 4, i + 1)
    plt.axis('off')
    plt.imshow(display_images[i], cmap=plt.get_cmap('gray'), interpolation='nearest')
    plt.title('Training: %i' % display_labels[i])
# Need to get a total number of images
num_images = train_labels.shape[0]
# Normalize images so that there is a consistent range in values for pixels
print('Normalizing...')
# Normalize by dividing each row by the maximum in that row
maxs = train_data.max(axis=1)
train_data = train_data.div(maxs, axis=0)
# Next, we want to do some PCA decomposition to reduce the dimensionality
print('Decomposing...')
# The number of principal components to reduce the feature set to. The larger
# the number of components, the more accurate prediction will be. However,
# execution will take much longer. I found 50 was a decent sweet spot with
# diminishing returns beyond this point.
num_components = 50
# Create our PCA operator and calculate the components
pca_components = decomposition.PCA(n_components=num_components, whiten=True)
# NOTE(review): num_images*FRACTION is a float; modern numpy/pandas require
# an integer slice bound -- wrap in int(...) when upgrading.
pca_components.fit(train_data.as_matrix()[:num_images*FRACTION,:])
# Transform our data into the features calculated via PCA
transformed = pca_components.transform(train_data.as_matrix()[:num_images*FRACTION,:])
# Now we are prepared for the Support Vector Classifier
print('Training the SVC...')
# Create our SVC classifier
classifier = svm.SVC()
# Train our model
classifier.fit(transformed, np.ravel(train_labels.as_matrix()[:num_images*FRACTION,:]))
# After training our model, we are ready to test its accuracy with the test data
print('Predicting with the SVC...')
# Now, predict the rest of the train data so that we can score our model making
# sure that the test data is decomposed via PCA like before
expected = train_labels.as_matrix()[num_images*FRACTION:,:]
test_transformed = pca_components.transform(train_data.as_matrix()[num_images*FRACTION:,:])
predicted = classifier.predict(test_transformed)
# Print report
print("Classification report for classifier %s:\n%s\n"
      % (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
# Reshape predictions to a column vector so it aligns with `expected`.
predicted.shape = (predicted.shape[0], 1)
difference = np.not_equal(expected, predicted)
num_different = difference.astype(int).sum()
print('Number incorrectly labeled: %i' % num_different)
# Plot and label some predicted results
predicted_images = []
predicted_labels = []
for i in range(0,4):
    predicted_images.append(train_data.as_matrix()[num_images*FRACTION + i,:].reshape(28,28))
    predicted_labels.append(predicted[i])
    # Plot the predictions
    plt.subplot(2, 4, i + 5)
    plt.axis('off')
    plt.imshow(predicted_images[i], cmap=plt.get_cmap('gray'), interpolation='nearest')
    plt.title('Prediction: %i' % predicted_labels[i])
# Show the plot finally
plt.show()
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"pandas.read_csv",
"matplotlib.pyplot.axis",
"sklearn.metrics.classification_report",
"numpy.not_equal",
"sklearn.decomposition.PCA",
"sklearn.svm.SVC",
"sklearn.metrics.confusion_matr... | [((1176, 1214), 'pandas.read_csv', 'pandas.read_csv', (['"""train.csv"""'], {'header': '(0)'}), "('train.csv', header=0)\n", (1191, 1214), False, 'import pandas\n'), ((2592, 2651), 'sklearn.decomposition.PCA', 'decomposition.PCA', ([], {'n_components': 'num_components', 'whiten': '(True)'}), '(n_components=num_components, whiten=True)\n', (2609, 2651), False, 'from sklearn import svm, metrics, decomposition\n'), ((2996, 3005), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (3003, 3005), False, 'from sklearn import svm, metrics, decomposition\n'), ((3857, 3890), 'numpy.not_equal', 'np.not_equal', (['expected', 'predicted'], {}), '(expected, predicted)\n', (3869, 3890), True, 'import numpy as np\n'), ((4499, 4509), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4507, 4509), True, 'import matplotlib.pyplot as plt\n'), ((1629, 1653), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(4)', '(i + 1)'], {}), '(2, 4, i + 1)\n', (1640, 1653), True, 'import matplotlib.pyplot as plt\n'), ((1658, 1673), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1666, 1673), True, 'import matplotlib.pyplot as plt\n'), ((1764, 1809), 'matplotlib.pyplot.title', 'plt.title', (["('Training: %i' % display_labels[i])"], {}), "('Training: %i' % display_labels[i])\n", (1773, 1809), True, 'import matplotlib.pyplot as plt\n'), ((4286, 4310), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(4)', '(i + 5)'], {}), '(2, 4, i + 5)\n', (4297, 4310), True, 'import matplotlib.pyplot as plt\n'), ((4315, 4330), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4323, 4330), True, 'import matplotlib.pyplot as plt\n'), ((4423, 4472), 'matplotlib.pyplot.title', 'plt.title', (["('Prediction: %i' % predicted_labels[i])"], {}), "('Prediction: %i' % predicted_labels[i])\n", (4432, 4472), True, 'import matplotlib.pyplot as plt\n'), ((3754, 3799), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', 
(['expected', 'predicted'], {}), '(expected, predicted)\n', (3778, 3799), False, 'from sklearn import svm, metrics, decomposition\n'), ((1713, 1733), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (1725, 1733), True, 'import matplotlib.pyplot as plt\n'), ((3669, 3719), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['expected', 'predicted'], {}), '(expected, predicted)\n', (3698, 3719), False, 'from sklearn import svm, metrics, decomposition\n'), ((4372, 4392), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (4384, 4392), True, 'import matplotlib.pyplot as plt\n')] |
import gym
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_probability as tfp
# inspired from https://github.com/lubiluk/ddpg
from Lux_Project_Env import frozen_lake
class DDPG:
    """DDPG-style agent whose actor outputs a categorical subgoal distribution.

    Holds actor/critic networks plus slowly-tracking target copies, an
    Ornstein-Uhlenbeck noise process and an experience buffer (BufferH).
    """
    def __init__(self, config):
        # `config` is a dict; required keys are read below -- state_dim,
        # state_n, subgoal_dim, subgoal_n, std_dev, critic_lr, actor_lr,
        # gamma and tau.
        self.state_dim = config['state_dim']
        print("Size of State Space -> {}".format(self.state_dim))
        self.state_n = config['state_n']
        self.subgoal_dim = config['subgoal_dim']
        print("Size of Subgoal Space -> {}".format(self.subgoal_dim))
        self.subgoal_n = config['subgoal_n']
        self.std_dev = config['std_dev'] # 0.2
        self.ou_noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(self.std_dev) * np.ones(1))
        self.actor_model = self.get_actor()
        self.critic_model = self.get_critic()
        self.target_actor = self.get_actor()
        self.target_critic = self.get_critic()
        # Making the weights equal initially
        self.target_actor.set_weights(self.actor_model.get_weights())
        self.target_critic.set_weights(self.critic_model.get_weights())
        # Learning rate for actor-critic models
        self.critic_lr = config['critic_lr'] # 0.002
        self.actor_lr = config['actor_lr'] #0.001
        self.critic_optimizer = tf.keras.optimizers.Adam(self.critic_lr)
        self.actor_optimizer = tf.keras.optimizers.Adam(self.actor_lr)
        # Discount factor for future rewards
        self.gamma = config['gamma']
        # Used to update target networks
        self.tau = config['tau']
        # Replay buffer: capacity 50000 transitions, batch size 64.
        self.buffer = BufferH(self, self.state_dim, self.subgoal_n, 50000, 64)
    def policy(self, state):
        """Sample a subgoal index from the actor's softmax output for `state`."""
        state = tf.expand_dims(tf.convert_to_tensor(state), 0)
        # NOTE(review): `noise_object` is created but the commented-out
        # noise-injection lines below mean it is currently unused here.
        noise_object = self.ou_noise
        prob = tf.squeeze(self.actor_model(state))
        # noise = noise_object()
        # Adding noise to action
        # sampled_subgoal = sampled_subgoal.numpy() + noise
        prob = prob.numpy()
        #print(prob)
        # prob += noise_object()
        # Exploration comes from sampling the categorical distribution
        # rather than taking the argmax.
        dist = tfp.distributions.Categorical(probs=prob, dtype=tf.float32)
        subgoal = dist.sample()
        # We make sure action is within bounds
        # legal_subgoal = np.clip(sampled_subgoal, self.lower_bound, self.upper_bound)
        return subgoal.numpy()
    def get_actor(self):
        """Build the actor: state -> softmax over `subgoal_n` subgoals."""
        # Initialize weights between -3e-3 and 3-e3
        last_init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003)
        inputs = layers.Input(shape=(self.state_dim,))
        # Reshape to a length-1 sequence so the LSTM layer can consume it.
        out = layers.Reshape((1, self.state_dim,))(inputs)
        out = layers.LSTM(4)(out)
        out = layers.Dense(256, activation="relu")(out)
        out = layers.Dense(256, activation="relu")(out)
        outputs = layers.Dense(self.subgoal_n, activation="softmax", kernel_initializer=last_init)(out)
        # Our upper bound is 2.0 for Pendulum.
        # outputs = outputs * self.upper_bound
        model = tf.keras.Model(inputs, outputs)
        return model
    def get_critic(self):
        """Build the critic: (state, subgoal one-hot/probs) -> scalar Q-value."""
        # State as input
        # NOTE(review): shape=(self.state_dim) lacks the trailing comma, so
        # Keras receives a bare int rather than a tuple (accepted, but
        # inconsistent with get_actor) -- confirm intended.
        state_input = layers.Input(shape=(self.state_dim))
        state_out = layers.Dense(16, activation="relu")(state_input)
        state_out = layers.Dense(32, activation="relu")(state_out)
        # Action as input
        action_input = layers.Input(shape=(self.subgoal_n))
        action_out = layers.Dense(32, activation="relu")(action_input)
        # Both are passed through seperate layer before concatenating
        concat = layers.Concatenate()([state_out, action_out])
        out = layers.Dense(256, activation="relu")(concat)
        out = layers.Dense(256, activation="relu")(out)
        outputs = layers.Dense(1)(out)
        # Outputs single value for give state-action
        model = tf.keras.Model([state_input, action_input], outputs)
        return model
    def learn(self):
        """Run one gradient step from the buffer, then soft-update both targets."""
        self.buffer.learn()
        update_target(self.target_actor.variables, self.actor_model.variables, self.tau)
        update_target(self.target_critic.variables, self.critic_model.variables, self.tau)
    def record(self, state, subgoal, reward, prev_state):
        """Store one transition; buffer order is (prev_state, subgoal, reward, state)."""
        self.buffer.record((prev_state, subgoal, reward, state))
class OUActionNoise:
    """Temporally correlated exploration noise (Ornstein-Uhlenbeck process).

    Successive samples drift towards ``mean`` at rate ``theta`` while being
    perturbed by Gaussian increments scaled by ``std_deviation``.
    """

    def __init__(self, mean, std_deviation, theta=0.15, dt=1e-2, x_initial=None):
        self.theta = theta
        self.mean = mean
        self.std_dev = std_deviation
        self.dt = dt
        self.x_initial = x_initial
        self.reset()

    def __call__(self):
        # Euler step of dx = theta*(mu - x)*dt + sigma*sqrt(dt)*dW; formula
        # from https://www.wikipedia.org/wiki/Ornstein-Uhlenbeck_process.
        drift = self.theta * (self.mean - self.x_prev) * self.dt
        diffusion = self.std_dev * np.sqrt(self.dt) * np.random.normal(size=self.mean.shape)
        sample = self.x_prev + drift + diffusion
        # Keep the sample so the next draw is correlated with this one.
        self.x_prev = sample
        return sample

    def reset(self):
        """Restart the process at its configured initial state (or zero)."""
        self.x_prev = self.x_initial if self.x_initial is not None else np.zeros_like(self.mean)
class BufferH:
    """Experience-replay buffer plus the actor/critic update step for DDPG.

    Transitions are stored column-wise in pre-allocated numpy arrays; the
    owning DDPG agent supplies the networks and optimizers used by update().
    """
    def __init__(self, ddpg: DDPG, num_states, num_subgoal, buffer_capacity=100000, batch_size=64):
        # Number of "experiences" to store at max
        self.buffer_capacity = buffer_capacity
        # Num of tuples to train on.
        self.batch_size = batch_size
        # Its tells us num of times record() was called.
        self.buffer_counter = 0
        self.ddpg = ddpg
        self.num_states = num_states
        self.num_subgoal = num_subgoal
        # Instead of list of tuples as the exp.replay concept go
        # We use different np.arrays for each tuple element
        self.state_buffer = np.zeros((self.buffer_capacity, num_states))
        self.subgoal_buffer = np.zeros((self.buffer_capacity, num_subgoal))
        self.reward_buffer = np.zeros((self.buffer_capacity, 1))
        self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))
    def clear(self):
        """Drop all stored transitions and reset the write counter."""
        self.buffer_counter = 0
        self.state_buffer = np.zeros((self.buffer_capacity, self.num_states))
        self.subgoal_buffer = np.zeros((self.buffer_capacity, self.num_subgoal))
        self.reward_buffer = np.zeros((self.buffer_capacity, 1))
        self.next_state_buffer = np.zeros((self.buffer_capacity, self.num_states))
    # Takes (s,g,r,s') obervation tuple as input
    def record(self, obs_tuple):
        # Set index to zero if buffer_capacity is exceeded,
        # replacing old records
        index = self.buffer_counter % self.buffer_capacity
        self.state_buffer[index] = obs_tuple[0]
        self.subgoal_buffer[index] = obs_tuple[1]
        self.reward_buffer[index] = obs_tuple[2]
        self.next_state_buffer[index] = obs_tuple[3]
        self.buffer_counter += 1
    # Eager execution is turned on by default in TensorFlow 2. Decorating with tf.function allows
    # TensorFlow to build a static graph out of the logic and computations in our function.
    # This provides a large speed up for blocks of code that contain many small TensorFlow operations such as this one.
    # @tf.function
    def update(
            self, state_batch, subgoal_batch, reward_batch, next_state_batch,
    ):
        # Training and updating Actor & Critic networks.
        # See Pseudo Code.
        # Critic step: regress Q(s, g) onto the one-step TD target computed
        # with the slowly-tracking target networks.
        with tf.GradientTape() as tape:
            target_actions = self.ddpg.target_actor(next_state_batch, training=True)
            y = reward_batch + self.ddpg.gamma * self.ddpg.target_critic(
                [next_state_batch, target_actions], training=True
            )
            critic_value = self.ddpg.critic_model([state_batch, subgoal_batch], training=True)
            critic_loss = tf.math.reduce_mean(tf.math.square(y - critic_value))
        critic_grad = tape.gradient(critic_loss, self.ddpg.critic_model.trainable_variables)
        self.ddpg.critic_optimizer.apply_gradients(
            zip(critic_grad, self.ddpg.critic_model.trainable_variables)
        )
        # Actor step: ascend the critic's value of the actor's own actions.
        with tf.GradientTape() as tape:
            actions = self.ddpg.actor_model(state_batch, training=True)
            critic_value = self.ddpg.critic_model([state_batch, actions], training=True)
            # Used `-value` as we want to maximize the value given
            # by the critic for our actions
            actor_loss = -tf.math.reduce_mean(critic_value)
        actor_grad = tape.gradient(actor_loss, self.ddpg.actor_model.trainable_variables)
        self.ddpg.actor_optimizer.apply_gradients(
            zip(actor_grad, self.ddpg.actor_model.trainable_variables)
        )
    # We compute the loss and update parameters
    def learn(self):
        """Sample a random minibatch and run one update() step."""
        # Get sampling range
        record_range = min(self.buffer_counter, self.buffer_capacity)
        # Randomly sample indices
        batch_indices = np.random.choice(record_range, self.batch_size)
        # Convert to tensors
        state_batch = tf.convert_to_tensor(self.state_buffer[batch_indices])
        action_batch = tf.convert_to_tensor(self.subgoal_buffer[batch_indices])
        reward_batch = tf.convert_to_tensor(self.reward_buffer[batch_indices])
        reward_batch = tf.cast(reward_batch, dtype=tf.float32)
        next_state_batch = tf.convert_to_tensor(self.next_state_buffer[batch_indices])
        self.update(state_batch, action_batch, reward_batch, next_state_batch)
# This update target parameters slowly
# Based on rate `tau`, which is much less than one.
@tf.function
def update_target(target_weights, weights, tau):
for (a, b) in zip(target_weights, weights):
a.assign(b * tau + a * (1 - tau))
if __name__ == '__main__':
    # Train a DDPG-style agent on the deterministic FrozenLake environment
    # and plot the running-average episodic reward.
    env = frozen_lake.FrozenLakeEnv(is_slippery=False)
    config = {
        'state_dim':1, # env.observation_space.shape[0],
        'state_n' : 16,
        'subgoal_dim': 1, #env.action_space.shape[0],
        'subgoal_n' : 4,
        'std_dev':0.2,
        'critic_lr': 0.0002,
        'actor_lr': 0.0001,
        'gamma' : 0.99,
        'tau': 0.005,
    }
    ddpg = DDPG(config)
    # To store reward history of each episode
    ep_reward_list = []
    # To store average reward history of last few episodes
    avg_reward_list = []
    total_episodes = 200
    # Takes about 4 min to train
    for ep in range(total_episodes):
        prev_state = env.reset()
        episodic_reward = 0
        while True:
            # Uncomment this to see the Actor in action
            # But not in a python notebook.
            # env.render()
            subgoal = ddpg.policy(prev_state)
            # Receive state and reward from environment.
            state, reward, done, info = env.step(subgoal)
            # print(state, reward,done)
            # BUG FIX: record the transition in (s, g, r, s') order.
            # The original call was record(state, subgoal, reward, prev_state),
            # which stored the *successor* state in the "state" slot and the
            # current state in the "next state" slot, corrupting the TD targets
            # computed in BufferH.update().
            # (Assumes DDPG.record forwards its arguments to the replay buffer
            # unchanged -- confirm against the DDPG class.)
            ddpg.record(prev_state, subgoal, reward, state)
            episodic_reward += reward
            # End this episode when `done` is True
            if done:
                break
            prev_state = state
        ddpg.learn()
        ep_reward_list.append(episodic_reward)
        # Mean of last 40 episodes
        avg_reward = np.mean(ep_reward_list[-40:])
        print("Episode * {} * Avg Reward is ==> {}".format(ep, avg_reward))
        avg_reward_list.append(avg_reward)
    # Plotting graph
    # Episodes versus Avg. Rewards
    plt.plot(avg_reward_list)
    plt.xlabel("Episode")
    plt.ylabel("Avg. Epsiodic Reward")
    plt.show()
| [
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dense",
"numpy.ones",
"numpy.mean",
"numpy.random.normal",
"tensorflow.math.square",
"tensorflow.random_uniform_initializer",
"numpy.zeros_like",
"tensorflow.keras.layers.Concatenate",
"tensorflow_probability.distributions.Categorical",
... | [((9677, 9721), 'Lux_Project_Env.frozen_lake.FrozenLakeEnv', 'frozen_lake.FrozenLakeEnv', ([], {'is_slippery': '(False)'}), '(is_slippery=False)\n', (9702, 9721), False, 'from Lux_Project_Env import frozen_lake\n'), ((11278, 11303), 'matplotlib.pyplot.plot', 'plt.plot', (['avg_reward_list'], {}), '(avg_reward_list)\n', (11286, 11303), True, 'import matplotlib.pyplot as plt\n'), ((11308, 11329), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode"""'], {}), "('Episode')\n", (11318, 11329), True, 'import matplotlib.pyplot as plt\n'), ((11334, 11368), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Avg. Epsiodic Reward"""'], {}), "('Avg. Epsiodic Reward')\n", (11344, 11368), True, 'import matplotlib.pyplot as plt\n'), ((11373, 11383), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11381, 11383), True, 'import matplotlib.pyplot as plt\n'), ((1324, 1364), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['self.critic_lr'], {}), '(self.critic_lr)\n', (1348, 1364), True, 'import tensorflow as tf\n'), ((1396, 1435), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['self.actor_lr'], {}), '(self.actor_lr)\n', (1420, 1435), True, 'import tensorflow as tf\n'), ((2078, 2137), 'tensorflow_probability.distributions.Categorical', 'tfp.distributions.Categorical', ([], {'probs': 'prob', 'dtype': 'tf.float32'}), '(probs=prob, dtype=tf.float32)\n', (2107, 2137), True, 'import tensorflow_probability as tfp\n'), ((2434, 2492), 'tensorflow.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-0.003)', 'maxval': '(0.003)'}), '(minval=-0.003, maxval=0.003)\n', (2463, 2492), True, 'import tensorflow as tf\n'), ((2511, 2548), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': '(self.state_dim,)'}), '(shape=(self.state_dim,))\n', (2523, 2548), False, 'from tensorflow.keras import layers\n'), ((2969, 3000), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', 
(2983, 3000), True, 'import tensorflow as tf\n'), ((3096, 3130), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'self.state_dim'}), '(shape=self.state_dim)\n', (3108, 3130), False, 'from tensorflow.keras import layers\n'), ((3319, 3353), 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'self.subgoal_n'}), '(shape=self.subgoal_n)\n', (3331, 3353), False, 'from tensorflow.keras import layers\n'), ((3786, 3838), 'tensorflow.keras.Model', 'tf.keras.Model', (['[state_input, action_input]', 'outputs'], {}), '([state_input, action_input], outputs)\n', (3800, 3838), True, 'import tensorflow as tf\n'), ((5734, 5778), 'numpy.zeros', 'np.zeros', (['(self.buffer_capacity, num_states)'], {}), '((self.buffer_capacity, num_states))\n', (5742, 5778), True, 'import numpy as np\n'), ((5809, 5854), 'numpy.zeros', 'np.zeros', (['(self.buffer_capacity, num_subgoal)'], {}), '((self.buffer_capacity, num_subgoal))\n', (5817, 5854), True, 'import numpy as np\n'), ((5884, 5919), 'numpy.zeros', 'np.zeros', (['(self.buffer_capacity, 1)'], {}), '((self.buffer_capacity, 1))\n', (5892, 5919), True, 'import numpy as np\n'), ((5953, 5997), 'numpy.zeros', 'np.zeros', (['(self.buffer_capacity, num_states)'], {}), '((self.buffer_capacity, num_states))\n', (5961, 5997), True, 'import numpy as np\n'), ((6080, 6129), 'numpy.zeros', 'np.zeros', (['(self.buffer_capacity, self.num_states)'], {}), '((self.buffer_capacity, self.num_states))\n', (6088, 6129), True, 'import numpy as np\n'), ((6160, 6210), 'numpy.zeros', 'np.zeros', (['(self.buffer_capacity, self.num_subgoal)'], {}), '((self.buffer_capacity, self.num_subgoal))\n', (6168, 6210), True, 'import numpy as np\n'), ((6240, 6275), 'numpy.zeros', 'np.zeros', (['(self.buffer_capacity, 1)'], {}), '((self.buffer_capacity, 1))\n', (6248, 6275), True, 'import numpy as np\n'), ((6309, 6358), 'numpy.zeros', 'np.zeros', (['(self.buffer_capacity, self.num_states)'], {}), '((self.buffer_capacity, self.num_states))\n', (6317, 6358), 
True, 'import numpy as np\n'), ((8849, 8896), 'numpy.random.choice', 'np.random.choice', (['record_range', 'self.batch_size'], {}), '(record_range, self.batch_size)\n', (8865, 8896), True, 'import numpy as np\n'), ((8949, 9003), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.state_buffer[batch_indices]'], {}), '(self.state_buffer[batch_indices])\n', (8969, 9003), True, 'import tensorflow as tf\n'), ((9027, 9083), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.subgoal_buffer[batch_indices]'], {}), '(self.subgoal_buffer[batch_indices])\n', (9047, 9083), True, 'import tensorflow as tf\n'), ((9107, 9162), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.reward_buffer[batch_indices]'], {}), '(self.reward_buffer[batch_indices])\n', (9127, 9162), True, 'import tensorflow as tf\n'), ((9186, 9225), 'tensorflow.cast', 'tf.cast', (['reward_batch'], {'dtype': 'tf.float32'}), '(reward_batch, dtype=tf.float32)\n', (9193, 9225), True, 'import tensorflow as tf\n'), ((9253, 9312), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['self.next_state_buffer[batch_indices]'], {}), '(self.next_state_buffer[batch_indices])\n', (9273, 9312), True, 'import tensorflow as tf\n'), ((11068, 11097), 'numpy.mean', 'np.mean', (['ep_reward_list[-40:]'], {}), '(ep_reward_list[-40:])\n', (11075, 11097), True, 'import numpy as np\n'), ((1734, 1761), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['state'], {}), '(state)\n', (1754, 1761), True, 'import tensorflow as tf\n'), ((2563, 2598), 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(1, self.state_dim)'], {}), '((1, self.state_dim))\n', (2577, 2598), False, 'from tensorflow.keras import layers\n'), ((2622, 2636), 'tensorflow.keras.layers.LSTM', 'layers.LSTM', (['(4)'], {}), '(4)\n', (2633, 2636), False, 'from tensorflow.keras import layers\n'), ((2656, 2692), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, 
activation='relu')\n", (2668, 2692), False, 'from tensorflow.keras import layers\n'), ((2712, 2748), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (2724, 2748), False, 'from tensorflow.keras import layers\n'), ((2772, 2857), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['self.subgoal_n'], {'activation': '"""softmax"""', 'kernel_initializer': 'last_init'}), "(self.subgoal_n, activation='softmax', kernel_initializer=last_init\n )\n", (2784, 2857), False, 'from tensorflow.keras import layers\n'), ((3153, 3188), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (3165, 3188), False, 'from tensorflow.keras import layers\n'), ((3222, 3257), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (3234, 3257), False, 'from tensorflow.keras import layers\n'), ((3377, 3412), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (3389, 3412), False, 'from tensorflow.keras import layers\n'), ((3515, 3535), 'tensorflow.keras.layers.Concatenate', 'layers.Concatenate', ([], {}), '()\n', (3533, 3535), False, 'from tensorflow.keras import layers\n'), ((3576, 3612), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (3588, 3612), False, 'from tensorflow.keras import layers\n'), ((3635, 3671), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (3647, 3671), False, 'from tensorflow.keras import layers\n'), ((3695, 3710), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (3707, 3710), False, 'from tensorflow.keras import layers\n'), ((5074, 5098), 'numpy.zeros_like', 'np.zeros_like', (['self.mean'], {}), '(self.mean)\n', (5087, 5098), True, 'import 
numpy as np\n'), ((7356, 7373), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (7371, 7373), True, 'import tensorflow as tf\n'), ((8040, 8057), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (8055, 8057), True, 'import tensorflow as tf\n'), ((706, 717), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (714, 717), True, 'import numpy as np\n'), ((4760, 4798), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'self.mean.shape'}), '(size=self.mean.shape)\n', (4776, 4798), True, 'import numpy as np\n'), ((7763, 7795), 'tensorflow.math.square', 'tf.math.square', (['(y - critic_value)'], {}), '(y - critic_value)\n', (7777, 7795), True, 'import tensorflow as tf\n'), ((8365, 8398), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['critic_value'], {}), '(critic_value)\n', (8384, 8398), True, 'import tensorflow as tf\n'), ((755, 765), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (762, 765), True, 'import numpy as np\n'), ((4741, 4757), 'numpy.sqrt', 'np.sqrt', (['self.dt'], {}), '(self.dt)\n', (4748, 4757), True, 'import numpy as np\n')] |
import itertools
import os
import subprocess
import sys
import typing
import numpy as np
from analysis import plot_constants, video_data, video_utils, video_analysis
from analysis import video_analysis
def gnuplot_write_arrays(stream: typing.TextIO=sys.stdout,
                         *args: np.ndarray) -> None:
    """Write several (time, value, ...) arrays as one merged gnuplot data table.

    Every array must be two dimensional with time in column 0, sorted
    ascending.  Rows are merged onto a common time base; value columns of
    arrays that have no sample at a given time are written as ``NaN``
    placeholders so gnuplot skips them.

    :raises ValueError: if any array is not two dimensional.
    """
    all_times = set()
    for table in args:
        if table.ndim != 2:
            raise ValueError('Arrays must be 2D not shape: {}'.format(table.shape))
        all_times |= set(table[:, 0])
    # One cursor per array, advanced as its rows are consumed.
    cursors = [0] * len(args)
    for t in sorted(all_times):
        fields = ['{:<8.1f}'.format(t)]
        for pos, table in enumerate(args):
            row = cursors[pos]
            if row < len(table) and table[row, 0] == t:
                fields += ['{:8.3f}'.format(v) for v in table[row, 1:]]
                cursors[pos] += 1
            else:
                fields += ['{:8s}'.format('NaN')] * (table.shape[1] - 1)
        stream.write(' '.join(fields))
        stream.write('\n')
__GROUND_SPEED_FITS: typing.Dict[video_data.ErrorDirection, typing.List[float]] = {}

# Index into plot_constants.GROUND_SPEED_OFFSETS for each error direction.
_GS_OFFSET_INDEX = {
    video_data.ErrorDirection.MIN: 0,
    video_data.ErrorDirection.MID: 1,
    video_data.ErrorDirection.MAX: 2,
}


def get_gs_fit(err: video_data.ErrorDirection) -> typing.List[float]:
    """Return the ground speed curve fit for *err*, computing and caching it on first use.

    Replaces the original three duplicated if/elif branches with a table
    lookup, and the bare ``assert 0`` fall-through (which disappears under
    ``python -O``) with an explicit error.

    :raises ValueError: if *err* is not a recognised error direction.
    """
    if err not in __GROUND_SPEED_FITS:
        try:
            offset = plot_constants.GROUND_SPEED_OFFSETS[_GS_OFFSET_INDEX[err]]
        except KeyError:
            raise ValueError('Unknown error direction: {!r}'.format(err))
        __GROUND_SPEED_FITS[err] = video_analysis.ground_speed_curve_fit_with_offset(offset)
    return __GROUND_SPEED_FITS[err]
def plot_file(name: str) -> str:
    """Return the normalised path of *name* inside the sibling ``plots`` directory.

    An empty *name* yields the plots directory itself.
    """
    plots_dir = os.path.join(os.path.dirname(__file__), os.pardir, 'plots')
    if name:
        plots_dir = os.path.join(plots_dir, name)
    return os.path.normpath(plots_dir)
def write_dat_plt_call(name: str, fn_dat: typing.Callable, fn_plt: typing.Callable) -> None:
    """
    Create the plot for name.
    fn_dat is a function that takes a stream and writes the 'name.dat' file. This must return
    a list of strings; if non-empty then they are written into the plt file as {computed_data}.
    fn_plt is a function that returns the 'name.plt' format string, ready to have 'name.dat'
    inserted via the format variable 'file_name'.
    Side effects: writes name.dat and name.plt into the plots directory and
    launches gnuplot on the .plt file.
    """
    print('Writing "{}"'.format(name))
    # Write the data file; fn_dat may return extra computed lines for the .plt file.
    with open(plot_file('{}.dat'.format(name)), 'w') as outfile:
        computed_data_strings = fn_dat(outfile)
    if len(computed_data_strings):
        plot_data = fn_plt().format(
            file_name=name, computed_data='\n'.join(computed_data_strings)
        )
    else:
        plot_data = fn_plt().format(file_name=name)
    plt_file_path = plot_file('{}.plt'.format(name))
    with open(plt_file_path, 'w') as outfile:
        outfile.write(plot_data)
    # Run gnuplot from the plots directory so relative paths in the .plt resolve.
    proc = subprocess.Popen(
        args=['gnuplot', '-p', os.path.basename(plt_file_path)],
        shell=False,
        cwd=os.path.dirname(plt_file_path),
    )
    try:
        outs, errs = proc.communicate(timeout=1)
    except subprocess.TimeoutExpired as err:
        # `gnuplot -p` keeps its window open, so a timeout here is routine;
        # kill the handle and collect whatever output exists.
        print('ERROR:', err)
        proc.kill()
        outs, errs = proc.communicate()
    # print(outs, errs, proc.returncode)
def observer_xy_with_std_from_aspects():
    """Observer position statistics estimated from aircraft aspects.

    Returns ((observer_x_mean, observer_x_std), (observer_y_mean, observer_y_std))
    in metres.
    """
    kwargs = dict(
        baseline=plot_constants.OBSERVER_XY_MINIMUM_BASELINE,
        ignore_first_n=plot_constants.OBSERVER_XY_IGNORE_N_FIRST_BEARINGS,
        t_range=plot_constants.OBSERVER_XY_TIME_RANGE,
    )
    return video_analysis.observer_position_mean_std_from_aspects(**kwargs)
def x_offset():
    """The x offset at t=0 from the runway start based on integrating the ground speed integral.

    Computed as runway length minus the mid-fit distance covered by the end
    of the asphalt.
    """
    distance_to_end = video_analysis.ground_speed_integral(
        0,
        video_data.TIME_VIDEO_END_ASPHALT.time,
        get_gs_fit(video_data.ErrorDirection.MID)
    )
    return video_data.RUNWAY_LEN_M - distance_to_end
def observer_xy():
    """Observer (x, y) in metres from the start of the runway, from full transits."""
    (x_mean, _x_std), (y_mean, _y_std) = \
        video_analysis.observer_position_mean_std_from_full_transits()
    return x_mean, y_mean
def full_transit_labels_and_arrows(arrow_rgb: str, line_width: float) -> typing.List[str]:
    """
    Returns a list of gnuplot directives: one arrow per full transit line
    (from its 'from' point to a point extended past the observer) plus a
    rotated label for each of the transit's from/to points.
    """
    directives = []
    observer_pos = video_utils.XY(*observer_xy())
    arrow_fmt = ('set arrow from {x0:.0f},{y0:.0f} to {x1:.0f},{y1:.0f}'
                 ' nohead lw {lw:0.2f} lc rgb "{arrow_rgb}"')
    label_fmt = 'set label "{label:}" at {x:.1f},{y:.1f} right font ",9" rotate by -30'
    for transit in video_data.GOOGLE_EARTH_FULL_TRANSITS:
        # Extend the sight line well past the observer position.
        far_end = video_utils.transit_line_past_observer(
            transit.frm.xy, transit.to.xy, observer_pos, 250.0
        )
        directives.append(arrow_fmt.format(
            x0=transit.frm.xy.x,
            y0=transit.frm.xy.y,
            x1=far_end.x,
            y1=far_end.y,
            arrow_rgb=arrow_rgb,
            lw=line_width,
        ))
        # Label the from point, then the to point.
        for point in (transit.frm, transit.to):
            directives.append(label_fmt.format(
                label=point.label,
                x=point.xy.x - 50,
                y=point.xy.y,
            ))
    return directives
def full_transit_arrows_with_position_error(arrow_rgb: str,
                                            error: typing.Union[int, float],
                                            line_width: float) -> typing.List[str]:
    """
    Returns a list of gnuplot directives: one dashed arrow per full transit
    line with the given positional error applied to its end points.
    """
    directives = []
    observer_pos = video_utils.XY(*observer_xy())
    template = ('set arrow from {x0:.0f},{y0:.0f} to {x1:.0f},{y1:.0f}'
                ' nohead lw {lw:0.2f} lc rgb "{arrow_rgb}" dt 4')
    for transit in video_data.GOOGLE_EARTH_FULL_TRANSITS:
        shifted_from, shifted_to, _new_bearing = video_utils.transit_point_with_error(
            transit.frm.xy, transit.to.xy, error=error
        )
        far_end = video_utils.transit_line_past_observer(
            shifted_from, shifted_to, observer_pos, 250.0
        )
        directives.append(template.format(
            x0=shifted_from.x,
            y0=shifted_from.y,
            x1=far_end.x,
            y1=far_end.y,
            arrow_rgb=arrow_rgb,
            lw=line_width,
        ))
    return directives
def observer_position_from_full_transits():
    """Mean crossing point of all pairwise transit-line intersections.

    Returns (x_mean, x_half_range, y_mean, y_half_range) where the half
    ranges are (max - min) / 2 of the pairwise intersection coordinates.
    """
    xs = []
    ys = []
    n_transits = len(video_data.GOOGLE_EARTH_FULL_TRANSITS)
    for i, j in itertools.combinations(range(n_transits), 2):
        t1: video_data.FullTransitLine = video_data.GOOGLE_EARTH_FULL_TRANSITS[i]
        t2: video_data.FullTransitLine = video_data.GOOGLE_EARTH_FULL_TRANSITS[j]
        crossing = video_utils.intersect_two_lines(
            t1.frm.xy, t1.to.xy, t2.frm.xy, t2.to.xy,
        )
        xs.append(crossing.x)
        ys.append(crossing.y)
    return sum(xs) / len(xs), (max(xs) - min(xs)) / 2.0, \
        sum(ys) / len(ys), (max(ys) - min(ys)) / 2.0
def get_gs_fits_corrected() -> typing.List[typing.List[float]]:
    """Ground speed fits with an offset of +5 knots and a tolerance of ±5 knots.

    Returns [min_fit, mid_fit, max_fit].
    """
    return [
        video_analysis.ground_speed_curve_fit_with_offset(
            video_utils.knots_to_m_p_s(5.0 + delta)
        )
        for delta in (-5.0, 0.0, 5.0)
    ]
def return_equations_of_motion() -> typing.List[str]:
    """Human-readable equations of motion from the mid (corrected) ground speed fit."""
    mid_fit = get_gs_fits_corrected()[1]
    return [
        'Ground speed (m/s) = {:.1f} {:+.2f} * t {:+.5f} * t^2 {:+.7f} * t^3'.format(*mid_fit),
        # Acceleration is the term-wise derivative of the speed polynomial.
        'Acceleration (m/s^2) = {:.2f} {:+.4f} * t {:+.6f} * t^2'.format(
            mid_fit[1],
            2 * mid_fit[2],
            3 * mid_fit[3],
        ),
        # Distance is the term-wise integral, offset by the 1110 m start position.
        'Distance (m) = 1110 + {:.1f} * t {:+.3f} * t^2 {:+.5f} * t^3 {:+.7f} * t^4'.format(
            mid_fit[0],
            mid_fit[1] / 2.0,
            mid_fit[2] / 3.0,
            mid_fit[3] / 4.0,
        ),
    ]
def markdown_table_equations_of_motion() -> typing.Tuple[typing.List[str], str]:
    """
    Returns equations of motion as a list of strings and the title in markdown format.
    For example::
        | Measure | Units | Formulae | Tolerance | Notes |
        | --- | --- | --- | --- | --- |
        | Ground speed | m/s | 58.3 + 1.48 * t - 0.00794 * t^2 - 0.0000418 * t^3 | ±2.5 | See note 1 below. |
        | Acceleration | m/s^s | 1.48 - 0.0159 * t - 0.000125 * t^2 | ±0.17 | See note 2 below. |
        | Distance | m | 1110 + 58.3 * t + 0.741 * t^2 - 0.00265 * t^3 - 0.0000104 * t^4 | ±25 for t>=0 | See note 3 below. |
    """
    gs_fit = get_gs_fits_corrected()[1]
    # Distance constant: runway length minus the distance covered by end of asphalt.
    offset_video_start = video_data.RUNWAY_LEN_M - video_analysis.ground_speed_integral(
        0, video_data.TIME_VIDEO_END_ASPHALT.time, gs_fit
    )
    speed_coeffs = [gs_fit[i] for i in range(4)]
    # Term-wise derivative of the speed polynomial.
    accel_coeffs = [i * gs_fit[i] for i in range(1, 4)]
    # Term-wise integral of the speed polynomial, plus the start offset.
    dist_coeffs = [offset_video_start] + [gs_fit[i] / (i + 1) for i in range(4)]
    rows = [
        '| Measure | Units | Formulae | Tolerance | Notes |',
        '| --- | --- | --- | --- | --- |',
        '| Ground speed | m/s | {:.2e} {:+.2e} * t {:+.2e} * t^2 {:+.2e} * t^3 | {} | {} |'.format(
            *speed_coeffs,
            '±2.5',
            'See note 1 below.',
        ),
        '| Acceleration | m/s^2 | {:.2e} {:+.2e} * t {:+.2e} * t^2 | {} | {} |'.format(
            *accel_coeffs,
            '±0.17',
            'See note 2 below.',
        ),
        '| Distance | m | {:.2e} + {:.2e} * t {:+.2e} * t^2 {:+.2e} * t^3 {:+.2e} * t^4 | {} | {} |'.format(
            *dist_coeffs,
            '±25 for t>=0',
            'See note 3 below.',
        ),
    ]
    return rows, 'Equations of Motion'
def get_distances_min_mid_max(offset_distance_at_t: float) -> typing.Tuple[np.ndarray]:
    """Returns a tuple of three np.ndarray of (time, distance) corresponding to the
    -10, mid, +10 knot fits of ground speed.
    If offset_distance_at_t is non-zero an offset will be applied, that is the
    runway length - the distance at that offset time.
    So if the time is video_data.TIME_VIDEO_END_ASPHALT.time the distance is from the runway start.
    """
    fits = [get_gs_fit(err) for err in video_data.ErrorDirection]
    if offset_distance_at_t != 0.0:
        # Offsets of: [3240 - 1919, 3240 - 2058, 3240 - 2197,]
        start_offsets = [
            video_data.RUNWAY_LEN_M - video_analysis.ground_speed_integral(
                0, offset_distance_at_t, fit)
            for fit in fits
        ]
    else:
        start_offsets = [0.0] * len(fits)
    result = []  # Three different fits: -10, 0, +10 knots
    for fit, start_offset in zip(fits, start_offsets):
        # Start at the (last) real root of the speed polynomial, sample at 1 s steps.
        t = np.roots(list(reversed(fit)))[-1]
        sample_times = []
        while t < plot_constants.EXTRAPOLATED_RANGE.stop:
            sample_times.append(t)
            t += 1
        # Add as special cases: t=0, t=27+24/30 - end of asphalt, t=end.
        for special_t in (
                0.0,
                video_data.TIME_VIDEO_NOSEWHEEL_OFF.time,
                video_data.TIME_VIDEO_MAINWHEEL_OFF.time,
                video_data.TIME_VIDEO_END_ASPHALT.time,
                video_data.TIME_VIDEO_END.time,
        ):
            if special_t not in sample_times:
                sample_times.append(special_t)
        rows = [
            (t, video_analysis.ground_speed_integral(0, t, fit) + start_offset)
            for t in sorted(sample_times)
        ]
        result.append(np.array(rows))
    return tuple(result)
"analysis.video_utils.transit_point_with_error",
"os.path.basename",
"os.path.dirname",
"analysis.video_utils.knots_to_m_p_s",
"analysis.video_utils.transit_line_past_observer",
"analysis.video_utils.intersect_two_lines",
"analysis.video_analysis.observer_position_mean_std_from_full_transits",
"numpy.... | [((3887, 4124), 'analysis.video_analysis.observer_position_mean_std_from_aspects', 'video_analysis.observer_position_mean_std_from_aspects', ([], {'baseline': 'plot_constants.OBSERVER_XY_MINIMUM_BASELINE', 'ignore_first_n': 'plot_constants.OBSERVER_XY_IGNORE_N_FIRST_BEARINGS', 't_range': 'plot_constants.OBSERVER_XY_TIME_RANGE'}), '(baseline=\n plot_constants.OBSERVER_XY_MINIMUM_BASELINE, ignore_first_n=\n plot_constants.OBSERVER_XY_IGNORE_N_FIRST_BEARINGS, t_range=\n plot_constants.OBSERVER_XY_TIME_RANGE)\n', (3941, 4124), False, 'from analysis import video_analysis\n'), ((4787, 4849), 'analysis.video_analysis.observer_position_mean_std_from_full_transits', 'video_analysis.observer_position_mean_std_from_full_transits', ([], {}), '()\n', (4847, 4849), False, 'from analysis import video_analysis\n'), ((10442, 10534), 'analysis.video_analysis.ground_speed_integral', 'video_analysis.ground_speed_integral', (['(0)', 'video_data.TIME_VIDEO_END_ASPHALT.time', 'gs_fit'], {}), '(0, video_data.TIME_VIDEO_END_ASPHALT.\n time, gs_fit)\n', (10478, 10534), False, 'from analysis import video_analysis\n'), ((5379, 5480), 'analysis.video_utils.transit_line_past_observer', 'video_utils.transit_line_past_observer', (['transit_line.frm.xy', 'transit_line.to.xy', 'observer', '(250.0)'], {}), '(transit_line.frm.xy, transit_line.to\n .xy, observer, 250.0)\n', (5417, 5480), False, 'from analysis import plot_constants, video_data, video_utils, video_analysis\n'), ((7049, 7144), 'analysis.video_utils.transit_point_with_error', 'video_utils.transit_point_with_error', (['transit_line.frm.xy', 'transit_line.to.xy'], {'error': 'error'}), '(transit_line.frm.xy, transit_line.to.\n xy, error=error)\n', (7085, 7144), False, 'from analysis import plot_constants, video_data, video_utils, video_analysis\n'), ((7182, 7255), 'analysis.video_utils.transit_line_past_observer', 'video_utils.transit_line_past_observer', (['new_from', 'new_to', 'observer', '(250.0)'], {}), '(new_from, new_to, 
observer, 250.0)\n', (7220, 7255), False, 'from analysis import plot_constants, video_data, video_utils, video_analysis\n'), ((8046, 8148), 'analysis.video_utils.intersect_two_lines', 'video_utils.intersect_two_lines', (['transit1.frm.xy', 'transit1.to.xy', 'transit2.frm.xy', 'transit2.to.xy'], {}), '(transit1.frm.xy, transit1.to.xy, transit2.\n frm.xy, transit2.to.xy)\n', (8077, 8148), False, 'from analysis import plot_constants, video_data, video_utils, video_analysis\n'), ((1477, 1571), 'analysis.video_analysis.ground_speed_curve_fit_with_offset', 'video_analysis.ground_speed_curve_fit_with_offset', (['plot_constants.GROUND_SPEED_OFFSETS[0]'], {}), '(plot_constants.\n GROUND_SPEED_OFFSETS[0])\n', (1526, 1571), False, 'from analysis import video_analysis\n'), ((2277, 2302), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2292, 2302), False, 'import os\n'), ((3426, 3456), 'os.path.dirname', 'os.path.dirname', (['plt_file_path'], {}), '(plt_file_path)\n', (3441, 3456), False, 'import os\n'), ((8634, 8672), 'analysis.video_utils.knots_to_m_p_s', 'video_utils.knots_to_m_p_s', (['(5.0 + -5.0)'], {}), '(5.0 + -5.0)\n', (8660, 8672), False, 'from analysis import plot_constants, video_data, video_utils, video_analysis\n'), ((8755, 8792), 'analysis.video_utils.knots_to_m_p_s', 'video_utils.knots_to_m_p_s', (['(5.0 + 0.0)'], {}), '(5.0 + 0.0)\n', (8781, 8792), False, 'from analysis import plot_constants, video_data, video_utils, video_analysis\n'), ((8875, 8912), 'analysis.video_utils.knots_to_m_p_s', 'video_utils.knots_to_m_p_s', (['(5.0 + 5.0)'], {}), '(5.0 + 5.0)\n', (8901, 8912), False, 'from analysis import plot_constants, video_data, video_utils, video_analysis\n'), ((13137, 13152), 'numpy.array', 'np.array', (['array'], {}), '(array)\n', (13145, 13152), True, 'import numpy as np\n'), ((1687, 1781), 'analysis.video_analysis.ground_speed_curve_fit_with_offset', 'video_analysis.ground_speed_curve_fit_with_offset', 
(['plot_constants.GROUND_SPEED_OFFSETS[1]'], {}), '(plot_constants.\n GROUND_SPEED_OFFSETS[1])\n', (1736, 1781), False, 'from analysis import video_analysis\n'), ((2188, 2213), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2203, 2213), False, 'import os\n'), ((3359, 3390), 'os.path.basename', 'os.path.basename', (['plt_file_path'], {}), '(plt_file_path)\n', (3375, 3390), False, 'import os\n'), ((12142, 12211), 'analysis.video_analysis.ground_speed_integral', 'video_analysis.ground_speed_integral', (['(0)', 'offset_distance_at_t', 'gs_fit'], {}), '(0, offset_distance_at_t, gs_fit)\n', (12178, 12211), False, 'from analysis import video_analysis\n'), ((1897, 1991), 'analysis.video_analysis.ground_speed_curve_fit_with_offset', 'video_analysis.ground_speed_curve_fit_with_offset', (['plot_constants.GROUND_SPEED_OFFSETS[2]'], {}), '(plot_constants.\n GROUND_SPEED_OFFSETS[2])\n', (1946, 1991), False, 'from analysis import video_analysis\n'), ((13038, 13088), 'analysis.video_analysis.ground_speed_integral', 'video_analysis.ground_speed_integral', (['(0)', 't', 'gs_fit'], {}), '(0, t, gs_fit)\n', (13074, 13088), False, 'from analysis import video_analysis\n')] |
# coding: utf-8
# In[1]:
import os
import datetime
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import statsmodels.api as sm
import skimage
from skimage import data, img_as_float
import skimage.transform as trans
import rasterio
import fiona
import cartopy
import numpy as np
# In[2]:
import sys
import cartopy.crs as ccrs
import cartopy.feature as cfeature
sys.path.append(r"..\..")
import pyphenocam
sys.path.append(r"J:\Projects\NCCSC\phenocam\Tools\DaymetPy\daymetpy")
import daymetpy
# In[3]:
# Paths and site identifier for the phenocam study area.
base_dname = r"J:\Projects\NCCSC\phenocam\DerivedData\nationalelkrefuge"
site_name = "nationalelkrefuge"
# In[4]:
site = pyphenocam.dataaccess.get_site(site_name)
# Notebook cell echo of the site coordinates; has no effect when run as a script.
site.x, site.y
# In[5]:
# Date of interest: year and day-of-year used to pick the Landsat NDVI file below.
year = 2016
jday = 153
# ### Read in landsat data
# In[6]:
get_ipython().magic('matplotlib inline')  # nbconvert artifact: only works inside IPython
# Locate the Landsat scene subset and build the NDVI file name for (year, jday).
landsat_dname = os.path.join(base_dname, 'Landsat')
scene_name = [f for f in os.listdir(landsat_dname) if f.startswith('LC')][0]
landsat_dname = os.path.join(landsat_dname, 'SceneSubset')
landsat_fname = os.path.join(landsat_dname, scene_name[:9]+'{}{}LGN00_ndvi.tif'.format(year, jday))
scene_name = os.path.split(landsat_fname)[1].split('_')[0]
# Read the NDVI band as a masked array.
landsat = rasterio.open(landsat_fname)
landsat_data = np.squeeze(landsat.read(masked=True))
# Parse the UTM zone number out of the raster's CRS WKT string.
utm_zone = landsat.crs.wkt.split('UTM zone ')[1].split('N"')[0]
landsat_proj = ccrs.UTM(zone=utm_zone, globe=ccrs.Globe(datum='WGS84',
                                    ellipse='WGS84'))
landsat_extents = [landsat.bounds.left, landsat.bounds.right,
                   landsat.bounds.bottom, landsat.bounds.top]
print(scene_name)
# Quick-look plot of the NDVI subset in its native UTM projection.
fig = plt.figure(figsize=(10, 10))
ax = plt.axes(projection=landsat_proj)
im = ax.imshow(landsat_data, origin='upper', extent=landsat_extents, transform=landsat_proj,
               cmap=mpl.cm.RdYlGn, interpolation='none')
plt.colorbar(im)
# ### Read in 30m elevation data
# In[8]:
# Read the 30 m NED elevation subset as a masked array.
utm_dname = os.path.join(base_dname, "ArcScene", "InputData", "UTM")
elev_subset_fname = os.path.join(utm_dname, "NED_30m.tif")
elev = rasterio.open(elev_subset_fname)
dem_data = np.squeeze(elev.read(masked=True))
elev_extents = [elev.bounds.left, elev.bounds.right, elev.bounds.bottom, elev.bounds.top]
# In[9]:
get_ipython().magic('matplotlib notebook')  # nbconvert artifact: only works inside IPython
# Quick-look plot of the DEM (extent/projection deliberately commented out).
fig = plt.figure(figsize=(10, 10))
ax = plt.axes()#projection=landsat_proj)
im = ax.imshow(dem_data, origin='upper', #extent=elev_extents, transform=landsat_proj,
               cmap=mpl.cm.gist_earth, interpolation='none')
plt.colorbar(im)
# # Fit a plane to the pixels around a single pixel
# In[9]:
from affine import Affine
# Pixel-centre affine transform from (row, col) index to map x/y.
# NOTE(review): this uses the *Landsat* raster's affine to map DEM indices
# below -- presumably the Landsat subset and NED_30m grids are co-registered
# on the same 30 m UTM grid; verify.
T1 = landsat.affine * Affine.translation(0.5, 0.5)
rc2xy = lambda rc: (rc[1], rc[0]) * T1
# In[10]:
# Map x/y coordinates for every DEM cell, shape (2, nrows, ncols).
dem_indices = np.indices(dem_data.shape)
dem_xy = np.apply_along_axis(func1d=rc2xy ,arr=dem_indices, axis=0)
# In[11]:
# Target pixel (row, col) for the plane fit.
row = 242
col = 127
def get_pix_data(row, col):
    """Return the 3x3 elevation window and matching map coordinates centred on (row, col)."""
    window = dem_data[row - 1:row + 2, col - 1:col + 2]
    coords = dem_xy[:, row - 1:row + 2, col - 1:col + 2]
    return window, coords
# Sample the 3x3 neighbourhood (elevations and coordinates) around the chosen pixel.
data, xy = get_pix_data(row, col)
# In[12]:
def set_aspect_equal_3d(ax):
    """Work around matplotlib's broken equal-aspect handling on 3D axes.

    Recenters each axis on its midpoint, then widens x, y and z to a common
    half-width so one data unit spans the same on-screen distance on every axis.
    """
    limits = (ax.get_xlim3d(), ax.get_ylim3d(), ax.get_zlim3d())
    centers = [np.mean(lims) for lims in limits]
    # Largest distance from any limit to its axis midpoint becomes the
    # shared half-width for all three axes.
    radius = max(abs(bound - center)
                 for lims, center in zip(limits, centers)
                 for bound in lims)
    ax.set_xlim3d([centers[0] - radius, centers[0] + radius])
    ax.set_ylim3d([centers[1] - radius, centers[1] + radius])
    ax.set_zlim3d([centers[2] - radius, centers[2] + radius])
# In[13]:
def calc_aspect(est):
    """Convert a fitted-plane OLS result into a compass aspect in degrees.

    ``est.params`` is (intercept, d_elev/dx, d_elev/dy) from the 3x3 plane
    fit.  Returns a bearing measured clockwise from north (ArcGIS-style
    aspect convention).
    """
    _, dz_dx, dz_dy = est.params
    dz_dy = -dz_dy  # account for descending UTM y coordinates (row order)
    # Math-convention downslope angle (57.29578 = degrees per radian).
    math_angle = 57.29578 * np.arctan2(dz_dy, -1 * dz_dx)
    # Remap to a compass bearing.  The original < 0 and "else" branches were
    # the same expression, so a single comparison suffices.
    if math_angle > 90.0:
        return 360.0 - math_angle + 90.0
    return 90.0 - math_angle
def calc_slope(est):
    """Return the fitted plane's maximum gradient magnitude, via np.degrees.

    ``est.params`` is (intercept, d_elev/dx, d_elev/dy) from the 3x3 plane fit.

    NOTE(review): np.degrees is applied directly to the rise/run gradient
    rather than to arctan(gradient), so this is not a true slope angle.
    Downstream callers undo it with np.deg2rad (recovering the raw gradient),
    so the behavior is preserved here — confirm intent before "fixing".
    """
    _, dz_dx, dz_dy = est.params
    dz_dy = -dz_dy  # account for descending UTM y coordinates (no effect once squared)
    gradient = (dz_dx * dz_dx + dz_dy * dz_dy) ** 0.5
    return np.degrees(gradient)
# aspect = calc_aspect(est)
# slope = calc_slope(est)
# print calc_aspect(est)
# print calc_slope(est)
# In[14]:
# --- Imports and target pixel for the 3-D slope demonstration plot ---
get_ipython().magic('matplotlib inline')
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
# TODO add image and put this code into an appendix at the bottom
from mpl_toolkits.mplot3d import Axes3D
row, col = 210, 131
def plot_3d_slope(row, col, ax, fall_line=False):
    """Render the 3x3 DEM window at (row, col) on 3-D axes *ax*.

    Fits an OLS plane through the nine elevations, draws the plane, the
    elevation points, and bar columns for each cell; with fall_line=True
    also draws a red line in the downslope (aspect) direction.

    NOTE(review): here ``fall_z`` uses calc_slope's output directly as a
    gradient; the later redefinition of this function wraps it in
    np.deg2rad — the two versions disagree, and the later one wins at
    runtime.  Confirm which is intended.
    """
    data, xy = get_pix_data(row, col)
    # Design matrix: one row per cell, columns = (const, x, y).
    X = xy.reshape(2, 9).T
    y = data.flatten()
    X = sm.add_constant(X)
    est = sm.OLS(y, X).fit()
    xx1, xx2 = xy[0,:,:], xy[1,:,:]
    # plot the hyperplane by evaluating the parameters on the grid
    Z = est.params[0] + est.params[1] * xx1 + est.params[2] * xx2
    # plot hyperplane
    surf = ax.plot_surface(xx1, xx2, Z, cmap=plt.cm.RdBu_r, alpha=0.6, linewidth=0)
    # plot data points - points over the HP are white, points below are black
    resid = y - est.predict(X)
    ax.scatter(xx1, xx2, y, color='black', alpha=1.0, facecolor='white')
    # Bar columns: 30 m square footprints centered on each cell (-15/+15).
    xpos2 = xx1.flatten()-15
    ypos2 = xx2.flatten()-15
    zpos2 = np.repeat(y.min(), y.flatten().shape).reshape(y.flatten().shape)
    dx2 = 30 * np.ones_like(xx1.flatten())
    dy2 = 30 * np.ones_like(xx2.flatten())
    dz2 = y.flatten() - y.min()
    ax.bar3d(xpos2, ypos2, zpos2, dx2, dy2, dz2, color='b', zsort='average', alpha=0.10)
    if fall_line:
        # Index 4 is the center cell of the flattened 3x3 window.
        center_x = xpos2[4]+15
        center_y = ypos2[4]+15
        center_z = y.flatten()[4]
        aspect = calc_aspect(est)
        slope = calc_slope(est)
        # Step 30 m horizontally in the aspect direction.
        dx = 30 * np.sin(np.deg2rad(aspect))
        dy = 30 * np.cos(np.deg2rad(aspect))
        fall_x = center_x+dx
        fall_y = center_y+dy
        fall_dist = distance.euclidean((center_x, center_y),
                                       (fall_x, fall_y))
        fall_z = center_z -(fall_dist*slope)
        ax.plot((center_x, fall_x),
                (center_y, fall_y),
                (center_z, fall_z), color='r', lw=6, alpha=0.5)
# create matplotlib 3d axes
fig = plt.figure(figsize=(12, 8))
ax = Axes3D(fig, azim=-115, elev=15)
# ax.plot((xpos2[4]+15, xpos2[5]+15),
#         (ypos2[4]+15, ypos2[5]),
#         (y.flatten()[4], y.flatten()[5]), color='r', solid_capstyle="projecting")
# set axis labels
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('elev')
# Draw the plane-fit visualization for the (row, col) chosen above.
plot_3d_slope(row, col, ax)
set_aspect_equal_3d(ax)
# In[ ]:
# In[7]:
# --- Inline (non-function) version of the plane fit + fall-line plot,
# --- then load the matching PhenoCam photos and corrected NDVI ---
get_ipython().magic('matplotlib inline')
from scipy.spatial import distance
row, col = 207, 132
data, xy = get_pix_data(row, col)
X = xy.reshape(2, 9).T
y = data.flatten()
## fit a OLS model with intercept on TV and Radio
X = sm.add_constant(X)
est = sm.OLS(y, X).fit()
xx1, xx2 = xy[0,:,:], xy[1,:,:]
# plot the hyperplane by evaluating the parameters on the grid
Z = est.params[0] + est.params[1] * xx1 + est.params[2] * xx2
# create matplotlib 3d axes
fig = plt.figure(figsize=(12, 8))
ax = Axes3D(fig, azim=-115, elev=15)
# plot hyperplane
surf = ax.plot_surface(xx1, xx2, Z, cmap=plt.cm.RdBu_r, alpha=0.6, linewidth=0)
# plot data points - points over the HP are white, points below are black
resid = y - est.predict(X)
ax.scatter(xx1, xx2, y, color='black', alpha=1.0, facecolor='white')
xpos2 = xx1.flatten()-15
ypos2 = xx2.flatten()-15
zpos2 = np.repeat(y.min(), y.flatten().shape).reshape(y.flatten().shape)
dx2 = 30 * np.ones_like(xx1.flatten())
dy2 = 30 * np.ones_like(xx2.flatten())
dz2 = y.flatten() - y.min()
ax.bar3d(xpos2, ypos2, zpos2, dx2, dy2, dz2, color='b', zsort='average', alpha=0.10)
center_x = xpos2[4]+15
center_y = ypos2[4]+15
center_z = y.flatten()[4]
aspect = calc_aspect(est)
slope = calc_slope(est)
dx = 30 * np.sin(np.deg2rad(aspect))
dy = 30 * np.cos(np.deg2rad(aspect))
fall_x = center_x+dx
fall_y = center_y+dy
fall_dist = distance.euclidean((center_x, center_y),
                               (fall_x, fall_y))
# Unlike the function above, this cell converts the slope with deg2rad
# (recovering the raw gradient from calc_slope's degrees round trip).
fall_z = center_z -(fall_dist*np.deg2rad(slope))
ax.plot((center_x, fall_x),
        (center_y, fall_y),
        (center_z, fall_z), color='r', lw=6, alpha=0.5)
# set axis labels
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('elev')
set_aspect_equal_3d(ax)
# ## Now that we've got all that worked out run it through all the landsat pixels in our image
# In[9]:
landsat_fishnet_fname = os.path.join(base_dname, "ArcScene", "landsat_fishnet.bmp")
landsat_index_fname = os.path.join(base_dname, "ArcScene", "landsat_subset_index.bmp")
phenosite = pyphenocam.dataaccess.get_site(site_name)
closest_date = datetime.datetime(year, 1, 1, 12) + datetime.timedelta(jday)
print(closest_date)
closest_photo_fname = phenosite.get_closest_fname(closest_date)
closest_photo_fname = phenosite.get_local_image_fname(closest_photo_fname, IR=False)
# NOTE(review): closest_photo_fname was just overwritten with the local
# (non-IR) filename, so the IR lookup below derives from the wrong value —
# confirm against pyphenocam's API.
closest_photo_fname_ir = phenosite.get_local_image_fname(closest_photo_fname, IR=True)
# In[10]:
get_ipython().magic('matplotlib inline')
exposure = pyphenocam.headerextraction.get_exposure(closest_photo_fname)
exposure_ir = pyphenocam.headerextraction.get_exposure(closest_photo_fname_ir)
print("Extracted exposure: ", exposure)
sample_photo_fname = phenosite.get_closest_fname(closest_date)
local_fname = phenosite.get_local_image_fname(sample_photo_fname)
local_fname_ir = phenosite.get_local_image_fname(sample_photo_fname, IR=True)
sample_image = phenosite.get_local_image(sample_photo_fname)
sample_image_ir = phenosite.get_local_image(sample_photo_fname, IR=True)
# Exposure-corrected NDVI from the visible + IR photo pair.
corrected_ndvi = pyphenocam.imageprocessing._get_corrected_ndvi(local_fname,
                                                                local_fname_ir,
                                                                float(exposure),
                                                                float(exposure_ir))
def plot_compare():
    """Show the raw PhenoCam photo next to its corrected-NDVI image.

    Uses the module-level ``sample_image`` and ``corrected_ndvi`` arrays;
    both panels share axes so zooming stays in sync.
    """
    fig = plt.figure(figsize=(18, 8))
    ax = fig.add_subplot(121)
    ax.imshow(sample_image)
    pyphenocam.plotting.format_photo_axes(ax)
    ax2 = fig.add_subplot(122, sharex=ax, sharey=ax)
    pyphenocam.plotting.format_photo_axes(ax2)
    im = ax2.imshow(corrected_ndvi, vmin=0, cmap=mpl.cm.RdYlGn)
    fig.colorbar(im)
# In[11]:
# --- Side-by-side photo / corrected NDVI figure, then load the index grid
# --- that maps each photo pixel to a Landsat (col, row) ---
get_ipython().magic('matplotlib inline')
fig = plt.figure(figsize=(12, 3))
ax = fig.add_subplot(121)
ax.imshow(sample_image)
pyphenocam.plotting.format_photo_axes(ax)
ax2 = fig.add_subplot(122)
pyphenocam.plotting.format_photo_axes(ax2)
exposure = pyphenocam.headerextraction.get_exposure(local_fname)
exposure_ir = pyphenocam.headerextraction.get_exposure(local_fname_ir)
print("Extracted exposure: ", exposure)
corrected_ndvi = pyphenocam.imageprocessing._get_corrected_ndvi(local_fname,
                                                                local_fname_ir,
                                                                float(exposure),
                                                                float(exposure_ir))
im = ax2.imshow(corrected_ndvi, vmin=0, cmap=mpl.cm.RdYlGn)
fig.colorbar(im)
# In[12]:
# Index bitmap: channel 0 holds the Landsat column, channel 1 the row for
# each photo pixel; values > 254 are treated as "no pixel" and masked.
index_grid = skimage.io.imread(landsat_index_fname)
index_grid = trans.resize(index_grid, (sample_image.shape[0], sample_image.shape[1], 3), preserve_range=True, order=0)
index_grid = np.ma.masked_where(index_grid > 254, index_grid)
# In[13]:
# Highlight the photo pixels belonging to one Landsat cell (54, 148).
single_pixel = np.logical_and(index_grid[:,:,0]==54, index_grid[:,:,1]==148)
single_pixel = np.ma.asarray(trans.resize(single_pixel,
                (sample_image.shape[0], sample_image.shape[1]), preserve_range=False))#[:,:,1]
single_pixel.mask = single_pixel==False
fig, ax = plt.subplots(1, figsize=(20,10))
ax.imshow(sample_image)
ax.imshow(single_pixel, alpha = 1.0, cmap=mpl.cm.Reds, interpolation='none')
pyphenocam.plotting.format_photo_axes(ax)
# In[ ]:
# --- Map view of the scene with the camera location annotated ---
get_ipython().magic('matplotlib inline')
import numpy as np
import matplotlib.pyplot as plt
import cartopy
import cartopy.crs as ccrs
import cartopy.feature as cfeature
ax_proj = ccrs.LambertConformal()
# NOTE(review): UTM zone hard-coded to 12 here, whereas earlier it was
# parsed from the scene's CRS — confirm they agree.
landsat_proj = ccrs.UTM(zone=12, globe=ccrs.Globe(datum='WGS84',
                                                  ellipse='WGS84'))
geodetic = ccrs.Geodetic()
fig = plt.figure(figsize=(15, 15))
ax_extent = [phenosite.x - 0.02, phenosite.x + 0.02, phenosite.y - 0.002, phenosite.y + 0.040]
landsat_extents = [landsat.bounds.left, landsat.bounds.right, landsat.bounds.bottom, landsat.bounds.top]
ax = plt.axes(projection=ax_proj)
ax.set_extent(ax_extent, ccrs.Geodetic())
im = ax.imshow(landsat_data, origin='upper', extent=landsat_extents, transform=landsat_proj, interpolation='none',
               cmap=mpl.cm.RdYlGn)
# # ax.set_xmargin(0.05)
# # ax.set_ymargin(0.10)
# mark a known place to help us geo-locate ourselves
locx, locy = list(ax_proj.transform_point(phenosite.x, phenosite.y, geodetic))
ax.plot(locx, locy, 'bo', markersize=15, color='red', alpha=0.5)
ax.text(locx+75, locy-15, 'Elk Refuge camera location', bbox={'facecolor':'white', 'alpha':0.5, 'pad':5})
ax.coastlines()
ax.add_feature(cartopy.feature.OCEAN)
ax.add_feature(cartopy.feature.BORDERS)
ax.gridlines()
states_provinces = cfeature.NaturalEarthFeature(
    category='cultural',
    name='admin_1_states_provinces_lines',
    scale='50m',
    facecolor='none')
ax.add_feature(states_provinces, edgecolor='gray')
plt.colorbar(im)
# lon_formatter = LongitudeFormatter(zero_direction_label=True)
# lat_formatter = LatitudeFormatter()
# ax.xaxis.set_major_formatter(lon_formatter)
# ax.yaxis.set_major_formatter(lat_formatter)
# In[ ]:
def get_slope_aspect(col, row):
    """Fit an OLS plane to the 3x3 DEM window at (row, col).

    Returns (slope, aspect) as produced by calc_slope / calc_aspect on the
    fitted plane.  Note the (col, row) argument order, mirroring callers.
    """
    window, coords = get_pix_data(row, col)
    predictors = sm.add_constant(coords.reshape(2, 9).T)
    fit = sm.OLS(window.flatten(), predictors).fit()
    return calc_slope(fit), calc_aspect(fit)
# In[ ]:
# --- Camera position in map coordinates and its elevation above the DEM ---
camx, camy = list(landsat_proj.transform_point(phenosite.x, phenosite.y, geodetic))
# NOTE(review): ~elev.affine yields float col/row; indexing dem_data with
# floats fails on modern numpy — may rely on an old numpy. Confirm.
camcol, camrow = ~elev.affine * (camx, camy)
cam_elev = dem_data[camrow, camcol]
TOWER_HEIGHT = 3 #meters
cam_elev += TOWER_HEIGHT
print(cam_elev)
# In[ ]:
import math
def dist3d(x1, y1, z1, x2, y2, z2):
    """Euclidean distance between two 3-D points."""
    dx = x2 - x1
    dy = y2 - y1
    dz = z2 - z1
    return math.sqrt(dx * dx + dy * dy + dz * dz)
def azimuth3d(x1, y1, z1, x2, y2, z2):
    """Horizontal bearing in degrees from point 1 toward point 2.

    Measured from the +y axis toward +x (atan2(dx, dy), compass-style,
    range -180..180); the z arguments are accepted but unused.
    """
    east = x2 - x1
    north = y2 - y1
    return math.degrees(math.atan2(east, north))
def zenith3d(x1, y1, z1, x2, y2, z2, dist):
    """Vertical angle in degrees of point 2 relative to point 1.

    *dist* is the 3-D distance between the points (as from dist3d).
    NOTE(review): despite the name this is an elevation/plunge angle
    (0 = level, +90 = straight up), not a zenith angle — confirm intent.
    """
    rise = z2 - z1
    return math.degrees(math.asin(rise / dist))
# In[25]:
def plot_3d_slope(row, col, ax, fall_line=False):
    """Render the 3x3 DEM window at (row, col) on 3-D axes *ax*.

    Redefinition of the earlier plot_3d_slope: identical except that the
    fall line converts calc_slope's output with np.deg2rad (recovering the
    raw gradient).  Being later in the file, this version wins at runtime.
    """
    data, xy = get_pix_data(row, col)
    # Design matrix: one row per cell, columns = (const, x, y).
    X = xy.reshape(2, 9).T
    y = data.flatten()
    X = sm.add_constant(X)
    est = sm.OLS(y, X).fit()
    xx1, xx2 = xy[0,:,:], xy[1,:,:]
    # plot the hyperplane by evaluating the parameters on the grid
    Z = est.params[0] + est.params[1] * xx1 + est.params[2] * xx2
    # plot hyperplane
    surf = ax.plot_surface(xx1, xx2, Z, cmap=plt.cm.RdBu_r, alpha=0.6, linewidth=0)
    # plot data points - points over the HP are white, points below are black
    resid = y - est.predict(X)
    ax.scatter(xx1, xx2, y, color='black', alpha=1.0, facecolor='white')
    # Bar columns: 30 m square footprints centered on each cell (-15/+15).
    xpos2 = xx1.flatten()-15
    ypos2 = xx2.flatten()-15
    zpos2 = np.repeat(y.min(), y.flatten().shape).reshape(y.flatten().shape)
    dx2 = 30 * np.ones_like(xx1.flatten())
    dy2 = 30 * np.ones_like(xx2.flatten())
    dz2 = y.flatten() - y.min()
    ax.bar3d(xpos2, ypos2, zpos2, dx2, dy2, dz2, color='b', zsort='average', alpha=0.10)
    if fall_line:
        # Index 4 is the center cell of the flattened 3x3 window.
        center_x = xpos2[4]+15
        center_y = ypos2[4]+15
        center_z = y.flatten()[4]
        aspect = calc_aspect(est)
        slope = np.deg2rad(calc_slope(est))
        dx = 30 * np.sin(np.deg2rad(aspect))
        dy = 30 * np.cos(np.deg2rad(aspect))
        fall_x = center_x+dx
        fall_y = center_y+dy
        fall_dist = distance.euclidean((center_x, center_y),
                                       (fall_x, fall_y))
        fall_z = center_z -(fall_dist*slope)
        ax.plot((center_x, fall_x),
                (center_y, fall_y),
                (center_z, fall_z), color='r', lw=6, alpha=0.5)
# In[211]:
# --- Interactive per-pixel inspector setup ---
get_ipython().magic('matplotlib inline')
from ipywidgets import interactive
col_index, row_index = 0,0
#col_index=127, row_index=250
def plot_one(col_index=132, row_index=207):
    """Three-panel inspector for one Landsat cell.

    Panel 1: PhenoCam photo with the photo pixels mapped to this cell
    highlighted; panel 2: map view marking the cell and the camera;
    panel 3: 3-D plane fit / fall line.  Saves the figure as a JPEG.
    """
    single_pixel = np.logical_and(index_grid[:,:,0]==col_index, index_grid[:,:,1]==row_index)
    single_pixel = np.ma.asarray(trans.resize(single_pixel,
                    (sample_image.shape[0], sample_image.shape[1]), preserve_range=False))#[:,:,1]
    single_pixel.mask = single_pixel==False
    fig = plt.figure(figsize=(25, 15))
    ax = plt.subplot(131)
    ax.imshow(sample_image)
    ax.imshow(single_pixel, alpha = 0.75, cmap=mpl.cm.Reds, interpolation='none')
    pyphenocam.plotting.format_photo_axes(ax)
    ax_proj = landsat_proj
    ax2 = plt.subplot(132, projection=ax_proj)
    ax_extent = [phenosite.x - 0.04, phenosite.x + 0.04, phenosite.y - 0.002, phenosite.y + 0.040]
    ax2.set_extent(ax_extent, ccrs.Geodetic())
    ax2.imshow(landsat_data, origin='upper', extent=landsat_extents, transform=landsat_proj, interpolation='none',
               cmap=mpl.cm.RdYlGn)
    # Cell corner -> map coords, then shift by half a pixel to its center.
    colx, coly = landsat.affine * (col_index, row_index)
    colx += landsat.transform[1]/2.
    coly += landsat.transform[5]/2.
    colxgeo, colygeo = list(ax_proj.transform_point(colx, coly, landsat_proj))
    ax2.plot(colxgeo, colygeo, 'bo', markersize=10, color='red', alpha=0.5)
    ax2.text(colxgeo+75, colygeo-15, 'highlighted \n pixel', bbox={'facecolor':'white', 'alpha':0.5, 'pad':5})
    # mark a known place to help us geo-locate ourselves
    locx, locy = list(ax_proj.transform_point(phenosite.x, phenosite.y, geodetic))
    ax2.plot(locx, locy, 'bo', markersize=15, color='red', alpha=0.5)
    ax2.text(locx+75, locy-10, 'camera location', bbox={'facecolor':'white', 'alpha':0.5, 'pad':5})
    ax3 = fig.add_subplot(1, 3, 3, projection='3d', azim=-90, elev=1)
    ax3.set_xlabel('X')
    ax3.set_ylabel('Y')
    ax3.set_zlabel('elev')
    plot_3d_slope(row_index, col_index, ax3, fall_line=True)
    set_aspect_equal_3d(ax3)
    plt.tight_layout()
    fig.savefig(os.path.join(r"J:\Projects\NCCSC\phenocam\Doc\Presentation", "calculatingSlopeAspect.jpg"), dpi=270)
# Wire plot_one to sliders over the scene's (col, row) ranges.
interactive(plot_one, col_index=(0, landsat.shape[0], 1), row_index=(0, landsat.shape[1], 1))
# In[14]:
#this isn't terribly efficient but a good demonstration/validation of the technique \n",
# --- Enumerate the unique (col, row) Landsat cells present in the index
# --- grid by viewing the two uint8 channels as a structured array ---
data = np.array(index_grid[:, :, :2]).astype(np.uint8)
dtype = data.dtype.descr * 2
struct = data.view(dtype)
uniq = np.unique(struct)
results = {}
mapped_output = np.zeros(np.squeeze(index_grid[:,:,1]).shape)
# In[15]:
import seaborn as sns
# In[144]:
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import shapely.geometry as sgeom
import cartopy.crs as ccrs
import cartopy.io.shapereader as shpreader
def get_locator(loc=[.9, 0.5, 0.3, 0.3]):
    """Add a small CONUS locator inset with the scene location starred.

    *loc* is the inset's [left, bottom, width, height] in figure fraction
    (mutable default, but it is only read, never mutated).  Reads the
    module-level ``landsat_extents`` / ``landsat_proj`` for the star marker.
    """
    locator_ax = plt.axes(loc,
                          projection=ccrs.LambertConformal())
    locator_ax.set_extent([-124, -71, 20, 50], ccrs.Geodetic())
    shapename = 'admin_1_states_provinces_lakes_shp'
    states_shp = shpreader.natural_earth(resolution='110m',
                                         category='cultural', name=shapename)
    for state in shpreader.Reader(states_shp).geometries():
        # pick a default color for the land with a black outline,
        # this will change if the storm intersects with our track
        facecolor = [0.9375, 0.9375, 0.859375]
        edgecolor = 'black'
        locator_ax.add_geometries([state], ccrs.PlateCarree(),
                                  facecolor=facecolor, edgecolor=edgecolor)
    # Star at the scene's lower-left corner, drawn in the Landsat CRS.
    plt.plot(landsat_extents[0], landsat_extents[2], '*',
             markersize=15, color='r', transform=landsat_proj)
    # extent = landsat_proj
    # bigger = 0
    # extent_box = sgeom.box(extent[0]-bigger, extent[2]+bigger, extent[1]-bigger, extent[3]+bigger)
    # locator_ax.add_geometries([extent_box], landsat_proj, color='none',
    #                           edgecolor='blue', linewidth=2)
    return locator_ax
# In[147]:
# --- Build a mask of Landsat cells visible in the photo and plot it ---
get_ipython().magic('matplotlib inline')
sns.set_style("white")
scene_dname = os.path.join(base_dname, "Landsat", "SceneSubset")
visible_pixels = np.zeros(landsat_data.shape)
for u in uniq:
    try:
        visible_pixels[u[1], u[0]] = 1
    # NOTE(review): bare except silently drops out-of-range indices —
    # deliberate best-effort, but it also hides any other error.
    except:
        pass
out_fname = os.path.join(scene_dname, 'mapped', "visible_mask.npy")
np.save(out_fname, visible_pixels)
plt.figure(figsize=(10,10))
landsat_data.mask += landsat_data==44
plt.imshow(landsat_data, vmin=0, vmax=0.7, cmap=mpl.cm.YlGn, interpolation=None, extent=landsat_extents)
# plt.colorbar()
visible_pixels = np.ma.MaskedArray(visible_pixels, mask=visible_pixels==0)
plt.imshow(visible_pixels, cmap='jet_r', alpha=0.6, interpolation=None, extent=landsat_extents)
ax = plt.gca()
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.ticker as tkr
def func(x, pos):  # formatter function takes tick label and tick position
    """Format tick value *x* as an integer with comma thousands separators.

    The ',' format option performs the same three-digit grouping (with the
    sign handled correctly) as the original manual loop.
    """
    return format(int(x), ',d')
y_format = tkr.FuncFormatter(func) # make formatter
majorFormatter = FormatStrFormatter('%d')
ax.yaxis.set_major_formatter(y_format)
ax.xaxis.set_major_formatter(y_format)
locator_ax = get_locator(loc=[.59, 0.65, 0.3, 0.3])
pyphenocam.plotting.format_photo_axes(locator_ax)
ax.xaxis.grid(True)
ax.yaxis.grid(True)
fig = plt.gcf()
fig.suptitle(" Landsat Pixels Visible in PhenoCam Image", fontsize=30)
fig.savefig(os.path.join(r"J:\Projects\NCCSC\phenocam\Doc\Presentation", "VisibleLandsatPixels.jpg"), dpi=270)
# In[48]:
# NOTE(review): `extent` is not assigned anywhere in this file's executed
# code (only inside a commented block) — stale notebook state.
extent
# In[49]:
landsat_extents
# In[ ]:
# --- Variant of the visibility plot drawn on a Mercator cartopy axes ---
get_ipython().magic('matplotlib inline')
sns.set_style("white")
scene_dname = os.path.join(base_dname, "Landsat", "SceneSubset")
visible_pixels = np.zeros(landsat_data.shape)
for u in uniq:
    try:
        visible_pixels[u[1], u[0]] = 1
    except:
        pass
out_fname = os.path.join(scene_dname, 'mapped', "visible_mask.npy")
np.save(out_fname, visible_pixels)
plt.figure(figsize=(10,10))
ax = plt.axes(projection=ccrs.Mercator())
landsat_data.mask += landsat_data==44
ax.imshow(landsat_data, vmin=0, vmax=0.7, cmap=mpl.cm.YlGn, interpolation=None, extent=landsat_extents, transform=landsat_proj)
# # plt.colorbar()
# visible_pixels = np.ma.MaskedArray(visible_pixels, mask=visible_pixels==0)
# ax.imshow(visible_pixels, cmap='jet_r', alpha=0.6, interpolation=None, extent=landsat_extents, transform=landsat_proj)
# ax.gridlines(draw_labels=True)
ax.set_extent(ax_extent)
# fig = plt.gcf()
# # fig.suptitle("Visible Landsat pixels", fontsize=45)
# fig.savefig(os.path.join(r"J:\Projects\NCCSC\phenocam\Doc\Presentation", "VisibleLandsatPixels.jpg"), dpi=270)
# In[32]:
out_fname = os.path.join(scene_dname, 'mapped', "visible_mask.npy")
visible_pixels = np.load(out_fname)
visible_pixels = np.ma.MaskedArray(visible_pixels, mask=visible_pixels==0)
plt.imshow(visible_pixels)
plt.imshow(visible_pixels, cmap='jet_r', alpha=0.6)
# In[99]:
# --- Main per-cell loop: for every visible Landsat cell, gather geometry
# --- (distance/azimuth/zenith from the camera), terrain (slope/aspect) and
# --- NDVI statistics from both sensors ---
from IPython.html.widgets import FloatProgress
from IPython.display import display
# NOTE(review): `skip` is used here but only assigned a few lines below —
# this cell relies on a previous notebook run having defined it.
f = FloatProgress(min=0, max=len(uniq[::skip])-1)
display(f)
results = {}
i = 0
skip = 1
for col, row in uniq[::skip]:
#     print col, row,
#     print ".",
    f.value = i
    single_pixel = np.logical_and(index_grid[:,:,0]==col, index_grid[:,:,1]==row)
    single_pixel = np.ma.asarray(trans.resize(single_pixel,
                    (sample_image.shape[0], sample_image.shape[1]), preserve_range=False))#[:,:,1]
    phenopixcount = np.count_nonzero(single_pixel)
    try:
        landsatx, landsaty = dem_xy[:, row, col]
        pix_elev = dem_data[row, col]
        slope, aspect = get_slope_aspect(col=col, row=row)
        landsat_ndvi = landsat_data[row, col]
        # Photo-side NDVI over the pixels that map (>0.95 after resize)
        # to this Landsat cell.
        phenocam_ndvi = np.nanmean(corrected_ndvi[single_pixel>0.95])
        phenocam_ndvi_median = np.nanmedian(corrected_ndvi[single_pixel>0.95])
        dist = dist3d(camx, camy, cam_elev, landsatx, landsaty, pix_elev)
        az = azimuth3d(camx, camy, cam_elev, landsatx, landsaty, pix_elev)
        zen = zenith3d(camx, camy, cam_elev, landsatx, landsaty, pix_elev, dist)
        results[i] = [col, row, phenopixcount, landsatx, landsaty, pix_elev,
                      dist, az, zen,
                      slope, aspect, landsat_ndvi, phenocam_ndvi, phenocam_ndvi_median]
        mapped_output[single_pixel>0.95] = landsat_ndvi
    except IndexError:
        print("skipping", col, row)
    i+=1
#     if i > 30:
#         break
# In[180]:
import pandas as pd
data = pd.DataFrame.from_dict(results, orient='index')
data.columns = ["col", "row", "photopixelcount", "landsatx", "landsaty", "landsatelevation",
                "distance", "azimuth", "zenith",
                "pixelslope", "pixelaspect", "landsat_ndvi", "phenocam_ndvi", "phenocam_ndvi_median"]
data.to_csv(os.path.join(base_dname, scene_name + "_results.csv"))
# NOTE(review): data.row is a Series, not callable — this line would raise
# TypeError; looks like stray notebook input.
data.row()
# In[ ]:
import pandas as pd
data = pd.read_csv(os.path.join(base_dname, scene_name + "_results.csv"))
# In[ ]:
data.shape
# In[ ]:
import seaborn
data.pixelslope.plot(kind='hist')
# In[ ]:
# Mean offset between the two NDVI estimates, used to center the colormap.
ave_diff = data.phenocam_ndvi.mean() - data.landsat_ndvi.mean()
# In[ ]:
def shiftedColorMap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
    '''
    Offset the "center" of a colormap. Useful for data with a negative min
    and a positive max when the middle of the colormap's dynamic range
    should sit at zero.

    Parameters
    ----------
    cmap : the matplotlib colormap to be altered
    start : offset from the lowest point in the colormap's range;
        defaults to 0.0 (no lower offset), should be in [0.0, `midpoint`].
    midpoint : the new center of the colormap; defaults to 0.5 (no shift),
        should be in [0.0, 1.0]. In general this should be
        1 - vmax/(vmax + abs(vmin)): e.g. for data in [-15.0, +5.0] with
        the center at 0.0, use 1 - 5/(5 + 15) = 0.75.
    stop : offset from the highest point in the colormap's range;
        defaults to 1.0 (no upper offset), should be in [`midpoint`, 1.0].

    Returns the new colormap after registering it under *name*.
    NOTE(review): plt.register_cmap is deprecated in recent matplotlib —
    confirm the installed version before upgrading.
    '''
    # Positions at which the source colormap is sampled (257 points).
    sample_positions = np.linspace(start, stop, 257)
    # Positions those samples occupy in the shifted map: 128 points below
    # the midpoint (exclusive) and 129 from the midpoint to 1.0 inclusive.
    target_positions = np.hstack([
        np.linspace(0.0, midpoint, 128, endpoint=False),
        np.linspace(midpoint, 1.0, 129, endpoint=True)
    ])
    samples = [cmap(pos) for pos in sample_positions]
    pairs = list(zip(target_positions, samples))
    cdict = {
        'red': [(t, rgba[0], rgba[0]) for t, rgba in pairs],
        'green': [(t, rgba[1], rgba[1]) for t, rgba in pairs],
        'blue': [(t, rgba[2], rgba[2]) for t, rgba in pairs],
        'alpha': [(t, rgba[3], rgba[3]) for t, rgba in pairs]
    }
    newcmap = mpl.colors.LinearSegmentedColormap(name, cdict)
    plt.register_cmap(cmap=newcmap)
    return newcmap
# --- Residual (landsat - phenocam + mean offset) scatter over azimuth /
# --- zenith, colored with a colormap centered on zero residual ---
ans = data.landsat_ndvi-data.phenocam_ndvi+ ave_diff
vmax = ans.max()
vmin = ans.min()
center = 1 - vmax/(vmax + abs(vmin))
new_cm = shiftedColorMap(mpl.cm.RdBu, midpoint=center, name='shifted')
# In[ ]:
get_ipython().magic('matplotlib inline')
fig, ax = plt.subplots(figsize=(20, 12))
# Marker size shrinks with distance from the camera.
c = ax.scatter(data.azimuth, data.zenith, c=data.landsat_ndvi-data.phenocam_ndvi+ ave_diff,
               cmap=new_cm, alpha=0.5, lw=0.5, s=((1-(data.distance-data.distance.min())/data.distance.max())*20)**2)
cbar = fig.colorbar(c)
cbar.set_label("diff")
cbar.solids.set_rasterized(True)
cbar.set_alpha(1)
cbar.draw_all()
ax.set_ylabel('zenith')
ax.set_xlabel('azimuth')
ax.set_xlim(-40, 12)
def resize_all(ax, fontsize=20):
    """Set the title, axis-label, and tick-label fonts on *ax* to *fontsize*.

    Bug fix: the original hard-coded 20 inside the loop and silently
    ignored the *fontsize* parameter; callers passing other sizes (the
    default of 20 is preserved) now take effect.
    """
    for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
                 ax.get_xticklabels() + ax.get_yticklabels()):
        item.set_fontsize(fontsize)
resize_all(ax, 20)
fig = plt.gcf()
fig.savefig(os.path.join(r"J:\Projects\NCCSC\phenocam\Doc\Presentation", "CorrelationAllPoints_residuals.jpg"), dpi=270)
# In[56]:
from statsmodels.nonparametric.smoothers_lowess import lowess
# In[57]:
# --- Three azimuth/zenith scatters colored by slope, aspect and distance ---
from mpl_toolkits.axes_grid1 import AxesGrid
fig = plt.figure(figsize=(24, 8))
grid = AxesGrid(fig, 111, # similar to subplot(143)
                nrows_ncols=(1, 3),
                axes_pad=0.1,
                label_mode="1",
                share_all=True,
                cbar_location="top",
                cbar_mode="each",
                cbar_size="7%",
                cbar_pad="2%",
                aspect=False
                )
for i, which in enumerate(['pixelslope', 'pixelaspect', 'distance']):
    c = grid[i].scatter(data.azimuth, data.zenith, cmap='jet', c=data[which], alpha=0.5, lw=0, s=80)
    cbar = grid.cbar_axes[i].colorbar(c)
    # cbar.set_label("slope")
    cbar.solids.set_rasterized(True)
    cbar.set_alpha(1)
    # cbar.draw_all()
    grid[i].set_ylabel('zenith')
    grid[i].set_xlabel('azimuth')
    pyphenocam.plotting.add_inner_title(grid[i], which, loc=8, font_kwargs=dict(size=25))
    resize_all(grid[i], 20)
    cbar.ax.tick_params(labelsize=20)
# im = grid[0].imshow(Z, extent=extent, interpolation="nearest")
# grid.cbar_axes[i].colorbar(im)
# In[64]:
# Keep only cells covered by a substantial number of photo pixels.
big_pix_data = data[data.photopixelcount>300]
# In[86]:
import seaborn as sns
sns.set(style="white", color_codes=True)
sns.set_context("poster")
d = big_pix_data
g = sns.jointplot("landsat_ndvi", "phenocam_ndvi", data=d, kind="reg",
                  xlim=(0.1, 0.5), ylim=(0.4, 1.),
                  color="r", size=12,
                  marginal_kws=dict(bins=30),
                  annot_kws=dict(stat="r"))
# plt.gca().plot(ys[:,0], ys[:,1], 'blue', linewidth=2, alpha=0.5)
fig = plt.gcf()
fig.savefig(os.path.join(r"J:\Projects\NCCSC\phenocam\Doc\Presentation", "CorrelationAllPoints_big_pix_data.jpg"), dpi=270)
# In[60]:
# --- Same joint plot, recolored by camera distance with an inset ---
g = sns.jointplot("landsat_ndvi", "phenocam_ndvi", data=data, kind="reg",
                  xlim=(0.1, 0.5), ylim=(0.1, 1.), color="r", size=14)
g.ax_joint.cla()
d = big_pix_data
c = g.ax_joint.scatter(d.landsat_ndvi, d.phenocam_ndvi, c=d.distance, cmap='jet', alpha=0.5, lw=0, s=75)
# g.fig.colorbar(c)
g.ax_joint.set_xlim((0.0, 0.5))
g.ax_joint.set_ylim((0.0, 1.))
cbaxes = g.fig.add_axes([0.4, 0.09, 0.4, 0.02])
cbar = g.fig.colorbar(c, cax=cbaxes, orientation='horizontal')
cbar.set_label('distance')
fig = plt.gcf()
ax_inset = fig.add_axes([0.07, 0.07, 0.2, 0.2], frameon=True)
# NOTE(review): `which` leaks from the AxesGrid loop above, so the inset is
# colored by 'distance' only by accident of loop order — confirm intent.
ax_inset.scatter(d.azimuth, d.zenith, cmap='jet', c=d[which], alpha=0.5, lw=0, s=80)
ax_inset.get_xaxis().set_visible(False)
ax_inset.get_yaxis().set_visible(False)
ax_inset.frameon = True
ax_inset.set_xlim(-40, 20)
ax_inset.set_ylim(-5, 10)
# NOTE(review): this assigns the attribute set_color instead of calling
# set_color('red') — the spine color is never actually changed.
ax_inset.spines['top'].set_color='red'
fig.savefig(os.path.join(r"J:\Projects\NCCSC\phenocam\Doc\Presentation", "CorrelationAllPoints_wDist_big_pix_data.jpg"), dpi=270)
# In[44]:
# Mask both NDVI rasters to the photo pixels that map to a Landsat cell.
corrected_ndvi_m = np.ma.masked_where(index_grid.mask[:,:,0], corrected_ndvi)
mapped_output_m = np.ma.masked_where(index_grid.mask[:,:,0], mapped_output)
# In[225]:
# --- Side-by-side PhenoCam NDVI vs mapped Landsat NDVI in an ImageGrid ---
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import numpy as np
# Demo array left over from the ImageGrid example; unused below.
im = np.arange(100)
im.shape = 10, 10
fig = plt.figure(1, figsize=(20, 12))
grid = ImageGrid(fig, 111, # similar to subplot(111)
                 nrows_ncols=(1, 2), # creates 2x2 grid of axes
                 axes_pad=0.5, # pad between axes in inch.
                 label_mode="1",
                 share_all=True,
                 cbar_location="bottom",
                 cbar_mode="each",
                 cbar_size="7%",
                 cbar_pad="2%",
                 )
# Z, extent = get_demo_image()
# for i in range(4):
# im = grid[i].imshow(Z, extent=extent, interpolation="nearest")
# grid.cbar_axes[i].colorbar(im)
# for cax in grid.cbar_axes:
# cax.toggle_label(False)
# # This affects all axes because we set share_all = True.
# grid.axes_llc.set_xticks([-2, 0, 2])
# grid.axes_llc.set_yticks([-2, 0, 2])
# )
for i in range(2):
    pyphenocam.plotting.format_photo_axes(grid[i])
im1 = grid[0].imshow(corrected_ndvi_m, vmin=0, vmax=1, cmap=mpl.cm.YlGn) # The AxesGrid object work as a list of axes.
im2 = grid[1].imshow(mapped_output_m, vmin=0, vmax=0.7, cmap=mpl.cm.YlGn)
grid.cbar_axes[0].colorbar(im1)
grid.cbar_axes[1].colorbar(im2)
pyphenocam.plotting.add_inner_title(grid[0], "phenocam corrected NDVI", 9, font_kwargs=dict(size=20))
pyphenocam.plotting.add_inner_title(grid[1], "landsat NDVI", 9, font_kwargs=dict(size=20))
fig.savefig(os.path.join(r"J:\Projects\NCCSC\phenocam\Doc\Presentation", "MappedLandsat.jpg"), dpi=270)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# ### read in the MODIS data
# In[9]:
modis_dname = os.path.join(output_dir, 'MODIS', 'subset')
modis_subset_fname = os.path.join(modis_dname, "modis_subset.tif")
# In[11]:
modis = rasterio.open(modis_subset_fname)
modis_data = np.squeeze(modis.read(masked=True))
modis_proj = cartopy.crs.Sinusoidal.MODIS
modis_extents = [modis.bounds.left, modis.bounds.right, modis.bounds.bottom, modis.bounds.top]
# In[25]:
fig = plt.figure(figsize=(10, 10))
ax = plt.axes(projection=modis_proj)
im = ax.imshow(modis_data, origin='upper', extent=modis_extents, transform=modis_proj,
               cmap=mpl.cm.viridis, interpolation='none')
ax.imshow(dem_data, origin='upper', extent=elev_extents, transform=landsat_proj,
          cmap=mpl.cm.gist_earth, interpolation='none', alpha=0.7)
# # Calculate the slope and aspect under a single modis pixel
# In[13]:
modis_index_subset_fname = os.path.join(modis_dname, "modis_subset_index.tif")
modis_index = rasterio.open(modis_index_subset_fname)
modis_index_data = np.squeeze(modis_index.read(masked=True))
# In[26]:
# Highlight one MODIS cell (row 17, col 34) over the DEM.
selected_pixel = np.zeros(modis_data.shape)
selected_pixel[17, 34] = 1.
fig = plt.figure(figsize=(10, 10))
ax = plt.axes(projection=modis_proj)
im = ax.imshow(selected_pixel, origin='upper', extent=modis_extents, transform=modis_proj,
               cmap=mpl.cm.Reds, interpolation='none')
ax.imshow(dem_data, origin='upper', extent=elev_extents, transform=landsat_proj,
          cmap=mpl.cm.gist_earth, interpolation='none', alpha=0.7)
# ### Transform the selected pixels corner cordinates into the elevation crs
# In[15]:
from affine import Affine
# No half-pixel shift here: rc2xy returns pixel *corner* coordinates.
T1 = modis.affine * Affine.translation(0,0)
rc2xy = lambda r, c: (c, r) * T1
# In[16]:
r, c = 17, 34
sel_pixulx, sel_pixuly = rc2xy (r+0, c+0)
sel_pixllx, sel_pixlly = rc2xy (r+1, c+0)
sel_pixurx, sel_pixury = rc2xy (r+0, c+1)
sel_pixlrx, sel_pixlry = rc2xy (r+1, c+1)
# In[59]:
get_ipython().magic('matplotlib inline')
fig = plt.figure(figsize=(10, 10))
ax = plt.axes(projection=modis_proj)
im = ax.imshow(selected_pixel, origin='upper', extent=modis_extents, transform=modis_proj,
               cmap=mpl.cm.Reds, interpolation='none')
ax.imshow(dem_data, origin='upper', extent=elev_extents, transform=landsat_proj,
          cmap=mpl.cm.gist_earth, interpolation='none', alpha=0.7)
ax.plot([sel_pixulx,sel_pixurx], [sel_pixuly, sel_pixury], 'k-', lw=1, c='red', transform=modis_proj)
ax.plot([sel_pixurx,sel_pixlrx], [sel_pixury, sel_pixlry], 'k-', lw=1, c='red', transform=modis_proj)
ax.plot([sel_pixlrx,sel_pixllx], [sel_pixlry, sel_pixlly], 'k-', lw=1, c='red', transform=modis_proj)
ax.plot([sel_pixllx,sel_pixulx], [sel_pixlly, sel_pixuly], 'k-', lw=1, c='red', transform=modis_proj)
# In[64]:
# Reproject the four corners from the MODIS sinusoidal CRS to the DEM CRS.
dem_urx, dem_ury = list(landsat_proj.transform_point(sel_pixurx, sel_pixury, modis_proj))
dem_llx, dem_lly = list(landsat_proj.transform_point(sel_pixllx, sel_pixlly, modis_proj))
dem_ulx, dem_uly = list(landsat_proj.transform_point(sel_pixulx, sel_pixuly, modis_proj))
dem_lrx, dem_lry = list(landsat_proj.transform_point(sel_pixlrx, sel_pixlry, modis_proj))
# In[65]:
dem_urx, dem_ury
# In[66]:
get_ipython().magic('matplotlib inline')
fig = plt.figure(figsize=(10, 10))
ax = plt.axes(projection=landsat_proj)
ax.imshow(dem_data, origin='upper', extent=elev_extents, transform=landsat_proj,
          cmap=mpl.cm.gist_earth, interpolation='none')
ax.plot([dem_ulx,dem_urx], [dem_uly, dem_ury], 'k-', lw=1, c='red', transform=landsat_proj)
ax.plot([dem_urx,dem_lrx], [dem_ury, dem_lry], 'k-', lw=1, c='red', transform=landsat_proj)
ax.plot([dem_lrx,dem_llx], [dem_lry, dem_lly], 'k-', lw=1, c='red', transform=landsat_proj)
ax.plot([dem_llx,dem_ulx], [dem_lly, dem_uly], 'k-', lw=1, c='red', transform=landsat_proj)
# In[67]:
# Rasterize the reprojected pixel footprint onto the DEM grid and mask the
# DEM outside it.
from rasterio.features import rasterize
from shapely.geometry import Polygon, mapping
poly = Polygon(((dem_ulx, dem_uly), (dem_urx, dem_ury), (dem_lrx, dem_lry), (dem_llx, dem_lly)))
output = rasterize([poly], transform=elev.transform, out_shape=dem_data.shape)
dem_pix_subset = dem_data.copy()
dem_pix_subset.mask = output==0
# In[69]:
get_ipython().magic('matplotlib inline')
fig = plt.figure(figsize=(10, 10))
ax = plt.axes(projection=landsat_proj)
ax.imshow(dem_pix_subset, origin='upper', extent=elev_extents, transform=landsat_proj,
          cmap=mpl.cm.gist_earth, interpolation='none',
          vmax=dem_data.max(), vmin=dem_data.min())
ax.plot([dem_ulx,dem_urx], [dem_uly, dem_ury], 'k-', lw=1, c='red', transform=landsat_proj)
ax.plot([dem_urx,dem_lrx], [dem_ury, dem_lry], 'k-', lw=1, c='red', transform=landsat_proj)
ax.plot([dem_lrx,dem_llx], [dem_lry, dem_lly], 'k-', lw=1, c='red', transform=landsat_proj)
ax.plot([dem_llx,dem_ulx], [dem_lly, dem_uly], 'k-', lw=1, c='red', transform=landsat_proj)
# In[42]:
from affine import Affine
T1 = landsat.affine * Affine.translation(0,0)
rc2xy = lambda rc: (rc[1], rc[0]) * T1
# In[38]:
dem_indices = np.indices(dem_data.shape)
dem_xy = np.apply_along_axis(func1d=rc2xy ,arr=dem_indices, axis=0)
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# In[28]:
rc2xy(1, 1)
# In[55]:
row = 41
col = 81
data = dem_data[row-1:row+2, col-1:col+2]
xy = dem_xy[:, row-1:row+2, col-1:col+2]
# In[31]:
plt.imshow(dem_data)
# In[56]:
xy
# In[34]:
np.indices(data)
# In[50]:
# In[54]:
dem_xy[:, 1, 1]
# In[52]:
dem_xy.shape
# In[49]:
dem_indices.shape
# In[77]:
# Generate artificial data (2 regressors + constant)
nobs = 100
X = xy.reshape(2, 9).T
X = sm.add_constant(X)
y = data.flatten()
e = np.random.random(nobs)
y = np.dot(X, beta) + e
# Fit regression model
results = sm.OLS(y, X).fit()
# Inspect the results
print(results.summary())
# In[63]:
X = np.random.random((100, 2))
X.shape
# In[76]:
data.flatten()
# In[71]:
# In[1]:
get_ipython().magic('matplotlib qt4')
import matplotlib.pyplot as plt
# TODO add image and put this code into an appendix at the bottom
from mpl_toolkits.mplot3d import Axes3D
# Fit a plane (OLS on x, y coordinates) to the 3x3 elevation patch and draw
# the fitted hyperplane plus the data points in 3-D.
X = xy.reshape(2, 9).T
y = data.flatten()
## fit a OLS model with intercept on TV and Radio
X = sm.add_constant(X)
est = sm.OLS(y, X).fit()
## Create the 3d plot -- skip reading this
# TV/Radio grid for 3d plot
xx1, xx2 = xy[0,:,:], xy[1,:,:]
# plot the hyperplane by evaluating the parameters on the grid
Z = est.params[0] + est.params[1] * xx1 + est.params[2] * xx2
# create matplotlib 3d axes
fig = plt.figure(figsize=(12, 8))
ax = Axes3D(fig, azim=-115, elev=15)
# plot hyperplane
surf = ax.plot_surface(xx1, xx2, Z, cmap=plt.cm.RdBu_r, alpha=0.6, linewidth=0)
# plot data points - points over the HP are white, points below are black
resid = y - est.predict(X)
ax.scatter(xx1, xx2, y, color='black', alpha=1.0, facecolor='white')
# Draw translucent bars from the minimum elevation up to each data point.
xpos2 = xx1.flatten()-5
ypos2 = xx2.flatten()-5
zpos2 = np.repeat(y.min(), y.flatten().shape).reshape(y.flatten().shape)
dx2 = 15 * np.ones_like(xx1.flatten())
dy2 = 15 * np.ones_like(xx2.flatten())
dz2 = y.flatten() - y.min()
ax.bar3d(xpos2, ypos2, zpos2, dx2, dy2, dz2, color='b', zsort='average', alpha=0.05)
# set axis labels
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('elev')
# In[134]:
y.min()
# In[92]:
# NOTE(review): xpos/ypos/zpos/dx/dz below are not defined in this file —
# scratch cells that relied on deleted notebook state; kept verbatim.
xpos
# In[101]:
xpos2 = xx1.flatten()
# In[116]:
ypos.shape
# In[103]:
ypos2 = xx2.flatten()
# In[93]:
zpos
# In[105]:
np.zeros(y.flatten().shape)
# In[84]:
xx2[:2,:2]
# In[98]:
xx1.flatten()
# In[112]:
np.array(y.flatten())
# In[115]:
dz.shape
# In[117]:
np.array(y.flatten()).shape
# In[121]:
for thing in [xpos2, ypos2, zpos2, dx2, dy2, dz2]:
    print(thing.shape)
# In[123]:
dx.shape
# In[127]:
np.ones_like(xx1.flatten())
# # Calculate azimuth and zenith
# In[192]:
import math
# Orientation of the line between two surveyed UTM points (x, y, elevation).
x1,y1,z1 = 521203.13, 4815131.82, 1907.55
x2,y2,z2 = 521199.24, 4815145.89, 1906.85
# 3-D straight-line distance between the two points.
distance = math.sqrt((x2-x1)**2+(y2-y1)**2+(z2 -z1)**2)
print(distance)
# Bare literal below is a leftover notebook Out[] value — it is a no-op.
5.5910642993977451
# NOTE(review): this is the plunge (elevation angle below/above horizontal),
# not the zenith angle — zenith would be 90 deg minus this; the printed label
# is misleading but left unchanged to preserve output.
plunge = math.degrees(math.asin((z2-z1)/distance))
print("zenith = ", plunge)
# Azimuth measured clockwise from north: atan2(easting delta, northing delta).
azimuth = math.degrees(math.atan2((x2-x1),(y2-y1)))
print("azimuth = ", azimuth)
# In[82]:
data.describe()
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
# ## landsat pheno mapped series
# In[50]:
# Collect the Landsat scene-subset filenames, split by vegetation index.
# NOTE(review): the comprehension variable `f` shadows the function f() defined
# later in this script; harmless here but easy to trip over.
scene_dname = os.path.join(base_dname, "Landsat", "SceneSubset")
ndvi_fnames = [f for f in os.listdir(scene_dname) if 'ndvi' in f]
gcc_fnames = [f for f in os.listdir(scene_dname) if 'gcc' in f]
# In[51]:
def map_to_phenocam(landsat_data):
    """Project per-pixel Landsat values onto the phenocam image plane.

    For every (col, row) Landsat pixel listed in the global `uniq`, resize its
    footprint mask from `index_grid` up to the phenocam image resolution and
    paint the Landsat value into every phenocam pixel the footprint covers.

    Relies on notebook globals: index_grid, uniq, trans (skimage.transform),
    sample_image, dem_xy, dem_data.  Returns a masked array shaped like the
    phenocam image, masked where index_grid is masked.
    """
    mapped_output = np.zeros(np.squeeze(index_grid[:,:,1]).shape)
    for col, row in uniq:
        # Boolean footprint of this single Landsat pixel in index-grid space.
        single_pixel = np.logical_and(index_grid[:,:,0]==col, index_grid[:,:,1]==row)
        single_pixel = np.ma.asarray(trans.resize(single_pixel,
                            (sample_image.shape[0], sample_image.shape[1]), preserve_range=False))
        phenopixcount = np.count_nonzero(single_pixel)
        try:
            # landsatx/landsaty/pix_elev look unused, but these lookups are
            # deliberate: they raise IndexError for out-of-bounds (col, row)
            # and so guard the assignment below — do not remove.
            landsatx, landsaty = dem_xy[:, row, col]
            pix_elev = dem_data[row, col]
            landsat_ndvi = landsat_data[row, col]
            # 0.95 threshold: only phenocam pixels mostly covered by this
            # Landsat footprint after the fractional resize get the value.
            mapped_output[single_pixel>0.95] = landsat_ndvi
        except IndexError:
            print("skipping", col, row)
    mapped_output_m = np.ma.masked_where(index_grid.mask[:,:,0], mapped_output)
    return mapped_output_m
# In[56]:
# Map every NDVI scene onto the phenocam image and cache the result as .npy.
# Filenames encode the acquisition date as Landsat scene IDs: chars 9:13 are
# the year, 13:16 the julian day.
for fname in ndvi_fnames:
    year = int(fname[9:13])
    jday = int(fname[13:16])
    # NOTE(review): adding timedelta(jday) to Jan 1 yields julian day jday+1;
    # if jday is a 1-based day-of-year this is off by one — confirm.
    landsat_date = datetime.datetime(year, 1, 1, 12) + datetime.timedelta(jday)
    landsat_fname = os.path.join(scene_dname, fname)
    landsat = rasterio.open(landsat_fname)
    landsat_data = np.squeeze(landsat.read(masked=True))
    cloud_data = rasterio.open(landsat_fname.replace('ndvi', 'cloud')).read(1)
    # Sentinel values: cloud class 4 is flagged with 44 in the NDVI grid,
    # masked phenocam pixels with -255 in the saved array.
    landsat_data[cloud_data==4] = 44
    landsat_just_fname = os.path.split(landsat_fname)[-1]
    mapped_dname = os.path.join(scene_dname, 'mapped')
    if not os.path.exists(mapped_dname):
        os.makedirs(mapped_dname)
    out_fname = os.path.join(mapped_dname, 'landsat_{dt.year}_{dt.month:02d}_{dt.day:02d}.npy'.format(dt=landsat_date))
    # Skip scenes already mapped in a previous run (the slow step is
    # map_to_phenocam, so caching matters).
    if not os.path.exists(out_fname):
        print(out_fname)
        mapped = map_to_phenocam(landsat_data)
        data = mapped.data
        data[mapped.mask]=-255
        np.save(out_fname, data)
    else:
        print(("\tskipping " + out_fname))
# out_fname2 = os.path.join(scene_dname, 'mapped', landsat_just_fname.replace('subset.tif', 'mapped.npy'))
# os.rename(out_fname, out_fname2)
# print out_fname
# In[138]:
# One-off housekeeping cell: rename the cached mapped .npy files from the
# date-based name to the scene-ID-based name.  The `break` means only the
# first file is processed per execution.
# NOTE(review): `landsat_fnames` is not defined in this file — presumably a
# predecessor of ndvi_fnames from an earlier notebook state.
for fname in landsat_fnames:
    year = int(fname[9:13])
    jday = int(fname[13:16])
    landsat_date = datetime.datetime(year, 1, 1, 12) + datetime.timedelta(jday)
    landsat_fname = os.path.join(scene_dname, fname)
    print(landsat_fname)
    landsat = rasterio.open(landsat_fname)
    landsat_data = np.squeeze(landsat.read(masked=True))
    break
#     mapped = map_to_phenocam(landsat_data)
landsat_just_fname = os.path.split(landsat_fname)[-1]
out_fname = os.path.join(scene_dname, 'mapped', 'landsat_{dt.year}_{dt.month:02d}_{dt.day:02d}.npy'.format(dt=landsat_date))
out_fname2 = os.path.join(scene_dname, 'mapped', landsat_just_fname.replace('subset.tif', 'mapped.npy'))
# os.rename raises FileNotFoundError if the source was already renamed.
os.rename(out_fname, out_fname2)
# print out_fname
#     np.save(out_fname, mapped)
# In[141]:
print(landsat_fname)
# In[130]:
# Load every cached mapped array together with its source Landsat raster into
# `data`, keyed by enumeration index.
mapped_fnames = os.listdir(os.path.join(scene_dname, 'mapped'))
data = {}
for i, fname in enumerate(mapped_fnames):
    print(i)
    fname_full = os.path.join(scene_dname, 'mapped', fname)
    # Strip the '\mapped' path component to recover the original scene path.
    # NOTE(review): the backslash separator makes this Windows-only.
    landsat_fname = fname_full.replace('\\mapped', '')
    landsat = rasterio.open(landsat_fname)
    landsat_data = np.squeeze(landsat.read(masked=True))
    data[i] = (np.load(fname_full), landsat_data)
print(__doc__)
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
    """Target function for the GP demo: f(x) = x * sin(x)."""
    sine_term = np.sin(x)
    return sine_term * x
#----------------------------------------------------------------------
# First the noiseless case
# Fit a legacy-sklearn Gaussian Process to phenocam NDVI as a function of
# Landsat NDVI, then refit with a noise nugget and plot the 95% band.
X = data.landsat_ndvi
# Observations
y = data.phenocam_ndvi
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(X).T
# Instanciate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# # Plot the function, the prediction and the 95% confidence interval based on
# # the MSE
# fig = pl.figure()
# pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
# pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
# pl.plot(x, y_pred, 'b-', label=u'Prediction')
# pl.fill(np.concatenate([x, x[::-1]]),
#         np.concatenate([y_pred - 1.9600 * sigma,
#                        (y_pred + 1.9600 * sigma)[::-1]]),
#         alpha=.5, fc='b', ec='None', label='95% confidence interval')
# pl.xlabel('$x$')
# pl.ylabel('$f(x)$')
# pl.ylim(-10, 20)
# pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
# X = np.linspace(0.1, 9.9, 20)
# X = np.atleast_2d(X).T
# # Observations and noise
# y = f(X).ravel()
# dy = 0.5 + 1.0 * np.random.random(y.shape)
# noise = np.random.normal(0, dy)
# y += noise
# # Mesh the input space for evaluations of the real function, the prediction and
# # its MSE
# x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instanciate a Gaussian Process model
# NOTE(review): `dy` is only defined in the commented-out lines above — this
# cell raises NameError unless dy survives from an earlier notebook session.
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
                     thetaL=1e-3, thetaU=1,
                     nugget=(dy / y) ** 2,
                     random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label='$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label='Observations')
pl.plot(x, y_pred, 'b-', label='Prediction')
# 1.96 * sigma gives the two-sided 95% confidence band assuming normal errors.
pl.fill(np.concatenate([x, x[::-1]]),
        np.concatenate([y_pred - 1.9600 * sigma,
                       (y_pred + 1.9600 * sigma)[::-1]]),
        alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
# In[177]:
# Refit the GP relating Landsat NDVI to phenocam NDVI with a fixed nugget and
# synthetic per-observation noise, then plot prediction and 95% band.
X = data.landsat_ndvi
X = np.atleast_2d(X).T
# Observations and noise
y = data.phenocam_ndvi
# Uniform random error bars in [0.5, 1.5) — synthetic, for the errorbar plot.
dy = 0.5 + 1.0 * np.random.random(y.shape)
# noise = np.random.normal(0, dy)
# y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
# Prediction grid over [-1, 1], the NDVI value range.
x = np.atleast_2d(np.linspace(-1, 1, 1000)).T
# Instanciate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
                     thetaL=1e-3, thetaU=1,
                     nugget=0.1,
                     random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label='$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label='Observations')
pl.plot(x, y_pred, 'b-', label='Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
        np.concatenate([y_pred - 1.9600 * sigma,
                       (y_pred + 1.9600 * sigma)[::-1]]),
        alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
# In[178]:
# Persist the pandas DataFrame of paired NDVI observations for reuse.
data.to_pickle(r"c:\temp\data.pd")
# In[174]:
dy.shape
# In[148]:
sys.getsizeof(data)
| [
"numpy.arctan2",
"cartopy.feature.NaturalEarthFeature",
"matplotlib.pyplot.figure",
"numpy.arange",
"pyphenocam.headerextraction.get_exposure",
"shapely.geometry.Polygon",
"numpy.degrees",
"matplotlib.pyplot.imshow",
"IPython.display.display",
"datetime.timedelta",
"matplotlib.pyplot.subplots",
... | [((436, 461), 'sys.path.append', 'sys.path.append', (['"""..\\\\.."""'], {}), "('..\\\\..')\n", (451, 461), False, 'import sys\n'), ((484, 559), 'sys.path.append', 'sys.path.append', (['"""J:\\\\Projects\\\\NCCSC\\\\phenocam\\\\Tools\\\\DaymetPy\\\\daymetpy"""'], {}), "('J:\\\\Projects\\\\NCCSC\\\\phenocam\\\\Tools\\\\DaymetPy\\\\daymetpy')\n", (499, 559), False, 'import sys\n'), ((719, 760), 'pyphenocam.dataaccess.get_site', 'pyphenocam.dataaccess.get_site', (['site_name'], {}), '(site_name)\n', (749, 760), False, 'import pyphenocam\n'), ((923, 958), 'os.path.join', 'os.path.join', (['base_dname', '"""Landsat"""'], {}), "(base_dname, 'Landsat')\n", (935, 958), False, 'import os\n'), ((1084, 1126), 'os.path.join', 'os.path.join', (['landsat_dname', '"""SceneSubset"""'], {}), "(landsat_dname, 'SceneSubset')\n", (1096, 1126), False, 'import os\n'), ((1301, 1329), 'rasterio.open', 'rasterio.open', (['landsat_fname'], {}), '(landsat_fname)\n', (1314, 1329), False, 'import rasterio\n'), ((1743, 1771), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1753, 1771), True, 'import matplotlib.pyplot as plt\n'), ((1778, 1811), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': 'landsat_proj'}), '(projection=landsat_proj)\n', (1786, 1811), True, 'import matplotlib.pyplot as plt\n'), ((1963, 1979), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {}), '(im)\n', (1975, 1979), True, 'import matplotlib.pyplot as plt\n'), ((2045, 2101), 'os.path.join', 'os.path.join', (['base_dname', '"""ArcScene"""', '"""InputData"""', '"""UTM"""'], {}), "(base_dname, 'ArcScene', 'InputData', 'UTM')\n", (2057, 2101), False, 'import os\n'), ((2123, 2161), 'os.path.join', 'os.path.join', (['utm_dname', '"""NED_30m.tif"""'], {}), "(utm_dname, 'NED_30m.tif')\n", (2135, 2161), False, 'import os\n'), ((2172, 2204), 'rasterio.open', 'rasterio.open', (['elev_subset_fname'], {}), '(elev_subset_fname)\n', (2185, 2204), False, 'import 
rasterio\n'), ((2412, 2440), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2422, 2440), True, 'import matplotlib.pyplot as plt\n'), ((2447, 2457), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (2455, 2457), True, 'import matplotlib.pyplot as plt\n'), ((2630, 2646), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {}), '(im)\n', (2642, 2646), True, 'import matplotlib.pyplot as plt\n'), ((2871, 2897), 'numpy.indices', 'np.indices', (['dem_data.shape'], {}), '(dem_data.shape)\n', (2881, 2897), True, 'import numpy as np\n'), ((2908, 2966), 'numpy.apply_along_axis', 'np.apply_along_axis', ([], {'func1d': 'rc2xy', 'arr': 'dem_indices', 'axis': '(0)'}), '(func1d=rc2xy, arr=dem_indices, axis=0)\n', (2927, 2966), True, 'import numpy as np\n'), ((6683, 6710), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (6693, 6710), True, 'import matplotlib.pyplot as plt\n'), ((6717, 6748), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {'azim': '(-115)', 'elev': '(15)'}), '(fig, azim=-115, elev=15)\n', (6723, 6748), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((7270, 7284), 'skimage.data.flatten', 'data.flatten', ([], {}), '()\n', (7282, 7284), False, 'from skimage import data, img_as_float\n'), ((7343, 7361), 'statsmodels.api.add_constant', 'sm.add_constant', (['X'], {}), '(X)\n', (7358, 7361), True, 'import statsmodels.api as sm\n'), ((7588, 7615), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (7598, 7615), True, 'import matplotlib.pyplot as plt\n'), ((7622, 7653), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {'azim': '(-115)', 'elev': '(15)'}), '(fig, azim=-115, elev=15)\n', (7628, 7653), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((8534, 8592), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['(center_x, center_y)', '(fall_x, fall_y)'], {}), '((center_x, center_y), (fall_x, 
fall_y))\n', (8552, 8592), False, 'from scipy.spatial import distance\n'), ((9049, 9108), 'os.path.join', 'os.path.join', (['base_dname', '"""ArcScene"""', '"""landsat_fishnet.bmp"""'], {}), "(base_dname, 'ArcScene', 'landsat_fishnet.bmp')\n", (9061, 9108), False, 'import os\n'), ((9132, 9196), 'os.path.join', 'os.path.join', (['base_dname', '"""ArcScene"""', '"""landsat_subset_index.bmp"""'], {}), "(base_dname, 'ArcScene', 'landsat_subset_index.bmp')\n", (9144, 9196), False, 'import os\n'), ((9212, 9253), 'pyphenocam.dataaccess.get_site', 'pyphenocam.dataaccess.get_site', (['site_name'], {}), '(site_name)\n', (9242, 9253), False, 'import pyphenocam\n'), ((9668, 9729), 'pyphenocam.headerextraction.get_exposure', 'pyphenocam.headerextraction.get_exposure', (['closest_photo_fname'], {}), '(closest_photo_fname)\n', (9708, 9729), False, 'import pyphenocam\n'), ((9745, 9809), 'pyphenocam.headerextraction.get_exposure', 'pyphenocam.headerextraction.get_exposure', (['closest_photo_fname_ir'], {}), '(closest_photo_fname_ir)\n', (9785, 9809), False, 'import pyphenocam\n'), ((10962, 10989), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 3)'}), '(figsize=(12, 3))\n', (10972, 10989), True, 'import matplotlib.pyplot as plt\n'), ((11049, 11090), 'pyphenocam.plotting.format_photo_axes', 'pyphenocam.plotting.format_photo_axes', (['ax'], {}), '(ax)\n', (11086, 11090), False, 'import pyphenocam\n'), ((11126, 11168), 'pyphenocam.plotting.format_photo_axes', 'pyphenocam.plotting.format_photo_axes', (['ax2'], {}), '(ax2)\n', (11163, 11168), False, 'import pyphenocam\n'), ((11181, 11234), 'pyphenocam.headerextraction.get_exposure', 'pyphenocam.headerextraction.get_exposure', (['local_fname'], {}), '(local_fname)\n', (11221, 11234), False, 'import pyphenocam\n'), ((11250, 11306), 'pyphenocam.headerextraction.get_exposure', 'pyphenocam.headerextraction.get_exposure', (['local_fname_ir'], {}), '(local_fname_ir)\n', (11290, 11306), False, 'import pyphenocam\n'), ((11793, 
11831), 'skimage.io.imread', 'skimage.io.imread', (['landsat_index_fname'], {}), '(landsat_index_fname)\n', (11810, 11831), False, 'import skimage\n'), ((11846, 11955), 'skimage.transform.resize', 'trans.resize', (['index_grid', '(sample_image.shape[0], sample_image.shape[1], 3)'], {'preserve_range': '(True)', 'order': '(0)'}), '(index_grid, (sample_image.shape[0], sample_image.shape[1], 3),\n preserve_range=True, order=0)\n', (11858, 11955), True, 'import skimage.transform as trans\n'), ((11968, 12016), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(index_grid > 254)', 'index_grid'], {}), '(index_grid > 254, index_grid)\n', (11986, 12016), True, 'import numpy as np\n'), ((12050, 12119), 'numpy.logical_and', 'np.logical_and', (['(index_grid[:, :, 0] == 54)', '(index_grid[:, :, 1] == 148)'], {}), '(index_grid[:, :, 0] == 54, index_grid[:, :, 1] == 148)\n', (12064, 12119), True, 'import numpy as np\n'), ((12320, 12353), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(20, 10)'}), '(1, figsize=(20, 10))\n', (12332, 12353), True, 'import matplotlib.pyplot as plt\n'), ((12457, 12498), 'pyphenocam.plotting.format_photo_axes', 'pyphenocam.plotting.format_photo_axes', (['ax'], {}), '(ax)\n', (12494, 12498), False, 'import pyphenocam\n'), ((12703, 12726), 'cartopy.crs.LambertConformal', 'ccrs.LambertConformal', ([], {}), '()\n', (12724, 12726), True, 'import cartopy.crs as ccrs\n'), ((12870, 12885), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (12883, 12885), True, 'import cartopy.crs as ccrs\n'), ((12895, 12923), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (12905, 12923), True, 'import matplotlib.pyplot as plt\n'), ((13136, 13164), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': 'ax_proj'}), '(projection=ax_proj)\n', (13144, 13164), True, 'import matplotlib.pyplot as plt\n'), ((13856, 13980), 'cartopy.feature.NaturalEarthFeature', 'cfeature.NaturalEarthFeature', ([], 
{'category': '"""cultural"""', 'name': '"""admin_1_states_provinces_lines"""', 'scale': '"""50m"""', 'facecolor': '"""none"""'}), "(category='cultural', name=\n 'admin_1_states_provinces_lines', scale='50m', facecolor='none')\n", (13884, 13980), True, 'import cartopy.feature as cfeature\n'), ((14050, 14066), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {}), '(im)\n', (14062, 14066), True, 'import matplotlib.pyplot as plt\n'), ((19092, 19189), 'ipywidgets.interactive', 'interactive', (['plot_one'], {'col_index': '(0, landsat.shape[0], 1)', 'row_index': '(0, landsat.shape[1], 1)'}), '(plot_one, col_index=(0, landsat.shape[0], 1), row_index=(0,\n landsat.shape[1], 1))\n', (19103, 19189), False, 'from ipywidgets import interactive\n'), ((19389, 19405), 'skimage.data.view', 'data.view', (['dtype'], {}), '(dtype)\n', (19398, 19405), False, 'from skimage import data, img_as_float\n'), ((19414, 19431), 'numpy.unique', 'np.unique', (['struct'], {}), '(struct)\n', (19423, 19431), True, 'import numpy as np\n'), ((21059, 21081), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (21072, 21081), True, 'import seaborn as sns\n'), ((21099, 21149), 'os.path.join', 'os.path.join', (['base_dname', '"""Landsat"""', '"""SceneSubset"""'], {}), "(base_dname, 'Landsat', 'SceneSubset')\n", (21111, 21149), False, 'import os\n'), ((21168, 21196), 'numpy.zeros', 'np.zeros', (['landsat_data.shape'], {}), '(landsat_data.shape)\n', (21176, 21196), True, 'import numpy as np\n'), ((21305, 21360), 'os.path.join', 'os.path.join', (['scene_dname', '"""mapped"""', '"""visible_mask.npy"""'], {}), "(scene_dname, 'mapped', 'visible_mask.npy')\n", (21317, 21360), False, 'import os\n'), ((21362, 21396), 'numpy.save', 'np.save', (['out_fname', 'visible_pixels'], {}), '(out_fname, visible_pixels)\n', (21369, 21396), True, 'import numpy as np\n'), ((21400, 21428), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (21410, 
21428), True, 'import matplotlib.pyplot as plt\n'), ((21468, 21577), 'matplotlib.pyplot.imshow', 'plt.imshow', (['landsat_data'], {'vmin': '(0)', 'vmax': '(0.7)', 'cmap': 'mpl.cm.YlGn', 'interpolation': 'None', 'extent': 'landsat_extents'}), '(landsat_data, vmin=0, vmax=0.7, cmap=mpl.cm.YlGn, interpolation=\n None, extent=landsat_extents)\n', (21478, 21577), True, 'import matplotlib.pyplot as plt\n'), ((21611, 21670), 'numpy.ma.MaskedArray', 'np.ma.MaskedArray', (['visible_pixels'], {'mask': '(visible_pixels == 0)'}), '(visible_pixels, mask=visible_pixels == 0)\n', (21628, 21670), True, 'import numpy as np\n'), ((21670, 21769), 'matplotlib.pyplot.imshow', 'plt.imshow', (['visible_pixels'], {'cmap': '"""jet_r"""', 'alpha': '(0.6)', 'interpolation': 'None', 'extent': 'landsat_extents'}), "(visible_pixels, cmap='jet_r', alpha=0.6, interpolation=None,\n extent=landsat_extents)\n", (21680, 21769), True, 'import matplotlib.pyplot as plt\n'), ((21776, 21785), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (21783, 21785), True, 'import matplotlib.pyplot as plt\n'), ((22141, 22164), 'matplotlib.ticker.FuncFormatter', 'tkr.FuncFormatter', (['func'], {}), '(func)\n', (22158, 22164), True, 'import matplotlib.ticker as tkr\n'), ((22201, 22225), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%d"""'], {}), "('%d')\n", (22219, 22225), False, 'from matplotlib.ticker import MultipleLocator, FormatStrFormatter\n'), ((22362, 22411), 'pyphenocam.plotting.format_photo_axes', 'pyphenocam.plotting.format_photo_axes', (['locator_ax'], {}), '(locator_ax)\n', (22399, 22411), False, 'import pyphenocam\n'), ((22465, 22474), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (22472, 22474), True, 'import matplotlib.pyplot as plt\n'), ((22778, 22800), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (22791, 22800), True, 'import seaborn as sns\n'), ((22818, 22868), 'os.path.join', 'os.path.join', (['base_dname', '"""Landsat"""', 
'"""SceneSubset"""'], {}), "(base_dname, 'Landsat', 'SceneSubset')\n", (22830, 22868), False, 'import os\n'), ((22887, 22915), 'numpy.zeros', 'np.zeros', (['landsat_data.shape'], {}), '(landsat_data.shape)\n', (22895, 22915), True, 'import numpy as np\n'), ((23024, 23079), 'os.path.join', 'os.path.join', (['scene_dname', '"""mapped"""', '"""visible_mask.npy"""'], {}), "(scene_dname, 'mapped', 'visible_mask.npy')\n", (23036, 23079), False, 'import os\n'), ((23081, 23115), 'numpy.save', 'np.save', (['out_fname', 'visible_pixels'], {}), '(out_fname, visible_pixels)\n', (23088, 23115), True, 'import numpy as np\n'), ((23119, 23147), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (23129, 23147), True, 'import matplotlib.pyplot as plt\n'), ((23866, 23921), 'os.path.join', 'os.path.join', (['scene_dname', '"""mapped"""', '"""visible_mask.npy"""'], {}), "(scene_dname, 'mapped', 'visible_mask.npy')\n", (23878, 23921), False, 'import os\n'), ((23940, 23958), 'numpy.load', 'np.load', (['out_fname'], {}), '(out_fname)\n', (23947, 23958), True, 'import numpy as np\n'), ((23977, 24036), 'numpy.ma.MaskedArray', 'np.ma.MaskedArray', (['visible_pixels'], {'mask': '(visible_pixels == 0)'}), '(visible_pixels, mask=visible_pixels == 0)\n', (23994, 24036), True, 'import numpy as np\n'), ((24036, 24062), 'matplotlib.pyplot.imshow', 'plt.imshow', (['visible_pixels'], {}), '(visible_pixels)\n', (24046, 24062), True, 'import matplotlib.pyplot as plt\n'), ((24064, 24115), 'matplotlib.pyplot.imshow', 'plt.imshow', (['visible_pixels'], {'cmap': '"""jet_r"""', 'alpha': '(0.6)'}), "(visible_pixels, cmap='jet_r', alpha=0.6)\n", (24074, 24115), True, 'import matplotlib.pyplot as plt\n'), ((24270, 24280), 'IPython.display.display', 'display', (['f'], {}), '(f)\n', (24277, 24280), False, 'from IPython.display import display\n'), ((25733, 25780), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['results'], {'orient': '"""index"""'}), 
"(results, orient='index')\n", (25755, 25780), True, 'import pandas as pd\n'), ((26097, 26107), 'skimage.data.row', 'data.row', ([], {}), '()\n', (26105, 26107), False, 'from skimage import data, img_as_float\n'), ((26281, 26314), 'skimage.data.pixelslope.plot', 'data.pixelslope.plot', ([], {'kind': '"""hist"""'}), "(kind='hist')\n", (26301, 26314), False, 'from skimage import data, img_as_float\n'), ((28508, 28538), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 12)'}), '(figsize=(20, 12))\n', (28520, 28538), True, 'import matplotlib.pyplot as plt\n'), ((29184, 29193), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (29191, 29193), True, 'import matplotlib.pyplot as plt\n'), ((29466, 29493), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(24, 8)'}), '(figsize=(24, 8))\n', (29476, 29493), True, 'import matplotlib.pyplot as plt\n'), ((29502, 29678), 'mpl_toolkits.axes_grid1.AxesGrid', 'AxesGrid', (['fig', '(111)'], {'nrows_ncols': '(1, 3)', 'axes_pad': '(0.1)', 'label_mode': '"""1"""', 'share_all': '(True)', 'cbar_location': '"""top"""', 'cbar_mode': '"""each"""', 'cbar_size': '"""7%"""', 'cbar_pad': '"""2%"""', 'aspect': '(False)'}), "(fig, 111, nrows_ncols=(1, 3), axes_pad=0.1, label_mode='1',\n share_all=True, cbar_location='top', cbar_mode='each', cbar_size='7%',\n cbar_pad='2%', aspect=False)\n", (29510, 29678), False, 'from mpl_toolkits.axes_grid1 import AxesGrid\n'), ((30676, 30716), 'seaborn.set', 'sns.set', ([], {'style': '"""white"""', 'color_codes': '(True)'}), "(style='white', color_codes=True)\n", (30683, 30716), True, 'import seaborn as sns\n'), ((30718, 30743), 'seaborn.set_context', 'sns.set_context', (['"""poster"""'], {}), "('poster')\n", (30733, 30743), True, 'import seaborn as sns\n'), ((31096, 31105), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (31103, 31105), True, 'import matplotlib.pyplot as plt\n'), ((31253, 31381), 'seaborn.jointplot', 'sns.jointplot', (['"""landsat_ndvi"""', 
'"""phenocam_ndvi"""'], {'data': 'data', 'kind': '"""reg"""', 'xlim': '(0.1, 0.5)', 'ylim': '(0.1, 1.0)', 'color': '"""r"""', 'size': '(14)'}), "('landsat_ndvi', 'phenocam_ndvi', data=data, kind='reg', xlim=\n (0.1, 0.5), ylim=(0.1, 1.0), color='r', size=14)\n", (31266, 31381), True, 'import seaborn as sns\n'), ((31784, 31793), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (31791, 31793), True, 'import matplotlib.pyplot as plt\n'), ((32315, 32375), 'numpy.ma.masked_where', 'np.ma.masked_where', (['index_grid.mask[:, :, 0]', 'corrected_ndvi'], {}), '(index_grid.mask[:, :, 0], corrected_ndvi)\n', (32333, 32375), True, 'import numpy as np\n'), ((32393, 32452), 'numpy.ma.masked_where', 'np.ma.masked_where', (['index_grid.mask[:, :, 0]', 'mapped_output'], {}), '(index_grid.mask[:, :, 0], mapped_output)\n', (32411, 32452), True, 'import numpy as np\n'), ((32577, 32591), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (32586, 32591), True, 'import numpy as np\n'), ((32620, 32651), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(20, 12)'}), '(1, figsize=(20, 12))\n', (32630, 32651), True, 'import matplotlib.pyplot as plt\n'), ((32660, 32827), 'mpl_toolkits.axes_grid1.ImageGrid', 'ImageGrid', (['fig', '(111)'], {'nrows_ncols': '(1, 2)', 'axes_pad': '(0.5)', 'label_mode': '"""1"""', 'share_all': '(True)', 'cbar_location': '"""bottom"""', 'cbar_mode': '"""each"""', 'cbar_size': '"""7%"""', 'cbar_pad': '"""2%"""'}), "(fig, 111, nrows_ncols=(1, 2), axes_pad=0.5, label_mode='1',\n share_all=True, cbar_location='bottom', cbar_mode='each', cbar_size=\n '7%', cbar_pad='2%')\n", (32669, 32827), False, 'from mpl_toolkits.axes_grid1 import ImageGrid\n'), ((34584, 34627), 'os.path.join', 'os.path.join', (['output_dir', '"""MODIS"""', '"""subset"""'], {}), "(output_dir, 'MODIS', 'subset')\n", (34596, 34627), False, 'import os\n'), ((34650, 34695), 'os.path.join', 'os.path.join', (['modis_dname', '"""modis_subset.tif"""'], {}), "(modis_dname, 
'modis_subset.tif')\n", (34662, 34695), False, 'import os\n'), ((34722, 34755), 'rasterio.open', 'rasterio.open', (['modis_subset_fname'], {}), '(modis_subset_fname)\n', (34735, 34755), False, 'import rasterio\n'), ((34971, 34999), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (34981, 34999), True, 'import matplotlib.pyplot as plt\n'), ((35006, 35037), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': 'modis_proj'}), '(projection=modis_proj)\n', (35014, 35037), True, 'import matplotlib.pyplot as plt\n'), ((35442, 35493), 'os.path.join', 'os.path.join', (['modis_dname', '"""modis_subset_index.tif"""'], {}), "(modis_dname, 'modis_subset_index.tif')\n", (35454, 35493), False, 'import os\n'), ((35509, 35548), 'rasterio.open', 'rasterio.open', (['modis_index_subset_fname'], {}), '(modis_index_subset_fname)\n', (35522, 35548), False, 'import rasterio\n'), ((35646, 35672), 'numpy.zeros', 'np.zeros', (['modis_data.shape'], {}), '(modis_data.shape)\n', (35654, 35672), True, 'import numpy as np\n'), ((35711, 35739), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (35721, 35739), True, 'import matplotlib.pyplot as plt\n'), ((35746, 35777), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': 'modis_proj'}), '(projection=modis_proj)\n', (35754, 35777), True, 'import matplotlib.pyplot as plt\n'), ((36550, 36578), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (36560, 36578), True, 'import matplotlib.pyplot as plt\n'), ((36585, 36616), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': 'modis_proj'}), '(projection=modis_proj)\n', (36593, 36616), True, 'import matplotlib.pyplot as plt\n'), ((37808, 37836), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (37818, 37836), True, 'import matplotlib.pyplot as plt\n'), ((37843, 37876), 'matplotlib.pyplot.axes', 
'plt.axes', ([], {'projection': 'landsat_proj'}), '(projection=landsat_proj)\n', (37851, 37876), True, 'import matplotlib.pyplot as plt\n'), ((38508, 38602), 'shapely.geometry.Polygon', 'Polygon', (['((dem_ulx, dem_uly), (dem_urx, dem_ury), (dem_lrx, dem_lry), (dem_llx, dem_lly)\n )'], {}), '(((dem_ulx, dem_uly), (dem_urx, dem_ury), (dem_lrx, dem_lry), (\n dem_llx, dem_lly)))\n', (38515, 38602), False, 'from shapely.geometry import Polygon, mapping\n'), ((38608, 38677), 'rasterio.features.rasterize', 'rasterize', (['[poly]'], {'transform': 'elev.transform', 'out_shape': 'dem_data.shape'}), '([poly], transform=elev.transform, out_shape=dem_data.shape)\n', (38617, 38677), False, 'from rasterio.features import rasterize\n'), ((38813, 38841), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (38823, 38841), True, 'import matplotlib.pyplot as plt\n'), ((38848, 38881), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': 'landsat_proj'}), '(projection=landsat_proj)\n', (38856, 38881), True, 'import matplotlib.pyplot as plt\n'), ((39621, 39647), 'numpy.indices', 'np.indices', (['dem_data.shape'], {}), '(dem_data.shape)\n', (39631, 39647), True, 'import numpy as np\n'), ((39658, 39716), 'numpy.apply_along_axis', 'np.apply_along_axis', ([], {'func1d': 'rc2xy', 'arr': 'dem_indices', 'axis': '(0)'}), '(func1d=rc2xy, arr=dem_indices, axis=0)\n', (39677, 39716), True, 'import numpy as np\n'), ((39961, 39981), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dem_data'], {}), '(dem_data)\n', (39971, 39981), True, 'import matplotlib.pyplot as plt\n'), ((40021, 40037), 'numpy.indices', 'np.indices', (['data'], {}), '(data)\n', (40031, 40037), True, 'import numpy as np\n'), ((40274, 40292), 'statsmodels.api.add_constant', 'sm.add_constant', (['X'], {}), '(X)\n', (40289, 40292), True, 'import statsmodels.api as sm\n'), ((40298, 40312), 'skimage.data.flatten', 'data.flatten', ([], {}), '()\n', (40310, 40312), False, 'from skimage import 
data, img_as_float\n'), ((40320, 40342), 'numpy.random.random', 'np.random.random', (['nobs'], {}), '(nobs)\n', (40336, 40342), True, 'import numpy as np\n'), ((40497, 40523), 'numpy.random.random', 'np.random.random', (['(100, 2)'], {}), '((100, 2))\n', (40513, 40523), True, 'import numpy as np\n'), ((40551, 40565), 'skimage.data.flatten', 'data.flatten', ([], {}), '()\n', (40563, 40565), False, 'from skimage import data, img_as_float\n'), ((40812, 40826), 'skimage.data.flatten', 'data.flatten', ([], {}), '()\n', (40824, 40826), False, 'from skimage import data, img_as_float\n'), ((40885, 40903), 'statsmodels.api.add_constant', 'sm.add_constant', (['X'], {}), '(X)\n', (40900, 40903), True, 'import statsmodels.api as sm\n'), ((41203, 41230), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (41213, 41230), True, 'import matplotlib.pyplot as plt\n'), ((41237, 41268), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {'azim': '(-115)', 'elev': '(15)'}), '(fig, azim=-115, elev=15)\n', (41243, 41268), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((42698, 42757), 'math.sqrt', 'math.sqrt', (['((x2 - x1) ** 2 + (y2 - y1) ** 2 + (z2 - z1) ** 2)'], {}), '((x2 - x1) ** 2 + (y2 - y1) ** 2 + (z2 - z1) ** 2)\n', (42707, 42757), False, 'import math\n'), ((42961, 42976), 'skimage.data.describe', 'data.describe', ([], {}), '()\n', (42974, 42976), False, 'from skimage import data, img_as_float\n'), ((43117, 43167), 'os.path.join', 'os.path.join', (['base_dname', '"""Landsat"""', '"""SceneSubset"""'], {}), "(base_dname, 'Landsat', 'SceneSubset')\n", (43129, 43167), False, 'import os\n'), ((46967, 46984), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (46981, 46984), True, 'import numpy as np\n'), ((47397, 47488), 'sklearn.gaussian_process.GaussianProcess', 'GaussianProcess', ([], {'corr': '"""cubic"""', 'theta0': '(0.01)', 'thetaL': '(0.0001)', 'thetaU': '(0.1)', 'random_start': '(100)'}), "(corr='cubic', 
theta0=0.01, thetaL=0.0001, thetaU=0.1,\n random_start=100)\n", (47412, 47488), False, 'from sklearn.gaussian_process import GaussianProcess\n'), ((47712, 47724), 'numpy.sqrt', 'np.sqrt', (['MSE'], {}), '(MSE)\n', (47719, 47724), True, 'import numpy as np\n'), ((48824, 48947), 'sklearn.gaussian_process.GaussianProcess', 'GaussianProcess', ([], {'corr': '"""squared_exponential"""', 'theta0': '(0.1)', 'thetaL': '(0.001)', 'thetaU': '(1)', 'nugget': '((dy / y) ** 2)', 'random_start': '(100)'}), "(corr='squared_exponential', theta0=0.1, thetaL=0.001,\n thetaU=1, nugget=(dy / y) ** 2, random_start=100)\n", (48839, 48947), False, 'from sklearn.gaussian_process import GaussianProcess\n'), ((49216, 49228), 'numpy.sqrt', 'np.sqrt', (['MSE'], {}), '(MSE)\n', (49223, 49228), True, 'import numpy as np\n'), ((49327, 49338), 'matplotlib.pyplot.figure', 'pl.figure', ([], {}), '()\n', (49336, 49338), True, 'from matplotlib import pyplot as pl\n'), ((49471, 49515), 'matplotlib.pyplot.plot', 'pl.plot', (['x', 'y_pred', '"""b-"""'], {'label': '"""Prediction"""'}), "(x, y_pred, 'b-', label='Prediction')\n", (49478, 49515), True, 'from matplotlib import pyplot as pl\n'), ((49736, 49752), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (49745, 49752), True, 'from matplotlib import pyplot as pl\n'), ((49754, 49773), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""$f(x)$"""'], {}), "('$f(x)$')\n", (49763, 49773), True, 'from matplotlib import pyplot as pl\n'), ((49775, 49791), 'matplotlib.pyplot.ylim', 'pl.ylim', (['(-10)', '(20)'], {}), '(-10, 20)\n', (49782, 49791), True, 'from matplotlib import pyplot as pl\n'), ((49793, 49820), 'matplotlib.pyplot.legend', 'pl.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (49802, 49820), True, 'from matplotlib import pyplot as pl\n'), ((49824, 49833), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (49831, 49833), True, 'from matplotlib import pyplot as pl\n'), ((50233, 50346), 
'sklearn.gaussian_process.GaussianProcess', 'GaussianProcess', ([], {'corr': '"""squared_exponential"""', 'theta0': '(0.1)', 'thetaL': '(0.001)', 'thetaU': '(1)', 'nugget': '(0.1)', 'random_start': '(100)'}), "(corr='squared_exponential', theta0=0.1, thetaL=0.001,\n thetaU=1, nugget=0.1, random_start=100)\n", (50248, 50346), False, 'from sklearn.gaussian_process import GaussianProcess\n'), ((50615, 50627), 'numpy.sqrt', 'np.sqrt', (['MSE'], {}), '(MSE)\n', (50622, 50627), True, 'import numpy as np\n'), ((50726, 50737), 'matplotlib.pyplot.figure', 'pl.figure', ([], {}), '()\n', (50735, 50737), True, 'from matplotlib import pyplot as pl\n'), ((50870, 50914), 'matplotlib.pyplot.plot', 'pl.plot', (['x', 'y_pred', '"""b-"""'], {'label': '"""Prediction"""'}), "(x, y_pred, 'b-', label='Prediction')\n", (50877, 50914), True, 'from matplotlib import pyplot as pl\n'), ((51135, 51151), 'matplotlib.pyplot.xlabel', 'pl.xlabel', (['"""$x$"""'], {}), "('$x$')\n", (51144, 51151), True, 'from matplotlib import pyplot as pl\n'), ((51153, 51172), 'matplotlib.pyplot.ylabel', 'pl.ylabel', (['"""$f(x)$"""'], {}), "('$f(x)$')\n", (51162, 51172), True, 'from matplotlib import pyplot as pl\n'), ((51174, 51190), 'matplotlib.pyplot.ylim', 'pl.ylim', (['(-10)', '(20)'], {}), '(-10, 20)\n', (51181, 51190), True, 'from matplotlib import pyplot as pl\n'), ((51192, 51219), 'matplotlib.pyplot.legend', 'pl.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (51201, 51219), True, 'from matplotlib import pyplot as pl\n'), ((51223, 51232), 'matplotlib.pyplot.show', 'pl.show', ([], {}), '()\n', (51230, 51232), True, 'from matplotlib import pyplot as pl\n'), ((51252, 51287), 'skimage.data.to_pickle', 'data.to_pickle', (['"""c:\\\\temp\\\\data.pd"""'], {}), "('c:\\\\temp\\\\data.pd')\n", (51266, 51287), False, 'from skimage import data, img_as_float\n'), ((51334, 51353), 'sys.getsizeof', 'sys.getsizeof', (['data'], {}), '(data)\n', (51347, 51353), False, 'import sys\n'), ((2768, 2796), 
'affine.Affine.translation', 'Affine.translation', (['(0.5)', '(0.5)'], {}), '(0.5, 0.5)\n', (2786, 2796), False, 'from affine import Affine\n'), ((3410, 3420), 'numpy.mean', 'mean', (['xlim'], {}), '(xlim)\n', (3414, 3420), False, 'from numpy import mean\n'), ((3434, 3444), 'numpy.mean', 'mean', (['ylim'], {}), '(ylim)\n', (3438, 3444), False, 'from numpy import mean\n'), ((3458, 3468), 'numpy.mean', 'mean', (['zlim'], {}), '(zlim)\n', (3462, 3468), False, 'from numpy import mean\n'), ((4537, 4558), 'numpy.degrees', 'np.degrees', (['max_slope'], {}), '(max_slope)\n', (4547, 4558), True, 'import numpy as np\n'), ((5080, 5094), 'skimage.data.flatten', 'data.flatten', ([], {}), '()\n', (5092, 5094), False, 'from skimage import data, img_as_float\n'), ((5106, 5124), 'statsmodels.api.add_constant', 'sm.add_constant', (['X'], {}), '(X)\n', (5121, 5124), True, 'import statsmodels.api as sm\n'), ((9272, 9305), 'datetime.datetime', 'datetime.datetime', (['year', '(1)', '(1)', '(12)'], {}), '(year, 1, 1, 12)\n', (9289, 9305), False, 'import datetime\n'), ((9308, 9332), 'datetime.timedelta', 'datetime.timedelta', (['jday'], {}), '(jday)\n', (9326, 9332), False, 'import datetime\n'), ((10564, 10591), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 8)'}), '(figsize=(18, 8))\n', (10574, 10591), True, 'import matplotlib.pyplot as plt\n'), ((10661, 10702), 'pyphenocam.plotting.format_photo_axes', 'pyphenocam.plotting.format_photo_axes', (['ax'], {}), '(ax)\n', (10698, 10702), False, 'import pyphenocam\n'), ((10764, 10806), 'pyphenocam.plotting.format_photo_axes', 'pyphenocam.plotting.format_photo_axes', (['ax2'], {}), '(ax2)\n', (10801, 10806), False, 'import pyphenocam\n'), ((12144, 12244), 'skimage.transform.resize', 'trans.resize', (['single_pixel', '(sample_image.shape[0], sample_image.shape[1])'], {'preserve_range': '(False)'}), '(single_pixel, (sample_image.shape[0], sample_image.shape[1]),\n preserve_range=False)\n', (12156, 12244), True, 'import 
skimage.transform as trans\n'), ((13191, 13206), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (13204, 13206), True, 'import cartopy.crs as ccrs\n'), ((14394, 14408), 'skimage.data.flatten', 'data.flatten', ([], {}), '()\n', (14406, 14408), False, 'from skimage import data, img_as_float\n'), ((14420, 14438), 'statsmodels.api.add_constant', 'sm.add_constant', (['X'], {}), '(X)\n', (14435, 14438), True, 'import statsmodels.api as sm\n'), ((14890, 14949), 'math.sqrt', 'math.sqrt', (['((x2 - x1) ** 2 + (y2 - y1) ** 2 + (z2 - z1) ** 2)'], {}), '((x2 - x1) ** 2 + (y2 - y1) ** 2 + (z2 - z1) ** 2)\n', (14899, 14949), False, 'import math\n'), ((15280, 15294), 'skimage.data.flatten', 'data.flatten', ([], {}), '()\n', (15292, 15294), False, 'from skimage import data, img_as_float\n'), ((15306, 15324), 'statsmodels.api.add_constant', 'sm.add_constant', (['X'], {}), '(X)\n', (15321, 15324), True, 'import statsmodels.api as sm\n'), ((17075, 17161), 'numpy.logical_and', 'np.logical_and', (['(index_grid[:, :, 0] == col_index)', '(index_grid[:, :, 1] == row_index)'], {}), '(index_grid[:, :, 0] == col_index, index_grid[:, :, 1] ==\n row_index)\n', (17089, 17161), True, 'import numpy as np\n'), ((17370, 17398), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(25, 15)'}), '(figsize=(25, 15))\n', (17380, 17398), True, 'import matplotlib.pyplot as plt\n'), ((17409, 17425), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (17420, 17425), True, 'import matplotlib.pyplot as plt\n'), ((17543, 17584), 'pyphenocam.plotting.format_photo_axes', 'pyphenocam.plotting.format_photo_axes', (['ax'], {}), '(ax)\n', (17580, 17584), False, 'import pyphenocam\n'), ((17626, 17662), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {'projection': 'ax_proj'}), '(132, projection=ax_proj)\n', (17637, 17662), True, 'import matplotlib.pyplot as plt\n'), ((18948, 18966), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18964, 18966), 
True, 'import matplotlib.pyplot as plt\n'), ((20026, 20105), 'cartopy.io.shapereader.natural_earth', 'shpreader.natural_earth', ([], {'resolution': '"""110m"""', 'category': '"""cultural"""', 'name': 'shapename'}), "(resolution='110m', category='cultural', name=shapename)\n", (20049, 20105), True, 'import cartopy.io.shapereader as shpreader\n'), ((20566, 20674), 'matplotlib.pyplot.plot', 'plt.plot', (['landsat_extents[0]', 'landsat_extents[2]', '"""*"""'], {'markersize': '(15)', 'color': '"""r"""', 'transform': 'landsat_proj'}), "(landsat_extents[0], landsat_extents[2], '*', markersize=15, color=\n 'r', transform=landsat_proj)\n", (20574, 20674), True, 'import matplotlib.pyplot as plt\n'), ((22561, 22657), 'os.path.join', 'os.path.join', (['"""J:\\\\Projects\\\\NCCSC\\\\phenocam\\\\Doc\\\\Presentation"""', '"""VisibleLandsatPixels.jpg"""'], {}), "('J:\\\\Projects\\\\NCCSC\\\\phenocam\\\\Doc\\\\Presentation',\n 'VisibleLandsatPixels.jpg')\n", (22573, 22657), False, 'import os\n'), ((24429, 24499), 'numpy.logical_and', 'np.logical_and', (['(index_grid[:, :, 0] == col)', '(index_grid[:, :, 1] == row)'], {}), '(index_grid[:, :, 0] == col, index_grid[:, :, 1] == row)\n', (24443, 24499), True, 'import numpy as np\n'), ((24675, 24705), 'numpy.count_nonzero', 'np.count_nonzero', (['single_pixel'], {}), '(single_pixel)\n', (24691, 24705), True, 'import numpy as np\n'), ((26041, 26094), 'os.path.join', 'os.path.join', (['base_dname', "(scene_name + '_results.csv')"], {}), "(base_dname, scene_name + '_results.csv')\n", (26053, 26094), False, 'import os\n'), ((26165, 26218), 'os.path.join', 'os.path.join', (['base_dname', "(scene_name + '_results.csv')"], {}), "(base_dname, scene_name + '_results.csv')\n", (26177, 26218), False, 'import os\n'), ((26343, 26368), 'skimage.data.phenocam_ndvi.mean', 'data.phenocam_ndvi.mean', ([], {}), '()\n', (26366, 26368), False, 'from skimage import data, img_as_float\n'), ((26371, 26395), 'skimage.data.landsat_ndvi.mean', 
'data.landsat_ndvi.mean', ([], {}), '()\n', (26393, 26395), False, 'from skimage import data, img_as_float\n'), ((27630, 27659), 'numpy.linspace', 'np.linspace', (['start', 'stop', '(257)'], {}), '(start, stop, 257)\n', (27641, 27659), True, 'import numpy as np\n'), ((28124, 28171), 'matplotlib.colors.LinearSegmentedColormap', 'mpl.colors.LinearSegmentedColormap', (['name', 'cdict'], {}), '(name, cdict)\n', (28158, 28171), True, 'import matplotlib as mpl\n'), ((28177, 28208), 'matplotlib.pyplot.register_cmap', 'plt.register_cmap', ([], {'cmap': 'newcmap'}), '(cmap=newcmap)\n', (28194, 28208), True, 'import matplotlib.pyplot as plt\n'), ((29207, 29313), 'os.path.join', 'os.path.join', (['"""J:\\\\Projects\\\\NCCSC\\\\phenocam\\\\Doc\\\\Presentation"""', '"""CorrelationAllPoints_residuals.jpg"""'], {}), "('J:\\\\Projects\\\\NCCSC\\\\phenocam\\\\Doc\\\\Presentation',\n 'CorrelationAllPoints_residuals.jpg')\n", (29219, 29313), False, 'import os\n'), ((31119, 31228), 'os.path.join', 'os.path.join', (['"""J:\\\\Projects\\\\NCCSC\\\\phenocam\\\\Doc\\\\Presentation"""', '"""CorrelationAllPoints_big_pix_data.jpg"""'], {}), "('J:\\\\Projects\\\\NCCSC\\\\phenocam\\\\Doc\\\\Presentation',\n 'CorrelationAllPoints_big_pix_data.jpg')\n", (31131, 31228), False, 'import os\n'), ((32160, 32275), 'os.path.join', 'os.path.join', (['"""J:\\\\Projects\\\\NCCSC\\\\phenocam\\\\Doc\\\\Presentation"""', '"""CorrelationAllPoints_wDist_big_pix_data.jpg"""'], {}), "('J:\\\\Projects\\\\NCCSC\\\\phenocam\\\\Doc\\\\Presentation',\n 'CorrelationAllPoints_wDist_big_pix_data.jpg')\n", (32172, 32275), False, 'import os\n'), ((33522, 33568), 'pyphenocam.plotting.format_photo_axes', 'pyphenocam.plotting.format_photo_axes', (['grid[i]'], {}), '(grid[i])\n', (33559, 33568), False, 'import pyphenocam\n'), ((34051, 34140), 'os.path.join', 'os.path.join', (['"""J:\\\\Projects\\\\NCCSC\\\\phenocam\\\\Doc\\\\Presentation"""', '"""MappedLandsat.jpg"""'], {}), 
"('J:\\\\Projects\\\\NCCSC\\\\phenocam\\\\Doc\\\\Presentation',\n 'MappedLandsat.jpg')\n", (34063, 34140), False, 'import os\n'), ((36218, 36242), 'affine.Affine.translation', 'Affine.translation', (['(0)', '(0)'], {}), '(0, 0)\n', (36236, 36242), False, 'from affine import Affine\n'), ((39523, 39547), 'affine.Affine.translation', 'Affine.translation', (['(0)', '(0)'], {}), '(0, 0)\n', (39541, 39547), False, 'from affine import Affine\n'), ((40348, 40363), 'numpy.dot', 'np.dot', (['X', 'beta'], {}), '(X, beta)\n', (40354, 40363), True, 'import numpy as np\n'), ((42803, 42834), 'math.asin', 'math.asin', (['((z2 - z1) / distance)'], {}), '((z2 - z1) / distance)\n', (42812, 42834), False, 'import math\n'), ((42884, 42912), 'math.atan2', 'math.atan2', (['(x2 - x1)', '(y2 - y1)'], {}), '(x2 - x1, y2 - y1)\n', (42894, 42912), False, 'import math\n'), ((44073, 44132), 'numpy.ma.masked_where', 'np.ma.masked_where', (['index_grid.mask[:, :, 0]', 'mapped_output'], {}), '(index_grid.mask[:, :, 0], mapped_output)\n', (44091, 44132), True, 'import numpy as np\n'), ((44370, 44402), 'os.path.join', 'os.path.join', (['scene_dname', 'fname'], {}), '(scene_dname, fname)\n', (44382, 44402), False, 'import os\n'), ((44418, 44446), 'rasterio.open', 'rasterio.open', (['landsat_fname'], {}), '(landsat_fname)\n', (44431, 44446), False, 'import rasterio\n'), ((44720, 44755), 'os.path.join', 'os.path.join', (['scene_dname', '"""mapped"""'], {}), "(scene_dname, 'mapped')\n", (44732, 44755), False, 'import os\n'), ((45618, 45650), 'os.path.join', 'os.path.join', (['scene_dname', 'fname'], {}), '(scene_dname, fname)\n', (45630, 45650), False, 'import os\n'), ((45692, 45720), 'rasterio.open', 'rasterio.open', (['landsat_fname'], {}), '(landsat_fname)\n', (45705, 45720), False, 'import rasterio\n'), ((46146, 46178), 'os.rename', 'os.rename', (['out_fname', 'out_fname2'], {}), '(out_fname, out_fname2)\n', (46155, 46178), False, 'import os\n'), ((46322, 46357), 'os.path.join', 'os.path.join', 
(['scene_dname', '"""mapped"""'], {}), "(scene_dname, 'mapped')\n", (46334, 46357), False, 'import os\n'), ((46447, 46489), 'os.path.join', 'os.path.join', (['scene_dname', '"""mapped"""', 'fname'], {}), "(scene_dname, 'mapped', fname)\n", (46459, 46489), False, 'import os\n'), ((46561, 46589), 'rasterio.open', 'rasterio.open', (['landsat_fname'], {}), '(landsat_fname)\n', (46574, 46589), False, 'import rasterio\n'), ((47330, 47346), 'numpy.atleast_2d', 'np.atleast_2d', (['X'], {}), '(X)\n', (47343, 47346), True, 'import numpy as np\n'), ((49525, 49553), 'numpy.concatenate', 'np.concatenate', (['[x, x[::-1]]'], {}), '([x, x[::-1]])\n', (49539, 49553), True, 'import numpy as np\n'), ((49564, 49634), 'numpy.concatenate', 'np.concatenate', (['[y_pred - 1.96 * sigma, (y_pred + 1.96 * sigma)[::-1]]'], {}), '([y_pred - 1.96 * sigma, (y_pred + 1.96 * sigma)[::-1]])\n', (49578, 49634), True, 'import numpy as np\n'), ((49880, 49896), 'numpy.atleast_2d', 'np.atleast_2d', (['X'], {}), '(X)\n', (49893, 49896), True, 'import numpy as np\n'), ((50924, 50952), 'numpy.concatenate', 'np.concatenate', (['[x, x[::-1]]'], {}), '([x, x[::-1]])\n', (50938, 50952), True, 'import numpy as np\n'), ((50963, 51033), 'numpy.concatenate', 'np.concatenate', (['[y_pred - 1.96 * sigma, (y_pred + 1.96 * sigma)[::-1]]'], {}), '([y_pred - 1.96 * sigma, (y_pred + 1.96 * sigma)[::-1]])\n', (50977, 51033), True, 'import numpy as np\n'), ((1497, 1539), 'cartopy.crs.Globe', 'ccrs.Globe', ([], {'datum': '"""WGS84"""', 'ellipse': '"""WGS84"""'}), "(datum='WGS84', ellipse='WGS84')\n", (1507, 1539), True, 'import cartopy.crs as ccrs\n'), ((4117, 4148), 'numpy.arctan2', 'np.arctan2', (['yslope', '(-1 * xslope)'], {}), '(yslope, -1 * xslope)\n', (4127, 4148), True, 'import numpy as np\n'), ((6354, 6412), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['(center_x, center_y)', '(fall_x, fall_y)'], {}), '((center_x, center_y), (fall_x, fall_y))\n', (6372, 6412), False, 'from scipy.spatial import 
distance\n'), ((7369, 7381), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'X'], {}), '(y, X)\n', (7375, 7381), True, 'import statsmodels.api as sm\n'), ((8417, 8435), 'numpy.deg2rad', 'np.deg2rad', (['aspect'], {}), '(aspect)\n', (8427, 8435), True, 'import numpy as np\n'), ((8455, 8473), 'numpy.deg2rad', 'np.deg2rad', (['aspect'], {}), '(aspect)\n', (8465, 8473), True, 'import numpy as np\n'), ((8657, 8674), 'numpy.deg2rad', 'np.deg2rad', (['slope'], {}), '(slope)\n', (8667, 8674), True, 'import numpy as np\n'), ((12767, 12809), 'cartopy.crs.Globe', 'ccrs.Globe', ([], {'datum': '"""WGS84"""', 'ellipse': '"""WGS84"""'}), "(datum='WGS84', ellipse='WGS84')\n", (12777, 12809), True, 'import cartopy.crs as ccrs\n'), ((15002, 15030), 'math.atan2', 'math.atan2', (['(x2 - x1)', '(y2 - y1)'], {}), '(x2 - x1, y2 - y1)\n', (15012, 15030), False, 'import math\n'), ((15103, 15130), 'math.asin', 'math.asin', (['((z2 - z1) / dist)'], {}), '((z2 - z1) / dist)\n', (15112, 15130), False, 'import math\n'), ((16566, 16624), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['(center_x, center_y)', '(fall_x, fall_y)'], {}), '((center_x, center_y), (fall_x, fall_y))\n', (16584, 16624), False, 'from scipy.spatial import distance\n'), ((17186, 17286), 'skimage.transform.resize', 'trans.resize', (['single_pixel', '(sample_image.shape[0], sample_image.shape[1])'], {'preserve_range': '(False)'}), '(single_pixel, (sample_image.shape[0], sample_image.shape[1]),\n preserve_range=False)\n', (17198, 17286), True, 'import skimage.transform as trans\n'), ((17794, 17809), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (17807, 17809), True, 'import cartopy.crs as ccrs\n'), ((18984, 19082), 'os.path.join', 'os.path.join', (['"""J:\\\\Projects\\\\NCCSC\\\\phenocam\\\\Doc\\\\Presentation"""', '"""calculatingSlopeAspect.jpg"""'], {}), "('J:\\\\Projects\\\\NCCSC\\\\phenocam\\\\Doc\\\\Presentation',\n 'calculatingSlopeAspect.jpg')\n", (18996, 19082), False, 'import os\n'), ((19301, 19331), 
'numpy.array', 'np.array', (['index_grid[:, :, :2]'], {}), '(index_grid[:, :, :2])\n', (19309, 19331), True, 'import numpy as np\n'), ((19474, 19505), 'numpy.squeeze', 'np.squeeze', (['index_grid[:, :, 1]'], {}), '(index_grid[:, :, 1])\n', (19484, 19505), True, 'import numpy as np\n'), ((19935, 19950), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (19948, 19950), True, 'import cartopy.crs as ccrs\n'), ((23173, 23188), 'cartopy.crs.Mercator', 'ccrs.Mercator', ([], {}), '()\n', (23186, 23188), True, 'import cartopy.crs as ccrs\n'), ((24526, 24626), 'skimage.transform.resize', 'trans.resize', (['single_pixel', '(sample_image.shape[0], sample_image.shape[1])'], {'preserve_range': '(False)'}), '(single_pixel, (sample_image.shape[0], sample_image.shape[1]),\n preserve_range=False)\n', (24538, 24626), True, 'import skimage.transform as trans\n'), ((24937, 24984), 'numpy.nanmean', 'np.nanmean', (['corrected_ndvi[single_pixel > 0.95]'], {}), '(corrected_ndvi[single_pixel > 0.95])\n', (24947, 24984), True, 'import numpy as np\n'), ((25015, 25064), 'numpy.nanmedian', 'np.nanmedian', (['corrected_ndvi[single_pixel > 0.95]'], {}), '(corrected_ndvi[single_pixel > 0.95])\n', (25027, 25064), True, 'import numpy as np\n'), ((40405, 40417), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'X'], {}), '(y, X)\n', (40411, 40417), True, 'import statsmodels.api as sm\n'), ((40911, 40923), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'X'], {}), '(y, X)\n', (40917, 40923), True, 'import statsmodels.api as sm\n'), ((43195, 43218), 'os.listdir', 'os.listdir', (['scene_dname'], {}), '(scene_dname)\n', (43205, 43218), False, 'import os\n'), ((43261, 43284), 'os.listdir', 'os.listdir', (['scene_dname'], {}), '(scene_dname)\n', (43271, 43284), False, 'import os\n'), ((43473, 43543), 'numpy.logical_and', 'np.logical_and', (['(index_grid[:, :, 0] == col)', '(index_grid[:, :, 1] == row)'], {}), '(index_grid[:, :, 0] == col, index_grid[:, :, 1] == row)\n', (43487, 43543), True, 'import numpy as 
np\n'), ((43723, 43753), 'numpy.count_nonzero', 'np.count_nonzero', (['single_pixel'], {}), '(single_pixel)\n', (43739, 43753), True, 'import numpy as np\n'), ((44282, 44315), 'datetime.datetime', 'datetime.datetime', (['year', '(1)', '(1)', '(12)'], {}), '(year, 1, 1, 12)\n', (44299, 44315), False, 'import datetime\n'), ((44318, 44342), 'datetime.timedelta', 'datetime.timedelta', (['jday'], {}), '(jday)\n', (44336, 44342), False, 'import datetime\n'), ((44667, 44695), 'os.path.split', 'os.path.split', (['landsat_fname'], {}), '(landsat_fname)\n', (44680, 44695), False, 'import os\n'), ((44768, 44796), 'os.path.exists', 'os.path.exists', (['mapped_dname'], {}), '(mapped_dname)\n', (44782, 44796), False, 'import os\n'), ((44807, 44832), 'os.makedirs', 'os.makedirs', (['mapped_dname'], {}), '(mapped_dname)\n', (44818, 44832), False, 'import os\n'), ((44972, 44997), 'os.path.exists', 'os.path.exists', (['out_fname'], {}), '(out_fname)\n', (44986, 44997), False, 'import os\n'), ((45142, 45166), 'numpy.save', 'np.save', (['out_fname', 'data'], {}), '(out_fname, data)\n', (45149, 45166), True, 'import numpy as np\n'), ((45530, 45563), 'datetime.datetime', 'datetime.datetime', (['year', '(1)', '(1)', '(12)'], {}), '(year, 1, 1, 12)\n', (45547, 45563), False, 'import datetime\n'), ((45566, 45590), 'datetime.timedelta', 'datetime.timedelta', (['jday'], {}), '(jday)\n', (45584, 45590), False, 'import datetime\n'), ((45868, 45896), 'os.path.split', 'os.path.split', (['landsat_fname'], {}), '(landsat_fname)\n', (45881, 45896), False, 'import os\n'), ((46670, 46689), 'numpy.load', 'np.load', (['fname_full'], {}), '(fname_full)\n', (46677, 46689), True, 'import numpy as np\n'), ((47052, 47061), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (47058, 47061), True, 'import numpy as np\n'), ((49969, 49994), 'numpy.random.random', 'np.random.random', (['y.shape'], {}), '(y.shape)\n', (49985, 49994), True, 'import numpy as np\n'), ((50157, 50181), 'numpy.linspace', 'np.linspace', 
(['(-1)', '(1)', '(1000)'], {}), '(-1, 1, 1000)\n', (50168, 50181), True, 'import numpy as np\n'), ((985, 1010), 'os.listdir', 'os.listdir', (['landsat_dname'], {}), '(landsat_dname)\n', (995, 1010), False, 'import os\n'), ((5136, 5148), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'X'], {}), '(y, X)\n', (5142, 5148), True, 'import statsmodels.api as sm\n'), ((14450, 14462), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'X'], {}), '(y, X)\n', (14456, 14462), True, 'import statsmodels.api as sm\n'), ((15336, 15348), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'X'], {}), '(y, X)\n', (15342, 15348), True, 'import statsmodels.api as sm\n'), ((19860, 19883), 'cartopy.crs.LambertConformal', 'ccrs.LambertConformal', ([], {}), '()\n', (19881, 19883), True, 'import cartopy.crs as ccrs\n'), ((20168, 20196), 'cartopy.io.shapereader.Reader', 'shpreader.Reader', (['states_shp'], {}), '(states_shp)\n', (20184, 20196), True, 'import cartopy.io.shapereader as shpreader\n'), ((20470, 20488), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (20486, 20488), True, 'import cartopy.crs as ccrs\n'), ((27741, 27788), 'numpy.linspace', 'np.linspace', (['(0.0)', 'midpoint', '(128)'], {'endpoint': '(False)'}), '(0.0, midpoint, 128, endpoint=False)\n', (27752, 27788), True, 'import numpy as np\n'), ((27800, 27846), 'numpy.linspace', 'np.linspace', (['midpoint', '(1.0)', '(129)'], {'endpoint': '(True)'}), '(midpoint, 1.0, 129, endpoint=True)\n', (27811, 27846), True, 'import numpy as np\n'), ((43383, 43414), 'numpy.squeeze', 'np.squeeze', (['index_grid[:, :, 1]'], {}), '(index_grid[:, :, 1])\n', (43393, 43414), True, 'import numpy as np\n'), ((43574, 43674), 'skimage.transform.resize', 'trans.resize', (['single_pixel', '(sample_image.shape[0], sample_image.shape[1])'], {'preserve_range': '(False)'}), '(single_pixel, (sample_image.shape[0], sample_image.shape[1]),\n preserve_range=False)\n', (43586, 43674), True, 'import skimage.transform as trans\n'), ((1242, 1270), 'os.path.split', 
'os.path.split', (['landsat_fname'], {}), '(landsat_fname)\n', (1255, 1270), False, 'import os\n'), ((6205, 6223), 'numpy.deg2rad', 'np.deg2rad', (['aspect'], {}), '(aspect)\n', (6215, 6223), True, 'import numpy as np\n'), ((6251, 6269), 'numpy.deg2rad', 'np.deg2rad', (['aspect'], {}), '(aspect)\n', (6261, 6269), True, 'import numpy as np\n'), ((16417, 16435), 'numpy.deg2rad', 'np.deg2rad', (['aspect'], {}), '(aspect)\n', (16427, 16435), True, 'import numpy as np\n'), ((16463, 16481), 'numpy.deg2rad', 'np.deg2rad', (['aspect'], {}), '(aspect)\n', (16473, 16481), True, 'import numpy as np\n'), ((28728, 28747), 'skimage.data.distance.max', 'data.distance.max', ([], {}), '()\n', (28745, 28747), False, 'from skimage import data, img_as_float\n'), ((28707, 28726), 'skimage.data.distance.min', 'data.distance.min', ([], {}), '()\n', (28724, 28726), False, 'from skimage import data, img_as_float\n')] |
import gc
import glob
import math
import numpy as np
import os
import pandas as pd
import sys
def file_is_empty(path):
    """Return True iff *path* exists and has zero size.

    Uses a single EAFP ``os.stat`` call instead of the original
    ``exists()``-then-``stat()`` pair, which could race if the file was
    removed between the two checks. A missing (or otherwise unstatable)
    path yields False, matching the original behavior.
    """
    try:
        return os.stat(path).st_size == 0
    except OSError:
        return False
def open_capture(capture_path, metric_name, unwanted_labels):
    """Stream one capture file as filtered DataFrame chunks.

    The JSON-lines file is read in fixed-size chunks; each chunk is
    narrowed to rows matching *metric_name* and stripped of the
    *unwanted_labels* columns before being yielded. Garbage collection
    runs between yielded chunks to keep peak memory low.
    """
    print("[open_capture] reading: {}".format(capture_path), file=sys.stderr)
    with pd.read_json(capture_path, lines=True, chunksize=16384) as reader:
        for raw in reader:
            # Shed rows and columns as early as possible so very little
            # of the file is ever resident in memory at once.
            wanted = raw.loc[raw['metric_name'] == metric_name]
            wanted = wanted.drop(columns=unwanted_labels)
            if not wanted.empty:
                yield wanted
                gc.collect()
def open_captures(capture_dir, metric_name, unwanted_labels):
    """Yield one filtered DataFrame per non-empty capture file.

    Recursively discovers ``*.captures`` files under *capture_dir*; empty
    files are reported on stderr and skipped. Each yielded frame is the
    concatenation of that file's filtered chunks. The capture files from
    our experiments can be large relative to CI machine memory, so the
    heavy lifting (chunked reads, column shedding, GC) happens inside
    open_capture().
    """
    pattern = os.path.join(capture_dir, "**/*.captures")
    for path in glob.glob(pattern, recursive=True):
        if file_is_empty(path):
            print("[open_captures] encountered empty capture file, skipping: {}".format(path), file=sys.stderr)
            continue
        chunks = open_capture(path, metric_name, unwanted_labels)
        yield pd.concat(chunks)
def compute_throughput(captures, **kwargs):
    """Yield each capture frame with per-CPU values and a throughput column.

    Keyword args:
        cpus: CPU count to normalize ``value`` by (default 1).

    ``value`` is scaled down to a per-CPU unit, then ``throughput`` is
    derived by central finite difference over ``fetch_index`` — lading's
    fetches are 1000 milliseconds apart, with ``fetch_index`` marking each
    poll — giving bytes/second/cpu.
    """
    num_cpus = kwargs.get('cpus', 1)
    for frame in captures:
        frame['value'] = frame['value'] / num_cpus
        frame['throughput'] = np.gradient(frame['value'], frame['fetch_index'])
        yield frame
def human_bytes(b):
    """Format a byte count as a human-readable binary-unit string.

    Examples: 0 -> "0B", 1536 -> "1.5KiB", -2048 -> "-2.0KiB".
    Magnitudes below one byte collapse to "0B"; magnitudes beyond YiB are
    expressed in YiB instead of raising (bug fix: the original indexed
    past the end of the unit table for b >= 1024**9).
    """
    is_negative = False
    if b < 0:
        is_negative = True
        b = -b
    # b is non-negative from here on, so the original `and b >= 0` guard
    # was redundant.
    if b < 1:
        return "0B"
    names = ("B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB")
    # Largest unit not exceeding b, clamped to the table's last entry.
    i = min(int(math.floor(math.log(b, 1024))), len(names) - 1)
    p = math.pow(1024, i)
    s = round(b / p, 2)
    if is_negative:
        s = -s
    return "%s%s" % (s, names[i])
def total_outliers(df):
    """Count values in df['value'] falling outside Tukey's fences.

    A value is an outlier when it lies more than 1.5 times the
    interquartile range below Q1 or above Q3.
    """
    values = df['value']
    q1 = values.quantile(0.25)
    q3 = values.quantile(0.75)
    fence = 1.5 * (q3 - q1)
    lower, upper = q1 - fence, q3 + fence
    # Vectorized strict-outside test; summing the boolean mask counts hits.
    return ((values < lower) | (values > upper)).sum()
def confidence(p):
    """Render a p-value threshold as a confidence percentage string.

    e.g. confidence(0.05) -> '95.00%'.
    """
    return "{:.2f}%".format((1.0 - p) * 100)
| [
"math.pow",
"os.stat",
"os.path.exists",
"pandas.read_json",
"gc.collect",
"math.log",
"os.path.join",
"numpy.gradient"
] | [((2622, 2639), 'math.pow', 'math.pow', (['(1024)', 'i'], {}), '(1024, i)\n', (2630, 2639), False, 'import math\n'), ((131, 151), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (145, 151), False, 'import os\n'), ((384, 439), 'pandas.read_json', 'pd.read_json', (['capture_path'], {'lines': '(True)', 'chunksize': '(16384)'}), '(capture_path, lines=True, chunksize=16384)\n', (396, 439), True, 'import pandas as pd\n'), ((1372, 1414), 'os.path.join', 'os.path.join', (['capture_dir', '"""**/*.captures"""'], {}), "(capture_dir, '**/*.captures')\n", (1384, 1414), False, 'import os\n'), ((2262, 2309), 'numpy.gradient', 'np.gradient', (['capture.value', 'capture.fetch_index'], {}), '(capture.value, capture.fetch_index)\n', (2273, 2309), True, 'import numpy as np\n'), ((839, 851), 'gc.collect', 'gc.collect', ([], {}), '()\n', (849, 851), False, 'import gc\n'), ((2594, 2611), 'math.log', 'math.log', (['b', '(1024)'], {}), '(b, 1024)\n', (2602, 2611), False, 'import math\n'), ((156, 169), 'os.stat', 'os.stat', (['path'], {}), '(path)\n', (163, 169), False, 'import os\n')] |
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

# Load the female-to-male sex-ratio sheet from the workbook.
exf = pd.ExcelFile("f2m.xlsx")
df = exf.parse('f2m_ratios')
# Aggregate ratios over any duplicate (Age, Year) pairs.
df1 = df.groupby(["Age", "Year"], as_index=False).sum()
# Reshape to a matrix: one row per age range, one column per year.
# Keyword arguments used because positional DataFrame.pivot arguments
# are removed in pandas 2.x.
dfu = df1.pivot(index="Age", columns="Year", values="Ratio")
# to_records() flattens the Age index back into an ordinary column.
# (Fix: the original also called dfu.reset_index() and discarded the
# result — a no-op — and evaluated pd.isnull(np.array(...)) without
# using it; both dead statements are removed.)
flat = pd.DataFrame(dfu.to_records())
print(flat["Age"])
# Heatmap data is every column except the Age labels; drop(columns=...)
# returns a copy, so `flat` itself keeps its Age column intact.
df1i = flat.drop(columns=["Age"])
print(df1i)
plt.figure(figsize=(8, 8))
ax = sns.heatmap(df1i, fmt="g", linewidths=.5, yticklabels=flat["Age"], cmap="plasma")
plt.title("Year vs Age - Sex Ratio")
plt.ylabel("Age Range")
plt.xlabel("Year")
plt.savefig("Save2.png")
plt.show()
| [
"matplotlib.pyplot.title",
"seaborn.heatmap",
"matplotlib.pyplot.show",
"pandas.ExcelFile",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] | [((101, 125), 'pandas.ExcelFile', 'pd.ExcelFile', (['"""f2m.xlsx"""'], {}), "('f2m.xlsx')\n", (113, 125), True, 'import pandas as pd\n'), ((430, 456), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (440, 456), True, 'import matplotlib.pyplot as plt\n'), ((460, 547), 'seaborn.heatmap', 'sns.heatmap', (['df1i'], {'fmt': '"""g"""', 'linewidths': '(0.5)', 'yticklabels': "flat['Age']", 'cmap': '"""plasma"""'}), "(df1i, fmt='g', linewidths=0.5, yticklabels=flat['Age'], cmap=\n 'plasma')\n", (471, 547), True, 'import seaborn as sns\n'), ((540, 576), 'matplotlib.pyplot.title', 'plt.title', (['"""Year vs Age - Sex Ratio"""'], {}), "('Year vs Age - Sex Ratio')\n", (549, 576), True, 'import matplotlib.pyplot as plt\n'), ((578, 601), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Age Range"""'], {}), "('Age Range')\n", (588, 601), True, 'import matplotlib.pyplot as plt\n'), ((603, 621), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (613, 621), True, 'import matplotlib.pyplot as plt\n'), ((623, 647), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Save2.png"""'], {}), "('Save2.png')\n", (634, 647), True, 'import matplotlib.pyplot as plt\n'), ((649, 659), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (657, 659), True, 'import matplotlib.pyplot as plt\n'), ((387, 414), 'numpy.array', 'np.array', (['df1i'], {'dtype': 'float'}), '(df1i, dtype=float)\n', (395, 414), True, 'import numpy as np\n')] |
import time
import datetime
import gym
import numpy as np
import pandas as pd
from scipy import stats, special
from banditry.base import Seedable
from banditry.experiment import ReplicationMetrics
def register_env(env, num_arms, num_context, num_time_steps=1000,
                 num_replications=100, version=0, seed=42):
    """Register *env* with gym under a generated CMAB id and return the id.

    Any stale registration under the same id is removed first, so this is
    safe to call repeatedly (e.g. when re-running a notebook cell).
    """
    name = (f'CMAB1Best{env.__name__}'
            f'N{num_arms}C{num_context}T{num_time_steps}-v{version}')
    # Drop a previous spec with this id, if present, to allow re-registration.
    gym.envs.registry.env_specs.pop(name, None)
    env_kwargs = {
        'num_arms': num_arms,
        'num_context': num_context,
        'num_time_steps': num_time_steps,
        'seed': seed,
    }
    gym.envs.register(
        name,
        trials=num_replications,
        max_episode_steps=num_time_steps,
        entry_point=env,
        kwargs=env_kwargs)
    return name
class MetricsRecorder(gym.Wrapper):
    """Record metrics from an agent interacting with the wrapped environment."""
    def __init__(self, env):
        super().__init__(env)
        # Per-replication metrics record; allocated lazily in reset() once
        # the first observation (and hence its columns) is known.
        self.metrics = None
        # Index of the next row to fill in the metrics tables.
        self._current_step = 0
        # Wall-clock timestamp taken when the agent was last handed an
        # observation; step() uses it to measure decision latency. None
        # means reset() has not been called (or the episode is done).
        self._compute_start = None
        # Observation most recently returned to the agent; step() logs the
        # row of it selected by the chosen action.
        self._last_observation = None
    def step(self, action):
        """Forward *action* to the wrapped env and record step metrics.

        For step t this records: the context row the action selected, the
        agent's decision time, the action and reward, and (when the env
        reports them via `info`) the optimal action/reward.
        """
        assert self._compute_start is not None, "Cannot call env.step() before calling reset()"
        # Elapsed time since the observation was handed out == the agent's
        # think time for this decision.
        time_for_decision = time.time() - self._compute_start
        observation, reward, done, info = self.env.step(action)
        # Record outcomes for this step
        t = self._current_step # will be 0 on first call to step
        self.metrics.design_matrix.iloc[t] = self._last_observation.iloc[action]
        self.metrics.time_per_decision[t] = time_for_decision
        self.metrics.actions[t] = action
        # NaN when the env does not report an optimal action/reward.
        self.metrics.optimal_actions[t] = info.get('optimal_action', np.nan)
        self.metrics.rewards[t] = reward
        self.metrics.optimal_rewards[t] = info.get('optimal_reward', np.nan)
        # Move to next step and restart timer
        self._current_step += 1
        if not done:
            self._last_observation = observation
            self._compute_start = time.time() # reset compute timer
        else:
            # Episode finished: disarm the timer and stamp the end time.
            self._compute_start = None
            self.metrics.end = datetime.datetime.now()
        return observation, reward, done, info
    def reset(self, **kwargs):
        """Reset the wrapped env and start a fresh ReplicationMetrics record."""
        self._last_observation = self.env.reset(**kwargs)
        self.metrics = ReplicationMetrics(
            self.env.initial_seed, self.env.num_time_steps, self.env.num_predictors,
            predictor_colnames=self._last_observation.columns)
        # Carry the observation's per-column dtypes over to the design
        # matrix so row assignment via .iloc does not upcast everything.
        dtypes = {colname: self._last_observation[colname].dtype
                  for colname in self._last_observation.columns}
        self.metrics.design_matrix = self.metrics.design_matrix.astype(dtypes)
        self.metrics.start = datetime.datetime.now()
        self._current_step = 0
        self._compute_start = time.time()
        return self._last_observation
class SeedableDiscrete(gym.spaces.Discrete, Seedable):
    """A gym Discrete space whose `sample` draws from a seedable RNG.

    Combines gym.spaces.Discrete with the project's Seedable mixin so that
    action sampling is reproducible across replications.
    """
    def __init__(self, n, **kwargs):
        # Initialize both bases explicitly (no cooperative super() chain here).
        gym.spaces.Discrete.__init__(self, n)
        Seedable.__init__(self, **kwargs)
    def sample(self):
        # Draw from self.rng (expected to be provided by the Seedable base)
        # instead of gym's global RNG.
        return self.rng.randint(self.n)
class ContextualBanditEnv(Seedable, gym.Env):
    """Contextual multi-armed bandit environment with Bernoulli rewards.

    Each observation is a pandas DataFrame with one row per arm: a
    categorical 'arm' column plus `num_context` context columns (the same
    drawn context values for every arm). Rewards are Bernoulli draws with
    per-arm success rates expit(arm_effect + interaction_effects . context).

    Subclasses customize the environment by overriding create_context_dist,
    create_interaction_effects, and create_arm_effects; the base class uses
    all-zero effects so every arm is equivalent.
    """
    def render(self, mode='human'):
        raise NotImplementedError
    def __init__(self, num_arms, num_context, num_time_steps, **kwargs):
        Seedable.__init__(self, **kwargs)  # implements seed and reset
        self.num_arms = num_arms
        self.num_context = num_context
        self.num_predictors = 1 + num_context  # 1 for arm categorical
        self.num_time_steps = num_time_steps
        self._context_colnames = [f'p{i}' for i in range(num_context)]
        self._base_obs = pd.Series(range(num_arms), dtype='category').to_frame('arm')
        self._last_observation = None
        self.reward_range = (0, 1)
        self.action_space = SeedableDiscrete(self.num_arms)
        self.observation_space = self.create_observation_space()
        self._last_observation = None
        # Use a fixed random seed for this part so environment is always the same
        rng = np.random.RandomState(42)
        self.context_dist = self.create_context_dist(rng)
        self.interaction_effects = self.create_interaction_effects(rng)
        self.arm_effects = self.create_arm_effects(rng)
    def create_observation_space(self):
        # TODO: fix bounds on this so `sample` actually stays in bounds
        # Also, make it seedable like the action space for repeatability
        # BUG FIX: `np.float` was a deprecated alias of the builtin `float`
        # (removed in NumPy 1.24); use `float` directly -- identical semantics.
        return gym.spaces.Box(
            low=0, high=np.inf, shape=(self.num_predictors,), dtype=float)
    def create_context_dist(self, rng):
        """Return the (frozen) scipy distribution used to draw context vectors."""
        return stats.truncnorm(0, 10, loc=0, scale=0.5)
    def create_interaction_effects(self, rng):
        """Return a (num_arms, num_context) array of context coefficients."""
        return np.zeros((self.num_arms, self.num_context))
    def create_arm_effects(self, rng):
        """Return a length-num_arms array of per-arm base effects (logits)."""
        return np.zeros(self.num_arms)
    def _next_observation(self):
        """Draw a fresh context vector and build the per-arm observation frame."""
        context = self.context_dist.rvs(size=self.num_context, random_state=self.rng)
        self._last_context = pd.Series(context)
        obs = self._base_obs.copy()
        for i, name in enumerate(self._context_colnames):
            obs[name] = context[i]
        self._last_observation = obs
        return self._last_observation
    def reset(self, **kwargs):
        Seedable.reset(self)
        self.action_space.reset()
        return self._next_observation()
    def step(self, action):
        """Pull `action`; return (next_observation, reward, done, info).

        info carries the optimal action for the realized rates and the reward
        it would have produced, enabling regret computation downstream.
        """
        # BUG FIX: the per-arm logit vector previously used the scalar
        # `self.arm_effects[action]` broadcast over all arms, which made
        # `optimal_action` independent of the arm effects (so e.g.
        # OnlyArmEffects' "best" arm was never identified as optimal).
        # Use the full arm-effects vector; the chosen arm's own rate
        # (rates[action]) is unchanged by this correction.
        logits = (self.arm_effects +
                  self.interaction_effects.dot(self._last_context))
        rates = special.expit(logits)
        rewards = self.rng.binomial(n=1, p=rates)
        actual_reward = rewards[action]
        optimal_action = rates.argmax()
        optimal_reward = rewards[optimal_action]
        info = dict(optimal_action=optimal_action,
                    optimal_reward=optimal_reward)
        next_observation = self._next_observation()
        done = False  # will be handled by wrapper
        return next_observation, actual_reward, done, info
class OnlyInteractionEffects(ContextualBanditEnv):
    """Variant where arms differ only through their context interactions.

    All arms share one coefficient vector, except the last arm, which gets a
    small positive bump added to every coefficient.
    """
    def create_interaction_effects(self, rng):
        # One shared coefficient vector for all arms (drawn first to keep
        # the RNG stream identical to the original implementation)...
        shared = stats.norm(-1, 0.5).rvs(size=self.num_context, random_state=rng)
        # ...and a small positive bump applied only to the final arm.
        bump = stats.truncnorm.rvs(
            0.4, 0.7, loc=0.5, scale=0.1,
            size=self.num_context, random_state=rng)
        common_rows = np.tile(shared, (self.num_arms - 1, 1))
        return np.vstack([common_rows, shared + bump])
class OnlyArmEffects(ContextualBanditEnv):
    """Variant where arms differ only by a fixed per-arm base effect."""
    def create_arm_effects(self, rng):
        # Evenly spaced base effects from -4 up to -2; the last arm has the
        # largest effect.
        return np.linspace(start=-4, stop=-2, num=self.num_arms)
class ArmAndInteractionEffects(ContextualBanditEnv):
    """Variant with both per-arm base effects and context interactions."""
    def create_arm_effects(self, rng):
        # Evenly spaced base effects, decreasing from -2 down to -4.
        return np.linspace(start=-2, stop=-4, num=self.num_arms)
    def create_interaction_effects(self, rng):
        # One shared coefficient vector for every arm (drawn first to keep
        # the RNG stream identical to the original implementation)...
        shared = stats.norm(-1, 0.5).rvs(size=self.num_context, random_state=rng)
        # ...with a small positive bump applied only to the last arm.
        bump = stats.truncnorm.rvs(
            0.4, 0.7, loc=0.5, scale=0.1,
            size=self.num_context, random_state=rng)
        return np.vstack([np.tile(shared, (self.num_arms - 1, 1)), shared + bump])
| [
"scipy.stats.norm",
"numpy.ndarray",
"scipy.stats.truncnorm",
"numpy.zeros",
"gym.spaces.Discrete.__init__",
"time.time",
"numpy.random.RandomState",
"scipy.special.expit",
"banditry.base.Seedable.__init__",
"gym.envs.registry.env_specs.pop",
"gym.spaces.Box",
"pandas.Series",
"numpy.linspac... | [((448, 495), 'gym.envs.registry.env_specs.pop', 'gym.envs.registry.env_specs.pop', (['env_name', 'None'], {}), '(env_name, None)\n', (479, 495), False, 'import gym\n'), ((2309, 2456), 'banditry.experiment.ReplicationMetrics', 'ReplicationMetrics', (['self.env.initial_seed', 'self.env.num_time_steps', 'self.env.num_predictors'], {'predictor_colnames': 'self._last_observation.columns'}), '(self.env.initial_seed, self.env.num_time_steps, self.env\n .num_predictors, predictor_colnames=self._last_observation.columns)\n', (2327, 2456), False, 'from banditry.experiment import ReplicationMetrics\n'), ((2716, 2739), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2737, 2739), False, 'import datetime\n'), ((2802, 2813), 'time.time', 'time.time', ([], {}), '()\n', (2811, 2813), False, 'import time\n'), ((2955, 2992), 'gym.spaces.Discrete.__init__', 'gym.spaces.Discrete.__init__', (['self', 'n'], {}), '(self, n)\n', (2983, 2992), False, 'import gym\n'), ((3001, 3034), 'banditry.base.Seedable.__init__', 'Seedable.__init__', (['self'], {}), '(self, **kwargs)\n', (3018, 3034), False, 'from banditry.base import Seedable\n'), ((3299, 3332), 'banditry.base.Seedable.__init__', 'Seedable.__init__', (['self'], {}), '(self, **kwargs)\n', (3316, 3332), False, 'from banditry.base import Seedable\n'), ((4044, 4069), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (4065, 4069), True, 'import numpy as np\n'), ((4457, 4542), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': 'np.inf', 'shape': '(self.num_predictors,)', 'dtype': 'np.float'}), '(low=0, high=np.inf, shape=(self.num_predictors,), dtype=np.float\n )\n', (4471, 4542), False, 'import gym\n'), ((4607, 4647), 'scipy.stats.truncnorm', 'stats.truncnorm', (['(0)', '(10)'], {'loc': '(0)', 'scale': '(0.5)'}), '(0, 10, loc=0, scale=0.5)\n', (4622, 4647), False, 'from scipy import stats, special\n'), ((4711, 4754), 'numpy.zeros', 'np.zeros', 
(['(self.num_arms, self.num_context)'], {}), '((self.num_arms, self.num_context))\n', (4719, 4754), True, 'import numpy as np\n'), ((4810, 4833), 'numpy.zeros', 'np.zeros', (['self.num_arms'], {}), '(self.num_arms)\n', (4818, 4833), True, 'import numpy as np\n'), ((4983, 5001), 'pandas.Series', 'pd.Series', (['context'], {}), '(context)\n', (4992, 5001), True, 'import pandas as pd\n'), ((5248, 5268), 'banditry.base.Seedable.reset', 'Seedable.reset', (['self'], {}), '(self)\n', (5262, 5268), False, 'from banditry.base import Seedable\n'), ((5501, 5522), 'scipy.special.expit', 'special.expit', (['logits'], {}), '(logits)\n', (5514, 5522), False, 'from scipy import stats, special\n'), ((6088, 6133), 'numpy.ndarray', 'np.ndarray', (['(self.num_arms, self.num_context)'], {}), '((self.num_arms, self.num_context))\n', (6098, 6133), True, 'import numpy as np\n'), ((6156, 6175), 'scipy.stats.norm', 'stats.norm', (['(-1)', '(0.5)'], {}), '(-1, 0.5)\n', (6166, 6175), False, 'from scipy import stats, special\n'), ((6740, 6778), 'numpy.linspace', 'np.linspace', (['(-4)', '(-2)'], {'num': 'self.num_arms'}), '(-4, -2, num=self.num_arms)\n', (6751, 6778), True, 'import numpy as np\n'), ((6889, 6927), 'numpy.linspace', 'np.linspace', (['(-2)', '(-4)'], {'num': 'self.num_arms'}), '(-2, -4, num=self.num_arms)\n', (6900, 6927), True, 'import numpy as np\n'), ((6994, 7039), 'numpy.ndarray', 'np.ndarray', (['(self.num_arms, self.num_context)'], {}), '((self.num_arms, self.num_context))\n', (7004, 7039), True, 'import numpy as np\n'), ((7062, 7081), 'scipy.stats.norm', 'stats.norm', (['(-1)', '(0.5)'], {}), '(-1, 0.5)\n', (7072, 7081), False, 'from scipy import stats, special\n'), ((1238, 1249), 'time.time', 'time.time', ([], {}), '()\n', (1247, 1249), False, 'import time\n'), ((2005, 2016), 'time.time', 'time.time', ([], {}), '()\n', (2014, 2016), False, 'import time\n'), ((2124, 2147), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2145, 2147), False, 'import 
datetime\n'), ((6500, 6594), 'scipy.stats.truncnorm.rvs', 'stats.truncnorm.rvs', (['(0.4)', '(0.7)'], {'loc': '(0.5)', 'scale': '(0.1)', 'size': 'self.num_context', 'random_state': 'rng'}), '(0.4, 0.7, loc=0.5, scale=0.1, size=self.num_context,\n random_state=rng)\n', (6519, 6594), False, 'from scipy import stats, special\n'), ((7406, 7500), 'scipy.stats.truncnorm.rvs', 'stats.truncnorm.rvs', (['(0.4)', '(0.7)'], {'loc': '(0.5)', 'scale': '(0.1)', 'size': 'self.num_context', 'random_state': 'rng'}), '(0.4, 0.7, loc=0.5, scale=0.1, size=self.num_context,\n random_state=rng)\n', (7425, 7500), False, 'from scipy import stats, special\n'), ((6283, 6325), 'numpy.tile', 'np.tile', (['shared_effects', '(self.num_arms - 1)'], {}), '(shared_effects, self.num_arms - 1)\n', (6290, 6325), True, 'import numpy as np\n'), ((7189, 7231), 'numpy.tile', 'np.tile', (['shared_effects', '(self.num_arms - 1)'], {}), '(shared_effects, self.num_arms - 1)\n', (7196, 7231), True, 'import numpy as np\n')] |
# Fix paths for imports to work in unit tests ----------------
if __name__ == "__main__":
from _fix_paths import fix_paths
fix_paths()
# ------------------------------------------------------------
# Load libraries ---------------------------------------------
from typing import Dict
import numpy as np
# ------------------------------------------------------------
class Action(object):
    """
    A bid action together with per-attribute bid modifiers.

    :ivar float bid: Base bid.
    :ivar dict of dict modifiers: one sub-dict per attribute dimension
        (e.g. gender, location, device). Modifiers are multiplicative:
        +30\% is expressed as 1.3, and 1.0 means "no modifier".

    Example: {bid=1.0, modifiers={'gender': {'F': 1.2, 'M': 1.1, 'U': 1.0},
    'age': {'0-19': 0.7, '30-39': 1.1, '60-69': 0.9, '50-59': 0.8, '70-*': 1.0, '20-29': 1.5, '40-49': 1.2}}}
    """

    def __init__(self, bid, modifiers=None):
        """
        :param bid: float
        :param modifiers: if not given, must be validated/initialized against an ActionSet
        """
        self.bid = bid
        self.modifiers = modifiers  # type: Dict[str, Dict[str, float]]

    def __repr__(self):
        if self.modifiers is None:  # modifiers not yet specified
            return "{{bid={}, modifiers={}}}".format(self.bid, "None")
        if isinstance(self.modifiers, dict):
            # Round to 2 decimals purely for display.
            rounded = {attr: {seg: np.round(mod, 2) for seg, mod in segs.items()}
                       for attr, segs in self.modifiers.items()}
            return "{{bid={}, modifiers={}}}".format(self.bid, rounded)
        # Legacy list-of-lists form; to be removed in the future after clean-up.
        return "{{bid={}, modifiers={}}}".format(
            self.bid, [[np.round(mod, 2) for mod in row] for row in self.modifiers])
class ActionSet(object):
    """
    Action Set class: defines the space of valid actions and provides a
    validator (`is_valid`) and a normalizer (`validify_action`) for Action
    objects.
    """
    MOD_DEF_VALUE = 1.0  # default value for modifiers in validify_action
    def __init__(self, attr_set, max_bid, min_bid, max_mod, min_mod):
        """
        :param attr_set: Attribute set object
        :param max_bid: max possible base bid value
        :param min_bid: min possible base bid value
        :param max_mod: max possible modifier value
        :param min_mod: min possible modifier value
        """
        self.attr_set = attr_set
        self.max_bid = max_bid
        self.min_bid = min_bid
        self.max_mod = max_mod
        self.min_mod = min_mod
    def validify_action(self, a, in_place=False):
        """ initialize action as a valid form to the action set.
        implementation: fills all missing modifiers to ActionSet.MOD_DEF_VALUE to create a "valid" action
        DOES NOT remove unnecessary modifiers not defined in self.attr_set.attr_names
        :param Action a: Action.
        :param bool in_place: if True, param a is modified in-place. Otherwise, a new Action object is returned
        :return: A valid action object, if in_place=False (default); None, otherwise (argument a is updated in-place)
        """
        assert isinstance(a, Action)  # TODO exception handling
        new_mods = {}
        for k in self.attr_set.attr_names:
            # Start every segment at the default, then overlay user-supplied values.
            new_mod = {k2: ActionSet.MOD_DEF_VALUE for k2 in self.attr_set.attr_sets[k]}
            if a.modifiers is not None and k in a.modifiers.keys():
                new_mod.update(a.modifiers[k])
            new_mods[k] = new_mod
        if in_place:
            a.modifiers = new_mods
            return None
        else:
            return Action(bid=a.bid, modifiers=new_mods)
    def is_valid(self, a):
        """
        returns true if the given action a is "valid" according to this ActionSet
        Validity check
        - bid modifiers are defined for all attributes defined by self.attr_set
        - bid modifiers result in valid bids for all attributes defined by self.attr_set
        :param a: Action
        :return: True, None if valid
                 False, str if invalid. The second str explains the reason why invalid
        """
        base_bid = a.bid
        mod_lists = a.modifiers
        attr_names = self.attr_set.attr_names
        if not len(mod_lists) == len(attr_names):
            return False, "modifier list's length not matching attribute names"  # number of attribute names mismatch
        if not self.min_bid <= base_bid:
            return False, "base bid less than min_bid"
        if not base_bid <= self.max_bid:
            return False, "base bid greater than max_bid"
        for k in attr_names:
            try:
                mods = a.modifiers[k]
            except KeyError:
                return False, "modifier does not have key {} defined".format(k)
            mod_list = []
            seg_names = self.attr_set.attr_sets[k]
            for k2 in seg_names:
                try:
                    mod_list.append(mods[k2])
                except KeyError:
                    return False, "modifier for {} does not have segment {} defined".format(k, k2)
            if not all([self.min_mod <= m for m in mod_list]):
                return False, "mod value less than min_mod "  # min_mod violated
            # BUG FIX: modifiers were compared against self.max_bid instead of
            # self.max_mod, so modifiers in (max_mod, max_bid] wrongly passed
            # validation (the error message below always claimed max_mod).
            if not all([m <= self.max_mod for m in mod_list]):
                return False, "mod value greater than max_mod"  # max_mod violated
        return True, None
if __name__ == "__main__":
# from simulator.attribute import AttrSet
import attribute
# sample attrSet
names = ['gender', 'age']
vals = {'gender': ['M', 'F', 'U'],
'age': ['0-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-*']}
attr_set = attribute.AttrSet(names, vals)
act_set = ActionSet(attr_set, max_bid=9.99, min_bid=0.01, max_mod=9.0, min_mod=0.1)
# valid action
# a1 = Action(1.0, [ [1.1, 1.2, 1.0], [0.7, 1.5, 1.1, 1.2, 0.8, 0.9, 1.0] ] )
a1 = Action(1.0, {'gender': {'M': 1.1, 'F': 1.2, 'U': 1.0},
'age': {'0-19': 0.7, '20-29': 1.5, '30-39': 1.1, '40-49': 1.2, '50-59': 0.8, '60-69': 0.9, '70-*': 1.0}})
print(act_set.is_valid(a1))
# invalid action: modifier not fully defined
a2 = Action(1.0, {'gender': {'M': 1.1, 'F': 1.2, 'U': 1.0},
'age': {'0-19': 0.7, '20-29': 1.5, '30-39': 1.1, '40-49': 1.2, '50-59': 0.8, '60-69': 0.9}})
print(act_set.is_valid(a2))
# invalid action: less than min_bid found
a3 = Action(0.00001, {'gender': {'M': 1.1, 'F': 1.2, 'U': 1.0},
'age': {'0-19': 0.7, '20-29': 1.5, '30-39': 1.1, '40-49': 1.2, '50-59': 0.8, '60-69': 0.9, '70-*': 1.0}})
print(act_set.is_valid(a3))
# invalid action: greater than max_bid found
a4 = Action(120, {'gender': {'M': 1.1, 'F': 1.2, 'U': 1.0},
'age': {'0-19': 0.7, '20-29': 1.5, '30-39': 1.1, '40-49': 1.2, '50-59': 0.8, '60-69': 0.9, '70-*': 1.0}})
print(act_set.is_valid(a4))
# invalid action: greater than max_mod found
a5 = Action(1.0, {'gender': {'M': 1.1, 'F': 1.2, 'U': 1.0},
'age': {'0-19': 0.7, '20-29': 1.5, '30-39': 1.1, '40-49': 1.2, '50-59': 0.8, '60-69': 0.9, '70-*': 10.0}})
print(act_set.is_valid(a5))
# invalid action: less than min_mod found
a6 = Action(1.0, {'gender': {'M': 1.1, 'F': 1.2, 'U': 1.0},
'age': {'0-19': 0.7, '20-29': 1.5, '30-39': 1.1, '40-49': 1.2, '50-59': 0.8, '60-69': 0.9, '70-*': 0.01}})
print(act_set.is_valid(a6))
# check __str__ form of Action
print(a1)
# sanity check for validify_action
a_inc1 = Action(1.0) # modifier not defined
print(a_inc1)
a_inc2 = act_set.validify_action(a_inc1) # in_place modification of a_inc1
print(a_inc1, a_inc2)
# checking in_place flag of validify_action
a_inc3 = Action(1.0)
print(a_inc3)
act_set.validify_action(a_inc3, in_place=True) # returns a new action (preserves a_inc2)
print(a_inc3)
# checking incomplete action fill-ins for a totally missing attribute name
a_inc4 = Action(1.0, {'gender': {'M': 1.1, 'F': 1.2, 'U': 1.0}})
print(a_inc4)
a_inc4_validify = act_set.validify_action(a_inc4) # in_place modification of a_inc1
print(a_inc4_validify)
# checking incomplete action fill-ins for a partially missing attribute name with a totally missing name
a_inc5 = Action(1.0, {'gender': {'M': 1.1}})
print(a_inc5)
a_inc5_validify = act_set.validify_action(a_inc5) # in_place modification of a_inc1
print(a_inc5_validify)
| [
"_fix_paths.fix_paths",
"numpy.round",
"attribute.AttrSet"
] | [((133, 144), '_fix_paths.fix_paths', 'fix_paths', ([], {}), '()\n', (142, 144), False, 'from _fix_paths import fix_paths\n'), ((5730, 5760), 'attribute.AttrSet', 'attribute.AttrSet', (['names', 'vals'], {}), '(names, vals)\n', (5747, 5760), False, 'import attribute\n'), ((1415, 1429), 'numpy.round', 'np.round', (['v', '(2)'], {}), '(v, 2)\n', (1423, 1429), True, 'import numpy as np\n'), ((1710, 1724), 'numpy.round', 'np.round', (['v', '(2)'], {}), '(v, 2)\n', (1718, 1724), True, 'import numpy as np\n')] |
from unittest import SkipTest
import chainer
import numpy as np
from test.util import generate_kernel_test_case, wrap_template
from webdnn.graph.placeholder import Placeholder
from webdnn.frontend.chainer.converter import ChainerConverter
from webdnn.frontend.chainer.placeholder_variable import PlaceholderVariable
@wrap_template
def template(ksize=2, stride=None, pad=0, shape=(2, 4, 6, 8), cover_all=False, description=""):
    """Build a Chainer F.average_pooling_2d graph, convert it with WebDNN,
    and emit a kernel test case comparing the converted graph's output
    against Chainer's.

    :param ksize: pooling kernel size (int or (kh, kw))
    :param stride: pooling stride; None lets Chainer pick its default
    :param pad: input padding (int or (ph, pw))
    :param shape: input tensor shape (N, C, H, W)
    :param cover_all: unsupported by Chainer's average pooling; skips the test
    :param description: suffix appended to the generated test description
    """
    if cover_all:
        # BUG FIX: SkipTest was instantiated but never raised, so the intended
        # skip had no effect and the unsupported configuration ran anyway.
        raise SkipTest("AveragePooling2D function in Chainer does not support cover_all=True mode.")
    vx = chainer.Variable(np.random.rand(*shape).astype(np.float32))
    vy = chainer.functions.average_pooling_2d(vx, ksize=ksize, stride=stride, pad=pad)
    graph = ChainerConverter().convert([vx], [vy])
    x = graph.inputs[0]
    y = graph.outputs[0]
    # Sanity check: converted output shape must match Chainer's.
    assert list(vy.shape) == list(graph.outputs[0].shape), f"(vy.shape)={vy.shape}, (graph.outputs[0].shape)={graph.outputs[0].shape}"
    generate_kernel_test_case(
        description=f"[chainer] F.average_pooling_2d {description}",
        graph=graph,
        inputs={x: vx.data},
        expected={y: vy.data},
    )
def test():
    """Default parameters."""
    template()
def test_padding_not_zero():
    """Non-zero padding."""
    template(pad=1)
# TODO: chainer's average pooling does not support cover_all=True mode
# def test_cover_all():
#     template(shape=(2, 4, 8, 8), ksize=3, pad=0, stride=3, cover_all=True)
def test_no_cover_all():
    """Explicit cover_all=False with stride 3."""
    template(shape=(2, 4, 8, 8), ksize=3, pad=0, stride=3, cover_all=False)
def test_stride_is_none():
    """stride=None (library default) with padding."""
    template(stride=None, pad=1)
def test_irregular_size():
    """Non-square kernel, stride, and padding."""
    template(ksize=(3, 4), stride=(1, 2), pad=(1, 3))
def test_with_placeholder():
    """Convert with symbolic H/W placeholders, then bind concrete sizes (7x7)."""
    vx = chainer.Variable(np.random.rand(2, 16, 7, 7).astype(np.float32))
    vy = chainer.functions.average_pooling_2d(vx, ksize=3, stride=2, pad=0)
    H = Placeholder(label="H")
    W = Placeholder(label="W")
    px = PlaceholderVariable([2, 16, H, W])
    py = chainer.functions.average_pooling_2d(px, ksize=3, stride=2, pad=0)
    graph = ChainerConverter().convert([px], [py])
    # Resolve the placeholders to the actual input size before generating kernels.
    H.value = 7
    W.value = 7
    generate_kernel_test_case(
        description=f"[chainer] F.average_pooling_2d with placeholder",
        graph=graph,
        backend=["webgpu", "webassembly"],
        inputs={graph.inputs[0]: vx.data},
        expected={graph.outputs[0]: vy.data},
    )
| [
"test.util.generate_kernel_test_case",
"webdnn.frontend.chainer.placeholder_variable.PlaceholderVariable",
"chainer.functions.average_pooling_2d",
"unittest.SkipTest",
"numpy.random.rand",
"webdnn.frontend.chainer.converter.ChainerConverter",
"webdnn.graph.placeholder.Placeholder"
] | [((623, 700), 'chainer.functions.average_pooling_2d', 'chainer.functions.average_pooling_2d', (['vx'], {'ksize': 'ksize', 'stride': 'stride', 'pad': 'pad'}), '(vx, ksize=ksize, stride=stride, pad=pad)\n', (659, 700), False, 'import chainer\n'), ((943, 1095), 'test.util.generate_kernel_test_case', 'generate_kernel_test_case', ([], {'description': 'f"""[chainer] F.average_pooling_2d {description}"""', 'graph': 'graph', 'inputs': '{x: vx.data}', 'expected': '{y: vy.data}'}), "(description=\n f'[chainer] F.average_pooling_2d {description}', graph=graph, inputs={x:\n vx.data}, expected={y: vy.data})\n", (968, 1095), False, 'from test.util import generate_kernel_test_case, wrap_template\n'), ((1742, 1808), 'chainer.functions.average_pooling_2d', 'chainer.functions.average_pooling_2d', (['vx'], {'ksize': '(3)', 'stride': '(2)', 'pad': '(0)'}), '(vx, ksize=3, stride=2, pad=0)\n', (1778, 1808), False, 'import chainer\n'), ((1818, 1840), 'webdnn.graph.placeholder.Placeholder', 'Placeholder', ([], {'label': '"""H"""'}), "(label='H')\n", (1829, 1840), False, 'from webdnn.graph.placeholder import Placeholder\n'), ((1849, 1871), 'webdnn.graph.placeholder.Placeholder', 'Placeholder', ([], {'label': '"""W"""'}), "(label='W')\n", (1860, 1871), False, 'from webdnn.graph.placeholder import Placeholder\n'), ((1881, 1915), 'webdnn.frontend.chainer.placeholder_variable.PlaceholderVariable', 'PlaceholderVariable', (['[2, 16, H, W]'], {}), '([2, 16, H, W])\n', (1900, 1915), False, 'from webdnn.frontend.chainer.placeholder_variable import PlaceholderVariable\n'), ((1925, 1991), 'chainer.functions.average_pooling_2d', 'chainer.functions.average_pooling_2d', (['px'], {'ksize': '(3)', 'stride': '(2)', 'pad': '(0)'}), '(px, ksize=3, stride=2, pad=0)\n', (1961, 1991), False, 'import chainer\n'), ((2081, 2304), 'test.util.generate_kernel_test_case', 'generate_kernel_test_case', ([], {'description': 'f"""[chainer] F.average_pooling_2d with placeholder"""', 'graph': 'graph', 'backend': 
"['webgpu', 'webassembly']", 'inputs': '{graph.inputs[0]: vx.data}', 'expected': '{graph.outputs[0]: vy.data}'}), "(description=\n f'[chainer] F.average_pooling_2d with placeholder', graph=graph,\n backend=['webgpu', 'webassembly'], inputs={graph.inputs[0]: vx.data},\n expected={graph.outputs[0]: vy.data})\n", (2106, 2304), False, 'from test.util import generate_kernel_test_case, wrap_template\n'), ((457, 553), 'unittest.SkipTest', 'SkipTest', (['"""AveragePooling2D function in Chainer does not support cover_all=True mode."""'], {}), "(\n 'AveragePooling2D function in Chainer does not support cover_all=True mode.'\n )\n", (465, 553), False, 'from unittest import SkipTest\n'), ((714, 732), 'webdnn.frontend.chainer.converter.ChainerConverter', 'ChainerConverter', ([], {}), '()\n', (730, 732), False, 'from webdnn.frontend.chainer.converter import ChainerConverter\n'), ((2005, 2023), 'webdnn.frontend.chainer.converter.ChainerConverter', 'ChainerConverter', ([], {}), '()\n', (2021, 2023), False, 'from webdnn.frontend.chainer.converter import ChainerConverter\n'), ((571, 593), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (585, 593), True, 'import numpy as np\n'), ((1685, 1712), 'numpy.random.rand', 'np.random.rand', (['(2)', '(16)', '(7)', '(7)'], {}), '(2, 16, 7, 7)\n', (1699, 1712), True, 'import numpy as np\n')] |
import numpy as np
import fractions as f
from scipy.linalg import circulant
import matplotlib.pyplot as plt
from scipy import signal
import random
import time
# Build a synthetic mixture of three periodic components (periods 8, 11, 16)
# plus uniform noise, then time how long an SVD-based period search takes for
# increasing signal lengths.
# NOTE(review): scipy.signal.triang/cosine were moved to scipy.signal.windows
# in modern SciPy; this code assumes an older SciPy release.
x3 = 100*signal.triang(8)
x3 = np.tile(x3, 16*11*10)
x3_1 = x3 - np.mean(x3)  # NOTE(review): the mean-removed copies (x3_1, x1_1, x2_1) are unused below
x1 = 80*signal.cosine(11)
x1 = np.tile(x1, 8*16*10)
x1_1 = x1 - np.mean(x1)
x2 = 70*signal.triang(16)
x2 = np.tile(x2, 8*11*10)
x2_1 = x2 - np.mean(x2)
l = []  # average wall-clock time per signal length
# Sweep signal lengths and record the average (over 2 runs) time of the
# singular-value-ratio computation at each length.
for length in range(900, 11000, 506):
    cum_dib = 0
    start = time.time()
    for sing_itr in range(2):
        oo = np.arange(20,21,2)  # a single noise amplitude: [20]
        for i in oo:
            x5 = i*np.random.rand(8*16*11*10)  # uniform noise scaled by amplitude i
            sig = x3 + x2 + x5 + x1
            sig = sig[0:length]
            x = sig
            N = x.shape[0]
            p,q = 2, int(length/2)
            ll = []
            # For each candidate period n, fold the signal into an (m, n)
            # matrix and use the ratio of the two largest singular values
            # as a periodicity score.
            for i in range(p,q+1):
                m,n = int(N/i), i
                ss = m*n
                sig = x[0:ss]
                data_matrix = sig.reshape(m,n)
                u,s,vh = np.linalg.svd(data_matrix)
                si = s[0]/s[1]
                ll.append(si)
    # ar = np.arange(p, q+1)
    # plt.figure(1)
    # plt.stem(ar,ll)
    # plt.show()
    end = time.time()
    dib = end - start
    cum_dib = cum_dib + dib
    l.append(cum_dib/2)
    print(l)
plt.figure(100)
plt.stem(np.arange(len(l)), l)
plt.show()
| [
"scipy.signal.triang",
"matplotlib.pyplot.show",
"time.time",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"numpy.tile",
"numpy.linalg.svd",
"numpy.random.rand",
"scipy.signal.cosine"
] | [((191, 216), 'numpy.tile', 'np.tile', (['x3', '(16 * 11 * 10)'], {}), '(x3, 16 * 11 * 10)\n', (198, 216), True, 'import numpy as np\n'), ((268, 292), 'numpy.tile', 'np.tile', (['x1', '(8 * 16 * 10)'], {}), '(x1, 8 * 16 * 10)\n', (275, 292), True, 'import numpy as np\n'), ((344, 368), 'numpy.tile', 'np.tile', (['x2', '(8 * 11 * 10)'], {}), '(x2, 8 * 11 * 10)\n', (351, 368), True, 'import numpy as np\n'), ((1067, 1082), 'matplotlib.pyplot.figure', 'plt.figure', (['(100)'], {}), '(100)\n', (1077, 1082), True, 'import matplotlib.pyplot as plt\n'), ((1114, 1124), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1122, 1124), True, 'import matplotlib.pyplot as plt\n'), ((169, 185), 'scipy.signal.triang', 'signal.triang', (['(8)'], {}), '(8)\n', (182, 185), False, 'from scipy import signal\n'), ((225, 236), 'numpy.mean', 'np.mean', (['x3'], {}), '(x3)\n', (232, 236), True, 'import numpy as np\n'), ((245, 262), 'scipy.signal.cosine', 'signal.cosine', (['(11)'], {}), '(11)\n', (258, 262), False, 'from scipy import signal\n'), ((301, 312), 'numpy.mean', 'np.mean', (['x1'], {}), '(x1)\n', (308, 312), True, 'import numpy as np\n'), ((321, 338), 'scipy.signal.triang', 'signal.triang', (['(16)'], {}), '(16)\n', (334, 338), False, 'from scipy import signal\n'), ((377, 388), 'numpy.mean', 'np.mean', (['x2'], {}), '(x2)\n', (384, 388), True, 'import numpy as np\n'), ((458, 469), 'time.time', 'time.time', ([], {}), '()\n', (467, 469), False, 'import time\n'), ((504, 524), 'numpy.arange', 'np.arange', (['(20)', '(21)', '(2)'], {}), '(20, 21, 2)\n', (513, 524), True, 'import numpy as np\n'), ((977, 988), 'time.time', 'time.time', ([], {}), '()\n', (986, 988), False, 'import time\n'), ((548, 580), 'numpy.random.rand', 'np.random.rand', (['(8 * 16 * 11 * 10)'], {}), '(8 * 16 * 11 * 10)\n', (562, 580), True, 'import numpy as np\n'), ((819, 845), 'numpy.linalg.svd', 'np.linalg.svd', (['data_matrix'], {}), '(data_matrix)\n', (832, 845), True, 'import numpy as np\n')] |
import tensorflow as tf
from networks import model, losses
from data_loader.dataset_read import get_image, batch_images
from nets import nets_factory
from preprocessing import preprocessing_factory
import os
import cv2
import numpy as np
slim = tf.contrib.slim
class StyleTransfer:
    def __init__(self, FLAGS):
        """Build the style-transfer training graph and its supporting ops.

        :param FLAGS: parsed command-line flags; see _build_model and
            _init_train_op for the fields used.
        """
        self.FLAGS = FLAGS
        # Build everything inside a dedicated graph so the model is isolated
        # from any default graph the caller may have.
        self.g = tf.Graph()
        with self.g.as_default():
            self._init_global_step()
            self._build_model()
            self._init_train_op()
            self._init_saver()
            self._init_summary()
    def _init_saver(self):
        """Create a Saver over all variables except the loss network's."""
        save_variables = []
        for var in tf.global_variables():
            # Loss-network weights are restored from a pretrained checkpoint
            # and never trained, so they are excluded from saving.
            if not var.name.startswith(self.FLAGS.loss_model):
                save_variables.append(var)
        self.model_save_dir = self.FLAGS.model_save_dir or "models"
        self.saver = tf.train.Saver(save_variables)
    def _init_summary(self):
        """Merge all summaries and open a FileWriter on the log directory."""
        summary_dir = self.FLAGS.log_dir or "logs/"
        if not tf.gfile.Exists(summary_dir):
            tf.gfile.MakeDirs(summary_dir)
        self.summary = slim.summary.merge_all()
        self.summary_writer = tf.summary.FileWriter(logdir=summary_dir, graph=self.g)
    def _init_global_step(self):
        """Create (or fetch) the global step counter used by the optimizer."""
        self.global_step = tf.train.get_or_create_global_step()
    def _build_model(self):
        """Assemble the training graph: input pipeline, transform net, losses.

        A fixed pretrained "loss network" (FLAGS.loss_model) provides the
        activations used for the perceptual style/content losses against
        precomputed style features.
        """
        tf.logging.set_verbosity(tf.logging.INFO)
        # Style features of the target style image, computed once up front.
        style_features = losses.get_style_features(self.FLAGS)
        network_fn = nets_factory.get_network_fn(
            self.FLAGS.loss_model,
            num_classes=1,
            is_training=False
        )
        preprocessing_fn, unprocessing_fn = preprocessing_factory.get_preprocessing(
            self.FLAGS.loss_model,
            is_training=False
        )
        image = get_image(self.FLAGS.num_samples, self.FLAGS.image_size, self.FLAGS.tfrecord_file)
        processed_image = preprocessing_fn(image, self.FLAGS.image_size, self.FLAGS.image_size)
        images = batch_images(processed_image, batch_size=self.FLAGS.batch_size)
        generated = model.net(images)
        self.generated = generated
        """prepare for evaluate the loss"""
        # Re-apply the loss network's preprocessing to the generated images.
        processed_generated = [
            preprocessing_fn(image, self.FLAGS.image_size, self.FLAGS.image_size)
            for image in tf.unstack(generated, axis=0, num=self.FLAGS.batch_size)
        ]
        processed_generated = tf.stack(processed_generated)
        # Run generated and original batches through the loss network in one
        # pass; the endpoints contain activations for both halves.
        _, endpoints_dict = network_fn(tf.concat([processed_generated, images], 0),
                                       spatial_squeeze=False)
        """losses"""
        style_loss, style_loss_summary = losses.style_loss(style_features, endpoints_dict, self.FLAGS.style_layers)
        content_loss = losses.content_loss(endpoints_dict, self.FLAGS.content_layers)
        variation_loss = losses.total_variation_loss(generated)
        # Weighted sum of the three perceptual loss terms.
        total_loss = self.FLAGS.style_weight * style_loss + \
                     self.FLAGS.content_weight * content_loss + \
                     self.FLAGS.variation_weight * variation_loss
        self.style_loss = tf.identity(style_loss, "style_loss")
        self.content_loss = tf.identity(content_loss, "content_loss")
        self.variation_loss = tf.identity(variation_loss, "variation_loss")
        self.total_loss = tf.identity(total_loss, "total_loss")
        tf.losses.add_loss(self.style_loss)
        tf.losses.add_loss(self.content_loss)
        tf.losses.add_loss(self.variation_loss)
        tf.losses.add_loss(self.total_loss)
        slim.summarize_collection(tf.GraphKeys.LOSSES)
        # slim.summarize_variables("style_transfer")
        slim.summary.image("generated", generated)
        slim.summary.image("origin", tf.stack([
            unprocessing_fn(image) for image in tf.unstack(images, axis=0, num=self.FLAGS.batch_size)
        ]))
    def _init_train_op(self):
        """Create the Adam training op over the transform-net variables only."""
        train_variables = []
        for var in tf.trainable_variables():
            # Never train the pretrained loss network.
            if not var.name.startswith(self.FLAGS.loss_model):
                train_variables.append(var)
        # Step-wise exponential decay: lr *= 0.66 every 1000 steps.
        self.learning_rate = tf.train.exponential_decay(
            self.FLAGS.learning_rate, self.global_step, 1000, 0.66, name="learning_rate")
        slim.summarize_tensor(self.learning_rate)
        self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.total_loss,
                                                                         global_step=self.global_step,
                                                                         var_list=train_variables)
    def restore_network(self, sess):
        """Initialize the loss network from its pretrained checkpoint, then
        restore the latest saved transfer-model checkpoint if one exists."""
        init_fn = self.get_network_init_fn()
        init_fn(sess)
        last_file = tf.train.latest_checkpoint(self.model_save_dir)
        if last_file:
            tf.logging.info("restore model from {}".format(last_file))
            self.saver.restore(sess, last_file)
    def save_network(self, sess, global_step=None):
        """Save the transfer model (excluding loss-network weights) to disk."""
        self.saver.save(sess, os.path.join(self.model_save_dir, "style_transfer.ckpt"), global_step=global_step)
def get_network_init_fn(self):
tf.logging.info("Use pretrained model {}".format(self.FLAGS.loss_model_file))
exclusions = []
if self.FLAGS.checkpoint_exclude_scopes:
exclusions = [scope.strip()
for scope in self.FLAGS.checkpoint_exclude_scopes.split(",")]
variables_to_restore = []
for var in slim.get_model_variables():
excluded = False
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
excluded = True
break
if not excluded:
variables_to_restore.append(var)
return slim.assign_from_checkpoint_fn(
self.FLAGS.loss_model_file,
variables_to_restore,
ignore_missing_vars=True
)
    def evaluate_network(self, image_path, image_size=None, origin_color=False):
        """Run the trained style-transfer network on one image file.

        Args:
            image_path (str): path of the image to stylize.
            image_size: optional [height, width] for preprocessing; defaults
                to [FLAGS.image_size, FLAGS.image_size].
            origin_color (bool): if True, keep the original image's color and
                take only the luminance from the generated image.

        Returns:
            The stylized image as a uint8 numpy array, resized back to the
            original image's dimensions.
        """
        # Build a fresh graph so evaluation does not collide with training ops.
        with tf.Graph().as_default() as g:
            filename_queue = tf.train.string_input_producer([image_path])
            reader = tf.WholeFileReader()
            _, value = reader.read(filename_queue)
            test_image = tf.image.decode_image(value)
            preprocessing_fn, unprocessing_fn = preprocessing_factory.get_preprocessing(
                self.FLAGS.loss_model,
                is_training=False
            )
            image_size = image_size or [self.FLAGS.image_size, self.FLAGS.image_size]
            processed_image = preprocessing_fn(test_image, image_size[0], image_size[1])
            # The transform network expects a batch dimension; add it for the
            # forward pass and strip it from the output.
            images = tf.expand_dims(processed_image, axis=0)
            generated = model.net(images)
            generated = tf.squeeze(generated, axis=0)
            with tf.Session(graph=g) as sess:
                last_file = tf.train.latest_checkpoint(self.model_save_dir)
                if last_file:
                    tf.logging.info("restore model from {}".format(last_file))
                    saver = tf.train.Saver()
                    saver.restore(sess, last_file)
                tf.logging.info("start to transfer")
                # Queue runners feed the filename queue; stop and join them
                # once both tensors have been evaluated.
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(coord=coord)
                origin_image = sess.run(test_image)
                styled_image = sess.run(generated)
                coord.request_stop()
                coord.join(threads)
                tf.logging.info("finish transfer")
                """ resize to origin image"""
                # shape[1::-1] is (width, height) — the order cv2.resize expects.
                styled_image = cv2.resize(styled_image, origin_image.shape[1::-1])
                styled_image = np.array(styled_image, dtype=np.uint8)
                if origin_color:
                    styled_image = self.keep_origin_color(origin_image, styled_image)
                return styled_image
@staticmethod
def keep_origin_color(origin, generated):
""" convert image from RGB to YUV. Combine the Y channel of generated and the UV channel of the origin"""
y, _, _ = cv2.split(cv2.cvtColor(generated, cv2.COLOR_RGB2YUV))
_, u, v = cv2.split(cv2.cvtColor(origin, cv2.COLOR_RGB2YUV))
merged = cv2.merge([y, u, v])
return cv2.cvtColor(merged, cv2.COLOR_YUV2RGB)
| [
"tensorflow.gfile.Exists",
"tensorflow.train.Coordinator",
"nets.nets_factory.get_network_fn",
"tensorflow.trainable_variables",
"tensorflow.identity",
"tensorflow.logging.info",
"tensorflow.logging.set_verbosity",
"tensorflow.global_variables",
"tensorflow.train.latest_checkpoint",
"networks.loss... | [((360, 370), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (368, 370), True, 'import tensorflow as tf\n'), ((647, 668), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (666, 668), True, 'import tensorflow as tf\n'), ((865, 895), 'tensorflow.train.Saver', 'tf.train.Saver', (['save_variables'], {}), '(save_variables)\n', (879, 895), True, 'import tensorflow as tf\n'), ((1144, 1199), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', ([], {'logdir': 'summary_dir', 'graph': 'self.g'}), '(logdir=summary_dir, graph=self.g)\n', (1165, 1199), True, 'import tensorflow as tf\n'), ((1261, 1297), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (1295, 1297), True, 'import tensorflow as tf\n'), ((1335, 1376), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (1359, 1376), True, 'import tensorflow as tf\n'), ((1402, 1439), 'networks.losses.get_style_features', 'losses.get_style_features', (['self.FLAGS'], {}), '(self.FLAGS)\n', (1427, 1439), False, 'from networks import model, losses\n'), ((1462, 1550), 'nets.nets_factory.get_network_fn', 'nets_factory.get_network_fn', (['self.FLAGS.loss_model'], {'num_classes': '(1)', 'is_training': '(False)'}), '(self.FLAGS.loss_model, num_classes=1,\n is_training=False)\n', (1489, 1550), False, 'from nets import nets_factory\n'), ((1637, 1723), 'preprocessing.preprocessing_factory.get_preprocessing', 'preprocessing_factory.get_preprocessing', (['self.FLAGS.loss_model'], {'is_training': '(False)'}), '(self.FLAGS.loss_model, is_training=\n False)\n', (1676, 1723), False, 'from preprocessing import preprocessing_factory\n'), ((1769, 1856), 'data_loader.dataset_read.get_image', 'get_image', (['self.FLAGS.num_samples', 'self.FLAGS.image_size', 'self.FLAGS.tfrecord_file'], {}), '(self.FLAGS.num_samples, self.FLAGS.image_size, self.FLAGS.\n tfrecord_file)\n', (1778, 1856), False, 
'from data_loader.dataset_read import get_image, batch_images\n'), ((1965, 2028), 'data_loader.dataset_read.batch_images', 'batch_images', (['processed_image'], {'batch_size': 'self.FLAGS.batch_size'}), '(processed_image, batch_size=self.FLAGS.batch_size)\n', (1977, 2028), False, 'from data_loader.dataset_read import get_image, batch_images\n'), ((2049, 2066), 'networks.model.net', 'model.net', (['images'], {}), '(images)\n', (2058, 2066), False, 'from networks import model, losses\n'), ((2382, 2411), 'tensorflow.stack', 'tf.stack', (['processed_generated'], {}), '(processed_generated)\n', (2390, 2411), True, 'import tensorflow as tf\n'), ((2620, 2694), 'networks.losses.style_loss', 'losses.style_loss', (['style_features', 'endpoints_dict', 'self.FLAGS.style_layers'], {}), '(style_features, endpoints_dict, self.FLAGS.style_layers)\n', (2637, 2694), False, 'from networks import model, losses\n'), ((2718, 2780), 'networks.losses.content_loss', 'losses.content_loss', (['endpoints_dict', 'self.FLAGS.content_layers'], {}), '(endpoints_dict, self.FLAGS.content_layers)\n', (2737, 2780), False, 'from networks import model, losses\n'), ((2806, 2844), 'networks.losses.total_variation_loss', 'losses.total_variation_loss', (['generated'], {}), '(generated)\n', (2833, 2844), False, 'from networks import model, losses\n'), ((3066, 3103), 'tensorflow.identity', 'tf.identity', (['style_loss', '"""style_loss"""'], {}), "(style_loss, 'style_loss')\n", (3077, 3103), True, 'import tensorflow as tf\n'), ((3132, 3173), 'tensorflow.identity', 'tf.identity', (['content_loss', '"""content_loss"""'], {}), "(content_loss, 'content_loss')\n", (3143, 3173), True, 'import tensorflow as tf\n'), ((3204, 3249), 'tensorflow.identity', 'tf.identity', (['variation_loss', '"""variation_loss"""'], {}), "(variation_loss, 'variation_loss')\n", (3215, 3249), True, 'import tensorflow as tf\n'), ((3276, 3313), 'tensorflow.identity', 'tf.identity', (['total_loss', '"""total_loss"""'], {}), "(total_loss, 
'total_loss')\n", (3287, 3313), True, 'import tensorflow as tf\n'), ((3322, 3357), 'tensorflow.losses.add_loss', 'tf.losses.add_loss', (['self.style_loss'], {}), '(self.style_loss)\n', (3340, 3357), True, 'import tensorflow as tf\n'), ((3366, 3403), 'tensorflow.losses.add_loss', 'tf.losses.add_loss', (['self.content_loss'], {}), '(self.content_loss)\n', (3384, 3403), True, 'import tensorflow as tf\n'), ((3412, 3451), 'tensorflow.losses.add_loss', 'tf.losses.add_loss', (['self.variation_loss'], {}), '(self.variation_loss)\n', (3430, 3451), True, 'import tensorflow as tf\n'), ((3460, 3495), 'tensorflow.losses.add_loss', 'tf.losses.add_loss', (['self.total_loss'], {}), '(self.total_loss)\n', (3478, 3495), True, 'import tensorflow as tf\n'), ((3897, 3921), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (3919, 3921), True, 'import tensorflow as tf\n'), ((4059, 4167), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['self.FLAGS.learning_rate', 'self.global_step', '(1000)', '(0.66)'], {'name': '"""learning_rate"""'}), "(self.FLAGS.learning_rate, self.global_step, 1000,\n 0.66, name='learning_rate')\n", (4085, 4167), True, 'import tensorflow as tf\n'), ((4653, 4700), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['self.model_save_dir'], {}), '(self.model_save_dir)\n', (4679, 4700), True, 'import tensorflow as tf\n'), ((8087, 8107), 'cv2.merge', 'cv2.merge', (['[y, u, v]'], {}), '([y, u, v])\n', (8096, 8107), False, 'import cv2\n'), ((8123, 8162), 'cv2.cvtColor', 'cv2.cvtColor', (['merged', 'cv2.COLOR_YUV2RGB'], {}), '(merged, cv2.COLOR_YUV2RGB)\n', (8135, 8162), False, 'import cv2\n'), ((993, 1021), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['summary_dir'], {}), '(summary_dir)\n', (1008, 1021), True, 'import tensorflow as tf\n'), ((1035, 1065), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['summary_dir'], {}), '(summary_dir)\n', (1052, 1065), True, 'import tensorflow as tf\n'), ((2451, 
2494), 'tensorflow.concat', 'tf.concat', (['[processed_generated, images]', '(0)'], {}), '([processed_generated, images], 0)\n', (2460, 2494), True, 'import tensorflow as tf\n'), ((4925, 4981), 'os.path.join', 'os.path.join', (['self.model_save_dir', '"""style_transfer.ckpt"""'], {}), "(self.model_save_dir, 'style_transfer.ckpt')\n", (4937, 4981), False, 'import os\n'), ((5998, 6042), 'tensorflow.train.string_input_producer', 'tf.train.string_input_producer', (['[image_path]'], {}), '([image_path])\n', (6028, 6042), True, 'import tensorflow as tf\n'), ((6064, 6084), 'tensorflow.WholeFileReader', 'tf.WholeFileReader', ([], {}), '()\n', (6082, 6084), True, 'import tensorflow as tf\n'), ((6161, 6189), 'tensorflow.image.decode_image', 'tf.image.decode_image', (['value'], {}), '(value)\n', (6182, 6189), True, 'import tensorflow as tf\n'), ((6239, 6325), 'preprocessing.preprocessing_factory.get_preprocessing', 'preprocessing_factory.get_preprocessing', (['self.FLAGS.loss_model'], {'is_training': '(False)'}), '(self.FLAGS.loss_model, is_training=\n False)\n', (6278, 6325), False, 'from preprocessing import preprocessing_factory\n'), ((6563, 6602), 'tensorflow.expand_dims', 'tf.expand_dims', (['processed_image'], {'axis': '(0)'}), '(processed_image, axis=0)\n', (6577, 6602), True, 'import tensorflow as tf\n'), ((6627, 6644), 'networks.model.net', 'model.net', (['images'], {}), '(images)\n', (6636, 6644), False, 'from networks import model, losses\n'), ((6669, 6698), 'tensorflow.squeeze', 'tf.squeeze', (['generated'], {'axis': '(0)'}), '(generated, axis=0)\n', (6679, 6698), True, 'import tensorflow as tf\n'), ((7382, 7416), 'tensorflow.logging.info', 'tf.logging.info', (['"""finish transfer"""'], {}), "('finish transfer')\n", (7397, 7416), True, 'import tensorflow as tf\n'), ((7486, 7537), 'cv2.resize', 'cv2.resize', (['styled_image', 'origin_image.shape[1::-1]'], {}), '(styled_image, origin_image.shape[1::-1])\n', (7496, 7537), False, 'import cv2\n'), ((7565, 7603), 
'numpy.array', 'np.array', (['styled_image'], {'dtype': 'np.uint8'}), '(styled_image, dtype=np.uint8)\n', (7573, 7603), True, 'import numpy as np\n'), ((7956, 7998), 'cv2.cvtColor', 'cv2.cvtColor', (['generated', 'cv2.COLOR_RGB2YUV'], {}), '(generated, cv2.COLOR_RGB2YUV)\n', (7968, 7998), False, 'import cv2\n'), ((8028, 8067), 'cv2.cvtColor', 'cv2.cvtColor', (['origin', 'cv2.COLOR_RGB2YUV'], {}), '(origin, cv2.COLOR_RGB2YUV)\n', (8040, 8067), False, 'import cv2\n'), ((2285, 2341), 'tensorflow.unstack', 'tf.unstack', (['generated'], {'axis': '(0)', 'num': 'self.FLAGS.batch_size'}), '(generated, axis=0, num=self.FLAGS.batch_size)\n', (2295, 2341), True, 'import tensorflow as tf\n'), ((4251, 4293), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.learning_rate'], {}), '(self.learning_rate)\n', (4273, 4293), True, 'import tensorflow as tf\n'), ((6716, 6735), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'g'}), '(graph=g)\n', (6726, 6735), True, 'import tensorflow as tf\n'), ((6773, 6820), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['self.model_save_dir'], {}), '(self.model_save_dir)\n', (6799, 6820), True, 'import tensorflow as tf\n'), ((7042, 7078), 'tensorflow.logging.info', 'tf.logging.info', (['"""start to transfer"""'], {}), "('start to transfer')\n", (7057, 7078), True, 'import tensorflow as tf\n'), ((7103, 7125), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (7123, 7125), True, 'import tensorflow as tf\n'), ((7152, 7193), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord'}), '(coord=coord)\n', (7180, 7193), True, 'import tensorflow as tf\n'), ((5939, 5949), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (5947, 5949), True, 'import tensorflow as tf\n'), ((6958, 6974), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (6972, 6974), True, 'import tensorflow as tf\n'), ((3752, 3805), 'tensorflow.unstack', 'tf.unstack', (['images'], 
{'axis': '(0)', 'num': 'self.FLAGS.batch_size'}), '(images, axis=0, num=self.FLAGS.batch_size)\n', (3762, 3805), True, 'import tensorflow as tf\n')] |
import numpy as np
class ReplayBuffer:
    """Fixed-capacity FIFO experience replay buffer for RL agents.

    Transitions are stored as [state, action, reward, terminal, nState] lists;
    mini-batches are drawn uniformly at random (with replacement).
    """
    def __init__(self, size, miniBatchSize):
        # Oldest transitions are evicted once `size` entries are stored.
        self.buffer = []
        self.miniBatchSize = miniBatchSize
        self.maxSize = size
    # append new state, remove oldest if full
    def append(self, state, action, reward, terminal, nState):
        """Store one transition, evicting the oldest entry when at capacity."""
        if len(self.buffer) == self.maxSize:
            del self.buffer[0]
        self.buffer.append([state, action, reward, terminal, nState])
    # get random sample
    def getSample(self):
        """Return miniBatchSize transitions sampled uniformly with replacement.

        Fix: sample via the module-level NumPy generator instead of building a
        fresh ``np.random.RandomState()`` on every call, which was wasteful
        (re-seeded from OS entropy each time) and ignored ``np.random.seed``.
        """
        idxs = np.random.choice(len(self.buffer), size=self.miniBatchSize)
        return [self.buffer[idx] for idx in idxs]
    # get buffer size
    def getSize(self):
        """Return the number of stored transitions."""
        return len(self.buffer)
| [
"numpy.random.RandomState"
] | [((503, 526), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (524, 526), True, 'import numpy as np\n')] |
import numpy as np
def load_dataset():
    """
    (function) load_dataset
    -----------------------
    Return a toy corpus of tokenized posts with their abuse labels
    Parameter
    ---------
    - None
    Return
    ------
    - (posts, labels) where a label of 1 marks an abusive post
    """
    posts = [
        ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
        ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
        ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
        ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
        ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
        ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid'],
    ]
    labels = [0, 1, 0, 1, 0, 1]
    return posts, labels
def create_voca_list(dataset):
    """
    (function) create_voca_list
    ---------------------------
    Build the vocabulary: every unique word across all documents
    Parameter
    ---------
    - dataset : list of tokenized documents
    Return
    ------
    - list of unique words
    """
    vocabulary = set()
    for document in dataset:
        vocabulary.update(document)
    return list(vocabulary)
def word2vec(voca_list, input_set):
    """
    (function) word2vec
    -------------------
    Encode input_set as a binary (set-of-words) vector over voca_list
    Parameter
    ---------
    - voca_list : vocabulary
    - input_set : words to encode
    Return
    ------
    - binary word vector; unknown words are reported on stdout and skipped
    """
    vector = [0] * len(voca_list)
    for token in input_set:
        try:
            vector[voca_list.index(token)] = 1
        except ValueError:
            print('the word: %s is not in my vocabulary!' % token)
    return vector
def train(mat, cat):
    """
    (function) train
    ----------------
    Fit a naive Bayes model on a document-word matrix
    Parameter
    ---------
    - mat : word-count vectors, one per document
    - cat : labels (1 = abusive, anything else = not)
    Return
    ------
    - (p0_vec, p1_vec, pr_abusive): per-class log word probabilities and
      the prior probability of class 1
    """
    num_docs = len(mat)
    num_words = len(mat[0])
    pr_abusive = sum(cat) / float(num_docs)
    # Laplace smoothing: counts start at 1 and denominators at 2 so no word
    # gets probability exactly zero.
    counts = {0: np.ones(num_words), 1: np.ones(num_words)}
    denoms = {0: 2.0, 1: 2.0}
    for doc, label in zip(mat, cat):
        key = 1 if label == 1 else 0
        counts[key] += doc
        denoms[key] += sum(doc)
    # Log probabilities avoid underflow when many small factors are multiplied.
    p0_vec = np.log(counts[0] / denoms[0])
    p1_vec = np.log(counts[1] / denoms[1])
    return p0_vec, p1_vec, pr_abusive
def classify(vec2class, p0_vec, p1_vec, p1_class):
    """
    (function) classify
    -------------------
    Label a word vector by comparing naive Bayes log posteriors
    Parameter
    ---------
    - vec2class : word vector to classify
    - p0_vec    : class-0 log word probabilities
    - p1_vec    : class-1 log word probabilities
    - p1_class  : prior probability of class 1
    Return
    ------
    - 1 if the class-1 log posterior is larger, else 0
    """
    log_posterior_0 = sum(vec2class * p0_vec) + np.log(1.0 - p1_class)
    log_posterior_1 = sum(vec2class * p1_vec) + np.log(p1_class)
    return 1 if log_posterior_1 > log_posterior_0 else 0
def bag_of_words(voca_list, input_set):
    """
    (function) bag_of_words
    -----------------------
    Encode input_set as a word-count (bag-of-words) vector over voca_list
    Parameter
    ---------
    - voca_list : vocabulary
    - input_set : words to encode
    Return
    ------
    - count vector; unlike word2vec, repeated words accumulate, and
      out-of-vocabulary words are silently skipped
    """
    # Map each word to its first position so lookups are O(1) per token.
    positions = {}
    for i, word in enumerate(voca_list):
        positions.setdefault(word, i)
    vector = [0] * len(voca_list)
    for token in input_set:
        index = positions.get(token)
        if index is not None:
            vector[index] += 1
    return vector
"numpy.log",
"numpy.ones"
] | [((2078, 2096), 'numpy.ones', 'np.ones', (['num_words'], {}), '(num_words)\n', (2085, 2096), True, 'import numpy as np\n'), ((2098, 2116), 'numpy.ones', 'np.ones', (['num_words'], {}), '(num_words)\n', (2105, 2116), True, 'import numpy as np\n'), ((2445, 2470), 'numpy.log', 'np.log', (['(p0_num / p0_denom)'], {}), '(p0_num / p0_denom)\n', (2451, 2470), True, 'import numpy as np\n'), ((2472, 2497), 'numpy.log', 'np.log', (['(p1_num / p1_denom)'], {}), '(p1_num / p1_denom)\n', (2478, 2497), True, 'import numpy as np\n'), ((2890, 2912), 'numpy.log', 'np.log', (['(1.0 - p1_class)'], {}), '(1.0 - p1_class)\n', (2896, 2912), True, 'import numpy as np\n'), ((2948, 2964), 'numpy.log', 'np.log', (['p1_class'], {}), '(p1_class)\n', (2954, 2964), True, 'import numpy as np\n')] |
"""Plot figure 6: attack rates vs R0."""
import numpy as np
import pandas as pd
from scipy.stats import linregress
import matplotlib as mplt
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
import matplotlib.font_manager as font_manager
import cmocean
import cmasher
import seaborn
import copy
import os
# set the font family style
mplt.rcParams['font.family'] = 'Myriad Pro' # change to a font you have installed on your computer - checkout Google Fonts for free fonts available for download

# set some initial paths

# path to the directory where this script lives
# fix: os.path.abspath('') resolved the *current working directory*, not this
# script's own directory, so all relative data paths below broke whenever the
# script was launched from anywhere else
thisdir = os.path.dirname(os.path.abspath(__file__))

# path to the main directory of the repository
maindir = os.path.split(os.path.split(thisdir)[0])[0]

# path to the analysis_results subdirectory
analysisdir = os.path.split(thisdir)[0]

# path to the data subdirectory
datadir = os.path.join(os.path.split(os.path.split(thisdir)[0])[0], 'data')

# path to the figures subdirectory within analysis_results
figdir = os.path.join(analysisdir, 'figures')

# locations and the countries they belong to
location_file = os.path.join(analysisdir, 'location_country_names.csv')
locations_df = pd.read_csv(location_file, delimiter = ',')

# fraction of people with contacts in each setting, per location
setting_fractions_file = os.path.join(analysisdir, 'summary_fractions_by_location.csv')
setting_fractions_df = pd.read_csv(setting_fractions_file)

# contact setting codes (H, S, W as household/school/work per get_fractions;
# R presumably the remaining/other setting — confirm)
setting_codes = ['H','S','W','R']
def get_country_name(df, location):
    """
    Look up the country a location belongs to.
    Args:
        df (pandas Dataframe) : dataframe containing locations and corresponding countries
        location (str) : name of the location
    Returns:
        str: Name of the country the location is in.
    """
    matches = df[df.location == location]
    return matches.country.values[0]
def get_locations_by_country(df, country):
    """
    Return all locations recorded for a country.
    Args:
        df (pandas Dataframe) : dataframe containing locations and corresponding countries
        country (str) : name of the country
    Returns:
        list: Names of the locations in the country.
    """
    return list(df[df.country == country].location.values)
def get_fractions(setting_fractions_df, location):
    """
    Get the fraction of people with contacts in each setting of household (H), school (S), or work (W).
    Args:
        setting_fractions_df (pandas DataFrame) : a dataframe
        location (str) : name of the location
    Returns:
        dict: A dictionary of fractions of people with contacts in each setting for the location.
    """
    row = setting_fractions_df[setting_fractions_df.location == location]
    # 'R' has no column in the dataframe and keeps the default of 1.
    return {'H': row.NhN.values[0],
            'S': row.NsN.values[0],
            'W': row.NwN.values[0],
            'R': 1.}
def read_contact_matrix(location, country, level, setting, num_agebrackets=85):
    """
    Read in the contact matrix for each setting.
    Args:
        location (str) : name of the location
        country (str) : name of the country
        level (str) : name of level (country or subnational)
        setting (str) : name of the contact setting
        num_agebrackets (int) : the number of age brackets for the matrix
    Returns:
        A numpy matrix of contact.
    """
    if setting == 'overall':
        setting_type, setting_suffix = 'M', 'contact_matrix'
    else:
        setting_type, setting_suffix = 'F', 'setting'
    # European locations are stored at country level under the location name.
    if country == 'Europe':
        country, level = location, 'country'
    if level == 'country':
        parts = [country, level, 'level', setting_type, setting, setting_suffix, '%i' % num_agebrackets]
    else:
        parts = [country, level, location, setting_type, setting, setting_suffix, '%i' % num_agebrackets]
    file_path = os.path.join(datadir, 'contact_matrices', '_'.join(parts) + '.csv')
    return np.loadtxt(file_path, delimiter=',')
def get_ages(location, country, level, num_agebrackets=85):
    """
    Get the age count for the synthetic population of the location.
    Args:
        location (str) : name of the location
        country (str) : name of the country
        level (str) : name of level (country or subnational)
        num_agebrackets (int) : the number of age brackets
    Returns:
        dict: A dictionary of the age count.
    """
    # European locations are stored at country level under the location name.
    if country == 'Europe':
        country, level = location, 'country'
    if level == 'country':
        file_name = '{}_{}_level_age_distribution_{}.csv'.format(country, level, num_agebrackets)
    else:
        file_name = '{}_{}_{}_age_distribution_{}.csv'.format(country, level, location, num_agebrackets)
    df = pd.read_csv(os.path.join(datadir, 'age_distributions', file_name),
                     delimiter=',', header=None)
    df.columns = ['age', 'age_count']
    return dict(zip(df.age.values.astype(int), df.age_count.values))
def get_average_age(ages):
    """
    Get the average age from a dictionary of age counts.
    Args:
        ages (dict): dictionary of age counts
    Return:
        float: The average age given the age count.
    """
    total_population = sum(ages.values())
    weighted_sum = sum(age * count for age, count in ages.items())
    return weighted_sum / total_population
def get_school_age_distribution(location):
    """
    Get the age count of people active in the school setting.
    Args:
        location (str): name of the location
    Returns:
        dict: Age count of people active in the school setting.
    """
    file_path = os.path.join(analysisdir, 'schools_age_distributions',
                             'schools_age_distributions_' + location + '.dat')
    df = pd.read_csv(file_path, delimiter = ',')
    return dict(zip(df.age.values, df.setting_count.values))
def get_percent_in_school(ages, school_ages):
    """
    Get the percent of people in school.
    Args:
        ages (dict) : age count
        school_ages (dict) : school age count
    Returns:
        float: The percent of people in the school setting (0-100).
    """
    enrolled = np.sum(list(school_ages.values()), dtype=float)
    population = np.sum(list(ages.values()), dtype=float)
    return enrolled / population * 100
def get_attack_rates_df(reference_location, reference_scenario, beta, susceptibility_drop_factor, gamma_inverse, num_agebrackets):
    """
    Get attack rates dataframe for an SIR compartmental model with age specific contact patterns.
    Args:
        reference_location (str) : name of reference location or locations
        reference_scenario (str) : specific reference scenario
        beta (float) : the transmissibilty
        susceptibility_drop_factor (float) : susceptibility of adults to those under 18
        gamma_inverse (float) : the mean recovery period
        num_agebrackets (int) : the number of age brackets for the matrix
    Returns:
        Pandas dataframe of attack rates by location
    """
    # Bug fix: the file name previously interpolated the module-level global
    # `susceptibility` instead of the `susceptibility_drop_factor` parameter,
    # so the argument passed by the caller was silently ignored.
    file_name = ('all_locations_attack_rates_by_age_reference_scenario_' + reference_scenario
                 + '_beta_' + '%.2f' % beta
                 + '_susceptibility_' + '%.2f' % susceptibility_drop_factor
                 + '_gamma_inverse_' + '%.1f' % gamma_inverse
                 + '_' + str(num_agebrackets) + '.csv')
    file_path = os.path.join(analysisdir, 'reference_location_' + reference_location,
                             'mcmc_beta_and_dropfactor_scenario_' + reference_scenario,
                             file_name)
    return pd.read_csv(file_path)
def get_attack_rate(df, location):
    """
    Get the total attack rate for the location from the dataframe for an SIR compartmental model with age specific contact patterns.
    Args:
        df (pd.DataFrame) : a dataframe of attack rates by location
        location (str) : name of the location
    Returns:
        float: The total attack rate for the location as a fraction. Values between 0 and 1.
    """
    row = df.loc[df['location'] == location]
    return row['artotal'].values[0]
def get_homogeneous_attack_rate_df(gamma_inverse, num_agebrackets):
    """
    Get a dataframe with the attack rate for an SIR model with the homogeneous mixing assumption for different basic reproduction, R0, values.
    Note: both parameters are currently unused — the same table on disk is read regardless.
    Args:
        gamma_inverse (float): the mean recovery period
        num_agebrackets (int): the number of age brackets for the matrix
    Returns:
        Pandas dataframe of attack rates by R0 value
    """
    file_path = os.path.join(analysisdir, 'homogeneous_sir_attack_rates',
                             'attack_rates_SIR_homogeneous_mixing.csv')
    return pd.read_csv(file_path)
def get_homogeneous_attack_rate(df, R0_star):
    """
    Get the attack rate for an SIR model with the homogeneous mixing assumption for a given basic reproduction number.
    Args:
        df (pd.DataFrame) : a dataframe of attack rates by the basic reproduction number
        R0_star (float) : the basic reproduction number
    Returns:
        float: The total attack rate as a fraction. Values between 0 and 1.
    """
    matching = df.loc[df['R0'] == R0_star]
    return matching['attack_rate'].values[0]
def get_eigenvalue(matrix):
    """
    Get the real component of the leading eigenvalue of a square matrix.
    Args:
        matrix (np.ndarray): square matrix
    Returns:
        float: Real component of the leading eigenvalue of the matrix.
    """
    eigenvalues = np.linalg.eigvals(matrix)
    return max(eigenvalues).real
def get_R0(beta, gamma_inverse, matrix):
    """
    Get the basic reproduction number, R0, for an SIR compartmental model.
    R0 = beta * leading_eigenvalue(matrix) / gamma.
    Args:
        beta (float) : the transmissibility beta
        gamma_inverse (float) : the mean recovery period
        matrix (np.ndarray) : the contact matrix
    Returns:
        float: The basic reproduction number R0 for an SIR compartmental model.
    """
    recovery_rate = 1. / gamma_inverse
    return beta * get_eigenvalue(matrix) / recovery_rate
def get_beta(R0, gamma_inverse, matrix):
    """
    Get the transmissibility from an SIR model with age specific contact patterns and basic reproduction number.
    beta = R0 * gamma / leading_eigenvalue(matrix) — the inverse of get_R0.
    Args:
        R0 (float) : the basic reproduction number
        gamma_inverse (float) : the mean recovery period
        matrix (np.ndarray) : the age specific contact matrix
    Returns:
        float: The transmissibility from an SIR model with age specific contact patterns.
    """
    recovery_rate = 1. / gamma_inverse
    return R0 * recovery_rate / get_eigenvalue(matrix)
def linear_function(x, m, b):
    """
    Evaluate the line y = m*x + b at x.
    Args:
        x (float): the input value
        m (float): the slope
        b (float): the intercept
    Returns:
        The y value of the line at x.
    """
    return b + m * x
def plot_fig(countries, reference_location, reference_scenario, beta, susceptibility_drop_factor, gamma_inverse, num_agebrackets):
    """
    Plot the attack rates from an SIR model with age specific contact patterns vs the basic reproduction, the average age,
    and the percent of the population with contacts in the school layer for subnational locations.
    Args:
        countries (list)                    : list of countries
        reference_location (str)            : the reference location (or set of locations) the transmissibilty is calibrated to
        reference_scenario (str)            : label of the reference scenario
        beta (float)                        : the transmissibility
        susceptibility_drop_factor (float)  : susceptibility of adults to those under 18
        gamma_inverse (float)               : the mean recovery period
        num_agebrackets (int)               : the number of age brackets
    Returns:
        Matplotlib figure.
    """
    # NOTE(review): the `countries` argument is immediately overwritten by this
    # hardcoded list, so the parameter currently has no effect.
    countries = ['Australia', 'Canada', 'China', 'Europe', 'India', 'Israel', 'Japan', 'Russia', 'South_Africa', 'United_States']
    locations = []
    # Collect subnational locations; drop the country-level aggregates (except
    # Israel, which is only available at country level) and a few locations.
    for country in countries:
        locations += get_locations_by_country(locations_df, country)
        if country in locations and country != 'Israel':
            locations.remove(country)
        if country == 'India':
            locations.remove('Dadra_and_Nagar_Haveli')
            locations.remove('Chandigarh')
            locations.remove('Lakshadweep')
    if 'Russian_Federation' in locations:
        locations.remove('Russian_Federation')
    # One fixed scatter color per country.
    country_colour_dic = {}
    country_colour_dic['Australia'] = '#0000ff'
    country_colour_dic['Canada'] = '#2ab207'
    country_colour_dic['China'] = '#fcc200'
    country_colour_dic['Europe'] = '#941cca'
    country_colour_dic['India'] = 'darkorange'
    country_colour_dic['Israel'] = '#9b9b9b'
    country_colour_dic['Japan'] = '#000098'
    country_colour_dic['Russia'] = '#dc142b'
    country_colour_dic['South_Africa'] = '#b5d93c'
    country_colour_dic['United_States'] = '#00ace7'
    hdf = get_homogeneous_attack_rate_df(gamma_inverse, num_agebrackets)
    df = get_attack_rates_df(reference_location, reference_scenario, beta, susceptibility_drop_factor, gamma_inverse, num_agebrackets)
    # Figure layout: three panels side by side plus a legend axis on the right.
    width = 16
    height = 5
    left = 0.06
    right = 0.865
    bottom = 0.16
    top = 0.88
    wspace = 0.32
    fig, ax = plt.subplots(1, 3, figsize=(width, height))
    fig.subplots_adjust(left = left, right = right, top = top, bottom = bottom, wspace = wspace)
    leg_left = right + 0.01
    leg_right = 0.985
    leg_bottom = bottom
    leg_top = top
    leg_width = leg_right - leg_left
    leg_height = leg_top - leg_bottom
    fontsize = 20
    axleg = fig.add_axes([leg_left, leg_bottom, leg_width, leg_height])
    axleg.axis('off')
    # Per-location quantities for the three x axes plus shared colors/y values.
    attack_rates_list = []
    R0_list = []
    average_age_list = []
    percent_in_school_list = []
    color_list = []
    beta_drop_age = 18  # NOTE(review): unused in this function
    for n, location in enumerate(locations):
        country = get_country_name(locations_df, location)
        # Israel's data is only available at country level.
        if location == 'Israel':
            matrix = read_contact_matrix(location, country, 'country', 'overall')
            ages = get_ages(location, country, 'country')
        else:
            matrix = read_contact_matrix(location, country, 'subnational', 'overall')
            ages = get_ages(location, country, 'subnational')
        R0 = get_R0(beta, gamma_inverse, matrix)
        R0_list.append(R0)
        average_age = get_average_age(ages)
        average_age_list.append(average_age)
        # Europe has no school age distribution file; use the precomputed
        # setting fraction instead.
        if country != 'Europe':
            school_ages = get_school_age_distribution(location)
            percent_in_school = get_percent_in_school(ages, school_ages)
        else:
            fractions = get_fractions(setting_fractions_df, location)
            percent_in_school = fractions['S'] * 100
        percent_in_school_list.append(percent_in_school)
        ar = get_attack_rate(df, location) * 100
        attack_rates_list.append(ar)
        color = country_colour_dic[country]
        color_list.append(color)
    homogeneous_attack_rates_list = hdf.attack_rate.values * 100
    homogeneous_R0_list = hdf.R0.values
    size = 12
    leg = []
    # Panel a: attack rate vs R0, with the homogeneous-mixing curve overlaid.
    ax[0].scatter(R0_list, attack_rates_list, marker='o', color=color_list, s=size)
    ax[0].plot(homogeneous_R0_list, homogeneous_attack_rates_list, color='k', lw=1.5, label='Homogenous \nmixing model')
    leg.append(ax[0].legend(loc=2, fontsize=17))
    ax[0].set_xlim(1.5, 2.0)
    ax[0].set_xticks(np.arange(1.5, 2.01, 0.1))
    ax[0].set_xlabel(r'$R_0$', fontsize = fontsize)
    ax[0].text(1.39, 50, 'a', fontsize=fontsize+24, fontstyle='oblique')
    # Panel b: attack rate vs average age, with a least-squares fit line and
    # the correlation coefficient annotated.
    m,b,r,p,std_err = linregress(average_age_list,attack_rates_list)
    y_theory = np.array([linear_function(a,m,b) for a in average_age_list])
    ax[1].plot(average_age_list, y_theory, color = 'k', lw = 1.5)
    ax[1].scatter(average_age_list, attack_rates_list, marker='o', color=color_list, s=size)
    ax[1].text(45,73, r'$\rho$ = ' + '%.2f' % r , fontsize = 18, verticalalignment = 'center', horizontalalignment = 'center', color = 'k')
    ax[1].set_xlim(20, 55)
    ax[1].set_xticks(np.arange(20, 51, 10))
    leg.append(ax[1].legend(loc = 7, fontsize = 16, ncol = 1))
    ax[1].set_xlabel('Average Age', fontsize = fontsize)
    ax[1].text(13.5, 50, 'b', fontsize=fontsize+24, fontstyle='oblique')
    # Panel c: attack rate vs percent in educational institutions, same fit.
    m,b,r,p,std_err = linregress(percent_in_school_list,attack_rates_list)
    y_theory = np.array([linear_function(a,m,b) for a in percent_in_school_list])
    ax[2].plot(percent_in_school_list, y_theory, color = 'k', lw = 1.5)
    ax[2].scatter(percent_in_school_list, attack_rates_list, marker='o', color=color_list, s=size)
    ax[2].text(20,73, r'$\rho$ = ' + '%.2f' % r , fontsize = 18, verticalalignment = 'center', horizontalalignment = 'center', color = 'k')
    ax[2].set_xlim(10, 45)
    ax[2].set_xticks(np.arange(10, 50, 10))
    ax[2].set_xlabel('% In Educational Institutions', fontsize = fontsize)
    ax[2].text(3.5, 50, 'c', fontsize=fontsize+24, fontstyle='oblique')
    # Dummy scatter points at (0, 0) solely to populate the country legend.
    for country in ['Australia','Canada','China','Europe','India','Israel','Japan','Russia','South_Africa','United_States']:
        ax[2].scatter(0,0, color = country_colour_dic[country], s = size * 2, label = country.replace('-',' ').replace('_',' '))
    leg.append(ax[2].legend(loc = 7, fontsize = 17, ncol = 1, bbox_to_anchor = (0.6,0.4,1,0.2)))
    # Shared y axis styling across the three panels.
    for i in range(len(ax)):
        ax[i].set_ylabel('Attack Rate (%)', fontsize = fontsize)
        ax[i].set_yticks(np.arange(55, 81, 5))
        ax[i].set_ylim(54, 80)
        ax[i].tick_params(labelsize=fontsize-2)
        ax[i].tick_params(axis='x', which='minor', bottom=False)
        ax[i].tick_params(axis='y', which='minor', left=False)
        leg[i].draw_frame(False)
    plt.minorticks_off()
    fig_path = os.path.join(figdir, 'fig_6.pdf')
    fig.savefig(fig_path, format='pdf')
if __name__ == '__main__':
    # SIR model parameters for the figure-6 runs.
    beta = 0.04752            # transmission rate passed to get_R0 inside plot_fig
    susceptibility = 1.0
    gamma_inverse = 2.6       # inverse recovery rate (1/gamma)
    num_agebrackets = 85
    # Which contact-matrix reference data set and scenario to use.
    reference_location = 'polymod_and_tomsk'
    reference_scenario = 'all_locations'
    countries = [
        'Australia', 'Canada', 'China', 'Europe', 'India',
        'Israel', 'Japan', 'Russia', 'South_Africa', 'United_States',
    ]
    plot_fig(countries, reference_location, reference_scenario, beta,
             susceptibility, gamma_inverse, num_agebrackets)
| [
"numpy.linalg.eigvals",
"os.path.abspath",
"pandas.read_csv",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.minorticks_off",
"numpy.arange",
"numpy.loadtxt",
"scipy.stats.linregress",
"os.path.split",
"os.path.join"
] | [((628, 647), 'os.path.abspath', 'os.path.abspath', (['""""""'], {}), "('')\n", (643, 647), False, 'import os\n'), ((1013, 1049), 'os.path.join', 'os.path.join', (['analysisdir', '"""figures"""'], {}), "(analysisdir, 'figures')\n", (1025, 1049), False, 'import os\n'), ((1067, 1122), 'os.path.join', 'os.path.join', (['analysisdir', '"""location_country_names.csv"""'], {}), "(analysisdir, 'location_country_names.csv')\n", (1079, 1122), False, 'import os\n'), ((1138, 1179), 'pandas.read_csv', 'pd.read_csv', (['location_file'], {'delimiter': '""","""'}), "(location_file, delimiter=',')\n", (1149, 1179), True, 'import pandas as pd\n'), ((1208, 1270), 'os.path.join', 'os.path.join', (['analysisdir', '"""summary_fractions_by_location.csv"""'], {}), "(analysisdir, 'summary_fractions_by_location.csv')\n", (1220, 1270), False, 'import os\n'), ((1294, 1329), 'pandas.read_csv', 'pd.read_csv', (['setting_fractions_file'], {}), '(setting_fractions_file)\n', (1305, 1329), True, 'import pandas as pd\n'), ((809, 831), 'os.path.split', 'os.path.split', (['thisdir'], {}), '(thisdir)\n', (822, 831), False, 'import os\n'), ((3926, 3978), 'os.path.join', 'os.path.join', (['datadir', '"""contact_matrices"""', 'file_name'], {}), "(datadir, 'contact_matrices', file_name)\n", (3938, 3978), False, 'import os\n'), ((3987, 4023), 'numpy.loadtxt', 'np.loadtxt', (['file_path'], {'delimiter': '""","""'}), "(file_path, delimiter=',')\n", (3997, 4023), True, 'import numpy as np\n'), ((4845, 4898), 'os.path.join', 'os.path.join', (['datadir', '"""age_distributions"""', 'file_name'], {}), "(datadir, 'age_distributions', file_name)\n", (4857, 4898), False, 'import os\n'), ((4908, 4958), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {'delimiter': '""","""', 'header': 'None'}), "(file_path, delimiter=',', header=None)\n", (4919, 4958), True, 'import pandas as pd\n'), ((5780, 5889), 'os.path.join', 'os.path.join', (['analysisdir', '"""schools_age_distributions"""', "('schools_age_distributions_' 
+ location + '.dat')"], {}), "(analysisdir, 'schools_age_distributions', \n 'schools_age_distributions_' + location + '.dat')\n", (5792, 5889), False, 'import os\n'), ((5910, 5947), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {'delimiter': '""","""'}), "(file_path, delimiter=',')\n", (5921, 5947), True, 'import pandas as pd\n'), ((7723, 7745), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {}), '(file_path)\n', (7734, 7745), True, 'import pandas as pd\n'), ((8790, 8894), 'os.path.join', 'os.path.join', (['analysisdir', '"""homogeneous_sir_attack_rates"""', '"""attack_rates_SIR_homogeneous_mixing.csv"""'], {}), "(analysisdir, 'homogeneous_sir_attack_rates',\n 'attack_rates_SIR_homogeneous_mixing.csv')\n", (8802, 8894), False, 'import os\n'), ((8900, 8922), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {}), '(file_path)\n', (8911, 8922), True, 'import pandas as pd\n'), ((13779, 13822), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(width, height)'}), '(1, 3, figsize=(width, height))\n', (13791, 13822), True, 'import matplotlib.pyplot as plt\n'), ((16103, 16150), 'scipy.stats.linregress', 'linregress', (['average_age_list', 'attack_rates_list'], {}), '(average_age_list, attack_rates_list)\n', (16113, 16150), False, 'from scipy.stats import linregress\n'), ((16813, 16866), 'scipy.stats.linregress', 'linregress', (['percent_in_school_list', 'attack_rates_list'], {}), '(percent_in_school_list, attack_rates_list)\n', (16823, 16866), False, 'from scipy.stats import linregress\n'), ((18217, 18237), 'matplotlib.pyplot.minorticks_off', 'plt.minorticks_off', ([], {}), '()\n', (18235, 18237), True, 'import matplotlib.pyplot as plt\n'), ((18254, 18287), 'os.path.join', 'os.path.join', (['figdir', '"""fig_6.pdf"""'], {}), "(figdir, 'fig_6.pdf')\n", (18266, 18287), False, 'import os\n'), ((15927, 15952), 'numpy.arange', 'np.arange', (['(1.5)', '(2.01)', '(0.1)'], {}), '(1.5, 2.01, 0.1)\n', (15936, 15952), True, 'import numpy as np\n'), 
((16573, 16594), 'numpy.arange', 'np.arange', (['(20)', '(51)', '(10)'], {}), '(20, 51, 10)\n', (16582, 16594), True, 'import numpy as np\n'), ((17307, 17328), 'numpy.arange', 'np.arange', (['(10)', '(50)', '(10)'], {}), '(10, 50, 10)\n', (17316, 17328), True, 'import numpy as np\n'), ((720, 742), 'os.path.split', 'os.path.split', (['thisdir'], {}), '(thisdir)\n', (733, 742), False, 'import os\n'), ((9822, 9847), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['matrix'], {}), '(matrix)\n', (9839, 9847), True, 'import numpy as np\n'), ((17950, 17970), 'numpy.arange', 'np.arange', (['(55)', '(81)', '(5)'], {}), '(55, 81, 5)\n', (17959, 17970), True, 'import numpy as np\n'), ((905, 927), 'os.path.split', 'os.path.split', (['thisdir'], {}), '(thisdir)\n', (918, 927), False, 'import os\n')] |
import numpy as np
import pandas as pd
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import Adam
import pickle
import matplotlib.pyplot as plt
import lime
import lime.lime_tabular
from lime.lime_tabular import LimeTabularExplainer
# Fix random seed for reproducibility.
np.random.seed(0)
# Read dataset.
db = pd.read_csv("covid_filtered_1-5_allMin5.csv")
# Column indices of the feature columns.
inputColumns = [0, 2, 3, 4, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
# Split the rows by illness severity level (1-5).
class1_db = db.loc[db.ill_level == 1]
class2_db = db.loc[db.ill_level == 2]
class3_db = db.loc[db.ill_level == 3]
class4_db = db.loc[db.ill_level == 4]
class5_db = db.loc[db.ill_level == 5]
# Shuffle each class with a fixed seed, then merge and reshuffle the union.
db1_train = class1_db.sample(frac=1, random_state=99)
db2_train = class2_db.sample(frac=1, random_state=99)
db3_train = class3_db.sample(frac=1, random_state=99)
db4_train = class4_db.sample(frac=1, random_state=99)
db5_train = class5_db.sample(frac=1, random_state=99)
li_train = [db1_train, db2_train, db3_train, db4_train, db5_train]
db_train = pd.concat(li_train)
db_train = db_train.sample(frac=1, random_state=99)
X = db_train.iloc[:, inputColumns]
# Take output column and binarize it: levels 1-2 -> 0, levels 3-5 -> 1.
db_train_label = db_train.iloc[:, 23]
y = []
for lab in db_train_label:
    if lab in [1, 2]:
        y.append([0])  # class 1
    elif lab in [3, 4, 5]:
        y.append([1])  # class 2
    else:
        # NOTE(review): a bad label is only reported; the row stays in X but no
        # y entry is added, which would misalign X and y. Confirm the filtered
        # CSV can never contain labels outside 1-5.
        print("DATA ERROR")
y = np.array(y)
# Binary classifier: len(inputColumns) inputs -> 24 -> 16 -> 1 sigmoid units.
model = Sequential()
model.add(Dense(24, input_dim=len(inputColumns), activation='sigmoid'))
model.add(Dense(16, activation='sigmoid'))
model.add(Dense(1, activation='sigmoid'))
# Compile model.
model.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.002), metrics=['binary_accuracy'])
epoches_num = 200
batch = 20
# Fit the model (train the model). A validation split is required here: the
# plots below read the 'val_loss'/'val_binary_accuracy' histories, and without
# validation data model.fit() records no val_* metrics (KeyError at plot time).
history = model.fit(X, y, epochs=epoches_num, batch_size=batch, shuffle=True,
                    validation_split=0.2)
# Plot training/validation loss.
loss_train = history.history['loss']
loss_val = history.history['val_loss']
epochs = range(1, epoches_num + 1)
plt.plot(epochs, loss_train, 'g', label='Training loss')
plt.plot(epochs, loss_val, 'b', label='validation loss')
plt.title('Training and Validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Plot training/validation accuracy. The metric was registered under the name
# 'binary_accuracy' in compile(), so the history keys carry that name --
# 'accuracy'/'val_accuracy' would raise KeyError here.
acc_train = history.history['binary_accuracy']
acc_val = history.history['val_binary_accuracy']
epochs = range(1, epoches_num + 1)
plt.plot(epochs, acc_train, 'g', label='Training accuracy')
plt.plot(epochs, acc_val, 'b', label='validation accuracy')
plt.title('Training and Validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
# Uncomment if you want to export model
"""model.save('model')
with open('config.X', 'wb') as config_X_file:
    pickle.dump(X, config_X_file)"""
# Uncomment for testing explainability
"""features_names = ['sex', 'HSD', 'entry_month', 'symptoms_month', 'pneumonia', 'age_group', 'pregnancy', 'diabetes',
                  'copd', 'asthma', 'immsupr', 'hypertension', 'other_disease', 'cardiovascular', 'obesity',
                  'renal_chronic', 'tobacco', 'contact_other_covid']
ls = []
explainer = lime.lime_tabular.LimeTabularExplainer(np.array(X), feature_names=features_names,
                                                   verbose=True, class_names=['Sick'], mode='classification',
                                                   categorical_features=features_names)
ts = [1, 10, 1, 1, 1, 6, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0]
ls.append(ts)
ls = np.array(ls)
prediction = model.predict_classes(ls)
print('Model Class: %s' % prediction[0])
exp = explainer.explain_instance(ls[0], model.predict, num_features=len(features_names), labels=[0])
exp.as_pyplot_figure(label=0)
#exp.as_pyplot_figure(label=1)
exp.show_in_notebook(show_table=True, show_all=False)"""
| [
"matplotlib.pyplot.title",
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"keras.optimizers.Adam",
"keras.layers.Dense",
"numpy.array",
"keras.models.Sequential",
"matplotlib.pyplot.xlabel",
... | [((314, 331), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (328, 331), True, 'import numpy as np\n'), ((353, 398), 'pandas.read_csv', 'pd.read_csv', (['"""covid_filtered_1-5_allMin5.csv"""'], {}), "('covid_filtered_1-5_allMin5.csv')\n", (364, 398), True, 'import pandas as pd\n'), ((1067, 1086), 'pandas.concat', 'pd.concat', (['li_train'], {}), '(li_train)\n', (1076, 1086), True, 'import pandas as pd\n'), ((1428, 1439), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1436, 1439), True, 'import numpy as np\n'), ((1449, 1461), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1459, 1461), False, 'from keras.models import Sequential\n'), ((2013, 2069), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'loss_train', '"""g"""'], {'label': '"""Training loss"""'}), "(epochs, loss_train, 'g', label='Training loss')\n", (2021, 2069), True, 'import matplotlib.pyplot as plt\n'), ((2070, 2126), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'loss_val', '"""b"""'], {'label': '"""validation loss"""'}), "(epochs, loss_val, 'b', label='validation loss')\n", (2078, 2126), True, 'import matplotlib.pyplot as plt\n'), ((2127, 2168), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation loss"""'], {}), "('Training and Validation loss')\n", (2136, 2168), True, 'import matplotlib.pyplot as plt\n'), ((2169, 2189), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (2179, 2189), True, 'import matplotlib.pyplot as plt\n'), ((2190, 2208), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (2200, 2208), True, 'import matplotlib.pyplot as plt\n'), ((2209, 2221), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2219, 2221), True, 'import matplotlib.pyplot as plt\n'), ((2222, 2232), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2230, 2232), True, 'import matplotlib.pyplot as plt\n'), ((2351, 2410), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'acc_train', 
'"""g"""'], {'label': '"""Training accuracy"""'}), "(epochs, acc_train, 'g', label='Training accuracy')\n", (2359, 2410), True, 'import matplotlib.pyplot as plt\n'), ((2411, 2470), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'acc_val', '"""b"""'], {'label': '"""validation accuracy"""'}), "(epochs, acc_val, 'b', label='validation accuracy')\n", (2419, 2470), True, 'import matplotlib.pyplot as plt\n'), ((2471, 2516), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation accuracy"""'], {}), "('Training and Validation accuracy')\n", (2480, 2516), True, 'import matplotlib.pyplot as plt\n'), ((2517, 2537), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (2527, 2537), True, 'import matplotlib.pyplot as plt\n'), ((2538, 2560), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (2548, 2560), True, 'import matplotlib.pyplot as plt\n'), ((2561, 2573), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2571, 2573), True, 'import matplotlib.pyplot as plt\n'), ((2574, 2584), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2582, 2584), True, 'import matplotlib.pyplot as plt\n'), ((1544, 1575), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': '"""sigmoid"""'}), "(16, activation='sigmoid')\n", (1549, 1575), False, 'from keras.layers import Dense\n'), ((1587, 1617), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1592, 1617), False, 'from keras.layers import Dense\n'), ((1688, 1713), 'keras.optimizers.Adam', 'Adam', ([], {'learning_rate': '(0.002)'}), '(learning_rate=0.002)\n', (1692, 1713), False, 'from keras.optimizers import Adam\n')] |
import numpy as np
from numpy.random import choice
from gerel.genome.factories import copy
from random import random
from gerel.mutators.mutator import Mutator
from gerel.algorithms.NEAT.functions import curry_weight_mutator, curry_crossover, add_node, add_edge
class NEATMutator(Mutator):
    """NEAT evolutionary operator: weight mutation, topological mutation
    (new node / new edge) and fitness-ordered crossover over a speciated
    population.

    NOTE(review): the exact sequence of RNG calls (``choice``, ``random``,
    ``np.random.uniform``) determines the evolution trajectory, so statement
    order in these methods is load-bearing.
    """

    def __init__(
            self,
            weight_mutation_likelihood=0.8,
            weight_mutation_rate_random=0.1,
            weight_mutation_rate_uniform=0.9,
            weight_mutation_variance=0.1,
            mutation_without_crossover_rate=0.25,
            interspecies_mating_rate=0.001,
            species_member_survival_rate=0.2,
            gene_disable_rate=0.75,
            new_node_probability=0.03,
            new_edge_probability=0.05,
            weight_low=-2,
            weight_high=2,
    ):
        """build NEATMutator object that acts on NEATPopulation objects.

        :param weight_mutation_likelihood: Given a node or edge this
            is the likelihood that targets weight is mutated.
        :param weight_mutation_rate_random: Likelihood of normal
            distribution perturbation of weight.
        :param weight_mutation_variance: Variance of normal distribution
            used to perturb weights.
        :param mutation_without_crossover_rate: Likelihood of just
            weight or topological mutation occurring without crossover.
        :param interspecies_mating_rate: Likelihood of crossover between
            two species.
        :param species_member_survival_rate: Proportion of species
            members that survive each generation.
        :param gene_disable_rate: Likelihood of disabled gene staying
            inactive.
        :param new_node_probability: Likelihood of new node mutation.
        :param new_edge_probability: Likelihood of new edge mutation.
        :param weight_low: uniform distribution lower bound
        :param weight_high: uniform distribution upper bound
        """
        super().__init__()
        self.gene_disable_rate = gene_disable_rate
        self.new_node_probability = new_node_probability
        self.new_edge_probability = new_edge_probability
        self.mutation_without_crossover_rate = mutation_without_crossover_rate
        self.interspecies_mating_rate = interspecies_mating_rate
        self.species_member_survival_rate = species_member_survival_rate
        # Curried helpers capture the mutation hyper-parameters once, so the
        # per-gene call sites below only pass the edge/node or parent genomes.
        self.weight_mutator = curry_weight_mutator(
            weight_mutation_likelihood,
            weight_mutation_rate_random,
            weight_mutation_variance,
            weight_mutation_rate_uniform,
            weight_low=weight_low,
            weight_high=weight_high,
        )
        self.crossover = curry_crossover(gene_disable_rate)

    def call_on_population(self, population):
        """Takes population of speciated genomes and evolves them into the next generation of genomes.

        - First we compute the population proportion that each group is granted.
        - Then we keep only the top species_member_survival_rate of each generation.
        - for each group
            - we put the top performing genome into the new populations
            - randomly draw Genomes from the remaining top performing
              genomes and apply mutations/pairing until the rest of the
              groups population share is taken up.

        :param population: NEATPopulation object
        :return: None
        """
        # Each species' share of the next generation is proportional to its
        # group fitness relative to the whole population.
        total_group_fitness_sum = sum([item['group_fitness'] for key, item in population.species.items()])
        new_genomes = []
        for key, item in population.species.items():
            pop_prop = int(round(population.population_size * (item['group_fitness'] / total_group_fitness_sum)))
            # Cull the species down to its top performers (minimum of 5 kept,
            # assuming 'group' is sorted best-first -- TODO confirm upstream).
            survival_prop = int(len(item['group']) * self.species_member_survival_rate)
            survival_prop = 5 if survival_prop < 5 else survival_prop
            item['group'] = item['group'][:survival_prop]
            # Elitism: the best genome is copied into the next generation
            # unchanged.
            best_performer = copy(item['group'][0])
            new_genomes.append(best_performer)
            for _ in range(pop_prop - 1):
                selected_gene = choice(item['group'])
                # Always apply weight/topology mutation first ...
                new_genome = self.call_on_genome(selected_gene)
                # ... then, with probability (1 - mutation_without_crossover_rate),
                # also cross with a second parent.
                if random() > self.mutation_without_crossover_rate:
                    other_genome = None
                    if random() < self.interspecies_mating_rate and len(population.species) > 1:
                        # select from other species
                        other_item = choice([item for _, item in population.species.items()])
                        if len(other_item['group']) > 2:
                            other_genome = choice([g for g in other_item['group'] if g is not selected_gene])
                    elif len(item['group']) > 2:
                        other_genome = choice([g for g in item['group'] if g is not selected_gene])
                    if other_genome:
                        # Fitter parent is treated as primary in the crossover.
                        secondary, primary = sorted([new_genome, other_genome], key=lambda g: g.fitness)
                        new_genome = self.crossover(primary, secondary)
                new_genomes.append(new_genome)
        population.generation += 1
        population.genomes = new_genomes

    def call_on_genome(self, genome):
        """Action on genome.

        Only performs weight and topological mutations.

        :param genome: Genome to copy and mutate.
        :return: New genome.
        """
        # Work on a copy so the parent genome survives unmodified.
        new_genome = copy(genome)
        new_genome.fitness = genome.fitness
        for edge in new_genome.edges:
            self.weight_mutator(edge)
        for node in new_genome.nodes:
            self.weight_mutator(node)
        # Topological mutations are rare events gated by their probabilities.
        if np.random.uniform(0, 1, 1) < self.new_node_probability:
            add_node(new_genome)
        if np.random.uniform(0, 1, 1) < self.new_edge_probability:
            add_edge(new_genome)
        return new_genome
| [
"numpy.random.uniform",
"gerel.algorithms.NEAT.functions.curry_weight_mutator",
"gerel.genome.factories.copy",
"gerel.algorithms.NEAT.functions.curry_crossover",
"random.random",
"numpy.random.choice",
"gerel.algorithms.NEAT.functions.add_edge",
"gerel.algorithms.NEAT.functions.add_node"
] | [((2416, 2610), 'gerel.algorithms.NEAT.functions.curry_weight_mutator', 'curry_weight_mutator', (['weight_mutation_likelihood', 'weight_mutation_rate_random', 'weight_mutation_variance', 'weight_mutation_rate_uniform'], {'weight_low': 'weight_low', 'weight_high': 'weight_high'}), '(weight_mutation_likelihood,\n weight_mutation_rate_random, weight_mutation_variance,\n weight_mutation_rate_uniform, weight_low=weight_low, weight_high=\n weight_high)\n', (2436, 2610), False, 'from gerel.algorithms.NEAT.functions import curry_weight_mutator, curry_crossover, add_node, add_edge\n'), ((2707, 2741), 'gerel.algorithms.NEAT.functions.curry_crossover', 'curry_crossover', (['gene_disable_rate'], {}), '(gene_disable_rate)\n', (2722, 2741), False, 'from gerel.algorithms.NEAT.functions import curry_weight_mutator, curry_crossover, add_node, add_edge\n'), ((5453, 5465), 'gerel.genome.factories.copy', 'copy', (['genome'], {}), '(genome)\n', (5457, 5465), False, 'from gerel.genome.factories import copy\n'), ((3981, 4003), 'gerel.genome.factories.copy', 'copy', (["item['group'][0]"], {}), "(item['group'][0])\n", (3985, 4003), False, 'from gerel.genome.factories import copy\n'), ((5675, 5701), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (5692, 5701), True, 'import numpy as np\n'), ((5743, 5763), 'gerel.algorithms.NEAT.functions.add_node', 'add_node', (['new_genome'], {}), '(new_genome)\n', (5751, 5763), False, 'from gerel.algorithms.NEAT.functions import curry_weight_mutator, curry_crossover, add_node, add_edge\n'), ((5776, 5802), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (5793, 5802), True, 'import numpy as np\n'), ((5844, 5864), 'gerel.algorithms.NEAT.functions.add_edge', 'add_edge', (['new_genome'], {}), '(new_genome)\n', (5852, 5864), False, 'from gerel.algorithms.NEAT.functions import curry_weight_mutator, curry_crossover, add_node, add_edge\n'), ((4125, 4146), 'numpy.random.choice', 
'choice', (["item['group']"], {}), "(item['group'])\n", (4131, 4146), False, 'from numpy.random import choice\n'), ((4230, 4238), 'random.random', 'random', ([], {}), '()\n', (4236, 4238), False, 'from random import random\n'), ((4342, 4350), 'random.random', 'random', ([], {}), '()\n', (4348, 4350), False, 'from random import random\n'), ((4662, 4728), 'numpy.random.choice', 'choice', (["[g for g in other_item['group'] if g is not selected_gene]"], {}), "([g for g in other_item['group'] if g is not selected_gene])\n", (4668, 4728), False, 'from numpy.random import choice\n'), ((4817, 4877), 'numpy.random.choice', 'choice', (["[g for g in item['group'] if g is not selected_gene]"], {}), "([g for g in item['group'] if g is not selected_gene])\n", (4823, 4877), False, 'from numpy.random import choice\n')] |
#!/usr/bin/env python
# FizzPyX - FizzPyXPlot
# Copyright (C) 2017 <NAME>
# GNU GPLv3
from __future__ import division
from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig
from numpy import argsort, abs, mean, arange, argmax
from numpy.fft import fft, rfft, fftfreq
# from Numpy_Neurons.FizzPyXFreq import InputtoFrequencyGen
from RawNumpyNeurons.FizzPyX import solutionGenerator, CoupledOscillatorsGen, FitzhughNagumoGen
# Time series plot
def do_tplot(modelname, solvername, plotname=None, xaxis=None, yaxis=None):
    """Time-series plot of two state variables (solution columns 0 and 2).

    Runs ``solutionGenerator`` for the given model/solver and saves the figure
    to ``'<plotname>_tplot.png'``.
    """
    result = solutionGenerator(modelname, solvername)
    states, times = result[0], result[1]
    figure()
    # Plot both tracked state variables against the same time axis.
    for column in (0, 2):
        plot(times, states[:, column])
    title(plotname)
    xlabel(xaxis)
    ylabel(yaxis)
    savefig('%s_tplot.png' % plotname)
    return
# Phase plot
def do_pplot(modelname, solvername, plotname=None, xaxis=None, yaxis=None):
    """Phase-plane plot: solution column 0 against solution column 1.

    Runs ``solutionGenerator`` for the given model/solver and saves the figure
    to ``'<plotname>_pplot.png'``.
    """
    states = solutionGenerator(modelname, solvername)[0]
    v = states[:, 0]   # membrane potential
    w = states[:, 1]   # K gating variable
    figure()
    plot(w, v)
    title(plotname)
    xlabel(xaxis)
    ylabel(yaxis)
    savefig('%s_pplot.png' % plotname)
    return
# Power Spectrum
def do_psplot(modelname, solvername, plotname=None, xaxis=None, yaxis=None):
    """Plot the power spectrum of the first state variable of a model run.

    Saves the figure to ``'<plotname>_psplot.png'``.

    Bug fix: the frequency axis was built with ``fftfreq(int(fdata/2 - 1), ...)``
    while the power array has the full FFT length ``fdata``, so powers were
    plotted against frequencies of a different-length transform. ``fftfreq``
    must be called with the full signal length so each FFT bin gets its own
    matching frequency.
    """
    solution = solutionGenerator(modelname, solvername)
    membranePotential = solution[0][:, 0]
    # Subtract the DC component so the spectrum has no spurious peak at 0.
    X = membranePotential - mean(membranePotential)
    fdata = X.size
    ps = abs(fft(X))**2
    time_step = 1 / 30  # sampling interval assumed by the plot -- TODO confirm
    # One frequency bin per FFT sample: len(freqs) == len(ps) == fdata.
    freqs = fftfreq(fdata, time_step)
    idx = argsort(freqs)  # reorder bins so frequency increases monotonically
    figure()
    plot(freqs[idx], ps[idx])
    title(plotname)
    xlabel(xaxis)
    ylabel(yaxis)
    xlim(0, 1)
    ylim(0, 2.5e9)
    savefig('%s_psplot.png' % plotname)
    return
# Input Stimulus to Frequency Plot
def InputtoFrequencyGen():
    """Sweep the Fitzhugh-Nagumo input stimulus and record the dominant
    oscillation frequency for each stimulus value.

    :return: (inputs, freq) lists -- the (negated) stimulus values and the
        frequency of the largest spectral peak for each.

    Bug fix: the frequency grid was built with ``fftfreq(int(fdata/2 - 1), ...)``,
    which does not match the ``rfft`` output length (``fdata//2 + 1``); the
    ``argmax`` index could fall outside the grid or map to the wrong frequency.
    ``rfftfreq(fdata, ...)`` is the grid that matches ``rfft``.
    """
    from numpy.fft import rfftfreq  # local import keeps this fix self-contained
    freq = []
    inputs = []
    for I in arange(0.398, 0.539, 0.001):
        I *= -1
        solution = FitzhughNagumoGen('FN', 'ord2', i=I)
        membranePotential = solution[0][:, 0]
        # Subtract DC so the spectral peak is the oscillation, not the 0-bin.
        X = membranePotential - mean(membranePotential)
        fdata = X.size
        ps = abs(rfft(X)) ** 2
        time_step = 1 / 30  # sampling interval assumed here -- TODO confirm
        freqs = rfftfreq(fdata, time_step)  # one bin per rfft sample
        locpeak = argmax(ps)       # Find its location
        maxfreq = freqs[locpeak]   # Get the actual frequency value
        freq.append(maxfreq)
        inputs.append(I)
    return inputs, freq
if __name__ == '__main__':
    # Other available demos:
    #   do_tplot('CO', 'ord2', plotname='Coupled Oscillators - Beats', xaxis='Time', yaxis='Mass Position')
    #   do_pplot('HH', 'rk4')
    #   do_psplot('HH', 'rk4')
    stimuli, peak_frequencies = InputtoFrequencyGen()
    # Stimuli are negative; plot their magnitudes against the peak frequency.
    plot(abs(stimuli), peak_frequencies)
    title('Cheese')
    xlabel('x')
    ylabel('y')
    savefig('cheese_tplot.png')
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"RawNumpyNeurons.FizzPyX.solutionGenerator",
"RawNumpyNeurons.FizzPyX.FitzhughNagumoGen",
"numpy.abs",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"numpy.argmax",
"numpy.fft.fft",
"numpy.fft.rfft",
"numpy.argsort",
"matplotlib.pyplot... | [((570, 610), 'RawNumpyNeurons.FizzPyX.solutionGenerator', 'solutionGenerator', (['modelname', 'solvername'], {}), '(modelname, solvername)\n', (587, 610), False, 'from RawNumpyNeurons.FizzPyX import solutionGenerator, CoupledOscillatorsGen, FitzhughNagumoGen\n'), ((750, 758), 'matplotlib.pyplot.figure', 'figure', ([], {}), '()\n', (756, 758), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((763, 790), 'matplotlib.pyplot.plot', 'plot', (['timeArray', 'firstarray'], {}), '(timeArray, firstarray)\n', (767, 790), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((795, 823), 'matplotlib.pyplot.plot', 'plot', (['timeArray', 'secondarray'], {}), '(timeArray, secondarray)\n', (799, 823), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((828, 843), 'matplotlib.pyplot.title', 'title', (['plotname'], {}), '(plotname)\n', (833, 843), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((848, 861), 'matplotlib.pyplot.xlabel', 'xlabel', (['xaxis'], {}), '(xaxis)\n', (854, 861), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((866, 879), 'matplotlib.pyplot.ylabel', 'ylabel', (['yaxis'], {}), '(yaxis)\n', (872, 879), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((884, 918), 'matplotlib.pyplot.savefig', 'savefig', (["('%s_tplot.png' % plotname)"], {}), "('%s_tplot.png' % plotname)\n", (891, 918), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((1037, 1077), 'RawNumpyNeurons.FizzPyX.solutionGenerator', 'solutionGenerator', (['modelname', 'solvername'], {}), '(modelname, solvername)\n', (1054, 1077), False, 'from RawNumpyNeurons.FizzPyX import solutionGenerator, CoupledOscillatorsGen, 
FitzhughNagumoGen\n'), ((1200, 1208), 'matplotlib.pyplot.figure', 'figure', ([], {}), '()\n', (1206, 1208), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((1213, 1253), 'matplotlib.pyplot.plot', 'plot', (['KgatingVariable', 'membranePotential'], {}), '(KgatingVariable, membranePotential)\n', (1217, 1253), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((1258, 1273), 'matplotlib.pyplot.title', 'title', (['plotname'], {}), '(plotname)\n', (1263, 1273), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((1278, 1291), 'matplotlib.pyplot.xlabel', 'xlabel', (['xaxis'], {}), '(xaxis)\n', (1284, 1291), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((1296, 1309), 'matplotlib.pyplot.ylabel', 'ylabel', (['yaxis'], {}), '(yaxis)\n', (1302, 1309), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((1314, 1348), 'matplotlib.pyplot.savefig', 'savefig', (["('%s_pplot.png' % plotname)"], {}), "('%s_pplot.png' % plotname)\n", (1321, 1348), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((1472, 1512), 'RawNumpyNeurons.FizzPyX.solutionGenerator', 'solutionGenerator', (['modelname', 'solvername'], {}), '(modelname, solvername)\n', (1489, 1512), False, 'from RawNumpyNeurons.FizzPyX import solutionGenerator, CoupledOscillatorsGen, FitzhughNagumoGen\n'), ((1625, 1648), 'numpy.mean', 'mean', (['membranePotential'], {}), '(membranePotential)\n', (1629, 1648), False, 'from numpy import argsort, abs, mean, arange, argmax\n'), ((1929, 1943), 'numpy.argsort', 'argsort', (['freqs'], {}), '(freqs)\n', (1936, 1943), False, 'from numpy import argsort, abs, mean, arange, argmax\n'), ((1948, 1956), 'matplotlib.pyplot.figure', 'figure', ([], {}), '()\n', (1954, 1956), False, 'from 
matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((1961, 1986), 'matplotlib.pyplot.plot', 'plot', (['freqs[idx]', 'ps[idx]'], {}), '(freqs[idx], ps[idx])\n', (1965, 1986), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((1991, 2006), 'matplotlib.pyplot.title', 'title', (['plotname'], {}), '(plotname)\n', (1996, 2006), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((2011, 2024), 'matplotlib.pyplot.xlabel', 'xlabel', (['xaxis'], {}), '(xaxis)\n', (2017, 2024), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((2029, 2042), 'matplotlib.pyplot.ylabel', 'ylabel', (['yaxis'], {}), '(yaxis)\n', (2035, 2042), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((2047, 2057), 'matplotlib.pyplot.xlim', 'xlim', (['(0)', '(1)'], {}), '(0, 1)\n', (2051, 2057), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((2062, 2083), 'matplotlib.pyplot.ylim', 'ylim', (['(0)', '(2500000000.0)'], {}), '(0, 2500000000.0)\n', (2066, 2083), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((2081, 2116), 'matplotlib.pyplot.savefig', 'savefig', (["('%s_psplot.png' % plotname)"], {}), "('%s_psplot.png' % plotname)\n", (2088, 2116), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((2236, 2263), 'numpy.arange', 'arange', (['(0.398)', '(0.539)', '(0.001)'], {}), '(0.398, 0.539, 0.001)\n', (2242, 2263), False, 'from numpy import argsort, abs, mean, arange, argmax\n'), ((3201, 3216), 'matplotlib.pyplot.title', 'title', (['"""Cheese"""'], {}), "('Cheese')\n", (3206, 3216), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((3221, 3232), 
'matplotlib.pyplot.xlabel', 'xlabel', (['"""x"""'], {}), "('x')\n", (3227, 3232), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((3237, 3248), 'matplotlib.pyplot.ylabel', 'ylabel', (['"""y"""'], {}), "('y')\n", (3243, 3248), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((3291, 3318), 'matplotlib.pyplot.savefig', 'savefig', (['"""cheese_tplot.png"""'], {}), "('cheese_tplot.png')\n", (3298, 3318), False, 'from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, xlim, ylim, savefig\n'), ((2300, 2336), 'RawNumpyNeurons.FizzPyX.FitzhughNagumoGen', 'FitzhughNagumoGen', (['"""FN"""', '"""ord2"""'], {'i': 'I'}), "('FN', 'ord2', i=I)\n", (2317, 2336), False, 'from RawNumpyNeurons.FizzPyX import solutionGenerator, CoupledOscillatorsGen, FitzhughNagumoGen\n'), ((2433, 2456), 'numpy.mean', 'mean', (['membranePotential'], {}), '(membranePotential)\n', (2437, 2456), False, 'from numpy import argsort, abs, mean, arange, argmax\n'), ((2739, 2749), 'numpy.argmax', 'argmax', (['ps'], {}), '(ps)\n', (2745, 2749), False, 'from numpy import argsort, abs, mean, arange, argmax\n'), ((3174, 3186), 'numpy.abs', 'abs', (['data[0]'], {}), '(data[0])\n', (3177, 3186), False, 'from numpy import argsort, abs, mean, arange, argmax\n'), ((1836, 1842), 'numpy.fft.fft', 'fft', (['X'], {}), '(X)\n', (1839, 1842), False, 'from numpy.fft import fft, rfft, fftfreq\n'), ((2624, 2631), 'numpy.fft.rfft', 'rfft', (['X'], {}), '(X)\n', (2628, 2631), False, 'from numpy.fft import fft, rfft, fftfreq\n')] |
import numpy as np
import matplotlib.pyplot as plt
# Simulations for the evolution of parasitic viral strains
# Author: <NAME>
# System to be visualized numerically
# Inspired from "The Equations of Life" (<NAME>)
# S is the host susceptible population
# I1, I2 are the strain 1 or strain 2 infected
# R is the "removed" individuals (dead population)
# dS/ dt = a - (b1*I1 + b2*I2)*S - b*S
# dI1/dt = b1*I1*S - b*I1 - a1*I1
# dI2/dt = b2*I2*S - b*I2 - a2*I2
# dR/dt = a1*I1 + a2*I2
# ---- Global model parameters ----
# Time grid (in years)
step = 1/12 # one month
span = 10 # years of simulated time
t = np.arange(1, span, step)
# Virus virulence of strains 1 and 2 (disease-induced death rates)
a1 = 4.1
a2 = a1/4
# Virus transmissibility of strains 1 and 2
b1 = 0.00001
b2 = 0.00001
# Host recovery rates of strains 1 and 2
# NOTE(review): v1 and v2 are defined but never used by the model below.
v1 = 0.0001
v2 = 0.0001
# Host birth and death rates
a = 1
b = 0.00001
# Initial population infected by strains 1 and 2
I01 = 100
I02 = 100
# Host population (initial susceptible count)
N = 100000
print(b1*N)  # debug output: transmissibility times population — presumably a sanity check
# Take-over rate in the model of Levin and Pimentel (1981)
# NOTE(review): s is defined but never used in the visible code.
s = 0.0001
# Basic reproduction number
def R0(beta, alpha):
    """Return the basic reproduction number for a strain with
    transmissibility ``beta`` and virulence ``alpha``, using the
    module-level host birth rate ``a`` and death rate ``b``."""
    removal = (b + alpha) * b
    return beta * a / removal
# Right-hand sides of the ODE system
def fS(I1, I2, S):
    """dS/dt: births minus natural deaths and new infections by both strains."""
    force_of_infection = b1 * I1 + b2 * I2
    return a - b * S - S * force_of_infection
def fI1(I1, S):
    """dI1/dt: new strain-1 infections minus natural and disease deaths."""
    per_capita_growth = b1 * S - b - a1
    return I1 * per_capita_growth
def fI2(I2, S):
    """dI2/dt: new strain-2 infections minus natural and disease deaths."""
    per_capita_growth = b2 * S - b - a2
    return I2 * per_capita_growth
def fR(I1, I2):
    """dR/dt: disease-induced removals accumulate from both strains."""
    removed_strain1 = a1 * I1
    removed_strain2 = a2 * I2
    return removed_strain1 + removed_strain2
# Forward-Euler integrator for the two-strain system
def euler(t, N, I01, I02):
    """
    Integrate the S/I1/I2/R system with the forward Euler method.

    Parameters
    ----------
    t : uniformly spaced time points
    N : initial susceptible population
    I01, I02 : initial populations infected by strains 1 and 2

    Returns
    -------
    S, I1, I2, R : arrays holding the solution at each time point
    """
    n_steps = len(t)
    dt = t[1] - t[0]
    # Allocate solution arrays
    S = np.zeros(n_steps)
    I1 = np.zeros(n_steps)
    I2 = np.zeros(n_steps)
    R = np.zeros(n_steps)
    # Initial conditions (R starts at zero by construction)
    S[0], I1[0], I2[0] = N, I01, I02
    for k in range(1, n_steps):
        # Previous state
        s_prev = S[k - 1]
        i1_prev = I1[k - 1]
        i2_prev = I2[k - 1]
        r_prev = R[k - 1]
        # One explicit Euler step per variable
        S[k] = s_prev + dt * fS(i1_prev, i2_prev, s_prev)
        I1[k] = i1_prev + dt * fI1(i1_prev, s_prev)
        I2[k] = i2_prev + dt * fI2(i2_prev, s_prev)
        R[k] = r_prev + dt * fR(i1_prev, i2_prev)
    return S, I1, I2, R
def main():
    """Integrate the two-strain model and plot S, I1, I2 and R on a 2x2
    grid, saving the figure to outputs/simulation/ before showing it."""
    (S, I1, I2, R) = euler(t, N, I01, I02)
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
    ax1.plot(t, S, c="blue")
    ax1.set_title("Susceptible population", fontsize=10)
    ax3.plot(t, R, c="red")
    ax3.set_title("Deceased caused by the virus", fontsize=10)
    ax2.plot(t, I1, c="green")
    ax2.set_title(
        "Infected by strain 1 (R0 = {:.2f})".format(R0(b1, a1)), fontsize=10)
    ax4.plot(t, I2, c="yellow")
    ax4.set_title(
        "Infected by strain 2 (R0 = {:.2f})".format(R0(b2, a2)), fontsize=10)
    fig.tight_layout()
    # NOTE(review): assumes the outputs/simulation/ directory already exists.
    plt.savefig("outputs/simulation/a1={:.2e} a2={:.2e} beta={:.2e}.png".format(
        a1, a2, b1), dpi=500)
    plt.show()
# Run the simulation only when executed as a script, not on import.
# (The original called main() and the interactive helper exit(0)
# unconditionally at module level.)
if __name__ == "__main__":
    main()
    raise SystemExit(0)
| [
"numpy.zeros",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((562, 586), 'numpy.arange', 'np.arange', (['(1)', 'span', 'step'], {}), '(1, span, step)\n', (571, 586), True, 'import numpy as np\n'), ((1633, 1644), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1641, 1644), True, 'import numpy as np\n'), ((1654, 1665), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1662, 1665), True, 'import numpy as np\n'), ((1675, 1686), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1683, 1686), True, 'import numpy as np\n'), ((1695, 1706), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1703, 1706), True, 'import numpy as np\n'), ((2394, 2412), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (2406, 2412), True, 'import matplotlib.pyplot as plt\n'), ((2991, 3001), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2999, 3001), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
import os
import struct
import numpy as np
import matplotlib.pyplot as plt
def load_mnist(path, kind='train'):
    """Read an MNIST-format label/image file pair from *path*.

    Args:
        path (filepath): directory containing the idx files.
        kind (string): file-name prefix; the files read are
            "<kind>-labels.idx1-ubyte" and "<kind>-images.idx3-ubyte".

    Returns:
        (images, labels): ``images`` is an (n, 784) uint8 array with one
        flattened 28x28 picture per row; ``labels`` is an (n,) uint8 array.
    """
    label_file = os.path.join(path, '%s-labels.idx1-ubyte' % kind)
    image_file = os.path.join(path, '%s-images.idx3-ubyte' % kind)
    with open(label_file, 'rb') as fh:
        # 8-byte header: magic number and item count (big-endian uint32s)
        struct.unpack('>II', fh.read(8))
        labels = np.fromfile(fh, dtype=np.uint8).reshape(-1)
    with open(image_file, 'rb') as fh:
        # 16-byte header: magic, image count, rows, cols
        struct.unpack('>IIII', fh.read(16))
        pixels = np.fromfile(fh, dtype=np.uint8)
    # one picture (28x28 = 784 bytes) per row
    images = pixels.reshape(len(labels), 784)
    return images, labels
def sample_show(x_train, y_train, mode=0, n=0):
    """Display sample MNIST digits with matplotlib.

    mode == 0: one example of each digit 0-9 on a 2x5 grid.
    otherwise: the first 25 examples of digit *n* on a 5x5 grid.
    """
    if mode == 0:
        n_rows, n_cols = 2, 5
    else:
        n_rows, n_cols = 5, 5
    fig, axes = plt.subplots(nrows=n_rows, ncols=n_cols, sharex=True, sharey=True)
    axes = axes.flatten()
    for idx, axis in enumerate(axes):
        if mode == 0:
            picture = x_train[y_train == idx][0].reshape(28, 28)
        else:
            picture = x_train[y_train == n][idx].reshape(28, 28)
        axis.imshow(picture, cmap='Greys', interpolation='nearest')
    # Hide tick marks (shared axes propagate this to every subplot)
    axes[0].set_xticks([])
    axes[0].set_yticks([])
    plt.tight_layout()
    plt.show()
| [
"matplotlib.pyplot.show",
"os.path.join",
"numpy.fromfile",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout"
] | [((368, 417), 'os.path.join', 'os.path.join', (['path', "('%s-labels.idx1-ubyte' % kind)"], {}), "(path, '%s-labels.idx1-ubyte' % kind)\n", (380, 417), False, 'import os\n'), ((436, 485), 'os.path.join', 'os.path.join', (['path', "('%s-images.idx3-ubyte' % kind)"], {}), "(path, '%s-images.idx3-ubyte' % kind)\n", (448, 485), False, 'import os\n'), ((1577, 1595), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1593, 1595), True, 'import matplotlib.pyplot as plt\n'), ((1600, 1610), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1608, 1610), True, 'import matplotlib.pyplot as plt\n'), ((1016, 1072), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(5)', 'sharex': '(True)', 'sharey': '(True)'}), '(nrows=2, ncols=5, sharex=True, sharey=True)\n', (1028, 1072), True, 'import matplotlib.pyplot as plt\n'), ((1283, 1339), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(5)', 'ncols': '(5)', 'sharex': '(True)', 'sharey': '(True)'}), '(nrows=5, ncols=5, sharex=True, sharey=True)\n', (1295, 1339), True, 'import matplotlib.pyplot as plt\n'), ((603, 638), 'numpy.fromfile', 'np.fromfile', (['lbpath'], {'dtype': 'np.uint8'}), '(lbpath, dtype=np.uint8)\n', (614, 638), True, 'import numpy as np\n'), ((790, 826), 'numpy.fromfile', 'np.fromfile', (['imgpath'], {'dtype': 'np.uint8'}), '(imgpath, dtype=np.uint8)\n', (801, 826), True, 'import numpy as np\n')] |
from kivy.app import App
import numpy as np
from kivyplot import Plot2D
class MainApp(App):
    """Kivy app that plots one period of a shifted sine wave with kivyplot."""

    def build(self):
        """Create and return the root Plot2D widget with sampled sine points."""
        n_points = 20
        points = [
            np.array([k, np.sin(2 * np.pi * k / (n_points - 1) + np.pi / 2)])
            for k in range(n_points)
        ]
        self.root = Plot2D(xmin=0, xmax=n_points, stepx=1)
        self.root.plot(points, label='label plot', filled=True)
        return self.root
if __name__ == '__main__':
MainApp().run()
| [
"numpy.sin",
"kivyplot.Plot2D"
] | [((275, 306), 'kivyplot.Plot2D', 'Plot2D', ([], {'xmin': '(0)', 'xmax': 'N', 'stepx': '(1)'}), '(xmin=0, xmax=N, stepx=1)\n', (281, 306), False, 'from kivyplot import Plot2D\n'), ((187, 230), 'numpy.sin', 'np.sin', (['(2 * np.pi * i / (N - 1) + np.pi / 2)'], {}), '(2 * np.pi * i / (N - 1) + np.pi / 2)\n', (193, 230), True, 'import numpy as np\n')] |
import trimesh
import numpy as np
import os
from . import backend
def encode_mesh(mesh, compression_level):
    """ Encodes a quantized mesh into Neuroglancer-compatible Draco format

    Parameters
    ----------
    mesh : trimesh.base.Trimesh
        A Trimesh mesh object to encode
    compression_level : int
        Level of compression for Draco format from 0 to 10.

    Returns
    -------
    buffer : bytes
        A bytes object containing the encoded mesh.
    """
    # Thin convenience wrapper: delegate to the vertices/faces encoder.
    verts = mesh.vertices
    tris = mesh.faces
    return encode_vertices_faces(verts, tris, compression_level)
def encode_vertices_faces(vertices, faces, compression_level):
    """ Encodes a set of quantized vertices and faces into
    Neuroglancer-compatible Draco format

    Parameters
    ----------
    vertices : np.ndarray
        An nx3 uint32 numpy array containing quantized vertex coordinates.
    faces : np.ndarray
        An nx3 uint32 numpy array containing mesh faces.
    compression_level : int
        Level of compression for Draco format from 0 to 10.

    Returns
    -------
    buffer : bytes
        A bytes object containing the encoded mesh.
    """
    # The backend expects flat uint32 arrays rather than nx3 matrices.
    flat_vertices = vertices.flatten().astype(np.uint32)
    flat_faces = faces.flatten().astype(np.uint32)
    return backend.encode_mesh(flat_vertices, flat_faces, compression_level)
def decode_buffer(buffer):
    """ Decodes Draco buffer into vertices and faces

    Parameters
    ----------
    buffer : bytes
        A bytes object containing a Draco mesh buffer.

    Returns
    -------
    vertices : np.ndarray
        An nx3 uint32 numpy array containing quantized vertex coordinates.
    faces : np.ndarray
        An nx3 uint32 numpy array containing mesh faces.
    """
    raw_vertices, raw_faces = backend.decode_mesh(buffer)
    # Backend returns flat sequences; reshape into nx3 matrices.
    vertices = np.asarray(raw_vertices, dtype=np.uint32).reshape(-1, 3)
    faces = np.asarray(raw_faces, dtype=np.uint32).reshape(-1, 3)
    return vertices, faces
| [
"numpy.asarray"
] | [((1766, 1803), 'numpy.asarray', 'np.asarray', (['vertices'], {'dtype': 'np.uint32'}), '(vertices, dtype=np.uint32)\n', (1776, 1803), True, 'import numpy as np\n'), ((1831, 1865), 'numpy.asarray', 'np.asarray', (['faces'], {'dtype': 'np.uint32'}), '(faces, dtype=np.uint32)\n', (1841, 1865), True, 'import numpy as np\n')] |
import sys
sys.path.append(".")
import numpy as np
import pygsp as gsp
import sgw_tools as sgw
import matplotlib.pyplot as plt
# Build a comet graph — presumably 20 nodes with an 11-node head;
# TODO confirm argument meaning against the pygsp docs.
G = gsp.graphs.Comet(20, 11)
G.estimate_lmax()  # required before evaluating spectral filters on G
g = gsp.filters.Heat(G)
# Per-node centrality scores under the heat kernel signature.
data = sgw.kernelCentrality(G, g)
nodes = np.arange(G.N)
# Nodes ordered by ascending centrality score.
ranking = sorted(nodes, key=lambda v: data[v][0])
sgw.plotGraph(G)
print(["{} ({:.3f})".format(r, data[r][0]) for r in ranking])
plt.show()
| [
"sys.path.append",
"matplotlib.pyplot.show",
"pygsp.filters.Heat",
"sgw_tools.kernelCentrality",
"numpy.arange",
"pygsp.graphs.Comet",
"sgw_tools.plotGraph"
] | [((11, 31), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (26, 31), False, 'import sys\n'), ((133, 157), 'pygsp.graphs.Comet', 'gsp.graphs.Comet', (['(20)', '(11)'], {}), '(20, 11)\n', (149, 157), True, 'import pygsp as gsp\n'), ((181, 200), 'pygsp.filters.Heat', 'gsp.filters.Heat', (['G'], {}), '(G)\n', (197, 200), True, 'import pygsp as gsp\n'), ((208, 234), 'sgw_tools.kernelCentrality', 'sgw.kernelCentrality', (['G', 'g'], {}), '(G, g)\n', (228, 234), True, 'import sgw_tools as sgw\n'), ((244, 258), 'numpy.arange', 'np.arange', (['G.N'], {}), '(G.N)\n', (253, 258), True, 'import numpy as np\n'), ((310, 326), 'sgw_tools.plotGraph', 'sgw.plotGraph', (['G'], {}), '(G)\n', (323, 326), True, 'import sgw_tools as sgw\n'), ((389, 399), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (397, 399), True, 'import matplotlib.pyplot as plt\n')] |
import warnings
import numpy as np
import scipy.integrate
import scipy.special
import matplotlib.streamplot
import bokeh.application
import bokeh.application.handlers
import bokeh.layouts
import bokeh.models
import bokeh.palettes
import bokeh.plotting
import colorcet
from .jsfunctions import jsfuns
def _sin_plot():
    """Test to plot a sine wave.

    Returns a Bokeh layout with a sine curve whose frequency is driven,
    client-side, by a slider through a CustomJS callback."""
    x = np.linspace(0, 2 * np.pi, 200)
    y = np.sin(x)
    p = bokeh.plotting.figure(frame_height=200, frame_width=400)
    cds = bokeh.models.ColumnDataSource(dict(x=x, y=y))
    p.line(source=cds, x="x", y="y", line_width=2)
    f_slider = bokeh.models.Slider(
        title="f", start=0.1, end=10, value=1, step=0.005, width=150
    )
    # JS body recomputes y = sin(f*x) in the browser; jsfuns["sin"]
    # supplies the sin helper used below.
    js_code = (
        jsfuns["sin"]
        + """
    let x = cds.data['x'];
    let y = cds.data['y'];
    let f = f_slider.value;
    for (let i = 0; i < x.length; i++) {
        y[i] = sin(f * x[i]);
    }
    cds.change.emit();
"""
    )
    callback = bokeh.models.CustomJS(
        args=dict(cds=cds, f_slider=f_slider), code=js_code
    )
    f_slider.js_on_change("value", callback)
    layout = bokeh.layouts.column(
        bokeh.layouts.row(bokeh.models.Spacer(width=60), f_slider, width=150),
        bokeh.models.Spacer(height=20),
        p,
    )
    return layout
def michaelis_menten_approx():
    """Interactive comparison of full Michaelis-Menten kinetics against
    the quasi-steady-state approximation.

    Sliders control κ, ζ and the initial substrate concentration; the
    client-side update is done in JavaScript. Returns a Bokeh layout."""

    def michaelis_menten_rhs(c, t, kappa, zeta):
        # Dimensionless RHS for (substrate, complex, product).
        cs, ces, cp = c
        return np.array(
            [
                (-(1 - ces) * cs + (1 - kappa) * ces) / kappa,
                ((1 - ces) * cs - ces) / kappa / zeta,
                ces,
            ]
        )

    def approx_michaelis_menten(c0, t, kappa, zeta):
        """Analytical solution to the Michaelis-Menten equation."""
        cs0, ces0, cp0 = c0
        # Substrate from the Lambert-W closed form of the QSSA.
        cs = scipy.special.lambertw(cs0 * np.exp(cs0 - t)).real
        ces = cs / (1 + cs)
        cp = cs0 + cp0 - cs - zeta * (ces0 + ces)
        return cs, ces, cp

    kappa_slider = bokeh.models.Slider(
        title="κ", start=0.01, end=1, value=0.5, step=0.01, width=100
    )
    # ζ and cs0 sliders work on a log10 scale; the tick formatter shows
    # the linear value.
    zeta_slider = bokeh.models.Slider(
        title="ζ",
        start=-2,
        end=2,
        value=-2,
        step=0.05,
        width=100,
        format=bokeh.models.FuncTickFormatter(
            code="return Math.pow(10, tick).toFixed(2)"
        ),
    )
    cs0_slider = bokeh.models.Slider(
        title="init. substr. conc.",
        start=-1,
        end=1,
        value=0.0,
        step=0.01,
        width=100,
        format=bokeh.models.FuncTickFormatter(
            code="return Math.pow(10, tick).toFixed(2)"
        ),
    )

    def solve_mm(kappa, zeta, cs0):
        # Initial condition
        c0 = np.array([cs0, 0.0, 0.0])
        # Time points
        t = np.linspace(0, 10, 400)
        # Solve the full system
        c = scipy.integrate.odeint(michaelis_menten_rhs, c0, t, args=(kappa, zeta))
        cs, ces, cp = c.transpose()
        # Solve the approximate system
        cs_approx, ces_approx, cp_approx = approx_michaelis_menten(c0, t, kappa, zeta)
        return t, cs, ces, cp, cs_approx, ces_approx, cp_approx

    # Get solution for initial glyphs
    t, cs, ces, cp, cs_approx, ces_approx, cp_approx = solve_mm(
        kappa_slider.value, 10 ** zeta_slider.value, 10 ** cs0_slider.value
    )
    # Set up ColumnDataSource for plot
    cds = bokeh.models.ColumnDataSource(
        dict(
            t=t,
            cs=cs,
            ces=ces,
            cp=cp,
            cs_approx=cs_approx,
            ces_approx=ces_approx,
            cp_approx=cp_approx,
        )
    )
    # Make the plot
    p = bokeh.plotting.figure(
        plot_width=500,
        plot_height=250,
        x_axis_label="dimensionless time",
        y_axis_label="dimensionless concentration",
        x_range=[0, 10],
        y_range=[-0.02, 1.02],
    )
    colors = colorcet.b_glasbey_category10
    # Populate glyphs: solid lines for the full solution, faded thick
    # lines for the QSSA approximation.
    p.line(
        source=cds, x="t", y="ces", line_width=2, color=colors[0], legend_label="ES",
    )
    p.line(
        source=cds, x="t", y="cs", line_width=2, color=colors[1], legend_label="S",
    )
    p.line(
        source=cds, x="t", y="cp", line_width=2, color=colors[2], legend_label="P",
    )
    p.line(
        source=cds, x="t", y="ces_approx", line_width=4, color=colors[0], alpha=0.3,
    )
    p.line(
        source=cds, x="t", y="cs_approx", line_width=4, color=colors[1], alpha=0.3,
    )
    p.line(
        source=cds, x="t", y="cp_approx", line_width=4, color=colors[2], alpha=0.3,
    )
    p.legend.location = "center_right"
    # JavaScript callback
    js_code = (
        jsfuns["michaelis_menten_approx"]
        + jsfuns["utils"]
        + jsfuns["linalg"]
        + jsfuns["ode"]
        + "callback()"
    )
    callback = bokeh.models.CustomJS(
        args=dict(
            cds=cds,
            kappaSlider=kappa_slider,
            zetaSlider=zeta_slider,
            cs0Slider=cs0_slider,
            xRange=p.x_range,
            yRange=p.y_range,
        ),
        code=js_code,
    )
    # Link sliders
    kappa_slider.js_on_change("value", callback)
    zeta_slider.js_on_change("value", callback)
    cs0_slider.js_on_change("value", callback)
    # Also trigger if x_range changes
    p.x_range.js_on_change("end", callback)
    # Build layout
    layout = bokeh.layouts.column(
        bokeh.layouts.row(
            bokeh.models.Spacer(width=40),
            kappa_slider,
            bokeh.models.Spacer(width=10),
            zeta_slider,
            bokeh.models.Spacer(width=10),
            cs0_slider,
        ),
        bokeh.models.Spacer(width=10),
        p,
    )
    return layout
def gaussian_pulse():
    """Interactive plot of a Gaussian pulse s(t) = exp(-4 (t - t0)² / τ²),
    with sliders for the center t₀ and duration τ. Returns a Bokeh layout;
    the updates run client-side via CustomJS.
    """
    # t/s data for plotting (initial slider values)
    t_0 = 4.0
    tau = 2.0
    t = np.linspace(0, 10, 200)
    s = np.exp(-4 * (t - t_0) ** 2 / tau ** 2)
    # Place the data in a ColumnDataSource
    cds = bokeh.models.ColumnDataSource(dict(t=t, s=s))
    # Build the plot
    p = bokeh.plotting.figure(
        frame_height=200,
        frame_width=400,
        x_axis_label="time",
        y_axis_label="input signal",
        x_range=[0, 10],
        y_range=[-0.02, 1.1],
    )
    p.line(source=cds, x="t", y="s", line_width=2)
    t0_slider = bokeh.models.Slider(
        title="t₀", start=0, end=10, step=0.01, value=4.0, width=150
    )
    tau_slider = bokeh.models.Slider(
        title="τ", start=0, end=10, step=0.01, value=2.0, width=150
    )
    # JavaScript callback
    js_code = jsfuns["gaussian_pulse"] + "callback()"
    callback = bokeh.models.CustomJS(
        args=dict(cds=cds, t0_slider=t0_slider, tau_slider=tau_slider), code=js_code,
    )
    t0_slider.js_on_change("value", callback)
    tau_slider.js_on_change("value", callback)
    # Lay out and return
    return bokeh.layouts.row(
        p, bokeh.models.Spacer(width=30), bokeh.layouts.column(t0_slider, tau_slider)
    )
def autorepressor_response_to_pulse():
    """Make an interactive plot of the response of an autorepressive
    circuit to a Gaussian pulse of induction. Also overlays the
    response of an unregulated circuit and the pulse itself.
    Returns a Bokeh layout; slider updates run client-side via CustomJS.
    """

    def neg_auto_rhs(x, t, beta0, gamma, k, n, ks, ns, s):
        """
        Right hand side for negative autoregulation motif with s dependence.
        Return dx/dt.
        """
        # Compute dx/dt
        return (
            beta0 * (s / ks) ** ns / (1 + (s / ks) ** ns) / (1 + (x / k) ** n)
            - gamma * x
        )

    def neg_auto_rhs_s_fun(x, t, beta0, gamma, k, n, ks, ns, s_fun, s_args):
        """
        Right hand side for negative autoregulation function, with s variable.
        Returns dx/dt.
        s_fun is a function of the form s_fun(t, *s_args), so s_args is a tuple
        containing the arguments to pass to s_fun.
        """
        # Compute s
        s = s_fun(t, *s_args)
        # Correct for x possibly being numerically negative as odeint() adjusts step size
        x = np.maximum(0, x)
        # Plug in this value of s to the RHS of the negative autoregulation model
        return neg_auto_rhs(x, t, beta0, gamma, k, n, ks, ns, s)

    def unreg_rhs(x, t, beta0, gamma, ks, ns, s):
        """
        Right hand side for constitutive gene expression
        modulated to only be active in the presence of s.
        Returns dx/dt.
        """
        return beta0 * (s / ks) ** ns / (1 + (s / ks) ** ns) - gamma * x

    def unreg_rhs_s_fun(x, t, beta0, gamma, ks, ns, s_fun, s_args):
        """
        Right hand side for unregulated function, with s variable.
        Returns dx/dt.
        s_fun is a function of the form s_fun(t, *s_args), so s_args is a tuple
        containing the arguments to pass to s_fun.
        """
        # Compute s
        s = s_fun(t, *s_args)
        # Plug in this value of s to the RHS of the unregulated model
        return unreg_rhs(x, t, beta0, gamma, ks, ns, s)

    def s_pulse(t, t_0, tau):
        """
        Returns s value for a pulse centered at t_0 with duration tau.
        """
        # Return 0 if tau is zero (avoids division by zero), otherwise Gaussian
        return 0 if tau == 0 else np.exp(-4 * (t - t_0) ** 2 / tau ** 2)

    # Set up initial parameters
    colors = colorcet.b_glasbey_category10
    # Time points we want for the solution
    t = np.linspace(0, 10, 200)
    # Initial condition
    x0 = 0.0
    # Parameters
    beta0 = 100.0
    gamma = 1.0
    k = 1.0
    n = 1.0
    s = 100.0
    ns = 10.0
    ks = 0.1
    s_args = (4.0, 2.0)
    args = (beta0, gamma, k, n, ks, ns, s_pulse, s_args)
    args_unreg = (beta0, gamma, ks, ns, s_pulse, s_args)
    # Integrate ODE
    x = scipy.integrate.odeint(neg_auto_rhs_s_fun, x0, t, args=args)
    x = x.transpose()[0]
    x_unreg = scipy.integrate.odeint(unreg_rhs_s_fun, x0, t, args=args_unreg)
    x_unreg = x_unreg.transpose()[0]
    # also calculate the input
    s = s_pulse(t, *s_args)
    # Normalize time courses
    x /= x.max()
    x_unreg /= x_unreg.max()
    # set up the column data source
    cds = bokeh.models.ColumnDataSource(dict(t=t, x=x, s=s, x_unreg=x_unreg))
    # set up plot
    p = bokeh.plotting.figure(
        frame_width=375,
        frame_height=250,
        x_axis_label="time",
        y_axis_label="normalized concentration",
        x_range=[t.min(), t.max()],
    )
    # Populate glyphs
    p.line(
        source=cds,
        x="t",
        y="x",
        line_width=2,
        color=colors[1],
        legend_label="x neg. auto.",
    )
    p.line(
        source=cds,
        x="t",
        y="x_unreg",
        line_width=2,
        color=colors[2],
        legend_label="x unreg.",
    )
    p.line(source=cds, x="t", y="s", line_width=2, color=colors[0], legend_label="s")
    # Place the legend
    p.legend.location = "top_left"
    # Build the widgets (log-scale sliders show the linear value via the formatter)
    log_beta0_slider = bokeh.models.Slider(
        title="log₁₀ β₀", start=-1, end=2, step=0.1, value=np.log10(beta0), width=150
    )
    log_gamma_slider = bokeh.models.Slider(
        title="log₁₀ γ", start=-1, end=2, step=0.1, value=np.log10(gamma), width=150
    )
    log_k_slider = bokeh.models.Slider(
        title="log₁₀ k", start=-1, end=2, step=0.1, value=np.log10(k), width=150
    )
    n_slider = bokeh.models.Slider(
        title="n", start=0.1, end=10, step=0.1, value=n, width=150
    )
    log_ks_slider = bokeh.models.Slider(
        title="log₁₀ kₛ", start=-2, end=2, step=0.1, value=np.log10(ks), width=150
    )
    ns_slider = bokeh.models.Slider(
        title="nₛ", start=0.1, end=10, step=0.1, value=ns, width=150
    )
    t0_slider = bokeh.models.Slider(
        title="t₀", start=0.01, end=10, step=0.01, value=s_args[0], width=150
    )
    tau_slider = bokeh.models.Slider(
        title="τ", start=0.01, end=10, step=0.01, value=s_args[1], width=150
    )
    normalize_toggle = bokeh.models.Toggle(label="Normalize", active=True, width=50)
    legend_toggle = bokeh.models.Toggle(label="Legend", active=True, width=50)
    # JavaScript callback that re-solves and redraws client-side
    js_code = (
        jsfuns["linalg"]
        + jsfuns["ode"]
        + jsfuns["utils"]
        + jsfuns["autorepressor_response_to_pulse"]
        + "callback()"
    )
    callback = bokeh.models.CustomJS(
        args=dict(
            cds=cds,
            p=p,
            t0Slider=t0_slider,
            tauSlider=tau_slider,
            logBeta0Slider=log_beta0_slider,
            logGammaSlider=log_gamma_slider,
            logkSlider=log_k_slider,
            nSlider=n_slider,
            logksSlider=log_ks_slider,
            nsSlider=ns_slider,
            normalizeToggle=normalize_toggle,
            legendToggle=legend_toggle,
            xRange=p.x_range,
            yaxis=p.yaxis[0],
            legend=p.legend[0],
        ),
        code=js_code,
    )
    # Use the `js_on_change()` method to call the custom JavaScript code.
    # NOTE(review): n_slider and log_ks_slider appear twice in this list;
    # harmless (the callback is just attached twice) but redundant.
    for slider in [
        t0_slider,
        tau_slider,
        log_beta0_slider,
        log_gamma_slider,
        log_k_slider,
        n_slider,
        log_ks_slider,
        n_slider,
        log_ks_slider,
        ns_slider,
    ]:
        slider.js_on_change("value", callback)
    # Execute callback with changes in toggles
    normalize_toggle.js_on_change("active", callback)
    legend_toggle.js_on_change("active", callback)
    # Also trigger if x_range changes
    p.x_range.js_on_change("end", callback)
    # Lay out and return
    layout = bokeh.layouts.row(
        p,
        bokeh.layouts.Spacer(width=30),
        bokeh.layouts.column(
            log_beta0_slider, log_gamma_slider, log_k_slider, n_slider, legend_toggle,
        ),
        bokeh.layouts.column(
            log_ks_slider, ns_slider, t0_slider, tau_slider, normalize_toggle,
        ),
    )
    return layout
def autoactivator_fixed_points():
    """Make an interactive plot of fixed points for a potentially
    bistable autoactivator circuit.

    Plots the Hill production curve against the linear removal curve;
    their intersections (computed client-side with Newton's method) are
    the fixed points. Returns a Bokeh layout.
    """
    # Parameters for first plot
    beta = 10
    k = 3
    n = 5
    gamma = 1
    # Theoretical curves
    x = np.linspace(0, 20, 400)
    fp = beta * (x / k) ** n / (1 + (x / k) ** n)
    fd = gamma * x
    # Set up sliders
    params = [
        dict(
            name="γ", start=0.1, end=4, step=0.1, value=gamma, long_name="gamma_slider",
        ),
        dict(
            name="β", start=0.1, end=15, step=0.1, value=beta, long_name="beta_slider",
        ),
        dict(name="k", start=1, end=5, step=0.1, value=k, long_name="k_slider"),
        dict(name="n", start=0.1, end=10, step=0.1, value=n, long_name="n_slider"),
    ]
    sliders = [
        bokeh.models.Slider(
            start=param["start"],
            end=param["end"],
            value=param["value"],
            step=param["step"],
            title=param["name"],
            width=100,
        )
        for param in params
    ]
    # Build plot
    p = bokeh.plotting.figure(
        frame_height=200,
        frame_width=300,
        x_axis_label="x",
        y_axis_label="production or removal rate",
        y_range=[-1, 16],
        x_range=[-1, 16],
        toolbar_location="above",
    )
    # Column data source for curves
    cds = bokeh.models.ColumnDataSource(dict(x=x, fp=fp, fd=fd))
    p.line(source=cds, x="x", y="fp", line_width=2)
    p.line(source=cds, x="x", y="fd", line_width=2, color="orange")
    # Column data sources for stable and unstable fixed points.
    # Values for initial parameters hard-coded to save coding up fixed-
    # point finding in Python; already done in JS code
    cds_fp_stable = bokeh.models.ColumnDataSource(dict(x=[0, 9.97546], y=[0, 9.97546]))
    cds_fp_unstable = bokeh.models.ColumnDataSource(dict(x=[2.37605], y=[2.37605]))
    p.circle(source=cds_fp_stable, x="x", y="y", color="black", size=10)
    p.circle(
        source=cds_fp_unstable,
        x="x",
        y="y",
        line_color="black",
        line_width=2,
        fill_color="white",
        size=10,
    )
    # JavaScript callback, updates fixed points using Newton's method.
    # (Fixed a duplicated assignment: the original read `js_code = js_code = (...)`.)
    js_code = (
        jsfuns["rootfinding"] + jsfuns["autoactivator_fixed_points"] + "callback()"
    )
    callback = bokeh.models.CustomJS(
        args=dict(
            cds=cds, cds_fp_stable=cds_fp_stable, cds_fp_unstable=cds_fp_unstable
        ),
        code=js_code,
    )
    # Use the `js_on_change()` method to call the custom JavaScript code.
    for param, slider in zip(params, sliders):
        callback.args[param["long_name"]] = slider
        slider.js_on_change("value", callback)
    # Lay out and return
    return bokeh.layouts.row(
        p,
        bokeh.models.Spacer(width=30),
        bokeh.layouts.column([bokeh.models.Spacer(height=20)] + sliders),
    )
def toggle_nullclines():
    """Make an interactive plot of nullclines and fixed points of
    the Gardner-Collins synthetic toggle switch.
    Returns a Bokeh layout; slider updates run client-side via CustomJS.
    """
    # Set up sliders
    params = [
        dict(
            name="βx", start=0.1, end=20, step=0.1, value=10, long_name="beta_x_slider",
        ),
        dict(
            name="βy", start=0.1, end=20, step=0.1, value=10, long_name="beta_y_slider",
        ),
        dict(name="n", start=1, end=10, step=0.1, value=4, long_name="n_slider"),
    ]
    sliders = [
        bokeh.models.Slider(
            start=param["start"],
            end=param["end"],
            value=param["value"],
            step=param["step"],
            title=param["name"],
            width=150,
        )
        for param in params
    ]
    # Build base plot with starting parameters
    beta = 10
    n = 4
    # Compute nullclines
    x_y = np.linspace(0, 20, 400)
    y_x = np.linspace(0, 20, 400)
    x_x = beta / (1 + y_x ** n)
    y_y = beta / (1 + x_y ** n)
    cds = bokeh.models.ColumnDataSource(data=dict(x_x=x_x, x_y=x_y, y_x=y_x, y_y=y_y))
    # Make the plot
    p = bokeh.plotting.figure(
        frame_height=250,
        frame_width=250,
        x_axis_label="x",
        y_axis_label="y",
        x_range=[-1, 20],
        y_range=[-1, 20],
    )
    p.line(x="x_x", y="y_x", source=cds, line_width=2, legend_label="x nullcline")
    p.line(
        x="x_y",
        y="y_y",
        source=cds,
        line_width=2,
        color="orange",
        legend_label="y nullcline",
    )
    # Fixed points for the initial parameters, hard-coded; the JS
    # callback recomputes them with root finding on slider changes.
    cds_stable = bokeh.models.ColumnDataSource(
        dict(x=[0.0009999, 9.99999999999], y=[9.99999999999, 0.0009999])
    )
    cds_unstable = bokeh.models.ColumnDataSource(
        dict(x=[1.533012798623252], y=[1.533012798623252])
    )
    p.circle(source=cds_stable, x="x", y="y", color="black", size=10)
    p.circle(
        source=cds_unstable,
        x="x",
        y="y",
        line_color="black",
        fill_color="white",
        line_width=2,
        size=10,
    )
    # Callback (uses JavaScript)
    js_code = jsfuns["rootfinding"] + jsfuns["toggle_nullclines"] + "callback()"
    callback = bokeh.models.CustomJS(
        args=dict(cds=cds, cdsStable=cds_stable, cdsUnstable=cds_unstable), code=js_code
    )
    # We use the `js_on_change()` method to call the custom JavaScript code.
    for param, slider in zip(params, sliders):
        callback.args[param["long_name"]] = slider
        slider.js_on_change("value", callback)
    # Return layout
    return bokeh.layouts.row(
        p,
        bokeh.models.Spacer(width=30),
        bokeh.layouts.column(bokeh.models.Spacer(height=40), *sliders),
    )
def protein_repressilator():
    """Plot the dynamics of a protein-only repressilator circuit.

    Shows a time course and an x₁-x₂ phase portrait; β and n sliders
    re-solve the ODEs client-side. Returns a Bokeh layout.
    """

    def protein_repressilator_rhs(x, t, beta, n):
        """
        Returns 3-array of (dx_1/dt, dx_2/dt, dx_3/dt)
        """
        x_1, x_2, x_3 = x
        # Cyclic repression: 3 represses 1, 1 represses 2, 2 represses 3.
        return np.array(
            [
                beta / (1 + x_3 ** n) - x_1,
                beta / (1 + x_1 ** n) - x_2,
                beta / (1 + x_2 ** n) - x_3,
            ]
        )

    # Initial conditions
    x0 = np.array([1, 1, 1.2])
    # Number of points to use in plots
    n_points = 1000
    # Widgets for controlling parameters
    beta_slider = bokeh.models.Slider(
        title="β", start=0.01, end=100, step=0.01, value=10.0
    )
    n_slider = bokeh.models.Slider(title="n", start=1, end=5, step=0.1, value=3)

    # Solve for species concentrations
    def _solve_repressilator(beta, n, t_max):
        t = np.linspace(0, t_max, n_points)
        x = scipy.integrate.odeint(protein_repressilator_rhs, x0, t, args=(beta, n))
        return t, x.transpose()

    # Obtain solution for plot
    t, x = _solve_repressilator(beta_slider.value, n_slider.value, 40.0)
    # Build the plot
    colors = colorcet.b_glasbey_category10[:3]
    p_rep = bokeh.plotting.figure(
        frame_width=550,
        frame_height=200,
        x_axis_label="t",
        x_range=[0, 40.0],
    )
    cds = bokeh.models.ColumnDataSource(data=dict(t=t, x1=x[0], x2=x[1], x3=x[2]))
    labels = dict(x1="x₁", x2="x₂", x3="x₃")
    for color, x_val in zip(colors, labels):
        p_rep.line(
            source=cds,
            x="t",
            y=x_val,
            color=color,
            legend_label=labels[x_val],
            line_width=2,
        )
    p_rep.legend.location = "top_left"
    # Set up phase portrait plot
    p_phase = bokeh.plotting.figure(
        frame_width=200, frame_height=200, x_axis_label="x₁", y_axis_label="x₂",
    )
    p_phase.line(source=cds, x="x1", y="x2", line_width=2)
    # Set up callbacks
    js_code = (
        jsfuns["reg"]
        + jsfuns["ode"]
        + jsfuns["circuits"]
        + jsfuns["utils"]
        + jsfuns["linalg"]
        + jsfuns["proteinRepressilator"]
        + 'callback()'
    )
    callback = bokeh.models.CustomJS(
        args=dict(
            cds=cds,
            xRange=p_rep.x_range,
            betaSlider=beta_slider,
            nSlider=n_slider,
        ),
        code=js_code,
    )
    beta_slider.js_on_change("value", callback)
    n_slider.js_on_change("value", callback)
    p_rep.x_range.js_on_change("end", callback)
    # Build layout
    layout = bokeh.layouts.column(
        p_rep,
        bokeh.layouts.Spacer(height=10),
        bokeh.layouts.row(
            p_phase,
            bokeh.layouts.Spacer(width=70),
            bokeh.layouts.column(beta_slider, n_slider, width=150),
        ),
    )
    return layout
def repressilator():
    """Plot the dynamics of a repressilator circuit (mRNA + protein).

    β, γ, ρ sliders work on a log10 scale (the tick formatter shows the
    linear value); updates re-solve the ODEs client-side. Returns a
    Bokeh layout.
    """
    # Sliders
    beta_slider = bokeh.models.Slider(
        title="β",
        start=0,
        end=4,
        step=0.1,
        value=1,
        format=bokeh.models.FuncTickFormatter(code="return Math.pow(10, tick).toFixed(2)"),
    )
    gamma_slider = bokeh.models.Slider(
        title="γ",
        start=-3,
        end=0,
        step=0.1,
        value=0,
        format=bokeh.models.FuncTickFormatter(code="return Math.pow(10, tick).toFixed(3)"),
    )
    rho_slider = bokeh.models.Slider(
        title="ρ",
        start=-6,
        end=0,
        step=0.1,
        value=-3,
        format=bokeh.models.FuncTickFormatter(code="return Math.pow(10, tick).toFixed(6)"),
    )
    n_slider = bokeh.models.Slider(title="n", start=1, end=5, step=0.1, value=3)

    def repressilator_rhs(mx, t, beta, gamma, rho, n):
        """
        Returns 6-array of (dm_1/dt, dm_2/dt, dm_3/dt, dx_1/dt, dx_2/dt, dx_3/dt)
        """
        m_1, m_2, m_3, x_1, x_2, x_3 = mx
        # ρ is the leakage; proteins repress transcription cyclically.
        return np.array(
            [
                beta * (rho + 1 / (1 + x_3 ** n)) - m_1,
                beta * (rho + 1 / (1 + x_1 ** n)) - m_2,
                beta * (rho + 1 / (1 + x_2 ** n)) - m_3,
                gamma * (m_1 - x_1),
                gamma * (m_2 - x_2),
                gamma * (m_3 - x_3),
            ]
        )

    # Initial conditions
    x0 = np.array([0, 0, 0, 1, 1.1, 1.2])
    # Number of points to use in plots
    n_points = 1000

    # Solve for species concentrations; slider values are log10 of the
    # actual parameters.
    def _solve_repressilator(log_beta, log_gamma, log_rho, n, t_max):
        beta = 10 ** log_beta
        gamma = 10 ** log_gamma
        rho = 10 ** log_rho
        t = np.linspace(0, t_max, n_points)
        x = scipy.integrate.odeint(repressilator_rhs, x0, t, args=(beta, gamma, rho, n))
        m1, m2, m3, x1, x2, x3 = x.transpose()
        return t, m1, m2, m3, x1, x2, x3

    t, m1, m2, m3, x1, x2, x3 = _solve_repressilator(
        beta_slider.value,
        gamma_slider.value,
        rho_slider.value,
        n_slider.value,
        40.0,
    )
    cds = bokeh.models.ColumnDataSource(
        dict(t=t, m1=m1, m2=m2, m3=m3, x1=x1, x2=x2, x3=x3)
    )
    p = bokeh.plotting.figure(
        frame_width=500,
        frame_height=200,
        x_axis_label="t",
        x_range=[0, 40.0],
    )
    colors = bokeh.palettes.d3["Category20"][6]
    m1_line = p.line(source=cds, x="t", y="m1", line_width=2, color=colors[1])
    x1_line = p.line(source=cds, x="t", y="x1", line_width=2, color=colors[0])
    m2_line = p.line(source=cds, x="t", y="m2", line_width=2, color=colors[3])
    x2_line = p.line(source=cds, x="t", y="x2", line_width=2, color=colors[2])
    m3_line = p.line(source=cds, x="t", y="m3", line_width=2, color=colors[5])
    x3_line = p.line(source=cds, x="t", y="x3", line_width=2, color=colors[4])
    legend_items = [
        ("m₁", [m1_line]),
        ("x₁", [x1_line]),
        ("m₂", [m2_line]),
        ("x₂", [x2_line]),
        ("m₃", [m3_line]),
        ("x₃", [x3_line]),
    ]
    # click_policy='hide' lets the user toggle individual traces.
    legend = bokeh.models.Legend(items=legend_items)
    legend.click_policy = 'hide'
    p.add_layout(legend, "right")
    # Build the layout
    layout = bokeh.layouts.column(
        bokeh.layouts.row(
            beta_slider,
            gamma_slider,
            rho_slider,
            n_slider,
            width=575,
        ),
        bokeh.layouts.Spacer(height=10),
        p,
    )
    # Set up callbacks
    js_code = (
        jsfuns["reg"]
        + jsfuns["ode"]
        + jsfuns["circuits"]
        + jsfuns["utils"]
        + jsfuns["linalg"]
        + jsfuns["repressilator"]
        + 'callback()'
    )
    callback = bokeh.models.CustomJS(
        args=dict(
            cds=cds,
            xRange=p.x_range,
            betaSlider=beta_slider,
            rhoSlider=rho_slider,
            gammaSlider=gamma_slider,
            nSlider=n_slider,
        ),
        code=js_code,
    )
    beta_slider.js_on_change("value", callback)
    gamma_slider.js_on_change("value", callback)
    rho_slider.js_on_change("value", callback)
    n_slider.js_on_change("value", callback)
    p.x_range.js_on_change("end", callback)
    return layout
def turing_dispersion_relation():
    """Interactive Bokeh plot of the dispersion relation for Turing patterns.

    Builds sliders for the diffusivity ratio d and the parameter μ, computes
    the initial Re[λ-max] vs. wavenumber k curve in Python, and attaches a
    JavaScript callback that recomputes the curve client-side whenever a
    slider moves.  (Replaces an earlier Panel/`pn.depends` implementation
    that called np.linalg.eigvals per wavenumber.)
    """
    # Parameter sliders.
    d_slider = bokeh.models.Slider(
        title="d", start=0.01, end=1, value=0.05, step=0.01, width=150
    )
    mu_slider = bokeh.models.Slider(
        title="μ", start=0.01, end=2, value=1.5, step=0.005, width=150
    )
    # Initial curve for the default slider values.  λ-max is the larger root
    # of λ² + bλ + c = 0 (b, c from the 2x2 linear-stability Jacobian).
    wavenumbers = np.linspace(0, 10, 500)
    ksq = wavenumbers ** 2
    mu0 = mu_slider.value
    d0 = d_slider.value
    b_coef = mu0 + (1.0 + d0) * ksq - 1.0
    c_coef = (mu0 + ksq) * (d0 * ksq - 1.0) + 2.0 * mu0
    disc = b_coef ** 2 - 4.0 * c_coef
    lam = np.empty_like(wavenumbers)
    # Complex-conjugate pair: both roots share real part -b/2.
    complex_roots = disc <= 0
    lam[complex_roots] = -b_coef[complex_roots] / 2.0
    # Real roots: take the larger one.
    real_roots = ~complex_roots
    lam[real_roots] = (-b_coef[real_roots] + np.sqrt(disc[real_roots])) / 2.0
    cds = bokeh.models.ColumnDataSource(dict(k=wavenumbers, lam=lam))
    p = bokeh.plotting.figure(
        frame_width=350,
        frame_height=200,
        x_axis_label="k",
        y_axis_label="Re[λ-max]",
        x_range=[0, 10],
    )
    p.line(source=cds, x="k", y="lam", color="black", line_width=2)
    # Client-side mirror of the Python computation above.
    js_code = """
    function dispersion_relation(mu, d, k) {
        let k2 = k**2;
        let b = mu + (1.0 + d) * k2 - 1.0;
        let c = (mu + k**2) * (d * k**2 - 1.0) + 2.0 * mu
        let discriminant = b**2 - 4.0 * c;
        if (discriminant < 0) {
            return -b / 2.0;
        }
        else {
            return (-b + Math.sqrt(discriminant)) / 2.0;
        }
    }
    let mu = mu_slider.value;
    let d = d_slider.value;
    let k = cds.data['k'];
    let lam = cds.data['lam'];
    for (let i = 0; i < k.length; i++) {
        lam[i] = dispersion_relation(mu, d, k[i]);
    }
    cds.change.emit();
"""
    callback = bokeh.models.CustomJS(
        args=dict(cds=cds, d_slider=d_slider, mu_slider=mu_slider), code=js_code
    )
    mu_slider.js_on_change("value", callback)
    d_slider.js_on_change("value", callback)
    layout = bokeh.layouts.column(
        bokeh.layouts.row(
            bokeh.models.Spacer(width=60), d_slider, mu_slider, width=400
        ),
        bokeh.models.Spacer(height=20),
        p,
    )
    return layout
def lotka_volterra():
    """Build an interactive Bokeh plot of the Lotka-Volterra predator-prey
    system: a time-series panel for x and y, a phase-plane panel, and
    sliders for α, β, γ, δ wired to a client-side RKF45 re-solve.
    """
    # Time points; also passed to the JS callback as `timePoints`.
    t = np.linspace(0.0, 20.0, 500)
    # Sliders
    alpha_slider = bokeh.models.Slider(
        title="α", start=0.1, end=10, value=1, step=0.005, width=150
    )
    beta_slider = bokeh.models.Slider(
        title="β", start=0.1, end=10, value=1, step=0.005, width=150
    )
    gamma_slider = bokeh.models.Slider(
        title="γ", start=0.1, end=10, value=1, step=0.005, width=150
    )
    delta_slider = bokeh.models.Slider(
        title="δ", start=0.1, end=10, value=1, step=0.005, width=150
    )

    def lotka_volterra_rhs(xy, t, alpha, beta, gamma, delta):
        """Right-hand side of the Lotka-Volterra ODEs: dx/dt = αx − βxy,
        dy/dt = δxy − γy."""
        # Unpack
        x, y = xy
        dxdt = alpha * x - beta * x * y
        dydt = delta * x * y - gamma * y
        return np.array([dxdt, dydt])

    # Initial conditions
    x0 = np.array([1.0, 3.0])
    # Solve (the fourth positional argument of odeint is `args`).
    xy = scipy.integrate.odeint(
        lotka_volterra_rhs,
        x0,
        t,
        (alpha_slider.value, beta_slider.value, gamma_slider.value, delta_slider.value),
    )
    x, y = xy.transpose()
    # Set up plots
    p_phase = bokeh.plotting.figure(
        frame_width=200, frame_height=200, x_axis_label="x", y_axis_label="y",
    )
    p = bokeh.plotting.figure(frame_width=550, frame_height=200, x_axis_label="t",)
    # The data source shared by both panels and the JS callback.
    cds = bokeh.models.ColumnDataSource(dict(t=t, x=x, y=y))
    p_phase.line(source=cds, x="x", y="y", line_width=2)
    p.line(source=cds, x="t", y="x", line_width=2)
    p.line(source=cds, x="t", y="y", color="tomato", line_width=2)
    # Client-side callback: re-solve with RKF45 in the browser and update the
    # source.  jsfuns["ode"] / jsfuns["circuits"] are expected to supply
    # `rkf45` and `lotkaVolterra` — confirmed only by their use below.
    js_code = (
        jsfuns["ode"]
        + jsfuns["circuits"]
        + """
    var x = cds.data['x'];
    var y = cds.data['y'];
    var alpha = alpha_slider.value;
    var beta = beta_slider.value;
    var gamma = gamma_slider.value;
    var delta = delta_slider.value;
    var args = [alpha, beta, gamma, delta];
    var xy = rkf45(lotkaVolterra, [1.0, 3.0], timePoints, args);
    x = xy[0];
    y = xy[1];
    cds.data['x'] = x;
    cds.data['y'] = y;
    cds.change.emit();
"""
    )
    callback = bokeh.models.CustomJS(
        args=dict(
            cds=cds,
            timePoints=t,
            alpha_slider=alpha_slider,
            beta_slider=beta_slider,
            gamma_slider=gamma_slider,
            delta_slider=delta_slider,
        ),
        code=js_code,
    )
    # Every slider triggers the same recompute.
    alpha_slider.js_on_change("value", callback)
    beta_slider.js_on_change("value", callback)
    gamma_slider.js_on_change("value", callback)
    delta_slider.js_on_change("value", callback)
    # Build layout
    layout = bokeh.layouts.column(
        p,
        bokeh.layouts.Spacer(height=10),
        bokeh.layouts.row(
            p_phase,
            bokeh.layouts.Spacer(width=70),
            bokeh.layouts.column(
                alpha_slider, beta_slider, gamma_slider, delta_slider, width=150
            ),
        ),
    )
    return layout
| [
"numpy.maximum",
"numpy.empty_like",
"numpy.sin",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"numpy.log10",
"numpy.sqrt"
] | [((367, 397), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(200)'], {}), '(0, 2 * np.pi, 200)\n', (378, 397), True, 'import numpy as np\n'), ((406, 415), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (412, 415), True, 'import numpy as np\n'), ((5781, 5804), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(200)'], {}), '(0, 10, 200)\n', (5792, 5804), True, 'import numpy as np\n'), ((5813, 5851), 'numpy.exp', 'np.exp', (['(-4 * (t - t_0) ** 2 / tau ** 2)'], {}), '(-4 * (t - t_0) ** 2 / tau ** 2)\n', (5819, 5851), True, 'import numpy as np\n'), ((9327, 9350), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(200)'], {}), '(0, 10, 200)\n', (9338, 9350), True, 'import numpy as np\n'), ((14090, 14113), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(400)'], {}), '(0, 20, 400)\n', (14101, 14113), True, 'import numpy as np\n'), ((17655, 17678), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(400)'], {}), '(0, 20, 400)\n', (17666, 17678), True, 'import numpy as np\n'), ((17689, 17712), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(400)'], {}), '(0, 20, 400)\n', (17700, 17712), True, 'import numpy as np\n'), ((19948, 19969), 'numpy.array', 'np.array', (['[1, 1, 1.2]'], {}), '([1, 1, 1.2])\n', (19956, 19969), True, 'import numpy as np\n'), ((23773, 23805), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1.1, 1.2]'], {}), '([0, 0, 0, 1, 1.1, 1.2])\n', (23781, 23805), True, 'import numpy as np\n'), ((28118, 28141), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(500)'], {}), '(0, 10, 500)\n', (28129, 28141), True, 'import numpy as np\n'), ((28341, 28357), 'numpy.empty_like', 'np.empty_like', (['k'], {}), '(k)\n', (28354, 28357), True, 'import numpy as np\n'), ((30019, 30046), 'numpy.linspace', 'np.linspace', (['(0.0)', '(20.0)', '(500)'], {}), '(0.0, 20.0, 500)\n', (30030, 30046), True, 'import numpy as np\n'), ((30775, 30795), 'numpy.array', 'np.array', (['[1.0, 3.0]'], {}), '([1.0, 3.0])\n', (30783, 30795), True, 'import numpy as np\n'), 
((1412, 1517), 'numpy.array', 'np.array', (['[(-(1 - ces) * cs + (1 - kappa) * ces) / kappa, ((1 - ces) * cs - ces) /\n kappa / zeta, ces]'], {}), '([(-(1 - ces) * cs + (1 - kappa) * ces) / kappa, ((1 - ces) * cs -\n ces) / kappa / zeta, ces])\n', (1420, 1517), True, 'import numpy as np\n'), ((2666, 2691), 'numpy.array', 'np.array', (['[cs0, 0.0, 0.0]'], {}), '([cs0, 0.0, 0.0])\n', (2674, 2691), True, 'import numpy as np\n'), ((2727, 2750), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(400)'], {}), '(0, 10, 400)\n', (2738, 2750), True, 'import numpy as np\n'), ((7991, 8007), 'numpy.maximum', 'np.maximum', (['(0)', 'x'], {}), '(0, x)\n', (8001, 8007), True, 'import numpy as np\n'), ((19729, 19830), 'numpy.array', 'np.array', (['[beta / (1 + x_3 ** n) - x_1, beta / (1 + x_1 ** n) - x_2, beta / (1 + x_2 **\n n) - x_3]'], {}), '([beta / (1 + x_3 ** n) - x_1, beta / (1 + x_1 ** n) - x_2, beta /\n (1 + x_2 ** n) - x_3])\n', (19737, 19830), True, 'import numpy as np\n'), ((20358, 20389), 'numpy.linspace', 'np.linspace', (['(0)', 't_max', 'n_points'], {}), '(0, t_max, n_points)\n', (20369, 20389), True, 'import numpy as np\n'), ((23406, 23611), 'numpy.array', 'np.array', (['[beta * (rho + 1 / (1 + x_3 ** n)) - m_1, beta * (rho + 1 / (1 + x_1 ** n)) -\n m_2, beta * (rho + 1 / (1 + x_2 ** n)) - m_3, gamma * (m_1 - x_1), \n gamma * (m_2 - x_2), gamma * (m_3 - x_3)]'], {}), '([beta * (rho + 1 / (1 + x_3 ** n)) - m_1, beta * (rho + 1 / (1 + \n x_1 ** n)) - m_2, beta * (rho + 1 / (1 + x_2 ** n)) - m_3, gamma * (m_1 -\n x_1), gamma * (m_2 - x_2), gamma * (m_3 - x_3)])\n', (23414, 23611), True, 'import numpy as np\n'), ((24078, 24109), 'numpy.linspace', 'np.linspace', (['(0)', 't_max', 'n_points'], {}), '(0, t_max, n_points)\n', (24089, 24109), True, 'import numpy as np\n'), ((30717, 30739), 'numpy.array', 'np.array', (['[dxdt, dydt]'], {}), '([dxdt, dydt])\n', (30725, 30739), True, 'import numpy as np\n'), ((9160, 9198), 'numpy.exp', 'np.exp', (['(-4 * (t - t_0) ** 2 / tau 
** 2)'], {}), '(-4 * (t - t_0) ** 2 / tau ** 2)\n', (9166, 9198), True, 'import numpy as np\n'), ((10955, 10970), 'numpy.log10', 'np.log10', (['beta0'], {}), '(beta0)\n', (10963, 10970), True, 'import numpy as np\n'), ((11088, 11103), 'numpy.log10', 'np.log10', (['gamma'], {}), '(gamma)\n', (11096, 11103), True, 'import numpy as np\n'), ((11218, 11229), 'numpy.log10', 'np.log10', (['k'], {}), '(k)\n', (11226, 11229), True, 'import numpy as np\n'), ((11458, 11470), 'numpy.log10', 'np.log10', (['ks'], {}), '(ks)\n', (11466, 11470), True, 'import numpy as np\n'), ((28475, 28502), 'numpy.sqrt', 'np.sqrt', (['discriminant[inds]'], {}), '(discriminant[inds])\n', (28482, 28502), True, 'import numpy as np\n'), ((1791, 1806), 'numpy.exp', 'np.exp', (['(cs0 - t)'], {}), '(cs0 - t)\n', (1797, 1806), True, 'import numpy as np\n')] |
import warnings
import csv
import numpy as np
import pandas as pd
import torch
from genetic_algorithm import GeneticAlgorithm as ga
from genetic_algorithm import cross_entropy_one_hot
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from torch.autograd import Variable
from utils import binning
# Deliberately silence library warnings (sklearn/torch are noisy); remove when debugging.
warnings.filterwarnings('ignore')
# All experiments run on CPU.
device = torch.device("cpu")
# parameter definitions of experiments
# Each numbered entry is one experiment configuration.  Keys:
#   number_runs                       repetitions per configuration
#   population_size / number_generations / crossover_rate / mutation_rate
#                                     genetic-algorithm settings
#   learning_rate / number_epochs / hidden_size
#                                     network-training settings
#   number_connections1/2             connection budgets per layer
#   lambda                            regularization weight
#   patience_ES / tolerance_ES        early-stopping settings
#   elitist_pct / patience_GA / tolerance_GA
#                                     GA elitism and stopping settings
parameters = {1: {'number_runs': 10, 'population_size': 20, 'number_generations': 20, 'crossover_rate': 0.9,
                  'mutation_rate': 0.001, 'learning_rate': 1e-4, 'number_epochs': 3000, 'hidden_size': 12,
                  'number_connections1': 12, 'number_connections2': 6, 'lambda': 0.1, 'patience_ES': 10,
                  'tolerance_ES': 1e-4, 'elitist_pct': 0.1, 'patience_GA': 5, 'tolerance_GA': 1e-4},
              2: {'number_runs': 10, 'population_size': 20, 'number_generations': 20, 'crossover_rate': 0.9,
                  'mutation_rate': 0.001, 'learning_rate': 1e-2, 'number_epochs': 3000, 'hidden_size': 12,
                  'number_connections1': 12, 'number_connections2': 6, 'lambda': 0.1, 'patience_ES': 15,
                  'tolerance_ES': 1e-4, 'elitist_pct': 0.1, 'patience_GA': 5, 'tolerance_GA': 1e-4},
              3: {'number_runs': 10, 'population_size': 20, 'number_generations': 20, 'crossover_rate': 0.9,
                  'mutation_rate': 0.001, 'learning_rate': 3e-2, 'number_epochs': 3000, 'hidden_size': 12,
                  'number_connections1': 12, 'number_connections2': 6, 'lambda': 0.1, 'patience_ES': 5,
                  'tolerance_ES': 1e-4, 'elitist_pct': 0.1, 'patience_GA': 5, 'tolerance_GA': 1e-4},
              4: {'number_runs': 10, 'population_size': 50, 'number_generations': 20, 'crossover_rate': 0.9,
                  'mutation_rate': 0.001, 'learning_rate': 3e-2, 'number_epochs': 3000, 'hidden_size': 12,
                  'number_connections1': 12, 'number_connections2': 6, 'lambda': 0.1, 'patience_ES': 5,
                  'tolerance_ES': 1e-4, 'elitist_pct': 0.1, 'patience_GA': 5, 'tolerance_GA': 1e-4},
              5: {'number_runs': 10, 'population_size': 20, 'number_generations': 20, 'crossover_rate': 0.9,
                  'mutation_rate': 0.001, 'learning_rate': 3e-2, 'number_epochs': 3000, 'hidden_size': 12,
                  'number_connections1': 8, 'number_connections2': 6, 'lambda': 0.1, 'patience_ES': 5,
                  'tolerance_ES': 1e-4, 'elitist_pct': 0.1, 'patience_GA': 5, 'tolerance_GA': 1e-4},
              6: {'number_runs': 10, 'population_size': 50, 'number_generations': 20, 'crossover_rate': 0.9,
                  'mutation_rate': 0.001, 'learning_rate': 3e-2, 'number_epochs': 3000, 'hidden_size': 12,
                  'number_connections1': 8, 'number_connections2': 6, 'lambda': 0.1, 'patience_ES': 5,
                  'tolerance_ES': 1e-4, 'elitist_pct': 0.1, 'patience_GA': 5, 'tolerance_GA': 1e-4},
              7: {'number_runs': 10, 'population_size': 100, 'number_generations': 20, 'crossover_rate': 0.9,
                  'mutation_rate': 0.001, 'learning_rate': 3e-2, 'number_epochs': 3000, 'hidden_size': 12,
                  'number_connections1': 8, 'number_connections2': 6, 'lambda': 0.2, 'patience_ES': 5,
                  'tolerance_ES': 1e-4, 'elitist_pct': 0.1, 'patience_GA': 5, 'tolerance_GA': 1e-4}
              }
# Loading iris data
iris_data = load_iris()
x = iris_data.data
y_ = iris_data.target.reshape(-1, 1)  # class labels as a single column
# One-hot encode the class labels.
encoder = OneHotEncoder(sparse=False)
y = encoder.fit_transform(y_)
# Discretize each feature into 3 uniform-width, one-hot-encoded bins.
# `binning` also returns the generated per-bin input labels.
X, inputs = binning(pd.DataFrame(x), n_bins=3, encode='onehot-dense', strategy='uniform',
                    feature_names=['Petal length', 'Petal width', 'Sepal length', 'Sepal width'])
label = ['Iris setosa', 'Iris versicolor', 'Iris virginica']
fname = 'iris_results.csv'
# Create the results csv and write its header.
# FIX: files handed to csv.writer must be opened with newline='' — without
# it the csv module emits spurious blank rows on Windows.
with open(fname, 'w', newline='') as file:
    writer = csv.writer(file)
    header = ["Parameters", "Number of connections", "Training accuracy", "Test accuracy", "Recall", "Precision",
              "F1 score"]
    writer.writerow(header)
# Run every experiment configuration; results are appended to `fname` by
# the GA itself (see model.run below).
for params in parameters.values():
    # set fixed seeds for reproducibility — reset once per configuration, so
    # each experiment sees the same sequence of splits
    torch.manual_seed(2021)
    np.random.seed(2021)  # scikit-learn also uses numpy random seed
    for run in range(params['number_runs']):
        # split train and test data: 80/20 stratified test split, then
        # 0.125 of the remaining 80% (i.e. 10% overall) as validation
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y)
        X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.125, stratify=y_train)
        # Wrap everything as float tensors (Variable is a legacy no-op wrapper
        # in modern torch but kept for compatibility).
        X_tr = Variable(torch.tensor(X_train.to_numpy(), dtype=torch.float))
        X_te = Variable(torch.tensor(X_test.to_numpy(), dtype=torch.float))
        y_tr = Variable(torch.tensor(y_train, dtype=torch.float))
        y_te = Variable(torch.tensor(y_test, dtype=torch.float))
        X_val = Variable(torch.tensor(X_val.to_numpy(), dtype=torch.float))
        y_val = Variable(torch.tensor(y_val, dtype=torch.float))
        # One GA instance per run; it evolves sparse network topologies and
        # logs metrics for this run to the shared csv.
        model = ga(input_size=X.shape[1], output_size=3, selection_method='tournament_selection',
                   crossover_method='two_point_crossover', mutation_method='flip_mutation', params=params,
                   loss_function=cross_entropy_one_hot)
        model.run(X_tr, y_tr, X_val, y_val, X_te, y_te, input_labels=inputs, class_labels=label, file_name=fname)
| [
"pandas.DataFrame",
"sklearn.datasets.load_iris",
"numpy.random.seed",
"csv.writer",
"warnings.filterwarnings",
"genetic_algorithm.GeneticAlgorithm",
"torch.manual_seed",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.OneHotEncoder",
"torch.device",
"torch.tensor"
] | [((388, 421), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (411, 421), False, 'import warnings\n'), ((431, 450), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (443, 450), False, 'import torch\n'), ((3488, 3499), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (3497, 3499), False, 'from sklearn.datasets import load_iris\n'), ((3632, 3659), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)'}), '(sparse=False)\n', (3645, 3659), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((3733, 3748), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {}), '(x)\n', (3745, 3748), True, 'import pandas as pd\n'), ((4082, 4098), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (4092, 4098), False, 'import csv\n'), ((4349, 4372), 'torch.manual_seed', 'torch.manual_seed', (['(2021)'], {}), '(2021)\n', (4366, 4372), False, 'import torch\n'), ((4377, 4397), 'numpy.random.seed', 'np.random.seed', (['(2021)'], {}), '(2021)\n', (4391, 4397), True, 'import numpy as np\n'), ((4566, 4615), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'stratify': 'y'}), '(X, y, test_size=0.2, stratify=y)\n', (4582, 4615), False, 'from sklearn.model_selection import train_test_split\n'), ((4657, 4726), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'y_train'], {'test_size': '(0.125)', 'stratify': 'y_train'}), '(X_train, y_train, test_size=0.125, stratify=y_train)\n', (4673, 4726), False, 'from sklearn.model_selection import train_test_split\n'), ((5169, 5389), 'genetic_algorithm.GeneticAlgorithm', 'ga', ([], {'input_size': 'X.shape[1]', 'output_size': '(3)', 'selection_method': '"""tournament_selection"""', 'crossover_method': '"""two_point_crossover"""', 'mutation_method': '"""flip_mutation"""', 'params': 'params', 'loss_function': 'cross_entropy_one_hot'}), "(input_size=X.shape[1], output_size=3, 
selection_method=\n 'tournament_selection', crossover_method='two_point_crossover',\n mutation_method='flip_mutation', params=params, loss_function=\n cross_entropy_one_hot)\n", (5171, 5389), True, 'from genetic_algorithm import GeneticAlgorithm as ga\n'), ((4904, 4944), 'torch.tensor', 'torch.tensor', (['y_train'], {'dtype': 'torch.float'}), '(y_train, dtype=torch.float)\n', (4916, 4944), False, 'import torch\n'), ((4970, 5009), 'torch.tensor', 'torch.tensor', (['y_test'], {'dtype': 'torch.float'}), '(y_test, dtype=torch.float)\n', (4982, 5009), False, 'import torch\n'), ((5112, 5150), 'torch.tensor', 'torch.tensor', (['y_val'], {'dtype': 'torch.float'}), '(y_val, dtype=torch.float)\n', (5124, 5150), False, 'import torch\n')] |
import numpy as np
import pandas as pd
import sqlite3
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegressionCV
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from pickle import dump
####################
# uncomment the lines depending on your source (SQLite or csv)
# SQLLite
# connection = sqlite3.connect("loan_database") # connect to sql db
# df = pd.read_sql_query('SELECT * FROM joined_data;', connection)
# connection.execute("VACUUM;")
# from CSV (during development)
df = pd.read_csv('joined_data.csv', low_memory=False)
print('import done')
#####################
# replace special values with null based on Prosper documentation
# we aren't going to worry about mixed type features as a simplifying assumption
df.replace(to_replace=[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, 999],
           value = np.nan,
           inplace = True)
print("replaced special values")
# convert all column names to lowercase
df.columns = df.columns.str.strip().str.lower()
# drop some un-needed columns
# df.drop(['unnamed: 0', 'level_0', 'unnamed: 0.1'], inplace=True, axis=1)
df.drop(['unnamed: 0'], inplace=True, axis=1)
#drop Experian fields
exp_fields_to_drop = pd.read_excel('tu_exp_fields.xlsx', sheet_name='EXP')
exp_fields_to_drop = exp_fields_to_drop['Field']
df.drop(exp_fields_to_drop, inplace=True, axis=1)
# create year column & leave as string (will one-hot encode later)
df['year'] = df['loan_origination_date'].str[:4]
# store as a vector for filter later
year = df['loan_origination_date'].str[:4].astype(int)
# drop columns with 'date' in name since we have captured origination year
df.drop(df.filter(regex='date').columns, inplace=True, axis=1)
df.drop(df.filter(regex='paid').columns, inplace=True, axis=1)
print('Removed dates and paid columns')
# create training dataframe
# we still need to keep to the side to identify records later
loan_numbers = df['loan_number']
# create default flag vector
# NOTE(review): loan_status == 2 is treated as "defaulted" — confirm against
# the Prosper status-code documentation.
default_flag = np.where(df['loan_status'] == 2, 1, 0)
# remove columns we know are not known at origination or that we do not want in model
# NOTE(review): 'days_past_due' and 'investment_type_description' appear twice
# in this list (harmless to pandas, but worth de-duplicating).
df.drop(['age_in_months', 'days_past_due', 'loan_number', 'days_past_due', 'principal_balance',
         'debt_sale_proceeds_received', 'next_payment_due_amount', 'loan_default_reason',
         'loan_default_reason_description', 'index', 'member_key', 'listing_number', 'amount_funded',
         'amount_remaining', 'percent_funded', 'partial_funding_indicator', 'funding_threshold',
         'estimated_return', 'estimated_loss_rate', 'lender_yield', 'effective_yield', 'listing_category_id',
         'income_range', 'lender_indicator', 'group_indicator', 'group_name', 'channel_code',
         'amount_participation', 'investment_typeid', 'investment_type_description', 'loan_status',
         'loan_status_description', 'listing_status_reason', 'borrower_city', 'borrower_metropolitan_area',
         'first_recorded_credit_line', 'investment_type_description', 'tuficorange', 'listing_term', 'listing_amount',
         'borrower_apr']
        , inplace=True
        , axis=1)
# identify non numeric columns to one-hot encode
str_cols = list(df.select_dtypes(include=['object', 'string']).columns)
#print(str_cols)
# add loan term to features to one-hot encode. We want to treat as categorical since only three possible terms.
str_cols.append('term')
# write function to one-hot encode specific features
def encode_and_bind(original_dataframe, feature_to_encode):
    """One-hot encode a single column (including a NaN indicator column) and
    splice the dummy columns into the frame, dropping the original column.

    Returns a new DataFrame; the input frame is not modified.
    """
    one_hot = pd.get_dummies(original_dataframe[[feature_to_encode]], dummy_na=True)
    combined = pd.concat([original_dataframe, one_hot], axis=1)
    return combined.drop([feature_to_encode], axis=1)
# perform one hot encoding on string features
for feature in str_cols:
    df = encode_and_bind(df, feature)
print('Finished One-Hot encoding')
# filter to 2017 and beyond since that is when TransUnion started being used
# (keep the full frame and flag around for the final scoring pass)
full_df = df
full_default_flag = default_flag
# default_flag = default_flag[df['year'].astype(int) >= 2017]
default_flag = default_flag[year >= 2017]
# df = df[df['year'].astype(int) >= 2017]
df = df[year >= 2017]
print('Finished filtering by year')
#capture feature names to ID later
feature_names = pd.Series(df.columns.values)
feature_names.to_csv('feature_names_considered.csv', index=False)
# dump(feature_names, open('feature_names.pkl', 'wb'))
# filter by prosper rating — one model is fit per rating below
df_AA = df[df['prosper_rating_AA'] == 1]
df_A = df[df['prosper_rating_A'] == 1]
df_B = df[df['prosper_rating_B'] == 1]
df_C = df[df['prosper_rating_C'] == 1]
df_D = df[df['prosper_rating_D'] == 1]
df_E = df[df['prosper_rating_E'] == 1]
df_HR = df[df['prosper_rating_HR'] == 1]
# convert to array to pass to the model
df_AA = df_AA.values
df_A = df_A.values
df_B = df_B.values
df_C = df_C.values
df_D = df_D.values
df_E = df_E.values
df_HR = df_HR.values
# Fill n/a and inf values with 0 now that missing flag is set
# (the dummy_na columns from encode_and_bind carry the missingness signal)
df_AA[~np.isfinite(df_AA)] = 0
df_A[~np.isfinite(df_A)] = 0
df_B[~np.isfinite(df_B)] = 0
df_C[~np.isfinite(df_C)] = 0
df_D[~np.isfinite(df_D)] = 0
df_E[~np.isfinite(df_E)] = 0
df_HR[~np.isfinite(df_HR)] = 0
print('Defined model datasets done')
# start modeling
# define model hyperparameters and cv
def logistic_cv(x_train, y_true, class_wgts, folds=5, regs=(.05,), max_iterations=500):
    """Fit an L1-penalized logistic regression, choosing the inverse
    regularization strength C by cross-validation.

    Parameters
    ----------
    x_train : array-like of shape (n_samples, n_features)
        Scaled training features.
    y_true : array-like of shape (n_samples,)
        Binary default flags.
    class_wgts : dict
        Class-weight mapping (e.g. {0: .04, 1: .96}) to counter class imbalance.
    folds : int, default 5
        Number of cross-validation folds.
    regs : sequence of float, default (.05,)
        Candidate C values passed to LogisticRegressionCV's ``Cs``.
        FIX: previously a mutable list default (``[.05]``) — a classic Python
        pitfall; a tuple default is safe and backward-compatible.
    max_iterations : int, default 500
        Maximum saga solver iterations.

    Returns
    -------
    sklearn.linear_model.LogisticRegressionCV
        Fitted model; scored by f1, fixed random_state for reproducibility.
    """
    return LogisticRegressionCV(Cs=list(regs), cv=folds, penalty='l1', class_weight=class_wgts,
                                scoring='f1', max_iter=max_iterations, solver='saga',
                                random_state=1990).fit(x_train, y_true)
# find optimal class weights and regularization strength
# (gsc is an alternative GridSearchCV path — the commented-out `gsc.fit`
# lines below were used to tune the per-rating hyperparameters)
weights = np.linspace(0.04, 0.07, 4)
regs = [.01, .05, .1]
gsc = GridSearchCV(
    estimator=LogisticRegression(),
    param_grid={
        'class_weight': [{0: x, 1: 1.0-x} for x in weights], 'C': regs, 'penalty': ['l1'], 'random_state': [1990],
        'solver': ['saga'], 'max_iter': [750]
    },
    scoring='f1',
    cv=3
)
# prosper rating AA — fit scaler + model, then print the nonzero (L1-selected)
# coefficients mapped back to feature names
scaler_AA = StandardScaler().fit(df_AA)
train = scaler_AA.transform(df_AA)
y = default_flag[df['prosper_rating_AA'] == 1]
model_AA = logistic_cv(train, y, {0: .04, 1: .96}, folds=5, regs = [.01], max_iterations=750)
features_AA = np.where(model_AA.coef_ != 0)
print('The AA model variables & coefficients are: ',
      list(zip(np.array(feature_names)[features_AA[1].astype(int)],
               model_AA.coef_[np.where(model_AA.coef_ != 0)])))
# uncomment the next two lines if you want to save the model and scaler
# dump(model_AA, open('model_AA.pkl', 'wb'))
# dump(scaler_AA, open('scaler_AA.pkl', 'wb'))
# prosper rating A
scaler_A = StandardScaler().fit(df_A)
train = scaler_A.transform(df_A)
y = default_flag[df['prosper_rating_A'] == 1]
# model_A = gsc.fit(train, y)
model_A = logistic_cv(train, y, {0: .04, 1: .96}, folds=5, regs = [.01], max_iterations=750)
features_A = np.where(model_A.coef_ != 0)
print('The A model variables & coefficients are: ',
      list(zip(np.array(feature_names)[features_A[1].astype(int)],
               model_A.coef_[np.where(model_A.coef_ != 0)])))
# uncomment the next two lines if you want to save the model and scaler
# dump(model_A, open('model_A.pkl', 'wb'))
# dump(scaler_A, open('scaler_A.pkl', 'wb'))
# prosper rating B
scaler_B = StandardScaler().fit(df_B)
train = scaler_B.transform(df_B)
y = default_flag[df['prosper_rating_B'] == 1]
model_B = logistic_cv(train, y, {0: .04, 1: .96}, folds=5, regs = [.01], max_iterations=500)
# model_B = gsc.fit(train, y)
features_B = np.where(model_B.coef_ != 0)
print('The B model variables & coefficients are: ',
      list(zip(np.array(feature_names)[features_B[1].astype(int)],
               model_B.coef_[np.where(model_B.coef_ != 0)])))
# uncomment the next two lines if you want to save the model and scaler
# dump(model_B, open('model_B.pkl', 'wb'))
# dump(scaler_B, open('scaler_B.pkl', 'wb'))
# prosper rating C
scaler_C = StandardScaler().fit(df_C)
train = scaler_C.transform(df_C)
y = default_flag[df['prosper_rating_C'] == 1]
model_C = logistic_cv(train, y, {0: .04, 1: .96}, folds=5, regs = [.01], max_iterations=500)
# model_C = gsc.fit(train, y)
features_C = np.where(model_C.coef_ != 0)
print('The C model variables & coefficients are: ',
      list(zip(np.array(feature_names)[features_C[1].astype(int)],
               model_C.coef_[np.where(model_C.coef_ != 0)])))
# uncomment the next two lines if you want to save the model and scaler
# dump(model_C, open('model_C.pkl', 'wb'))
# dump(scaler_C, open('scaler_C.pkl', 'wb'))
# prosper rating D
scaler_D = StandardScaler().fit(df_D)
train = scaler_D.transform(df_D)
y = default_flag[df['prosper_rating_D'] == 1]
model_D = logistic_cv(train, y, {0: .04, 1: .96}, folds=5, regs = [.01], max_iterations=750)
# model_D = gsc.fit(train, y)
features_D = np.where(model_D.coef_ != 0)
print('The D model variables & coefficients are: ',
      list(zip(np.array(feature_names)[features_D[1].astype(int)],
               model_D.coef_[np.where(model_D.coef_ != 0)])))
# uncomment the next two lines if you want to save the model and scaler
# dump(model_D, open('model_D.pkl', 'wb'))
# dump(scaler_D, open('scaler_D.pkl', 'wb'))
# prosper rating E
scaler_E = StandardScaler().fit(df_E)
train = scaler_E.transform(df_E)
y = default_flag[df['prosper_rating_E'] == 1]
model_E = logistic_cv(train, y, {0: .04, 1: .96}, folds=5, regs = [.05])
#model_E = gsc.fit(train, y)
features_E = np.where(model_E.coef_ != 0)
print('The E model variables & coefficients are: ',
      list(zip(np.array(feature_names)[features_E[1].astype(int)],
               model_E.coef_[np.where(model_E.coef_ != 0)])))
# uncomment the next two lines if you want to save the model and scaler
# dump(model_E, open('model_E.pkl', 'wb'))
# dump(scaler_E, open('scaler_E.pkl', 'wb'))
# prosper rating HR
scaler_HR = StandardScaler().fit(df_HR)
train = scaler_HR.transform(df_HR)
y = default_flag[df['prosper_rating_HR'] == 1]
model_HR = logistic_cv(train, y, {0: .04, 1: .96}, folds=5, regs = [.1], max_iterations = 1000)
# model_HR = gsc.fit(train, y)
features_HR = np.where(model_HR.coef_ != 0)
print('The HR model variables & coefficients are: ',
      list(zip(np.array(feature_names)[features_HR[1].astype(int)],
               model_HR.coef_[np.where(model_HR.coef_ != 0)])))
# uncomment the next two lines if you want to save the model and scaler
# dump(model_HR, open('model_HR.pkl', 'wb'))
# dump(scaler_HR, open('scaler_HR.pkl', 'wb'))
### PROBABILITIES ARE BIASED, BUT CAN BE USED FOR THRESHOLDS
# Score the FULL (unfiltered) frame with the per-rating models and convert
# default probabilities into within-rating terciles ("notches").
full_df[~np.isfinite(full_df)] = 0
train = full_df
pred = dict.fromkeys(['AA', 'A', 'B', 'C', 'D', 'E', 'HR', 'nan'])
# P(default) for each rating's loans, from that rating's model/scaler.
# NOTE(review): loans with no rating ('nan') are scored with the C model —
# presumably as a mid-risk stand-in; confirm that choice.
pred['AA'] = model_AA.predict_proba(scaler_AA.transform(train[train['prosper_rating_AA'] == 1].values))[:, 1]
pred['A'] = model_A.predict_proba(scaler_A.transform(train[train['prosper_rating_A'] == 1].values))[:, 1]
pred['B'] = model_B.predict_proba(scaler_B.transform(train[train['prosper_rating_B'] == 1].values))[:, 1]
pred['C'] = model_C.predict_proba(scaler_C.transform(train[train['prosper_rating_C'] == 1].values))[:, 1]
pred['D'] = model_D.predict_proba(scaler_D.transform(train[train['prosper_rating_D'] == 1].values))[:, 1]
pred['E'] = model_E.predict_proba(scaler_E.transform(train[train['prosper_rating_E'] == 1].values))[:, 1]
pred['HR'] = model_HR.predict_proba(scaler_HR.transform(train[train['prosper_rating_HR'] == 1].values))[:, 1]
pred['nan'] = model_C.predict_proba(scaler_C.transform(train[train['prosper_rating_nan'] == 1].values))[:, 1]
# Tercile notching: lowest P(default) third = 'Plus', highest = 'Minus'.
# NOTE(review): duplicates='drop' with a fixed 3-label list raises if qcut
# actually drops a bin edge — verify on data with many tied probabilities.
pred['AA'] = pd.qcut(pred['AA'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')
pred['A'] = pd.qcut(pred['A'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')
pred['B'] = pd.qcut(pred['B'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')
pred['C'] = pd.qcut(pred['C'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')
pred['D'] = pd.qcut(pred['D'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')
pred['E'] = pd.qcut(pred['E'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')
pred['HR'] = pd.qcut(pred['HR'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')
pred['nan'] = pd.qcut(pred['nan'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')
print('Created final predictions')
# final = full_df.values
# Write the notch for every loan back onto the full frame, rating by rating.
full_df['predict'] = 0
full_df.loc[full_df['prosper_rating_AA'] == 1, 'predict'] = pred['AA']
full_df.loc[full_df['prosper_rating_A'] == 1, 'predict'] = pred['A']
full_df.loc[full_df['prosper_rating_B'] == 1, 'predict'] = pred['B']
full_df.loc[full_df['prosper_rating_C'] == 1, 'predict'] = pred['C']
full_df.loc[full_df['prosper_rating_D'] == 1, 'predict'] = pred['D']
full_df.loc[full_df['prosper_rating_E'] == 1, 'predict'] = pred['E']
full_df.loc[full_df['prosper_rating_HR'] == 1, 'predict'] = pred['HR']
full_df.loc[full_df['prosper_rating_nan'] == 1, 'predict'] = pred['nan']
print(full_df['predict'].head(10))
# Persist loan_number -> notch mapping.
test = pd.DataFrame(zip(loan_numbers, full_df['predict']))
test.to_csv('notches.csv')
# full_df.drop(['predict'], axis=1, inplace=True)
| [
"sklearn.preprocessing.StandardScaler",
"pandas.read_csv",
"pandas.get_dummies",
"numpy.isfinite",
"pandas.read_excel",
"sklearn.linear_model.LogisticRegressionCV",
"sklearn.linear_model.LogisticRegression",
"numpy.where",
"numpy.array",
"pandas.Series",
"numpy.linspace",
"pandas.qcut",
"pan... | [((585, 633), 'pandas.read_csv', 'pd.read_csv', (['"""joined_data.csv"""'], {'low_memory': '(False)'}), "('joined_data.csv', low_memory=False)\n", (596, 633), True, 'import pandas as pd\n'), ((1268, 1321), 'pandas.read_excel', 'pd.read_excel', (['"""tu_exp_fields.xlsx"""'], {'sheet_name': '"""EXP"""'}), "('tu_exp_fields.xlsx', sheet_name='EXP')\n", (1281, 1321), True, 'import pandas as pd\n'), ((2043, 2081), 'numpy.where', 'np.where', (["(df['loan_status'] == 2)", '(1)', '(0)'], {}), "(df['loan_status'] == 2, 1, 0)\n", (2051, 2081), True, 'import numpy as np\n'), ((4287, 4315), 'pandas.Series', 'pd.Series', (['df.columns.values'], {}), '(df.columns.values)\n', (4296, 4315), True, 'import pandas as pd\n'), ((5657, 5683), 'numpy.linspace', 'np.linspace', (['(0.04)', '(0.07)', '(4)'], {}), '(0.04, 0.07, 4)\n', (5668, 5683), True, 'import numpy as np\n'), ((6227, 6256), 'numpy.where', 'np.where', (['(model_AA.coef_ != 0)'], {}), '(model_AA.coef_ != 0)\n', (6235, 6256), True, 'import numpy as np\n'), ((6880, 6908), 'numpy.where', 'np.where', (['(model_A.coef_ != 0)'], {}), '(model_A.coef_ != 0)\n', (6888, 6908), True, 'import numpy as np\n'), ((7525, 7553), 'numpy.where', 'np.where', (['(model_B.coef_ != 0)'], {}), '(model_B.coef_ != 0)\n', (7533, 7553), True, 'import numpy as np\n'), ((8169, 8197), 'numpy.where', 'np.where', (['(model_C.coef_ != 0)'], {}), '(model_C.coef_ != 0)\n', (8177, 8197), True, 'import numpy as np\n'), ((8813, 8841), 'numpy.where', 'np.where', (['(model_D.coef_ != 0)'], {}), '(model_D.coef_ != 0)\n', (8821, 8841), True, 'import numpy as np\n'), ((9436, 9464), 'numpy.where', 'np.where', (['(model_E.coef_ != 0)'], {}), '(model_E.coef_ != 0)\n', (9444, 9464), True, 'import numpy as np\n'), ((10091, 10120), 'numpy.where', 'np.where', (['(model_HR.coef_ != 0)'], {}), '(model_HR.coef_ != 0)\n', (10099, 10120), True, 'import numpy as np\n'), ((11527, 11603), 'pandas.qcut', 'pd.qcut', (["pred['AA']"], {'q': '(3)', 'labels': "['Plus', 'Mid', 
'Minus']", 'duplicates': '"""drop"""'}), "(pred['AA'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')\n", (11534, 11603), True, 'import pandas as pd\n'), ((11616, 11691), 'pandas.qcut', 'pd.qcut', (["pred['A']"], {'q': '(3)', 'labels': "['Plus', 'Mid', 'Minus']", 'duplicates': '"""drop"""'}), "(pred['A'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')\n", (11623, 11691), True, 'import pandas as pd\n'), ((11704, 11779), 'pandas.qcut', 'pd.qcut', (["pred['B']"], {'q': '(3)', 'labels': "['Plus', 'Mid', 'Minus']", 'duplicates': '"""drop"""'}), "(pred['B'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')\n", (11711, 11779), True, 'import pandas as pd\n'), ((11792, 11867), 'pandas.qcut', 'pd.qcut', (["pred['C']"], {'q': '(3)', 'labels': "['Plus', 'Mid', 'Minus']", 'duplicates': '"""drop"""'}), "(pred['C'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')\n", (11799, 11867), True, 'import pandas as pd\n'), ((11880, 11955), 'pandas.qcut', 'pd.qcut', (["pred['D']"], {'q': '(3)', 'labels': "['Plus', 'Mid', 'Minus']", 'duplicates': '"""drop"""'}), "(pred['D'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')\n", (11887, 11955), True, 'import pandas as pd\n'), ((11968, 12043), 'pandas.qcut', 'pd.qcut', (["pred['E']"], {'q': '(3)', 'labels': "['Plus', 'Mid', 'Minus']", 'duplicates': '"""drop"""'}), "(pred['E'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')\n", (11975, 12043), True, 'import pandas as pd\n'), ((12057, 12133), 'pandas.qcut', 'pd.qcut', (["pred['HR']"], {'q': '(3)', 'labels': "['Plus', 'Mid', 'Minus']", 'duplicates': '"""drop"""'}), "(pred['HR'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')\n", (12064, 12133), True, 'import pandas as pd\n'), ((12148, 12225), 'pandas.qcut', 'pd.qcut', (["pred['nan']"], {'q': '(3)', 'labels': "['Plus', 'Mid', 'Minus']", 'duplicates': '"""drop"""'}), "(pred['nan'], q=3, labels=['Plus', 'Mid', 'Minus'], duplicates='drop')\n", (12155, 12225), True, 'import pandas as 
pd\n'), ((3555, 3625), 'pandas.get_dummies', 'pd.get_dummies', (['original_dataframe[[feature_to_encode]]'], {'dummy_na': '(True)'}), '(original_dataframe[[feature_to_encode]], dummy_na=True)\n', (3569, 3625), True, 'import pandas as pd\n'), ((3639, 3687), 'pandas.concat', 'pd.concat', (['[original_dataframe, dummies]'], {'axis': '(1)'}), '([original_dataframe, dummies], axis=1)\n', (3648, 3687), True, 'import pandas as pd\n'), ((4990, 5008), 'numpy.isfinite', 'np.isfinite', (['df_AA'], {}), '(df_AA)\n', (5001, 5008), True, 'import numpy as np\n'), ((5020, 5037), 'numpy.isfinite', 'np.isfinite', (['df_A'], {}), '(df_A)\n', (5031, 5037), True, 'import numpy as np\n'), ((5049, 5066), 'numpy.isfinite', 'np.isfinite', (['df_B'], {}), '(df_B)\n', (5060, 5066), True, 'import numpy as np\n'), ((5078, 5095), 'numpy.isfinite', 'np.isfinite', (['df_C'], {}), '(df_C)\n', (5089, 5095), True, 'import numpy as np\n'), ((5107, 5124), 'numpy.isfinite', 'np.isfinite', (['df_D'], {}), '(df_D)\n', (5118, 5124), True, 'import numpy as np\n'), ((5136, 5153), 'numpy.isfinite', 'np.isfinite', (['df_E'], {}), '(df_E)\n', (5147, 5153), True, 'import numpy as np\n'), ((5166, 5184), 'numpy.isfinite', 'np.isfinite', (['df_HR'], {}), '(df_HR)\n', (5177, 5184), True, 'import numpy as np\n'), ((5740, 5760), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (5758, 5760), False, 'from sklearn.linear_model import LogisticRegression\n'), ((6009, 6025), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (6023, 6025), False, 'from sklearn.preprocessing import StandardScaler\n'), ((6638, 6654), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (6652, 6654), False, 'from sklearn.preprocessing import StandardScaler\n'), ((7283, 7299), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (7297, 7299), False, 'from sklearn.preprocessing import StandardScaler\n'), ((7927, 7943), 
'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (7941, 7943), False, 'from sklearn.preprocessing import StandardScaler\n'), ((8571, 8587), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (8585, 8587), False, 'from sklearn.preprocessing import StandardScaler\n'), ((9215, 9231), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (9229, 9231), False, 'from sklearn.preprocessing import StandardScaler\n'), ((9840, 9856), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (9854, 9856), False, 'from sklearn.preprocessing import StandardScaler\n'), ((10543, 10563), 'numpy.isfinite', 'np.isfinite', (['full_df'], {}), '(full_df)\n', (10554, 10563), True, 'import numpy as np\n'), ((5383, 5543), 'sklearn.linear_model.LogisticRegressionCV', 'LogisticRegressionCV', ([], {'Cs': 'regs', 'cv': 'folds', 'penalty': '"""l1"""', 'class_weight': 'class_wgts', 'scoring': '"""f1"""', 'max_iter': 'max_iterations', 'solver': '"""saga"""', 'random_state': '(1990)'}), "(Cs=regs, cv=folds, penalty='l1', class_weight=\n class_wgts, scoring='f1', max_iter=max_iterations, solver='saga',\n random_state=1990)\n", (5403, 5543), False, 'from sklearn.linear_model import LogisticRegressionCV\n'), ((6325, 6348), 'numpy.array', 'np.array', (['feature_names'], {}), '(feature_names)\n', (6333, 6348), True, 'import numpy as np\n'), ((6408, 6437), 'numpy.where', 'np.where', (['(model_AA.coef_ != 0)'], {}), '(model_AA.coef_ != 0)\n', (6416, 6437), True, 'import numpy as np\n'), ((6976, 6999), 'numpy.array', 'np.array', (['feature_names'], {}), '(feature_names)\n', (6984, 6999), True, 'import numpy as np\n'), ((7057, 7085), 'numpy.where', 'np.where', (['(model_A.coef_ != 0)'], {}), '(model_A.coef_ != 0)\n', (7065, 7085), True, 'import numpy as np\n'), ((7621, 7644), 'numpy.array', 'np.array', (['feature_names'], {}), '(feature_names)\n', (7629, 7644), True, 'import numpy as np\n'), ((7702, 7730), 
'numpy.where', 'np.where', (['(model_B.coef_ != 0)'], {}), '(model_B.coef_ != 0)\n', (7710, 7730), True, 'import numpy as np\n'), ((8265, 8288), 'numpy.array', 'np.array', (['feature_names'], {}), '(feature_names)\n', (8273, 8288), True, 'import numpy as np\n'), ((8346, 8374), 'numpy.where', 'np.where', (['(model_C.coef_ != 0)'], {}), '(model_C.coef_ != 0)\n', (8354, 8374), True, 'import numpy as np\n'), ((8909, 8932), 'numpy.array', 'np.array', (['feature_names'], {}), '(feature_names)\n', (8917, 8932), True, 'import numpy as np\n'), ((8990, 9018), 'numpy.where', 'np.where', (['(model_D.coef_ != 0)'], {}), '(model_D.coef_ != 0)\n', (8998, 9018), True, 'import numpy as np\n'), ((9532, 9555), 'numpy.array', 'np.array', (['feature_names'], {}), '(feature_names)\n', (9540, 9555), True, 'import numpy as np\n'), ((9613, 9641), 'numpy.where', 'np.where', (['(model_E.coef_ != 0)'], {}), '(model_E.coef_ != 0)\n', (9621, 9641), True, 'import numpy as np\n'), ((10189, 10212), 'numpy.array', 'np.array', (['feature_names'], {}), '(feature_names)\n', (10197, 10212), True, 'import numpy as np\n'), ((10272, 10301), 'numpy.where', 'np.where', (['(model_HR.coef_ != 0)'], {}), '(model_HR.coef_ != 0)\n', (10280, 10301), True, 'import numpy as np\n')] |
# - <NAME> <<EMAIL>>
"""Flask app for annotating ROIs."""
import os
import json
from glob import glob
import flask
import numpy as np
from . import utils as ut
APP = flask.Flask("plseg-roi")
@APP.route("/", methods=["GET"])
def index():
"""Return html file."""
return flask.send_file(APP.ldir+"/cset.html", cache_timeout=0)
@APP.route("/cset.js", methods=["GET"])
def csetjs():
"""Return javascript file."""
return flask.send_file(APP.ldir+"/cset.js", cache_timeout=0)
@APP.route("/img/<imid>", methods=["GET"])
def getimg(imid):
"""Return image given directory id + skip."""
imid = [int(f) for f in imid.split(":")]
flist = ut.getimglist(APP.srcdir+"/"+APP.flist[imid[0]])
return flask.send_file(flist[imid[1]], cache_timeout=0)
@APP.route("/save/<int:seqid>", methods=["POST"])
def saveinfo(seqid):
"""Save to json."""
flask.request.get_data()
data = json.loads(flask.request.data)
info = {'source': APP.srcdir+"/"+APP.flist[seqid]}
destdir = APP.destdir+"/"+APP.flist[seqid]
info['skip'], info['scale'] = data['skip'], data['scale']
xlim, ylim = data['xlim'], data['ylim']
if xlim[0] > xlim[1]:
xlim = [xlim[1], xlim[0]]
if ylim[0] > ylim[1]:
ylim = [ylim[1], ylim[0]]
flist = ut.getimglist(info['source'])
img = ut.imread(flist[info['skip']])
shape = [int(img.shape[0]*info['scale']/100),
int(img.shape[1]*info['scale']/100)]
xls = np.asarray(xlim, dtype=np.float64) * shape[1]
yls = np.asarray(ylim, dtype=np.float64) * shape[0]
info['xlim'] = [int(xls[0]), int(xls[1])]
info['ylim'] = [int(yls[0]), int(yls[1])]
print(json.dumps(info))
if not os.path.isdir(destdir):
try:
os.mkdir(destdir)
except Exception:
return flask.json.jsonify("Could not create "+destdir)
try:
_f = open(destdir+"/caopt.json", "w")
_f.write(json.dumps(info))
_f.close()
return flask.json.jsonify("Saved to "+destdir+"/caopt.json")
except Exception:
return flask.json.jsonify("Error writing to "+destdir+"/caopt.json")
@APP.route("/info/<int:seqid>", methods=["GET"])
def getinfo(seqid):
"""Get information about specific sequence directory."""
flist = ut.getimglist(APP.srcdir+"/"+APP.flist[seqid])
info = {'smax': len(flist)}
info['saved'] = False
if os.path.isfile(APP.destdir+"/"+APP.flist[seqid]+'/caopt.json'):
try:
with open(APP.destdir+"/"+APP.flist[seqid]
+ '/caopt.json', 'r') as _f:
data = json.load(_f)
info['scale'] = data['scale']
info['skip'] = data['skip']
img = ut.imread(flist[info['skip']])
shape = [int(img.shape[0]*info['scale']/100),
int(img.shape[1]*info['scale']/100)]
xls = np.asarray(data['xlim'], dtype=np.float64) / shape[1]
yls = np.asarray(data['ylim'], dtype=np.float64) / shape[0]
info['xlim'], info['ylim'] = list(xls), list(yls)
info['saved'] = True
except Exception:
info['saved'] = False
return flask.json.jsonify(info)
@APP.route("/dlist", methods=["GET"])
def dlist():
"""Return list of sequence sub-directories"""
return flask.json.jsonify(APP.flist)
def main(srcdir, destdir, port=8888):
"""Run server"""
APP.ldir = "/".join(__file__.split('/')[:-1]) + '/jshtml'
APP.srcdir = srcdir.rstrip("/")
APP.destdir = destdir.rstrip("/")
APP.flist = sorted([f.split('/')[-1] for f in glob(srcdir+'/*')
if os.path.isdir(f)])
APP.run(port=port)
| [
"os.mkdir",
"json.load",
"json.loads",
"os.path.isdir",
"numpy.asarray",
"flask.Flask",
"flask.json.jsonify",
"json.dumps",
"os.path.isfile",
"flask.request.get_data",
"glob.glob",
"flask.send_file"
] | [((169, 193), 'flask.Flask', 'flask.Flask', (['"""plseg-roi"""'], {}), "('plseg-roi')\n", (180, 193), False, 'import flask\n'), ((281, 338), 'flask.send_file', 'flask.send_file', (["(APP.ldir + '/cset.html')"], {'cache_timeout': '(0)'}), "(APP.ldir + '/cset.html', cache_timeout=0)\n", (296, 338), False, 'import flask\n'), ((438, 493), 'flask.send_file', 'flask.send_file', (["(APP.ldir + '/cset.js')"], {'cache_timeout': '(0)'}), "(APP.ldir + '/cset.js', cache_timeout=0)\n", (453, 493), False, 'import flask\n'), ((722, 770), 'flask.send_file', 'flask.send_file', (['flist[imid[1]]'], {'cache_timeout': '(0)'}), '(flist[imid[1]], cache_timeout=0)\n', (737, 770), False, 'import flask\n'), ((872, 896), 'flask.request.get_data', 'flask.request.get_data', ([], {}), '()\n', (894, 896), False, 'import flask\n'), ((908, 938), 'json.loads', 'json.loads', (['flask.request.data'], {}), '(flask.request.data)\n', (918, 938), False, 'import json\n'), ((2392, 2460), 'os.path.isfile', 'os.path.isfile', (["(APP.destdir + '/' + APP.flist[seqid] + '/caopt.json')"], {}), "(APP.destdir + '/' + APP.flist[seqid] + '/caopt.json')\n", (2406, 2460), False, 'import os\n'), ((3170, 3194), 'flask.json.jsonify', 'flask.json.jsonify', (['info'], {}), '(info)\n', (3188, 3194), False, 'import flask\n'), ((3309, 3338), 'flask.json.jsonify', 'flask.json.jsonify', (['APP.flist'], {}), '(APP.flist)\n', (3327, 3338), False, 'import flask\n'), ((1463, 1497), 'numpy.asarray', 'np.asarray', (['xlim'], {'dtype': 'np.float64'}), '(xlim, dtype=np.float64)\n', (1473, 1497), True, 'import numpy as np\n'), ((1519, 1553), 'numpy.asarray', 'np.asarray', (['ylim'], {'dtype': 'np.float64'}), '(ylim, dtype=np.float64)\n', (1529, 1553), True, 'import numpy as np\n'), ((1668, 1684), 'json.dumps', 'json.dumps', (['info'], {}), '(info)\n', (1678, 1684), False, 'import json\n'), ((1697, 1719), 'os.path.isdir', 'os.path.isdir', (['destdir'], {}), '(destdir)\n', (1710, 1719), False, 'import os\n'), ((1982, 2039), 
'flask.json.jsonify', 'flask.json.jsonify', (["('Saved to ' + destdir + '/caopt.json')"], {}), "('Saved to ' + destdir + '/caopt.json')\n", (2000, 2039), False, 'import flask\n'), ((1746, 1763), 'os.mkdir', 'os.mkdir', (['destdir'], {}), '(destdir)\n', (1754, 1763), False, 'import os\n'), ((1930, 1946), 'json.dumps', 'json.dumps', (['info'], {}), '(info)\n', (1940, 1946), False, 'import json\n'), ((2073, 2138), 'flask.json.jsonify', 'flask.json.jsonify', (["('Error writing to ' + destdir + '/caopt.json')"], {}), "('Error writing to ' + destdir + '/caopt.json')\n", (2091, 2138), False, 'import flask\n'), ((1809, 1858), 'flask.json.jsonify', 'flask.json.jsonify', (["('Could not create ' + destdir)"], {}), "('Could not create ' + destdir)\n", (1827, 1858), False, 'import flask\n'), ((2598, 2611), 'json.load', 'json.load', (['_f'], {}), '(_f)\n', (2607, 2611), False, 'import json\n'), ((2877, 2919), 'numpy.asarray', 'np.asarray', (["data['xlim']"], {'dtype': 'np.float64'}), "(data['xlim'], dtype=np.float64)\n", (2887, 2919), True, 'import numpy as np\n'), ((2949, 2991), 'numpy.asarray', 'np.asarray', (["data['ylim']"], {'dtype': 'np.float64'}), "(data['ylim'], dtype=np.float64)\n", (2959, 2991), True, 'import numpy as np\n'), ((3587, 3606), 'glob.glob', 'glob', (["(srcdir + '/*')"], {}), "(srcdir + '/*')\n", (3591, 3606), False, 'from glob import glob\n'), ((3632, 3648), 'os.path.isdir', 'os.path.isdir', (['f'], {}), '(f)\n', (3645, 3648), False, 'import os\n')] |
"""
Title:
example_processing.py
Author:
<NAME> and <NAME>
Creation Date:
20170220
Last Modified:
20170220
Purpose:
This script serves as a representative example of our data processing
pipeline. This script reads in a set of csv files containing the output
from the MACSQuant Flow Cytomter (after being converted from the flow
cytometry standard .fcs format), peforms unsupervised gating, and computes
the measured fold-change in gene expression.
"""
# Import dependencies.
import os
import glob
import numpy as np
import pandas as pd
import scipy
# Import matplotlib stuff for plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# Seaborn, useful for graphics
import seaborn as sns
# Set the plotting style.
import mwc_induction_utils as mwc
# BUG FIX: the original read `mwc.set_plotting_style` (attribute access
# only, a no-op); the function must actually be called.
mwc.set_plotting_style()
# Define variables to use over the script
date = 20160825
username = 'mrazomej'
run = 'r2'

# List the target directory and keep only this date's csv files.
datadir = 'example_data/'
files = np.array(os.listdir(datadir))
csv_bool = np.array([str(date) in f and 'csv' in f for f in files])
files = files[csv_bool]

# define the patterns in the file names to read them
operator = 'O1'
energy = -15.3
rbs = np.array(['auto', 'delta', 'RBS1L',
                'RBS1', 'RBS1027', 'RBS446',
                'RBS1147', 'HG104'])
repressors = np.array([0, 0, 870, 610, 130, 62, 30, 11])

# Define the IPTG concentrations in units of µM.
concentrations = [0, 0.1, 5, 10, 25, 50, 75, 100, 250, 500, 1000, 5000]

# Define the parameter alpha for the automatic gating
alpha = 0.40

# Read the files and compute the mean YFP value.  Rows are collected in a
# plain list and converted once at the end: calling `df.append` inside the
# loop is quadratic and the method was removed in pandas 2.0.
rows = []
for c in concentrations:
    for j, strain in enumerate(rbs):
        try:
            # Find the file
            r_file = glob.glob(datadir + str(date) + '_' + run + '*' +
                               operator + '_' + strain + '_' + str(c) + 'uM' +
                               '*csv')
            print(r_file)
            # Read the csv file
            dataframe = pd.read_csv(r_file[0])
            # Apply an automatic bivariate gaussian gate to the log front
            # and side scattering
            data = mwc.auto_gauss_gate(dataframe, alpha,
                                        x_val='FSC-A', y_val='SSC-A',
                                        log=True)
            # Record the gated mean along with the strain metadata.
            rows.append([date, username, operator, energy,
                         strain, repressors[j], c,
                         data['FITC-A'].mean()])
        except Exception:
            # Conditions with missing or unreadable files are skipped.
            # (The original bare `except:` also swallowed KeyboardInterrupt.)
            pass
df = pd.DataFrame(rows, columns=['date', 'username', 'operator',
                             'binding_energy', 'rbs', 'repressors',
                             'IPTG_uM', 'mean_YFP_A'])

# Initialize an array to save the corrected YFP values.
mean_bgcorr_A = np.array([])
# Correct for the autofluorescence background
for i in np.arange(len(df)):
    data = df.loc[i]
    auto = df[(df.IPTG_uM == data.IPTG_uM) &
              (df.rbs == 'auto')].mean_YFP_A
    mean_bgcorr_A = np.append(mean_bgcorr_A, data.mean_YFP_A - auto)

mean_bgcorr_A = pd.Series(mean_bgcorr_A)
mean_bgcorr_A.name = 'mean_YFP_bgcorr_A'
# `join_axes` was removed from pd.concat; default index alignment is
# equivalent here because the Series shares the frame's 0..n-1 index.
df = pd.concat([df, mean_bgcorr_A], axis=1, join='inner')

mean_fc_A = np.array([])
# Compute the fold-change
for i in np.arange(len(df)):
    data = df.loc[i]
    delta = df[(df.IPTG_uM == data.IPTG_uM) &
               (df.rbs == 'delta')].mean_YFP_bgcorr_A
    mean_fc_A = np.append(mean_fc_A, data.mean_YFP_bgcorr_A / delta)

# Convert the fold-change to a pandas Series and attach it to the frame.
mean_fc_A = pd.Series(mean_fc_A)
mean_fc_A.name = 'fold_change_A'
df = pd.concat([df, mean_fc_A], axis=1, join='inner')

# Save the dataframe to disk as a csv including the comment header.
df.to_csv('example_nocomments_' + str(date) + '_' + run + '_' +
          operator + '_IPTG_titration_MACSQuant.csv', index=False)

filenames = ['./example_comments.txt', 'example_nocomments_' + str(date) + '_' + run +
             '_' + operator + '_IPTG_titration_MACSQuant.csv']

with open('./example_' + str(date) + '_' + run + '_' + operator +
          '_IPTG_titration_MACSQuant.csv', 'w') as output:
    for fname in filenames:
        with open(fname) as infile:
            output.write(infile.read())
| [
"pandas.DataFrame",
"os.listdir",
"mwc_induction_utils.auto_gauss_gate",
"pandas.read_csv",
"numpy.append",
"numpy.array",
"pandas.Series",
"pandas.concat"
] | [((1203, 1292), 'numpy.array', 'np.array', (["['auto', 'delta', 'RBS1L', 'RBS1', 'RBS1027', 'RBS446', 'RBS1147', 'HG104']"], {}), "(['auto', 'delta', 'RBS1L', 'RBS1', 'RBS1027', 'RBS446', 'RBS1147',\n 'HG104'])\n", (1211, 1292), True, 'import numpy as np\n'), ((1334, 1377), 'numpy.array', 'np.array', (['[0, 0, 870, 610, 130, 62, 30, 11]'], {}), '([0, 0, 870, 610, 130, 62, 30, 11])\n', (1342, 1377), True, 'import numpy as np\n'), ((1637, 1651), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1649, 1651), True, 'import pandas as pd\n'), ((2986, 2998), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2994, 2998), True, 'import numpy as np\n'), ((3272, 3296), 'pandas.Series', 'pd.Series', (['mean_bgcorr_A'], {}), '(mean_bgcorr_A)\n', (3281, 3296), True, 'import pandas as pd\n'), ((3343, 3417), 'pandas.concat', 'pd.concat', (['[df, mean_bgcorr_A]'], {'join_axes': '[df.index]', 'axis': '(1)', 'join': '"""inner"""'}), "([df, mean_bgcorr_A], join_axes=[df.index], axis=1, join='inner')\n", (3352, 3417), True, 'import pandas as pd\n'), ((3445, 3457), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3453, 3457), True, 'import numpy as np\n'), ((3766, 3786), 'pandas.Series', 'pd.Series', (['mean_fc_A'], {}), '(mean_fc_A)\n', (3775, 3786), True, 'import pandas as pd\n'), ((3825, 3895), 'pandas.concat', 'pd.concat', (['[df, mean_fc_A]'], {'join_axes': '[df.index]', 'axis': '(1)', 'join': '"""inner"""'}), "([df, mean_fc_A], join_axes=[df.index], axis=1, join='inner')\n", (3834, 3895), True, 'import pandas as pd\n'), ((989, 1008), 'os.listdir', 'os.listdir', (['datadir'], {}), '(datadir)\n', (999, 1008), False, 'import os\n'), ((1092, 1110), 'numpy.array', 'np.array', (['csv_bool'], {}), '(csv_bool)\n', (1100, 1110), True, 'import numpy as np\n'), ((3206, 3254), 'numpy.append', 'np.append', (['mean_bgcorr_A', '(data.mean_YFP_A - auto)'], {}), '(mean_bgcorr_A, data.mean_YFP_A - auto)\n', (3215, 3254), True, 'import numpy as np\n'), ((3651, 3703), 'numpy.append', 
'np.append', (['mean_fc_A', '(data.mean_YFP_bgcorr_A / delta)'], {}), '(mean_fc_A, data.mean_YFP_bgcorr_A / delta)\n', (3660, 3703), True, 'import numpy as np\n'), ((2084, 2106), 'pandas.read_csv', 'pd.read_csv', (['r_file[0]'], {}), '(r_file[0])\n', (2095, 2106), True, 'import pandas as pd\n'), ((2234, 2311), 'mwc_induction_utils.auto_gauss_gate', 'mwc.auto_gauss_gate', (['dataframe', 'alpha'], {'x_val': '"""FSC-A"""', 'y_val': '"""SSC-A"""', 'log': '(True)'}), "(dataframe, alpha, x_val='FSC-A', y_val='SSC-A', log=True)\n", (2253, 2311), True, 'import mwc_induction_utils as mwc\n')] |
# coding: utf-8
import tr
import sys, cv2, time, os
from PIL import Image, ImageDraw, ImageFont
import numpy
import csv
import getVerticalBorder
_BASEDIR = os.path.dirname(os.path.abspath(__file__))
os.chdir(_BASEDIR)
def get_table(ocr_results, row_x):
    """Arrange positioned OCR results into a table.

    Groups the OCR hits into rows by vertical proximity (centres whose y
    coordinates differ by less than 10 px belong to the same row), then
    assigns each hit to a column according to the x coordinates of the
    table's vertical rulings.

    :param ocr_results: iterable of [(cx, cy, w, h, angle), text] pairs
    :param row_x: sorted x coordinates of the table's vertical rulings
    :return: list of rows, each a list of len(row_x) - 1 cell strings
    """
    # Collapse every hit to [centre_x, centre_y, text].
    data_all = [[rect[0][0], rect[0][1], rect[1]] for rect in ocr_results]
    if not data_all:
        # No OCR hits at all: return an empty table.
        # (The original crashed here on `data_all[0]`.)
        return []
    # Split into rows of vertically adjacent hits.
    data_lines = []
    tmp_line = [data_all[0]]
    for prev, cur in zip(data_all, data_all[1:]):
        if abs(cur[1] - prev[1]) < 10:
            tmp_line.append(cur)
        else:
            data_lines.append(tmp_line)
            tmp_line = [cur]
    data_lines.append(tmp_line)
    # Assign each hit of a row to the column whose right ruling is the
    # first one to the right of the hit's centre; hits that land in the
    # same cell are concatenated.
    res = []
    for line in data_lines:
        cells = [""] * (len(row_x) - 1)
        for loc_x, _, value in line:
            loc_x = float(loc_x)
            for x_index in range(1, len(row_x)):
                if loc_x < row_x[x_index]:
                    cells[x_index - 1] += value
                    break
        res.append(cells)
    return res
def run_tr(img_path):
    '''
    Pre-process the image, then run tr OCR on it.

    Steps: fix EXIF orientation, scale the image relative to 1600 px,
    detect the table's vertical rulings, keep only the red channel
    (where red seal stamps are faintest), run tr, and arrange the OCR
    hits into a table with get_table().

    :param img_path: path of the image to process
    :return: table-shaped data (list of rows of cell strings)
    '''
    img_pil = Image.open(img_path)
    init_width = img_pil.width
    try:
        if hasattr(img_pil, '_getexif'):
            # 274 is the EXIF tag id for Orientation.
            orientation = 274
            exif = dict(img_pil._getexif().items())
            if exif[orientation] == 3:
                img_pil = img_pil.rotate(180, expand=True)
            elif exif[orientation] == 6:
                img_pil = img_pil.rotate(270, expand=True)
            elif exif[orientation] == 8:
                img_pil = img_pil.rotate(90, expand=True)
    except:
        # Images without EXIF data (or without an Orientation tag) are
        # used as-is.
        pass
    MAX_SIZE = 1600 # the image should preferably not exceed 1600 px
    # NOTE(review): when both sides are below MAX_SIZE, scale < 1 and the
    # image is *up*scaled -- confirm that this is intended.
    scale = max(img_pil.height / MAX_SIZE, img_pil.width / MAX_SIZE)
    new_width = int(img_pil.width / scale + 0.5)
    new_height = int(img_pil.height / scale + 0.5)
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
    # Image.LANCZOS there) -- this code assumes an older Pillow.
    img_pil = img_pil.resize((new_width, new_height), Image.ANTIALIAS)
    img_cv = cv2.cvtColor(numpy.asarray(img_pil), cv2.COLOR_RGB2BGR)
    row_x = getVerticalBorder.get_row_x(img_cv) # x coords of the table's vertical rulings
    print(row_x)
    print('len :::', len(img_pil.split()))
    img_pil_split = img_pil.split()
    img_pil_split_len = len(img_pil_split)
    if img_pil_split_len == 3:
        r, g, b = img_pil_split
        img_pil = r # keep the channel where the (red) seal is least visible
    elif img_pil_split_len == 4:
        r,g,b,a = img_pil_split
        img_pil = r
    threshold = 150 # binarization threshold, to further erase the seal
    # Lookup table for Image.point binarization: 0 below threshold, 1 above.
    # NOTE(review): currently unused -- the `point` call below is commented out.
    table = []
    for i in range(256):
        if i < threshold:
            table.append(0)
        else:
            table.append(1)
    if init_width > 500:
        print('init_width', init_width)
        # img_pil = img_pil.point(table, "1") # binarize the image
    ocr_results = tr.run(img_pil, flag=tr.FLAG_RECT) # run tr, get OCR results with positions
    res = get_table(ocr_results, row_x)
    return res
| [
"os.path.abspath",
"getVerticalBorder.get_row_x",
"numpy.asarray",
"tr.run",
"PIL.Image.open",
"os.chdir"
] | [((209, 227), 'os.chdir', 'os.chdir', (['_BASEDIR'], {}), '(_BASEDIR)\n', (217, 227), False, 'import sys, cv2, time, os\n'), ((181, 206), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (196, 206), False, 'import sys, cv2, time, os\n'), ((1935, 1955), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1945, 1955), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2826, 2861), 'getVerticalBorder.get_row_x', 'getVerticalBorder.get_row_x', (['img_cv'], {}), '(img_cv)\n', (2853, 2861), False, 'import getVerticalBorder\n'), ((3550, 3584), 'tr.run', 'tr.run', (['img_pil'], {'flag': 'tr.FLAG_RECT'}), '(img_pil, flag=tr.FLAG_RECT)\n', (3556, 3584), False, 'import tr\n'), ((2770, 2792), 'numpy.asarray', 'numpy.asarray', (['img_pil'], {}), '(img_pil)\n', (2783, 2792), False, 'import numpy\n')] |
"""Train script for dagger learning.
Currently uses SimpleAgent as the expert.
The training is performed on one processor,
but evaluation is run on multiple processors
TODO:
make code less redundant
if not using the value loss it will store and do many unnecessary operations
Example args:
python train_dagger.py --num-processes 16 --run-name a --how-train dagger \
--minibatch-size 5000 --num-steps 5000 --log-interval 10 --save-interval 10 \
--lr 0.005 --expert-prob 0.5 --num-steps-eval 500 --use-value-loss
The --use-value-loss setting makes it so that the value loss is considered.
The --stop-grads-value setting stops the gradients from the value loss in going
through the rest of the shared params of the network. Both default to false.
"""
from collections import defaultdict
import os
import random
import time
import numpy as np
from tensorboardX import SummaryWriter
import torch
from torch.autograd import Variable
from arguments import get_args
import dagger_agent
import envs as env_helpers
import networks
import utils
def train():
os.environ['OMP_NUM_THREADS'] = '1'
args = get_args()
assert(args.run_name)
print("\n###############")
print("args ", args)
print("##############\n")
if args.cuda:
torch.cuda.empty_cache()
num_training_per_episode = utils.validate_how_train(args)
how_train, config, num_agents, num_stack, num_steps, num_processes, \
num_epochs, reward_sharing, batch_size, num_mini_batch = \
utils.get_train_vars(args, num_training_per_episode)
assert(num_processes % 4 == 0), "Num Processes should be a multiple of " \
"four so that the distribution of positions is even."
obs_shape, action_space, character, board_size = env_helpers.get_env_info(config, num_stack)
training_agents = utils.load_agents(
obs_shape, action_space, num_training_per_episode, num_steps, args,
agent_type=dagger_agent.DaggerAgent, character=character, board_size=board_size)
agent = training_agents[0]
#####
# Logging helpers.
suffix = "{}.{}.{}.{}.nc{}.lr{}.mb{}.ned{}.prob{}.nopt{}.seed{}.maxaggr{}.pt" \
.format(args.run_name, args.how_train, config, args.model_str,
args.num_channels, args.lr, args.minibatch_size,
args.num_episodes_dagger, args.expert_prob,
args.dagger_epoch, args.seed,
args.max_aggregate_agent_states)
if args.state_directory_distribution:
suffix += ".%s" % args.state_directory_distribution
log_dir = os.path.join(args.log_dir, suffix)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
writer = SummaryWriter(log_dir)
start_epoch = agent.num_epoch
total_steps = agent.total_steps
num_episodes = agent.num_episodes
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
agent.cuda()
aggregate_agent_states = []
aggregate_expert_actions = []
aggregate_returns = []
cross_entropy_loss = torch.nn.CrossEntropyLoss()
dummy_states = torch.zeros(1,1)
dummy_masks = torch.zeros(1,1)
if args.cuda:
dummy_states = dummy_states.cuda()
dummy_masks = dummy_masks.cuda()
envs = env_helpers.make_train_envs(
config, how_train, args.seed, args.game_state_file, training_agents,
num_stack, num_processes, state_directory=args.state_directory,
state_directory_distribution=args.state_directory_distribution,
step_loss=args.step_loss, bomb_reward=args.bomb_reward,
item_reward=args.item_reward)
# [num_proc, num_frame*19, board_size, board_size]
agent_obs = torch.from_numpy(envs.reset()).float().squeeze(1)
if args.cuda:
agent_obs = agent_obs.cuda()
dummy_states_eval = torch.zeros(num_processes, 1)
dummy_masks_eval = torch.zeros(num_processes, 1)
if args.cuda:
dummy_states_eval = dummy_states_eval.cuda()
dummy_masks_eval = dummy_masks_eval.cuda()
episode_rewards = torch.zeros([num_training_per_episode,
num_processes, 1])
final_rewards = torch.zeros([num_training_per_episode,
num_processes, 1])
running_num_episodes = 0
cumulative_reward = 0
terminal_reward = 0
success_rate = 0
done = np.array([[False]])
agent_act_arr = []
expert_act_arr = []
start = time.time()
for num_epoch in range(start_epoch, num_epochs):
epoch_start_time = time.time()
if num_epoch > 0:
print("Avg Epoch Time: %.3f (%d)" % ((epoch_start_time - start)*1.0/num_epoch, num_epoch))
if args.anneal_expert_prob:
expert_prob = args.expert_prob - num_epoch * args.anneal_factor
else:
expert_prob = args.expert_prob
agent.set_eval()
agent_states_list = [[] for _ in range(num_processes)]
expert_actions_list = [[] for _ in range(num_processes)]
returns_list = [[] for _ in range(num_processes)]
########
# Collect data using DAGGER
########
count_episodes = 0
current_ep_len = [0 for _ in range(num_processes)]
while count_episodes < args.num_episodes_dagger:
expert_obs = envs.get_expert_obs()
expert_actions = envs.get_expert_actions(expert_obs, 'ComplexAgent')
for num_process in range(num_processes):
agent_states_list[num_process].append(agent_obs[num_process])
expert_actions_list[num_process].append(expert_actions[num_process])
# expert_actions_list.append(expert_action_tensor)
if random.random() <= expert_prob:
env_actions = expert_actions
expert_act_arr.append(expert_actions)
else:
result = agent.act_on_data(
Variable(agent_obs, volatile=True),
Variable(dummy_states, volatile=True),
Variable(dummy_masks, volatile=True))
_, action, _, _, _, _ = result
env_actions = action.data.squeeze(1).cpu().numpy()
agent_act_arr.append(env_actions)
del result # for reducing memory usage
obs, reward, done, info = envs.step(env_actions)
agent_obs = torch.from_numpy(obs).float().squeeze(1)
if args.cuda:
agent_obs = agent_obs.cuda()
for num_process, done_ in enumerate(done):
returns_list[num_process].append(float(reward[num_process][0]))
if not done_[0]:
current_ep_len[num_process] += 1
continue
# NOTE: In a FFA game, at this point it's over for the agent so
# we call it. However, in a team game, it may not be over yet.
# That depends on if the returned Result is Incomplete. We
# could do something awkward and try to manage the rewards. We
# could also change it so that the agent keeps going a la the
# other setups. However, that's not really the point here and
# so we keep it simple and give it zero reward.
count_episodes += 1
total_data_len = len(returns_list[num_process])
start_current_ep = total_data_len - current_ep_len[num_process] - 1
for step in range(total_data_len - 2, start_current_ep, -1):
next_return = returns_list[num_process][step+1]
future_value = float(next_return * args.gamma)
returns_list[num_process][step] += future_value
current_ep_len[num_process] = 0
if num_epoch % args.log_interval == 0:
agent_act_arr = utils.flatten(agent_act_arr)
expert_act_arr = utils.flatten(expert_act_arr)
if len(agent_act_arr) > 0:
agent_mean_act_prob = [
len([i for i in agent_act_arr if i == k]) * \
1.0/len(agent_act_arr) for k in range(6)
]
for k in range(6):
print("mean act {} probs {}".format(k, agent_mean_act_prob[k]))
print("")
if len(expert_act_arr) > 0:
expert_mean_act_prob = [
len([i for i in expert_act_arr if i == k]) * \
1.0/len(expert_act_arr) for k in range(6)
]
for k in range(6):
print("expert mean act {} probs {}".format(
k, expert_mean_act_prob[k]))
print("")
expert_act_arr = []
agent_act_arr = []
total_steps += num_processes * num_steps
#########
# Train using DAGGER (with supervision from the expert)
#########
agent.set_train()
agent_states_list = utils.flatten(agent_states_list)
expert_actions_list = utils.flatten(expert_actions_list)
returns_list = utils.flatten(returns_list)
if len(aggregate_agent_states) >= args.max_aggregate_agent_states:
indices_replace = np.arange(0, len(aggregate_agent_states)) \
.tolist()
random.shuffle(indices_replace)
for k in range(len(agent_states_list)):
indice = indices_replace[k]
aggregate_agent_states[indice] = agent_states_list[k]
aggregate_expert_actions[indice] = expert_actions_list[k]
aggregate_returns[indice] = returns_list[k]
else:
aggregate_agent_states += agent_states_list
aggregate_expert_actions += expert_actions_list
aggregate_returns += returns_list
del agent_states_list, expert_actions_list, returns_list
indices = np.arange(0, len(aggregate_agent_states)).tolist()
random.shuffle(indices)
for j in range(args.dagger_epoch):
if j == args.dagger_epoch - 1:
action_losses = []
value_losses = []
# TODO: make this loop more efficient - maybe you can move part of
# minibatching outside and only select in the tensors?
for i in range(0, len(aggregate_agent_states),
args.minibatch_size):
indices_minibatch = indices[i: i + args.minibatch_size]
agent_states_minibatch = [aggregate_agent_states[k]
for k in indices_minibatch]
expert_actions_minibatch = [aggregate_expert_actions[k]
for k in indices_minibatch]
returns_minibatch = [aggregate_returns[k]
for k in indices_minibatch]
agent_states_minibatch = torch.stack(agent_states_minibatch, 0)
expert_actions_minibatch = torch.from_numpy(np.array(expert_actions_minibatch).squeeze(1))
returns_minibatch = torch.FloatTensor(returns_minibatch) \
.unsqueeze(1)
if args.cuda:
agent_states_minibatch = agent_states_minibatch.cuda()
expert_actions_minibatch = expert_actions_minibatch.cuda()
returns_minibatch = returns_minibatch.cuda()
values, action_scores = agent.get_values_action_scores(
Variable(agent_states_minibatch),
Variable(dummy_states).detach(),
Variable(dummy_masks).detach())
action_loss = cross_entropy_loss(
action_scores, Variable(expert_actions_minibatch))
value_loss = (Variable(returns_minibatch) - values) \
.pow(2).mean()
agent.optimize(action_loss, value_loss, args.max_grad_norm, \
use_value_loss=args.use_value_loss,
stop_grads_value=args.stop_grads_value,
add_nonlin=args.add_nonlin_valhead)
if j == args.dagger_epoch - 1:
action_losses.append(action_loss.data[0])
value_losses.append(value_loss.data[0])
del action_scores, action_loss, value_loss, values
del indices_minibatch, returns_minibatch, agent_states_minibatch,\
expert_actions_minibatch
if utils.is_save_epoch(num_epoch, start_epoch, args.save_interval):
utils.save_agents("dagger-", num_epoch, training_agents,
total_steps, num_episodes, args)
######
# Eval the current policy
######
# TODO: make eval deterministic
if num_epoch % args.log_interval == 0:
agent.set_eval()
eval_time = time.time()
eval_envs = env_helpers.make_train_envs(
config, 'simple', args.seed, args.game_state_file,
training_agents, num_stack, num_processes,
state_directory=args.state_directory,
state_directory_distribution=args.state_directory_distribution,
do_filter_team=False, step_loss=args.step_loss,
bomb_reward=args.bomb_reward, item_reward=args.item_reward)
dagger_obs = torch.from_numpy(eval_envs.reset()) \
.float().squeeze(0).squeeze(1)
if args.cuda:
dagger_obs = dagger_obs.cuda()
while running_num_episodes < args.num_steps_eval:
result_eval = agent.act_on_data(
Variable(dagger_obs, volatile=True),
Variable(dummy_states_eval, volatile=True),
Variable(dummy_masks_eval, volatile=True),
deterministic=True)
_, actions_eval, _, _, _, _ = result_eval
cpu_actions_eval = actions_eval.data.squeeze(1).cpu().numpy()
cpu_actions_agents_eval = cpu_actions_eval
obs_eval, reward_eval, done_eval, info_eval = eval_envs.step(
cpu_actions_agents_eval)
del result_eval
dagger_obs = torch.from_numpy(
obs_eval.reshape(num_processes, *obs_shape)).float()
if args.cuda:
dagger_obs = dagger_obs.cuda()
running_num_episodes += sum([1 if done_ else 0
for done_ in done_eval])
terminal_reward += reward_eval[done_eval.squeeze() == True] \
.sum()
success_rate += sum([1 if x else 0 for x in
[(done_eval.squeeze() == True) & \
(reward_eval.squeeze() > 0)][0] ])
masks = torch.FloatTensor([
[0.0]*num_training_per_episode if done_ \
else [1.0]*num_training_per_episode
for done_ in done_eval])
reward_eval = utils.torch_numpy_stack(reward_eval, False) \
.transpose(0, 1)
episode_rewards += reward_eval[:, :, None]
final_rewards *= masks
final_rewards += (1 - masks) * episode_rewards
episode_rewards *= masks
final_reward_arr = np.array(final_rewards.squeeze(0))
cumulative_reward += final_reward_arr[
done_eval.squeeze() == True].sum()
if args.render:
eval_envs.render()
print("Eval Time: ", time.time() - eval_time)
cumulative_reward = 1.0 * cumulative_reward / running_num_episodes
terminal_reward = 1.0 * terminal_reward / running_num_episodes
success_rate = 1.0 * success_rate / running_num_episodes
end = time.time()
steps_per_sec = 1.0 * total_steps / (end - start)
epochs_per_sec = 1.0 * num_epoch / (end - start)
print("###########")
print("Epoch {}, # steps: {} SPS {} EPS {} \n success rate {} " \
"mean final reward {} mean total reward {} " \
.format(num_epoch, len(aggregate_agent_states),
steps_per_sec, epochs_per_sec, success_rate,
terminal_reward, cumulative_reward))
print("###########\n")
utils.log_to_tensorboard_dagger(
writer, num_epoch, total_steps, np.mean(action_losses),
cumulative_reward, success_rate, terminal_reward,
np.mean(value_losses), epochs_per_sec, steps_per_sec,
agent_mean_act_prob, expert_mean_act_prob)
running_num_episodes = 0
cumulative_reward = 0
terminal_reward = 0
success_rate = 0
eval_envs.close()
writer.close()
if __name__ == "__main__":
    # Script entry point: run the full DAGGER training / evaluation loop.
    train()
| [
"random.shuffle",
"numpy.mean",
"envs.get_env_info",
"os.path.join",
"utils.load_agents",
"utils.get_train_vars",
"os.path.exists",
"torch.FloatTensor",
"arguments.get_args",
"utils.validate_how_train",
"torch.zeros",
"utils.torch_numpy_stack",
"torch.manual_seed",
"torch.autograd.Variable... | [((1106, 1116), 'arguments.get_args', 'get_args', ([], {}), '()\n', (1114, 1116), False, 'from arguments import get_args\n'), ((1313, 1343), 'utils.validate_how_train', 'utils.validate_how_train', (['args'], {}), '(args)\n', (1337, 1343), False, 'import utils\n'), ((1493, 1545), 'utils.get_train_vars', 'utils.get_train_vars', (['args', 'num_training_per_episode'], {}), '(args, num_training_per_episode)\n', (1513, 1545), False, 'import utils\n'), ((1741, 1784), 'envs.get_env_info', 'env_helpers.get_env_info', (['config', 'num_stack'], {}), '(config, num_stack)\n', (1765, 1784), True, 'import envs as env_helpers\n'), ((1808, 1983), 'utils.load_agents', 'utils.load_agents', (['obs_shape', 'action_space', 'num_training_per_episode', 'num_steps', 'args'], {'agent_type': 'dagger_agent.DaggerAgent', 'character': 'character', 'board_size': 'board_size'}), '(obs_shape, action_space, num_training_per_episode,\n num_steps, args, agent_type=dagger_agent.DaggerAgent, character=\n character, board_size=board_size)\n', (1825, 1983), False, 'import utils\n'), ((2574, 2608), 'os.path.join', 'os.path.join', (['args.log_dir', 'suffix'], {}), '(args.log_dir, suffix)\n', (2586, 2608), False, 'import os\n'), ((2688, 2710), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['log_dir'], {}), '(log_dir)\n', (2701, 2710), False, 'from tensorboardX import SummaryWriter\n'), ((2825, 2853), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2842, 2853), False, 'import torch\n'), ((3054, 3081), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (3079, 3081), False, 'import torch\n'), ((3102, 3119), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (3113, 3119), False, 'import torch\n'), ((3137, 3154), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (3148, 3154), False, 'import torch\n'), ((3268, 3597), 'envs.make_train_envs', 'env_helpers.make_train_envs', (['config', 'how_train', 
'args.seed', 'args.game_state_file', 'training_agents', 'num_stack', 'num_processes'], {'state_directory': 'args.state_directory', 'state_directory_distribution': 'args.state_directory_distribution', 'step_loss': 'args.step_loss', 'bomb_reward': 'args.bomb_reward', 'item_reward': 'args.item_reward'}), '(config, how_train, args.seed, args.\n game_state_file, training_agents, num_stack, num_processes,\n state_directory=args.state_directory, state_directory_distribution=args\n .state_directory_distribution, step_loss=args.step_loss, bomb_reward=\n args.bomb_reward, item_reward=args.item_reward)\n', (3295, 3597), True, 'import envs as env_helpers\n'), ((3822, 3851), 'torch.zeros', 'torch.zeros', (['num_processes', '(1)'], {}), '(num_processes, 1)\n', (3833, 3851), False, 'import torch\n'), ((3875, 3904), 'torch.zeros', 'torch.zeros', (['num_processes', '(1)'], {}), '(num_processes, 1)\n', (3886, 3904), False, 'import torch\n'), ((4050, 4107), 'torch.zeros', 'torch.zeros', (['[num_training_per_episode, num_processes, 1]'], {}), '([num_training_per_episode, num_processes, 1])\n', (4061, 4107), False, 'import torch\n'), ((4163, 4220), 'torch.zeros', 'torch.zeros', (['[num_training_per_episode, num_processes, 1]'], {}), '([num_training_per_episode, num_processes, 1])\n', (4174, 4220), False, 'import torch\n'), ((4367, 4386), 'numpy.array', 'np.array', (['[[False]]'], {}), '([[False]])\n', (4375, 4386), True, 'import numpy as np\n'), ((4448, 4459), 'time.time', 'time.time', ([], {}), '()\n', (4457, 4459), False, 'import time\n'), ((1256, 1280), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1278, 1280), False, 'import torch\n'), ((2620, 2643), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (2634, 2643), False, 'import os\n'), ((2653, 2673), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(log_dir)\n', (2664, 2673), False, 'import os\n'), ((2880, 2913), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), 
'(args.seed)\n', (2902, 2913), False, 'import torch\n'), ((4540, 4551), 'time.time', 'time.time', ([], {}), '()\n', (4549, 4551), False, 'import time\n'), ((8992, 9024), 'utils.flatten', 'utils.flatten', (['agent_states_list'], {}), '(agent_states_list)\n', (9005, 9024), False, 'import utils\n'), ((9055, 9089), 'utils.flatten', 'utils.flatten', (['expert_actions_list'], {}), '(expert_actions_list)\n', (9068, 9089), False, 'import utils\n'), ((9113, 9140), 'utils.flatten', 'utils.flatten', (['returns_list'], {}), '(returns_list)\n', (9126, 9140), False, 'import utils\n'), ((9997, 10020), 'random.shuffle', 'random.shuffle', (['indices'], {}), '(indices)\n', (10011, 10020), False, 'import random\n'), ((12587, 12650), 'utils.is_save_epoch', 'utils.is_save_epoch', (['num_epoch', 'start_epoch', 'args.save_interval'], {}), '(num_epoch, start_epoch, args.save_interval)\n', (12606, 12650), False, 'import utils\n'), ((7867, 7895), 'utils.flatten', 'utils.flatten', (['agent_act_arr'], {}), '(agent_act_arr)\n', (7880, 7895), False, 'import utils\n'), ((7925, 7954), 'utils.flatten', 'utils.flatten', (['expert_act_arr'], {}), '(expert_act_arr)\n', (7938, 7954), False, 'import utils\n'), ((9345, 9376), 'random.shuffle', 'random.shuffle', (['indices_replace'], {}), '(indices_replace)\n', (9359, 9376), False, 'import random\n'), ((12664, 12757), 'utils.save_agents', 'utils.save_agents', (['"""dagger-"""', 'num_epoch', 'training_agents', 'total_steps', 'num_episodes', 'args'], {}), "('dagger-', num_epoch, training_agents, total_steps,\n num_episodes, args)\n", (12681, 12757), False, 'import utils\n'), ((12990, 13001), 'time.time', 'time.time', ([], {}), '()\n', (12999, 13001), False, 'import time\n'), ((13026, 13376), 'envs.make_train_envs', 'env_helpers.make_train_envs', (['config', '"""simple"""', 'args.seed', 'args.game_state_file', 'training_agents', 'num_stack', 'num_processes'], {'state_directory': 'args.state_directory', 'state_directory_distribution': 
'args.state_directory_distribution', 'do_filter_team': '(False)', 'step_loss': 'args.step_loss', 'bomb_reward': 'args.bomb_reward', 'item_reward': 'args.item_reward'}), "(config, 'simple', args.seed, args.\n game_state_file, training_agents, num_stack, num_processes,\n state_directory=args.state_directory, state_directory_distribution=args\n .state_directory_distribution, do_filter_team=False, step_loss=args.\n step_loss, bomb_reward=args.bomb_reward, item_reward=args.item_reward)\n", (13053, 13376), True, 'import envs as env_helpers\n'), ((16087, 16098), 'time.time', 'time.time', ([], {}), '()\n', (16096, 16098), False, 'import time\n'), ((5706, 5721), 'random.random', 'random.random', ([], {}), '()\n', (5719, 5721), False, 'import random\n'), ((10949, 10987), 'torch.stack', 'torch.stack', (['agent_states_minibatch', '(0)'], {}), '(agent_states_minibatch, 0)\n', (10960, 10987), False, 'import torch\n'), ((15022, 15151), 'torch.FloatTensor', 'torch.FloatTensor', (['[([0.0] * num_training_per_episode if done_ else [1.0] *\n num_training_per_episode) for done_ in done_eval]'], {}), '([([0.0] * num_training_per_episode if done_ else [1.0] *\n num_training_per_episode) for done_ in done_eval])\n', (15039, 15151), False, 'import torch\n'), ((16728, 16750), 'numpy.mean', 'np.mean', (['action_losses'], {}), '(action_losses)\n', (16735, 16750), True, 'import numpy as np\n'), ((16834, 16855), 'numpy.mean', 'np.mean', (['value_losses'], {}), '(value_losses)\n', (16841, 16855), True, 'import numpy as np\n'), ((5919, 5953), 'torch.autograd.Variable', 'Variable', (['agent_obs'], {'volatile': '(True)'}), '(agent_obs, volatile=True)\n', (5927, 5953), False, 'from torch.autograd import Variable\n'), ((5975, 6012), 'torch.autograd.Variable', 'Variable', (['dummy_states'], {'volatile': '(True)'}), '(dummy_states, volatile=True)\n', (5983, 6012), False, 'from torch.autograd import Variable\n'), ((6034, 6070), 'torch.autograd.Variable', 'Variable', (['dummy_masks'], {'volatile': 
'(True)'}), '(dummy_masks, volatile=True)\n', (6042, 6070), False, 'from torch.autograd import Variable\n'), ((11563, 11595), 'torch.autograd.Variable', 'Variable', (['agent_states_minibatch'], {}), '(agent_states_minibatch)\n', (11571, 11595), False, 'from torch.autograd import Variable\n'), ((11787, 11821), 'torch.autograd.Variable', 'Variable', (['expert_actions_minibatch'], {}), '(expert_actions_minibatch)\n', (11795, 11821), False, 'from torch.autograd import Variable\n'), ((13784, 13819), 'torch.autograd.Variable', 'Variable', (['dagger_obs'], {'volatile': '(True)'}), '(dagger_obs, volatile=True)\n', (13792, 13819), False, 'from torch.autograd import Variable\n'), ((13841, 13883), 'torch.autograd.Variable', 'Variable', (['dummy_states_eval'], {'volatile': '(True)'}), '(dummy_states_eval, volatile=True)\n', (13849, 13883), False, 'from torch.autograd import Variable\n'), ((13905, 13946), 'torch.autograd.Variable', 'Variable', (['dummy_masks_eval'], {'volatile': '(True)'}), '(dummy_masks_eval, volatile=True)\n', (13913, 13946), False, 'from torch.autograd import Variable\n'), ((15819, 15830), 'time.time', 'time.time', ([], {}), '()\n', (15828, 15830), False, 'import time\n'), ((11131, 11167), 'torch.FloatTensor', 'torch.FloatTensor', (['returns_minibatch'], {}), '(returns_minibatch)\n', (11148, 11167), False, 'import torch\n'), ((15232, 15275), 'utils.torch_numpy_stack', 'utils.torch_numpy_stack', (['reward_eval', '(False)'], {}), '(reward_eval, False)\n', (15255, 15275), False, 'import utils\n'), ((6379, 6400), 'torch.from_numpy', 'torch.from_numpy', (['obs'], {}), '(obs)\n', (6395, 6400), False, 'import torch\n'), ((11048, 11082), 'numpy.array', 'np.array', (['expert_actions_minibatch'], {}), '(expert_actions_minibatch)\n', (11056, 11082), True, 'import numpy as np\n'), ((11617, 11639), 'torch.autograd.Variable', 'Variable', (['dummy_states'], {}), '(dummy_states)\n', (11625, 11639), False, 'from torch.autograd import Variable\n'), ((11670, 11691), 
'torch.autograd.Variable', 'Variable', (['dummy_masks'], {}), '(dummy_masks)\n', (11678, 11691), False, 'from torch.autograd import Variable\n'), ((11853, 11880), 'torch.autograd.Variable', 'Variable', (['returns_minibatch'], {}), '(returns_minibatch)\n', (11861, 11880), False, 'from torch.autograd import Variable\n')] |
import os
import argparse
import matplotlib
import matplotlib.pyplot as plt
import numpy as onp
import jax.numpy as np
import jax.random as random
from jax import vmap
from jax.config import config as jax_config
import numpyro.distributions as dist
from numpyro.handlers import seed, substitute, trace
from numpyro.hmc_util import initialize_model
from numpyro.mcmc import mcmc
from numpyro import sample
import numpyro
from numpy import linalg as LA
from jax import device_get
from sklearn.utils import shuffle
from utils import *
# CONFIG
args = {
    "num_samples": 1000,   # def: 1000
    "num_warmup": 3000,    # def: 3000
    "num_data": 100,       # def: 100
    "num_hidden": 10,      # def: 10
    "device": 'cpu',       # def: cpu
    "save_directory": "./results",
}
# PREPARE TO SAVE RESULTS
# FIX: replaces a bare ``try: os.stat(...) / except: os.mkdir(...)`` that
# swallowed every exception; ``exist_ok=True`` is equivalent and race-free.
os.makedirs(args["save_directory"], exist_ok=True)
# Weight-prior standard deviations; one BNN is fit per value.
sigmas = [1/5, 1, 5]  # def: [1/5, 1, 5]
jax_config.update('jax_platform_name', args["device"])
N, D_X, D_H = args["num_data"], 1, args["num_hidden"]
# GENERATE ARTIFICIAL DATA
X, Y, X_test = get_data(functions, ranges, num_samples=500)
mean = X.mean()
X = X / mean  # normalize inputs by their mean
X, Y = shuffle(X, Y)
X_test = onp.arange(0, 2, 0.01).reshape(-1, 1)
# PLOTTING
plt.cla()    # Clear axis
plt.clf()    # Clear figure
plt.close()  # Close a figure window
# make plots: one column per prior sigma, shared y-axis
fig, ax = plt.subplots(1, len(sigmas), sharey=True)
fig.set_figheight(5)
fig.set_figwidth(len(sigmas) * 7)
samples_collected = []
# INFERENCE
for i, sigma in enumerate(sigmas):
    print("Model with weights prior sigma ", sigma)
    rng, rng_predict = random.split(random.PRNGKey(0))
    samples = run_inference(model, args, rng, X, Y, D_H, sigma)
    samples_collected.append((sigma, samples))
    # predict Y_test at inputs X_test
    vmap_args = (samples, random.split(rng_predict, args["num_samples"]))
    predictions = vmap(lambda samples, rng: predict(model, rng, samples, X_test, D_H, sigma))(*vmap_args)
    predictions = predictions[..., 0]
    # FIX: removed a stray no-op ``1`` expression statement that was here.
    train_predictions = vmap(lambda samples, rng: predict(model, rng, samples, X, D_H, sigma))(*vmap_args)
    train_predictions = train_predictions[..., 0]
    # compute mean prediction and 95% confidence interval around median
    mean_prediction = np.mean(predictions, axis=0)
    percentiles = onp.percentile(predictions, [2.5, 97.5], axis=0)
    # compute mean prediction and confidence interval around median
    train_mean_prediction = np.mean(train_predictions, axis=0)
    # plot training data
    ax[i].plot(X, Y, 'kx', c="red", alpha=0.3, label="Data samples")
    # plot 95% confidence band of predictions
    ax[i].fill_between(X_test[:, 0], percentiles[0, :], percentiles[1, :], color='lightblue', label="95% CI", step='mid')
    # plot mean prediction
    ax[i].plot(X_test, mean_prediction, c='blue', alpha=0.6, label="Predicted")
    ax[i].plot(X_test[:100], [data_gen_func(x, normalizing_mean=mean) for x in X_test[:100]], c='purple', alpha=0.6, label="True")
    ax[i].plot(X_test[100:], [data_gen_func(x, normalizing_mean=mean) for x in X_test[100:]], c='purple', alpha=0.6)
    ax[i].set(xlabel="X", ylabel="Y", title="σ = " + str(sigma))
    ax[i].title.set_size(30)
    ax[i].xaxis.label.set_size(30)
    ax[i].yaxis.label.set_size(30)
    ax[i].set_ylim([-2, 3])
    ax[i].tick_params(labelsize=30)
    # NOTE(review): ``samples_collected`` is appended to before this check,
    # so ``len(samples_collected) - 1 == i`` always holds and every panel
    # gets a legend; the intent may have been ``len(sigmas) - 1`` (last
    # panel only). Behavior kept as-is — confirm intent.
    if i == len(samples_collected) - 1:
        ax[i].legend(fontsize=15, loc="lower left")
print("Saving sigma analysis confidence interval plot...")
plt.savefig(os.path.join(args["save_directory"], "sigma_ci.png"))
plt.cla()    # Clear axis
plt.clf()    # Clear figure
plt.close()  # Close a figure window
# Effective-sample-size diagnostics: one box plot of log(ESS) per sigma.
fig, ax = plt.subplots(1, len(samples_collected), sharey=True)
fig.set_figheight(5)
fig.set_figwidth(len(samples_collected) * 7)
for i in range(len(samples_collected)):
    to_plot = []
    for name, value in samples_collected[i][1].items():
        value = device_get(value)
        neffs = numpyro.diagnostics.effective_sample_size(value[None, ...])
        if isinstance(neffs, onp.ndarray):
            to_plot.append(onp.log(neffs.flatten()))
    bplot = ax[i].boxplot(
        to_plot, labels=list(samples_collected[i][1].keys()),
        patch_artist=True,
    )
    for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:
        plt.setp(bplot[element], color="black")
    for patch in bplot['boxes']:
        patch.set_facecolor("lightblue")
    ax[i].set(ylabel="log ESS", title="σ = " + str(samples_collected[i][0]))
    ax[i].title.set_size(30)
    ax[i].xaxis.label.set_size(30)
    # NOTE(review): ``Axis.set_label`` does not set the visible axis text
    # (that is ``set_label_text`` / ``ax.set_ylabel``) — confirm intent.
    ax[i].yaxis.set_label("neff")
    ax[i].yaxis.label.set_size(30)
    ax[i].tick_params(labelsize=25.0)
print("Saving sigma analysis's effective sample size box plot...")
plt.savefig(os.path.join(args["save_directory"], "sigma_ess.png"))
| [
"jax.config.config.update",
"os.mkdir",
"jax.random.split",
"os.stat",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"matplotlib.pyplot.setp",
"jax.device_get",
"numpy.percentile",
"jax.random.PRNGKey",
"matplotlib.pyplot.cla",
"numpy.arange",
"numpyro.diagnostics.effective_sample_size... | [((915, 969), 'jax.config.config.update', 'jax_config.update', (['"""jax_platform_name"""', "args['device']"], {}), "('jax_platform_name', args['device'])\n", (932, 969), True, 'from jax.config import config as jax_config\n'), ((1147, 1160), 'sklearn.utils.shuffle', 'shuffle', (['X', 'Y'], {}), '(X, Y)\n', (1154, 1160), False, 'from sklearn.utils import shuffle\n'), ((1218, 1227), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (1225, 1227), True, 'import matplotlib.pyplot as plt\n'), ((1243, 1252), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1250, 1252), True, 'import matplotlib.pyplot as plt\n'), ((1270, 1281), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1279, 1281), True, 'import matplotlib.pyplot as plt\n'), ((3579, 3588), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (3586, 3588), True, 'import matplotlib.pyplot as plt\n'), ((3604, 3613), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3611, 3613), True, 'import matplotlib.pyplot as plt\n'), ((3631, 3642), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3640, 3642), True, 'import matplotlib.pyplot as plt\n'), ((795, 826), 'os.stat', 'os.stat', (["args['save_directory']"], {}), "(args['save_directory'])\n", (802, 826), False, 'import os\n'), ((2263, 2291), 'jax.numpy.mean', 'np.mean', (['predictions'], {'axis': '(0)'}), '(predictions, axis=0)\n', (2270, 2291), True, 'import jax.numpy as np\n'), ((2310, 2358), 'numpy.percentile', 'onp.percentile', (['predictions', '[2.5, 97.5]'], {'axis': '(0)'}), '(predictions, [2.5, 97.5], axis=0)\n', (2324, 2358), True, 'import numpy as onp\n'), ((2460, 2494), 'jax.numpy.mean', 'np.mean', (['train_predictions'], {'axis': '(0)'}), '(train_predictions, axis=0)\n', (2467, 2494), True, 'import jax.numpy as np\n'), ((3523, 3575), 'os.path.join', 'os.path.join', (["args['save_directory']", '"""sigma_ci.png"""'], {}), "(args['save_directory'], 'sigma_ci.png')\n", (3535, 3575), 
False, 'import os\n'), ((4874, 4927), 'os.path.join', 'os.path.join', (["args['save_directory']", '"""sigma_ess.png"""'], {}), "(args['save_directory'], 'sigma_ess.png')\n", (4886, 4927), False, 'import os\n'), ((839, 871), 'os.mkdir', 'os.mkdir', (["args['save_directory']"], {}), "(args['save_directory'])\n", (847, 871), False, 'import os\n'), ((1170, 1192), 'numpy.arange', 'onp.arange', (['(0)', '(2)', '(0.01)'], {}), '(0, 2, 0.01)\n', (1180, 1192), True, 'import numpy as onp\n'), ((1586, 1603), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (1600, 1603), True, 'import jax.random as random\n'), ((1798, 1844), 'jax.random.split', 'random.split', (['rng_predict', "args['num_samples']"], {}), "(rng_predict, args['num_samples'])\n", (1810, 1844), True, 'import jax.random as random\n'), ((3923, 3940), 'jax.device_get', 'device_get', (['value'], {}), '(value)\n', (3933, 3940), False, 'from jax import device_get\n'), ((3957, 4016), 'numpyro.diagnostics.effective_sample_size', 'numpyro.diagnostics.effective_sample_size', (['value[None, ...]'], {}), '(value[None, ...])\n', (3998, 4016), False, 'import numpyro\n'), ((4426, 4465), 'matplotlib.pyplot.setp', 'plt.setp', (['bplot[element]'], {'color': '"""black"""'}), "(bplot[element], color='black')\n", (4434, 4465), True, 'import matplotlib.pyplot as plt\n')] |
# Andrei, 2018
"""
Collect data.
"""
from argparse import ArgumentParser
import numpy as np
import cv2
import os
import time
from utils import read_cfg
from get_camera import VideoLoad
from get_obd import OBDLoader
import matplotlib.pyplot as plt
from can_utils import validate_data as validate_can_data
from can_utils import CanPlot
from phone_data_utils import validate_data as validate_phone_data
from phone_data_utils import PhonePlot
CAN_PLOT_TIME = 5000  # CAN plot window (ms); appears unused in this script
CFG_FILE = "cfg.yaml"  # main per-experiment config file name
CFG_EXTRA_FILE = "cfg_extra.yaml"  # extra (camera) per-experiment config
PLAYBACK_FACTOR = 2  # multiplier used by the playback-speed menu keys
CV_WAIT_KEY_TIME = 1  # cv2.waitKey poll interval (ms) while live play is on
if __name__ == "__main__":
    # Replay a recorded experiment: camera, OBD, CAN and phone streams are
    # loaded from `experiment_path` and scrubbed with keyboard controls.
    arg_parser = ArgumentParser()
    arg_parser.add_argument(dest='experiment_path', help='Path to experiment to visualize.')
    arg_parser.add_argument('--camera-view-size', default=400, type=int, dest="camera_view_size")
    arg_parser = arg_parser.parse_args()
    experiment_path = arg_parser.experiment_path
    camera_view_size = arg_parser.camera_view_size
    # Read both YAML configs stored inside the experiment folder.
    cfg = read_cfg(os.path.join(experiment_path, CFG_FILE))
    cfg_extra = read_cfg(os.path.join(experiment_path, CFG_EXTRA_FILE))
    collect = cfg.collect  # flags telling which data sources were recorded
    record_timestamp = cfg.recorded_min_max_tp
    common_min_max_tp = cfg.common_min_max_tp
    video_loders = []  # NOTE(review): typo for "video_loaders", kept as-is
    obd_loader = None
    can_plot = None
    phone_plot = None
    plot_stuff = False
    live_plot = True
    if collect.camera:
        # One VideoLoad per recorded camera id (e.g. "camera_0").
        camera_names = ["camera_{}".format(x) for x in cfg.camera.ids]
        camera_cfgs = [getattr(cfg.camera, x) for x in camera_names]
        extra_camera_cfgs = [getattr(cfg_extra, x) for x in camera_names]
        video_loders = [VideoLoad(experiment_path, x, getattr(cfg_extra, x),
                                  view_height=camera_view_size,
                                  flip_view=getattr(cfg.camera, x).flip) for x in camera_names]
    if collect.obd:
        obd_loader = OBDLoader(experiment_path)
    if plot_stuff:
        plt.ion()
        # plt.show()
    print("=" * 70)
    # Validate recorded streams first; each raw_input pause lets the user
    # read the validation output before continuing.
    # NOTE(review): raw_input is Python 2 only — confirm target interpreter.
    if collect.can:
        print("=" * 30, "Validate can data", "=" * 30)
        validate_can_data(experiment_path)
        key = raw_input("Press key to continue ...")
        print("")
    print("=" * 70)
    if collect.phone:
        print("=" * 30, "Validate phone data", "=" * 30)
        validate_phone_data(experiment_path)
        key = raw_input("Press key to continue ...")
        print("")
    if live_plot:
        print("=" * 70)
        if collect.can:
            plot_stuff = True
            can_plot = CanPlot(experiment_path)
            key = raw_input("Press key to continue ...")
            print("")
        print("=" * 70)
        if collect.phone:
            plot_stuff = True
            phone_plot = PhonePlot(experiment_path)
            key = raw_input("Press key to continue ...")
            print("")
        print("=" * 70)
    # Blank image shown in a "Cursor" window so cv2.waitKey can capture
    # key presses (see get_key below).
    cursor_img = np.zeros((100, 100, 3)).astype(np.uint8)
def get_key(wait_time):
cv2.imshow("Cursor", cursor_img)
k = cv2.waitKey(wait_time)
# r = chr(k % 256)
r = k & 0xFF
return r
    freq_tp = [1/30., 1/10., 1.]  # selectable scrub step sizes, in seconds
    freq_id = 0
    freq = freq_tp[freq_id]  # current scrub step ('l' key cycles freq_tp)
    r = None
    crt_tp = common_min_max_tp[0]  # playback cursor, starts at earliest common tp
    print ("START factor: --->")
    print (crt_tp)
    print ("------------------")
    live_play = False  # when True the cursor auto-advances every frame
    key_wait_time = 0  # 0 = block on key press; >0 = poll (live play)
    playback_speed = 1.
    playback_factor = PLAYBACK_FACTOR
    # -- Define menu
    # Menu functions: each handler mutates the module-level playback state.
    def increase_tp():
        # Step the playback cursor forward by the current frequency.
        global crt_tp
        global freq
        crt_tp += freq
    def decrease_tp():
        # Step the playback cursor backward by the current frequency.
        global crt_tp
        global freq
        crt_tp -= freq
    def change_freq():
        # Cycle to the next scrub step size in freq_tp.
        global freq
        global freq_id
        freq_id = (freq_id + 1) % len(freq_tp)
        freq = freq_tp[freq_id]
    def toggle_play():
        # Toggle live playback; switch cv2.waitKey between blocking (0)
        # and polling (CV_WAIT_KEY_TIME ms).
        global live_play
        global key_wait_time
        live_play = not live_play
        key_wait_time = CV_WAIT_KEY_TIME if live_play else 0
    def increase_playback_speed():
        # Multiply live-play speed by the configured factor.
        global playback_speed
        playback_speed = playback_speed * playback_factor
    def decrease_playback_speed():
        # Divide live-play speed by the configured factor.
        global playback_speed
        playback_speed = playback_speed / playback_factor
    # Key code -> (handler, help text). 27 is the ESC key.
    menu = dict({
        27: (quit, "Key [ESC]: Exit"), # if the 'ESC' key is pressed, Quit
        ord('l'): (change_freq, "Key [ l ]: Change freq"),
        ord('\''): (increase_tp, "Key [ \'; ]: Increase tp by freq"),
        ord(';'): (decrease_tp, "Key [ ; ]: Decrease tp by freq"),
        ord('p'): (toggle_play, "Key [ p ]: Toggle Live play"),
        ord(']'): (increase_playback_speed, "Key [ ] ]: Increase playback speed (*{})"),
        ord('['): (decrease_playback_speed, "Key [ [ ]: Decrease playback speed"),
    })
    menu_text = "\n".join([x[1] for x in menu.values()])
while r != "q":
prev_tp = time.time()
key = get_key(key_wait_time)
plt.clf()
if key in menu.keys():
menu[key][0]()
elif key != 255:
print("Unknown key: {}".format(key))
if collect.camera:
print ("------")
frames = []
for v in video_loders:
dif_tp, frame = v.get_closest(crt_tp)
frames.append(frame)
# print (dif_tp)
v.show(frame)
if collect.obd:
obd_data = obd_loader.get_closest(crt_tp)
if collect.can:
can_plot.plot(crt_tp)
if collect.phone:
phone_plot.plot(crt_tp)
# TODO Plot magnetometer
if plot_stuff:
plt.show()
plt.pause(0.0000001) # Note this correction
if live_play:
crt_tp += (time.time() - prev_tp) * playback_factor
| [
"can_utils.CanPlot",
"argparse.ArgumentParser",
"phone_data_utils.validate_data",
"matplotlib.pyplot.clf",
"cv2.waitKey",
"get_obd.OBDLoader",
"can_utils.validate_data",
"phone_data_utils.PhonePlot",
"numpy.zeros",
"matplotlib.pyplot.show",
"time.time",
"matplotlib.pyplot.ion",
"matplotlib.p... | [((612, 628), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (626, 628), False, 'from argparse import ArgumentParser\n'), ((984, 1023), 'os.path.join', 'os.path.join', (['experiment_path', 'CFG_FILE'], {}), '(experiment_path, CFG_FILE)\n', (996, 1023), False, 'import os\n'), ((1050, 1095), 'os.path.join', 'os.path.join', (['experiment_path', 'CFG_EXTRA_FILE'], {}), '(experiment_path, CFG_EXTRA_FILE)\n', (1062, 1095), False, 'import os\n'), ((1865, 1891), 'get_obd.OBDLoader', 'OBDLoader', (['experiment_path'], {}), '(experiment_path)\n', (1874, 1891), False, 'from get_obd import OBDLoader\n'), ((1920, 1929), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1927, 1929), True, 'import matplotlib.pyplot as plt\n'), ((2055, 2089), 'can_utils.validate_data', 'validate_can_data', (['experiment_path'], {}), '(experiment_path)\n', (2072, 2089), True, 'from can_utils import validate_data as validate_can_data\n'), ((2269, 2305), 'phone_data_utils.validate_data', 'validate_phone_data', (['experiment_path'], {}), '(experiment_path)\n', (2288, 2305), True, 'from phone_data_utils import validate_data as validate_phone_data\n'), ((2929, 2961), 'cv2.imshow', 'cv2.imshow', (['"""Cursor"""', 'cursor_img'], {}), "('Cursor', cursor_img)\n", (2939, 2961), False, 'import cv2\n'), ((2974, 2996), 'cv2.waitKey', 'cv2.waitKey', (['wait_time'], {}), '(wait_time)\n', (2985, 2996), False, 'import cv2\n'), ((4800, 4811), 'time.time', 'time.time', ([], {}), '()\n', (4809, 4811), False, 'import time\n'), ((4857, 4866), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4864, 4866), True, 'import matplotlib.pyplot as plt\n'), ((2497, 2521), 'can_utils.CanPlot', 'CanPlot', (['experiment_path'], {}), '(experiment_path)\n', (2504, 2521), False, 'from can_utils import CanPlot\n'), ((2707, 2733), 'phone_data_utils.PhonePlot', 'PhonePlot', (['experiment_path'], {}), '(experiment_path)\n', (2716, 2733), False, 'from phone_data_utils import PhonePlot\n'), 
((2851, 2874), 'numpy.zeros', 'np.zeros', (['(100, 100, 3)'], {}), '((100, 100, 3))\n', (2859, 2874), True, 'import numpy as np\n'), ((5541, 5551), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5549, 5551), True, 'import matplotlib.pyplot as plt\n'), ((5564, 5580), 'matplotlib.pyplot.pause', 'plt.pause', (['(1e-07)'], {}), '(1e-07)\n', (5573, 5580), True, 'import matplotlib.pyplot as plt\n'), ((5655, 5666), 'time.time', 'time.time', ([], {}), '()\n', (5664, 5666), False, 'import time\n')] |
import logging
import os
from pathlib import Path
from pickle import load
from typing import Callable, List
import numpy as np
import pandas as pd
import tensorflow as tf
from dotenv import load_dotenv
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import CSVLogger
from generators.parameters import ParameterSet, ParamValue
from models.common.data_generator import SoundDataGenerator
"""Dotenv Config"""
# Load key=value pairs from ./.env (if present) into the process
# environment so later configuration lookups can pick them up.
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
"""Data Utils"""
def train_val_split(
    x_train: np.ndarray, y_train: np.ndarray, split: float = 0.2,
) -> tuple:
    """Split off the last ``split`` fraction of the data as a validation set.

    Args:
        x_train: Input samples, first axis is the sample axis.
        y_train: Targets aligned with ``x_train``.
        split: Fraction of samples (by count, truncated) to reserve for
            validation. Taken from the END of the arrays, so shuffle first
            if ordering matters.

    Returns:
        Tuple ``(x_val, y_val, x_train, y_train)`` (validation first, in the
        same order callers already unpack).
    """
    # Renamed from `slice`, which shadowed the builtin.
    n_val: int = int(x_train.shape[0] * split)
    if n_val == 0:
        # BUG FIX: with split == 0 the old code did `x_train[-0:]`, which
        # selects the WHOLE array (and `x_train[:-0]` is empty), inverting
        # the split. Return empty validation slices instead.
        return (x_train[:0], y_train[:0], x_train, y_train)
    x_val: np.ndarray = x_train[-n_val:]
    y_val: np.ndarray = y_train[-n_val:]
    return (x_val, y_val, x_train[:-n_val], y_train[:-n_val])
"""Model Utils"""
def mean_percentile_rank(y_true, y_pred, k=5):
    """
    @paper
    The first evaluation measure is the Mean Percentile Rank
    (MPR) which is computed per synthesizer parameter.
    """
    # TODO
    # NOTE(review): still an unimplemented stub -- it implicitly returns None
    # and is kept commented out in the metrics list of `summarize_compile`.
def top_k_mean_accuracy(y_true, y_pred, k=5):
    """Mean top-k accuracy across all examples.

    @paper: the top-k accuracy is computed for each test example and then
    averaged, per synthesizer parameter, for k = 1..5. One-hot ``y_true``
    and score vectors ``y_pred`` are flattened to 2-D over their last axis
    before the top-k test, then the per-example hits are restored to the
    input shape (minus the class axis) and averaged.
    """
    # Remember the incoming shape so per-example results can be restored.
    shape_in = tf.shape(y_true)
    flat_true = tf.reshape(y_true, (-1, tf.shape(y_true)[-1]))
    flat_pred = tf.reshape(y_pred, (-1, tf.shape(y_pred)[-1]))
    # True class index per row, tested against the k highest predictions.
    labels = tf.cast(tf.argmax(flat_true, axis=-1), "int32")
    hits = K.in_top_k(flat_pred, labels, k)
    per_example = tf.reshape(hits, shape_in[:-1])
    return tf.reduce_mean(tf.cast(per_example, tf.float32))
def summarize_compile(model: keras.Model):
    """Print the model architecture and compile it for training.

    @paper: sigmoid activations with binary cross entropy loss converged
    best, optimized with Adam [14]. Monitored metrics: top-k mean accuracy
    and mean absolute error.
    """
    model.summary(line_length=80, positions=[0.33, 0.65, 0.8, 1.0])
    # Metrics monitored during training:
    # @paper: 1) Mean Percentile Rank? (mean_percentile_rank -- not enabled)
    # @paper: 2) Top-k mean accuracy based evaluation
    # @paper: 3) Mean Absolute Error based evaluation
    tracked_metrics = [
        top_k_mean_accuracy,
        keras.metrics.MeanAbsoluteError(),
    ]
    # Training configuration (optimizer, loss, metrics).
    model.compile(
        optimizer=keras.optimizers.Adam(),
        loss=keras.losses.BinaryCrossentropy(),
        metrics=tracked_metrics,
    )
def fit(
    model: keras.Model,
    x_train: np.ndarray,
    y_train: np.ndarray,
    x_val: np.ndarray,
    y_val: np.ndarray,
    batch_size: int = 16,
    epochs: int = 100,
) -> keras.Model:
    """Train ``model`` on the training arrays, monitoring the validation pair.

    @paper: minibatch size of 16 for 100 epochs; the best weights for each
    model were set by employing an early stopping procedure (validation loss
    and metrics are evaluated at the end of each epoch).

    Returns:
        The trained model (the same object, updated in place).
    """
    logging.info("# Fit model on training data")
    history = model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=epochs,
        # Validation data is monitored at the end of each epoch.
        validation_data=(x_val, y_val),
        verbose=0,
    )
    # BUG FIX: this previously read
    #   logging.info("\nhistory dict:", history.history)
    # which passes the dict as a %-interpolation argument with no placeholder,
    # so the history was silently dropped from the log record. Use a "%s"
    # placeholder (lazy logging formatting) so it is actually rendered.
    logging.info("\nhistory dict: %s", history.history)
    return model
def compare(target, prediction, params, precision=1, print_output=False):
    """Compare one predicted parameter encoding against its ground truth.

    Both vectors are decoded with ``params.decode``; for every decoded
    parameter the argmax of its encoding is taken as the level index.

    Args:
        target: Ground-truth encoded vector.
        prediction: Predicted encoded vector.
        params: Decoder (``ParameterSet``-like) with a ``decode`` method.
        precision: Maximum index distance still counted as "close".
        print_output: When True, print a per-parameter comparison table.

    Returns:
        ``(exact_ratio, close_ratio)``: fractions of parameters whose index
        matches exactly, resp. lies within ``precision`` index steps.
    """
    if print_output and len(prediction) < 10:
        print(prediction)
        print("Pred: {}".format(np.round(prediction, decimals=2)))
        print("PRnd: {}".format(np.round(prediction)))
        print("Act : {}".format(target))
        print("+" * 5)
    pred: List[ParamValue] = params.decode(prediction)
    act: List[ParamValue] = params.decode(target)
    pred_index: List[int] = [np.array(p.encoding).argmax() for p in pred]
    act_index: List[int] = [np.array(p.encoding).argmax() for p in act]
    width = 8
    # Build each report row with str.join instead of repeated +=
    # (avoids quadratic string concatenation; output is unchanged).
    names = "Parameter: " + "".join(p.name.rjust(width)[:width] for p in act)
    act_s = "Actual:    " + "".join(f"{p.value:>8.2f}" for p in act)
    pred_s = "Predicted: " + "".join(f"{p.value:>8.2f}" for p in pred)
    pred_i = "Pred. Indx:" + "".join(f"{p:>8}" for p in pred_index)
    act_i = "Act. Index:" + "".join(f"{p:>8}" for p in act_index)
    diff_i = "Index Diff:" + "".join(
        f"{p - a:>8}" for p, a in zip(pred_index, act_index)
    )
    n_params = len(pred_index)
    exact = sum(1.0 for p, a in zip(pred_index, act_index) if p == a)
    close = sum(
        1.0 for p, a in zip(pred_index, act_index) if abs(p - a) <= precision
    )
    exact_ratio = exact / n_params
    close_ratio = close / n_params
    if print_output:
        print(names)
        print(act_s)
        print(pred_s)
        print(act_i)
        print(pred_i)
        print(diff_i)
        print("-" * 30)
    return exact_ratio, close_ratio
def evaluate(
    prediction: np.ndarray, x: np.ndarray, y: np.ndarray, params: ParameterSet,
):
    """Print prediction quality over a batch of samples.

    Runs `compare` on every (target, prediction) pair -- verbosely for the
    first five samples -- then prints a summary of the parameter space and
    the overall exact / close accuracy percentages.
    """
    print("Prediction Shape: {}".format(prediction.shape))
    num: int = x.shape[0]
    correct: int = 0
    correct_r: float = 0.0
    close_r: float = 0.0
    for i in range(num):
        # Only the first few samples get a verbose per-parameter dump.
        exact, close = compare(
            target=y[i],
            prediction=prediction[i],
            params=params,
            print_output=i < 5,
        )
        correct_r += exact
        close_r += close
        if exact == 1.0:
            correct += 1
    summary = params.explain()
    print(
        "{} Parameters with {} levels (fixed: {})".format(
            summary["n_variable"], summary["levels"], summary["n_fixed"]
        )
    )
    print(
        "Got {} out of {} ({:.1f}% perfect); Exact params: {:.1f}%, Close params: {:.1f}%".format(
            correct,
            num,
            correct / num * 100,
            correct_r / num * 100,
            close_r / num * 100,
        )
    )
def data_format_audio(audio: np.ndarray, data_format: str) -> np.ndarray:
    """Expand a 1-D audio buffer with batch and channel axes.

    # `(None, n_channel, n_freq, n_time)` if `'channels_first'`,
    # `(None, n_freq, n_time, n_channel)` if `'channels_last'`,
    """
    if data_format == "channels_last":
        return audio[np.newaxis, :, np.newaxis]
    # Any other value is treated as channels_first.
    return audio[np.newaxis, np.newaxis, :]
"""
Wrap up the whole training process in a standard function. Gets a callback
to actually make the model, to keep it as flexible as possible.
# Params:
# - dataset_name (dataset name)
# - model_name: (C1..C6,e2e)
# - model_callback: function taking name,inputs,outputs,data_format and returning a Keras model
# - epochs: int
# - dataset_dir: place to find input data
# - output_dir: place to put outputs
# - parameters_file (override parameters filename)
# - dataset_file (override dataset filename)
# - data_format (channels_first or channels_last)
# - run_name: to save this run as
"""
def train_model(
    # Main options
    dataset_name: str,
    model_name: str,
    epochs: int,
    model_callback: Callable[[str, int, int, str], keras.Model],
    dataset_dir: str,
    output_dir: str, # Directory names
    dataset_file: str = None,
    parameters_file: str = None,
    run_name: str = None,
    data_format: str = "channels_last",
    save_best: bool = True,
    resume: bool = False,
    checkpoint: bool = True,
    model_type: str = "E2E",
):
    """Run the whole training process for one model/dataset combination.

    Builds (or resumes) a model via ``model_callback``, trains it on the
    HDF5 dataset with best/checkpoint saving and CSV history logging, saves
    model and history artifacts under ``output_dir``, then evaluates
    predictions on one validation batch.

    Args:
        dataset_name: Dataset name; used to derive default file names.
        model_name: Model identifier (C1..C6, e2e) passed to the callback.
        epochs: Number of epochs to train for.
        model_callback: Called as
            ``model_callback(model_name=..., inputs=..., outputs=..., data_format=...)``
            and must return a Keras model.
        dataset_dir: Directory holding the dataset/parameter files.
        output_dir: Directory for all outputs (created if missing).
        dataset_file: Override for the data file path
            (default ``<cwd>/<dataset_dir>/<dataset_name>_data.hdf5``).
        parameters_file: Override for the pickled ParameterSet path
            (default ``<cwd>/<dataset_dir>/<dataset_name>_params.pckl``).
        run_name: Name for output artifacts (default ``<dataset>_<model>``).
        data_format: Keras image data format; on CPU only ``channels_last``
            is supported.
        save_best: Save the best-so-far model to ``*_best.h5``.
        resume: Resume from ``*_checkpoint.h5`` if it exists.
        checkpoint: Save a checkpoint model after every epoch.
        model_type: "STFT" transposes the input batch before prediction
            during final evaluation; anything else leaves it as-is.
    """
    if not dataset_file:
        dataset_file = (
            os.getcwd() + "/" + dataset_dir + "/" + dataset_name + "_data.hdf5"
        )
    if not parameters_file:
        parameters_file = (
            os.getcwd() + "/" + dataset_dir + "/" + dataset_name + "_params.pckl"
        )
    if not run_name:
        run_name = dataset_name + "_" + model_name
    # Derived artifact paths for this run.
    model_file = f"{output_dir}/{run_name}.h5"
    best_model_file = f"{output_dir}/{run_name}_best.h5"
    checkpoint_model_file = f"{output_dir}/{run_name}_checkpoint.h5"
    history_file = f"{output_dir}/{run_name}.csv"
    history_graph_file = f"{output_dir}/{run_name}.pdf"
    gpu_avail = tf.test.is_gpu_available() # True/False
    cuda_gpu_avail = tf.test.is_gpu_available(cuda_only=True) # True/False
    print("+" * 30)
    print(f"++ {run_name}")
    print(
        f"Running model: {model_name} on dataset {dataset_file} (parameters {parameters_file}) for {epochs} epochs"
    )
    print(f"Saving model in {output_dir} as {model_file}")
    print(f"Saving history as {history_file}")
    print(f"GPU: {gpu_avail}, with CUDA: {cuda_gpu_avail}")
    print("+" * 30)
    os.makedirs(output_dir, exist_ok=True)
    # Get training and validation generators
    # (first 80% of the file for training, last 20% for validation).
    params = {"data_file": dataset_file, "batch_size": 64, "shuffle": True}
    training_generator = SoundDataGenerator(first=0.8, **params)
    validation_generator = SoundDataGenerator(last=0.2, **params)
    n_samples = training_generator.get_audio_length()
    print(f"get_audio_length: {n_samples}")
    n_outputs = training_generator.get_label_size()
    # set keras image_data_format
    # NOTE: on CPU only `channels_last` is supported
    keras.backend.set_image_data_format(data_format)
    model: keras.Model = None
    if resume and os.path.exists(checkpoint_model_file):
        # Resume: recover the last finished epoch from the CSV history and
        # reload the checkpointed model (with the custom metric registered).
        history = pd.read_csv(history_file)
        # Note - its zero indexed in the file, but 1 indexed in the display
        initial_epoch: int = max(history.iloc[:, 0]) + 1
        print(
            f"Resuming from model file: {checkpoint_model_file} after epoch {initial_epoch}"
        )
        model = keras.models.load_model(
            checkpoint_model_file,
            custom_objects={"top_k_mean_accuracy": top_k_mean_accuracy},
        )
    else:
        # Fresh start: build the model and truncate any old history file.
        model = model_callback(
            model_name=model_name,
            inputs=n_samples,
            outputs=n_outputs,
            data_format=data_format,
        )
        # Summarize and compile the model
        summarize_compile(model)
        initial_epoch = 0
        open(history_file, "w").close()
    callbacks = []
    best_callback = keras.callbacks.ModelCheckpoint(
        filepath=best_model_file,
        save_weights_only=False,
        save_best_only=True,
        verbose=1,
    )
    checkpoint_callback = keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_model_file,
        save_weights_only=False,
        save_best_only=False,
        verbose=1,
    )
    if save_best:
        callbacks.append(best_callback)
    if checkpoint:
        callbacks.append(checkpoint_callback)
    # History CSV is always appended so resumed runs keep earlier epochs.
    callbacks.append(CSVLogger(history_file, append=True))
    # Fit the model
    history = None
    try:
        # TODO: fix incompatible shapes during spectrogram_cnn
        history = model.fit(
            x=training_generator,
            validation_data=validation_generator,
            epochs=epochs,
            callbacks=callbacks,
            initial_epoch=initial_epoch,
            verbose=0, # https://github.com/tensorflow/tensorflow/issues/38064
        )
    except Exception as e:
        # Best-effort: even if training fails, still save whatever we have.
        print(f"Something went wrong during `model.fit`: {e}")
    # Save model
    model.save(model_file)
    # Save history
    if history:
        try:
            hist_df = pd.DataFrame(history.history)
            try:
                fig = hist_df.plot(subplots=True, figsize=(8, 25))
                fig[0].get_figure().savefig(history_graph_file)
            except Exception as e:
                print("Couldn't create history graph")
                print(e)
        except Exception as e:
            print("Couldn't save history")
            print(e)
    # evaluate prediction on random sample from validation set
    # Parameter data - needed for decoding!
    with open(parameters_file, "rb") as f:
        parameters: ParameterSet = load(f)
    # Shuffle data
    validation_generator.on_epoch_end()
    X, y = validation_generator.__getitem__(0)
    if model_type == "STFT":
        # stft expects shape (channel, sample_rate)
        X = np.moveaxis(X, 1, -1)
    prediction: np.ndarray = model.predict(X)
    evaluate(prediction, X, y, parameters)
| [
"numpy.moveaxis",
"pandas.read_csv",
"tensorflow.reshape",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.callbacks.CSVLogger",
"pathlib.Path",
"pickle.load",
"tensorflow.keras.metrics.MeanAbsoluteError",
"numpy.round",
"pandas.DataFrame",
"os.path.exists",
"tensorflow.cast",
... | [((494, 527), 'dotenv.load_dotenv', 'load_dotenv', ([], {'dotenv_path': 'env_path'}), '(dotenv_path=env_path)\n', (505, 527), False, 'from dotenv import load_dotenv\n'), ((475, 484), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (479, 484), False, 'from pathlib import Path\n'), ((1518, 1534), 'tensorflow.shape', 'tf.shape', (['y_true'], {}), '(y_true)\n', (1526, 1534), True, 'import tensorflow as tf\n'), ((1754, 1792), 'tensorflow.reshape', 'tf.reshape', (['top_k', 'original_shape[:-1]'], {}), '(top_k, original_shape[:-1])\n', (1764, 1792), True, 'import tensorflow as tf\n'), ((3039, 3083), 'logging.info', 'logging.info', (['"""# Fit model on training data"""'], {}), "('# Fit model on training data')\n", (3051, 3083), False, 'import logging\n'), ((3553, 3601), 'logging.info', 'logging.info', (['"""\nhistory dict:"""', 'history.history'], {}), "('\\nhistory dict:', history.history)\n", (3565, 3601), False, 'import logging\n'), ((8427, 8453), 'tensorflow.test.is_gpu_available', 'tf.test.is_gpu_available', ([], {}), '()\n', (8451, 8453), True, 'import tensorflow as tf\n'), ((8489, 8529), 'tensorflow.test.is_gpu_available', 'tf.test.is_gpu_available', ([], {'cuda_only': '(True)'}), '(cuda_only=True)\n', (8513, 8529), True, 'import tensorflow as tf\n'), ((8917, 8955), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (8928, 8955), False, 'import os\n'), ((9103, 9142), 'models.common.data_generator.SoundDataGenerator', 'SoundDataGenerator', ([], {'first': '(0.8)'}), '(first=0.8, **params)\n', (9121, 9142), False, 'from models.common.data_generator import SoundDataGenerator\n'), ((9170, 9208), 'models.common.data_generator.SoundDataGenerator', 'SoundDataGenerator', ([], {'last': '(0.2)'}), '(last=0.2, **params)\n', (9188, 9208), False, 'from models.common.data_generator import SoundDataGenerator\n'), ((9451, 9499), 'tensorflow.keras.backend.set_image_data_format', 'keras.backend.set_image_data_format', 
(['data_format'], {}), '(data_format)\n', (9486, 9499), False, 'from tensorflow import keras\n'), ((10408, 10527), 'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', ([], {'filepath': 'best_model_file', 'save_weights_only': '(False)', 'save_best_only': '(True)', 'verbose': '(1)'}), '(filepath=best_model_file, save_weights_only\n =False, save_best_only=True, verbose=1)\n', (10439, 10527), False, 'from tensorflow import keras\n'), ((10588, 10713), 'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', ([], {'filepath': 'checkpoint_model_file', 'save_weights_only': '(False)', 'save_best_only': '(False)', 'verbose': '(1)'}), '(filepath=checkpoint_model_file,\n save_weights_only=False, save_best_only=False, verbose=1)\n', (10619, 10713), False, 'from tensorflow import keras\n'), ((1819, 1852), 'tensorflow.cast', 'tf.cast', (['correct_pred', 'tf.float32'], {}), '(correct_pred, tf.float32)\n', (1826, 1852), True, 'import tensorflow as tf\n'), ((9549, 9586), 'os.path.exists', 'os.path.exists', (['checkpoint_model_file'], {}), '(checkpoint_model_file)\n', (9563, 9586), False, 'import os\n'), ((9606, 9631), 'pandas.read_csv', 'pd.read_csv', (['history_file'], {}), '(history_file)\n', (9617, 9631), True, 'import pandas as pd\n'), ((9899, 10011), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['checkpoint_model_file'], {'custom_objects': "{'top_k_mean_accuracy': top_k_mean_accuracy}"}), "(checkpoint_model_file, custom_objects={\n 'top_k_mean_accuracy': top_k_mean_accuracy})\n", (9922, 10011), False, 'from tensorflow import keras\n'), ((10893, 10929), 'tensorflow.keras.callbacks.CSVLogger', 'CSVLogger', (['history_file'], {'append': '(True)'}), '(history_file, append=True)\n', (10902, 10929), False, 'from tensorflow.keras.callbacks import CSVLogger\n'), ((12128, 12135), 'pickle.load', 'load', (['f'], {}), '(f)\n', (12132, 12135), False, 'from pickle import load\n'), ((12337, 12358), 'numpy.moveaxis', 
'np.moveaxis', (['X', '(1)', '(-1)'], {}), '(X, 1, -1)\n', (12348, 12358), True, 'import numpy as np\n'), ((1694, 1720), 'tensorflow.argmax', 'tf.argmax', (['y_true'], {'axis': '(-1)'}), '(y_true, axis=-1)\n', (1703, 1720), True, 'import tensorflow as tf\n'), ((2072, 2095), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {}), '()\n', (2093, 2095), False, 'from tensorflow import keras\n'), ((2283, 2316), 'tensorflow.keras.losses.BinaryCrossentropy', 'keras.losses.BinaryCrossentropy', ([], {}), '()\n', (2314, 2316), False, 'from tensorflow import keras\n'), ((11553, 11582), 'pandas.DataFrame', 'pd.DataFrame', (['history.history'], {}), '(history.history)\n', (11565, 11582), True, 'import pandas as pd\n'), ((1572, 1588), 'tensorflow.shape', 'tf.shape', (['y_true'], {}), '(y_true)\n', (1580, 1588), True, 'import tensorflow as tf\n'), ((1632, 1648), 'tensorflow.shape', 'tf.shape', (['y_pred'], {}), '(y_pred)\n', (1640, 1648), True, 'import tensorflow as tf\n'), ((2625, 2658), 'tensorflow.keras.metrics.MeanAbsoluteError', 'keras.metrics.MeanAbsoluteError', ([], {}), '()\n', (2656, 2658), False, 'from tensorflow import keras\n'), ((3800, 3832), 'numpy.round', 'np.round', (['prediction'], {'decimals': '(2)'}), '(prediction, decimals=2)\n', (3808, 3832), True, 'import numpy as np\n'), ((3867, 3887), 'numpy.round', 'np.round', (['prediction'], {}), '(prediction)\n', (3875, 3887), True, 'import numpy as np\n'), ((4089, 4109), 'numpy.array', 'np.array', (['p.encoding'], {}), '(p.encoding)\n', (4097, 4109), True, 'import numpy as np\n'), ((4162, 4182), 'numpy.array', 'np.array', (['p.encoding'], {}), '(p.encoding)\n', (4170, 4182), True, 'import numpy as np\n'), ((7832, 7843), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7841, 7843), False, 'import os\n'), ((7978, 7989), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7987, 7989), False, 'import os\n')] |
# %%
import csv
import numpy as np
import time
import gurobipy as gp
from gurobipy import GRB
# %%
class MCF:
    """Deterministic multicommodity capacitated network design instance.

    Parsed from a ".dow" file: line 2 holds ``numNodes numArcs numComm``;
    the next ``numArcs`` lines describe arcs as
    ``origin destination unit_cost capacity fixed_cost``; the remaining
    lines describe commodities as ``origin destination demand``.
    Arc/commodity data are stored in 0-indexed dicts.
    """

    def __init__(self, dowFile):
        """Read and store the instance data from *dowFile* (DOW format)."""
        # BUG FIX: this was `self.nunNodes = 0` (typo), so `numNodes` only
        # existed if the header line (line 2) was present in the file.
        self.numNodes = 0
        self.numArcs = 0
        self.numComm = 0
        self.numScen = 0
        self.arcOD = {}    # arc index -> (origin node, destination node)
        self.arcV = {}     # arc index -> per-unit flow cost (see solver objectives)
        self.arcCap = {}   # arc index -> capacity
        self.arcCost = {}  # arc index -> fixed (design) cost
        self.commOD = {}   # commodity index -> (origin node, destination node)
        self.commDem = {}  # commodity index -> demand
        with open(dowFile) as fileData:
            reader = csv.reader(fileData, delimiter=' ', skipinitialspace=True)
            for line in reader:
                if reader.line_num == 2:
                    # Header: node, arc and commodity counts (line 1 ignored).
                    self.numNodes = int(line[0])
                    self.numArcs = int(line[1])
                    self.numComm = int(line[2])
                elif (reader.line_num >= 3) and (reader.line_num <= self.numArcs + 2):
                    e = reader.line_num - 3
                    self.arcOD[e] = (int(line[0]), int(line[1]))
                    self.arcV[e] = float(line[2])
                    self.arcCap[e] = float(line[3])
                    self.arcCost[e] = float(line[4])
                elif reader.line_num > self.numArcs + 2:
                    k = reader.line_num - self.numArcs - 3
                    self.commOD[k] = (int(line[0]), int(line[1]))
                    self.commDem[k] = float(line[2])
# %%
class SMCF(MCF):
    """Stochastic multicommodity network design instance and solvers.

    Extends the deterministic `MCF` data with demand scenarios read from a
    scenario file and offers three solution approaches:

    * `solveDE`  -- solve the deterministic equivalent model directly,
    * `Benders`  -- Benders decomposition with multi ('m'), single ('s'),
      adaptive ('a') or adaptive-single ('p') cut strategies over a
      scenario partition,
    * `GAPM`     -- iteratively solve the aggregated full model and refine
      the scenario partition from the subproblem duals.

    The Benders master (`self.MP`) and the dual flow subproblem (`self.SP`)
    are built once in the constructor and then modified in place.
    """
    def __init__(self, dowFile, scenFile):
        """Parse deterministic data, then scenarios, then build MP and SP.

        Scenario file format: line 1 = number of scenarios; each following
        line = probability followed by one demand value per commodity.
        """
        MCF.__init__(self,dowFile)
        self.numScen = 0
        self.probs = None
        self.scens = None
        with open(scenFile) as fileData:
            reader = csv.reader(fileData, delimiter=' ', skipinitialspace=True)
            for line in reader:
                if reader.line_num == 1:
                    self.numScen = int(line[0])
                    self.probs = np.zeros(self.numScen)
                    self.scens = np.zeros((self.numScen,self.numComm))
                else:
                    s = reader.line_num-2
                    self.probs[s] = float(line[0])
                    for k in range(self.numComm):
                        self.scens[s,k] = line[k+1]
        self.formulateMP()
        self.formulateSP()
    def solveDE(self, timeLimit = 86400):
        """Build and solve the deterministic equivalent (all scenarios).

        X[e] are the (relaxed, 0..1) design variables, Y[s,e,k] the flow of
        commodity k on arc e under scenario s. Prints a "FinalReport" line
        on optimality; raises on any other Gurobi status.
        """
        start_time = time.time()
        m = gp.Model("DetEquiv")
        #Defining variables
        X = m.addVars(range(self.numArcs), lb=0, ub=1, name="X")
        Y = m.addVars(range(self.numScen), range(self.numArcs), range(self.numComm), lb=0, name='Y')
        # Capacity: total flow on arc e limited by its (opened) capacity.
        cap = m.addConstrs(gp.quicksum(Y[s,e,k] for k in range(self.numComm)) <= self.arcCap[e]*X[e] for s in range(self.numScen) for e in range(self.numArcs))
        # Flow conservation at every node, per commodity and scenario:
        # +demand at the origin, -demand at the destination, 0 elsewhere.
        flow = {}
        for k in range(self.numComm):
            for v in range(1,self.numNodes+1):
                if self.commOD[k][0] == v:
                    flow[k,v] = m.addConstrs(
                        gp.quicksum(Y[s,e,k] for e in range(self.numArcs) if self.arcOD[e][0] == v)
                        - gp.quicksum(Y[s,e,k] for e in range(self.numArcs) if self.arcOD[e][1] == v)
                        == self.scens[s,k] for s in range(self.numScen))
                elif self.commOD[k][1] == v:
                    flow[k,v] = m.addConstrs(
                        gp.quicksum(Y[s,e,k] for e in range(self.numArcs) if self.arcOD[e][0] == v)
                        - gp.quicksum(Y[s,e,k] for e in range(self.numArcs) if self.arcOD[e][1] == v)
                        == -self.scens[s,k] for s in range(self.numScen))
                else:
                    flow[k,v] = m.addConstrs(
                        gp.quicksum(Y[s,e,k] for e in range(self.numArcs) if self.arcOD[e][0] == v)
                        - gp.quicksum(Y[s,e,k] for e in range(self.numArcs) if self.arcOD[e][1] == v)
                        == 0 for s in range(self.numScen))
        # Objective: fixed design cost + expected flow cost.
        m.setObjective(
            gp.quicksum(self.arcCost[e]*X[e] for e in range(self.numArcs))
            + gp.quicksum(self.arcV[e]*self.probs[s]*Y[s,e,k] for s in range(self.numScen) for e in range(self.numArcs) for k in range(self.numComm))
        )
        m.update()
        m.Params.timeLimit = timeLimit
        m.Params.Threads = 4
        m.optimize()
        if m.status == GRB.OPTIMAL:
            print("FinalReport: %d %f %f %f %d %d %d %f"
                % (0,m.ObjVal,m.ObjVal,0,0,0,self.numScen,time.time()-start_time))
        else:
            raise Exception("Gurobi solStatus "+str(m.status))
    def formulateMP(self):
        """Build the Benders master: design vars X plus one theta per scenario."""
        self.MP = gp.Model("MasterProblem")
        #Defining variables
        X = self.MP.addVars(range(self.numArcs), lb=0, ub=1, name="X")
        theta = self.MP.addVars(range(self.numScen), lb=0, name="theta")
        # Objective: fixed design cost + expected recourse approximation.
        self.MP.setObjective(
            gp.quicksum(self.arcCost[e]*X[e] for e in range(self.numArcs))
            + gp.quicksum(self.probs[s]*theta[s] for s in range(self.numScen))
        )
        self._varX = X
        self._varTheta = theta
        ## set parameters
        self.MP.Params.OutputFlag = 0
        self.MP.Params.Threads = 4
    def formulateSP(self):
        """Build the DUAL of the flow subproblem (lambda: node potentials,
        mu: capacity duals). Objective coefficients are filled in later by
        `SPsetX` / `SPsolve`; InfUnbdInfo is enabled so unbounded rays are
        available for feasibility cuts."""
        self.SP = gp.Model("SubProblemDual")
        #Defining variables
        lambd = self.SP.addVars(range(1,self.numNodes+1), range(self.numComm), lb=-float('inf'), name="lambda")
        mu = self.SP.addVars(range(self.numArcs), lb=0, name="mu")
        # Dual feasibility: reduced cost of each arc/commodity flow.
        self.SP.addConstrs(lambd[self.arcOD[e][0],k] - lambd[self.arcOD[e][1],k] - mu[e] <= self.arcV[e] for e in range(self.numArcs) for k in range(self.numComm))
        self.SP.setObjective(0, GRB.MAXIMIZE)
        ## Copy variable to acces them later
        self._varLambda = lambd
        self._varMu = mu
        ## set parameters
        self.SP.Params.InfUnbdInfo = 1
        self.SP.Params.OutputFlag = 0
        self.SP.Params.Threads = 4
    # Set objective for mu variables given an x
    def SPsetX(self, X):
        for e in range(self.numArcs):
            self._varMu[e].obj = -self.arcCap[e]*X[e]
    # Set objective of lambda variables, solve the problem and returns solution
    def SPsolve(self, Demand):
        """Solve the dual subproblem for a demand vector.

        Returns (1, objective, lambda-differences, mu) at optimality, or
        (0, inf, ray lambda-differences, ray mu) when unbounded (which
        yields a feasibility cut). Raises for any other status.
        """
        for k in range(self.numComm):
            self._varLambda[self.commOD[k][0],k].obj = Demand[k]
            self._varLambda[self.commOD[k][1],k].obj = -Demand[k]
        self.SP.optimize()
        # Case optimum found
        if self.SP.status == GRB.OPTIMAL:
            solMu = np.array(self.SP.getAttr('x',self._varMu).values())
            solDiffLambda = np.array([self._varLambda[self.commOD[k][0],k].x - self._varLambda[self.commOD[k][1],k].x for k in range(self.numComm)])
            return(1, self.SP.ObjVal, solDiffLambda, solMu)
        # if unbounded get ray
        elif self.SP.status == GRB.UNBOUNDED:
            solMu = np.array(self.SP.getAttr('UnbdRay',self._varMu).values())
            solDiffLambda = np.array([self._varLambda[self.commOD[k][0],k].UnbdRay - self._varLambda[self.commOD[k][1],k].UnbdRay for k in range(self.numComm)])
            return(0, float('inf'), solDiffLambda, solMu)
        else:
            raise Exception("Gurobi solStatus "+str(self.SP.status))
    # Solve master problem
    def MPsolve(self):
        """Solve the master; return (objective, X values, theta values)."""
        self.MP.optimize()
        if self.MP.status == GRB.OPTIMAL:
            solX = np.array(self.MP.getAttr('x',self._varX).values())
            solT = np.array(self.MP.getAttr('x',self._varTheta).values())
            return(self.MP.ObjVal, solX, solT)
        else:
            raise Exception("Gurobi solStatus "+str(self.MP.status))
    # Benders
    def Benders(self, method = 'm', timeLimit = 86400, tol_optcut = 1e-5, tol_stopRgap = 1e-6, tol_stopAgap = 1e-6):
        """Benders decomposition loop.

        method: 'm' multi-cut (one theta cut per scenario/partition cell),
        's' single cut, 'a' adaptive partition multi-cut, 'p' adaptive
        partition single-cut. Iterates master/subproblem until the gap
        closes or timeLimit (seconds) elapses, printing one progress line
        per iteration and a "FinalReport" line on convergence.
        """
        ub = float('inf')
        lb = -float('inf')
        nOptCuts = 0
        nFeasCuts = 0
        # partitionId[s] = index of the partition cell scenario s belongs to.
        partitionId = np.zeros(self.numScen)
        sizePartition = 1
        if (method != 'a') and (method != 'p'):
            # Non-adaptive methods use the finest partition (one cell per scenario).
            partitionId = np.arange(self.numScen)
            sizePartition = self.numScen
        start_time = time.time()
        dLambdasDiff = np.zeros((self.numScen, self.numComm))
        it = 1
        while(time.time() - start_time < timeLimit):
            # Solve master
            (cLB,X,theta) = self.MPsolve()
            #print("Iter %d: master = %f\n" % (it,cLB))
            lb = max(lb,cLB)
            # fix X on the subproblem
            self.SPsetX(X)
            #current UB including X costs
            cUB = sum(self.arcCost[e]*X[e] for e in range(self.numArcs))
            #info for single cuts
            noInfCutAdded = True
            singleCutPartA = 0
            singleCutPartB = np.zeros(self.numArcs)
            # info for adaptive cuts
            noCutAdded = True
            # Solve subproblem for each scenario
            # for s in range(self.numScen):
            #     (stat,objSP,dLambda, dMu) = self.SPsolve(self.scens[s])
            for p in range(sizePartition):
                # Warning: assuming equiprobable for numerical stability
                # if not it should be np.average()
                # demP = self.scens[s]
                demP = np.sum(self.scens[partitionId==p], axis=0)/np.sum(partitionId==p)
                probP = np.sum(partitionId==p)/self.numScen
                (stat,objSP,dLambda, dMu) = self.SPsolve(demP)
                if stat == 0: # Unbounded
                    # Feasibility cut
                    self.MP.addConstr(
                        gp.quicksum(demP[k] * dLambda[k] for k in range(self.numComm))
                        - gp.quicksum(dMu[e]*self.arcCap[e]*self._varX[e] for e in range(self.numArcs))
                        <= 0
                    )
                    nFeasCuts += 1
                    noInfCutAdded = False
                    noCutAdded = False
                else: # Optimum
                    # dLambdasDiff[s] = dLambda
                    if (method == 'm') or (method == 'a'):
                        #Optimality cut
                        partA = sum(demP[k] * dLambda[k] for k in range(self.numComm))
                        partB = -sum(dMu[e]*self.arcCap[e]*X[e] for e in range(self.numArcs))
                        # Warning: assuming equiprobable for numerical stability
                        # Only add the cut if it is violated by the current theta.
                        if partA+partB > (sum(theta[partitionId==p])/np.sum(partitionId==p)) + tol_optcut:
                            scen = np.extract(partitionId==p,range(self.numScen)).tolist()
                            self.MP.addConstr(
                                gp.quicksum(demP[k] * dLambda[k] for k in range(self.numComm))
                                - gp.quicksum(dMu[e]*self.arcCap[e]*self._varX[e] for e in range(self.numArcs))
                                <= gp.quicksum(self._varTheta[s] for s in scen)/np.sum(partitionId==p))
                            nOptCuts += 1
                            noCutAdded = False
                    elif ((method == 's') or (method == 'p')) and noInfCutAdded:
                        # Accumulate the aggregated (single) cut coefficients.
                        singleCutPartA += sum(demP[k] * dLambda[k] for k in range(self.numComm))*probP
                        for e in range(self.numArcs):
                            singleCutPartB[e] += -dMu[e]*self.arcCap[e]*probP
                    if (method != 'a') and (method != 'p') :
                        cUB += np.sum(self.probs[partitionId==p])*objSP
                    else:
                        # Aggregated subproblem value is only a lower approximation;
                        # the true UB is computed after the partition check below.
                        cUB = float('inf')
            if ((method == 's') or (method == 'p')) and noInfCutAdded:
                if singleCutPartA + sum(singleCutPartB[e]*X[e] for e in range(self.numArcs)) > sum(self.probs[s]*theta[s] for s in range(self.numScen)) + tol_optcut:
                    self.MP.addConstr(
                        singleCutPartA + gp.quicksum(singleCutPartB[e]*self._varX[e] for e in range(self.numArcs))
                        <= sum(self.probs[s]*self._varTheta[s] for s in range(self.numScen)))
                    nOptCuts += 1
                    noCutAdded = False
            if ((method == 'a') or (method == 'p')) and noCutAdded:
                # No cut added. Check partition and compute UB
                cUB = sum(self.arcCost[e]*X[e] for e in range(self.numArcs))
                newSizePartition = sizePartition
                singleCutPartA = 0
                singleCutPartB = np.zeros(self.numArcs)
                for p in range(sizePartition):
                    scen = np.extract(partitionId==p,range(self.numScen)).tolist()
                    for s in scen:
                        (stat,objSP,dLambda, dMu) = self.SPsolve(self.scens[s])
                        dLambdasDiff[s] = dLambda
                        cUB += objSP*self.probs[s]
                        singleCutPartA += sum(self.scens[s,k] * dLambda[k] for k in range(self.numComm))
                        for e in range(self.numArcs):
                            singleCutPartB[e] += -dMu[e]*self.arcCap[e]
                    # Revise for repeated duals differences
                    (dualsUnique, inverse) = np.unique(dLambdasDiff[scen,:],axis=0, return_inverse=True)
                    numSubsets = dualsUnique.shape[0]
                    if numSubsets > 1:
                        # we add new elements to the partition
                        partitionId[partitionId==p] = (inverse+newSizePartition)
                        # but rename the last one as the current one
                        partitionId[partitionId==(newSizePartition+numSubsets-1)] = p
                        newSizePartition += numSubsets -1
                        #print("Spliting %d into %d new subsets" % (p,numSubsets))
                print("Partition now has %d elements" % newSizePartition)
                sizePartition = newSizePartition
                # Expose the latest duals/partition for inspection.
                self.dL = dLambdasDiff
                self.part = partitionId
                if (method == 'p'):
                    singleCutPartA = singleCutPartA/self.numScen
                    singleCutPartB = singleCutPartB/self.numScen
                    #We add an extra optimality cut. I should be all scenarios feasible
                    if singleCutPartA + sum(singleCutPartB[e]*X[e] for e in range(self.numArcs)) > sum(self.probs[s]*theta[s] for s in range(self.numScen)) + tol_optcut:
                        self.MP.addConstr(
                            singleCutPartA + gp.quicksum(singleCutPartB[e]*self._varX[e] for e in range(self.numArcs))
                            <= sum(self.probs[s]*self._varTheta[s] for s in range(self.numScen)))
                        nOptCuts += 1
                        noCutAdded = False
            #print("Iter %d: master = %f subp = %f gap = %f\n" % (it,cLB,cUB, cUB/cLB-1))
            ub = min(ub, cUB)
            elap_time = time.time()
            #print("It=%d t=%f LB=%8.2f UB=%8.2f rgap=%8.2e nF=%d nO=%d"
            #    % (it,elap_time-start_time,lb,ub,ub/(lb+1e-6)-1,nFeasCuts,nOptCuts))
            print("%d %8.2f %8.2f %8.2e %d %d %d %f"
                % (it,lb,ub,ub/(lb+1e-6)-1,nFeasCuts,nOptCuts,sizePartition,elap_time-start_time))
            if (ub-lb < tol_stopRgap) or (ub/(lb+1e-6)-1 < tol_stopRgap) :
                print("FinalReport: %d %f %f %f %d %d %d %f"
                    % (it,lb,ub,ub/(lb+1e-6)-1,nFeasCuts,nOptCuts,sizePartition,elap_time-start_time))
                break
            it += 1
    def MPsolveFull(self,sizePartition,partitionId):
        """Solve the full (aggregated) model over the current partition.

        Like `solveDE` but with one "scenario" per partition cell, using
        cell-average demands and cell probabilities. Returns (objective, X).
        """
        m = gp.Model("GAPM")
        #Defining variables
        X = m.addVars(range(self.numArcs), lb=0, ub=1, name="X")
        Y = m.addVars(range(sizePartition), range(self.numArcs), range(self.numComm), lb=0, name='Y')
        cap = m.addConstrs(gp.quicksum(Y[s,e,k] for k in range(self.numComm)) <= self.arcCap[e]*X[e] for s in range(sizePartition) for e in range(self.numArcs))
        flow = {}
        # Aggregate demands/probabilities per partition cell.
        demP = np.zeros((sizePartition,self.numComm))
        probP = np.zeros(sizePartition)
        for p in range(sizePartition):
            demP[p] = np.sum(self.scens[partitionId==p], axis=0)/np.sum(partitionId==p)
            probP[p] = np.sum(partitionId==p)/self.numScen
        for k in range(self.numComm):
            for v in range(1,self.numNodes+1):
                if self.commOD[k][0] == v:
                    flow[k,v] = m.addConstrs(
                        gp.quicksum(Y[s,e,k] for e in range(self.numArcs) if self.arcOD[e][0] == v)
                        - gp.quicksum(Y[s,e,k] for e in range(self.numArcs) if self.arcOD[e][1] == v)
                        == demP[s,k] for s in range(sizePartition))
                elif self.commOD[k][1] == v:
                    flow[k,v] = m.addConstrs(
                        gp.quicksum(Y[s,e,k] for e in range(self.numArcs) if self.arcOD[e][0] == v)
                        - gp.quicksum(Y[s,e,k] for e in range(self.numArcs) if self.arcOD[e][1] == v)
                        == -demP[s,k] for s in range(sizePartition))
                else:
                    flow[k,v] = m.addConstrs(
                        gp.quicksum(Y[s,e,k] for e in range(self.numArcs) if self.arcOD[e][0] == v)
                        - gp.quicksum(Y[s,e,k] for e in range(self.numArcs) if self.arcOD[e][1] == v)
                        == 0 for s in range(sizePartition))
        m.setObjective(
            gp.quicksum(self.arcCost[e]*X[e] for e in range(self.numArcs))
            + gp.quicksum(self.arcV[e]*probP[s]*Y[s,e,k] for s in range(sizePartition) for e in range(self.numArcs) for k in range(self.numComm))
        )
        m.update()
        m.Params.OutputFlag = 0
        m.Params.Threads = 4
        m.optimize()
        if m.status == GRB.OPTIMAL:
            solX = np.array(m.getAttr('x',X).values())
            return(m.ObjVal, solX)
        else:
            raise Exception("Gurobi solStatus "+str(m.status))
    def GAPM(self, timeLimit = 86400, tol_optcut = 1e-5, tol_stopRgap = 1e-6, tol_stopAgap = 1e-6):
        """Generalized Adaptive Partition Method loop.

        Alternates solving the aggregated full model (`MPsolveFull`) with
        per-scenario subproblems whose duals refine the partition, until
        the bound gap closes or timeLimit (seconds) elapses.
        """
        ub = float('inf')
        lb = -float('inf')
        # Start from the coarsest partition: all scenarios in one cell.
        partitionId = np.zeros(self.numScen)
        sizePartition = 1
        start_time = time.time()
        dLambdasDiff = np.zeros((self.numScen, self.numComm))
        it = 1
        while(time.time() - start_time < timeLimit):
            # Solve master
            (cLB,X) = self.MPsolveFull(sizePartition,partitionId)
            #print("Iter %d: master = %f\n" % (it,cLB))
            lb = max(lb,cLB)
            # fix X on the subproblem
            self.SPsetX(X)
            #current UB including X costs
            cUB = sum(self.arcCost[e]*X[e] for e in range(self.numArcs))
            newSizePartition = sizePartition
            for p in range(sizePartition):
                scen = np.extract(partitionId==p,range(self.numScen)).tolist()
                for s in scen:
                    (stat,objSP,dLambda, dMu) = self.SPsolve(self.scens[s])
                    dLambdasDiff[s] = dLambda
                    cUB += objSP*self.probs[s]
                # Revise for repeated duals differences
                (dualsUnique, inverse) = np.unique(dLambdasDiff[scen,:],axis=0, return_inverse=True)
                numSubsets = dualsUnique.shape[0]
                if numSubsets > 1:
                    # we add new elements to the partition
                    partitionId[partitionId==p] = (inverse+newSizePartition)
                    # but rename the last one as the current one
                    partitionId[partitionId==(newSizePartition+numSubsets-1)] = p
                    newSizePartition += numSubsets -1
                    #print("Spliting %d into %d new subsets" % (p,numSubsets))
            print("Partition now has %d elements" % newSizePartition)
            sizePartition = newSizePartition
            ub = min(ub, cUB)
            elap_time = time.time()
            #print("It=%d t=%f LB=%8.2f UB=%8.2f rgap=%8.2e nF=%d nO=%d"
            #    % (it,elap_time-start_time,lb,ub,ub/(lb+1e-6)-1,nFeasCuts,nOptCuts))
            print("%d %8.2f %8.2f %8.2e %d %d %d %f"
                % (it,lb,ub,ub/(lb+1e-6)-1,0,0,sizePartition,elap_time-start_time))
            if (ub-lb < tol_stopRgap) or (ub/(lb+1e-6)-1 < tol_stopRgap) :
                print("FinalReport: %d %f %f %f %d %d %d %f"
                    % (it,lb,ub,ub/(lb+1e-6)-1,0,0,sizePartition,elap_time-start_time))
                break
            it += 1
# %%
# prob2 = SMCF('/Users/emoreno/Code/BendersGAPM-MCF/instances/r04.1.dow','/Users/emoreno/Code/BendersGAPM-MCF/instances/r04-0-100')
# prob2.GAPM()
# prob2.Benders('p', 500)
# %%
# dem = list(prob2.commDem.values())
# prob2.SPsolve(dem)
# # %%
# prob2.SPsetX(np.ones(prob2.numArcs))
# # %%
# prob2.SPsolve(list(prob2.commDem.values()))
# # %%
# prob2.MPsolve()
# # %%
# # %%
# %%
| [
"csv.reader",
"numpy.sum",
"numpy.zeros",
"gurobipy.Model",
"time.time",
"numpy.arange",
"gurobipy.quicksum",
"numpy.unique"
] | [((2276, 2287), 'time.time', 'time.time', ([], {}), '()\n', (2285, 2287), False, 'import time\n'), ((2300, 2320), 'gurobipy.Model', 'gp.Model', (['"""DetEquiv"""'], {}), "('DetEquiv')\n", (2308, 2320), True, 'import gurobipy as gp\n'), ((4556, 4581), 'gurobipy.Model', 'gp.Model', (['"""MasterProblem"""'], {}), "('MasterProblem')\n", (4564, 4581), True, 'import gurobipy as gp\n'), ((5155, 5181), 'gurobipy.Model', 'gp.Model', (['"""SubProblemDual"""'], {}), "('SubProblemDual')\n", (5163, 5181), True, 'import gurobipy as gp\n'), ((7777, 7799), 'numpy.zeros', 'np.zeros', (['self.numScen'], {}), '(self.numScen)\n', (7785, 7799), True, 'import numpy as np\n'), ((7987, 7998), 'time.time', 'time.time', ([], {}), '()\n', (7996, 7998), False, 'import time\n'), ((8022, 8060), 'numpy.zeros', 'np.zeros', (['(self.numScen, self.numComm)'], {}), '((self.numScen, self.numComm))\n', (8030, 8060), True, 'import numpy as np\n'), ((15382, 15398), 'gurobipy.Model', 'gp.Model', (['"""GAPM"""'], {}), "('GAPM')\n", (15390, 15398), True, 'import gurobipy as gp\n'), ((15788, 15827), 'numpy.zeros', 'np.zeros', (['(sizePartition, self.numComm)'], {}), '((sizePartition, self.numComm))\n', (15796, 15827), True, 'import numpy as np\n'), ((15843, 15866), 'numpy.zeros', 'np.zeros', (['sizePartition'], {}), '(sizePartition)\n', (15851, 15866), True, 'import numpy as np\n'), ((17984, 18006), 'numpy.zeros', 'np.zeros', (['self.numScen'], {}), '(self.numScen)\n', (17992, 18006), True, 'import numpy as np\n'), ((18063, 18074), 'time.time', 'time.time', ([], {}), '()\n', (18072, 18074), False, 'import time\n'), ((18102, 18140), 'numpy.zeros', 'np.zeros', (['(self.numScen, self.numComm)'], {}), '((self.numScen, self.numComm))\n', (18110, 18140), True, 'import numpy as np\n'), ((532, 590), 'csv.reader', 'csv.reader', (['fileData'], {'delimiter': '""" """', 'skipinitialspace': '(True)'}), "(fileData, delimiter=' ', skipinitialspace=True)\n", (542, 590), False, 'import csv\n'), ((1630, 1688), 
'csv.reader', 'csv.reader', (['fileData'], {'delimiter': '""" """', 'skipinitialspace': '(True)'}), "(fileData, delimiter=' ', skipinitialspace=True)\n", (1640, 1688), False, 'import csv\n'), ((7901, 7924), 'numpy.arange', 'np.arange', (['self.numScen'], {}), '(self.numScen)\n', (7910, 7924), True, 'import numpy as np\n'), ((8591, 8613), 'numpy.zeros', 'np.zeros', (['self.numArcs'], {}), '(self.numArcs)\n', (8599, 8613), True, 'import numpy as np\n'), ((14721, 14732), 'time.time', 'time.time', ([], {}), '()\n', (14730, 14732), False, 'import time\n'), ((19894, 19905), 'time.time', 'time.time', ([], {}), '()\n', (19903, 19905), False, 'import time\n'), ((8090, 8101), 'time.time', 'time.time', ([], {}), '()\n', (8099, 8101), False, 'import time\n'), ((12235, 12257), 'numpy.zeros', 'np.zeros', (['self.numArcs'], {}), '(self.numArcs)\n', (12243, 12257), True, 'import numpy as np\n'), ((15928, 15972), 'numpy.sum', 'np.sum', (['self.scens[partitionId == p]'], {'axis': '(0)'}), '(self.scens[partitionId == p], axis=0)\n', (15934, 15972), True, 'import numpy as np\n'), ((15971, 15995), 'numpy.sum', 'np.sum', (['(partitionId == p)'], {}), '(partitionId == p)\n', (15977, 15995), True, 'import numpy as np\n'), ((16017, 16041), 'numpy.sum', 'np.sum', (['(partitionId == p)'], {}), '(partitionId == p)\n', (16023, 16041), True, 'import numpy as np\n'), ((18178, 18189), 'time.time', 'time.time', ([], {}), '()\n', (18187, 18189), False, 'import time\n'), ((19108, 19169), 'numpy.unique', 'np.unique', (['dLambdasDiff[scen, :]'], {'axis': '(0)', 'return_inverse': '(True)'}), '(dLambdasDiff[scen, :], axis=0, return_inverse=True)\n', (19117, 19169), True, 'import numpy as np\n'), ((1843, 1865), 'numpy.zeros', 'np.zeros', (['self.numScen'], {}), '(self.numScen)\n', (1851, 1865), True, 'import numpy as np\n'), ((1899, 1937), 'numpy.zeros', 'np.zeros', (['(self.numScen, self.numComm)'], {}), '((self.numScen, self.numComm))\n', (1907, 1937), True, 'import numpy as np\n'), ((9077, 9121), 
'numpy.sum', 'np.sum', (['self.scens[partitionId == p]'], {'axis': '(0)'}), '(self.scens[partitionId == p], axis=0)\n', (9083, 9121), True, 'import numpy as np\n'), ((9120, 9144), 'numpy.sum', 'np.sum', (['(partitionId == p)'], {}), '(partitionId == p)\n', (9126, 9144), True, 'import numpy as np\n'), ((9167, 9191), 'numpy.sum', 'np.sum', (['(partitionId == p)'], {}), '(partitionId == p)\n', (9173, 9191), True, 'import numpy as np\n'), ((12951, 13012), 'numpy.unique', 'np.unique', (['dLambdasDiff[scen, :]'], {'axis': '(0)', 'return_inverse': '(True)'}), '(dLambdasDiff[scen, :], axis=0, return_inverse=True)\n', (12960, 13012), True, 'import numpy as np\n'), ((11229, 11265), 'numpy.sum', 'np.sum', (['self.probs[partitionId == p]'], {}), '(self.probs[partitionId == p])\n', (11235, 11265), True, 'import numpy as np\n'), ((4398, 4409), 'time.time', 'time.time', ([], {}), '()\n', (4407, 4409), False, 'import time\n'), ((10253, 10277), 'numpy.sum', 'np.sum', (['(partitionId == p)'], {}), '(partitionId == p)\n', (10259, 10277), True, 'import numpy as np\n'), ((10671, 10715), 'gurobipy.quicksum', 'gp.quicksum', (['(self._varTheta[s] for s in scen)'], {}), '(self._varTheta[s] for s in scen)\n', (10682, 10715), True, 'import gurobipy as gp\n'), ((10716, 10740), 'numpy.sum', 'np.sum', (['(partitionId == p)'], {}), '(partitionId == p)\n', (10722, 10740), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# Copyright 2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from stellargraph import IndexedArray
def test_indexed_array_empty():
    """A default-constructed IndexedArray has an empty index and a 0x0 value array."""
    empty = IndexedArray()
    assert empty.index == range(0)
    np.testing.assert_array_equal(empty.values, np.empty((0, 0)))
def test_indexed_array_non_empty():
    """Index and values handed to the constructor are stored without copying."""
    values = np.random.rand(3, 4, 5)

    # default index: positional range over the rows
    frame = IndexedArray(values)
    assert frame.index == range(3)
    assert frame.values is values

    # explicit indices of several types are kept by reference ('is' checks
    # validate that there's no copying of data)
    for ids in (["a", "b", "c"], np.array([10, -1, 2]), range(106, 100, -2)):
        frame = IndexedArray(values, index=ids)
        assert frame.index is ids
        assert frame.values is values
def test_indexed_array_invalid():
    """Constructor validation: wrong value types/shapes and bad indices raise."""
    values = np.random.rand(3, 4, 5)
    # values must be a NumPy array ...
    with pytest.raises(TypeError, match="values: expected a NumPy array .* found int"):
        IndexedArray(123)
    # ... with at least two dimensions (rows x features)
    with pytest.raises(
        ValueError,
        match=r"values: expected an array with shape .* found shape \(\) of length 0",
    ):
        IndexedArray(np.zeros(()))
    with pytest.raises(
        ValueError,
        match=r"values: expected an array with shape .* found shape \(123,\) of length 1",
    ):
        IndexedArray(np.zeros(123))
    # check that the index `len`-failure works with or without index inference
    with pytest.raises(TypeError, match="index: expected a sequence .* found int"):
        IndexedArray(index=0)
    with pytest.raises(TypeError, match="index: expected a sequence .* found int"):
        IndexedArray(values, index=123)
    # index length must agree with the number of value rows
    with pytest.raises(
        ValueError, match="values: expected the index length 2 .* found 3 rows"
    ):
        IndexedArray(values, index=range(0, 3, 2))
| [
"numpy.empty",
"numpy.zeros",
"pytest.raises",
"numpy.array",
"numpy.random.rand",
"stellargraph.IndexedArray"
] | [((718, 732), 'stellargraph.IndexedArray', 'IndexedArray', ([], {}), '()\n', (730, 732), False, 'from stellargraph import IndexedArray\n'), ((919, 940), 'numpy.array', 'np.array', (['[10, -1, 2]'], {}), '([10, -1, 2])\n', (927, 940), True, 'import numpy as np\n'), ((991, 1014), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (1005, 1014), True, 'import numpy as np\n'), ((1105, 1125), 'stellargraph.IndexedArray', 'IndexedArray', (['values'], {}), '(values)\n', (1117, 1125), False, 'from stellargraph import IndexedArray\n'), ((1208, 1244), 'stellargraph.IndexedArray', 'IndexedArray', (['values'], {'index': 'list_ids'}), '(values, index=list_ids)\n', (1220, 1244), False, 'from stellargraph import IndexedArray\n'), ((1327, 1364), 'stellargraph.IndexedArray', 'IndexedArray', (['values'], {'index': 'array_ids'}), '(values, index=array_ids)\n', (1339, 1364), False, 'from stellargraph import IndexedArray\n'), ((1448, 1485), 'stellargraph.IndexedArray', 'IndexedArray', (['values'], {'index': 'range_ids'}), '(values, index=range_ids)\n', (1460, 1485), False, 'from stellargraph import IndexedArray\n'), ((1605, 1628), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (1619, 1628), True, 'import numpy as np\n'), ((816, 832), 'numpy.empty', 'np.empty', (['(0, 0)'], {}), '((0, 0))\n', (824, 832), True, 'import numpy as np\n'), ((1639, 1716), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""values: expected a NumPy array .* found int"""'}), "(TypeError, match='values: expected a NumPy array .* found int')\n", (1652, 1716), False, 'import pytest\n'), ((1726, 1743), 'stellargraph.IndexedArray', 'IndexedArray', (['(123)'], {}), '(123)\n', (1738, 1743), False, 'from stellargraph import IndexedArray\n'), ((1754, 1864), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""values: expected an array with shape .* found shape \\\\(\\\\) of length 0"""'}), "(ValueError, match=\n 'values: 
expected an array with shape .* found shape \\\\(\\\\) of length 0')\n", (1767, 1864), False, 'import pytest\n'), ((1928, 2047), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""values: expected an array with shape .* found shape \\\\(123,\\\\) of length 1"""'}), "(ValueError, match=\n 'values: expected an array with shape .* found shape \\\\(123,\\\\) of length 1'\n )\n", (1941, 2047), False, 'import pytest\n'), ((2186, 2259), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""index: expected a sequence .* found int"""'}), "(TypeError, match='index: expected a sequence .* found int')\n", (2199, 2259), False, 'import pytest\n'), ((2269, 2290), 'stellargraph.IndexedArray', 'IndexedArray', ([], {'index': '(0)'}), '(index=0)\n', (2281, 2290), False, 'from stellargraph import IndexedArray\n'), ((2301, 2374), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""index: expected a sequence .* found int"""'}), "(TypeError, match='index: expected a sequence .* found int')\n", (2314, 2374), False, 'import pytest\n'), ((2384, 2415), 'stellargraph.IndexedArray', 'IndexedArray', (['values'], {'index': '(123)'}), '(values, index=123)\n', (2396, 2415), False, 'from stellargraph import IndexedArray\n'), ((2426, 2517), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""values: expected the index length 2 .* found 3 rows"""'}), "(ValueError, match=\n 'values: expected the index length 2 .* found 3 rows')\n", (2439, 2517), False, 'import pytest\n'), ((1904, 1916), 'numpy.zeros', 'np.zeros', (['()'], {}), '(())\n', (1912, 1916), True, 'import numpy as np\n'), ((2082, 2095), 'numpy.zeros', 'np.zeros', (['(123)'], {}), '(123)\n', (2090, 2095), True, 'import numpy as np\n')] |
#! /usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
from nephelae.database import NephelaeDataServer
from nephelae_utils.analysis import TimedData, estimate_wind
# This is an example showing how to estimate the advective wind (average (x,y)
# wind) from the data of an aircraft flight.
# First loading a flight database.
databasePath = '/home/pnarvor/work/nephelae/data/barbados/post_processing/cams_logs/flight_02_08_03/database/database01.neph'
database = NephelaeDataServer.load(databasePath)
# Then we have to find a suitable section of the flight from which estimating
# the wind. A typical good flight section is when an aircraft is performing a
# circle (which it always does at some points in the flight, either to change
# altitude or the half circles at the end of hippodromes). To find such a
# section, plotting the flight trajectory does really help.
# To find the circles, one can look at two plots (t,x) and (t,y) and find
# section where the two curves both have a kind of "zigzag", sinusoidal-ish
# shape.
# To plot the flight path of the aircraft, one has to fetch the aircraft
# position from the database.
status7 = database['7','STATUS'](sortCriteria=lambda x: x.position.t)[:]
position7 = np.array([[s.position.t, s.position.x, s.position.y, s.position.z] for s in status7])
status10 = database['10','STATUS'](sortCriteria=lambda x: x.position.t)[:]
position10 = np.array([[s.position.t, s.position.x, s.position.y, s.position.z] for s in status10])
fig, axes = plt.subplots(2,1)
axes[0].plot( position7[:,0], position7[:,1], label="East 7")
axes[0].plot( position7[:,0], position7[:,2], label="North 7")
axes[0].plot(position10[:,0], position10[:,1], label="East 10")
axes[0].plot(position10[:,0], position10[:,2], label="North 10")
axes[0].legend(loc='upper right')
axes[0].grid()
axes[0].set_xlabel('Time (s)')
axes[0].set_ylabel('(m)')
axes[1].plot( position7[:,1], position7[:,2], label="Aircraft 7")
axes[1].plot(position10[:,1], position10[:,2], label="Aircraft 10")
axes[1].legend(loc='upper right')
axes[1].grid()
axes[1].set_xlabel('East (m)')
axes[1].set_ylabel('North (m)')
axes[1].set_aspect('equal')
# After looking at the plots, on this particular dataset, a good interval for
# aircraft 7 is between 630 seconds and 930 seconds
wind7, err = estimate_wind(database, '7', [630, 930])
print("Aircraft 7 wind estimation (m/s) : [east : {:.2f}, north : {:.2f}]".format(wind7[0], wind7[1]))
print("Standard deviation of the error (m/s): {:.2f}".format(err))
# After looking at the plots, on this particular dataset, a good interval for
# aircraft 10 is between 875 seconds and 1040 seconds
wind10, err = estimate_wind(database, '10', [875, 1040])
print("Aircraft 10 wind estimation (m/s) : [east : {:.2f}, north : {:.2f}]".format(wind10[0], wind10[1]))
print("Standard deviation of the error (m/s): {:.2f}".format(err))
# set block to True if display window disappear as soon as they are displayed
plt.show(block=False)
| [
"nephelae.database.NephelaeDataServer.load",
"nephelae_utils.analysis.estimate_wind",
"matplotlib.pyplot.show",
"numpy.array",
"matplotlib.pyplot.subplots"
] | [((481, 518), 'nephelae.database.NephelaeDataServer.load', 'NephelaeDataServer.load', (['databasePath'], {}), '(databasePath)\n', (504, 518), False, 'from nephelae.database import NephelaeDataServer\n'), ((1241, 1330), 'numpy.array', 'np.array', (['[[s.position.t, s.position.x, s.position.y, s.position.z] for s in status7]'], {}), '([[s.position.t, s.position.x, s.position.y, s.position.z] for s in\n status7])\n', (1249, 1330), True, 'import numpy as np\n'), ((1417, 1507), 'numpy.array', 'np.array', (['[[s.position.t, s.position.x, s.position.y, s.position.z] for s in status10]'], {}), '([[s.position.t, s.position.x, s.position.y, s.position.z] for s in\n status10])\n', (1425, 1507), True, 'import numpy as np\n'), ((1517, 1535), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (1529, 1535), True, 'import matplotlib.pyplot as plt\n'), ((2317, 2357), 'nephelae_utils.analysis.estimate_wind', 'estimate_wind', (['database', '"""7"""', '[630, 930]'], {}), "(database, '7', [630, 930])\n", (2330, 2357), False, 'from nephelae_utils.analysis import TimedData, estimate_wind\n'), ((2679, 2721), 'nephelae_utils.analysis.estimate_wind', 'estimate_wind', (['database', '"""10"""', '[875, 1040]'], {}), "(database, '10', [875, 1040])\n", (2692, 2721), False, 'from nephelae_utils.analysis import TimedData, estimate_wind\n'), ((2977, 2998), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (2985, 2998), True, 'import matplotlib.pyplot as plt\n')] |
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import os
import time
import tensorflow as tf
from tensorflow import keras
import random
class dataset(object):
    """Simple in-memory train/test container with sequential and random sampling.

    Data and labels are stored as numpy arrays. ``take`` fetches by explicit
    index, ``iterate`` walks a split sequentially (wrapping around), and
    ``rand`` draws every sample exactly once per epoch before resetting.

    Fixes over the original implementation: string/small-int comparisons used
    identity (`is`) instead of equality (`==`), which only worked through
    CPython interning; and `rand` contained a duplicated `seed = seed = ...`
    assignment plus copy-pasted draw logic for the two splits.
    """

    def __init__(self, train_data=None, train_labels=None, test_data=None, test_labels=None):
        if train_data is not None:
            self.train_data = np.asarray(train_data)
            self.train_size = self.train_data.shape[0]
            # 0/1 flags marking which train samples were already drawn by rand()
            self.train_seed_hist = [0 for i in range(self.train_size)]
        else:
            self.train_data = None
        if train_labels is not None:
            self.train_labels = np.asarray(train_labels)
        else:
            self.train_labels = None
        if test_data is not None:
            self.test_data = np.asarray(test_data)
            self.test_size = self.test_data.shape[0]
            self.test_seed_hist = [0 for i2 in range(self.test_size)]
        else:
            self.test_data = None
        if test_labels is not None:
            self.test_labels = np.asarray(test_labels)
        else:
            self.test_labels = None
        # cursor of the last sample returned by iterate(); -1 means "not started"
        self.train_call = -1
        self.test_call = -1

    def take(self, idx, type='train'):
        """Return (data, labels) at ``idx`` — or data alone when labels are absent.

        Returns None for an unknown ``type`` or a missing split (as before).
        """
        if type == 'train':
            if self.train_data is not None:
                if self.train_labels is not None:
                    return self.train_data[idx], self.train_labels[idx]
                return self.train_data[idx]
        elif type == 'test':
            if self.test_data is not None:
                if self.test_labels is not None:
                    return self.test_data[idx], self.test_labels[idx]
                return self.test_data[idx]

    def iterate(self, type='train', from_start=False):
        """Return the next sample of the split, wrapping to index 0 at the end.

        ``from_start=True`` restarts the walk from the first sample.
        """
        if type == 'train':
            if self.train_data is not None:
                # wrap-around takes precedence over from_start (original order)
                if self.train_call == self.train_size - 1 or from_start:
                    self.train_call = 0
                else:
                    self.train_call += 1
                if self.train_labels is not None:
                    return self.train_data[self.train_call], self.train_labels[self.train_call]
                return self.train_data[self.train_call]
        elif type == 'test':
            if self.test_data is not None:
                if self.test_call == self.test_size - 1 or from_start:
                    self.test_call = 0
                else:
                    self.test_call += 1
                if self.test_labels is not None:
                    return self.test_data[self.test_call], self.test_labels[self.test_call]
                return self.test_data[self.test_call]

    def _draw_unused(self, size, hist):
        """Pick a uniformly random index whose ``hist`` flag is 0 and mark it.

        When every index was already drawn, the history is reset in place so
        a new epoch begins.
        """
        if all(hist):
            for i in range(size):
                hist[i] = 0
        seed = random.randint(0, size - 1)
        while hist[seed] == 1:
            seed = random.randint(0, size - 1)
        hist[seed] = 1
        return seed

    def rand(self, type='train'):
        """Return a random not-yet-drawn sample of the split (epoch sampling)."""
        if type == 'train':
            if self.train_data is not None:
                seed = self._draw_unused(self.train_size, self.train_seed_hist)
                if self.train_labels is not None:
                    return self.train_data[seed], self.train_labels[seed]
                return self.train_data[seed]
        if type == 'test':
            if self.test_data is not None:
                seed = self._draw_unused(self.test_size, self.test_seed_hist)
                if self.test_labels is not None:
                    return self.test_data[seed], self.test_labels[seed]
                return self.test_data[seed]
| [
"numpy.asarray",
"random.randint"
] | [((387, 409), 'numpy.asarray', 'np.asarray', (['train_data'], {}), '(train_data)\n', (397, 409), True, 'import numpy as np\n'), ((660, 684), 'numpy.asarray', 'np.asarray', (['train_labels'], {}), '(train_labels)\n', (670, 684), True, 'import numpy as np\n'), ((803, 824), 'numpy.asarray', 'np.asarray', (['test_data'], {}), '(test_data)\n', (813, 824), True, 'import numpy as np\n'), ((1069, 1092), 'numpy.asarray', 'np.asarray', (['test_labels'], {}), '(test_labels)\n', (1079, 1092), True, 'import numpy as np\n'), ((3272, 3310), 'random.randint', 'random.randint', (['(0)', '(self.train_size - 1)'], {}), '(0, self.train_size - 1)\n', (3286, 3310), False, 'import random\n'), ((3632, 3670), 'random.randint', 'random.randint', (['(0)', '(self.train_size - 1)'], {}), '(0, self.train_size - 1)\n', (3646, 3670), False, 'import random\n'), ((4229, 4266), 'random.randint', 'random.randint', (['(0)', '(self.test_size - 1)'], {}), '(0, self.test_size - 1)\n', (4243, 4266), False, 'import random\n'), ((4583, 4620), 'random.randint', 'random.randint', (['(0)', '(self.test_size - 1)'], {}), '(0, self.test_size - 1)\n', (4597, 4620), False, 'import random\n'), ((3410, 3448), 'random.randint', 'random.randint', (['(0)', '(self.train_size - 1)'], {}), '(0, self.train_size - 1)\n', (3424, 3448), False, 'import random\n'), ((4365, 4402), 'random.randint', 'random.randint', (['(0)', '(self.test_size - 1)'], {}), '(0, self.test_size - 1)\n', (4379, 4402), False, 'import random\n')] |
#!/usr/bin/env python
import numpy as N
from load import ROOT as R
from gna import constructors as C
from gna.env import env
from gna.unittest import *
import numpy as N
def polyratio_prepare(nsname, nominator, denominator):
    """Build a PolyRatio over x=1..10 and check it against a direct evaluation.

    ``nominator``/``denominator`` are polynomial coefficient lists (lowest
    order first); an empty list stands for the constant polynomial 1.
    """
    grid = N.arange(1, 11, dtype='d')
    points = C.Points(grid)
    ns = env.globalns(nsname)

    def register(prefix, coeffs):
        # Declare one fixed parameter per coefficient and evaluate the
        # polynomial on the grid; an empty coefficient list means "1".
        names = []
        expected = 0.0 if coeffs else 1.0
        power = 1.0
        for order, coeff in enumerate(coeffs):
            pname = '%s%i' % (prefix, order)
            ns.defparameter(pname, central=coeff, fixed=True)
            names.append(pname)
            expected += coeff * power
            power *= grid
        return names, expected

    nominator_names, nominator_exp = register('nom', nominator)
    denominator_names, denominator_exp = register('denom', denominator)

    with ns:
        pratio = C.PolyRatio(nominator_names, denominator_names)
    points >> pratio.polyratio.points

    res = pratio.polyratio.ratio.data()
    res_exp = nominator_exp / denominator_exp
    print('Nominator weights', nominator)
    print('Denominator weights', denominator)
    print('Result', res)
    print('Expected', res_exp)
    print()
    assert N.allclose(res, res_exp)
def test_polyratio_v00():
    """Exercise PolyRatio on a grid of nominator/denominator coefficient sets."""
    cases = [
        ([0.0, 1.0], []),
        ([0.0, 2.0], []),
        ([1.0, 0.0], []),
        ([2.0, 0.0], []),
        ([], [0.0, 1.0]),
        ([], [0.0, 2.0]),
        ([], [1.0, 0.0]),
        ([], [2.0, 0.0]),
        ([0.0, 0.0, 2.0], []),
        ([], [0.0, 0.0, 2.0]),
        ([0.0, 2.0], [0.0, 1.0]),
        ([0.0, 0.0, 2.0], [0.0, 2.0]),
        ([0.0, 0.0, 2.0], [0.0, 0.0, 2.0]),
    ]
    # namespace names run v01..v13, matching the original hand-written calls
    for case_no, (nominator, denominator) in enumerate(cases, start=1):
        polyratio_prepare('test_polyratio_v%02d' % case_no, nominator, denominator)
if __name__ == "__main__":
    # Run every test_* function defined in this module.
    run_unittests(globals())
| [
"numpy.allclose",
"gna.constructors.Points",
"gna.env.env.globalns",
"numpy.arange",
"gna.constructors.PolyRatio"
] | [((237, 263), 'numpy.arange', 'N.arange', (['(1)', '(11)'], {'dtype': '"""d"""'}), "(1, 11, dtype='d')\n", (245, 263), True, 'import numpy as N\n'), ((272, 285), 'gna.constructors.Points', 'C.Points', (['inp'], {}), '(inp)\n', (280, 285), True, 'from gna import constructors as C\n'), ((295, 315), 'gna.env.env.globalns', 'env.globalns', (['nsname'], {}), '(nsname)\n', (307, 315), False, 'from gna.env import env\n'), ((1293, 1317), 'numpy.allclose', 'N.allclose', (['res', 'res_exp'], {}), '(res, res_exp)\n', (1303, 1317), True, 'import numpy as N\n'), ((958, 1005), 'gna.constructors.PolyRatio', 'C.PolyRatio', (['nominator_names', 'denominator_names'], {}), '(nominator_names, denominator_names)\n', (969, 1005), True, 'from gna import constructors as C\n')] |
'''
MIT License
Copyright (c) 2022 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import numpy
from datasets import CLEVRDataset, build_predicates
from networks import EmbeddingNet, ReadoutNet
from torch.cuda.amp import autocast
from torch.utils.data import DataLoader
from tqdm import tqdm
import argparse
import json
import numpy as np
import matplotlib.pyplot as plt
import torch
import yaml
def calc_f1(pred, target, mask, majority):
    """Per-column F1 score (in percent) of ``pred`` vs ``target`` over masked entries.

    Columns listed in ``majority`` are inverted in both prediction and target
    before scoring, so that the rarer class is treated as the positive one.

    Args:
        pred: boolean array of shape (samples, predicates) with predicted labels.
        target: boolean array of the same shape with ground-truth labels.
        mask: same-shape array; zero entries are excluded from all counts.
        majority: column indices whose positive/negative sense is flipped.

    Returns:
        1-D float array of per-column F1 scores; columns with no counted
        positives (0/0 division) yield 0 instead of NaN.
    """
    # Work on copies: the previous implementation inverted the majority
    # columns of the caller's arrays in place, corrupting them for later use.
    pred = pred.copy()
    target = target.copy()
    pred[:, majority] = ~pred[:, majority]
    target[:, majority] = ~target[:, majority]
    tp = ((pred & target) * mask).sum(axis=0)
    fp = ((pred & ~target) * mask).sum(axis=0)
    fn = ((~pred & target) * mask).sum(axis=0)
    # 0/0 produces NaN here; suppress the RuntimeWarning and zero it below.
    with np.errstate(divide='ignore', invalid='ignore'):
        precision = tp / (tp + fp) * 100
        recall = tp / (tp + fn) * 100
        f1 = 2 * precision * recall / (precision + recall)
    f1[np.isnan(f1)] = 0
    return f1
def bar_plot_group(data, labels, keys, gap, width, legloc, ylabel, title, legend=True):
    """Draw one annotated bar per (series, key) pair, grouped by key.

    ``data`` is a sequence of dicts (one per series); ``labels`` names each
    series; ``keys`` selects and orders the groups on the x axis.
    """
    group_centers = np.arange(len(keys)) * gap  # the label locations
    # left edge of the first bar in each group, so the group stays centred
    first_bar = group_centers - width * (len(labels) - 1) / 2
    for series_idx, series in enumerate(data):
        heights = [series[key] for key in keys]
        bars = plt.bar(first_bar + series_idx * width, heights, width, label=labels[series_idx])
        autolabel(bars, plt.gca())
    # surface keys are displayed with an 'on_' prefix
    keys = [f'on_{key}' if key in ['tabletop', 'bookshelf'] else key for key in keys]
    plt.xticks(group_centers, keys, rotation=0, fontsize=20)
    plt.ylabel(ylabel, fontsize=18)
    plt.title(title, fontsize=18)
    if legend:
        plt.legend(loc=legloc, fontsize=14)
def autolabel(rects, ax):
    """Attach a text label above each bar in *rects*, displaying its height."""
    for bar in rects:
        h = bar.get_height()
        if h <= 0:
            continue  # no annotation for empty or negative bars
        center = bar.get_x() + bar.get_width() / 2
        ax.annotate(
            f"{h:.1f}",
            xy=(center, h),
            xytext=(0, 3),  # 3 points vertical offset
            textcoords="offset points",
            ha='center', va='bottom'
        )
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Data
    parser.add_argument('--data_dir', default='data/geospa_depth_split/')
    parser.add_argument('--split', default='valA')
    parser.add_argument('--img_h', type=int, default=320)
    parser.add_argument('--img_w', type=int, default=480)
    parser.add_argument('--obj_h', type=int, default=32)
    parser.add_argument('--obj_w', type=int, default=32)
    parser.add_argument('--n_objects', type=int, default=10)
    parser.add_argument('--n_views', type=int, default=1)
    parser.add_argument('--multiview', action='store_true')
    parser.add_argument('--depth', action='store_true')
    parser.add_argument('--xyz', action='store_true')
    parser.add_argument('--world', action='store_true')
    parser.add_argument('--obj_depth', action='store_true')
    # Model
    parser.add_argument('--patch_size', type=int, default=32)
    parser.add_argument('--width', type=int, default=768)
    parser.add_argument('--layers', type=int, default=12)
    parser.add_argument('--heads', type=int, default=12)
    parser.add_argument('--mean_pool', action='store_true')
    parser.add_argument('--type_emb_dim', type=int, default=0)
    parser.add_argument('--hidden_dim', type=int, default=512)
    # Evaluation
    parser.add_argument('--checkpoint', default='log/geospa_train_split_0428/epoch_40.pth')
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('--n_worker', type=int, default=1)
    parser.add_argument('--results_file', default='results_file.txt')
    args = parser.parse_args()
    # Build the full predicate name list and the set of binary relation types.
    objects = [f'object{i:02d}' for i in range(args.n_objects)]
    pred_cfg = {'unary': [], 'binary': ['%s front_of %s', '%s right_of %s', '%s contains %s', '%s supports %s']}
    predicates = build_predicates(objects, pred_cfg['unary'], pred_cfg['binary'])
    types = set()
    for pred in pred_cfg['unary'] + pred_cfg['binary']:
        pref = pred[3:-3]
        if pref == 'on_surface':
            types.add(f"on_{pred.split(', ')[-1][:-1]}")
        else:
            types.add(pref)
    # The commented-out block below runs the model and caches its outputs;
    # evaluation then proceeds from the cached .npy files.
    # data = CLEVRDataset(
    #     f'{args.data_dir}/{args.split}.h5',
    #     f'{args.data_dir}/objects.h5',
    #     args.n_objects, rand_patch=False
    # )
    # loader = DataLoader(data, args.batch_size, num_workers=args.n_worker)
    #
    # model = EmbeddingNet(
    #     (args.img_w, args.img_h), args.patch_size, args.n_objects,
    #     args.width, args.layers, args.heads
    # )
    # head = ReadoutNet(args.width, args.hidden_dim, 0, 4)
    #
    # checkpoint = torch.load(args.checkpoint, map_location='cpu')
    # model.load_state_dict(checkpoint['model'])
    # head.load_state_dict(checkpoint['head'])
    # model = model.cuda().eval()
    # head = head.cuda().eval()
    #
    # predictions = []
    # targets = []
    # masks = []
    # for img, obj_patches, target, mask in tqdm(loader):
    #     img = img.cuda()
    #     obj_patches = obj_patches.cuda()
    #     with torch.no_grad():
    #         emb, attn = model(img, obj_patches)
    #         logits = head(emb)
    #     predictions.append((logits > 0).cpu().numpy())
    #     targets.append(target.bool().numpy())
    #     masks.append(mask.bool().numpy())
    # predictions = np.concatenate(predictions)
    # targets = np.concatenate(targets)
    # masks = np.concatenate(masks)
    #
    # print(predictions.shape, targets.shape, masks.shape)
    # np.save('predictions.npy', predictions)
    # np.save('targets.npy', targets)
    # np.save('masks.npy', masks)
    # NOTE(review): these cached arrays must exist in the working directory —
    # they are produced by the commented-out inference block above.
    predictions = np.load('predictions.npy')
    targets = np.load('targets.npy')
    masks = np.load('masks.npy')
    print(predictions.shape, targets.shape, masks.shape)
    # Column ranges of the logit matrix per predicate; 'all' covers every column.
    predicates_logit_indices = {'all': range(360), 'front_of': range(90), 'right_of': range(90, 180), 'contains': range(180, 270), 'supports': range(270, 360)}
    metrics = {}
    # Fraction (in %) of masked entries that are true in the ground truth.
    metrics['target_true'] = {}
    for predicate, logit_indices in predicates_logit_indices.items():
        metrics['target_true'][predicate] = np.sum(targets[:, logit_indices] * masks[:, logit_indices]) / np.sum(masks[:, logit_indices]) * 100
    # Fraction (in %) of masked entries predicted true.
    metrics['prediction_true'] = {}
    for predicate, logit_indices in predicates_logit_indices.items():
        metrics['prediction_true'][predicate] = np.sum(predictions[:, logit_indices] * masks[:, logit_indices]) / np.sum(masks[:, logit_indices]) * 100
    # Per-entry accuracy over all masked entries.
    metrics['predicate_accuracy'] = {}
    for predicate, logit_indices in predicates_logit_indices.items():
        metrics['predicate_accuracy'][predicate] = np.sum((predictions[:, logit_indices] == targets[:, logit_indices]) * masks[:, logit_indices]) / np.sum(masks[:, logit_indices]) * 100
    # Mean per-scene accuracy. NOTE(review): np.isreal is True for every
    # finite float, so the denominator is effectively the number of scenes.
    metrics['scene_accuracy'] = {}
    for predicate, logit_indices in predicates_logit_indices.items():
        a = np.sum((predictions[:, logit_indices] == targets[:, logit_indices]) * masks[:, logit_indices], axis=-1) / np.sum(masks[:, logit_indices], axis=-1)
        metrics['scene_accuracy'][predicate] = np.nansum(a) / np.sum(np.isreal(a)) * 100
    # Fraction of scenes where every masked entry is predicted correctly.
    metrics['scene_all_accuracy'] = {}
    for predicate, logit_indices in predicates_logit_indices.items():
        a = np.all((predictions[:, logit_indices] == targets[:, logit_indices]) | ~masks[:, logit_indices], axis=-1)
        metrics['scene_all_accuracy'][predicate] = np.nansum(a) / np.sum(np.isreal(a)) * 100
    # Micro-averaged precision/recall/F1 (in %) over masked entries.
    metrics['predicate_precision'] = {}
    metrics['predicate_recall'] = {}
    metrics['predicate_f1'] = {}
    for predicate, logit_indices in predicates_logit_indices.items():
        tp = ((predictions[:, logit_indices] & targets[:, logit_indices]) * masks[:, logit_indices]).sum()
        fp = ((predictions[:, logit_indices] & ~targets[:, logit_indices]) * masks[:, logit_indices]).sum()
        fn = ((~predictions[:, logit_indices] & targets[:, logit_indices]) * masks[:, logit_indices]).sum()
        precision = tp / (tp + fp) * 100
        recall = tp / (tp + fn) * 100
        f1 = 2 * precision * recall / (precision + recall)
        metrics['predicate_precision'][predicate] = precision
        metrics['predicate_recall'][predicate] = recall
        metrics['predicate_f1'][predicate] = f1
    print(metrics)
    # NOTE(review): the file handle from open() is never closed explicitly —
    # relies on CPython reference counting.
    json.dump(metrics, open(args.results_file, 'w'))
    # One subplot per selected metric, one annotated bar per predicate.
    metrics_to_graph = {'scene_accuracy': metrics['scene_accuracy'],
                        'scene_all_accuracy': metrics['scene_all_accuracy'],
                        'predicate_f1': metrics['predicate_f1'],
                        'target_true': metrics['target_true']}
    fig, axs = plt.subplots(len(metrics_to_graph), 1)
    for i, (metric_name, metric) in enumerate(metrics_to_graph.items()):
        width = 10
        xs = numpy.arange(len(metric.keys())) * width * 2
        heights = [value for predicate, value in metric.items()]
        predicates = [predicate for predicate, value in metric.items()]
        bars = axs[i].bar(xs, heights, width, label=predicates)
        for j in range(len(xs)):
            axs[i].annotate(
                f"{heights[j]:.1f}",
                xy=(xs[j], heights[j]),
                xytext=(0, 3),  # 3 points vertical offset
                textcoords="offset points",
                ha='center', va='bottom'
            )
        axs[i].set_ylim([0, 150])
        axs[i].set_xticks(numpy.arange(len(metric.keys())) * 2 * width, metric.keys())
        axs[i].set_title(metric_name)
    plt.subplots_adjust(hspace=0.75)
plt.show() | [
"matplotlib.pyplot.title",
"numpy.load",
"numpy.nansum",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"numpy.sum",
"numpy.isreal",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.legend",
"numpy.isnan",
"numpy.arange",
"matplotlib.pyplot.gca",
"datasets.build_predicates",
"matplotlib.p... | [((2381, 2425), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', 'keys'], {'rotation': '(0)', 'fontsize': '(20)'}), '(x, keys, rotation=0, fontsize=20)\n', (2391, 2425), True, 'import matplotlib.pyplot as plt\n'), ((2506, 2537), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {'fontsize': '(18)'}), '(ylabel, fontsize=18)\n', (2516, 2537), True, 'import matplotlib.pyplot as plt\n'), ((2542, 2571), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(18)'}), '(title, fontsize=18)\n', (2551, 2571), True, 'import matplotlib.pyplot as plt\n'), ((3144, 3169), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3167, 3169), False, 'import argparse\n'), ((4901, 4965), 'datasets.build_predicates', 'build_predicates', (['objects', "pred_cfg['unary']", "pred_cfg['binary']"], {}), "(objects, pred_cfg['unary'], pred_cfg['binary'])\n", (4917, 4965), False, 'from datasets import CLEVRDataset, build_predicates\n'), ((6675, 6701), 'numpy.load', 'np.load', (['"""predictions.npy"""'], {}), "('predictions.npy')\n", (6682, 6701), True, 'import numpy as np\n'), ((6716, 6738), 'numpy.load', 'np.load', (['"""targets.npy"""'], {}), "('targets.npy')\n", (6723, 6738), True, 'import numpy as np\n'), ((6751, 6771), 'numpy.load', 'np.load', (['"""masks.npy"""'], {}), "('masks.npy')\n", (6758, 6771), True, 'import numpy as np\n'), ((10477, 10509), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.75)'}), '(hspace=0.75)\n', (10496, 10509), True, 'import matplotlib.pyplot as plt\n'), ((10514, 10524), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10522, 10524), True, 'import matplotlib.pyplot as plt\n'), ((1789, 1801), 'numpy.isnan', 'np.isnan', (['f1'], {}), '(f1)\n', (1797, 1801), True, 'import numpy as np\n'), ((1969, 1988), 'numpy.arange', 'np.arange', (['n_groups'], {}), '(n_groups)\n', (1978, 1988), True, 'import numpy as np\n'), ((2147, 2202), 'matplotlib.pyplot.bar', 'plt.bar', (['(left + i 
* width)', 'vals', 'width'], {'label': 'labels[i]'}), '(left + i * width, vals, width, label=labels[i])\n', (2154, 2202), True, 'import matplotlib.pyplot as plt\n'), ((2595, 2630), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'legloc', 'fontsize': '(14)'}), '(loc=legloc, fontsize=14)\n', (2605, 2630), True, 'import matplotlib.pyplot as plt\n'), ((8280, 8389), 'numpy.all', 'np.all', (['((predictions[:, logit_indices] == targets[:, logit_indices]) | ~masks[:,\n logit_indices])'], {'axis': '(-1)'}), '((predictions[:, logit_indices] == targets[:, logit_indices]) | ~\n masks[:, logit_indices], axis=-1)\n', (8286, 8389), True, 'import numpy as np\n'), ((2228, 2237), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2235, 2237), True, 'import matplotlib.pyplot as plt\n'), ((7923, 8031), 'numpy.sum', 'np.sum', (['((predictions[:, logit_indices] == targets[:, logit_indices]) * masks[:,\n logit_indices])'], {'axis': '(-1)'}), '((predictions[:, logit_indices] == targets[:, logit_indices]) * masks\n [:, logit_indices], axis=-1)\n', (7929, 8031), True, 'import numpy as np\n'), ((8029, 8069), 'numpy.sum', 'np.sum', (['masks[:, logit_indices]'], {'axis': '(-1)'}), '(masks[:, logit_indices], axis=-1)\n', (8035, 8069), True, 'import numpy as np\n'), ((7153, 7212), 'numpy.sum', 'np.sum', (['(targets[:, logit_indices] * masks[:, logit_indices])'], {}), '(targets[:, logit_indices] * masks[:, logit_indices])\n', (7159, 7212), True, 'import numpy as np\n'), ((7215, 7246), 'numpy.sum', 'np.sum', (['masks[:, logit_indices]'], {}), '(masks[:, logit_indices])\n', (7221, 7246), True, 'import numpy as np\n'), ((7407, 7470), 'numpy.sum', 'np.sum', (['(predictions[:, logit_indices] * masks[:, logit_indices])'], {}), '(predictions[:, logit_indices] * masks[:, logit_indices])\n', (7413, 7470), True, 'import numpy as np\n'), ((7473, 7504), 'numpy.sum', 'np.sum', (['masks[:, logit_indices]'], {}), '(masks[:, logit_indices])\n', (7479, 7504), True, 'import numpy as np\n'), ((7671, 
7770), 'numpy.sum', 'np.sum', (['((predictions[:, logit_indices] == targets[:, logit_indices]) * masks[:,\n logit_indices])'], {}), '((predictions[:, logit_indices] == targets[:, logit_indices]) * masks\n [:, logit_indices])\n', (7677, 7770), True, 'import numpy as np\n'), ((7768, 7799), 'numpy.sum', 'np.sum', (['masks[:, logit_indices]'], {}), '(masks[:, logit_indices])\n', (7774, 7799), True, 'import numpy as np\n'), ((8117, 8129), 'numpy.nansum', 'np.nansum', (['a'], {}), '(a)\n', (8126, 8129), True, 'import numpy as np\n'), ((8436, 8448), 'numpy.nansum', 'np.nansum', (['a'], {}), '(a)\n', (8445, 8448), True, 'import numpy as np\n'), ((8139, 8151), 'numpy.isreal', 'np.isreal', (['a'], {}), '(a)\n', (8148, 8151), True, 'import numpy as np\n'), ((8458, 8470), 'numpy.isreal', 'np.isreal', (['a'], {}), '(a)\n', (8467, 8470), True, 'import numpy as np\n')] |
from keras.engine.training import Model
from keras.engine.topology import merge
import numpy as np
import keras.backend as K
from contextlib import contextmanager
import keras.callbacks as cbks
TODO = "todo"
@contextmanager
def trainable(model, trainable):
    """Temporarily set the ``trainable`` flag of every layer in *model*.

    On entry each layer's current flag is saved and replaced by
    *trainable*; on exit the saved flags are restored.

    Bug fix: restoration now runs in a ``finally`` block, so the flags
    are put back even when the ``with`` body raises an exception (the
    original skipped restoration in that case, leaving the model frozen
    or unfrozen permanently).
    """
    saved_flags = [layer.trainable for layer in model.layers]
    for layer in model.layers:
        layer.trainable = trainable
    try:
        yield
    finally:
        for flag, layer in zip(saved_flags, model.layers):
            layer.trainable = flag
def prob_to_sentence(prob):
    """Convert soft token probabilities to hard one-hot token vectors.

    Parameters
    ----------
    prob : ndarray of shape (batch, time, vocab)
        Per-timestep probability (or score) vectors.

    Returns
    -------
    ndarray of the same shape and dtype as *prob* with a single 1 at the
    argmax position of every (batch, time) slot and 0 elsewhere.
    """
    fake_idx = np.argmax(prob, axis=-1)
    fake = np.zeros_like(prob)
    # Bug fix: the original `fake[:, :, fake_idx] = 1` used the whole
    # (batch, time) index array as fancy indexing on the vocab axis,
    # setting a 1 at *every* index that appeared anywhere in the batch,
    # for all samples.  put_along_axis writes exactly one 1 per slot.
    np.put_along_axis(fake, fake_idx[..., np.newaxis], 1, axis=-1)
    return fake
class SeqGAN:
    """Sequence-GAN training harness built on the Keras 1.x API.

    Couples three models:
      g -- generator: maps (latent z, seed sequence) to a soft per-token
           probability sequence (two inputs, one output, unpacked below).
      d -- discriminator: scores complete (seed + continuation) sequences.
      m -- surrogate model trained to predict the discriminator's score
           from the generator's *soft* output, giving the generator a
           differentiable training signal (the hard one-hot fakes fed to
           d are not differentiable w.r.t. g).
    """
    def __init__(self, g, d, m, g_optimizer, d_optimizer):
        self.g = g
        self.d = d
        self.m = m
        # Unpack the generator's symbolic tensors: latent input, seed
        # sequence input, and the soft probability output.
        self.z, self.seq_input = self.g.inputs
        self.fake_prob, = self.g.outputs
        # Freeze m while wiring the generator-training graph so that
        # fitting g through model_fit_g does not also update m's weights.
        with trainable(m, False):
            m_input = merge([self.seq_input, self.fake_prob], mode='concat', concat_axis=1)
            self.m_realness = self.m(m_input)
            self.model_fit_g = Model([self.z, self.seq_input], [self.m_realness])
            self.model_fit_g.compile(g_optimizer, K.binary_crossentropy)
        self.d.compile(d_optimizer, loss=K.binary_crossentropy)

    def z_shape(self, batch_size=64):
        """Return the shape of a latent batch: (batch_size,) + z dims."""
        # _keras_history is (layer, node_index, tensor_index); only the
        # layer is needed to read the static output shape of z.
        layer, _, _ = self.z._keras_history
        return (batch_size,) + layer.output_shape[1:]

    def sample_z(self, batch_size=64):
        """Sample a batch of latent vectors uniformly from [-1, 1)."""
        shape = self.z_shape(batch_size)
        return np.random.uniform(-1, 1, shape)

    def generate(self, z, seq_input, batch_size=32):
        """Run the generator and return its soft token probabilities."""
        return self.g.predict([z, seq_input], batch_size=batch_size)

    def train_on_batch(self, seq_input, real, d_target=None):
        """One adversarial update of d, m and g on a single batch.

        seq_input -- seed sequences used to condition the generator.
        real      -- real complete sequences for the discriminator.
        d_target  -- optional discriminator labels; defaults to 0 for
                     generated samples followed by 1 for real ones.
        Returns (g_loss, d_loss, m_loss).
        """
        nb_real = len(real)
        nb_fake = len(seq_input)
        if d_target is None:
            d_target = np.concatenate([
                np.zeros((nb_fake, 1)),
                np.ones((nb_real, 1))
            ])
        fake_prob = self.generate(self.sample_z(nb_fake), seq_input)
        # d sees hard one-hot fakes; m is trained below on the soft
        # probabilities to mimic d's judgement of those fakes.
        fake = np.concatenate([seq_input, prob_to_sentence(fake_prob)], axis=1)
        fake_and_real = np.concatenate([fake, real], axis=0)
        d_loss = self.d.train_on_batch(fake_and_real, d_target)
        d_realness = self.d.predict(fake)
        m_loss = self.m.train_on_batch(
            np.concatenate([seq_input, fake_prob], axis=1), d_realness)
        # g is trained (through the frozen m) towards "realness" = 1.
        g_loss = self.model_fit_g.train_on_batch([self.sample_z(nb_fake), seq_input],
                                                 np.ones((nb_fake, 1)))
        return g_loss, d_loss, m_loss

    def fit_generator(self, generator, nb_epoch, nb_batches_per_epoch, callbacks=[],
                      batch_size=None,
                      verbose=False):
        """Keras-style training loop driven by (seq_input, real) batches.

        generator yields (seed sequences, real sequences) tuples; each
        epoch consumes nb_batches_per_epoch of them.

        NOTE(review): callbacks=[] is a mutable default argument; it is
        only read and rebound below, so it happens to be harmless here,
        but a None default would be safer.
        """
        if batch_size is None:
            # Peek one batch to infer the batch size (fakes + reals).
            batch_size = 2*len(next(generator)[0])
        out_labels = ['g', 'd', 'm']
        self.history = cbks.History()
        callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
        if verbose:
            callbacks += [cbks.ProgbarLogger()]
        callbacks = cbks.CallbackList(callbacks)
        # _set_model/_set_params are private Keras 1.x callback hooks.
        callbacks._set_model(self)
        callbacks._set_params({
            'nb_epoch': nb_epoch,
            'nb_sample': nb_batches_per_epoch*batch_size,
            'verbose': verbose,
            'metrics': out_labels,
        })
        callbacks.on_train_begin()
        for e in range(nb_epoch):
            callbacks.on_epoch_begin(e)
            for batch_index, (seq_input, real) in enumerate(generator):
                callbacks.on_batch_begin(batch_index)
                batch_logs = {}
                batch_logs['batch'] = batch_index
                batch_logs['size'] = len(real) + len(seq_input
                )
                outs = self.train_on_batch(seq_input, real)
                for l, o in zip(out_labels, outs):
                    batch_logs[l] = o
                callbacks.on_batch_end(batch_index, batch_logs)
                if batch_index + 1 == nb_batches_per_epoch:
                    break
            callbacks.on_epoch_end(e)
        callbacks.on_train_end()
| [
"numpy.random.uniform",
"keras.engine.topology.merge",
"numpy.zeros_like",
"keras.callbacks.History",
"keras.callbacks.BaseLogger",
"numpy.argmax",
"keras.callbacks.CallbackList",
"numpy.zeros",
"numpy.ones",
"keras.engine.training.Model",
"keras.callbacks.ProgbarLogger",
"numpy.concatenate"
] | [((525, 549), 'numpy.argmax', 'np.argmax', (['prob'], {'axis': '(-1)'}), '(prob, axis=-1)\n', (534, 549), True, 'import numpy as np\n'), ((561, 580), 'numpy.zeros_like', 'np.zeros_like', (['prob'], {}), '(prob)\n', (574, 580), True, 'import numpy as np\n'), ((1472, 1503), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'shape'], {}), '(-1, 1, shape)\n', (1489, 1503), True, 'import numpy as np\n'), ((2086, 2122), 'numpy.concatenate', 'np.concatenate', (['[fake, real]'], {'axis': '(0)'}), '([fake, real], axis=0)\n', (2100, 2122), True, 'import numpy as np\n'), ((2844, 2858), 'keras.callbacks.History', 'cbks.History', ([], {}), '()\n', (2856, 2858), True, 'import keras.callbacks as cbks\n'), ((3016, 3044), 'keras.callbacks.CallbackList', 'cbks.CallbackList', (['callbacks'], {}), '(callbacks)\n', (3033, 3044), True, 'import keras.callbacks as cbks\n'), ((903, 972), 'keras.engine.topology.merge', 'merge', (['[self.seq_input, self.fake_prob]'], {'mode': '"""concat"""', 'concat_axis': '(1)'}), "([self.seq_input, self.fake_prob], mode='concat', concat_axis=1)\n", (908, 972), False, 'from keras.engine.topology import merge\n'), ((1050, 1100), 'keras.engine.training.Model', 'Model', (['[self.z, self.seq_input]', '[self.m_realness]'], {}), '([self.z, self.seq_input], [self.m_realness])\n', (1055, 1100), False, 'from keras.engine.training import Model\n'), ((2281, 2327), 'numpy.concatenate', 'np.concatenate', (['[seq_input, fake_prob]'], {'axis': '(1)'}), '([seq_input, fake_prob], axis=1)\n', (2295, 2327), True, 'import numpy as np\n'), ((2476, 2497), 'numpy.ones', 'np.ones', (['(nb_fake, 1)'], {}), '((nb_fake, 1))\n', (2483, 2497), True, 'import numpy as np\n'), ((2974, 2994), 'keras.callbacks.ProgbarLogger', 'cbks.ProgbarLogger', ([], {}), '()\n', (2992, 2994), True, 'import keras.callbacks as cbks\n'), ((1836, 1858), 'numpy.zeros', 'np.zeros', (['(nb_fake, 1)'], {}), '((nb_fake, 1))\n', (1844, 1858), True, 'import numpy as np\n'), ((1876, 1897), 
'numpy.ones', 'np.ones', (['(nb_real, 1)'], {}), '((nb_real, 1))\n', (1883, 1897), True, 'import numpy as np\n'), ((2880, 2897), 'keras.callbacks.BaseLogger', 'cbks.BaseLogger', ([], {}), '()\n', (2895, 2897), True, 'import keras.callbacks as cbks\n')] |
import geopandas as gpd
gpd.options.use_pygeos=False
import pandas as pd
import os, json, geojson
import glob
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from geopandas.plotting import plot_polygon_collection
# Load Natural Earth country and admin-1 province polygons from ./data.
root = os.getcwd()
ne = gpd.read_file(os.path.join(root,'data','ne_10m_countries.gpkg'))
ne_prov = gpd.read_file(os.path.join(root,'data','ne_10m_admin_1_states_provinces.geojson'))
#ne['N_obs_SPOT'] = np.nan
#ne['N_T_SPOT'] = np.nan
#ne_prov['N_obs_SPOT'] = np.nan
#ne_prov['N_T_SPOT'] = np.nan
# Index by ISO codes so the SPOT/S2 statistics can be joined on them.
ne_prov = ne_prov.set_index('iso_3166_2', drop=False)
ne = ne.set_index('ISO_A2', drop=False)
# Countries rendered at province granularity rather than as one polygon.
do_prov = ['AU','BR','CA','CN','IN','US']
label_df = pd.read_csv(os.path.join(root,'data','label_df.csv'))
SPOT_country_df = pd.read_csv(os.path.join(root,'data','ne_SPOT.csv')).set_index('ISO_A2')
SPOT_prov_df = pd.read_csv(os.path.join(root,'data','ne_prov_SPOT.csv')).set_index('iso_3166_2')
# do SPOT merging
#ne[['ISO_A2','N_obs_SPOT','N_T_SPOT']]
#ne_prov[['iso_3166_2','N_obs_SPOT','N_T_SPOT']]
#print (ne[['ISO_A2','N_obs_SPOT','N_T_SPOT']])
#print (ne_prov[['iso_3166_2','N_obs_SPOT','N_T_SPOT']])
ne = ne.merge(SPOT_country_df, how='left',left_index=True,right_index=True)
print ('ne',ne)
ne_prov = ne_prov.merge(SPOT_prov_df, how='left',left_index=True,right_index=True)
print ('ne_prov',ne_prov)
ne['N_T_SPOT'] = ne['N_T_SPOT'].fillna(0)
# do S2 merging
# N_obs_S2 = observation count per region; N_T_S2 = sum of the binary
# 'label' column (presumably positive detections -- verify upstream).
ne = ne.merge(pd.DataFrame(label_df.groupby('iso2').size()), how='left',left_index=True,right_index=True).rename(columns={0:'N_obs_S2'})
ne = ne.merge(pd.DataFrame(label_df[['iso2','label']].groupby('iso2').sum()), how='left',left_index=True,right_index=True).rename(columns={'label':'N_T_S2'})
ne_prov = ne_prov.merge(pd.DataFrame(label_df.groupby('iso_prov').size()), how='left',left_index=True,right_index=True).rename(columns={0:'N_obs_S2'})
ne_prov = ne_prov.merge(pd.DataFrame(label_df[['iso_prov','label']].groupby('iso_prov').sum()), how='left',left_index=True,right_index=True).rename(columns={'label':'N_T_S2'})
# 'por' = N_T / N_obs, plotted later with the axis label "Precision".
ne['por_S2'] = ne['N_T_S2']/ne['N_obs_S2']
ne['por_SPOT'] = ne['N_T_SPOT']/ne['N_obs_SPOT']
ne_prov['por_S2'] = ne_prov['N_T_S2']/ne_prov['N_obs_S2']
ne_prov['por_SPOT'] = ne_prov['N_T_SPOT']/ne_prov['N_obs_SPOT']
ne['por_SPOT'] = ne['por_SPOT'].fillna(0)
ne_prov['por_SPOT'] = ne_prov['por_SPOT'].fillna(0)
# log10 of the observation count drives colour brightness in the map.
ne['log10_obs_S2'] = np.log10(ne['N_obs_S2'])
ne['log10_obs_SPOT'] = np.log10(ne['N_obs_SPOT'])
ne_prov['log10_obs_S2'] = np.log10(ne_prov['N_obs_S2'])
ne_prov['log10_obs_SPOT'] = np.log10(ne_prov['N_obs_SPOT'])
ne['log10_obs_SPOT'] = ne['log10_obs_SPOT'].fillna(0)
# Export the per-country / per-province statistics backing figure A7.
ne[['por_S2','por_SPOT','N_obs_S2','N_obs_SPOT','N_T_S2','N_T_SPOT']].to_csv(os.path.join(os.getcwd(),'makefigs','data','fig-A7_country.csv'))
ne_prov.loc[ne_prov['iso_a2'].isin(do_prov),['por_S2','por_SPOT','N_obs_S2','N_obs_SPOT','N_T_S2','N_T_SPOT']].to_csv(os.path.join(os.getcwd(),'makefigs','data','fig-A7_prov.csv'))
# Colour-scale caps used by the conv_rgb_* helpers below.
vmax_S2=4
vmax_SPOT=4.5
por_max_S2 = 1
por_max_SPOT = 0.5
def conv_rgb_S2(row, vmax=4):
    """Map one country/province row to an RGBA colour for the S2 panel.

    Brightness encodes log10 of the S2 observation count (normalised by
    *vmax*); the red/green balance encodes precision (`por_S2`).  Rows
    with no observations (NaN log count) are drawn black.

    *vmax* defaults to the value of the module-level constant vmax_S2,
    but is now a parameter so the colour scale can be varied per call.
    """
    if np.isnan(row['log10_obs_S2']):
        return [0, 0, 0, 1]  # no data -> opaque black
    b = row['log10_obs_S2'] / vmax
    r = row['por_S2'] * b
    g = (1 - row['por_S2']) * b
    return [r, g, b, 1]
def conv_rgb_SPOT(row, vmax=4.5, por_max=0.5):
    """Map one row to an RGBA colour for the SPOT panel.

    Same scheme as conv_rgb_S2, except precision (`por_SPOT`) is clipped
    to [0, *por_max*] and renormalised before colouring.  Rows with no
    observations (NaN log count) are drawn black.

    Defaults mirror the module-level constants vmax_SPOT / por_max_SPOT;
    they are now parameters so the colour scale can be varied per call.
    """
    if np.isnan(row['log10_obs_SPOT']):
        return [0, 0, 0, 1]  # no data -> opaque black
    b = row['log10_obs_SPOT'] / vmax
    por = np.clip(row['por_SPOT'] / por_max, 0, 1)
    r = por * b
    g = (1 - por) * b
    # Sanity check kept from the original: report any channel that falls
    # outside [0, 1] (would indicate an out-of-range input row).
    arr = np.array([r, g, b, 1])
    if ((arr > 1).sum() + (arr < 0).sum()) > 0:
        print(row)
    return [r, g, b, 1]
# Pre-compute one RGBA colour per region for each map panel.
ne['color_S2'] = ne.apply(lambda row: conv_rgb_S2(row), axis=1)
ne_prov['color_S2'] = ne_prov.apply(lambda row: conv_rgb_S2(row), axis=1)
ne['color_SPOT'] = ne.apply(lambda row: conv_rgb_SPOT(row), axis=1)
ne_prov['color_SPOT'] = ne_prov.apply(lambda row: conv_rgb_SPOT(row), axis=1)
# Drop countries with missing geometry before plotting.
ne = ne[~ne.geometry.isna()]
def leg_gen(dim):
    """Build a dim x dim x 3 RGB legend gradient.

    Brightness ramps from 0 (left column) to 1 (right column); the hue
    blends from pure red on the top row to pure green on the bottom row,
    with a constant blue channel before the brightness ramp is applied.
    """
    ramp = np.linspace(0, 1, dim)
    brightness = np.stack([ramp] * dim)              # same ramp in every row
    red = np.stack([np.linspace(1, 0, dim)] * dim).T  # 1 at top -> 0 at bottom
    green = 1 - red
    blue = np.ones((dim, dim))
    rgb = np.moveaxis(np.stack([red, green, blue]), 0, -1)
    scale = np.moveaxis(np.stack([brightness] * 3), 0, -1)
    return rgb * scale
# Two-panel world map: (a) Sentinel-2, (b) SPOT6/7, each with an inset
# colour legend produced by leg_gen.
leg_arr = leg_gen(50)
fig, axs = plt.subplots(2,1,figsize=(18,15))
#plot basemap
ne.plot(ax=axs[0], color='#d1d1d1')
ne.plot(ax=axs[1], color='#d1d1d1')
#plot polys S2
# Country-level polygons everywhere except do_prov countries, which are
# drawn at province level instead.
plot_polygon_collection(axs[0], ne.loc[~ne['ISO_A2'].isin(do_prov),'geometry'], color=ne.loc[~ne['ISO_A2'].isin(do_prov),'color_S2'])
plot_polygon_collection(axs[0], ne_prov.loc[ne_prov['iso_a2'].isin(do_prov),'geometry'], color=ne_prov.loc[ne_prov['iso_a2'].isin(do_prov),'color_S2'])
#plopt polys SPOT
plot_polygon_collection(axs[1], ne.loc[~ne['ISO_A2'].isin(do_prov),'geometry'], color=ne.loc[~ne['ISO_A2'].isin(do_prov),'color_SPOT'])
plot_polygon_collection(axs[1], ne_prov.loc[ne_prov['iso_a2'].isin(do_prov),'geometry'], color=ne_prov.loc[ne_prov['iso_a2'].isin(do_prov),'color_SPOT'])
#ne.loc[(~ne['por'].isna()) &(ne['N_T']>5 ) & (~ne['ISO_A2'].isin(do_prov)),:].plot(ax=axs[0], column='por', cmap='magma')
#ne_prov.loc[(~ne_prov['por'].isna()) & (ne_prov['N_T']>5) & (ne_prov['iso_a2'].isin(do_prov)),:].plot(ax=axs[0], column='por', cmap='magma')
#ne.loc[(~ne['por'].isna()) &(ne['N_T']<=5 ) & (~ne['ISO_A2'].isin(do_prov)),:].plot(ax=axs[0], column='log10_obs', cmap='bone', vmax=4)
#ne_prov.loc[(~ne_prov['por'].isna()) & (ne_prov['N_T']<=5) & (ne_prov['iso_a2'].isin(do_prov)),:].plot(ax=axs[0], column='log10_obs', cmap='bone', vmax=4)
# Inset legend axes (precision vs observation count).
ins1 = axs[0].inset_axes([0,0.12,0.2,0.25])
ins2 = axs[1].inset_axes([0,0.12,0.2,0.25])
ins1.imshow(leg_arr)
ins2.imshow(leg_arr)
ins1.set_xticks([ii*(50/4.) for ii in range(5)])
ins1.set_xticklabels([f'10$^{ii}$' for ii in range(5)])
ins1.set_yticks([ii*12.5 for ii in range(5)])
ins1.set_yticklabels([str(1-ii*12.5/50) for ii in range(5)])
ins1.set_ylabel('Precision')
ins1.set_xlabel('N$_{Observations}$')
ins2.set_xticks([ii*(45/4) for ii in range(5)])
ins2.set_xticklabels([f'10$^{ii}$' for ii in range(5)])
ins2.set_yticks([ii*(50/4) for ii in range(5)])
ins2.set_yticklabels(['0.0','0.125','0.25','0.375','>0.5'][::-1])
ins2.set_ylabel('Precision')
ins2.set_xlabel('N$_{Observations}$')
# Crop to populated latitudes and strip the map axes.
axs[0].set_ylim([-60,85])
axs[0].set_xticks([])
axs[0].set_yticks([])
axs[1].set_ylim([-60,85])
axs[1].set_xticks([])
axs[1].set_yticks([])
axs[0].set_title('(a) Sentinel-2')
axs[1].set_title('(b) SPOT6/7')
plt.savefig(os.path.join(root,'makefigs','figures','fig-A7_deploy_precision.png'))
plt.show() | [
"numpy.stack",
"matplotlib.pyplot.show",
"os.path.join",
"os.getcwd",
"numpy.ones",
"numpy.isnan",
"numpy.clip",
"numpy.array",
"numpy.linspace",
"numpy.log10",
"matplotlib.pyplot.subplots"
] | [((250, 261), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (259, 261), False, 'import os, json, geojson\n'), ((2363, 2387), 'numpy.log10', 'np.log10', (["ne['N_obs_S2']"], {}), "(ne['N_obs_S2'])\n", (2371, 2387), True, 'import numpy as np\n'), ((2411, 2437), 'numpy.log10', 'np.log10', (["ne['N_obs_SPOT']"], {}), "(ne['N_obs_SPOT'])\n", (2419, 2437), True, 'import numpy as np\n'), ((2464, 2493), 'numpy.log10', 'np.log10', (["ne_prov['N_obs_S2']"], {}), "(ne_prov['N_obs_S2'])\n", (2472, 2493), True, 'import numpy as np\n'), ((2522, 2553), 'numpy.log10', 'np.log10', (["ne_prov['N_obs_SPOT']"], {}), "(ne_prov['N_obs_SPOT'])\n", (2530, 2553), True, 'import numpy as np\n'), ((4262, 4298), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(18, 15)'}), '(2, 1, figsize=(18, 15))\n', (4274, 4298), True, 'import matplotlib.pyplot as plt\n'), ((6543, 6553), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6551, 6553), True, 'import matplotlib.pyplot as plt\n'), ((282, 333), 'os.path.join', 'os.path.join', (['root', '"""data"""', '"""ne_10m_countries.gpkg"""'], {}), "(root, 'data', 'ne_10m_countries.gpkg')\n", (294, 333), False, 'import os, json, geojson\n'), ((357, 426), 'os.path.join', 'os.path.join', (['root', '"""data"""', '"""ne_10m_admin_1_states_provinces.geojson"""'], {}), "(root, 'data', 'ne_10m_admin_1_states_provinces.geojson')\n", (369, 426), False, 'import os, json, geojson\n'), ((703, 745), 'os.path.join', 'os.path.join', (['root', '"""data"""', '"""label_df.csv"""'], {}), "(root, 'data', 'label_df.csv')\n", (715, 745), False, 'import os, json, geojson\n'), ((3022, 3051), 'numpy.isnan', 'np.isnan', (["row['log10_obs_S2']"], {}), "(row['log10_obs_S2'])\n", (3030, 3051), True, 'import numpy as np\n'), ((3281, 3312), 'numpy.isnan', 'np.isnan', (["row['log10_obs_SPOT']"], {}), "(row['log10_obs_SPOT'])\n", (3289, 3312), True, 'import numpy as np\n'), ((3548, 3570), 'numpy.array', 'np.array', (['[r, g, b, 1]'], {}), '([r, g, b, 
1])\n', (3556, 3570), True, 'import numpy as np\n'), ((4057, 4079), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', 'dim'], {}), '(1, 0, dim)\n', (4068, 4079), True, 'import numpy as np\n'), ((4126, 4145), 'numpy.ones', 'np.ones', (['(dim, dim)'], {}), '((dim, dim))\n', (4133, 4145), True, 'import numpy as np\n'), ((6472, 6544), 'os.path.join', 'os.path.join', (['root', '"""makefigs"""', '"""figures"""', '"""fig-A7_deploy_precision.png"""'], {}), "(root, 'makefigs', 'figures', 'fig-A7_deploy_precision.png')\n", (6484, 6544), False, 'import os, json, geojson\n'), ((2699, 2710), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2708, 2710), False, 'import os, json, geojson\n'), ((2883, 2894), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2892, 2894), False, 'import os, json, geojson\n'), ((4086, 4105), 'numpy.stack', 'np.stack', (['([P] * dim)'], {}), '([P] * dim)\n', (4094, 4105), True, 'import numpy as np\n'), ((776, 817), 'os.path.join', 'os.path.join', (['root', '"""data"""', '"""ne_SPOT.csv"""'], {}), "(root, 'data', 'ne_SPOT.csv')\n", (788, 817), False, 'import os, json, geojson\n'), ((864, 910), 'os.path.join', 'os.path.join', (['root', '"""data"""', '"""ne_prov_SPOT.csv"""'], {}), "(root, 'data', 'ne_prov_SPOT.csv')\n", (876, 910), False, 'import os, json, geojson\n'), ((3425, 3470), 'numpy.clip', 'np.clip', (["(row['por_SPOT'] / por_max_SPOT)", '(0)', '(1)'], {}), "(row['por_SPOT'] / por_max_SPOT, 0, 1)\n", (3432, 3470), True, 'import numpy as np\n'), ((4168, 4187), 'numpy.stack', 'np.stack', (['[R, G, B]'], {}), '([R, G, B])\n', (4176, 4187), True, 'import numpy as np\n'), ((4204, 4221), 'numpy.stack', 'np.stack', (['([a] * 3)'], {}), '([a] * 3)\n', (4212, 4221), True, 'import numpy as np\n'), ((3484, 3529), 'numpy.clip', 'np.clip', (["(row['por_SPOT'] / por_max_SPOT)", '(0)', '(1)'], {}), "(row['por_SPOT'] / por_max_SPOT, 0, 1)\n", (3491, 3529), True, 'import numpy as np\n'), ((4022, 4044), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'dim'], {}), '(0, 1, 
dim)\n', (4033, 4044), True, 'import numpy as np\n')] |
"""
*
* Copyright (c) 2021 <NAME>
* 2021 Autonomous Systems Lab ETH Zurich
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name Data Driven Dynamics nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
"""
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__license__ = "BSD 3"
import numpy as np
import pandas as pd
from src.tools.ulog_tools import pandas_from_topic
from src.tools.quat_utils import slerp
# pre normalization thresholds
PWM_THRESHOLD = 1000
ACTUATOR_CONTROLS_THRESHOLD = -0.2
def compute_flight_time(act_df, pwm_threshold=None, control_threshold=None):
    """Estimate the flight window by thresholding actuator outputs.

    Rows whose fifth column (an actuator PWM channel) exceeds the PWM
    threshold are treated as "in flight"; the window runs from the
    second such row to the last one.  This works well for simulator and
    mission logs, but can fail when an actuator dips below the threshold
    mid-flight.

    Returns a dict with keys 't_start' and 't_end'.
    """
    if pwm_threshold is None:
        pwm_threshold = PWM_THRESHOLD
    if control_threshold is None:
        control_threshold = ACTUATOR_CONTROLS_THRESHOLD
    airborne = act_df[act_df.iloc[:, 4] > pwm_threshold]
    start_time = airborne.iloc[1, 0]
    end_time = airborne.iloc[-1, 0]
    return {"t_start": start_time, "t_end": end_time}
def moving_average(x, w=7):
    """Return the moving average of *x* over a sliding window of length *w*.

    Uses a 'valid' convolution, so the output has len(x) - w + 1 samples.
    """
    kernel = np.ones(w)
    return np.convolve(x, kernel, 'valid') / w
def filter_df(data_df, w=11):
    """Smooth every column of *data_df* with a 'valid' moving average.

    Parameters:
        data_df -- input DataFrame of numeric columns.
        w       -- moving-average window length.

    Returns a new DataFrame with len(data_df) - w + 1 rows.

    Bug fix: the original called the moving-average helper without
    forwarding *w*, so the window argument was silently ignored and the
    helper's default (7) was always used regardless of the value passed
    or declared here.
    """
    data_np = data_df.to_numpy()
    new_df = pd.DataFrame()
    for i, col_name in enumerate(data_df.columns):
        # Same computation as moving_average(x, w), with w honoured.
        new_df[col_name] = np.convolve(data_np[:, i], np.ones(w), 'valid') / w
    return new_df
def resample_dataframe_list(df_list, time_window=None, f_des=100.0, slerp_enabled=False, filter=True):
    """Merge several ulog topic dataframes into one, resampled at f_des Hz.

    Inputs: df_list       : list of ulog topic dataframes, each with a
                            'timestamp' column in microseconds.
            time_window   : optional dict with 't_start'/'t_end' (us);
                            defaults to the full range of the first df.
            f_des         : desired resampling frequency [Hz], must be > 0.
            slerp_enabled : if True, columns of a dataframe containing
                            'q0' are slerp-interpolated as quaternions
                            instead of linearly interpolated.
            filter        : NOTE(review): currently unused -- filter_df
                            is always applied below regardless.
    Returns a single dataframe with duplicated columns dropped.
    """
    if time_window is None:
        # select full ulog time range
        df = df_list[0]
        timestamp_list = df["timestamp"].to_numpy()
        t_start = timestamp_list[0]
        t_end = timestamp_list[-1]
    else:
        t_start = time_window["t_start"]
        t_end = time_window["t_end"]
    # compute desired Period in us to be persistent with ulog timestamps
    assert f_des > 0, 'Desired frequency must be greater than 0'
    T_des = 1000000.0/f_des
    # NOTE(review): n_samples is computed but never used.
    n_samples = int((t_end-t_start)/T_des)
    res_df = pd.DataFrame()
    new_t_list = np.arange(t_start, t_end, T_des)
    # Smooth each topic and clamp t_end to the shortest topic's last
    # timestamp.  NOTE(review): new_t_list was already built from the
    # original t_end above, so this adjustment does not shrink it.
    for df in df_list:
        df = filter_df(df)
        df_end = df["timestamp"].iloc[[-1]].to_numpy()
        if df_end < t_end:
            t_end = int(df_end)
    for df in df_list:
        # use slerp interpolation for quaternions
        # add a better criteria than the exact naming at a later point.
        if 'q0' in df and slerp_enabled:
            # Build the quaternion matrix one resampled timestamp at a time.
            q_mat = slerp_interpolate_from_df(df, new_t_list[0])
            for i in range(1, len(new_t_list)):
                q_new = slerp_interpolate_from_df(df, new_t_list[i])
                q_mat = np.vstack((q_mat, q_new))
            attitude_col_names = list(df.columns)
            attitude_col_names.remove("timestamp")
            new_df = pd.DataFrame(q_mat, columns=attitude_col_names)
        else:
            # Linear interpolation for every non-quaternion column.
            new_df = pd.DataFrame()
            for col in df:
                new_df[col] = np.interp(new_t_list, df.timestamp, df[col])
        res_df = pd.concat([res_df, new_df], axis=1)
    # Each topic carries its own 'timestamp' column; keep only the first.
    res_df = res_df.loc[:, ~res_df.columns.duplicated()]
    return res_df
def slerp_interpolate_from_df(df, new_t):
    """Spherically interpolate a quaternion row of *df* at timestamp *new_t*.

    Picks the two rows whose timestamps are closest to *new_t* and slerps
    between their quaternion columns using the fractional position of
    *new_t* inside that interval.

    NOTE(review): when new_t lies outside the two nearest samples (e.g.
    at the edges of the log) t_ratio falls outside [0, 1]; the behaviour
    then depends on the external slerp() implementation -- verify.
    """
    # Two rows nearest to new_t, in order of increasing |timestamp - new_t|.
    df_sort = df.iloc[(df['timestamp']-new_t).abs().argsort()[:2]]
    df_timestamps = df_sort['timestamp'].values.tolist()
    # Fractional position of new_t between the two neighbouring samples.
    t_ratio = (new_t - df_timestamps[0]) / \
        (df_timestamps[1] - df_timestamps[0])
    df_sort = df_sort.drop(columns=['timestamp'])
    q_new = slerp(df_sort.iloc[0, :].to_numpy(
    ), df_sort.iloc[1, :].to_numpy(), np.array([t_ratio]))
    return q_new
def crop_df(df, t_start, t_end):
    """Crop *df* to [t_start, t_end] plus one sample on either side.

    Keeping the last sample at or before t_start and the first sample at
    or after t_end guarantees that any time inside the window can later
    be interpolated without extrapolating.
    """
    lower = int(df[df.timestamp <= t_start].iloc[[-1]].timestamp.to_numpy())
    upper = int(df[df.timestamp >= t_end].iloc[[0]].timestamp.to_numpy())
    return df[(df.timestamp >= lower) & (df.timestamp <= upper)]
| [
"pandas.DataFrame",
"numpy.ones",
"numpy.arange",
"numpy.array",
"numpy.interp",
"pandas.concat",
"numpy.vstack"
] | [((2910, 2924), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2922, 2924), True, 'import pandas as pd\n'), ((3977, 3991), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3989, 3991), True, 'import pandas as pd\n'), ((4009, 4041), 'numpy.arange', 'np.arange', (['t_start', 't_end', 'T_des'], {}), '(t_start, t_end, T_des)\n', (4018, 4041), True, 'import numpy as np\n'), ((4967, 5002), 'pandas.concat', 'pd.concat', (['[res_df, new_df]'], {'axis': '(1)'}), '([res_df, new_df], axis=1)\n', (4976, 5002), True, 'import pandas as pd\n'), ((5478, 5497), 'numpy.array', 'np.array', (['[t_ratio]'], {}), '([t_ratio])\n', (5486, 5497), True, 'import numpy as np\n'), ((2773, 2783), 'numpy.ones', 'np.ones', (['w'], {}), '(w)\n', (2780, 2783), True, 'import numpy as np\n'), ((4748, 4795), 'pandas.DataFrame', 'pd.DataFrame', (['q_mat'], {'columns': 'attitude_col_names'}), '(q_mat, columns=attitude_col_names)\n', (4760, 4795), True, 'import pandas as pd\n'), ((4832, 4846), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4844, 4846), True, 'import pandas as pd\n'), ((4600, 4625), 'numpy.vstack', 'np.vstack', (['(q_mat, q_new)'], {}), '((q_mat, q_new))\n', (4609, 4625), True, 'import numpy as np\n'), ((4904, 4948), 'numpy.interp', 'np.interp', (['new_t_list', 'df.timestamp', 'df[col]'], {}), '(new_t_list, df.timestamp, df[col])\n', (4913, 4948), True, 'import numpy as np\n')] |
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator
def generate_iterator(path, augmentation=True, color_mode='rgb',
                      batch_size=32, shuffle=True, target_size=(128, 128),
                      seed=None, interpolation='bilinear', rescale=1/255.0):
    """Build a directory-backed Keras image iterator for train/val/test.

    Arguments:
        path          -- full path to the class-subdirectory dataset,
                         e.g. ".../UWF_Dataset/UWF/train".
        augmentation  -- if True, apply light augmentation (horizontal/
                         vertical flips, 5 deg rotation, 2% zoom);
                         otherwise only rescaling.
        color_mode    -- 'rgb' or 'grayscale' (Keras naming).
        batch_size    -- images per batch.
        shuffle       -- shuffle order; use False for validation/testing.
        target_size   -- (rows, cols) to resize images to.
        seed          -- optional random seed for shuffling/augmentation.
        interpolation -- resampling method used when resizing.
        rescale       -- multiplicative rescaling factor; None or 0
                         disables rescaling.

    Returns:
        A DirectoryIterator yielding (images, one-hot labels) batches.
    """
    if augmentation:
        generator = ImageDataGenerator(rescale=rescale,
                                       horizontal_flip=True,
                                       vertical_flip=True,
                                       rotation_range=5,
                                       zoom_range=0.02)
    else:
        generator = ImageDataGenerator(rescale=rescale)
    # Bug fix: forward color_mode/seed/interpolation to flow_from_directory
    # -- the original accepted them as parameters but passed hard-coded
    # 'rgb' / None / 'bilinear' values, silently ignoring the caller.
    return generator.flow_from_directory(directory=path,
                                         target_size=target_size,
                                         color_mode=color_mode,
                                         batch_size=batch_size,
                                         shuffle=shuffle,
                                         seed=seed,
                                         interpolation=interpolation)
def display_images(iterator):
    """Display a 5x5 grid of labelled images from *iterator*.

    Argument:
        iterator -- a Keras directory iterator yielding (images, labels)
                    batches of shape (batch_size, rows, cols, channels);
                    the next batch must contain at least 25 images.

    Displays the images with their class names as titles; returns nothing.
    """
    classes = list(iterator.class_indices)
    images, labels = iterator.next()
    # If pixel values are already scaled to [0, 1] show them directly;
    # otherwise assume raw 0-255 values and cast for imshow.
    # (This replaces the original's two duplicated loops that differed
    # only in this cast.)
    scaled = np.max(images[0, ...]) <= 1
    plt.figure(figsize=(8, 8))
    for i in range(25):
        plt.subplot(5, 5, i + 1)
        plt.imshow(images[i, ...] if scaled else images[i, ...].astype('uint8'))
        plt.title(classes[np.argmax(labels[i])])
        plt.axis('off')
    plt.tight_layout()
| [
"matplotlib.pyplot.subplot",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"numpy.argmax",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.axis",
"numpy.max",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout"
] | [((2968, 2994), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (2978, 2994), True, 'import matplotlib.pyplot as plt\n'), ((3437, 3455), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3453, 3455), True, 'import matplotlib.pyplot as plt\n'), ((1639, 1756), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': 'rescale', 'horizontal_flip': '(True)', 'vertical_flip': '(True)', 'rotation_range': '(5)', 'zoom_range': '(0.02)'}), '(rescale=rescale, horizontal_flip=True, vertical_flip=\n True, rotation_range=5, zoom_range=0.02)\n', (1657, 1756), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2213, 2248), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': 'rescale'}), '(rescale=rescale)\n', (2231, 2248), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((3003, 3025), 'numpy.max', 'np.max', (['images[0, ...]'], {}), '(images[0, ...])\n', (3009, 3025), True, 'import numpy as np\n'), ((3074, 3098), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(5)', '(i + 1)'], {}), '(5, 5, i + 1)\n', (3085, 3098), True, 'import matplotlib.pyplot as plt\n'), ((3107, 3133), 'matplotlib.pyplot.imshow', 'plt.imshow', (['images[i, ...]'], {}), '(images[i, ...])\n', (3117, 3133), True, 'import matplotlib.pyplot as plt\n'), ((3198, 3213), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3206, 3213), True, 'import matplotlib.pyplot as plt\n'), ((3267, 3291), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(5)', '(i + 1)'], {}), '(5, 5, i + 1)\n', (3278, 3291), True, 'import matplotlib.pyplot as plt\n'), ((3407, 3422), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3415, 3422), True, 'import matplotlib.pyplot as plt\n'), ((3163, 3183), 'numpy.argmax', 'np.argmax', (['labels[i]'], {}), '(labels[i])\n', (3172, 
3183), True, 'import numpy as np\n'), ((3372, 3392), 'numpy.argmax', 'np.argmax', (['labels[i]'], {}), '(labels[i])\n', (3381, 3392), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
<NAME>
May 2020
path planner
"""
import rospy
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Point
from std_msgs.msg import Int8
from nav_msgs.msg import Path
from visualization_msgs.msg import Marker, MarkerArray
import uav_motion.msg
import actionlib
import numpy as np
from tf.transformations import quaternion_from_euler
from tf.transformations import euler_from_quaternion
from std_srvs.srv import Empty
import copy
from threading import Thread
import target_mapping.msg
import tf
from sensor_msgs.msg import Image
from sensor_msgs.msg import PointCloud2
from rtabmap_ros.srv import ResetPose, ResetPoseRequest
from utils.open3d_ros_conversion import convertCloudFromRosToOpen3d, convertCloudFromOpen3dToRos
import open3d as o3d
import rospkg
import yaml
import os
import time
import matplotlib.pyplot as plt
import visualization_msgs
# Module-level setup: resolve this ROS package's install path and load the
# planner parameters (alpha, bcem_alpha, half_vfov, ...) from
# config/target_mapping.yaml. Runs at import time, so the config file must
# exist before this module is imported.
rp = rospkg.RosPack()
pkg_path = rp.get_path('target_mapping')
config_path = os.path.join(pkg_path, 'config', 'target_mapping.yaml')
# NOTE(review): the file handle is never closed explicitly; harmless for a
# one-shot node but a `with open(...)` would be tidier.
yaml_file = open(config_path)
params = yaml.load(yaml_file, Loader=yaml.FullLoader)
class PathPlanner(object):
    """UAV path planner for target mapping.

    Flies a lawnmower/search pattern, and on request (via the
    '/path_planner/target_plan' action) performs either a circular
    b-cylinder estimation orbit around a detected target (mode 1) or a
    bottom-to-top sweeping mapping motion (mode 2), driving the rtabmap
    mapping services and saving the resulting point cloud. Mode 0 resumes
    the interrupted search.
    """
    def __init__(self):
        # -1 means no target marker selected yet.
        self.id_ = -1
        self.current_pose_ = PoseStamped()
        self.current_pose_.pose.orientation.w = 1
        # Pose saved when search is interrupted, restored in mode 0.
        self.saved_pose_ = PoseStamped()
        self.marker_ = Marker()
        self.cylinder_marker_ = Marker()
        self.got_cylinder_marker_ = False
        self.goal_position_ = Point()
        self.goal_yaw_ = 0
        # 0 = searching, 1 = b-cylinder estimation, 2 = mapping.
        self.plan_mode_ = 0
        self.alpha = params['alpha']
        self.bcem_alpha = params['bcem_alpha']
        self.half_vfov = params['half_vfov'] # half vertical fov for mapping
        # self.alpha is the camera angle, which is supposed to be 60 degrees according to the camera mount angle.
        # However, if we set it as 60 degrees, the lower-bound scanning ray will be too long
        # For example, alpha = 60 degrees, half FoV = 20 degrees, distance to keep is 1.5 meters.
        # Then the vertical distance from the lower-bound scanning ray is 1.5*tan(60+20), which is 8.5 meters.
        # The vertical distance from the upper-bound scanning ray is 1.5*tan(60-20), which is 1.3 meters.
        # True while the mapping sweep is running; gates pointcloudCallback.
        self.mapping = False
        rp = rospkg.RosPack()
        pkg_path = rp.get_path('target_mapping')
        # Directory where per-target .pcd files are written.
        self.pcd_path = os.path.join(pkg_path, 'pcd')
        self.pc_map_ = PointCloud2()
        self.path = Path()
        self.path.header.frame_id = 'map'
        self.local_path_pub = rospy.Publisher("/local_path", Path, queue_size=1)
        self.poses = []
        self.cylinder_marker_pub_ = rospy.Publisher('/path_planner/cylinder_marker', Marker, queue_size=2)
        # Block until the sampling service exists; the node is useless without it.
        rospy.wait_for_service('stop_sampling')
        self.stop_srv_client_ = rospy.ServiceProxy('stop_sampling', Empty)
        self.as_ = actionlib.SimpleActionServer("/path_planner/target_plan", target_mapping.msg.TargetPlanAction,
                                                 execute_cb=self.targetPlanCallback, auto_start=False)
        self.as_.start()
        current_pose_sub_ = rospy.Subscriber('/mavros/local_position/pose', PoseStamped, self.poseCallback,
                                         queue_size=1)
        self.client_ = actionlib.SimpleActionClient('waypoints', uav_motion.msg.waypointsAction)
        self.client_.wait_for_server()
        #self.plan_thread_ = Thread(target=self.targetPlan, args=())
        #self.plan_thread_.daemon = True
        #self.plan_thread_.start()
        # rtabmap control services: resume/pause mapping, start a new map,
        # delete the current one, and reset odometry to a given pose.
        self.resumeMap_srv_client_ = rospy.ServiceProxy('/rtabmap/resume', Empty)
        self.pauseMap_srv_client_ = rospy.ServiceProxy('/rtabmap/pause', Empty)
        self.newMap_srv_client_ = rospy.ServiceProxy('/rtabmap/trigger_new_map', Empty)
        self.deleteMap_srv_client_ = rospy.ServiceProxy('/rtabmap/reset', Empty)
        self.setPose_srv_client_ = rospy.ServiceProxy('/rtabmap/reset_odom_to_pose', ResetPose)
        rospy.wait_for_service('/rtabmap/resume')
        rospy.wait_for_service('/rtabmap/pause')
        rospy.wait_for_service('/rtabmap/trigger_new_map')
        #self.newMap_srv_client_()
        # Start from a clean, paused map until a mapping motion begins.
        self.deleteMap_srv_client_()
        self.pauseMap_srv_client_()
        map_sub_ = rospy.Subscriber('/rtabmap/cloud_map', PointCloud2, self.pointcloudCallback, queue_size=1)
        rospy.loginfo("Path planner has been initialized!")

    def startSearch(self):
        """Fly the hard-coded search path, waypoint by waypoint.

        Blocks until each waypoint is reached (within 0.2 m); while flying
        it republishes the cylinder marker if one has been estimated.
        """
        positions = np.asarray(((0, 0, 24), (-15, 10, 24), (1, 12, 24), (0, 0, 20))) # granite dell search path
        #positions = self.lawnmower(pt1=(50, 35), pt2=(-50, -35), origin=(30, 38), spacing=10, vertical=True) # pt1=(-50, -35)
        #positions = self.lawnmower(pt1=(0, 35), pt2=(-50, -35), origin=(30, 38), spacing=10, vertical=True) # pt1=(-50, -35)
        #positions = self.add_height(positions, 17.) # for blender_terrain, [10, 17]
        yaws = self.getHeads(positions)
        assert positions.shape[0] == len(yaws)
        for i in range(len(yaws)):
            goal = uav_motion.msg.waypointsGoal()
            goal_p = positions[i]
            self.goal_position_.x = float(goal_p[0])
            self.goal_position_.y = float(goal_p[1])
            self.goal_position_.z = float(goal_p[2])
            # First fly to the waypoint keeping the current heading ...
            q = self.current_pose_.pose.orientation
            yaw = euler_from_quaternion((q.x, q.y, q.z, q.w))[2]
            self.goal_yaw_ = yaw
            goal.positions.append(self.goal_position_)
            goal.yaws.append(yaw)
            self.client_.send_goal(goal)
            # NOTE(review): `True & (...)` is a bitwise AND used as a no-op;
            # this is equivalent to `while not rospy.is_shutdown():`.
            while True & (not rospy.is_shutdown()):
                rospy.sleep(1.)
                current_p = np.asarray((self.current_pose_.pose.position.x,
                                        self.current_pose_.pose.position.y,
                                        self.current_pose_.pose.position.z))
                dist = np.linalg.norm(goal_p - current_p)
                if self.got_cylinder_marker_:
                    self.cylinder_marker_pub_.publish(self.cylinder_marker_)
                if dist < 0.2:
                    break
            rospy.sleep(1.)
            # ... then rotate in place to the heading toward the next waypoint.
            goal = uav_motion.msg.waypointsGoal()
            goal.positions.append(self.goal_position_)
            goal.yaws.append(yaws[i])
            self.client_.send_goal(goal)
            rospy.sleep(5.)

    def getHeads(self, waypoints):
        """Return one heading (yaw, radians) per waypoint.

        Each heading points from a waypoint toward the next one; the last
        waypoint gets yaw 0.
        """
        yaws = []
        nm = waypoints.shape[0]
        for i in range(nm-1):
            currnt_p = waypoints[i][:2]
            nxt_p = waypoints[i+1][:2]
            dir = nxt_p - currnt_p
            yaws.append(np.arctan2(dir[1], dir[0]))
        yaws.append(0)
        return yaws

    def poseCallback(self, pose):
        """Store the latest MAVROS pose and republish the flown path."""
        self.current_pose_ = pose
        self.poses.append(pose)
        self.path.poses = self.poses
        self.local_path_pub.publish(self.path)

    def pointcloudCallback(self, pc_msg):
        """Keep the latest rtabmap cloud, but only while mapping is active."""
        if self.mapping:
            self.pc_map_ = pc_msg
        #xyz = ros_numpy.point_cloud2.pointcloud2_to_xyz_array(pc_msg)

    def targetPlanCallback(self, target_plan):
        """Action callback: dispatch on the requested plan mode.

        mode 1 -> b-cylinder estimation orbit; mode 2 -> mapping sweep;
        mode 0 -> fly back to the saved pose and resume the search goal.
        """
        # On the transition out of search mode, remember where we were.
        if (self.plan_mode_ == 0) & (target_plan.mode.data != 0):
            self.saved_pose_ = copy.deepcopy(self.current_pose_)
        self.id_ = target_plan.id.data
        self.plan_mode_ = target_plan.mode.data
        if self.plan_mode_ != 0:
            self.marker_ = target_plan.markers.markers[self.id_]
            # Stop the sampler while we execute a target-specific motion.
            self.stop_srv_client_()
            rospy.sleep(3.)
        result = target_mapping.msg.TargetPlanResult()
        if self.plan_mode_ == 1:
            result.success = self.get_bcylinder_estimating_motion()
            self.as_.set_succeeded(result)
        elif self.plan_mode_ == 2:
            result = self.getMapping()
            self.as_.set_succeeded(result)
        elif self.plan_mode_ == 0:
            print("resuming")
            save_position = Point()
            save_position.x = self.saved_pose_.pose.position.x
            save_position.y = self.saved_pose_.pose.position.y
            save_position.z = self.saved_pose_.pose.position.z
            q = self.saved_pose_.pose.orientation
            yaw = euler_from_quaternion((q.x, q.y, q.z, q.w))[2]
            goal = uav_motion.msg.waypointsGoal()
            goal.positions.append(save_position)
            goal.yaws.append(yaw)
            self.client_.send_goal(goal)
            # Block until the saved pose is reached (within 0.2 m).
            while True & (not rospy.is_shutdown()):
                rospy.sleep(1.)
                current_p = np.asarray((self.current_pose_.pose.position.x,
                                        self.current_pose_.pose.position.y,
                                        self.current_pose_.pose.position.z))
                goal_p = np.asarray((save_position.x,
                                    save_position.y,
                                    save_position.z))
                dist = np.linalg.norm(goal_p - current_p)
                if dist < 0.2:
                    break
            rospy.sleep(1.)
            # Re-issue the search goal that was interrupted.
            goal = uav_motion.msg.waypointsGoal()
            goal.positions.append(self.goal_position_)
            goal.yaws.append(self.goal_yaw_)
            self.client_.send_goal(goal)
            result.success = True
            self.as_.set_succeeded(result)

    def get_bcylinder_estimating_motion(self):
        """Orbit the target twice on a circle to estimate its bounding cylinder.

        Returns True after the waypoints have been sent (does not wait for
        the motion to finish).
        """
        print('b-cylinder estimation motion')
        # 1. generate a circle
        # use the center of the marker, (x, y),
        # and the current drone height, (h), as the circle center, (x, y, h).
        # then we only need to decide the radius of the circle.
        # assume the target is always in the center of the image,
        # we can compute the angle between camera's z axis and horizontal plane, alpha.
        # the circle will be determined by object center (x, y, z), h, and alpha
        marker_position = np.asarray((self.marker_.pose.position.x, self.marker_.pose.position.y, self.marker_.pose.position.z))
        drone_position = np.asarray((self.current_pose_.pose.position.x, self.current_pose_.pose.position.y, self.current_pose_.pose.position.z))
        h = self.current_pose_.pose.position.z
        circle_center = np.asarray((self.marker_.pose.position.x, self.marker_.pose.position.y, h))
        radius = (h - marker_position[2])/np.tan(self.bcem_alpha)
        # 2. sample keypoints
        # from drone's closest point to the farthest point
        # get the closest point
        dir_cp = drone_position - circle_center
        dir_cp = dir_cp/np.linalg.norm(dir_cp)
        # cp = circle_center + dir_cp * radius
        # get the farthest point
        """
        # this is ok to find the farthest point that is farthest to the longest axis
        marker_q = (self.marker_.pose.orientation.x, self.marker_.pose.orientation.y, self.marker_.pose.orientation.z, self.marker_.pose.orientation.w)
        marker_rot = tf.transformations.quaternion_matrix(marker_q)
        marker_scale = (self.marker_.scale.x, self.marker_.scale.y, self.marker_.scale.z)
        idx = np.argmax(marker_scale)
        long_axis = marker_rot[:, idx]
        """
        # or the farthest point is the opposite of the closest point
        positions = []
        yaws = []
        N = 25 # the number of key points on the trajectory
        # 4*pi total -> two full revolutions around the target.
        step = 4*np.pi/(N-1)
        yaw_cp = np.arctan2(-dir_cp[1], -dir_cp[0])
        for i in range(N):
            dir_i = self.rotateDirection(dir_cp, step*i)
            pos = circle_center + dir_i * radius
            #yaw = np.arctan2(-dir_i[1], -dir_i[0]) # this will cause some issues because atan2 is not continuous
            yaw = yaw_cp + step*i
            positions.append(pos)
            yaws.append(yaw)
        self.sendWaypoints(positions, yaws)
        return True

    def getMapping(self):
        """Sweep the target bottom-to-top on circular paths while mapping.

        Approximates the target with a pillar (radius + top/bottom heights),
        flies stacked circular passes with rtabmap resumed, then crops the
        resulting cloud to a sphere around the target and saves it as a .pcd.
        Returns a TargetPlanResult with success and the cropped cloud.
        """
        print('mapping motion')
        # get target position
        marker_position = np.asarray((self.marker_.pose.position.x, self.marker_.pose.position.y, self.marker_.pose.position.z))
        # get target points
        points = np.asarray([(p.x, p.y, p.z) for p in self.marker_.points])
        # extract points in 3 sigma
        three_sigma_stds = points.std(axis=0) * 3
        pillar_radius_0 = three_sigma_stds[:2].max()
        pillar_top_0 = marker_position[2] + three_sigma_stds[2]
        pillar_bottom_0 = marker_position[2] - three_sigma_stds[2]
        # approximate points with a pillar
        pillar_radius_1 = np.linalg.norm(points[:, :2] - marker_position[:2], axis=1).max() # the radius can also be defined by Gaussian sigma distance
        pillar_top_1 = points[:, 2].max()
        pillar_bottom_1 = points[:, 2].min() #+ pillar_radius * np.tan(self.alpha)
        pillar_radius = min(pillar_radius_0, pillar_radius_1)
        pillar_top = min(pillar_top_0, pillar_top_1)
        # NOTE(review): min() of the two bottoms picks the LOWER bound;
        # confirm this is intended (a conservative bottom would be max()).
        pillar_bottom = min(pillar_bottom_0, pillar_bottom_1)
        cylinder_pos = marker_position
        cylinder_scale = [pillar_radius*2, pillar_radius*2, pillar_top - points[:, 2].min()]
        self.cylinder_marker_ = self.create_cylinder_marker(pos=cylinder_pos, scale=cylinder_scale)
        self.got_cylinder_marker_ = True
        """
        # get target height (not real height, it's eigenvalue of the vertical vector)
        marker_q = (self.marker_.pose.orientation.x, self.marker_.pose.orientation.y, self.marker_.pose.orientation.z,
                    self.marker_.pose.orientation.w)
        marker_rot = tf.transformations.quaternion_matrix(marker_q)
        height = (marker_rot[:, 0] * self.marker_.scale.x)[2]
        """
        # map plan: sweep from bottom to top
        ## get circular planes
        # NOTE(review): `dist` here is the stand-off distance (1.5 m) but the
        # same name is reused below for the distance-to-goal norm.
        dist = 1.5 # distance to keep between drone and the closest pillar surface
        half_vfov = self.half_vfov
        h1 = dist * np.tan(self.alpha + half_vfov)
        h2 = dist * np.tan(self.alpha - half_vfov)
        d = h1 - h2
        N = int(round(np.ceil((pillar_top - pillar_bottom) / d))) # number of sweeping planes
        heights = [pillar_bottom + d * i + h1 for i in range(N)]
        n = 15 # number of waypoints on a circular path
        radius = pillar_radius + dist
        ## get start position
        drone_position = np.asarray((self.current_pose_.pose.position.x, self.current_pose_.pose.position.y,
                                    self.marker_.pose.position.z))
        dir_cp = drone_position - marker_position
        dir_cp = dir_cp/np.linalg.norm(dir_cp)
        ## get path points
        positions = []
        yaws = []
        last_yaw = 0
        for i in range(N):
            center = np.asarray((marker_position[0], marker_position[1], heights[i]))
            p, y = self.circularPoints(dir_cp, center, radius, n)
            positions.append(p)
            yaws.append(y)
        positions = np.asarray(positions).reshape(-1, 3)
        yaws = np.asarray(yaws).reshape(-1, 1)
        start_p = positions[0]
        start_y = yaws[0]
        # Fly to the first sweep waypoint before enabling mapping.
        point = Point(start_p[0], start_p[1], start_p[2])
        goal = uav_motion.msg.waypointsGoal()
        goal.positions.append(point)
        goal.yaws.append(start_y)
        self.client_.send_goal(goal)
        while True & (not rospy.is_shutdown()):
            rospy.sleep(1.)
            current_p = np.asarray((self.current_pose_.pose.position.x,
                                    self.current_pose_.pose.position.y,
                                    self.current_pose_.pose.position.z))
            dist = np.linalg.norm(start_p - current_p)
            if dist < 0.2:
                break
        rospy.sleep(2.)
        """
        pose = ResetPoseRequest()
        pose.x = self.current_pose_.pose.position.x
        pose.y = self.current_pose_.pose.position.y
        pose.z = self.current_pose_.pose.position.z
        q = self.current_pose_.pose.orientation
        euler = euler_from_quaternion((q.x, q.y, q.z, q.w))
        pose.roll = euler[0]
        pose.pitch = euler[1]
        pose.yaw = euler[2]
        #self.setPose_srv_client_(pose)
        """
        # Enable mapping and fly the remaining sweep waypoints.
        self.mapping = True
        self.resumeMap_srv_client_()
        self.sendWaypoints(positions[1:], yaws[1:])
        last_p = positions[-1]
        while True & (not rospy.is_shutdown()):
            rospy.sleep(1.)
            current_p = np.asarray((self.current_pose_.pose.position.x,
                                    self.current_pose_.pose.position.y,
                                    self.current_pose_.pose.position.z))
            dist = np.linalg.norm(last_p - current_p)
            if dist < 0.2:
                break
        self.mapping = False
        # save pointcloud map
        print('saving map')
        pc_map_msg = copy.copy(self.pc_map_)
        o3d_pc = convertCloudFromRosToOpen3d(pc_map_msg)
        # downsampling
        o3d_pc = o3d_pc.voxel_down_sample(0.05)
        # extract points in a sphere
        sphere_center = cylinder_pos
        sphere_radius = np.linalg.norm(np.asarray(cylinder_scale)/2.)
        pts = np.asarray(o3d_pc.points)
        clrs = np.asarray(o3d_pc.colors)
        in_sphere_bools = [np.linalg.norm(pt - sphere_center) <= sphere_radius for pt in pts]
        in_pts = pts[in_sphere_bools]
        in_clrs = clrs[in_sphere_bools]
        map_pcd = o3d.geometry.PointCloud()
        map_pcd.points = o3d.utility.Vector3dVector(in_pts)
        map_pcd.colors = o3d.utility.Vector3dVector(in_clrs)
        pcd_name = os.path.join(self.pcd_path, str(self.id_) + ".pcd")
        o3d.io.write_point_cloud(pcd_name, map_pcd)
        # Reset rtabmap for the next target and pause it again.
        self.newMap_srv_client_()
        self.deleteMap_srv_client_()
        self.pauseMap_srv_client_()
        self.got_cylinder_marker_ = False
        result = target_mapping.msg.TargetPlanResult()
        if len(in_sphere_bools) > 0:
            result.success = True
            result.pointcloud_map = convertCloudFromOpen3dToRos(map_pcd, 'map')
        else:
            result.success = False
        return result

    def circularPoints(self, dir_cp, center, radius, n):
        """Sample n waypoints on one full circle around `center`.

        `dir_cp` is the unit direction from center to the start point; each
        yaw keeps the camera facing the center, accumulated (not re-derived
        via atan2) so the heading stays continuous across the wrap-around.
        """
        positions = []
        yaws = []
        step = 2 * np.pi / n
        yaw_cp = np.arctan2(-dir_cp[1], -dir_cp[0])
        for i in range(n):
            dir_i = self.rotateDirection(dir_cp, step * i)
            pos = center + dir_i * radius
            # yaw = np.arctan2(-dir_i[1], -dir_i[0]) # this will cause some issues because atan2 is not continuous
            yaw = yaw_cp + step * i
            positions.append(pos)
            yaws.append(yaw)
        return positions, yaws

    def rotateDirection(self, d, theta):
        """Rotate 3-vector `d` by `theta` about the z axis.

        The last matrix row is all zeros, so the result's z component is
        forced to 0 — the direction is treated as horizontal.
        """
        r = np.array(((np.cos(theta), -np.sin(theta), 0),
                      (np.sin(theta), np.cos(theta), 0),
                      (0, 0, 0,)))
        return np.matmul(r, d)

    def sendWaypoints(self, positions, yaws):
        """Package positions/yaws into one waypointsGoal and send it (non-blocking)."""
        goal = uav_motion.msg.waypointsGoal()
        for i in range(len(yaws)):
            p = positions[i]
            yaw = yaws[i]
            point = Point(p[0], p[1], p[2])
            goal.positions.append(point)
            goal.yaws.append(yaw)
        self.client_.send_goal(goal)

    def lawnmower(self, pt1, pt2, origin, spacing, vertical):
        """
        :param pt1: start point (x, y)
        :param pt2: end point (x, y)
        :param origin: uav origin (x, y)
        :param spacing:
        :param vertical:
        :return:
        """
        # All waypoints are expressed relative to `origin`.
        origin = np.array(origin)
        pt1 = np.array(pt1) - origin
        pt2 = np.array(pt2) - origin
        x1, y1 = pt1
        x2, y2 = pt2
        width = x2 - x1
        length = y2 - y1
        waypoints = [np.array((0., 0.)), pt1]
        if vertical:
            # Sweep columns: each loop iteration adds one up-over-down-over "S".
            if width < 0:
                spacing = - spacing
            N = int(width / spacing / 2)
            for i in range(N):
                pt_0 = waypoints[-1]
                pt_1 = pt_0 + np.array((0, length))
                pt_2 = pt_1 + np.array((spacing, 0))
                pt_3 = pt_2 + np.array((0, -length))
                pt_4 = pt_3 + np.array((spacing, 0))
                waypoints.append(pt_1)
                waypoints.append(pt_2)
                waypoints.append(pt_3)
                waypoints.append(pt_4)
        else:
            # Sweep rows instead of columns.
            if length < 0:
                spacing = - spacing
            N = int(length / spacing / 2)
            for i in range(N):
                pt_0 = waypoints[-1]
                pt_1 = pt_0 + np.array((width, 0))
                pt_2 = pt_1 + np.array((0, spacing))
                pt_3 = pt_2 + np.array((-width, 0))
                pt_4 = pt_3 + np.array((0, spacing))
                waypoints.append(pt_1)
                waypoints.append(pt_2)
                waypoints.append(pt_3)
                waypoints.append(pt_4)
        waypoints.append(pt2)
        return np.array(waypoints)

    def plot_path(self, waypoints):
        """Debug helper: plot a 2D waypoint path with matplotlib (blocking)."""
        waypoints = np.array(waypoints)
        x = waypoints[:, 0]
        y = waypoints[:, 1]
        plt.plot(x, y)
        plt.show()

    def add_height(self, waypoints, height):
        """Append a constant z column to an (N, 2) waypoint array; returns (N, 3)."""
        N = waypoints.shape[0]
        new_waypoints = np.zeros((N, 3))
        new_waypoints[:, :2] = waypoints
        new_waypoints[:, 2] = height
        return new_waypoints

    def create_cylinder_marker(self, pos=[0, 0, 0], qua=[0, 0, 0, 1], scale=[1, 1, 1]):
        """
        :param pos: [x, y, z]
        :param qua: [x, y, z, w]
        :param scale: [diameter_x, diameter_y, height]; the first two params are diameters for an ellipse
        :return:
        """
        # NOTE(review): mutable default arguments; safe only because the
        # defaults are never mutated here.
        marker = Marker()
        marker.header.frame_id = "map"
        marker.header.stamp = rospy.Time.now()
        marker.ns = "target_mapping"
        marker.id = 0
        marker.type = visualization_msgs.msg.Marker.CYLINDER
        marker.action = visualization_msgs.msg.Marker.ADD
        marker.scale.x = scale[0]
        marker.scale.y = scale[1]
        marker.scale.z = scale[2]
        # Semi-transparent blue cylinder.
        marker.color.a = .5
        marker.color.r = 0.0
        marker.color.g = 0.0
        marker.color.b = 0.5
        marker.pose.position.x = pos[0]
        marker.pose.position.y = pos[1]
        marker.pose.position.z = pos[2]
        marker.pose.orientation.x = qua[0]
        marker.pose.orientation.y = qua[1]
        marker.pose.orientation.z = qua[2]
        marker.pose.orientation.w = qua[3]
        return marker
if __name__ == '__main__':
    # Entry point: start the ROS node, fly the search pattern once, then
    # spin so the action server / subscriber callbacks keep being served.
    rospy.init_node('path_planner', anonymous=False)
    path_planner = PathPlanner()
    path_planner.startSearch()
    try:
        rospy.spin()
    except rospy.ROSInterruptException:
        rospy.loginfo("Node killed!")
| [
"yaml.load",
"rospy.Subscriber",
"numpy.arctan2",
"actionlib.SimpleActionClient",
"rospy.ServiceProxy",
"open3d.geometry.PointCloud",
"numpy.sin",
"numpy.linalg.norm",
"utils.open3d_ros_conversion.convertCloudFromRosToOpen3d",
"os.path.join",
"geometry_msgs.msg.PoseStamped",
"sensor_msgs.msg.P... | [((932, 948), 'rospkg.RosPack', 'rospkg.RosPack', ([], {}), '()\n', (946, 948), False, 'import rospkg\n'), ((1004, 1059), 'os.path.join', 'os.path.join', (['pkg_path', '"""config"""', '"""target_mapping.yaml"""'], {}), "(pkg_path, 'config', 'target_mapping.yaml')\n", (1016, 1059), False, 'import os\n'), ((1099, 1143), 'yaml.load', 'yaml.load', (['yaml_file'], {'Loader': 'yaml.FullLoader'}), '(yaml_file, Loader=yaml.FullLoader)\n', (1108, 1143), False, 'import yaml\n'), ((22231, 22279), 'rospy.init_node', 'rospy.init_node', (['"""path_planner"""'], {'anonymous': '(False)'}), "('path_planner', anonymous=False)\n", (22246, 22279), False, 'import rospy\n'), ((1247, 1260), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (1258, 1260), False, 'from geometry_msgs.msg import PoseStamped\n'), ((1338, 1351), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (1349, 1351), False, 'from geometry_msgs.msg import PoseStamped\n'), ((1375, 1383), 'visualization_msgs.msg.Marker', 'Marker', ([], {}), '()\n', (1381, 1383), False, 'from visualization_msgs.msg import Marker, MarkerArray\n'), ((1416, 1424), 'visualization_msgs.msg.Marker', 'Marker', ([], {}), '()\n', (1422, 1424), False, 'from visualization_msgs.msg import Marker, MarkerArray\n'), ((1497, 1504), 'geometry_msgs.msg.Point', 'Point', ([], {}), '()\n', (1502, 1504), False, 'from geometry_msgs.msg import Point\n'), ((2286, 2302), 'rospkg.RosPack', 'rospkg.RosPack', ([], {}), '()\n', (2300, 2302), False, 'import rospkg\n'), ((2376, 2405), 'os.path.join', 'os.path.join', (['pkg_path', '"""pcd"""'], {}), "(pkg_path, 'pcd')\n", (2388, 2405), False, 'import os\n'), ((2430, 2443), 'sensor_msgs.msg.PointCloud2', 'PointCloud2', ([], {}), '()\n', (2441, 2443), False, 'from sensor_msgs.msg import PointCloud2\n'), ((2464, 2470), 'nav_msgs.msg.Path', 'Path', ([], {}), '()\n', (2468, 2470), False, 'from nav_msgs.msg import Path\n'), ((2543, 2593), 'rospy.Publisher', 
'rospy.Publisher', (['"""/local_path"""', 'Path'], {'queue_size': '(1)'}), "('/local_path', Path, queue_size=1)\n", (2558, 2593), False, 'import rospy\n'), ((2655, 2725), 'rospy.Publisher', 'rospy.Publisher', (['"""/path_planner/cylinder_marker"""', 'Marker'], {'queue_size': '(2)'}), "('/path_planner/cylinder_marker', Marker, queue_size=2)\n", (2670, 2725), False, 'import rospy\n'), ((2735, 2774), 'rospy.wait_for_service', 'rospy.wait_for_service', (['"""stop_sampling"""'], {}), "('stop_sampling')\n", (2757, 2774), False, 'import rospy\n'), ((2807, 2849), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""stop_sampling"""', 'Empty'], {}), "('stop_sampling', Empty)\n", (2825, 2849), False, 'import rospy\n'), ((2869, 3022), 'actionlib.SimpleActionServer', 'actionlib.SimpleActionServer', (['"""/path_planner/target_plan"""', 'target_mapping.msg.TargetPlanAction'], {'execute_cb': 'self.targetPlanCallback', 'auto_start': '(False)'}), "('/path_planner/target_plan', target_mapping.\n msg.TargetPlanAction, execute_cb=self.targetPlanCallback, auto_start=False)\n", (2897, 3022), False, 'import actionlib\n'), ((3119, 3217), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/mavros/local_position/pose"""', 'PoseStamped', 'self.poseCallback'], {'queue_size': '(1)'}), "('/mavros/local_position/pose', PoseStamped, self.\n poseCallback, queue_size=1)\n", (3135, 3217), False, 'import rospy\n'), ((3281, 3354), 'actionlib.SimpleActionClient', 'actionlib.SimpleActionClient', (['"""waypoints"""', 'uav_motion.msg.waypointsAction'], {}), "('waypoints', uav_motion.msg.waypointsAction)\n", (3309, 3354), False, 'import actionlib\n'), ((3579, 3623), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/rtabmap/resume"""', 'Empty'], {}), "('/rtabmap/resume', Empty)\n", (3597, 3623), False, 'import rospy\n'), ((3660, 3703), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/rtabmap/pause"""', 'Empty'], {}), "('/rtabmap/pause', Empty)\n", (3678, 3703), False, 'import rospy\n'), ((3738, 3791), 
'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/rtabmap/trigger_new_map"""', 'Empty'], {}), "('/rtabmap/trigger_new_map', Empty)\n", (3756, 3791), False, 'import rospy\n'), ((3829, 3872), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/rtabmap/reset"""', 'Empty'], {}), "('/rtabmap/reset', Empty)\n", (3847, 3872), False, 'import rospy\n'), ((3908, 3968), 'rospy.ServiceProxy', 'rospy.ServiceProxy', (['"""/rtabmap/reset_odom_to_pose"""', 'ResetPose'], {}), "('/rtabmap/reset_odom_to_pose', ResetPose)\n", (3926, 3968), False, 'import rospy\n'), ((3977, 4018), 'rospy.wait_for_service', 'rospy.wait_for_service', (['"""/rtabmap/resume"""'], {}), "('/rtabmap/resume')\n", (3999, 4018), False, 'import rospy\n'), ((4027, 4067), 'rospy.wait_for_service', 'rospy.wait_for_service', (['"""/rtabmap/pause"""'], {}), "('/rtabmap/pause')\n", (4049, 4067), False, 'import rospy\n'), ((4076, 4126), 'rospy.wait_for_service', 'rospy.wait_for_service', (['"""/rtabmap/trigger_new_map"""'], {}), "('/rtabmap/trigger_new_map')\n", (4098, 4126), False, 'import rospy\n'), ((4255, 4349), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/rtabmap/cloud_map"""', 'PointCloud2', 'self.pointcloudCallback'], {'queue_size': '(1)'}), "('/rtabmap/cloud_map', PointCloud2, self.pointcloudCallback,\n queue_size=1)\n", (4271, 4349), False, 'import rospy\n'), ((4355, 4406), 'rospy.loginfo', 'rospy.loginfo', (['"""Path planner has been initialized!"""'], {}), "('Path planner has been initialized!')\n", (4368, 4406), False, 'import rospy\n'), ((4456, 4520), 'numpy.asarray', 'np.asarray', (['((0, 0, 24), (-15, 10, 24), (1, 12, 24), (0, 0, 20))'], {}), '(((0, 0, 24), (-15, 10, 24), (1, 12, 24), (0, 0, 20)))\n', (4466, 4520), True, 'import numpy as np\n'), ((7426, 7442), 'rospy.sleep', 'rospy.sleep', (['(3.0)'], {}), '(3.0)\n', (7437, 7442), False, 'import rospy\n'), ((9803, 9909), 'numpy.asarray', 'np.asarray', (['(self.marker_.pose.position.x, self.marker_.pose.position.y, self.marker_.\n pose.position.z)'], 
{}), '((self.marker_.pose.position.x, self.marker_.pose.position.y,\n self.marker_.pose.position.z))\n', (9813, 9909), True, 'import numpy as np\n'), ((9931, 10056), 'numpy.asarray', 'np.asarray', (['(self.current_pose_.pose.position.x, self.current_pose_.pose.position.y,\n self.current_pose_.pose.position.z)'], {}), '((self.current_pose_.pose.position.x, self.current_pose_.pose.\n position.y, self.current_pose_.pose.position.z))\n', (9941, 10056), True, 'import numpy as np\n'), ((10123, 10198), 'numpy.asarray', 'np.asarray', (['(self.marker_.pose.position.x, self.marker_.pose.position.y, h)'], {}), '((self.marker_.pose.position.x, self.marker_.pose.position.y, h))\n', (10133, 10198), True, 'import numpy as np\n'), ((11275, 11309), 'numpy.arctan2', 'np.arctan2', (['(-dir_cp[1])', '(-dir_cp[0])'], {}), '(-dir_cp[1], -dir_cp[0])\n', (11285, 11309), True, 'import numpy as np\n'), ((11835, 11941), 'numpy.asarray', 'np.asarray', (['(self.marker_.pose.position.x, self.marker_.pose.position.y, self.marker_.\n pose.position.z)'], {}), '((self.marker_.pose.position.x, self.marker_.pose.position.y,\n self.marker_.pose.position.z))\n', (11845, 11941), True, 'import numpy as np\n'), ((11983, 12041), 'numpy.asarray', 'np.asarray', (['[(p.x, p.y, p.z) for p in self.marker_.points]'], {}), '([(p.x, p.y, p.z) for p in self.marker_.points])\n', (11993, 12041), True, 'import numpy as np\n'), ((14124, 14243), 'numpy.asarray', 'np.asarray', (['(self.current_pose_.pose.position.x, self.current_pose_.pose.position.y,\n self.marker_.pose.position.z)'], {}), '((self.current_pose_.pose.position.x, self.current_pose_.pose.\n position.y, self.marker_.pose.position.z))\n', (14134, 14243), True, 'import numpy as np\n'), ((14879, 14920), 'geometry_msgs.msg.Point', 'Point', (['start_p[0]', 'start_p[1]', 'start_p[2]'], {}), '(start_p[0], start_p[1], start_p[2])\n', (14884, 14920), False, 'from geometry_msgs.msg import Point\n'), ((15481, 15497), 'rospy.sleep', 'rospy.sleep', (['(2.0)'], {}), 
'(2.0)\n', (15492, 15497), False, 'import rospy\n'), ((16601, 16624), 'copy.copy', 'copy.copy', (['self.pc_map_'], {}), '(self.pc_map_)\n', (16610, 16624), False, 'import copy\n'), ((16642, 16681), 'utils.open3d_ros_conversion.convertCloudFromRosToOpen3d', 'convertCloudFromRosToOpen3d', (['pc_map_msg'], {}), '(pc_map_msg)\n', (16669, 16681), False, 'from utils.open3d_ros_conversion import convertCloudFromRosToOpen3d, convertCloudFromOpen3dToRos\n'), ((16911, 16936), 'numpy.asarray', 'np.asarray', (['o3d_pc.points'], {}), '(o3d_pc.points)\n', (16921, 16936), True, 'import numpy as np\n'), ((16952, 16977), 'numpy.asarray', 'np.asarray', (['o3d_pc.colors'], {}), '(o3d_pc.colors)\n', (16962, 16977), True, 'import numpy as np\n'), ((17168, 17193), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (17191, 17193), True, 'import open3d as o3d\n'), ((17219, 17253), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['in_pts'], {}), '(in_pts)\n', (17245, 17253), True, 'import open3d as o3d\n'), ((17279, 17314), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['in_clrs'], {}), '(in_clrs)\n', (17305, 17314), True, 'import open3d as o3d\n'), ((17394, 17437), 'open3d.io.write_point_cloud', 'o3d.io.write_point_cloud', (['pcd_name', 'map_pcd'], {}), '(pcd_name, map_pcd)\n', (17418, 17437), True, 'import open3d as o3d\n'), ((18011, 18045), 'numpy.arctan2', 'np.arctan2', (['(-dir_cp[1])', '(-dir_cp[0])'], {}), '(-dir_cp[1], -dir_cp[0])\n', (18021, 18045), True, 'import numpy as np\n'), ((18629, 18644), 'numpy.matmul', 'np.matmul', (['r', 'd'], {}), '(r, d)\n', (18638, 18644), True, 'import numpy as np\n'), ((19272, 19288), 'numpy.array', 'np.array', (['origin'], {}), '(origin)\n', (19280, 19288), True, 'import numpy as np\n'), ((20656, 20675), 'numpy.array', 'np.array', (['waypoints'], {}), '(waypoints)\n', (20664, 20675), True, 'import numpy as np\n'), ((20734, 20753), 'numpy.array', 'np.array', (['waypoints'], {}), 
'(waypoints)\n', (20742, 20753), True, 'import numpy as np\n'), ((20818, 20832), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (20826, 20832), True, 'import matplotlib.pyplot as plt\n'), ((20841, 20851), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20849, 20851), True, 'import matplotlib.pyplot as plt\n'), ((20953, 20969), 'numpy.zeros', 'np.zeros', (['(N, 3)'], {}), '((N, 3))\n', (20961, 20969), True, 'import numpy as np\n'), ((21393, 21401), 'visualization_msgs.msg.Marker', 'Marker', ([], {}), '()\n', (21399, 21401), False, 'from visualization_msgs.msg import Marker, MarkerArray\n'), ((21471, 21487), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (21485, 21487), False, 'import rospy\n'), ((22361, 22373), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (22371, 22373), False, 'import rospy\n'), ((6103, 6119), 'rospy.sleep', 'rospy.sleep', (['(1.0)'], {}), '(1.0)\n', (6114, 6119), False, 'import rospy\n'), ((6315, 6331), 'rospy.sleep', 'rospy.sleep', (['(5.0)'], {}), '(5.0)\n', (6326, 6331), False, 'import rospy\n'), ((7165, 7198), 'copy.deepcopy', 'copy.deepcopy', (['self.current_pose_'], {}), '(self.current_pose_)\n', (7178, 7198), False, 'import copy\n'), ((10241, 10264), 'numpy.tan', 'np.tan', (['self.bcem_alpha'], {}), '(self.bcem_alpha)\n', (10247, 10264), True, 'import numpy as np\n'), ((10459, 10481), 'numpy.linalg.norm', 'np.linalg.norm', (['dir_cp'], {}), '(dir_cp)\n', (10473, 10481), True, 'import numpy as np\n'), ((13711, 13741), 'numpy.tan', 'np.tan', (['(self.alpha + half_vfov)'], {}), '(self.alpha + half_vfov)\n', (13717, 13741), True, 'import numpy as np\n'), ((13762, 13792), 'numpy.tan', 'np.tan', (['(self.alpha - half_vfov)'], {}), '(self.alpha - half_vfov)\n', (13768, 13792), True, 'import numpy as np\n'), ((14350, 14372), 'numpy.linalg.norm', 'np.linalg.norm', (['dir_cp'], {}), '(dir_cp)\n', (14364, 14372), True, 'import numpy as np\n'), ((14510, 14574), 'numpy.asarray', 'np.asarray', (['(marker_position[0], 
marker_position[1], heights[i])'], {}), '((marker_position[0], marker_position[1], heights[i]))\n', (14520, 14574), True, 'import numpy as np\n'), ((15135, 15151), 'rospy.sleep', 'rospy.sleep', (['(1.0)'], {}), '(1.0)\n', (15146, 15151), False, 'import rospy\n'), ((15175, 15300), 'numpy.asarray', 'np.asarray', (['(self.current_pose_.pose.position.x, self.current_pose_.pose.position.y,\n self.current_pose_.pose.position.z)'], {}), '((self.current_pose_.pose.position.x, self.current_pose_.pose.\n position.y, self.current_pose_.pose.position.z))\n', (15185, 15300), True, 'import numpy as np\n'), ((15388, 15423), 'numpy.linalg.norm', 'np.linalg.norm', (['(start_p - current_p)'], {}), '(start_p - current_p)\n', (15402, 15423), True, 'import numpy as np\n'), ((16156, 16172), 'rospy.sleep', 'rospy.sleep', (['(1.0)'], {}), '(1.0)\n', (16167, 16172), False, 'import rospy\n'), ((16196, 16321), 'numpy.asarray', 'np.asarray', (['(self.current_pose_.pose.position.x, self.current_pose_.pose.position.y,\n self.current_pose_.pose.position.z)'], {}), '((self.current_pose_.pose.position.x, self.current_pose_.pose.\n position.y, self.current_pose_.pose.position.z))\n', (16206, 16321), True, 'import numpy as np\n'), ((16409, 16443), 'numpy.linalg.norm', 'np.linalg.norm', (['(last_p - current_p)'], {}), '(last_p - current_p)\n', (16423, 16443), True, 'import numpy as np\n'), ((17751, 17794), 'utils.open3d_ros_conversion.convertCloudFromOpen3dToRos', 'convertCloudFromOpen3dToRos', (['map_pcd', '"""map"""'], {}), "(map_pcd, 'map')\n", (17778, 17794), False, 'from utils.open3d_ros_conversion import convertCloudFromRosToOpen3d, convertCloudFromOpen3dToRos\n'), ((18848, 18871), 'geometry_msgs.msg.Point', 'Point', (['p[0]', 'p[1]', 'p[2]'], {}), '(p[0], p[1], p[2])\n', (18853, 18871), False, 'from geometry_msgs.msg import Point\n'), ((19303, 19316), 'numpy.array', 'np.array', (['pt1'], {}), '(pt1)\n', (19311, 19316), True, 'import numpy as np\n'), ((19340, 19353), 'numpy.array', 'np.array', 
(['pt2'], {}), '(pt2)\n', (19348, 19353), True, 'import numpy as np\n'), ((19475, 19495), 'numpy.array', 'np.array', (['(0.0, 0.0)'], {}), '((0.0, 0.0))\n', (19483, 19495), True, 'import numpy as np\n'), ((22422, 22451), 'rospy.loginfo', 'rospy.loginfo', (['"""Node killed!"""'], {}), "('Node killed!')\n", (22435, 22451), False, 'import rospy\n'), ((5327, 5370), 'tf.transformations.euler_from_quaternion', 'euler_from_quaternion', (['(q.x, q.y, q.z, q.w)'], {}), '((q.x, q.y, q.z, q.w))\n', (5348, 5370), False, 'from tf.transformations import euler_from_quaternion\n'), ((5606, 5622), 'rospy.sleep', 'rospy.sleep', (['(1.0)'], {}), '(1.0)\n', (5617, 5622), False, 'import rospy\n'), ((5650, 5775), 'numpy.asarray', 'np.asarray', (['(self.current_pose_.pose.position.x, self.current_pose_.pose.position.y,\n self.current_pose_.pose.position.z)'], {}), '((self.current_pose_.pose.position.x, self.current_pose_.pose.\n position.y, self.current_pose_.pose.position.z))\n', (5660, 5775), True, 'import numpy as np\n'), ((5874, 5908), 'numpy.linalg.norm', 'np.linalg.norm', (['(goal_p - current_p)'], {}), '(goal_p - current_p)\n', (5888, 5908), True, 'import numpy as np\n'), ((6585, 6611), 'numpy.arctan2', 'np.arctan2', (['dir[1]', 'dir[0]'], {}), '(dir[1], dir[0])\n', (6595, 6611), True, 'import numpy as np\n'), ((12381, 12440), 'numpy.linalg.norm', 'np.linalg.norm', (['(points[:, :2] - marker_position[:2])'], {'axis': '(1)'}), '(points[:, :2] - marker_position[:2], axis=1)\n', (12395, 12440), True, 'import numpy as np\n'), ((13835, 13876), 'numpy.ceil', 'np.ceil', (['((pillar_top - pillar_bottom) / d)'], {}), '((pillar_top - pillar_bottom) / d)\n', (13842, 13876), True, 'import numpy as np\n'), ((14721, 14742), 'numpy.asarray', 'np.asarray', (['positions'], {}), '(positions)\n', (14731, 14742), True, 'import numpy as np\n'), ((14773, 14789), 'numpy.asarray', 'np.asarray', (['yaws'], {}), '(yaws)\n', (14783, 14789), True, 'import numpy as np\n'), ((15101, 15120), 
'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (15118, 15120), False, 'import rospy\n'), ((16122, 16141), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (16139, 16141), False, 'import rospy\n'), ((16866, 16892), 'numpy.asarray', 'np.asarray', (['cylinder_scale'], {}), '(cylinder_scale)\n', (16876, 16892), True, 'import numpy as np\n'), ((17005, 17039), 'numpy.linalg.norm', 'np.linalg.norm', (['(pt - sphere_center)'], {}), '(pt - sphere_center)\n', (17019, 17039), True, 'import numpy as np\n'), ((5568, 5587), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (5585, 5587), False, 'import rospy\n'), ((7852, 7859), 'geometry_msgs.msg.Point', 'Point', ([], {}), '()\n', (7857, 7859), False, 'from geometry_msgs.msg import Point\n'), ((8943, 8959), 'rospy.sleep', 'rospy.sleep', (['(1.0)'], {}), '(1.0)\n', (8954, 8959), False, 'import rospy\n'), ((18487, 18500), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (18493, 18500), True, 'import numpy as np\n'), ((18545, 18558), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (18551, 18558), True, 'import numpy as np\n'), ((18560, 18573), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (18566, 18573), True, 'import numpy as np\n'), ((19722, 19743), 'numpy.array', 'np.array', (['(0, length)'], {}), '((0, length))\n', (19730, 19743), True, 'import numpy as np\n'), ((19774, 19796), 'numpy.array', 'np.array', (['(spacing, 0)'], {}), '((spacing, 0))\n', (19782, 19796), True, 'import numpy as np\n'), ((19827, 19849), 'numpy.array', 'np.array', (['(0, -length)'], {}), '((0, -length))\n', (19835, 19849), True, 'import numpy as np\n'), ((19880, 19902), 'numpy.array', 'np.array', (['(spacing, 0)'], {}), '((spacing, 0))\n', (19888, 19902), True, 'import numpy as np\n'), ((20276, 20296), 'numpy.array', 'np.array', (['(width, 0)'], {}), '((width, 0))\n', (20284, 20296), True, 'import numpy as np\n'), ((20327, 20349), 'numpy.array', 'np.array', (['(0, spacing)'], {}), '((0, spacing))\n', 
(20335, 20349), True, 'import numpy as np\n'), ((20380, 20401), 'numpy.array', 'np.array', (['(-width, 0)'], {}), '((-width, 0))\n', (20388, 20401), True, 'import numpy as np\n'), ((20432, 20454), 'numpy.array', 'np.array', (['(0, spacing)'], {}), '((0, spacing))\n', (20440, 20454), True, 'import numpy as np\n'), ((8118, 8161), 'tf.transformations.euler_from_quaternion', 'euler_from_quaternion', (['(q.x, q.y, q.z, q.w)'], {}), '((q.x, q.y, q.z, q.w))\n', (8139, 8161), False, 'from tf.transformations import euler_from_quaternion\n'), ((8408, 8424), 'rospy.sleep', 'rospy.sleep', (['(1.0)'], {}), '(1.0)\n', (8419, 8424), False, 'import rospy\n'), ((8452, 8577), 'numpy.asarray', 'np.asarray', (['(self.current_pose_.pose.position.x, self.current_pose_.pose.position.y,\n self.current_pose_.pose.position.z)'], {}), '((self.current_pose_.pose.position.x, self.current_pose_.pose.\n position.y, self.current_pose_.pose.position.z))\n', (8462, 8577), True, 'import numpy as np\n'), ((8678, 8741), 'numpy.asarray', 'np.asarray', (['(save_position.x, save_position.y, save_position.z)'], {}), '((save_position.x, save_position.y, save_position.z))\n', (8688, 8741), True, 'import numpy as np\n'), ((8839, 8873), 'numpy.linalg.norm', 'np.linalg.norm', (['(goal_p - current_p)'], {}), '(goal_p - current_p)\n', (8853, 8873), True, 'import numpy as np\n'), ((18503, 18516), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (18509, 18516), True, 'import numpy as np\n'), ((8370, 8389), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (8387, 8389), False, 'import rospy\n')] |
import time, pyaudio, wave, sys, serial
import numpy as np
from base64 import b16encode
from pyfirmata import Arduino, util
arduino = serial.Serial('/dev/cu.usbmodem1411', 9600)
# import portaudio
CHUNK = 2**12 ## used to be 11 before I changed it; if it fucks up it is most definitely because i change the chunk size
FORMAT = pyaudio.paInt16
# the format of the audio (pyaudio reference)
CHANNELS = 2
# Number of channels
RATE = 44100
# scan rate
isRunning = True
isPlaying = True
# turtle.colormode(255)
# sets turtle color to rgb 255 values
BLACK = (0,0,0)
p = pyaudio.PyAudio()
colorMult = 0
# starts pyaudio
stream = p.open(format = pyaudio.paInt16, channels = CHANNELS, rate = RATE, input = True, frames_per_buffer = CHUNK)
# starts stream
def color(angle):
red = np.sin(angle)
green = np.sin(angle + 60)
blue = np.sin(angle + 120)
return (red, green, blue)
while isRunning:
data = np.fromstring(stream.read(CHUNK, exception_on_overflow = False), dtype=np.int16)
peak = np.average(np.abs(data))*2
bars = '#'*int(250*peak/2**16)
print('%05d %s'%(peak,bars))
brightness = int(250*peak/2**16)
print(brightness)
brightnessMult = brightness*255/200
# use this to get the intensity ---> brightness of node
if brightnessMult > 255:
brightnessMult = 255
if brightnessMult < 15:
brigtnessMult = 0
data = np.fromstring(stream.read(CHUNK), dtype = np.int16)
fft = abs(np.fft.fft(data).real)
fft = fft[:int(len(fft)/2)]
freq = np.fft.fftfreq(CHUNK, 1.0/44100)
freqPeak = freq[np.where(fft==np.max(fft))[0][0]] + 1
angle = 360*freqPeak/150000
# colorMult = brightness/
red = 100 * np.sin(angle)
green = 100 * np.sin(angle + 60)
blue = 100 * np.sin(angle + 120)
if red < 0:
red = 0
if green < 0:
green = 0
if blue < 0:
blue = 0
# turns off the pixel if it's not in range, reflective of nature of np.sin function
if isPlaying:
# print(brightness)
if freqPeak <= 5:
color = (0,0,0)
else:
color = (int(red), int(green), int(blue))
print (color)
arduino.write(color)
#time.sleep(1/360)
# else:
# color = (BLACK)
stream.stopstream()
stream.close()
p.terminate()
| [
"serial.Serial",
"numpy.abs",
"numpy.fft.fft",
"numpy.fft.fftfreq",
"numpy.sin",
"numpy.max",
"pyaudio.PyAudio"
] | [((135, 178), 'serial.Serial', 'serial.Serial', (['"""/dev/cu.usbmodem1411"""', '(9600)'], {}), "('/dev/cu.usbmodem1411', 9600)\n", (148, 178), False, 'import time, pyaudio, wave, sys, serial\n'), ((566, 583), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (581, 583), False, 'import time, pyaudio, wave, sys, serial\n'), ((779, 792), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (785, 792), True, 'import numpy as np\n'), ((805, 823), 'numpy.sin', 'np.sin', (['(angle + 60)'], {}), '(angle + 60)\n', (811, 823), True, 'import numpy as np\n'), ((835, 854), 'numpy.sin', 'np.sin', (['(angle + 120)'], {}), '(angle + 120)\n', (841, 854), True, 'import numpy as np\n'), ((1516, 1550), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['CHUNK', '(1.0 / 44100)'], {}), '(CHUNK, 1.0 / 44100)\n', (1530, 1550), True, 'import numpy as np\n'), ((1686, 1699), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1692, 1699), True, 'import numpy as np\n'), ((1718, 1736), 'numpy.sin', 'np.sin', (['(angle + 60)'], {}), '(angle + 60)\n', (1724, 1736), True, 'import numpy as np\n'), ((1754, 1773), 'numpy.sin', 'np.sin', (['(angle + 120)'], {}), '(angle + 120)\n', (1760, 1773), True, 'import numpy as np\n'), ((1017, 1029), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (1023, 1029), True, 'import numpy as np\n'), ((1450, 1466), 'numpy.fft.fft', 'np.fft.fft', (['data'], {}), '(data)\n', (1460, 1466), True, 'import numpy as np\n'), ((1583, 1594), 'numpy.max', 'np.max', (['fft'], {}), '(fft)\n', (1589, 1594), True, 'import numpy as np\n')] |
import logging
import numpy as np
from sklearn.decomposition import NMF as NMFSklearn
from sklearn.preprocessing import normalize
from scipy.sparse.linalg import eigs, LinearOperator
from config import num_tokens
from util import load_json, save_json
from tools.tokenizer_w2v import W2VTokenizer
from tools.vocab import Vocab
from base.topic import TopicFrame
settings = {
'initial_topics': 30,
'sliding_window_size': 8,
'min_tokens': 2,
}
class WENMF():
def __enter__(self):
self.tokenizer = W2VTokenizer()
self.vocab = Vocab()
self.bins = load_json('./data/wenmf/bins.json', [])
status = load_json('./data/wenmf/state.json',
{
'initial_done': False,
})
self.topic_frames = []
self.initial_done = status['initial_done']
return self
def __exit__(self, type, value, tb):
pass
@property
def num_topics(self):
if self.W is not None:
return self.W.shape[1]
return 0
def topicwords(self, topic):
topicwords = []
# Get corresponding column and sort descending
w = self.W[:,topic].flatten()
sorted_idcs = np.argsort(w)[::-1]
# Take the first num_tokens
for idx in sorted_idcs[:num_tokens]:
token = self.vocab.cur_dict[idx]
token = token.lower().replace('_',' ')
topicwords.append((token, w[idx]))
return topicwords
def tokenize_bin(self, bin):
tweets = []
for tweet in bin:
tokens = self.tokenizer.tokenize(tweet.text)
if len(tokens) >= settings['min_tokens']:
tweet.tokens = tokens
tweets.append(tweet)
bin.tweets = tweets
def compute_norm(self, reglambda = 0):
# Load the vocab
vocab = self.vocab.cur_dict
op = LinearOperator((len(vocab),len(vocab)),
matvec = lambda x: reglambda * x + self.v_mat.transpose() @ (self.v_mat @ x))
w,v = eigs(op,
k = 1,
which = 'LM',
maxiter = 100)
return np.real(w[0])
def update_v_mat(self):
# Load the embeddings
embedding_model = self.tokenizer.embedding_model
embeddings = embedding_model.get_embeddings()
vector_size = embedding_model.vector_size()
# Load the vocab
vocab = self.vocab.cur_dict
# construct V
v_shape = (vector_size, len(vocab))
self.v_mat = np.zeros(v_shape)
for idx, token in enumerate(vocab):
self.v_mat[:,idx] = embeddings[token]
# compute norm of VTV
self.vtv_norm = self.compute_norm()
def initial_wenmf(self):
print('Initial WENMF')
max_iter = 200 # Maximum number of iterations
eps = 1e-16 # Mimum value of H, small positive value for stability
omega = 0.5 # For the extrapolation of the update
threshold = 1e-16 # To stop updates
num_topics = self.W.shape[1]
self.update_v_mat()
Wp1 = self.W.copy()
Wp2 = self.W.copy()
Hp1 = self.H.copy()
Hp2 = self.H.copy()
for iteration in range(max_iter):
W_hat = Wp1 + omega * (Wp1 - Wp2)
H_hat = Hp1 + omega * (Hp1 - Hp2)
for r in range(num_topics):
idx = [i for i in range(num_topics) if i!=r]
g = -2 * (self.v_mat.T @ (self.v_mat @
((self.X @ self.H[None,r,:].T) -
(self.W[:,idx] @ (self.H[idx,:] @ self.H[None,r,:].T) -
W_hat[:,r,None] @ (self.H[None,r,:] @ self.H[None,r,:].T)))))
L = 2 * np.abs(self.H[None,r,:] @ self.H[None,r,:].T) * self.vtv_norm
self.W[:,r,None] = np.maximum(W_hat[:,r,None] - g / L, eps)
sum_wr = np.sum(self.W[:,r])
self.W[:,r] /= sum_wr
self.H[r,:] *= sum_wr
mean_w_change = np.max(np.abs((self.W - Wp1) / Wp1))
for r in range(num_topics):
tmp = ((self.v_mat @ self.W[:,r,None]).T @ self.v_mat)
g = -2 * (tmp @ self.X -
((tmp @ self.W) @ self.H - (tmp @ self.W[:,r,None]) @ self.H[None,r,:]) -
(tmp @ self.W[:,r,None]) @ H_hat[None,r,:])
L = 2 * np.sum(np.square(self.v_mat @ self.W[:,r,None]))
self.H[None,r,:] = np.maximum(H_hat[None,r,:] - g / L, eps)
mean_h_change = np.max(np.abs((self.H - Hp1) / Hp1))
Wp2 = Wp1
Hp2 = Hp1
Wp1 = self.W
Hp1 = self.H
if mean_w_change < threshold and mean_h_change < threshold:
break
def wenmf(self):
max_iter = 200 # Maximum number of iterations
eps = 1e-16 # Mimum value of H, small positive value for stability
omega = 0.5 # For the extrapolation of the update
threshold = 1e-16 # To stop updates
reglambda = 20 * self.X.shape[1] / self.W_pre.shape[1]
vtvlambda_norm = self.compute_norm(reglambda)
Wp1 = self.W.copy()
Wp2 = self.W.copy()
Hp1 = self.H.copy()
Hp2 = self.H.copy()
for iteration in range(max_iter):
W_hat = Wp1 + omega * (Wp1 - Wp2)
H_hat = Hp1 + omega * (Hp1 - Hp2)
for r in range(self.num_topics):
idx = [i for i in range(self.num_topics) if i!=r]
g = -2 * (self.v_mat.T @ (self.v_mat @
((self.X @ self.H[None,r,:].T) -
(self.W[:,idx] @ (self.H[idx,:] @ self.H[None,r,:].T) -
W_hat[:,r,None] @ (self.H[None,r,:] @ self.H[None,r,:].T)))))
L = 2 * np.abs(self.H[None,r,:] @ self.H[None,r,:].T)
if r < self.W_pre.shape[1]:
g += 2 * reglambda * (W_hat[:,r,None] - self.W_pre[:,r,None])
L *= vtvlambda_norm
else:
L *= self.vtv_norm
self.W[:,r,None] = np.maximum(W_hat[:,r,None] - g / L, eps)
sum_wr = np.sum(self.W[:,r])
self.W[:,r] /= sum_wr
self.H[r,:] *= sum_wr
mean_w_change = np.max(np.abs((self.W - Wp1) / Wp1))
for r in range(self.num_topics):
tmp = ((self.v_mat @ self.W[:,r,None]).T @ self.v_mat)
g = -2 * (tmp @ self.X -
((tmp @ self.W) @ self.H - (tmp @ self.W[:,r,None]) @ self.H[None,r,:]) -
(tmp @ self.W[:,r,None]) @ H_hat[None,r,:])
L = 2 * np.sum(np.square(self.v_mat @ self.W[:,r,None]))
self.H[None,r,:] = np.maximum(H_hat[None,r,:] - g / L, eps)
mean_h_change = np.max(np.abs((self.H - Hp1) / Hp1))
Wp2 = Wp1
Hp2 = Hp1
Wp1 = self.W
Hp1 = self.H
if mean_w_change < threshold and mean_h_change < threshold:
break
print('Done after {} iterations'.format(iteration + 1))
def update_model(self):
logging.debug('update_model')
self.W_pre = self.W.copy()
for add_topics in range(5):
logging.debug('Test {} additional topic(s)'.format(add_topics))
if add_topics > 0:
self.W = np.hstack((self.W, np.random.rand(self.W.shape[0],1)))
self.W[:,-1] /= np.sum(self.W[:,-1])
self.H = np.vstack((self.H, 0.001 * np.random.rand(1,self.H.shape[1])))
self.wenmf()
if add_topics > 0:
min_add_topic_strengh = np.min(np.mean(self.H[-add_topics:,:] / np.sum(self.H,0), 1))
logging.debug('min_add_topic_strengh={}'.format(min_add_topic_strengh))
if min_add_topic_strengh < 1 / settings['initial_topics']:
break
else:
self.finalW = self.W
self.finalH = self.H
else:
self.finalW = self.W
self.finalH = self.H
self.W = self.finalW
self.H = self.finalH
logging.debug('Number of topics: {}'.format(self.num_topics))
def summarize_bin(self):
# Save results of the last bin
last_bin = self.bins[-1]
h = self.H[:,-len(last_bin):]
# Normalize columns of h and get maximum
h = normalize(h, axis=0)
topic_assignment = np.argmax(h, axis=0)
# Update topic_frames
for topic in range(self.num_topics):
# Add new topic frames if necessary
if not topic < len(self.topic_frames):
self.topic_frames.append(TopicFrame())
# Push the current bin and the topicwords
self.topic_frames[topic].push_bin(last_bin, self.topicwords(topic))
# Push all tweets
for idx, tweet in enumerate(last_bin):
topic = topic_assignment[idx]
tweet.weight = h[topic,idx]
self.topic_frames[topic].push_tweet(tweet)
def add_bin(self, bin):
self.tokenize_bin(bin)
if self.initial_done:
# Update the bins list
old_bin = self.bins[0]
self.bins = self.bins[1:] + [bin]
# Update the vocabulary, the data matrix, and the embedding matrix
self.vocab.update(bin, old_bin)
keep_mask = np.invert(self.vocab.last_remove_mask)
self.X = self.vocab.count_matrix(self.bins, normalized = True)
self.update_v_mat()
# Add rows for new words and remove rows of removed words
self.W = np.vstack((self.W,
np.full((self.vocab.last_add_count,self.W.shape[1]),1e-16)))
self.W = self.W[keep_mask,:]
# Remove columns of the last bin and add columns for new bin
self.H = self.H[:,len(old_bin):]
self.H = np.hstack((self.H,
0.001 * np.random.rand(self.H.shape[0], len(bin))))
# Run the factorization and store the results
self.update_model()
self.summarize_bin()
else:
# Add data
self.vocab.update(bin, [])
self.bins = self.bins + [bin]
def finish_frame(self, frame):
if self.initial_done:
# Add all topic frames to the frame
frame.push_topics(self.topic_frames)
# Compute summed weights of all topics
topic_strengths = np.mean(self.H / np.sum(self.H,0), 1)
keep_mask = (topic_strengths >= (0.1 / settings['initial_topics']))
# Remove entries from W, H, and topic_frames
self.W = self.W[:,keep_mask]
self.H = self.H[keep_mask,:]
next_topic_frames = []
for idx, topic_frame in enumerate(self.topic_frames):
if keep_mask[idx]:
next_topic_frames.append(topic_frame.get_next())
self.topic_frames = next_topic_frames
logging.debug('Number of topics down to: {}'.format(self.num_topics))
else:
# Do not save the frame
frame.discard()
# Compute the solution to standard NMF for initialization
logging.debug('Initial NMF')
self.X = self.vocab.count_matrix(self.bins, normalized = True)
model = NMFSklearn(settings['initial_topics'], init='nndsvd')
self.W = model.fit_transform(self.X)
self.H = model.components_
# Run the wenmf
self.initial_wenmf()
# Remove bins
while len(self.bins) > settings['sliding_window_size']:
self.vocab.update([], self.bins[0])
keep_mask = np.invert(self.vocab.last_remove_mask)
self.W = self.W[keep_mask,:]
self.H = self.H[:,len(self.bins[0]):]
self.bins = self.bins[1:]
self.initial_done = True
self.X = None
| [
"numpy.maximum",
"numpy.sum",
"numpy.invert",
"numpy.argmax",
"numpy.abs",
"tools.tokenizer_w2v.W2VTokenizer",
"numpy.argsort",
"numpy.full",
"numpy.real",
"base.topic.TopicFrame",
"sklearn.decomposition.NMF",
"numpy.square",
"util.load_json",
"tools.vocab.Vocab",
"sklearn.preprocessing.... | [((524, 538), 'tools.tokenizer_w2v.W2VTokenizer', 'W2VTokenizer', ([], {}), '()\n', (536, 538), False, 'from tools.tokenizer_w2v import W2VTokenizer\n'), ((560, 567), 'tools.vocab.Vocab', 'Vocab', ([], {}), '()\n', (565, 567), False, 'from tools.vocab import Vocab\n'), ((588, 627), 'util.load_json', 'load_json', (['"""./data/wenmf/bins.json"""', '[]'], {}), "('./data/wenmf/bins.json', [])\n", (597, 627), False, 'from util import load_json, save_json\n'), ((645, 706), 'util.load_json', 'load_json', (['"""./data/wenmf/state.json"""', "{'initial_done': False}"], {}), "('./data/wenmf/state.json', {'initial_done': False})\n", (654, 706), False, 'from util import load_json, save_json\n'), ((2047, 2085), 'scipy.sparse.linalg.eigs', 'eigs', (['op'], {'k': '(1)', 'which': '"""LM"""', 'maxiter': '(100)'}), "(op, k=1, which='LM', maxiter=100)\n", (2051, 2085), False, 'from scipy.sparse.linalg import eigs, LinearOperator\n'), ((2164, 2177), 'numpy.real', 'np.real', (['w[0]'], {}), '(w[0])\n', (2171, 2177), True, 'import numpy as np\n'), ((2566, 2583), 'numpy.zeros', 'np.zeros', (['v_shape'], {}), '(v_shape)\n', (2574, 2583), True, 'import numpy as np\n'), ((7463, 7492), 'logging.debug', 'logging.debug', (['"""update_model"""'], {}), "('update_model')\n", (7476, 7492), False, 'import logging\n'), ((8787, 8807), 'sklearn.preprocessing.normalize', 'normalize', (['h'], {'axis': '(0)'}), '(h, axis=0)\n', (8796, 8807), False, 'from sklearn.preprocessing import normalize\n'), ((8835, 8855), 'numpy.argmax', 'np.argmax', (['h'], {'axis': '(0)'}), '(h, axis=0)\n', (8844, 8855), True, 'import numpy as np\n'), ((1204, 1217), 'numpy.argsort', 'np.argsort', (['w'], {}), '(w)\n', (1214, 1217), True, 'import numpy as np\n'), ((9789, 9827), 'numpy.invert', 'np.invert', (['self.vocab.last_remove_mask'], {}), '(self.vocab.last_remove_mask)\n', (9798, 9827), True, 'import numpy as np\n'), ((11648, 11676), 'logging.debug', 'logging.debug', (['"""Initial NMF"""'], {}), 
"('Initial NMF')\n", (11661, 11676), False, 'import logging\n'), ((11772, 11825), 'sklearn.decomposition.NMF', 'NMFSklearn', (["settings['initial_topics']"], {'init': '"""nndsvd"""'}), "(settings['initial_topics'], init='nndsvd')\n", (11782, 11825), True, 'from sklearn.decomposition import NMF as NMFSklearn\n'), ((3936, 3978), 'numpy.maximum', 'np.maximum', (['(W_hat[:, r, None] - g / L)', 'eps'], {}), '(W_hat[:, r, None] - g / L, eps)\n', (3946, 3978), True, 'import numpy as np\n'), ((4002, 4022), 'numpy.sum', 'np.sum', (['self.W[:, r]'], {}), '(self.W[:, r])\n', (4008, 4022), True, 'import numpy as np\n'), ((4133, 4161), 'numpy.abs', 'np.abs', (['((self.W - Wp1) / Wp1)'], {}), '((self.W - Wp1) / Wp1)\n', (4139, 4161), True, 'import numpy as np\n'), ((4607, 4649), 'numpy.maximum', 'np.maximum', (['(H_hat[None, r, :] - g / L)', 'eps'], {}), '(H_hat[None, r, :] - g / L, eps)\n', (4617, 4649), True, 'import numpy as np\n'), ((4683, 4711), 'numpy.abs', 'np.abs', (['((self.H - Hp1) / Hp1)'], {}), '((self.H - Hp1) / Hp1)\n', (4689, 4711), True, 'import numpy as np\n'), ((6351, 6393), 'numpy.maximum', 'np.maximum', (['(W_hat[:, r, None] - g / L)', 'eps'], {}), '(W_hat[:, r, None] - g / L, eps)\n', (6361, 6393), True, 'import numpy as np\n'), ((6417, 6437), 'numpy.sum', 'np.sum', (['self.W[:, r]'], {}), '(self.W[:, r])\n', (6423, 6437), True, 'import numpy as np\n'), ((6548, 6576), 'numpy.abs', 'np.abs', (['((self.W - Wp1) / Wp1)'], {}), '((self.W - Wp1) / Wp1)\n', (6554, 6576), True, 'import numpy as np\n'), ((7027, 7069), 'numpy.maximum', 'np.maximum', (['(H_hat[None, r, :] - g / L)', 'eps'], {}), '(H_hat[None, r, :] - g / L, eps)\n', (7037, 7069), True, 'import numpy as np\n'), ((7103, 7131), 'numpy.abs', 'np.abs', (['((self.H - Hp1) / Hp1)'], {}), '((self.H - Hp1) / Hp1)\n', (7109, 7131), True, 'import numpy as np\n'), ((7792, 7813), 'numpy.sum', 'np.sum', (['self.W[:, -1]'], {}), '(self.W[:, -1])\n', (7798, 7813), True, 'import numpy as np\n'), ((12151, 12189), 
'numpy.invert', 'np.invert', (['self.vocab.last_remove_mask'], {}), '(self.vocab.last_remove_mask)\n', (12160, 12189), True, 'import numpy as np\n'), ((6043, 6092), 'numpy.abs', 'np.abs', (['(self.H[None, r, :] @ self.H[None, r, :].T)'], {}), '(self.H[None, r, :] @ self.H[None, r, :].T)\n', (6049, 6092), True, 'import numpy as np\n'), ((9072, 9084), 'base.topic.TopicFrame', 'TopicFrame', ([], {}), '()\n', (9082, 9084), False, 'from base.topic import TopicFrame\n'), ((10063, 10123), 'numpy.full', 'np.full', (['(self.vocab.last_add_count, self.W.shape[1])', '(1e-16)'], {}), '((self.vocab.last_add_count, self.W.shape[1]), 1e-16)\n', (10070, 10123), True, 'import numpy as np\n'), ((10897, 10914), 'numpy.sum', 'np.sum', (['self.H', '(0)'], {}), '(self.H, 0)\n', (10903, 10914), True, 'import numpy as np\n'), ((3839, 3888), 'numpy.abs', 'np.abs', (['(self.H[None, r, :] @ self.H[None, r, :].T)'], {}), '(self.H[None, r, :] @ self.H[None, r, :].T)\n', (3845, 3888), True, 'import numpy as np\n'), ((4530, 4572), 'numpy.square', 'np.square', (['(self.v_mat @ self.W[:, r, None])'], {}), '(self.v_mat @ self.W[:, r, None])\n', (4539, 4572), True, 'import numpy as np\n'), ((6950, 6992), 'numpy.square', 'np.square', (['(self.v_mat @ self.W[:, r, None])'], {}), '(self.v_mat @ self.W[:, r, None])\n', (6959, 6992), True, 'import numpy as np\n'), ((7724, 7758), 'numpy.random.rand', 'np.random.rand', (['self.W.shape[0]', '(1)'], {}), '(self.W.shape[0], 1)\n', (7738, 7758), True, 'import numpy as np\n'), ((7865, 7899), 'numpy.random.rand', 'np.random.rand', (['(1)', 'self.H.shape[1]'], {}), '(1, self.H.shape[1])\n', (7879, 7899), True, 'import numpy as np\n'), ((8050, 8067), 'numpy.sum', 'np.sum', (['self.H', '(0)'], {}), '(self.H, 0)\n', (8056, 8067), True, 'import numpy as np\n')] |
"""
"""
# Use relative imports when importing objects from elsewhere in the library
from ..scale import scale01
import numpy as np
test_input_cosmological_params = np.array([0.14, 0.967, 0.8, -5.0, 1.0])
def test_scale01():
res = scale01(test_input_cosmological_params)
assert res.shape == test_input_cosmological_params.shape
| [
"numpy.array"
] | [((165, 204), 'numpy.array', 'np.array', (['[0.14, 0.967, 0.8, -5.0, 1.0]'], {}), '([0.14, 0.967, 0.8, -5.0, 1.0])\n', (173, 204), True, 'import numpy as np\n')] |
import numpy
import re
import time
from tqdm import tqdm
import random
from recombee_api_client.api_client import RecombeeClient
from recombee_api_client.api_requests import (
AddDetailView, RecommendItemsToUser, Batch, AddRating, ResetDatabase, AddUserProperty,
AddItemProperty, SetItemValues)
from recombee_api_client.exceptions import ResponseException
from xminds.compat import logger
from xminds.lib.utils import retry
from xminds.ds.scaling import linearscaling
from .baserecoapi import BaseRecoApi
from .config import RECOMBEE_DBS_TOKENS
class RecombeeRecoApi(BaseRecoApi):
BATCH_SIZE = 10000
# Sometime a db might get stuck. In this case switch to another one.
# List of 2-tuples('API identifier', 'Private token')
DBS_TOKENS = RECOMBEE_DBS_TOKENS
DB_TOKEN = None # is randomised in reset to avoid bugs
def __init__(self, name, dataset, dataset_hash=None, db=None,
token=None, environment=None,
algorithm='recombee:personal',
transform_to_implicit=False,
transform_algorithm=''):
super().__init__(name, dataset, algorithm=algorithm, db=db, token=token,
dataset_hash=dataset_hash, environment=environment,
transform_to_implicit=transform_to_implicit,
transform_algorithm=transform_algorithm)
assert self.DBS_TOKENS, 'No ID found. Set varenvs RECOMBEE_API_DB_ID0/TOKEN0 at least'
self.DB_TOKEN = random.choice(self.DBS_TOKENS)
self.db = db or self.DB_TOKEN[0]
self.token = token or self.DB_TOKEN[1] # private token
if self.db is None or self.token is None:
raise RuntimeError(('To use recombee api, db and private token need be provided, '
'from credentials or varenv'))
logger.info('db: %s', self.db)
logger.info('token: %s', self.token)
self.client = RecombeeClient(self.db, self.token)
def get_client(self):
return self.client
def upload(self):
"""The dataset is trained after each batch, so the code is left in `self.fit`"""
pass
def fit(self):
logger.info('fit starting. Have usually to wait for the DB to be reset')
def kind_to_type(prop):
kind = self.prop_to_kind(prop)
if kind in 'iu':
return 'int'
if kind == 'f':
return 'double'
if kind == 'U':
return 'string'
raise NotImplementedError(prop)
dataset = self.dataset
# items
users, items = self.get_user_item_flat_properties(dataset)
users_m2ms, items_m2ms = self.get_user_item_m2m_properties(dataset, asdict=True)
# create_items_request = [AddItem(item_id) for item_id in items['item_id'].tolist()]
# resp = send_batch(create_items_request)
for prop in dataset.iget_items_properties(yield_id=True):
_type = kind_to_type(prop)
AddItemProperty(prop['property_name'], _type)
items_request = []
for values in self.iget_all_features_as_dict(items, items_m2ms):
item_id = values.pop('item_id')
items_request.append(SetItemValues(item_id, values, cascade_create=True))
self._send_batch(items_request)
# users
for prop in dataset.iget_users_properties(yield_id=True):
_type = kind_to_type(prop)
AddUserProperty(prop['property_name'], _type)
users_request = []
for values in self.iget_all_features_as_dict(users, users_m2ms):
user_id = values.pop('user_id')
users_request.append(SetItemValues(user_id, values, cascade_create=True))
self._send_batch(users_request)
# ratings preprocessing
ratings = dataset.ratings
ratings['rating'] = linearscaling(ratings['rating'], -1, 1)
# ratings upload
ratings_requests = []
logger.info('transform to implicit: %s', self.transform_to_implicit)
if self.transform_to_implicit is False:
for rating in ratings:
ratings_requests.append(AddRating(str(rating['user_id']), str(rating['item_id']),
int(rating['rating']), cascade_create=True))
else:
logger.info('transform algorithm: %s', self.transform_algorithm)
new_ratings = self.explicit_to_implicit(dataset, self.transform_algorithm)
for rating in new_ratings:
ratings_requests.append(AddDetailView(
str(rating['user_id']), str(rating['item_id']),
timestamp=str(rating['timestamp']),
cascade_create=True))
batch_size = self.BATCH_SIZE
n_batches = int(len(ratings_requests) / batch_size)
extra_batch = len(ratings_requests) % batch_size > 0
for i in tqdm(range(n_batches)):
self._send_batch(ratings_requests[i*batch_size:(i+1)*batch_size])
if extra_batch:
self._send_batch(ratings_requests[n_batches*batch_size:len(ratings_requests)])
def recommend(self, test_user_ids, n_items_per_user=32, exclude_rated_items=True, reco_delay=0):
reco_users = []
reco_items = []
reco_data = []
missing_recos = []
for i in tqdm(test_user_ids):
reco = self._get_user_topk_recombee(user_id=i, n_results=n_items_per_user,
exclude_rated_items=exclude_rated_items)
recomms = reco['recomms']
if len(recomms) == 0:
missing_recos.append(i)
continue
reco_users.extend([i] * len(recomms))
reco_items.extend([int(d['id']) for d in recomms])
reco_data.extend((len(recomms) - numpy.arange(len(recomms))).tolist())
time.sleep(reco_delay)
if missing_recos:
logger.warning(f'{len(missing_recos)} empty recos. First 10: {missing_recos[:10]}')
result = numpy.array(reco_users), numpy.array(reco_items), numpy.array(reco_data)
return result
@retry(base=10, multiplier=1.2, max_retry=2)
def reset(self):
"""
Given that deleting is slow and there is no IS_READY endpoint, we test that the DB is
ready by sending a rating corresponding to a missing item/user
"""
logger.info(f'Resetting into db {self.db}')
try:
self.client.send(ResetDatabase()) # breaks if already being reset
logger.info('Reset query sent. Sleep 10...')
time.sleep(10)
except TypeError as e:
logger.warning(f'Resetting Recombee dataset failed: e={e}')
pass
# wait until the status changes when getting a reco (from 'being erased' to 'missing user')
user_id = n = 1
t0 = time.time()
while True:
try:
self.client.send(RecommendItemsToUser(user_id, n, logic={"name": self.algorithm}))
except ResponseException as e:
match = re.match(r'.*status:\s*(\d+).*', str(e))
if not match:
raise RuntimeError(f'No status found in error message {e}')
status = match.group(1)
if status == '404': # missing user; the DB has been reset
logger.info('DB reset')
break
elif status == '422': # DB being erased
if time.time() - t0 > 3000:
raise RuntimeError(f'Resetting did not seem to work: error {e}')
logger.info('Not ready yet. Sleep 10')
time.sleep(10)
else:
raise NotImplementedError(f'Unknown status {status} from {e}')
@retry(base=10, multiplier=1.1, max_retry=5)
def _send_batch(self, batch):
if not batch:
# nothing to send
return
self.client.send(Batch(batch))
@retry(max_retry=2)
def _get_user_topk_recombee(self, user_id, n_results=32, exclude_rated_items=True):
if not exclude_rated_items:
filter_ = None
else:
mask = self.dataset.ratings['user_id'] == user_id
items_id = self.dataset.ratings['item_id'][mask].astype('U64').tolist()
filter_ = "'itemId' not in {" + ",".join([f'"{i}"' for i in items_id]) + '}'
return self.client.send(RecommendItemsToUser(
user_id, n_results,
filter=filter_,
logic={"name": self.algorithm})
)
| [
"tqdm.tqdm",
"xminds.compat.logger.info",
"xminds.ds.scaling.linearscaling",
"recombee_api_client.api_requests.RecommendItemsToUser",
"random.choice",
"recombee_api_client.api_client.RecombeeClient",
"xminds.lib.utils.retry",
"time.time",
"time.sleep",
"recombee_api_client.api_requests.ResetDataba... | [((6197, 6240), 'xminds.lib.utils.retry', 'retry', ([], {'base': '(10)', 'multiplier': '(1.2)', 'max_retry': '(2)'}), '(base=10, multiplier=1.2, max_retry=2)\n', (6202, 6240), False, 'from xminds.lib.utils import retry\n'), ((7886, 7929), 'xminds.lib.utils.retry', 'retry', ([], {'base': '(10)', 'multiplier': '(1.1)', 'max_retry': '(5)'}), '(base=10, multiplier=1.1, max_retry=5)\n', (7891, 7929), False, 'from xminds.lib.utils import retry\n'), ((8080, 8098), 'xminds.lib.utils.retry', 'retry', ([], {'max_retry': '(2)'}), '(max_retry=2)\n', (8085, 8098), False, 'from xminds.lib.utils import retry\n'), ((1509, 1539), 'random.choice', 'random.choice', (['self.DBS_TOKENS'], {}), '(self.DBS_TOKENS)\n', (1522, 1539), False, 'import random\n'), ((1862, 1892), 'xminds.compat.logger.info', 'logger.info', (['"""db: %s"""', 'self.db'], {}), "('db: %s', self.db)\n", (1873, 1892), False, 'from xminds.compat import logger\n'), ((1901, 1937), 'xminds.compat.logger.info', 'logger.info', (['"""token: %s"""', 'self.token'], {}), "('token: %s', self.token)\n", (1912, 1937), False, 'from xminds.compat import logger\n'), ((1960, 1995), 'recombee_api_client.api_client.RecombeeClient', 'RecombeeClient', (['self.db', 'self.token'], {}), '(self.db, self.token)\n', (1974, 1995), False, 'from recombee_api_client.api_client import RecombeeClient\n'), ((2203, 2275), 'xminds.compat.logger.info', 'logger.info', (['"""fit starting. Have usually to wait for the DB to be reset"""'], {}), "('fit starting. 
Have usually to wait for the DB to be reset')\n", (2214, 2275), False, 'from xminds.compat import logger\n'), ((3896, 3935), 'xminds.ds.scaling.linearscaling', 'linearscaling', (["ratings['rating']", '(-1)', '(1)'], {}), "(ratings['rating'], -1, 1)\n", (3909, 3935), False, 'from xminds.ds.scaling import linearscaling\n'), ((4000, 4068), 'xminds.compat.logger.info', 'logger.info', (['"""transform to implicit: %s"""', 'self.transform_to_implicit'], {}), "('transform to implicit: %s', self.transform_to_implicit)\n", (4011, 4068), False, 'from xminds.compat import logger\n'), ((5392, 5411), 'tqdm.tqdm', 'tqdm', (['test_user_ids'], {}), '(test_user_ids)\n', (5396, 5411), False, 'from tqdm import tqdm\n'), ((6459, 6502), 'xminds.compat.logger.info', 'logger.info', (['f"""Resetting into db {self.db}"""'], {}), "(f'Resetting into db {self.db}')\n", (6470, 6502), False, 'from xminds.compat import logger\n'), ((6936, 6947), 'time.time', 'time.time', ([], {}), '()\n', (6945, 6947), False, 'import time\n'), ((3037, 3082), 'recombee_api_client.api_requests.AddItemProperty', 'AddItemProperty', (["prop['property_name']", '_type'], {}), "(prop['property_name'], _type)\n", (3052, 3082), False, 'from recombee_api_client.api_requests import AddDetailView, RecommendItemsToUser, Batch, AddRating, ResetDatabase, AddUserProperty, AddItemProperty, SetItemValues\n'), ((3486, 3531), 'recombee_api_client.api_requests.AddUserProperty', 'AddUserProperty', (["prop['property_name']", '_type'], {}), "(prop['property_name'], _type)\n", (3501, 3531), False, 'from recombee_api_client.api_requests import AddDetailView, RecommendItemsToUser, Batch, AddRating, ResetDatabase, AddUserProperty, AddItemProperty, SetItemValues\n'), ((4371, 4435), 'xminds.compat.logger.info', 'logger.info', (['"""transform algorithm: %s"""', 'self.transform_algorithm'], {}), "('transform algorithm: %s', self.transform_algorithm)\n", (4382, 4435), False, 'from xminds.compat import logger\n'), ((5934, 5956), 'time.sleep', 
'time.sleep', (['reco_delay'], {}), '(reco_delay)\n', (5944, 5956), False, 'import time\n'), ((6096, 6119), 'numpy.array', 'numpy.array', (['reco_users'], {}), '(reco_users)\n', (6107, 6119), False, 'import numpy\n'), ((6121, 6144), 'numpy.array', 'numpy.array', (['reco_items'], {}), '(reco_items)\n', (6132, 6144), False, 'import numpy\n'), ((6146, 6168), 'numpy.array', 'numpy.array', (['reco_data'], {}), '(reco_data)\n', (6157, 6168), False, 'import numpy\n'), ((6607, 6651), 'xminds.compat.logger.info', 'logger.info', (['"""Reset query sent. Sleep 10..."""'], {}), "('Reset query sent. Sleep 10...')\n", (6618, 6651), False, 'from xminds.compat import logger\n'), ((6664, 6678), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (6674, 6678), False, 'import time\n'), ((8060, 8072), 'recombee_api_client.api_requests.Batch', 'Batch', (['batch'], {}), '(batch)\n', (8065, 8072), False, 'from recombee_api_client.api_requests import AddDetailView, RecommendItemsToUser, Batch, AddRating, ResetDatabase, AddUserProperty, AddItemProperty, SetItemValues\n'), ((8531, 8623), 'recombee_api_client.api_requests.RecommendItemsToUser', 'RecommendItemsToUser', (['user_id', 'n_results'], {'filter': 'filter_', 'logic': "{'name': self.algorithm}"}), "(user_id, n_results, filter=filter_, logic={'name':\n self.algorithm})\n", (8551, 8623), False, 'from recombee_api_client.api_requests import AddDetailView, RecommendItemsToUser, Batch, AddRating, ResetDatabase, AddUserProperty, AddItemProperty, SetItemValues\n'), ((3260, 3311), 'recombee_api_client.api_requests.SetItemValues', 'SetItemValues', (['item_id', 'values'], {'cascade_create': '(True)'}), '(item_id, values, cascade_create=True)\n', (3273, 3311), False, 'from recombee_api_client.api_requests import AddDetailView, RecommendItemsToUser, Batch, AddRating, ResetDatabase, AddUserProperty, AddItemProperty, SetItemValues\n'), ((3709, 3760), 'recombee_api_client.api_requests.SetItemValues', 'SetItemValues', (['user_id', 'values'], 
{'cascade_create': '(True)'}), '(user_id, values, cascade_create=True)\n', (3722, 3760), False, 'from recombee_api_client.api_requests import AddDetailView, RecommendItemsToUser, Batch, AddRating, ResetDatabase, AddUserProperty, AddItemProperty, SetItemValues\n'), ((6545, 6560), 'recombee_api_client.api_requests.ResetDatabase', 'ResetDatabase', ([], {}), '()\n', (6558, 6560), False, 'from recombee_api_client.api_requests import AddDetailView, RecommendItemsToUser, Batch, AddRating, ResetDatabase, AddUserProperty, AddItemProperty, SetItemValues\n'), ((6722, 6781), 'xminds.compat.logger.warning', 'logger.warning', (['f"""Resetting Recombee dataset failed: e={e}"""'], {}), "(f'Resetting Recombee dataset failed: e={e}')\n", (6736, 6781), False, 'from xminds.compat import logger\n'), ((7018, 7082), 'recombee_api_client.api_requests.RecommendItemsToUser', 'RecommendItemsToUser', (['user_id', 'n'], {'logic': "{'name': self.algorithm}"}), "(user_id, n, logic={'name': self.algorithm})\n", (7038, 7082), False, 'from recombee_api_client.api_requests import AddDetailView, RecommendItemsToUser, Batch, AddRating, ResetDatabase, AddUserProperty, AddItemProperty, SetItemValues\n'), ((7437, 7460), 'xminds.compat.logger.info', 'logger.info', (['"""DB reset"""'], {}), "('DB reset')\n", (7448, 7460), False, 'from xminds.compat import logger\n'), ((7701, 7739), 'xminds.compat.logger.info', 'logger.info', (['"""Not ready yet. Sleep 10"""'], {}), "('Not ready yet. Sleep 10')\n", (7712, 7739), False, 'from xminds.compat import logger\n'), ((7760, 7774), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (7770, 7774), False, 'import time\n'), ((7567, 7578), 'time.time', 'time.time', ([], {}), '()\n', (7576, 7578), False, 'import time\n')] |
import gym
import copy
import random
import numpy as np
import matplotlib.pyplot as plt
class Node:
def __init__(self, parent=None, action=None):
self.parent = parent # parent of this node
self.action = action # action leading from parent to this node
self.children = []
self.sum_value = 0. # sum of values observed for this node, use sum_value/visits for the mean
self.visits = 1
def rollout(env, maxsteps=100):
""" Random policy for rollouts """
G = 0
for i in range(maxsteps):
action = env.action_space.sample()
_, reward, terminal, _ = env.step(action)
G += reward
if terminal:
return G
return G
def mcts(env, root, maxiter=500):
""" TODO: Use this function as a starting point for implementing Monte Carlo Tree Search
"""
# this is an example of how to add nodes to the root for all possible actions:
root.children = [Node(root, a) for a in range(env.action_space.n)]
eps = 0.01
path_array = []
for i in range(maxiter):
state = copy.deepcopy(env)
G = 0.
# TODO: traverse the tree using an epsilon greedy tree policy
terminal = True
current_path_length = 0
while len(root.children) > 0:
rand = random.uniform(0.0, 1.0)
new_node = random.choice(root.children)
if rand > eps:
# take greedy action
curr_max = -1000000
for n in root.children:
if n.sum_value > curr_max:
new_node = n
curr_max = n.sum_value
# step down to the next node
current_path_length += 1
root = new_node
_, reward, terminal, _ = state.step(root.action)
G += reward
path_array.append(current_path_length)
# TODO: Expansion of tree
if not terminal:
expanded_nodes = [Node(root, a) for a in range(state.action_space.n)]
root.children = expanded_nodes
# This performs a rollout (Simulation):
if not terminal:
G += rollout(state)
# TODO: update all visited nodes in the tree
# This updates values for the current node:
while True:
root.visits += 1
root.sum_value += G
if root.parent is not None:
root = root.parent
else:
break
plt.plot(range(len(path_array)), path_array, color='red')
plt.show()
def main():
env = gym.make("Taxi-v3")
env.seed(0) # use seed to make results better comparable
# run the algorithm 10 times:
rewards = []
for i in range(10):
env.reset()
terminal = False
root = Node() # Initialize empty tree
sum_reward = 0.
while not terminal:
#env.render()
mcts(env, root) # expand tree from root node using mcts
values = [c.sum_value / c.visits for c in root.children] # calculate values for child actions
bestchild = root.children[np.argmax(values)] # select the best child
_, reward, terminal, _ = env.step(bestchild.action) # perform action for child
root = bestchild # use the best child as next root
root.parent = None
sum_reward += reward
rewards.append(sum_reward)
print("finished run " + str(i + 1) + " with reward: " + str(sum_reward))
print("mean reward: ", np.mean(rewards))
if __name__ == "__main__":
main()
| [
"copy.deepcopy",
"matplotlib.pyplot.show",
"gym.make",
"random.uniform",
"numpy.argmax",
"random.choice",
"numpy.mean"
] | [((2551, 2561), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2559, 2561), True, 'import matplotlib.pyplot as plt\n'), ((2585, 2604), 'gym.make', 'gym.make', (['"""Taxi-v3"""'], {}), "('Taxi-v3')\n", (2593, 2604), False, 'import gym\n'), ((1085, 1103), 'copy.deepcopy', 'copy.deepcopy', (['env'], {}), '(env)\n', (1098, 1103), False, 'import copy\n'), ((3533, 3549), 'numpy.mean', 'np.mean', (['rewards'], {}), '(rewards)\n', (3540, 3549), True, 'import numpy as np\n'), ((1303, 1327), 'random.uniform', 'random.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1317, 1327), False, 'import random\n'), ((1351, 1379), 'random.choice', 'random.choice', (['root.children'], {}), '(root.children)\n', (1364, 1379), False, 'import random\n'), ((3126, 3143), 'numpy.argmax', 'np.argmax', (['values'], {}), '(values)\n', (3135, 3143), True, 'import numpy as np\n')] |
'''
This script uses a high SNR pixel from a pre-processed histogram image and extracts the IRF of that scene
The data collected by this setup has a bi-modal IRF due to lens inter-reflections which explains the two peaks.
NOTE: This script may not work well with data acquired in synchronous mode that has pile-up.
You may need to correct for pile-up first
NOTE: The ext_5% when denoised end up with 0 photons everywhere so we need to reduce the amount of denoising
'''
#### Standard Library Imports
import os
import sys
sys.path.append('./tof-lib')
#### Library imports
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import gaussian_filter
from IPython.core import debugger
breakpoint = debugger.set_trace
#### Local imports
from scan_data_utils import irf_dirpath
from scan_data_utils import *
from bimodal2unimodal_hist_img import bimodal2unimodal_crop, get_unimodal_nt
from research_utils.timer import Timer
from research_utils.plot_utils import *
from research_utils.io_ops import load_json
from depth_decoding import IdentityCoding
if __name__=='__main__':
## Load parameters shared by all
scan_data_params = load_json('scan_params.json')
io_dirpaths = load_json('io_dirpaths.json')
hist_img_base_dirpath = io_dirpaths["preprocessed_hist_data_base_dirpath"]
## Load processed scene:
## Set scene that will be processed
scene_id = '20190207_face_scanning_low_mu/free'
# scene_id = '20190207_face_scanning_low_mu/det'
# scene_id = '20190207_face_scanning_low_mu/ground_truth'
# scene_id = '20190207_face_scanning_low_mu/ext_opt_filtering'
# scene_id = '20190207_face_scanning_low_mu/ext_5%'
# scene_id = '20190209_deer_high_mu/free'
# scene_id = '20190209_deer_high_mu/det'
# scene_id = '20190209_deer_high_mu/ext'
# scene_id = '20190209_deer_high_mu/ext_5%'
scene_id = '20181105_face/low_flux'
scene_id = '20181105_face/opt_flux'
assert(scene_id in scan_data_params['scene_ids']), "{} not in scene_ids".format(scene_id)
hist_dirpath = os.path.join(hist_img_base_dirpath, scene_id)
out_dirpath = os.path.join(irf_dirpath, scene_id)
os.makedirs(out_dirpath, exist_ok=True)
## Get params for scene
scan_params = scan_data_params['scene_params'][scene_id]
## Set parameters of histogram we want to load
irf_tres = scan_data_params['min_tbin_size'] # in picosecs
hist_img_tau = scan_data_params['hist_preprocessing_params']['hist_end_time'] - scan_data_params['hist_preprocessing_params']['hist_start_time']
hist_img_fname = get_hist_img_fname(scan_params['n_rows_fullres'], scan_params['n_cols_fullres'], irf_tres, hist_img_tau)
hist_img_fpath = os.path.join(hist_dirpath, hist_img_fname)
## Load histogram
assert(os.path.exists(hist_img_fpath)), "{} does not exist. Make sure to run preprocess_raw_hist_img.py first".format(hist_img_fpath)
hist_img = np.load(hist_img_fpath)
(nr,nc,nt) = hist_img.shape
(tbins, tbin_edges) = get_hist_bins(hist_img_tau, irf_tres)
## Apply denoising
if('ext_5%' in scene_id):
d_hist_img = gaussian_filter(hist_img, sigma=0.1, mode='wrap', truncate=3)
else:
d_hist_img = gaussian_filter(hist_img, sigma=1, mode='wrap', truncate=3)
min_signal_threshold=1.0
if('20190207_face_scanning_low_mu' in scene_id):
(r,c) = (109, 50)
elif('20190209_deer_high_mu' in scene_id):
(r,c) = (58, 60)
else:
(r,c) = (nr//2, nc//2)
(r_max,c_max) = np.unravel_index(np.argmax(hist_img.sum(axis=-1)), (nr,nc))
## extract selected irf and center it
irf = d_hist_img[r, c, :]
irf = np.roll(irf, -1*irf.argmax())
##
## Zero out bins with less than scene specific threshold
irf -= np.median(irf)
d_hist_img -= np.median(d_hist_img,axis=-1,keepdims=True)
irf[irf < min_signal_threshold] = 0.
d_hist_img[d_hist_img < min_signal_threshold] = 0.
## Save IRF
irf_fname = get_irf_fname(irf_tres, hist_img_tau)
np.save(os.path.join(out_dirpath, irf_fname), irf)
## Create uni-modal irf by zero-ing out the second peak OR cropping
pulse_len = time2bin(scan_data_params['irf_params']['pulse_len'], irf_tres)
second_pulse_offset = time2bin(scan_data_params['irf_params']['second_pulse_offset'], irf_tres)
unimodal_nt = get_unimodal_nt(nt, scan_data_params['irf_params']['pulse_len'], irf_tres)
# Generate uni-modal IRF with the same length as original
unimodal_irf_samelen = np.array(irf)
unimodal_irf_samelen[second_pulse_offset:second_pulse_offset+pulse_len] = 0.
np.save(os.path.join(out_dirpath, "unimodal-"+irf_fname), unimodal_irf_samelen)
# Generate uni-modal IRF where we crop the second pulse and reduce the length
unimodal_irf = bimodal2unimodal_crop(irf, first_pulse_start_idx=0, pulse_len=pulse_len, second_pulse_offset=second_pulse_offset)
unimodal_irf_tau = unimodal_irf.size*irf_tres
unimodal_irf_fname = get_irf_fname(irf_tres, unimodal_irf_tau)
np.save(os.path.join(out_dirpath, "unimodal-"+unimodal_irf_fname), unimodal_irf)
## Fit a cubic spline function to be able to generate any
f = fit_irf(irf)
x_fullres = np.arange(0, nt) * (1./nt)
## reconstruct depths with irf
coding_obj = IdentityCoding(nt, h_irf=irf, account_irf=True)
decoded_depths = coding_obj.max_peak_decoding(hist_img, rec_algo_id='matchfilt').squeeze()
## Plot some results
plt.clf()
plt.pause(0.1)
plt.subplot(3,3,1)
plt.imshow(hist_img.sum(axis=-1)); plt.title('Sum of Hist')
plt.subplot(3,3,2)
plt.imshow(hist_img.argmax(axis=-1)); plt.title('Argmax')
plt.subplot(3,3,3)
plt.imshow(decoded_depths); plt.title('MatchFilt w/ IRF');plt.colorbar()
plt.subplot(3,1,2)
plt.plot(hist_img[r,c], linewidth=2, alpha=0.75, label='Raw IRF: {},{}'.format(r,c))
plt.plot(irf, linewidth=2, alpha=0.75, label='Processed IRF: {},{}'.format(r,c))
plt.plot(d_hist_img[r+1,c], linewidth=2, alpha=0.75, label='Neighbor Pre-proc IRF: {},{}'.format(r+1,c))
# plt.plot(d_hist_img[r+1,c+1], linewidth=2, alpha=0.75, label='Neighbor Pre-proc IRF: {},{}'.format(r+1,c+1))
plt.plot(d_hist_img[93,45], linewidth=2, alpha=0.75, label='Neighbor Pre-proc IRF: {},{}'.format(93,45))
plt.legend(fontsize=14)
plt.subplot(3,1,3)
plt.plot(hist_img[r,c], linewidth=2, alpha=0.75, label='Raw IRF: {},{}'.format(r,c))
plt.plot(unimodal_irf, linewidth=2, alpha=0.75, label='Crop Uni-modal IRF: {},{}'.format(r,c))
plt.legend(fontsize=14)
# results_dirpath = os.path.join(io_dirpaths['results_dirpath'], 'real_data_results/irf_calib')
# out_fname = 'irf_{}_r-{}-c-{}_tres-{}ps_tlen-{}ps'.format(scene_id.replace('/','--'), r, c, int(irf_tres), int(hist_img_tau))
# save_currfig_png(results_dirpath, out_fname)
| [
"matplotlib.pyplot.title",
"numpy.load",
"matplotlib.pyplot.clf",
"numpy.arange",
"research_utils.io_ops.load_json",
"bimodal2unimodal_hist_img.bimodal2unimodal_crop",
"os.path.join",
"sys.path.append",
"matplotlib.pyplot.imshow",
"scipy.ndimage.gaussian_filter",
"os.path.exists",
"matplotlib.... | [((545, 573), 'sys.path.append', 'sys.path.append', (['"""./tof-lib"""'], {}), "('./tof-lib')\n", (560, 573), False, 'import sys\n'), ((1178, 1207), 'research_utils.io_ops.load_json', 'load_json', (['"""scan_params.json"""'], {}), "('scan_params.json')\n", (1187, 1207), False, 'from research_utils.io_ops import load_json\n'), ((1226, 1255), 'research_utils.io_ops.load_json', 'load_json', (['"""io_dirpaths.json"""'], {}), "('io_dirpaths.json')\n", (1235, 1255), False, 'from research_utils.io_ops import load_json\n'), ((2075, 2120), 'os.path.join', 'os.path.join', (['hist_img_base_dirpath', 'scene_id'], {}), '(hist_img_base_dirpath, scene_id)\n', (2087, 2120), False, 'import os\n'), ((2140, 2175), 'os.path.join', 'os.path.join', (['irf_dirpath', 'scene_id'], {}), '(irf_dirpath, scene_id)\n', (2152, 2175), False, 'import os\n'), ((2180, 2219), 'os.makedirs', 'os.makedirs', (['out_dirpath'], {'exist_ok': '(True)'}), '(out_dirpath, exist_ok=True)\n', (2191, 2219), False, 'import os\n'), ((2726, 2768), 'os.path.join', 'os.path.join', (['hist_dirpath', 'hist_img_fname'], {}), '(hist_dirpath, hist_img_fname)\n', (2738, 2768), False, 'import os\n'), ((2803, 2833), 'os.path.exists', 'os.path.exists', (['hist_img_fpath'], {}), '(hist_img_fpath)\n', (2817, 2833), False, 'import os\n'), ((2945, 2968), 'numpy.load', 'np.load', (['hist_img_fpath'], {}), '(hist_img_fpath)\n', (2952, 2968), True, 'import numpy as np\n'), ((3789, 3803), 'numpy.median', 'np.median', (['irf'], {}), '(irf)\n', (3798, 3803), True, 'import numpy as np\n'), ((3822, 3867), 'numpy.median', 'np.median', (['d_hist_img'], {'axis': '(-1)', 'keepdims': '(True)'}), '(d_hist_img, axis=-1, keepdims=True)\n', (3831, 3867), True, 'import numpy as np\n'), ((4359, 4433), 'bimodal2unimodal_hist_img.get_unimodal_nt', 'get_unimodal_nt', (['nt', "scan_data_params['irf_params']['pulse_len']", 'irf_tres'], {}), "(nt, scan_data_params['irf_params']['pulse_len'], irf_tres)\n", (4374, 4433), False, 'from 
bimodal2unimodal_hist_img import bimodal2unimodal_crop, get_unimodal_nt\n'), ((4523, 4536), 'numpy.array', 'np.array', (['irf'], {}), '(irf)\n', (4531, 4536), True, 'import numpy as np\n'), ((4803, 4920), 'bimodal2unimodal_hist_img.bimodal2unimodal_crop', 'bimodal2unimodal_crop', (['irf'], {'first_pulse_start_idx': '(0)', 'pulse_len': 'pulse_len', 'second_pulse_offset': 'second_pulse_offset'}), '(irf, first_pulse_start_idx=0, pulse_len=pulse_len,\n second_pulse_offset=second_pulse_offset)\n', (4824, 4920), False, 'from bimodal2unimodal_hist_img import bimodal2unimodal_crop, get_unimodal_nt\n'), ((5300, 5347), 'depth_decoding.IdentityCoding', 'IdentityCoding', (['nt'], {'h_irf': 'irf', 'account_irf': '(True)'}), '(nt, h_irf=irf, account_irf=True)\n', (5314, 5347), False, 'from depth_decoding import IdentityCoding\n'), ((5473, 5482), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5480, 5482), True, 'import matplotlib.pyplot as plt\n'), ((5487, 5501), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.1)'], {}), '(0.1)\n', (5496, 5501), True, 'import matplotlib.pyplot as plt\n'), ((5506, 5526), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(1)'], {}), '(3, 3, 1)\n', (5517, 5526), True, 'import matplotlib.pyplot as plt\n'), ((5564, 5588), 'matplotlib.pyplot.title', 'plt.title', (['"""Sum of Hist"""'], {}), "('Sum of Hist')\n", (5573, 5588), True, 'import matplotlib.pyplot as plt\n'), ((5593, 5613), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(2)'], {}), '(3, 3, 2)\n', (5604, 5613), True, 'import matplotlib.pyplot as plt\n'), ((5654, 5673), 'matplotlib.pyplot.title', 'plt.title', (['"""Argmax"""'], {}), "('Argmax')\n", (5663, 5673), True, 'import matplotlib.pyplot as plt\n'), ((5678, 5698), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(3)', '(3)'], {}), '(3, 3, 3)\n', (5689, 5698), True, 'import matplotlib.pyplot as plt\n'), ((5701, 5727), 'matplotlib.pyplot.imshow', 'plt.imshow', (['decoded_depths'], {}), 
'(decoded_depths)\n', (5711, 5727), True, 'import matplotlib.pyplot as plt\n'), ((5729, 5758), 'matplotlib.pyplot.title', 'plt.title', (['"""MatchFilt w/ IRF"""'], {}), "('MatchFilt w/ IRF')\n", (5738, 5758), True, 'import matplotlib.pyplot as plt\n'), ((5759, 5773), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5771, 5773), True, 'import matplotlib.pyplot as plt\n'), ((5778, 5798), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', (5789, 5798), True, 'import matplotlib.pyplot as plt\n'), ((6308, 6331), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (6318, 6331), True, 'import matplotlib.pyplot as plt\n'), ((6336, 6356), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (6347, 6356), True, 'import matplotlib.pyplot as plt\n'), ((6547, 6570), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (6557, 6570), True, 'import matplotlib.pyplot as plt\n'), ((3140, 3201), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['hist_img'], {'sigma': '(0.1)', 'mode': '"""wrap"""', 'truncate': '(3)'}), "(hist_img, sigma=0.1, mode='wrap', truncate=3)\n", (3155, 3201), False, 'from scipy.ndimage import gaussian_filter\n'), ((3233, 3292), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['hist_img'], {'sigma': '(1)', 'mode': '"""wrap"""', 'truncate': '(3)'}), "(hist_img, sigma=1, mode='wrap', truncate=3)\n", (3248, 3292), False, 'from scipy.ndimage import gaussian_filter\n'), ((4045, 4081), 'os.path.join', 'os.path.join', (['out_dirpath', 'irf_fname'], {}), '(out_dirpath, irf_fname)\n', (4057, 4081), False, 'import os\n'), ((4630, 4680), 'os.path.join', 'os.path.join', (['out_dirpath', "('unimodal-' + irf_fname)"], {}), "(out_dirpath, 'unimodal-' + irf_fname)\n", (4642, 4680), False, 'import os\n'), ((5046, 5105), 'os.path.join', 'os.path.join', (['out_dirpath', "('unimodal-' + unimodal_irf_fname)"], {}), 
"(out_dirpath, 'unimodal-' + unimodal_irf_fname)\n", (5058, 5105), False, 'import os\n'), ((5219, 5235), 'numpy.arange', 'np.arange', (['(0)', 'nt'], {}), '(0, nt)\n', (5228, 5235), True, 'import numpy as np\n')] |
"""
test iotools for PSM3
"""
import os
from pvlib.iotools import psm3
from conftest import DATA_DIR, RERUNS, RERUNS_DELAY
import numpy as np
import pandas as pd
import pytest
from requests import HTTPError
from io import StringIO
import warnings
TMY_TEST_DATA = DATA_DIR / 'test_psm3_tmy-2017.csv'
YEAR_TEST_DATA = DATA_DIR / 'test_psm3_2017.csv'
YEAR_TEST_DATA_5MIN = DATA_DIR / 'test_psm3_2019_5min.csv'
MANUAL_TEST_DATA = DATA_DIR / 'test_read_psm3.csv'
LATITUDE, LONGITUDE = 40.5137, -108.5449
HEADER_FIELDS = [
'Source', 'Location ID', 'City', 'State', 'Country', 'Latitude',
'Longitude', 'Time Zone', 'Elevation', 'Local Time Zone',
'Dew Point Units', 'DHI Units', 'DNI Units', 'GHI Units',
'Temperature Units', 'Pressure Units', 'Wind Direction Units',
'Wind Speed', 'Surface Albedo Units', 'Version']
PVLIB_EMAIL = '<EMAIL>'
@pytest.fixture(scope="module")
def nrel_api_key():
"""Supplies pvlib-python's NREL Developer Network API key.
Azure Pipelines CI utilizes a secret variable set to NREL_API_KEY
to mitigate failures associated with using the default key of
"DEMO_KEY". A user is capable of using their own key this way if
desired however the default key should suffice for testing purposes.
"""
try:
demo_key = os.environ["NREL_API_KEY"]
except KeyError:
warnings.warn(
"WARNING: NREL API KEY environment variable not set! "
"Using DEMO_KEY instead. Unexpected failures may occur."
)
demo_key = 'DEMO_KEY'
return demo_key
def assert_psm3_equal(header, data, expected):
"""check consistency of PSM3 data"""
# check datevec columns
assert np.allclose(data.Year, expected.Year)
assert np.allclose(data.Month, expected.Month)
assert np.allclose(data.Day, expected.Day)
assert np.allclose(data.Hour, expected.Hour)
assert np.allclose(data.Minute, expected.Minute)
# check data columns
assert np.allclose(data.GHI, expected.GHI)
assert np.allclose(data.DNI, expected.DNI)
assert np.allclose(data.DHI, expected.DHI)
assert np.allclose(data.Temperature, expected.Temperature)
assert np.allclose(data.Pressure, expected.Pressure)
assert np.allclose(data['Dew Point'], expected['Dew Point'])
assert np.allclose(data['Surface Albedo'], expected['Surface Albedo'])
assert np.allclose(data['Wind Speed'], expected['Wind Speed'])
assert np.allclose(data['Wind Direction'], expected['Wind Direction'])
# check header
for hf in HEADER_FIELDS:
assert hf in header
# check timezone
assert (data.index.tzinfo.zone == 'Etc/GMT%+d' % -header['Time Zone'])
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_get_psm3_tmy(nrel_api_key):
"""test get_psm3 with a TMY"""
header, data = psm3.get_psm3(LATITUDE, LONGITUDE, nrel_api_key,
PVLIB_EMAIL, names='tmy-2017')
expected = pd.read_csv(TMY_TEST_DATA)
assert_psm3_equal(header, data, expected)
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_get_psm3_singleyear(nrel_api_key):
"""test get_psm3 with a single year"""
header, data = psm3.get_psm3(LATITUDE, LONGITUDE, nrel_api_key,
PVLIB_EMAIL, names='2017', interval=30)
expected = pd.read_csv(YEAR_TEST_DATA)
assert_psm3_equal(header, data, expected)
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_get_psm3_5min(nrel_api_key):
"""test get_psm3 for 5-minute data"""
header, data = psm3.get_psm3(LATITUDE, LONGITUDE, nrel_api_key,
PVLIB_EMAIL, names='2019', interval=5)
assert len(data) == 525600/5
first_day = data.loc['2019-01-01']
expected = pd.read_csv(YEAR_TEST_DATA_5MIN)
assert_psm3_equal(header, first_day, expected)
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_get_psm3_check_leap_day(nrel_api_key):
_, data_2012 = psm3.get_psm3(LATITUDE, LONGITUDE, nrel_api_key,
PVLIB_EMAIL, names="2012", interval=60,
leap_day=True)
assert len(data_2012) == (8760 + 24)
@pytest.mark.parametrize('latitude, longitude, api_key, names, interval',
[(LATITUDE, LONGITUDE, 'BAD', 'tmy-2017', 60),
(51, -5, nrel_api_key, '<KEY>', 60),
(LATITUDE, LONGITUDE, nrel_api_key, 'bad', 60),
(LATITUDE, LONGITUDE, nrel_api_key, '2017', 15),
])
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_get_psm3_tmy_errors(
latitude, longitude, api_key, names, interval
):
"""Test get_psm3() for multiple erroneous input scenarios.
These scenarios include:
* Bad API key -> HTTP 403 forbidden because api_key is rejected
* Bad latitude/longitude -> Coordinates were not found in the NSRDB.
* Bad name -> Name is not one of the available options.
* Bad interval, single year -> Intervals can only be 30 or 60 minutes.
"""
with pytest.raises(HTTPError) as excinfo:
psm3.get_psm3(latitude, longitude, api_key, PVLIB_EMAIL,
names=names, interval=interval)
# ensure the HTTPError caught isn't due to overuse of the API key
assert "OVER_RATE_LIMIT" not in str(excinfo.value)
@pytest.fixture
def io_input(request):
"""file-like object for parse_psm3"""
with MANUAL_TEST_DATA.open() as f:
data = f.read()
obj = StringIO(data)
return obj
def test_parse_psm3(io_input):
"""test parse_psm3"""
header, data = psm3.parse_psm3(io_input)
expected = pd.read_csv(YEAR_TEST_DATA)
assert_psm3_equal(header, data, expected)
def test_read_psm3():
"""test read_psm3"""
header, data = psm3.read_psm3(MANUAL_TEST_DATA)
expected = pd.read_csv(YEAR_TEST_DATA)
assert_psm3_equal(header, data, expected)
| [
"pvlib.iotools.psm3.parse_psm3",
"pvlib.iotools.psm3.read_psm3",
"io.StringIO",
"pandas.read_csv",
"numpy.allclose",
"pytest.fixture",
"pvlib.iotools.psm3.get_psm3",
"pytest.raises",
"pytest.mark.flaky",
"pytest.mark.parametrize",
"warnings.warn"
] | [((859, 889), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (873, 889), False, 'import pytest\n'), ((2690, 2749), 'pytest.mark.flaky', 'pytest.mark.flaky', ([], {'reruns': 'RERUNS', 'reruns_delay': 'RERUNS_DELAY'}), '(reruns=RERUNS, reruns_delay=RERUNS_DELAY)\n', (2707, 2749), False, 'import pytest\n'), ((3070, 3129), 'pytest.mark.flaky', 'pytest.mark.flaky', ([], {'reruns': 'RERUNS', 'reruns_delay': 'RERUNS_DELAY'}), '(reruns=RERUNS, reruns_delay=RERUNS_DELAY)\n', (3087, 3129), False, 'import pytest\n'), ((3475, 3534), 'pytest.mark.flaky', 'pytest.mark.flaky', ([], {'reruns': 'RERUNS', 'reruns_delay': 'RERUNS_DELAY'}), '(reruns=RERUNS, reruns_delay=RERUNS_DELAY)\n', (3492, 3534), False, 'import pytest\n'), ((3954, 4013), 'pytest.mark.flaky', 'pytest.mark.flaky', ([], {'reruns': 'RERUNS', 'reruns_delay': 'RERUNS_DELAY'}), '(reruns=RERUNS, reruns_delay=RERUNS_DELAY)\n', (3971, 4013), False, 'import pytest\n'), ((4295, 4562), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""latitude, longitude, api_key, names, interval"""', "[(LATITUDE, LONGITUDE, 'BAD', 'tmy-2017', 60), (51, -5, nrel_api_key,\n '<KEY>', 60), (LATITUDE, LONGITUDE, nrel_api_key, 'bad', 60), (LATITUDE,\n LONGITUDE, nrel_api_key, '2017', 15)]"], {}), "('latitude, longitude, api_key, names, interval', [(\n LATITUDE, LONGITUDE, 'BAD', 'tmy-2017', 60), (51, -5, nrel_api_key,\n '<KEY>', 60), (LATITUDE, LONGITUDE, nrel_api_key, 'bad', 60), (LATITUDE,\n LONGITUDE, nrel_api_key, '2017', 15)])\n", (4318, 4562), False, 'import pytest\n'), ((4707, 4766), 'pytest.mark.flaky', 'pytest.mark.flaky', ([], {'reruns': 'RERUNS', 'reruns_delay': 'RERUNS_DELAY'}), '(reruns=RERUNS, reruns_delay=RERUNS_DELAY)\n', (4724, 4766), False, 'import pytest\n'), ((1684, 1721), 'numpy.allclose', 'np.allclose', (['data.Year', 'expected.Year'], {}), '(data.Year, expected.Year)\n', (1695, 1721), True, 'import numpy as np\n'), ((1733, 1772), 'numpy.allclose', 'np.allclose', 
(['data.Month', 'expected.Month'], {}), '(data.Month, expected.Month)\n', (1744, 1772), True, 'import numpy as np\n'), ((1784, 1819), 'numpy.allclose', 'np.allclose', (['data.Day', 'expected.Day'], {}), '(data.Day, expected.Day)\n', (1795, 1819), True, 'import numpy as np\n'), ((1831, 1868), 'numpy.allclose', 'np.allclose', (['data.Hour', 'expected.Hour'], {}), '(data.Hour, expected.Hour)\n', (1842, 1868), True, 'import numpy as np\n'), ((1880, 1921), 'numpy.allclose', 'np.allclose', (['data.Minute', 'expected.Minute'], {}), '(data.Minute, expected.Minute)\n', (1891, 1921), True, 'import numpy as np\n'), ((1958, 1993), 'numpy.allclose', 'np.allclose', (['data.GHI', 'expected.GHI'], {}), '(data.GHI, expected.GHI)\n', (1969, 1993), True, 'import numpy as np\n'), ((2005, 2040), 'numpy.allclose', 'np.allclose', (['data.DNI', 'expected.DNI'], {}), '(data.DNI, expected.DNI)\n', (2016, 2040), True, 'import numpy as np\n'), ((2052, 2087), 'numpy.allclose', 'np.allclose', (['data.DHI', 'expected.DHI'], {}), '(data.DHI, expected.DHI)\n', (2063, 2087), True, 'import numpy as np\n'), ((2099, 2150), 'numpy.allclose', 'np.allclose', (['data.Temperature', 'expected.Temperature'], {}), '(data.Temperature, expected.Temperature)\n', (2110, 2150), True, 'import numpy as np\n'), ((2162, 2207), 'numpy.allclose', 'np.allclose', (['data.Pressure', 'expected.Pressure'], {}), '(data.Pressure, expected.Pressure)\n', (2173, 2207), True, 'import numpy as np\n'), ((2219, 2272), 'numpy.allclose', 'np.allclose', (["data['Dew Point']", "expected['Dew Point']"], {}), "(data['Dew Point'], expected['Dew Point'])\n", (2230, 2272), True, 'import numpy as np\n'), ((2284, 2347), 'numpy.allclose', 'np.allclose', (["data['Surface Albedo']", "expected['Surface Albedo']"], {}), "(data['Surface Albedo'], expected['Surface Albedo'])\n", (2295, 2347), True, 'import numpy as np\n'), ((2359, 2414), 'numpy.allclose', 'np.allclose', (["data['Wind Speed']", "expected['Wind Speed']"], {}), "(data['Wind Speed'], 
expected['Wind Speed'])\n", (2370, 2414), True, 'import numpy as np\n'), ((2426, 2489), 'numpy.allclose', 'np.allclose', (["data['Wind Direction']", "expected['Wind Direction']"], {}), "(data['Wind Direction'], expected['Wind Direction'])\n", (2437, 2489), True, 'import numpy as np\n'), ((2841, 2920), 'pvlib.iotools.psm3.get_psm3', 'psm3.get_psm3', (['LATITUDE', 'LONGITUDE', 'nrel_api_key', 'PVLIB_EMAIL'], {'names': '"""tmy-2017"""'}), "(LATITUDE, LONGITUDE, nrel_api_key, PVLIB_EMAIL, names='tmy-2017')\n", (2854, 2920), False, 'from pvlib.iotools import psm3\n'), ((2969, 2995), 'pandas.read_csv', 'pd.read_csv', (['TMY_TEST_DATA'], {}), '(TMY_TEST_DATA)\n', (2980, 2995), True, 'import pandas as pd\n'), ((3236, 3328), 'pvlib.iotools.psm3.get_psm3', 'psm3.get_psm3', (['LATITUDE', 'LONGITUDE', 'nrel_api_key', 'PVLIB_EMAIL'], {'names': '"""2017"""', 'interval': '(30)'}), "(LATITUDE, LONGITUDE, nrel_api_key, PVLIB_EMAIL, names='2017',\n interval=30)\n", (3249, 3328), False, 'from pvlib.iotools import psm3\n'), ((3373, 3400), 'pandas.read_csv', 'pd.read_csv', (['YEAR_TEST_DATA'], {}), '(YEAR_TEST_DATA)\n', (3384, 3400), True, 'import pandas as pd\n'), ((3634, 3725), 'pvlib.iotools.psm3.get_psm3', 'psm3.get_psm3', (['LATITUDE', 'LONGITUDE', 'nrel_api_key', 'PVLIB_EMAIL'], {'names': '"""2019"""', 'interval': '(5)'}), "(LATITUDE, LONGITUDE, nrel_api_key, PVLIB_EMAIL, names='2019',\n interval=5)\n", (3647, 3725), False, 'from pvlib.iotools import psm3\n'), ((3842, 3874), 'pandas.read_csv', 'pd.read_csv', (['YEAR_TEST_DATA_5MIN'], {}), '(YEAR_TEST_DATA_5MIN)\n', (3853, 3874), True, 'import pandas as pd\n'), ((4081, 4188), 'pvlib.iotools.psm3.get_psm3', 'psm3.get_psm3', (['LATITUDE', 'LONGITUDE', 'nrel_api_key', 'PVLIB_EMAIL'], {'names': '"""2012"""', 'interval': '(60)', 'leap_day': '(True)'}), "(LATITUDE, LONGITUDE, nrel_api_key, PVLIB_EMAIL, names='2012',\n interval=60, leap_day=True)\n", (4094, 4188), False, 'from pvlib.iotools import psm3\n'), ((5673, 5687), 'io.StringIO', 
'StringIO', (['data'], {}), '(data)\n', (5681, 5687), False, 'from io import StringIO\n'), ((5781, 5806), 'pvlib.iotools.psm3.parse_psm3', 'psm3.parse_psm3', (['io_input'], {}), '(io_input)\n', (5796, 5806), False, 'from pvlib.iotools import psm3\n'), ((5822, 5849), 'pandas.read_csv', 'pd.read_csv', (['YEAR_TEST_DATA'], {}), '(YEAR_TEST_DATA)\n', (5833, 5849), True, 'import pandas as pd\n'), ((5964, 5996), 'pvlib.iotools.psm3.read_psm3', 'psm3.read_psm3', (['MANUAL_TEST_DATA'], {}), '(MANUAL_TEST_DATA)\n', (5978, 5996), False, 'from pvlib.iotools import psm3\n'), ((6012, 6039), 'pandas.read_csv', 'pd.read_csv', (['YEAR_TEST_DATA'], {}), '(YEAR_TEST_DATA)\n', (6023, 6039), True, 'import pandas as pd\n'), ((5236, 5260), 'pytest.raises', 'pytest.raises', (['HTTPError'], {}), '(HTTPError)\n', (5249, 5260), False, 'import pytest\n'), ((5281, 5373), 'pvlib.iotools.psm3.get_psm3', 'psm3.get_psm3', (['latitude', 'longitude', 'api_key', 'PVLIB_EMAIL'], {'names': 'names', 'interval': 'interval'}), '(latitude, longitude, api_key, PVLIB_EMAIL, names=names,\n interval=interval)\n', (5294, 5373), False, 'from pvlib.iotools import psm3\n'), ((1344, 1477), 'warnings.warn', 'warnings.warn', (['"""WARNING: NREL API KEY environment variable not set! Using DEMO_KEY instead. Unexpected failures may occur."""'], {}), "(\n 'WARNING: NREL API KEY environment variable not set! Using DEMO_KEY instead. Unexpected failures may occur.'\n )\n", (1357, 1477), False, 'import warnings\n')] |
#coding=utf-8
import argparse
import os
import time
import logging
import random
import torch
import torch.backends.cudnn as cudnn
import torch.optim
from torch.utils.data import DataLoader
cudnn.benchmark = True
import numpy as np
import models
from data import datasets
from utils import Parser,str2bool
from predict import validate_softmax
parser = argparse.ArgumentParser()
parser.add_argument('-cfg', '--cfg', default='3DUNet_dice_fold0', required=True, type=str,
help='Your detailed configuration of the network')
parser.add_argument('-mode', '--mode', default=0, required=True, type=int,choices=[0,1,2],
help='0 for cross-validation on the training set; '
'1 for validing on the validation set; '
'2 for testing on the testing set.')
parser.add_argument('-gpu', '--gpu', default='0,1,2,3', type=str)
parser.add_argument('-is_out', '--is_out', default=False, type=str2bool,
help='If ture, output the .nii file')
parser.add_argument('-verbose', '--verbose', default=True, type=str2bool,
help='If True, print more infomation of the debuging output')
parser.add_argument('-use_TTA', '--use_TTA', default=False, type=str2bool,
help='It is a postprocess approach.')
parser.add_argument('-postprocess', '--postprocess', default=False, type=str2bool,
help='Another postprocess approach.')
parser.add_argument('-save_format', '--save_format', default='nii', choices=['nii','npy'], type=str,
help='[nii] for submission; [npy] for models ensemble')
parser.add_argument('-snapshot', '--snapshot', default=False, type=str2bool,
help='If True, saving the snopshot figure of all samples.')
parser.add_argument('-restore', '--restore', default=argparse.SUPPRESS, type=str,
help='The path to restore the model.') # 'model_epoch_300.pth'
path = os.path.dirname(__file__)
args = parser.parse_args()
args = Parser(args.cfg, log='train').add_args(args)
args.gpu = str(args.gpu)
ckpts = args.makedir()
args.resume = os.path.join(ckpts, args.restore) # specify the epoch
def main():
    """Loads the configured network from its checkpoint and runs inference.

    Mode 0 runs on the training split (cross-validation, metrics enabled),
    mode 1 on the validation split and mode 2 on the test split; the latter
    two disable scoring because labels are unavailable.
    """
    # setup environments and seeds
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    assert torch.cuda.is_available(), "Currently, we only support CUDA version"
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)
    # Instantiate the network class named in the config and wrap for multi-GPU.
    Network = getattr(models, args.net) #
    model = Network(**args.net_params)
    model = torch.nn.DataParallel(model).cuda()
    print(args.resume)
    assert os.path.isfile(args.resume),"no checkpoint found at {}".format(args.resume)
    print("=> loading checkpoint '{}'".format(args.resume))
    checkpoint = torch.load(args.resume)
    args.start_iter = checkpoint['iter']
    model.load_state_dict(checkpoint['state_dict'])
    msg = ("=> loaded checkpoint '{}' (iter {})".format(args.resume, checkpoint['iter']))
    msg += '\n' + str(args)
    logging.info(msg)
    # Choose the data split; scoring is only possible where labels exist.
    if args.mode == 0:
        root_path = args.train_data_dir
        is_scoring = True
    elif args.mode == 1:
        root_path = args.valid_data_dir
        is_scoring = False
    elif args.mode == 2:
        root_path = args.test_data_dir
        is_scoring = False
    else:
        raise ValueError
    Dataset = getattr(datasets, args.dataset) #
    valid_list = os.path.join(root_path, args.valid_list)
    valid_set = Dataset(valid_list, root=root_path,for_train=False, transforms=args.test_transforms)
    # batch_size=1: volumes are processed one at a time during inference.
    valid_loader = DataLoader(
        valid_set,
        batch_size=1,
        shuffle=False,
        collate_fn=valid_set.collate,
        num_workers=10,
        pin_memory=True)
    if args.is_out:
        out_dir = './output/{}'.format(args.cfg)
        os.makedirs(os.path.join(out_dir,'submission'),exist_ok=True)
        os.makedirs(os.path.join(out_dir,'snapshot'),exist_ok=True)
    else:
        out_dir = ''
    logging.info('-'*50)
    logging.info(msg)
    # Inference only — disable autograd for the whole prediction pass.
    with torch.no_grad():
        validate_softmax(
            valid_loader,
            model,
            cfg=args.cfg,
            savepath=out_dir,
            save_format = args.save_format,
            names=valid_set.names,
            scoring=is_scoring,
            verbose=args.verbose,
            use_TTA=args.use_TTA,
            snapshot=args.snapshot,
            postprocess=args.postprocess,
            cpu_only=False)
if __name__ == '__main__':
    main()
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torch.manual_seed",
"os.path.dirname",
"torch.load",
"torch.cuda.manual_seed",
"logging.info",
"os.path.isfile",
"predict.validate_softmax",
"torch.cuda.is_available",
"random.seed",
"utils.Parser",
"torch.nn.D... | [((358, 383), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (381, 383), False, 'import argparse\n'), ((1982, 2007), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1997, 2007), False, 'import os\n'), ((2150, 2183), 'os.path.join', 'os.path.join', (['ckpts', 'args.restore'], {}), '(ckpts, args.restore)\n', (2162, 2183), False, 'import os\n'), ((2313, 2338), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2336, 2338), False, 'import torch\n'), ((2387, 2415), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2404, 2415), False, 'import torch\n'), ((2420, 2453), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2442, 2453), False, 'import torch\n'), ((2458, 2480), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (2469, 2480), False, 'import random\n'), ((2485, 2510), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2499, 2510), True, 'import numpy as np\n'), ((2676, 2703), 'os.path.isfile', 'os.path.isfile', (['args.resume'], {}), '(args.resume)\n', (2690, 2703), False, 'import os\n'), ((2829, 2852), 'torch.load', 'torch.load', (['args.resume'], {}), '(args.resume)\n', (2839, 2852), False, 'import torch\n'), ((3069, 3086), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (3081, 3086), False, 'import logging\n'), ((3461, 3501), 'os.path.join', 'os.path.join', (['root_path', 'args.valid_list'], {}), '(root_path, args.valid_list)\n', (3473, 3501), False, 'import os\n'), ((3623, 3741), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_set'], {'batch_size': '(1)', 'shuffle': '(False)', 'collate_fn': 'valid_set.collate', 'num_workers': '(10)', 'pin_memory': '(True)'}), '(valid_set, batch_size=1, shuffle=False, collate_fn=valid_set.\n collate, num_workers=10, pin_memory=True)\n', (3633, 3741), False, 'from torch.utils.data import DataLoader\n'), 
((4030, 4052), 'logging.info', 'logging.info', (["('-' * 50)"], {}), "('-' * 50)\n", (4042, 4052), False, 'import logging\n'), ((4055, 4072), 'logging.info', 'logging.info', (['msg'], {}), '(msg)\n', (4067, 4072), False, 'import logging\n'), ((2043, 2072), 'utils.Parser', 'Parser', (['args.cfg'], {'log': '"""train"""'}), "(args.cfg, log='train')\n", (2049, 2072), False, 'from utils import Parser, str2bool\n'), ((4083, 4098), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4096, 4098), False, 'import torch\n'), ((4108, 4376), 'predict.validate_softmax', 'validate_softmax', (['valid_loader', 'model'], {'cfg': 'args.cfg', 'savepath': 'out_dir', 'save_format': 'args.save_format', 'names': 'valid_set.names', 'scoring': 'is_scoring', 'verbose': 'args.verbose', 'use_TTA': 'args.use_TTA', 'snapshot': 'args.snapshot', 'postprocess': 'args.postprocess', 'cpu_only': '(False)'}), '(valid_loader, model, cfg=args.cfg, savepath=out_dir,\n save_format=args.save_format, names=valid_set.names, scoring=is_scoring,\n verbose=args.verbose, use_TTA=args.use_TTA, snapshot=args.snapshot,\n postprocess=args.postprocess, cpu_only=False)\n', (4124, 4376), False, 'from predict import validate_softmax\n'), ((2606, 2634), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (2627, 2634), False, 'import torch\n'), ((3876, 3911), 'os.path.join', 'os.path.join', (['out_dir', '"""submission"""'], {}), "(out_dir, 'submission')\n", (3888, 3911), False, 'import os\n'), ((3946, 3979), 'os.path.join', 'os.path.join', (['out_dir', '"""snapshot"""'], {}), "(out_dir, 'snapshot')\n", (3958, 3979), False, 'import os\n')] |
"""
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
# Command-line interface for one training job launched by the scheduler.
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
# Pin this process to the GPU assigned by the scheduler.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 64
args_lr = 0.0006
args_model = 'resnet50'
epoch_begin_time = 0  # wall-clock start of the current epoch; 0 = no epoch started yet
job_name = sys.argv[0].split('.')[0]  # job id derived from this script's filename
save_files = '/scratch/li.baol/checkpoint_random2/' + job_name + '*'  # checkpoint glob for this job
total_epochs = 50
starting_epoch = 0
# First step is to register our PID with the scheduler. Write to the lock
# file and rename it so readers never observe a partially written pid.json.
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
    pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
    fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
# When resuming, recover the epoch index encoded in the checkpoint filename
# (saved as <job_name>_<epoch>.h5 by the SIGTERM handler below).
if args.resume:
    save_file = glob.glob(save_files)[0]
    # epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
    starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data to [0, 1].
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled: center both splits on the TRAINING mean
# (the test set must not use its own statistics).
if subtract_pixel_mean:
    x_train_mean = np.mean(x_train, axis=0)
    x_train -= x_train_mean
    x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices (one-hot).
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Either restore the full (already compiled) model from a checkpoint, or
# build a fresh ResNet backbone + softmax head from scratch.
if args.resume:
    print('resume from checkpoint')
    model = keras.models.load_model(save_file)
else:
    print('train from start')
    model = models.Sequential()
    # Select the ResNet depth named in args_model; weights=None trains
    # from random initialization on 32x32 CIFAR inputs.
    if '50' in args_model:
        base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    elif '101' in args_model:
        base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    elif '152' in args_model:
        base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    #base_model.summary()
    #pdb.set_trace()
    #model.add(layers.UpSampling2D((2,2)))
    #model.add(layers.UpSampling2D((2,2)))
    #model.add(layers.UpSampling2D((2,2)))
    model.add(base_model)
    model.add(layers.Flatten())
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(128, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    #model.add(layers.Dense(64, activation='relu'))
    #model.add(layers.Dropout(0.5))
    #model.add(layers.BatchNormalization())
    model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
    model.compile(loss='categorical_crossentropy',
                optimizer=Adam(lr=args_lr),
                metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0  # updated by PrintEpoch; read by the SIGTERM handler
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: account wasted epoch time, checkpoint, then exit.

    Steps:
      1. Add the time spent in the unfinished epoch to epoch_waste.json.
      2. Delete any stale checkpoint and save the model as
         <job_name>_<current_epoch>.h5.
      3. Mark the job as checkpointed in checkpoint.json and terminate.
    """
    # first record the wasted epoch time
    global epoch_begin_time
    if epoch_begin_time == 0:
        epoch_waste_time = 0
    else:
        epoch_waste_time = int(time.time() - epoch_begin_time)
    epoch_waste_dict = {}
    with open('epoch_waste.json', 'r') as fp:
        epoch_waste_dict = json.load(fp)
    epoch_waste_dict[job_name] += epoch_waste_time
    json_file3 = json.dumps(epoch_waste_dict)
    with open('epoch_waste.json', 'w') as fp:
        fp.write(json_file3)
    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint that already exists
    for f in glob.glob(save_files):
        os.remove(f)
    model.save('/scratch/li.baol/checkpoint_random2/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')
    checkpoint_dict = {}
    with open('checkpoint.json', 'r') as fp:
        checkpoint_dict = json.load(fp)
    checkpoint_dict[job_name] = 1
    json_file3 = json.dumps(checkpoint_dict)
    with open('checkpoint.json', 'w') as fp:
        fp.write(json_file3)
    sys.exit()
# Install the checkpoint-on-kill handler so the scheduler can preempt us.
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
# Per-job TensorBoard log directory.
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
    """Records the index and wall-clock start time of every epoch.

    The module-level globals it writes are consumed by the SIGTERM handler
    so a preempted job knows which epoch to checkpoint and how much epoch
    time was wasted.
    """

    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch, epoch_begin_time
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# Tell the scheduler this job is now safe to preempt/checkpoint.
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
# Train for half the total budget, resuming from starting_epoch if needed.
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=round(total_epochs/2),
          validation_data=(x_test, y_test),
          shuffle=True,
          callbacks=callbacks,
          initial_epoch=starting_epoch,
          verbose=1
          )
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# Tell the scheduler this job has finished.
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| [
"keras.models.load_model",
"os.remove",
"argparse.ArgumentParser",
"json.dumps",
"keras.applications.resnet.ResNet50",
"numpy.mean",
"glob.glob",
"keras.applications.resnet.ResNet152",
"keras.datasets.cifar10.load_data",
"keras.layers.Flatten",
"send_signal.send",
"keras.utils.to_categorical",... | [((893, 959), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Tensorflow Cifar10 Training"""'}), "(description='Tensorflow Cifar10 Training')\n", (916, 959), False, 'import argparse\n'), ((1903, 1914), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1912, 1914), False, 'import os\n'), ((1927, 1947), 'json.dumps', 'json.dumps', (['pid_dict'], {}), '(pid_dict)\n', (1937, 1947), False, 'import json\n'), ((2012, 2050), 'os.rename', 'os.rename', (['"""pid_lock.json"""', '"""pid.json"""'], {}), "('pid_lock.json', 'pid.json')\n", (2021, 2050), False, 'import os\n'), ((2525, 2544), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (2542, 2544), False, 'from keras.datasets import cifar10\n'), ((3026, 3074), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (3052, 3074), False, 'import keras\n'), ((3084, 3131), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (3110, 3131), False, 'import keras\n'), ((5797, 5844), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'terminateProcess'], {}), '(signal.SIGTERM, terminateProcess)\n', (5810, 5844), False, 'import signal\n'), ((6032, 6059), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'logdir'}), '(log_dir=logdir)\n', (6043, 6059), False, 'from keras.callbacks import ReduceLROnPlateau, TensorBoard\n'), ((6644, 6687), 'send_signal.send', 'send_signal.send', (['args.node', '(10002)', 'message'], {}), '(args.node, 10002, message)\n', (6660, 6687), False, 'import send_signal\n'), ((7177, 7220), 'send_signal.send', 'send_signal.send', (['args.node', '(10002)', 'message'], {}), '(args.node, 10002, message)\n', (7193, 7220), False, 'import send_signal\n'), ((1868, 1881), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (1877, 1881), False, 'import json\n'), ((2726, 2750), 'numpy.mean', 
'np.mean', (['x_train'], {'axis': '(0)'}), '(x_train, axis=0)\n', (2733, 2750), True, 'import numpy as np\n'), ((3197, 3231), 'keras.models.load_model', 'keras.models.load_model', (['save_file'], {}), '(save_file)\n', (3220, 3231), False, 'import keras\n'), ((3280, 3299), 'keras.models.Sequential', 'models.Sequential', ([], {}), '()\n', (3297, 3299), False, 'from keras import models, layers, optimizers\n'), ((5086, 5114), 'json.dumps', 'json.dumps', (['epoch_waste_dict'], {}), '(epoch_waste_dict)\n', (5096, 5114), False, 'import json\n'), ((5323, 5344), 'glob.glob', 'glob.glob', (['save_files'], {}), '(save_files)\n', (5332, 5344), False, 'import glob\n'), ((5678, 5705), 'json.dumps', 'json.dumps', (['checkpoint_dict'], {}), '(checkpoint_dict)\n', (5688, 5705), False, 'import json\n'), ((5785, 5795), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5793, 5795), False, 'import sys\n'), ((2084, 2105), 'glob.glob', 'glob.glob', (['save_files'], {}), '(save_files)\n', (2093, 2105), False, 'import glob\n'), ((3353, 3438), 'keras.applications.resnet.ResNet50', 'ResNet50', ([], {'weights': 'None', 'include_top': '(False)', 'input_shape': '(32, 32, 3)', 'pooling': 'None'}), '(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None\n )\n', (3361, 3438), False, 'from keras.applications.resnet import ResNet50, ResNet101, ResNet152\n'), ((3931, 3947), 'keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (3945, 3947), False, 'from keras import models, layers, optimizers\n'), ((4272, 4310), 'keras.layers.Dense', 'layers.Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (4284, 4310), False, 'from keras import models, layers, optimizers\n'), ((5004, 5017), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (5013, 5017), False, 'import json\n'), ((5354, 5366), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (5363, 5366), False, 'import os\n'), ((5613, 5626), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (5622, 5626), False, 
'import json\n'), ((6391, 6402), 'time.time', 'time.time', ([], {}), '()\n', (6400, 6402), False, 'import time\n'), ((3485, 3571), 'keras.applications.resnet.ResNet101', 'ResNet101', ([], {'weights': 'None', 'include_top': '(False)', 'input_shape': '(32, 32, 3)', 'pooling': 'None'}), '(weights=None, include_top=False, input_shape=(32, 32, 3), pooling\n =None)\n', (3494, 3571), False, 'from keras.applications.resnet import ResNet50, ResNet101, ResNet152\n'), ((4432, 4448), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'args_lr'}), '(lr=args_lr)\n', (4436, 4448), False, 'from keras.optimizers import Adam\n'), ((3618, 3704), 'keras.applications.resnet.ResNet152', 'ResNet152', ([], {'weights': 'None', 'include_top': '(False)', 'input_shape': '(32, 32, 3)', 'pooling': 'None'}), '(weights=None, include_top=False, input_shape=(32, 32, 3), pooling\n =None)\n', (3627, 3704), False, 'from keras.applications.resnet import ResNet50, ResNet101, ResNet152\n'), ((4872, 4883), 'time.time', 'time.time', ([], {}), '()\n', (4881, 4883), False, 'import time\n')] |
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Benchmark for a computing the jacobian of a QNode, where the parametrized
gates are uniformly distributed throughout the circuit.
"""
from math import pi
import numpy as np
import pennylane as qml
import benchmark_utils as bu
class Benchmark(bu.BaseBenchmark):
    """Benchmarks Jacobian evaluation of a uniformly parametrized QNode.

    Builds a circuit whose parametrized RX rotations are spread evenly over
    ``n`` layers, each followed by a CNOT entangling pattern, and evaluates
    the Jacobian of the resulting QNode.
    """

    name = "Jacobian evaluation uniform"
    min_wires = 2
    n_vals = range(3, 27, 3)

    def setup(self):
        # pylint: disable=attribute-defined-outside-init,no-member
        np.random.seed(143)
        self.random_angles = np.random.uniform(high=2 * pi, size=self.n_wires)
        self.all_wires = range(self.n_wires)

    def benchmark(self, n=10):
        # ``n`` is the number of parametrized layers in the circuit.
        if self.verbose:
            print("circuit: {} parameters, {} wires".format(n * self.n_wires, self.n_wires))

        # One independent trainable angle vector per layer.
        layer_params = []
        for _ in range(n):
            layer_params.append(
                qml.numpy.array(self.random_angles, copy=True, requires_grad=True))

        def circuit(params):
            """Applies ``n`` RX layers interleaved with CNOT entanglers."""
            for depth in range(n):
                qml.broadcast(qml.RX, pattern="single", wires=self.all_wires,
                              parameters=params[depth])
                qml.broadcast(qml.CNOT, pattern="double", wires=self.all_wires)
            return [bu.expval(qml.PauliZ(w)) for w in self.all_wires]

        qnode = bu.create_qnode(circuit, self.device, mutable=True,
                               qnode_type=self.qnode_type)
        qnode.jacobian([layer_params])
        return True
| [
"numpy.random.uniform",
"benchmark_utils.create_qnode",
"pennylane.broadcast",
"numpy.random.seed",
"pennylane.numpy.array",
"pennylane.PauliZ"
] | [((1253, 1272), 'numpy.random.seed', 'np.random.seed', (['(143)'], {}), '(143)\n', (1267, 1272), True, 'import numpy as np\n'), ((1290, 1339), 'numpy.random.uniform', 'np.random.uniform', ([], {'high': '(2 * pi)', 'size': 'self.n_wires'}), '(high=2 * pi, size=self.n_wires)\n', (1307, 1339), True, 'import numpy as np\n'), ((2115, 2194), 'benchmark_utils.create_qnode', 'bu.create_qnode', (['circuit', 'self.device'], {'mutable': '(True)', 'qnode_type': 'self.qnode_type'}), '(circuit, self.device, mutable=True, qnode_type=self.qnode_type)\n', (2130, 2194), True, 'import benchmark_utils as bu\n'), ((1654, 1720), 'pennylane.numpy.array', 'qml.numpy.array', (['self.random_angles'], {'copy': '(True)', 'requires_grad': '(True)'}), '(self.random_angles, copy=True, requires_grad=True)\n', (1669, 1720), True, 'import pennylane as qml\n'), ((1860, 1952), 'pennylane.broadcast', 'qml.broadcast', (['qml.RX'], {'pattern': '"""single"""', 'wires': 'self.all_wires', 'parameters': 'params[layer]'}), "(qml.RX, pattern='single', wires=self.all_wires, parameters=\n params[layer])\n", (1873, 1952), True, 'import pennylane as qml\n'), ((1964, 2027), 'pennylane.broadcast', 'qml.broadcast', (['qml.CNOT'], {'pattern': '"""double"""', 'wires': 'self.all_wires'}), "(qml.CNOT, pattern='double', wires=self.all_wires)\n", (1977, 2027), True, 'import pennylane as qml\n'), ((2058, 2071), 'pennylane.PauliZ', 'qml.PauliZ', (['w'], {}), '(w)\n', (2068, 2071), True, 'import pennylane as qml\n')] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for stochastic block models (SBMs) with node features.
SimulateSbm, SimulateFeatures, and SimulateEdgeFeatures are top-level library
functions used by GenerateStochasticBlockModel in simulations.py. You can call
these separately to generate various parts of an SBM with features.
"""
import collections
import dataclasses
import enum
import math
import random
from typing import Dict, List, Optional, Sequence, Tuple
import graph_tool
from graph_tool import generation
import networkx as nx
import numpy as np
from graph_embedding.simulations import heterogeneous_sbm_utils as hsu
# pylint: disable=g-explicit-length-test
class MatchType(enum.Enum):
  """Indicates type of feature/graph membership matching to do.

  RANDOM: feature memberships are generated randomly.
  NESTED: for # feature groups >= # graph groups. Each feature cluster is a
    sub-cluster of a graph cluster. Multiplicity of sub-clusters per
    graph cluster is kept as uniform as possible.
  GROUPED: for # feature groups <= # graph groups. Each graph cluster is a
    sub-cluster of a feature cluster. Multiplicity of sub-clusters per
    feature cluster is kept as uniform as possible.
  """
  RANDOM = 1   # feature clusters assigned independently of graph clusters
  NESTED = 2   # each feature cluster sits inside one graph cluster
  GROUPED = 3  # each graph cluster sits inside one feature cluster
@dataclasses.dataclass
class EdgeProbabilityProfile:
  """Stores p-to-q ratios for Stochastic Block Model.

  Attributes:
    p_to_q_ratio1: Probability of in-cluster edges divided by probability of
      out-cluster edges, for type 1 nodes. If the SBM is homogeneous, this
      is the global p_to_q_ratio.
    p_to_q_ratio2: Probability of in-cluster edges divided by probability of
      out-cluster edges, for type 2 nodes.
    p_to_q_ratio_cross: Probability of in-cluster edges divided by probability
      of out-cluster edges, for node clusters that are linked across-type.
  """
  # Ellipsis marks a required value the caller must overwrite before use.
  p_to_q_ratio1: float = Ellipsis
  # The following two fields are only meaningful for heterogeneous SBMs.
  p_to_q_ratio2: Optional[float] = 0.0
  p_to_q_ratio_cross: Optional[float] = 0.0
@dataclasses.dataclass
class StochasticBlockModel:
  """Stores data for stochastic block model (SBM) graphs with features.

  This class supports heterogeneous SBMs, in which each node is assumed to be
  exactly one of two types. In this model, the following extra fields are used:
   * type1_clusters: list of cluster indices for type 1 nodes. (For single-type
       graphs, this contains the list of all cluster indices.)
   * type2_clusters: list of cluster indices for type 2 nodes.
   * cross_links: tuples of cluster indices that are linked cross-type.
   * node_features2: features for type 2 nodes. (node_features1 is used as the
       sole feature field for single-type SBM.)

  Attributes:
    graph: graph-tool Graph object.
    graph_memberships: list of integer node classes.
    node_features1: numpy array of node features for nodes of type 1. Features
      for node with index i is in row i.
    node_features2: numpy array of node features for nodes of type 2. Features
      for node with index i is in row i - (# of nodes of type 1).
    feature_memberships: list of integer node feature classes.
    edge_features: map from edge tuple to numpy array. Only stores undirected
      edges, i.e. (0, 1) will be in the map, but (1, 0) will not be.
    cross_links: list of 2-tuples, each tuple a pair of cluster indices which
      are cross-correlated between the types. (i, j) included in this list
      means the i-th cluster from type 1 is correlated with the j-th cluster
      from type 2.
    type1_clusters: list of the indices of type 1 clusters.
    type2_clusters: list of the indices of type 2 clusters.
  """
  # Ellipsis defaults mark fields that simulation code fills in later.
  graph: graph_tool.Graph = Ellipsis
  graph_memberships: np.ndarray = Ellipsis
  node_features1: np.ndarray = Ellipsis
  node_features2: Optional[np.ndarray] = Ellipsis
  feature_memberships: np.ndarray = Ellipsis
  edge_features: Dict[Tuple[int, int], np.ndarray] = Ellipsis
  type1_clusters: Optional[List[int]] = Ellipsis
  type2_clusters: Optional[List[int]] = Ellipsis
  cross_links: Optional[List[Tuple[int, int]]] = Ellipsis
def _GetNestingMap(large_k, small_k):
"""Given two group sizes, computes a "nesting map" between groups.
This function will produce a bipartite map between two sets of "group nodes"
that will be used downstream to partition nodes in a bigger graph. The map
encodes which groups from the larger set are nested in certain groups from
the smaller set.
As currently implemented, nesting is assigned as evenly as possible. If
large_k is an integer multiple of small_k, each smaller-set group will be
mapped to exactly (large_k/small_k) larger-set groups. If there is a
remainder r, the first r smaller-set groups will each have one extra nested
larger-set group.
Args:
large_k: (int) size of the larger group set
small_k: (int) size of the smaller group set
Returns:
nesting_map: (dict) map from larger group set indices to lists of
smaller group set indices
"""
min_multiplicity = int(math.floor(large_k / small_k))
max_bloated_group_index = large_k - small_k * min_multiplicity - 1
nesting_map = collections.defaultdict(list)
pos = 0
for i in range(small_k):
for _ in range(min_multiplicity + int(i <= max_bloated_group_index)):
nesting_map[i].append(pos)
pos += 1
return nesting_map
def _GenerateFeatureMemberships(
    graph_memberships,
    num_groups = None,
    match_type = MatchType.RANDOM):
  """Generates a feature membership assignment.

  Args:
    graph_memberships: (list) the integer memberships for the graph SBM
    num_groups: (int) number of groups. If None, defaults to number of unique
      values in graph_memberships.
    match_type: (MatchType) see the enum class description.
  Returns:
    memberships: a int list - index i contains feature group of node i.
  """
  # Parameter checks
  if num_groups is not None and num_groups == 0:
    raise ValueError("argument num_groups must be None or positive")
  graph_num_groups = len(set(graph_memberships))
  if num_groups is None:
    num_groups = graph_num_groups
  # Compute memberships
  memberships = []
  if match_type == MatchType.GROUPED:
    # Fewer (or equal) feature groups than graph groups: deterministically
    # collapse graph clusters into feature clusters via the nesting map.
    if num_groups > graph_num_groups:
      raise ValueError(
          "for match type GROUPED, must have num_groups <= graph_num_groups")
    nesting_map = _GetNestingMap(graph_num_groups, num_groups)
    # Creates deterministic map from (smaller) graph clusters to (larger)
    # feature clusters.
    reverse_nesting_map = {}
    for feature_cluster, graph_cluster_list in nesting_map.items():
      for cluster in graph_cluster_list:
        reverse_nesting_map[cluster] = feature_cluster
    for cluster in graph_memberships:
      memberships.append(reverse_nesting_map[cluster])
  elif match_type == MatchType.NESTED:
    # More (or equal) feature groups than graph groups: split each graph
    # cluster's nodes uniformly among its nested feature clusters.
    if num_groups < graph_num_groups:
      raise ValueError(
          "for match type NESTED, must have num_groups >= graph_num_groups")
    nesting_map = _GetNestingMap(num_groups, graph_num_groups)
    # Creates deterministic map from (smaller) feature clusters to (larger)
    # graph clusters.
    for graph_cluster_id, feature_cluster_ids in nesting_map.items():
      sorted_feature_cluster_ids = sorted(feature_cluster_ids)
      num_feature_groups = len(sorted_feature_cluster_ids)
      # Uniform split of this graph cluster's nodes across its feature groups.
      feature_pi = np.ones(num_feature_groups) / num_feature_groups
      num_graph_cluster_nodes = np.sum(
          [i == graph_cluster_id for i in graph_memberships])
      sub_memberships = _GenerateNodeMemberships(num_graph_cluster_nodes,
                                               feature_pi)
      sub_memberships = [sorted_feature_cluster_ids[i] for i in sub_memberships]
      memberships.extend(sub_memberships)
  else:  # MatchType.RANDOM
    memberships = random.choices(range(num_groups), k=len(graph_memberships))
  # NOTE(review): the final sort assumes graph_memberships is itself sorted
  # (contiguous clusters), so sorted feature labels align node-by-node.
  return np.array(sorted(memberships))
def _ComputeExpectedEdgeCounts(num_edges, num_vertices,
pi,
prop_mat):
"""Computes expected edge counts within and between communities.
Args:
num_edges: expected number of edges in the graph.
num_vertices: number of nodes in the graph
pi: interable of non-zero community size proportions. Must sum to 1.0, but
this check is left to the caller of this internal function.
prop_mat: square, symmetric matrix of community edge count rates. Entries
must be non-negative, but this check is left to the caller.
Returns:
symmetric matrix with shape prop_mat.shape giving expected edge counts.
"""
scale = np.matmul(pi, np.matmul(prop_mat, pi)) * num_vertices**2
prob_mat = prop_mat * num_edges / scale
return np.outer(pi, pi) * prob_mat * num_vertices**2
def _ComputeCommunitySizes(num_vertices, pi):
"""Helper function of GenerateNodeMemberships to compute group sizes.
Args:
num_vertices: number of nodes in graph.
pi: interable of non-zero community size proportions.
Returns:
community_sizes: np vector of group sizes. If num_vertices * pi[i] is a
whole number (up to machine precision), community_sizes[i] will be that
number. Otherwise, this function accounts for rounding errors by making
group sizes as balanced as possible (i.e. increasing smallest groups by
1 or decreasing largest groups by 1 if needed).
"""
community_sizes = [int(x * num_vertices) for x in pi]
if sum(community_sizes) != num_vertices:
size_order = np.argsort(community_sizes)
delta = sum(community_sizes) - num_vertices
adjustment = np.sign(delta)
if adjustment == 1:
size_order = np.flip(size_order)
for i in range(int(abs(delta))):
community_sizes[size_order[i]] -= adjustment
return community_sizes
def _GenerateNodeMemberships(num_vertices,
                             pi):
  """Gets node memberships for sbm.

  Args:
    num_vertices: number of nodes in graph.
    pi: iterable of non-zero community size proportions. Must sum to 1.0, but
      this check is left to the caller of this internal function.
  Returns:
    np vector of ints representing community indices.
  """
  group_sizes = _ComputeCommunitySizes(num_vertices, pi)
  # Nodes are labeled contiguously: the first group_sizes[0] nodes belong to
  # community 0, the next group_sizes[1] nodes to community 1, and so on.
  labels = np.repeat(np.arange(len(group_sizes)), group_sizes)
  return labels.astype(int)
def SimulateSbm(sbm_data,
                num_vertices,
                num_edges,
                pi,
                prop_mat,
                out_degs = None,
                pi2 = None):
  """Samples a stochastic block model into sbm_data.graph.

  Wraps graph_tool.generation.generate_sbm; see its documentation for details
  on the model and parameters. When `pi2` is supplied, a heterogeneous SBM is
  produced: every node has exactly one of two types, and the extra fields of
  the StochasticBlockModel dataclass describing the type split are filled.

  Args:
    sbm_data: StochasticBlockModel dataclass to store result data.
    num_vertices: (int) number of nodes in the graph.
    num_edges: (float) expected number of edges in the graph.
    pi: iterable of non-zero community size relative proportions. Community i
      will be pi[i] / pi[j] times larger than community j.
    prop_mat: square, symmetric matrix of community edge count rates.
    out_degs: out-degree propensity for each node; a constant value is used
      when omitted. Values are normalized inside each group if needed.
    pi2: community size proportions for type-2 vertices. Supplying this
      argument produces a heterogeneous model.
  Raises:
    ValueError: if prop_mat is not k x k with k = len(pi) + len(pi2).
  Returns: (none)
  """
  if pi2 is None:
    pi2 = []
  k1 = len(pi)
  k2 = len(pi2)
  # Concatenate the per-type proportion vectors and renormalize to sum to 1.
  combined_pi = np.array(list(pi) + list(pi2)).astype(np.float64)
  combined_pi /= np.sum(combined_pi)
  num_clusters = len(combined_pi)
  if prop_mat.shape[0] != num_clusters or prop_mat.shape[1] != num_clusters:
    raise ValueError("prop_mat must be k x k; k = len(pi1) + len(pi2)")
  sbm_data.graph_memberships = _GenerateNodeMemberships(num_vertices,
                                                        combined_pi)
  sbm_data.type1_clusters = sorted(set(sbm_data.graph_memberships))
  if k2 > 0:
    # Heterogeneous case: record which clusters belong to which node type.
    sbm_data.cross_links = hsu.GetCrossLinks([k1, k2], 0, 1)
    type1_ids, type2_ids = zip(*sbm_data.cross_links)
    sbm_data.type1_clusters = sorted(set(type1_ids))
    sbm_data.type2_clusters = sorted(set(type2_ids))
  expected_counts = _ComputeExpectedEdgeCounts(num_edges, num_vertices,
                                               combined_pi, prop_mat)
  sbm_data.graph = generation.generate_sbm(sbm_data.graph_memberships,
                                           expected_counts, out_degs)
  graph_tool.stats.remove_self_loops(sbm_data.graph)
  graph_tool.stats.remove_parallel_edges(sbm_data.graph)
  sbm_data.graph.reindex_edges()
def _GetFeatureCenters(num_groups, center_var, feature_dim):
"""Helper function to generate multivariate Normal feature centers.
Args:
num_groups: number of centers to generate.
center_var: diagonal element of the covariance matrix (off-diagonals = 0).
feature_dim: the dimension of each center.
Returns:
centers: numpy array with feature group centers as rows.
"""
centers = np.random.multivariate_normal(
np.zeros(feature_dim), np.identity(feature_dim) * center_var,
num_groups)
return centers
def SimulateFeatures(sbm_data,
                     center_var,
                     feature_dim,
                     num_groups = None,
                     match_type = MatchType.RANDOM,
                     cluster_var = 1.0,
                     center_var2 = 0.0,
                     feature_dim2 = 0,
                     type_correlation = 0.0,
                     type_center_var = 0.0):
  """Generates node features using a multivariate normal mixture model.

  This function does nothing and throws a warning if
  sbm_data.graph_memberships is empty. Run SimulateSbm to fill that field.
  Feature data is stored as an attribute of sbm_data named 'node_features1'.
  If the `type2_clusters` field in the input `sbm_data` is filled, this function
  produces node features for a heterogeneous SBM. Specifically:
  * Handling differing # graph clusters and # feature clusters is not
    implemented for heterogeneous SBMs. `num_groups` and must equal the
    length of sbm_data.type1_clusters (raises RuntimeWarning if not).
  * The node_features{1,2} fields of the input sbm_data will store the features
    generated for type {1,2} nodes.
  Args:
    sbm_data: StochasticBlockModel dataclass to store result data.
    center_var: (float) variance of feature cluster centers. When this is 0.0,
      the signal-to-noise ratio is 0.0. When equal to cluster_var, SNR is 1.0.
    feature_dim: (int) dimension of the multivariate normal.
    num_groups: (int) number of centers. Generated by a multivariate normal with
      mean zero and covariance matrix cluster_var * I_{feature_dim}. This is
      ignored if the input sbm_data is heterogeneous. Feature cluster counts
      will be set equal to the graph cluster counts. If left as default (None),
      and input sbm_data is homogeneous, set to len(sbm_data.type1_clusters).
    match_type: (MatchType) see sbm_simulator.MatchType for details.
    cluster_var: (float) variance of feature clusters around their centers.
    center_var2: (float) center_var for nodes of type 2. Not needed if sbm_data
      is not heterogeneous (see above).
    feature_dim2: (int) feature_dim for nodes of type 2. Not needed if sbm_data
      is not heterogeneous (see above).
    type_correlation: (float) proportion of each cluster's center vector that
      is shared with other clusters linked across types. Not needed if sbm_data
      is not heterogeneous (see above).
    type_center_var: (float) center_var for center vectors that are shared with
      clusters linked across types. Not used if input sbm_data is not
      heterogeneous.
  Raises:
    RuntimeWarning:
      * if sbm_data no graph, no graph_memberships, or type1_clusters fields.
      * if len(sbm_data.type2_clusters) > 0 and sbm_data.cross_links is not a
        list.
  """
  # Unfilled dataclass fields may be None or Ellipsis; guard against both.
  if sbm_data.graph is None or sbm_data.graph is Ellipsis:
    raise RuntimeWarning("No graph found: no features generated. "
                         "Run SimulateSbm to generate a graph.")
  if sbm_data.graph_memberships is None or sbm_data.graph_memberships is Ellipsis:
    raise RuntimeWarning("No graph_memberships found: no features generated. "
                         "Run SimulateSbm to generate graph_memberships.")
  if sbm_data.type1_clusters is None or sbm_data.type1_clusters is Ellipsis:
    raise RuntimeWarning("No type1_clusters found: no features generated. "
                         "Run SimulateSbm to generate type1_clusters.")
  if num_groups is None:
    num_groups = len(sbm_data.type1_clusters)
  centers = list(_GetFeatureCenters(num_groups, center_var, feature_dim))
  num_groups2 = (0 if sbm_data.type2_clusters is Ellipsis
                 else len(sbm_data.type2_clusters))
  if num_groups2 > 0:
    # The SBM is heterogeneous. Check input and adjust variables.
    if not isinstance(sbm_data.cross_links, list):
      raise RuntimeWarning(
          ("len(sbm_data.type2_clusters) > 0, implying heterogeneous SBM, but "
           "heterogeneous data `cross_links` is unfilled."))
    # Generate heterogeneous feature centers.
    centers += list(_GetFeatureCenters(num_groups2, center_var2, feature_dim2))
    # Clusters linked across types form connected components; each component
    # draws a shared center that is blended into its member clusters' centers.
    correspondence_graph = nx.Graph()
    correspondence_graph.add_edges_from(sbm_data.cross_links)
    connected_components = list(
        nx.algorithms.connected_components(correspondence_graph))
    cross_type_feature_dim = min(feature_dim, feature_dim2)
    component_center_cov = np.identity(cross_type_feature_dim) * type_center_var
    for component in connected_components:
      component_center = np.random.multivariate_normal(
          np.zeros(cross_type_feature_dim), component_center_cov, 1)[0]
      for cluster_index in component:
        # Convex combination: type_correlation of the shared component center
        # plus (1 - type_correlation) of the cluster's own center, applied on
        # the first cross_type_feature_dim coordinates only.
        centers[cluster_index][:cross_type_feature_dim] = (
            component_center * type_correlation
            + centers[cluster_index][:cross_type_feature_dim] *
            (1 - type_correlation))
  # Get memberships
  sbm_data.feature_memberships = _GenerateFeatureMemberships(
      graph_memberships=sbm_data.graph_memberships,
      num_groups=num_groups,
      match_type=match_type)
  cluster_indices = sbm_data.feature_memberships
  if num_groups2 > 0:
    # Heterogeneous case: features follow graph clusters directly.
    cluster_indices = sbm_data.graph_memberships
  features1 = []
  features2 = []
  cluster_cov1 = np.identity(feature_dim) * cluster_var
  cluster_cov2 = np.identity(feature_dim2) * cluster_var
  for cluster_index in cluster_indices:
    cluster_cov = cluster_cov1
    if num_groups2 > 0 and cluster_index in sbm_data.type2_clusters:
      cluster_cov = cluster_cov2
    feature = np.random.multivariate_normal(centers[cluster_index], cluster_cov,
                                            1)[0]
    if cluster_index in sbm_data.type1_clusters:
      features1.append(feature)
    else:
      features2.append(feature)
  sbm_data.node_features1 = np.array(features1)
  if num_groups2 > 0:
    sbm_data.node_features2 = np.array(features2)
def SimulateEdgeFeatures(sbm_data,
                         feature_dim,
                         center_distance = 0.0,
                         cluster_variance = 1.0):
  """Generates per-edge features from a two-center Gaussian mixture.

  Edge features are stored in sbm_data.edge_features, a dict mapping 2-tuples
  of sorted node IDs to numpy vectors. Intra-class edges (both endpoints in
  the same community) are sampled around (center_distance, ..., center_distance)
  and inter-class edges around the origin, both with covariance
  cluster_variance * I_d.

  Requires non-None `graph` and `graph_memberships` attributes in sbm_data;
  use SimulateSbm to generate them.

  Args:
    sbm_data: StochasticBlockModel dataclass to store result data.
    feature_dim: (int) dimension of the multivariate normal.
    center_distance: (float) per-dimension distance between the intra-class and
      inter-class means. Increasing this strengthens the edge feature signal.
    cluster_variance: (float) variance of clusters around their centers.
  Raises:
    RuntimeWarning: if simulator has no graph, an empty graph, or no
      memberships.
  """
  if sbm_data.graph is None:
    raise RuntimeWarning("SbmSimulator has no graph: no features generated.")
  if sbm_data.graph.num_vertices() == 0:
    raise RuntimeWarning("graph has no nodes: no features generated.")
  if sbm_data.graph_memberships is None:
    raise RuntimeWarning("graph has no memberships: no features generated.")
  inter_center = np.zeros(shape=(feature_dim,))
  intra_center = np.ones(shape=(feature_dim,)) * center_distance
  covariance = np.identity(feature_dim) * cluster_variance
  sbm_data.edge_features = {}
  memberships = sbm_data.graph_memberships
  for edge in sbm_data.graph.edges():
    endpoint_a = int(edge.source())
    endpoint_b = int(edge.target())
    # Sorted tuple gives one canonical key per undirected edge.
    key = tuple(sorted((endpoint_a, endpoint_b)))
    same_community = memberships[endpoint_a] == memberships[endpoint_b]
    mean = intra_center if same_community else inter_center
    sbm_data.edge_features[key] = np.random.multivariate_normal(
        mean, covariance, 1)[0]
| [
"numpy.sum",
"numpy.ones",
"graph_embedding.simulations.heterogeneous_sbm_utils.GetCrossLinks",
"collections.defaultdict",
"numpy.argsort",
"numpy.identity",
"graph_tool.generation.generate_sbm",
"graph_tool.stats.remove_parallel_edges",
"numpy.outer",
"graph_tool.stats.remove_self_loops",
"nump... | [((5799, 5828), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (5822, 5828), False, 'import collections\n'), ((10872, 10905), 'numpy.zeros', 'np.zeros', (['num_vertices'], {'dtype': 'int'}), '(num_vertices, dtype=int)\n', (10880, 10905), True, 'import numpy as np\n'), ((12704, 12714), 'numpy.sum', 'np.sum', (['pi'], {}), '(pi)\n', (12710, 12714), True, 'import numpy as np\n'), ((13382, 13456), 'graph_tool.generation.generate_sbm', 'generation.generate_sbm', (['sbm_data.graph_memberships', 'edge_counts', 'out_degs'], {}), '(sbm_data.graph_memberships, edge_counts, out_degs)\n', (13405, 13456), False, 'from graph_tool import generation\n'), ((13502, 13552), 'graph_tool.stats.remove_self_loops', 'graph_tool.stats.remove_self_loops', (['sbm_data.graph'], {}), '(sbm_data.graph)\n', (13536, 13552), False, 'import graph_tool\n'), ((13555, 13609), 'graph_tool.stats.remove_parallel_edges', 'graph_tool.stats.remove_parallel_edges', (['sbm_data.graph'], {}), '(sbm_data.graph)\n', (13593, 13609), False, 'import graph_tool\n'), ((20001, 20020), 'numpy.array', 'np.array', (['features1'], {}), '(features1)\n', (20009, 20020), True, 'import numpy as np\n'), ((21690, 21720), 'numpy.zeros', 'np.zeros', ([], {'shape': '(feature_dim,)'}), '(shape=(feature_dim,))\n', (21698, 21720), True, 'import numpy as np\n'), ((5683, 5712), 'math.floor', 'math.floor', (['(large_k / small_k)'], {}), '(large_k / small_k)\n', (5693, 5712), False, 'import math\n'), ((10126, 10153), 'numpy.argsort', 'np.argsort', (['community_sizes'], {}), '(community_sizes)\n', (10136, 10153), True, 'import numpy as np\n'), ((10219, 10233), 'numpy.sign', 'np.sign', (['delta'], {}), '(delta)\n', (10226, 10233), True, 'import numpy as np\n'), ((13048, 13081), 'graph_embedding.simulations.heterogeneous_sbm_utils.GetCrossLinks', 'hsu.GetCrossLinks', (['[k1, k2]', '(0)', '(1)'], {}), '([k1, k2], 0, 1)\n', (13065, 13081), True, 'from graph_embedding.simulations import 
heterogeneous_sbm_utils as hsu\n'), ((14085, 14106), 'numpy.zeros', 'np.zeros', (['feature_dim'], {}), '(feature_dim)\n', (14093, 14106), True, 'import numpy as np\n'), ((18355, 18365), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (18363, 18365), True, 'import networkx as nx\n'), ((19450, 19474), 'numpy.identity', 'np.identity', (['feature_dim'], {}), '(feature_dim)\n', (19461, 19474), True, 'import numpy as np\n'), ((19506, 19531), 'numpy.identity', 'np.identity', (['feature_dim2'], {}), '(feature_dim2)\n', (19517, 19531), True, 'import numpy as np\n'), ((20073, 20092), 'numpy.array', 'np.array', (['features2'], {}), '(features2)\n', (20081, 20092), True, 'import numpy as np\n'), ((21733, 21762), 'numpy.ones', 'np.ones', ([], {'shape': '(feature_dim,)'}), '(shape=(feature_dim,))\n', (21740, 21762), True, 'import numpy as np\n'), ((21796, 21820), 'numpy.identity', 'np.identity', (['feature_dim'], {}), '(feature_dim)\n', (21807, 21820), True, 'import numpy as np\n'), ((9257, 9280), 'numpy.matmul', 'np.matmul', (['prop_mat', 'pi'], {}), '(prop_mat, pi)\n', (9266, 9280), True, 'import numpy as np\n'), ((9351, 9367), 'numpy.outer', 'np.outer', (['pi', 'pi'], {}), '(pi, pi)\n', (9359, 9367), True, 'import numpy as np\n'), ((10277, 10296), 'numpy.flip', 'np.flip', (['size_order'], {}), '(size_order)\n', (10284, 10296), True, 'import numpy as np\n'), ((14108, 14132), 'numpy.identity', 'np.identity', (['feature_dim'], {}), '(feature_dim)\n', (14119, 14132), True, 'import numpy as np\n'), ((18469, 18525), 'networkx.algorithms.connected_components', 'nx.algorithms.connected_components', (['correspondence_graph'], {}), '(correspondence_graph)\n', (18503, 18525), True, 'import networkx as nx\n'), ((18614, 18649), 'numpy.identity', 'np.identity', (['cross_type_feature_dim'], {}), '(cross_type_feature_dim)\n', (18625, 18649), True, 'import numpy as np\n'), ((19733, 19802), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['centers[cluster_index]', 
'cluster_cov', '(1)'], {}), '(centers[cluster_index], cluster_cov, 1)\n', (19762, 19802), True, 'import numpy as np\n'), ((22215, 22267), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['center', 'covariance', '(1)'], {}), '(center, covariance, 1)\n', (22244, 22267), True, 'import numpy as np\n'), ((8065, 8125), 'numpy.sum', 'np.sum', (['[(i == graph_cluster_id) for i in graph_memberships]'], {}), '([(i == graph_cluster_id) for i in graph_memberships])\n', (8071, 8125), True, 'import numpy as np\n'), ((7984, 8011), 'numpy.ones', 'np.ones', (['num_feature_groups'], {}), '(num_feature_groups)\n', (7991, 8011), True, 'import numpy as np\n'), ((18777, 18809), 'numpy.zeros', 'np.zeros', (['cross_type_feature_dim'], {}), '(cross_type_feature_dim)\n', (18785, 18809), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import scipy.io
from scipy.interpolate import griddata
import time
from plotting import newfig, savefig
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
class PhysicsInformedNN:
    """Physics-informed neural network (TensorFlow 1.x graph mode).

    A single fully-connected network maps (t, x) to the pair (u, e). The
    training loss combines a data-fit term on u with the residual of the
    PDE u_t + (e*u)_xx = 0, obtained via automatic differentiation.
    """
    # Initialize the class
    def __init__(self, t, x, u, e,
                 layers_ue):
        """Builds the TF graph, loss, and optimizer.

        Args:
            t: (N, 1) array of time coordinates of the training samples.
            x: (N, 1) array of space coordinates of the training samples.
            u: (N, 1) array of observed u values at (t, x).
            e: (N, 1) array of e samples; used only for normalization stats.
            layers_ue: list of layer widths for the (u, e) network.
        """
        # Normalization statistics: inputs are later scaled to [-1, 1];
        # network outputs are de-standardized with the mean/std of (u, e).
        tx = np.concatenate([t, x], 1)
        self.tx_min = tx.min(0)
        self.tx_max = tx.max(0)
        ue = np.concatenate([u, e], 1)
        self.ue_mean = ue.mean(0)
        self.ue_std = ue.std(0)
        # data (inside the domain)
        self.t = t
        self.x = x
        self.u = u
        # layers
        self.layers_ue = layers_ue
        # initialize NN
        self.weights_ue, self.biases_ue = self.initialize_NN(layers_ue)
        # tf placeholders and graph
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                                       log_device_placement=True))
        # placeholders for data on concentration (inside the domain)
        self.learning_rate = tf.placeholder(tf.float32, shape=[])
        self.t_tf = tf.placeholder(tf.float32, shape=[None, 1])
        self.x_tf = tf.placeholder(tf.float32, shape=[None, 1])
        self.u_tf = tf.placeholder(tf.float32, shape=[None, 1])
        # physics informed neural networks (inside the domain)
        (self.u_pred,
         self.e_pred,
         self.eq_pred) = self.net_u(self.t_tf, self.x_tf)
        # loss: data misfit on u plus PDE residual, both scaled by std(u)
        self.loss = tf.reduce_sum(tf.square((self.u_tf - self.u_pred)/self.ue_std[0])) + \
                    tf.reduce_sum(tf.square(self.eq_pred/self.ue_std[0]))
        # mean variant of the same loss, used only for progress reporting
        self.loss_mean = tf.reduce_mean(tf.square((self.u_tf - self.u_pred)/self.ue_std[0])) + \
                         tf.reduce_mean(tf.square(self.eq_pred/self.ue_std[0]))
        # optimizers
        self.optimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate)
        self.train_op = self.optimizer.minimize(self.loss)
        init = tf.global_variables_initializer()
        self.sess.run(init)
    def initialize_NN(self, layers):
        """Creates one (weight, bias) tf.Variable pair per layer transition."""
        weights = []
        biases = []
        num_layers = len(layers)
        for l in range(0,num_layers-1):
            W = self.xavier_init(size=[layers[l], layers[l+1]])
            b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32)
            weights.append(W)
            biases.append(b)
        return weights, biases
    def xavier_init(self, size):
        """Returns a Xavier/Glorot-initialized weight variable of shape `size`."""
        in_dim = size[0]
        out_dim = size[1]
        xavier_stddev = np.sqrt(2.0/(in_dim + out_dim))
        return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)
    def neural_net(self, H, weights, biases):
        """Forward pass; hidden layers use the swish activation x*sigmoid(x)."""
        num_layers = len(weights) + 1
        for l in range(0,num_layers-2):
            W = weights[l]
            b = biases[l]
            H = tf.matmul(H, W) + b
            H = H*tf.sigmoid(H)  # swish activation
        # final layer is linear (no activation)
        W = weights[-1]
        b = biases[-1]
        H = tf.matmul(H, W) + b
        return H
    def net_u(self, t, x):
        """Returns (u, e, eq) where eq is the PDE residual u_t + (e*u)_xx."""
        tx = tf.concat([t,x], 1)
        # scale inputs to [-1, 1] using the training-set bounds
        tx = 2.0*(tx - self.tx_min)/(self.tx_max - self.tx_min) - 1.0
        ue = self.neural_net(tx, self.weights_ue, self.biases_ue)
        # undo output standardization
        ue = self.ue_mean + ue*self.ue_std
        u = ue[:,0:1]
        e = ue[:,1:2]
        u_t = tf.gradients(u, t)[0]
        eu_x = tf.gradients(e*u, x)[0]
        eu_xx = tf.gradients(eu_x, x)[0]
        eq = u_t + eu_xx
        return u, e, eq
    def train(self, num_epochs, batch_size, learning_rate):
        """Runs minibatch Adam for num_epochs passes over the training data.

        NOTE(review): batches are indexed with np.arange(it, it+batch_size),
        so a trailing partial batch would index past N. This appears to assume
        N % batch_size == 0 — confirm against callers.
        """
        for epoch in range(num_epochs):
            N = self.t.shape[0]
            perm = np.random.permutation(N)
            start_time = time.time()
            for it in range(0, N, batch_size):
                idx = perm[np.arange(it,it+batch_size)]
                (t_batch,
                 x_batch,
                 u_batch) = (self.t[idx,:],
                             self.x[idx,:],
                             self.u[idx,:])
                tf_dict = {self.t_tf: t_batch, self.x_tf: x_batch,
                           self.u_tf: u_batch, self.learning_rate: learning_rate}
                self.sess.run(self.train_op, tf_dict)
                # Print progress every 10 batches
                if it % (10*batch_size) == 0:
                    elapsed = time.time() - start_time
                    loss_value, learning_rate_value = self.sess.run([self.loss_mean, self.learning_rate], tf_dict)
                    print('Epoch: %d, It: %d, Loss: %.3e, Time: %.2f, Learning Rate: %.3e' %
                          (epoch, it, loss_value, elapsed, learning_rate_value))
                    start_time = time.time()
    def predict(self, t_star, x_star):
        """Evaluates the trained network at (t_star, x_star); returns (u, e)."""
        tf_dict = {self.t_tf: t_star, self.x_tf: x_star}
        u_star = self.sess.run(self.u_pred, tf_dict)
        e_star = self.sess.run(self.e_pred, tf_dict)
        return u_star, e_star
def plot_solution(x_star, y_star, u_star, ax):
nn = 200
x = np.linspace(x_star.min(), x_star.max(), nn)
y = np.linspace(y_star.min(), y_star.max(), nn)
X, Y = np.meshgrid(x,y)
X_star = np.concatenate((x_star, y_star), axis=1)
U_star = griddata(X_star, u_star.flatten(), (X, Y), method='linear')
# h = ax.pcolor(X,Y,U_star, cmap = 'jet')
h = ax.imshow(U_star, interpolation='nearest', cmap='rainbow',
extent=[x_star.min(), x_star.max(), y_star.min(), y_star.max()],
origin='lower', aspect='auto')
return h
if __name__ == "__main__":
    # Relative magnitude of the Gaussian noise added to the u training samples.
    noise = 0.1
    N_train = 20000
    # Network: 2 inputs (t, x) -> 10 hidden layers of 100 units -> 2 outputs (u, e).
    layers_ue = [2] + 10*[2*50] + [2]
    # Load Data
    data = scipy.io.loadmat('./turbulence.mat')
    # T/X hold the (t, x) grids; P is the field u; E is the dissipation field.
    T_star = data['T']
    X_star = data['X']
    U_star = data['P']
    E_star = data['E']
    # fig = plt.figure()
    # ax = fig.gca(projection='3d')
    # ax.plot_surface(T_star,X_star,U_star)
    # ax.set_xlabel('$t$')
    # ax.set_ylabel('$x$')
    # ax.set_zlabel('$P_1(t,x)$')
    # ax.axis('tight')
    #
    # fig = plt.figure()
    # ax = fig.gca(projection='3d')
    # ax.plot_surface(T_star,X_star,E_star)
    # ax.set_xlabel('$t$')
    # ax.set_ylabel('$x$')
    # ax.set_zlabel('$E(t,x)$')
    # ax.axis('tight')
    T = T_star.shape[1]
    N = T_star.shape[0]
    # Flatten grids into (N*T, 1) columns expected by PhysicsInformedNN.
    t = T_star.flatten()[:,None] # NT x 1
    x = X_star.flatten()[:,None] # NT x 1
    u = U_star.flatten()[:,None] # NT x 1
    e = E_star.flatten()[:,None] # NT x 1
    ######################################################################
    ######################## Noiseless Data ##############################
    ######################################################################
    # Training Data: random subsample of the full space-time grid.
    idx = np.random.choice(N*T, N_train, replace=False)
    t_train = t[idx,:]
    x_train = x[idx,:]
    u_train = u[idx,:]
    e_train = e[idx,:]
    # add noise proportional to std(u)
    u_train = u_train + noise*np.std(u_train)*np.random.randn(u_train.shape[0], u_train.shape[1])
    # Training: four stages with decreasing learning rates.
    model = PhysicsInformedNN(t_train, x_train, u_train, e_train,
                              layers_ue)
    model.train(num_epochs = 1*10**5, batch_size = N_train, learning_rate=1e-3)
    model.train(num_epochs = 2*10**5, batch_size = N_train, learning_rate=1e-4)
    model.train(num_epochs = 3*10**5, batch_size = N_train, learning_rate=1e-5)
    model.train(num_epochs = 4*10**5, batch_size = N_train, learning_rate=1e-6)
    # loss_reg = tf.reduce_sum(tf.square((model.u_tf - model.u_pred)/model.ue_std[0]))
    # loss_eq = tf.reduce_sum(tf.square(model.eq_pred/model.ue_std[0]))
    # tf_dict = {model.t_tf: model.t, model.x_tf: model.x, model.u_tf: model.u}
    # print(model.sess.run(loss_reg, tf_dict))
    # print(model.sess.run(loss_eq, tf_dict))
    # Prediction on the full grid (not just the training subsample).
    u_pred, e_pred = model.predict(t, x)
    # Relative L2 errors against the ground-truth fields.
    error_u = np.linalg.norm(u-u_pred,2)/np.linalg.norm(u,2)
    error_e = np.linalg.norm(e-e_pred,2)/np.linalg.norm(e,2)
    print('Error p: %e' % (error_u))
    print('Error e: %e' % (error_e))
    ######################################################################
    ############################# Plotting ###############################
    ######################################################################
    fig, ax = newfig(1.0, 1.2)
    ax.axis('off')
    gs = gridspec.GridSpec(2, 2)
    gs.update(top=0.9, bottom=0.1, left=0.1, right=0.9, wspace=0.5, hspace=0.7)
    ######## Exact p(t,x) ###########
    ax = plt.subplot(gs[0:1, 0])
    h = plot_solution(t,x,u,ax)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    fig.colorbar(h, cax=cax)
    ax.set_xlabel('$t$')
    ax.set_ylabel('$x$')
    ax.set_title('Exact $P(t,x)$', fontsize = 10)
    ######## Learned p(t,x) ###########
    ax = plt.subplot(gs[0:1, 1])
    h = plot_solution(t,x,u_pred,ax)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    fig.colorbar(h, cax=cax)
    ax.set_xlabel('$t$')
    ax.set_ylabel('$x$')
    ax.set_title('Learned $P(t,x)$', fontsize = 10)
    ######## Exact e(t,x,y) ###########
    ax = plt.subplot(gs[1:2, 0])
    h = plot_solution(t,x,e,ax)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    fig.colorbar(h, cax=cax)
    ax.set_xlabel('$t$')
    ax.set_ylabel('$x$')
    ax.set_title('Exact $\\varepsilon(t,x)$', fontsize = 10)
    ######## Learned e(t,x,y) ###########
    ax = plt.subplot(gs[1:2, 1])
    h = plot_solution(t,x,e_pred,ax)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    fig.colorbar(h, cax=cax)
    ax.set_xlabel('$t$')
    ax.set_ylabel('$x$')
    ax.set_title('Learned $\\varepsilon(t,x)$', fontsize = 10)
    savefig('./figures/turbulence_1D_dissipation', crop = False)
    # Persist inputs and predictions, stamped with the current date.
    scipy.io.savemat('turbulence_1D_dissipation_swish_noise3_results_%s.mat' %(time.strftime('%d_%m_%Y')),
                     {'t':t, 'x':x, 'u':u, 'e':e, 'u_pred':u_pred, 'e_pred':e_pred})
| [
"time.strftime",
"tensorflow.matmul",
"tensorflow.ConfigProto",
"numpy.linalg.norm",
"numpy.arange",
"tensorflow.truncated_normal",
"numpy.meshgrid",
"numpy.random.randn",
"numpy.std",
"tensorflow.concat",
"tensorflow.placeholder",
"numpy.random.choice",
"tensorflow.gradients",
"plotting.n... | [((5492, 5509), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (5503, 5509), True, 'import numpy as np\n'), ((5527, 5567), 'numpy.concatenate', 'np.concatenate', (['(x_star, y_star)'], {'axis': '(1)'}), '((x_star, y_star), axis=1)\n', (5541, 5567), True, 'import numpy as np\n'), ((7140, 7187), 'numpy.random.choice', 'np.random.choice', (['(N * T)', 'N_train'], {'replace': '(False)'}), '(N * T, N_train, replace=False)\n', (7156, 7187), True, 'import numpy as np\n'), ((8715, 8731), 'plotting.newfig', 'newfig', (['(1.0)', '(1.2)'], {}), '(1.0, 1.2)\n', (8721, 8731), False, 'from plotting import newfig, savefig\n'), ((8761, 8784), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(2)'], {}), '(2, 2)\n', (8778, 8784), True, 'import matplotlib.gridspec as gridspec\n'), ((8935, 8958), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0:1, 0]'], {}), '(gs[0:1, 0])\n', (8946, 8958), True, 'import matplotlib.pyplot as plt\n'), ((9005, 9028), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (9024, 9028), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((9285, 9308), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0:1, 1]'], {}), '(gs[0:1, 1])\n', (9296, 9308), True, 'import matplotlib.pyplot as plt\n'), ((9360, 9383), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (9379, 9383), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((9648, 9671), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1:2, 0]'], {}), '(gs[1:2, 0])\n', (9659, 9671), True, 'import matplotlib.pyplot as plt\n'), ((9718, 9741), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (9737, 9741), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((10007, 10030), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1:2, 1]'], {}), '(gs[1:2, 1])\n', (10018, 10030), True, 'import 
matplotlib.pyplot as plt\n'), ((10082, 10105), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (10101, 10105), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((10322, 10380), 'plotting.savefig', 'savefig', (['"""./figures/turbulence_1D_dissipation"""'], {'crop': '(False)'}), "('./figures/turbulence_1D_dissipation', crop=False)\n", (10329, 10380), False, 'from plotting import newfig, savefig\n'), ((453, 478), 'numpy.concatenate', 'np.concatenate', (['[t, x]', '(1)'], {}), '([t, x], 1)\n', (467, 478), True, 'import numpy as np\n'), ((565, 590), 'numpy.concatenate', 'np.concatenate', (['[u, e]', '(1)'], {}), '([u, e], 1)\n', (579, 590), True, 'import numpy as np\n'), ((1237, 1273), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[]'}), '(tf.float32, shape=[])\n', (1251, 1273), True, 'import tensorflow as tf\n'), ((1294, 1337), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 1]'}), '(tf.float32, shape=[None, 1])\n', (1308, 1337), True, 'import tensorflow as tf\n'), ((1358, 1401), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 1]'}), '(tf.float32, shape=[None, 1])\n', (1372, 1401), True, 'import tensorflow as tf\n'), ((1422, 1465), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 1]'}), '(tf.float32, shape=[None, 1])\n', (1436, 1465), True, 'import tensorflow as tf\n'), ((2070, 2126), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (2092, 2126), True, 'import tensorflow as tf\n'), ((2212, 2245), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2243, 2245), True, 'import tensorflow as tf\n'), ((2788, 2821), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (in_dim + out_dim))'], {}), '(2.0 / (in_dim + out_dim))\n', (2795, 2821), True, 'import numpy as np\n'), 
((3344, 3364), 'tensorflow.concat', 'tf.concat', (['[t, x]', '(1)'], {}), '([t, x], 1)\n', (3353, 3364), True, 'import tensorflow as tf\n'), ((8275, 8304), 'numpy.linalg.norm', 'np.linalg.norm', (['(u - u_pred)', '(2)'], {}), '(u - u_pred, 2)\n', (8289, 8304), True, 'import numpy as np\n'), ((8302, 8322), 'numpy.linalg.norm', 'np.linalg.norm', (['u', '(2)'], {}), '(u, 2)\n', (8316, 8322), True, 'import numpy as np\n'), ((8336, 8365), 'numpy.linalg.norm', 'np.linalg.norm', (['(e - e_pred)', '(2)'], {}), '(e - e_pred, 2)\n', (8350, 8365), True, 'import numpy as np\n'), ((8363, 8383), 'numpy.linalg.norm', 'np.linalg.norm', (['e', '(2)'], {}), '(e, 2)\n', (8377, 8383), True, 'import numpy as np\n'), ((2847, 2907), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[in_dim, out_dim]'], {'stddev': 'xavier_stddev'}), '([in_dim, out_dim], stddev=xavier_stddev)\n', (2866, 2907), True, 'import tensorflow as tf\n'), ((3245, 3260), 'tensorflow.matmul', 'tf.matmul', (['H', 'W'], {}), '(H, W)\n', (3254, 3260), True, 'import tensorflow as tf\n'), ((3637, 3655), 'tensorflow.gradients', 'tf.gradients', (['u', 't'], {}), '(u, t)\n', (3649, 3655), True, 'import tensorflow as tf\n'), ((3674, 3696), 'tensorflow.gradients', 'tf.gradients', (['(e * u)', 'x'], {}), '(e * u, x)\n', (3686, 3696), True, 'import tensorflow as tf\n'), ((3714, 3735), 'tensorflow.gradients', 'tf.gradients', (['eu_x', 'x'], {}), '(eu_x, x)\n', (3726, 3735), True, 'import tensorflow as tf\n'), ((3967, 3991), 'numpy.random.permutation', 'np.random.permutation', (['N'], {}), '(N)\n', (3988, 3991), True, 'import numpy as np\n'), ((4030, 4041), 'time.time', 'time.time', ([], {}), '()\n', (4039, 4041), False, 'import time\n'), ((7345, 7396), 'numpy.random.randn', 'np.random.randn', (['u_train.shape[0]', 'u_train.shape[1]'], {}), '(u_train.shape[0], u_train.shape[1])\n', (7360, 7396), True, 'import numpy as np\n'), ((10467, 10492), 'time.strftime', 'time.strftime', (['"""%d_%m_%Y"""'], {}), "('%d_%m_%Y')\n", 
(10480, 10492), False, 'import time\n'), ((1007, 1075), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)', 'log_device_placement': '(True)'}), '(allow_soft_placement=True, log_device_placement=True)\n', (1021, 1075), True, 'import tensorflow as tf\n'), ((1698, 1751), 'tensorflow.square', 'tf.square', (['((self.u_tf - self.u_pred) / self.ue_std[0])'], {}), '((self.u_tf - self.u_pred) / self.ue_std[0])\n', (1707, 1751), True, 'import tensorflow as tf\n'), ((1789, 1829), 'tensorflow.square', 'tf.square', (['(self.eq_pred / self.ue_std[0])'], {}), '(self.eq_pred / self.ue_std[0])\n', (1798, 1829), True, 'import tensorflow as tf\n'), ((1878, 1931), 'tensorflow.square', 'tf.square', (['((self.u_tf - self.u_pred) / self.ue_std[0])'], {}), '((self.u_tf - self.u_pred) / self.ue_std[0])\n', (1887, 1931), True, 'import tensorflow as tf\n'), ((1975, 2015), 'tensorflow.square', 'tf.square', (['(self.eq_pred / self.ue_std[0])'], {}), '(self.eq_pred / self.ue_std[0])\n', (1984, 2015), True, 'import tensorflow as tf\n'), ((2518, 2564), 'tensorflow.zeros', 'tf.zeros', (['[1, layers[l + 1]]'], {'dtype': 'tf.float32'}), '([1, layers[l + 1]], dtype=tf.float32)\n', (2526, 2564), True, 'import tensorflow as tf\n'), ((3134, 3149), 'tensorflow.matmul', 'tf.matmul', (['H', 'W'], {}), '(H, W)\n', (3143, 3149), True, 'import tensorflow as tf\n'), ((3172, 3185), 'tensorflow.sigmoid', 'tf.sigmoid', (['H'], {}), '(H)\n', (3182, 3185), True, 'import tensorflow as tf\n'), ((7329, 7344), 'numpy.std', 'np.std', (['u_train'], {}), '(u_train)\n', (7335, 7344), True, 'import numpy as np\n'), ((4116, 4146), 'numpy.arange', 'np.arange', (['it', '(it + batch_size)'], {}), '(it, it + batch_size)\n', (4125, 4146), True, 'import numpy as np\n'), ((5031, 5042), 'time.time', 'time.time', ([], {}), '()\n', (5040, 5042), False, 'import time\n'), ((4683, 4694), 'time.time', 'time.time', ([], {}), '()\n', (4692, 4694), False, 'import time\n')] |
#!/usr/bin/env python3
"""Calculates the Frechet Inception Distance (FID) to evalulate GANs
The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.
When run as a stand-alone program, it compares the distribution of
images that are stored as PNG/JPEG at a specified location with a
distribution given by summary statistics (in pickle format).
The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectively.
See --help to see further details.
Code apapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead
of Tensorflow
Copyright 2018 Institute of Bioinformatics, JKU Linz
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import pathlib
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import torch
from scipy import linalg
from torch.nn.functional import adaptive_avg_pool2d
import numpy as np
from PIL import Image
import torchvision
try:
from tqdm import tqdm
except ImportError:
# If not tqdm is not available, provide a mock version of it
def tqdm(x): return x
from utils.inception import InceptionV3
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('path', type=str, nargs=2,
help=('Path to the generated images or '
'to .npz statistic files'))
parser.add_argument('--batch-size', type=int, default=50,
help='Batch size to use')
parser.add_argument('--dims', type=int, default=2048,
choices=list(InceptionV3.BLOCK_INDEX_BY_DIM),
help=('Dimensionality of Inception features to use. '
'By default, uses pool3 features'))
parser.add_argument('-c', '--gpu', default='', type=str,
help='GPU to use (leave blank for CPU only)')
def imread(filename):
"""
Loads an image file into a (height, width, 3) uint8 ndarray.
"""
img = Image.open(filename)
if img.mode is not 'RGB':
img = img.convert('RGB')
img = torchvision.transforms.ToTensor()(img)
return img
def get_activations(files, model, batch_size=50, dims=2048,
cuda=False, verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- files : List of image files paths
-- model : Instance of inception model
-- batch_size : Batch size of images for the model to process at once.
Make sure that the number of samples is a multiple of
the batch size, otherwise some samples are ignored. This
behavior is retained to match the original FID score
implementation.
-- dims : Dimensionality of features returned by Inception
-- cuda : If set to True, use GPU
-- verbose : If set to True and parameter out_step is given, the number
of calculated batches is reported.
Returns:
-- A numpy array of dimension (num images, dims) that contains the
activations of the given tensor when feeding inception with the
query tensor.
"""
model.eval()
if batch_size > len(files):
print(('Warning: batch size is bigger than the data size. '
'Setting batch size to data size'))
batch_size = len(files)
pred_arr = np.empty((len(files), dims))
for i in tqdm(range(0, len(files), batch_size)):
start = i
end = i + batch_size
batch = torch.stack([imread(str(f)) for f in files[start:end]],dim=0)
# Reshape to (n_images, 3, height, width)
# images = images.transpose((0, 3, 1, 2))
# images /= 255
#
# batch = torch.from_numpy(images).type(torch.FloatTensor)
if cuda:
batch = batch.cuda()
pred = model(batch)[0]
# If model output is not scalar, apply global spatial average pooling.
# This happens if you choose a dimensionality not equal 2048.
if pred.size(2) != 1 or pred.size(3) != 1:
pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
pred_arr[start:end] = pred.cpu().data.numpy().reshape(pred.size(0), -1)
if verbose:
print(' done')
return pred_arr
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by <NAME>.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations, precalculated on an
representative data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on an
representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return (diff.dot(diff) + np.trace(sigma1) +
np.trace(sigma2) - 2 * tr_covmean)
def calculate_activation_statistics(files, model, batch_size=50,
dims=2048, cuda=False, verbose=False):
"""Calculation of the statistics used by the FID.
Params:
-- files : List of image files paths
-- model : Instance of inception model
-- batch_size : The images numpy array is split into batches with
batch size batch_size. A reasonable batch size
depends on the hardware.
-- dims : Dimensionality of features returned by Inception
-- cuda : If set to True, use GPU
-- verbose : If set to True and parameter out_step is given, the
number of calculated batches is reported.
Returns:
-- mu : The mean over samples of the activations of the pool_3 layer of
the inception model.
-- sigma : The covariance matrix of the activations of the pool_3 layer of
the inception model.
"""
act = get_activations(files, model, batch_size, dims, cuda, verbose)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
def _compute_statistics_of_path(path, model, batch_size, dims, cuda):
if path.endswith('.npz'):
f = np.load(path)
m, s = f['mu'][:], f['sigma'][:]
f.close()
else:
path = pathlib.Path(path)
files = list(path.glob('*.jpg')) + list(path.glob('*.png'))
m, s = calculate_activation_statistics(files, model, batch_size,
dims, cuda)
return m, s
def calculate_fid_given_paths(paths, batch_size, cuda, dims):
"""Calculates the FID of two paths"""
for p in paths:
if not os.path.exists(p):
raise RuntimeError('Invalid path: %s' % p)
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
model = InceptionV3([block_idx])
if cuda:
model.cuda()
m1, s1 = _compute_statistics_of_path(paths[0], model, batch_size,
dims, cuda)
m2, s2 = _compute_statistics_of_path(paths[1], model, batch_size,
dims, cuda)
fid_value = calculate_frechet_distance(m1, s1, m2, s2)
return fid_value
def calculate_dataset_FID(path,batch_size,cuda,dims): #Calculate FID for entire train and test dataset and use as reference.
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
model = InceptionV3([block_idx])
if cuda:
model.cuda()
m1, s1 = _compute_statistics_of_path(path, model, batch_size,
dims, cuda)
return m1, s1
def save_FID(m1,s1,path):
with open(f'{path}/data.npy', 'wb') as f:
np.save(f,m1)
np.save(f,s1)
def load_FID(path):
with open(f'{path}/data.npy', 'rb') as f:
m1 = np.load(f)
s1 = np.load(f)
return m1,s1
# if __name__ == '__main__':
# # args = parser.parse_args()
# args = dotdict
# args.path = ''
# args.batch_size = 64
# args.gpu = 0
# args.dims = 2048
# torch.cuda.set_device(args.gpu)
# m1,s1 = calculate_dataset_FID(args.path,args.batch_size,True,args.dims)
#
# # fid_value = calculate_fid_given_paths(args.path,
# # args.batch_size,
# # args.gpu != '',
# # args.dims)
# # print('FID: ', fid_value)
| [
"numpy.trace",
"numpy.load",
"numpy.abs",
"argparse.ArgumentParser",
"pathlib.Path",
"numpy.mean",
"numpy.atleast_2d",
"os.path.exists",
"numpy.isfinite",
"torch.nn.functional.adaptive_avg_pool2d",
"utils.inception.InceptionV3",
"numpy.cov",
"numpy.diagonal",
"numpy.save",
"numpy.iscompl... | [((1833, 1894), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'formatter_class': 'ArgumentDefaultsHelpFormatter'}), '(formatter_class=ArgumentDefaultsHelpFormatter)\n', (1847, 1894), False, 'from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n'), ((2655, 2675), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (2665, 2675), False, 'from PIL import Image\n'), ((5885, 5903), 'numpy.atleast_1d', 'np.atleast_1d', (['mu1'], {}), '(mu1)\n', (5898, 5903), True, 'import numpy as np\n'), ((5914, 5932), 'numpy.atleast_1d', 'np.atleast_1d', (['mu2'], {}), '(mu2)\n', (5927, 5932), True, 'import numpy as np\n'), ((5947, 5968), 'numpy.atleast_2d', 'np.atleast_2d', (['sigma1'], {}), '(sigma1)\n', (5960, 5968), True, 'import numpy as np\n'), ((5982, 6003), 'numpy.atleast_2d', 'np.atleast_2d', (['sigma2'], {}), '(sigma2)\n', (5995, 6003), True, 'import numpy as np\n'), ((6707, 6731), 'numpy.iscomplexobj', 'np.iscomplexobj', (['covmean'], {}), '(covmean)\n', (6722, 6731), True, 'import numpy as np\n'), ((6961, 6978), 'numpy.trace', 'np.trace', (['covmean'], {}), '(covmean)\n', (6969, 6978), True, 'import numpy as np\n'), ((8148, 8168), 'numpy.mean', 'np.mean', (['act'], {'axis': '(0)'}), '(act, axis=0)\n', (8155, 8168), True, 'import numpy as np\n'), ((8181, 8206), 'numpy.cov', 'np.cov', (['act'], {'rowvar': '(False)'}), '(act, rowvar=False)\n', (8187, 8206), True, 'import numpy as np\n'), ((8958, 8982), 'utils.inception.InceptionV3', 'InceptionV3', (['[block_idx]'], {}), '([block_idx])\n', (8969, 8982), False, 'from utils.inception import InceptionV3\n'), ((9537, 9561), 'utils.inception.InceptionV3', 'InceptionV3', (['[block_idx]'], {}), '([block_idx])\n', (9548, 9561), False, 'from utils.inception import InceptionV3\n'), ((2749, 2782), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (2780, 2782), False, 'import torchvision\n'), ((8342, 8355), 'numpy.load', 'np.load', (['path'], {}), 
'(path)\n', (8349, 8355), True, 'import numpy as np\n'), ((8440, 8458), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (8452, 8458), False, 'import pathlib\n'), ((9815, 9829), 'numpy.save', 'np.save', (['f', 'm1'], {}), '(f, m1)\n', (9822, 9829), True, 'import numpy as np\n'), ((9837, 9851), 'numpy.save', 'np.save', (['f', 's1'], {}), '(f, s1)\n', (9844, 9851), True, 'import numpy as np\n'), ((9931, 9941), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (9938, 9941), True, 'import numpy as np\n'), ((9955, 9965), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (9962, 9965), True, 'import numpy as np\n'), ((4797, 4842), 'torch.nn.functional.adaptive_avg_pool2d', 'adaptive_avg_pool2d', (['pred'], {'output_size': '(1, 1)'}), '(pred, output_size=(1, 1))\n', (4816, 4842), False, 'from torch.nn.functional import adaptive_avg_pool2d\n'), ((6538, 6561), 'numpy.eye', 'np.eye', (['sigma1.shape[0]'], {}), '(sigma1.shape[0])\n', (6544, 6561), True, 'import numpy as np\n'), ((7040, 7056), 'numpy.trace', 'np.trace', (['sigma2'], {}), '(sigma2)\n', (7048, 7056), True, 'import numpy as np\n'), ((8817, 8834), 'os.path.exists', 'os.path.exists', (['p'], {}), '(p)\n', (8831, 8834), False, 'import os\n'), ((6350, 6370), 'numpy.isfinite', 'np.isfinite', (['covmean'], {}), '(covmean)\n', (6361, 6370), True, 'import numpy as np\n'), ((6825, 6845), 'numpy.abs', 'np.abs', (['covmean.imag'], {}), '(covmean.imag)\n', (6831, 6845), True, 'import numpy as np\n'), ((7009, 7025), 'numpy.trace', 'np.trace', (['sigma1'], {}), '(sigma1)\n', (7017, 7025), True, 'import numpy as np\n'), ((6760, 6780), 'numpy.diagonal', 'np.diagonal', (['covmean'], {}), '(covmean)\n', (6771, 6780), True, 'import numpy as np\n')] |
import numpy as np
from ..chain import parallel_test, serial_test
from ...constraints.affine import constraints, gaussian_hit_and_run
def test_gaussian_chain():
n = 30
A = np.eye(n)[:3]
b = np.ones(A.shape[0])
con = constraints(A, b)
state = np.random.standard_normal(n)
state[:3] = 0
gaussian_chain = gaussian_hit_and_run(con, state, nstep=100)
counter = 0
for step in gaussian_chain:
counter += 1
if counter >= 100:
break
test_statistic = lambda z: np.sum(z)
parallel = parallel_test(gaussian_chain,
gaussian_chain.state,
test_statistic,
ndraw=20)
serial = serial_test(gaussian_chain,
gaussian_chain.state,
test_statistic,
ndraw=20)
return parallel, serial
| [
"numpy.eye",
"numpy.random.standard_normal",
"numpy.sum",
"numpy.ones"
] | [((206, 225), 'numpy.ones', 'np.ones', (['A.shape[0]'], {}), '(A.shape[0])\n', (213, 225), True, 'import numpy as np\n'), ((267, 295), 'numpy.random.standard_normal', 'np.random.standard_normal', (['n'], {}), '(n)\n', (292, 295), True, 'import numpy as np\n'), ((184, 193), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (190, 193), True, 'import numpy as np\n'), ((536, 545), 'numpy.sum', 'np.sum', (['z'], {}), '(z)\n', (542, 545), True, 'import numpy as np\n')] |
from collections import defaultdict, Counter
import re
import math
import numpy as np
import os
import psutil
import zipfile
import pandas as pd
import tensorflow as tf
import bert
from bert import run_classifier
from bert import optimization
from bert import tokenization
from bert import modeling
import dill
import pickle
import Model
import SpellCorrector
from TokenGenerator import maskedId
from GenerateId import generateId
BERT_VOCAB = "PATH_TO/multi_cased_L-12_H-768_A-12/vocab.txt"
BERT_INIT_CHKPNT = "PATH_TO/multi_cased_L-12_H-768_A-12/bert_model.ckpt"
tokenization.validate_case_matches_checkpoint(False, BERT_INIT_CHKPNT)
tokenizer = tokenization.FullTokenizer(vocab_file=BERT_VOCAB, do_lower_case=False)
def bertScore(string):
"""
Function to generate the output list consisting top K replacements for each word in the sentence using BERT.
"""
corrector = SpellCorrector()
temp1 = []
temp2 = []
temp3 = []
con = list(string.split(" "))
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model()
sess.run(tf.global_variables_initializer())
var_lists = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="bert")
for word in con:
possible_states = corrector.edit_candidates(word, fast=False)
if len(possible_states) == 1:
word = possible_states[0]
if word in possible_states:
temp1.append([word])
continue
text = string
text_mask = text.replace(word, "**mask**")
print(text_mask)
cls = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="cls")
replaced_masks = [
text_mask.replace("**mask**", state) for state in possible_states
]
# print(replaced_masks)
val = math.ceil(len(replaced_masks) / 5)
m = 0
n = 5
for i in range(0, val):
rep_new = replaced_masks[m:n]
tokens = tokenizer.tokenize(rep_new[0])
input_ids = [maskedId(tokens, i) for i in range(len(tokens))]
tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
ids = [generateId(mask) for mask in rep_new]
tokens, input_ids, tokens_ids = list(zip(*ids))
indices, ids = [], []
for i in range(len(input_ids)):
indices.extend([i] * len(input_ids[i]))
ids.extend(input_ids[i])
masked_padded = tf.keras.preprocessing.sequence.pad_sequences(
ids, padding="post"
)
preds = sess.run(
tf.nn.log_softmax(model.logits), feed_dict={model.X: masked_padded}
)
preds = np.reshape(
preds, [masked_padded.shape[0], masked_padded.shape[1], 119547]
)
indices = np.array(indices)
scores = []
for i in range(len(tokens) - 1):
filter_preds = preds[indices == i]
total = np.sum(
[filter_preds[k, k + 1, x] for k, x in enumerate(tokens_ids[i])]
)
scores.append(total)
prob_scores = np.array(scores) / np.sum(scores)
probs = list(zip(possible_states, prob_scores))
for i in probs:
temp3.append(i)
m += 5
n += 5
temp3.sort(key=lambda x: x[1])
list(temp3)
j = 0
for i in temp3:
if j != 3:
temp2.append(i[0])
if j == 3:
break
j = j + 1
if len(temp2) != 0:
temp1.append(temp2)
else:
temp1.append([word])
temp2 = []
temp3 = []
sess.close()
return temp1
| [
"Model",
"bert.tokenization.validate_case_matches_checkpoint",
"numpy.sum",
"TokenGenerator.maskedId",
"tensorflow.get_collection",
"tensorflow.reset_default_graph",
"SpellCorrector",
"tensorflow.global_variables_initializer",
"tensorflow.nn.log_softmax",
"tensorflow.keras.preprocessing.sequence.p... | [((566, 636), 'bert.tokenization.validate_case_matches_checkpoint', 'tokenization.validate_case_matches_checkpoint', (['(False)', 'BERT_INIT_CHKPNT'], {}), '(False, BERT_INIT_CHKPNT)\n', (611, 636), False, 'from bert import tokenization\n'), ((649, 719), 'bert.tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'BERT_VOCAB', 'do_lower_case': '(False)'}), '(vocab_file=BERT_VOCAB, do_lower_case=False)\n', (675, 719), False, 'from bert import tokenization\n'), ((891, 907), 'SpellCorrector', 'SpellCorrector', ([], {}), '()\n', (905, 907), False, 'import SpellCorrector\n'), ((991, 1015), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1013, 1015), True, 'import tensorflow as tf\n'), ((1027, 1050), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (1048, 1050), True, 'import tensorflow as tf\n'), ((1063, 1070), 'Model', 'Model', ([], {}), '()\n', (1068, 1070), False, 'import Model\n'), ((1135, 1200), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""bert"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='bert')\n", (1152, 1200), True, 'import tensorflow as tf\n'), ((1084, 1117), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1115, 1117), True, 'import tensorflow as tf\n'), ((1573, 1637), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""cls"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='cls')\n", (1590, 1637), True, 'import tensorflow as tf\n'), ((2457, 2523), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'tf.keras.preprocessing.sequence.pad_sequences', (['ids'], {'padding': '"""post"""'}), "(ids, padding='post')\n", (2502, 2523), True, 'import tensorflow as tf\n'), ((2702, 2777), 'numpy.reshape', 'np.reshape', (['preds', '[masked_padded.shape[0], masked_padded.shape[1], 
119547]'], {}), '(preds, [masked_padded.shape[0], masked_padded.shape[1], 119547])\n', (2712, 2777), True, 'import numpy as np\n'), ((2830, 2847), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (2838, 2847), True, 'import numpy as np\n'), ((2019, 2038), 'TokenGenerator.maskedId', 'maskedId', (['tokens', 'i'], {}), '(tokens, i)\n', (2027, 2038), False, 'from TokenGenerator import maskedId\n'), ((2154, 2170), 'GenerateId.generateId', 'generateId', (['mask'], {}), '(mask)\n', (2164, 2170), False, 'from GenerateId import generateId\n'), ((2600, 2631), 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['model.logits'], {}), '(model.logits)\n', (2617, 2631), True, 'import tensorflow as tf\n'), ((3167, 3183), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3175, 3183), True, 'import numpy as np\n'), ((3186, 3200), 'numpy.sum', 'np.sum', (['scores'], {}), '(scores)\n', (3192, 3200), True, 'import numpy as np\n')] |
import binascii
import itertools
import os
import time
import numpy
import six
import chainer
from chainer import configuration
from chainer import cuda
from chainer import function
from chainer.functions.activation import relu
from chainer.functions.activation import tanh
from chainer.functions.array import concat
from chainer.functions.array import reshape
from chainer.functions.array import split_axis
from chainer.functions.array import stack
from chainer.functions.connection import linear
from chainer.functions.noise import dropout
from chainer.utils import argument
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cuda.cudnn.cudnn
_cudnn_version = libcudnn.getVersion()
def _stack_weight(ws):
# TODO(unno): Input of the current LSTM implementaiton is shuffled
w = stack.stack(ws, axis=1)
shape = w.shape
return reshape.reshape(w, (shape[0] * shape[1],) + shape[2:])
class PointerArray(object):
def __init__(self, lst, back_pointer):
self._value = numpy.array(lst, dtype=numpy.intp)
# Store back_pointer to prevent the GC removes the original variable
self._back_pointer = back_pointer
@property
def data(self):
return self._value.ctypes.data
def _make_tensor_descriptor_array(xs):
"""Make an array of pointers denoting pointers of tensor descriptors.
"""
descs = []
for x in xs:
if x.ndim < 3:
shape = x.shape + (1,) * (3 - x.ndim)
x = x.reshape(shape)
desc = cudnn.create_tensor_nd_descriptor(x)
descs.append(desc)
return PointerArray([d.value for d in descs], descs)
def _make_ptr_array(xs):
"""Make an array of pointers denoting pointers of ndarrays.
"""
return PointerArray([x.data.ptr for x in xs], xs)
class DropoutStates(object):
def __init__(self, states, desc):
self.states = states
self.desc = desc
def set_dropout_ratio(self, handle, dropout):
cudnn.set_dropout_descriptor(self.desc, handle, dropout)
@staticmethod
def create(handle, dropout, seed):
states = cudnn.create_dropout_states(handle)
desc = cudnn.create_dropout_descriptor(
handle, dropout, states.data.ptr, states.size, seed)
return DropoutStates(states, desc)
class DropoutRandomStates(object):
def __init__(self, seed):
self._states = None
if seed is None:
try:
seed_str = binascii.hexlify(os.urandom(8))
seed = numpy.uint64(int(seed_str, 16))
except NotImplementedError:
seed = numpy.uint64(time.clock() * 1000000)
else:
seed = numpy.uint64(seed)
self._seed = seed
def create_dropout_states(self, dropout):
handle = cudnn.get_handle()
if self._states is None:
self._states = DropoutStates.create(handle, dropout, self._seed)
else:
self._states.set_dropout_ratio(handle, dropout)
return self._states
def _split(inputs, pos):
return inputs[:pos], inputs[pos:]
_random_states = {}
def get_random_state():
global _random_states
dev = cuda.Device()
rs = _random_states.get(dev.id, None)
if rs is None:
rs = DropoutRandomStates(os.getenv('CHAINER_SEED'))
_random_states[dev.id] = rs
return rs
if cuda.cudnn_enabled and _cudnn_version >= 5000:
# Define RNN parameters using dict.
_rnn_dirs = {
'uni': libcudnn.CUDNN_UNIDIRECTIONAL,
'bi': libcudnn.CUDNN_BIDIRECTIONAL,
}
_rnn_modes = {
'rnn_relu': libcudnn.CUDNN_RNN_RELU,
'rnn_tanh': libcudnn.CUDNN_RNN_TANH,
'gru': libcudnn.CUDNN_GRU,
'lstm': libcudnn.CUDNN_LSTM,
}
_rnn_n_params = {
libcudnn.CUDNN_RNN_RELU: 2,
libcudnn.CUDNN_RNN_TANH: 2,
libcudnn.CUDNN_GRU: 6,
libcudnn.CUDNN_LSTM: 8,
}
_rnn_params_direction = {
libcudnn.CUDNN_UNIDIRECTIONAL: 1,
libcudnn.CUDNN_BIDIRECTIONAL: 2,
}
_rnn_params_use_cell = {
libcudnn.CUDNN_RNN_RELU: False,
libcudnn.CUDNN_RNN_TANH: False,
libcudnn.CUDNN_GRU: False,
libcudnn.CUDNN_LSTM: True,
}
class BaseNStepRNN(function.Function):
def __init__(self, n_layers, states, rnn_dir, rnn_mode, **kwargs):
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
if rnn_dir not in _rnn_dirs:
candidate_list = ','.join(_rnn_dirs.keys())
raise ValueError('Invalid rnn_dir: "%s". Please select from [%s]'
% (rnn_dir, candidate_list))
if rnn_mode not in _rnn_modes:
candidate_list = ','.join(_rnn_modes.keys())
raise ValueError('Invalid rnn_mode: "%s". Please select from [%s]'
% (rnn_mode, candidate_list))
self.rnn_dir = _rnn_dirs[rnn_dir]
self.rnn_mode = _rnn_modes[rnn_mode]
self.rnn_direction = _rnn_params_direction[self.rnn_dir]
self.n_layers = n_layers
self.states = states
self.use_cell = _rnn_params_use_cell[self.rnn_mode]
self.n_W = _rnn_n_params[self.rnn_mode]
@property
def _n_cell(self):
if self.use_cell:
return 2
else:
return 1
@property
def _n_params(self):
return self.n_layers * self.rnn_direction * self.n_W
def check_type_forward(self, in_types):
type_check.expect(in_types.size() > self._n_cell + self._n_params * 2)
if self.use_cell:
(h_type, c_type), in_types = _split(in_types, 2)
h_size = self.n_layers * self.rnn_direction
type_check.expect(
h_type.dtype == numpy.float32,
c_type.dtype == numpy.float32,
h_type.ndim == 3,
h_type.shape[0] == h_size,
c_type.ndim == 3,
c_type.shape[0] == h_size,
# mini-batch size
h_type.shape[1] == c_type.shape[1],
# hidden size
h_type.shape[2] == c_type.shape[2],
)
else:
(h_type, ), in_types = _split(in_types, 1)
h_size = self.n_layers * self.rnn_direction
type_check.expect(
h_type.dtype == numpy.float32,
h_type.ndim == 3,
h_type.shape[0] == h_size,
)
w_types, in_types = _split(in_types, self._n_params)
b_types, in_types = _split(in_types, self._n_params)
x_types = in_types
for x_type in x_types:
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim == 2,
)
for x1_type, x2_type in six.moves.zip(x_types, x_types[1:]):
type_check.expect(
# Check if xs are sorted by descending lengths
x1_type.shape[0] >= x2_type.shape[0],
x1_type.shape[1] == x2_type.shape[1])
in_size = x_types[0].shape[1]
out_size = h_type.shape[2]
for layer in six.moves.range(self.n_layers):
for i in six.moves.range(self.n_W):
for di in six.moves.range(self.rnn_direction):
ind = (layer * self.rnn_direction + di) * self.n_W + i
w_type = w_types[ind]
b_type = b_types[ind]
if self.rnn_direction == 1:
# Uni-direction
if layer == 0 and i < (self.n_W // 2):
w_in = in_size
else:
w_in = out_size
else:
# Bi-direction
if layer == 0 and i < (self.n_W // 2):
w_in = in_size
elif layer > 0 and i < (self.n_W // 2):
w_in = out_size * self.rnn_direction
else:
w_in = out_size
type_check.expect(
w_type.dtype == numpy.float32,
w_type.ndim == 2,
w_type.shape[0] == out_size,
w_type.shape[1] == w_in,
b_type.dtype == numpy.float32,
b_type.ndim == 1,
b_type.shape[0] == out_size,
)
def forward(self, inputs):
if self.use_cell:
# LSTM
(hx, cx), inputs = _split(inputs, self._n_cell)
cx = cuda.cupy.ascontiguousarray(cx)
cx_desc = cudnn.create_tensor_nd_descriptor(cx)
cy = cuda.cupy.empty_like(cx)
cy_desc = cudnn.create_tensor_nd_descriptor(cy)
cx_data_ptr = cx.data.ptr
cy_data_ptr = cy.data.ptr
cx_desc_value = cx_desc.value
cy_desc_value = cy_desc.value
else:
# RNN, GRU
(hx, ), inputs = _split(inputs, self._n_cell)
cx = cy = None
cx_data_ptr = cy_data_ptr = 0
cx_desc_value = cy_desc_value = 0
ws, inputs = _split(inputs, self._n_params)
bs, inputs = _split(inputs, self._n_params)
x_list = inputs
hx = cuda.cupy.ascontiguousarray(hx)
x_desc = cudnn.create_tensor_nd_descriptor(x_list[0][..., None])
length = len(x_list)
n_units = hx.shape[2]
xs = cuda.cupy.concatenate(x_list, axis=0)
ys = cuda.cupy.empty((len(xs),
n_units * self.rnn_direction), dtype=xs.dtype)
handle = cudnn.get_handle()
self.handle = handle
rnn_desc = cudnn.create_rnn_descriptor(
n_units, self.n_layers, self.states.desc,
libcudnn.CUDNN_LINEAR_INPUT, self.rnn_dir,
self.rnn_mode, libcudnn.CUDNN_DATA_FLOAT)
self.rnn_desc = rnn_desc
c_x_descs = _make_tensor_descriptor_array(x_list)
hx_desc = cudnn.create_tensor_nd_descriptor(hx)
weights_size = libcudnn.getRNNParamsSize(
handle, rnn_desc.value, x_desc.value, libcudnn.CUDNN_DATA_FLOAT)
w = cuda.cupy.empty((weights_size // 4, 1, 1), dtype=numpy.float32)
w_desc = cudnn.create_filter_descriptor(w)
for layer in six.moves.range(self.n_layers):
for di in six.moves.range(self.rnn_direction):
# di = 0: forward, 1: backward
for lin_layer_id in six.moves.range(self.n_W):
mat_index = layer * self.rnn_direction + di
mat = cudnn.get_rnn_lin_layer_matrix_params(
handle, rnn_desc, mat_index,
x_desc, w_desc, w, lin_layer_id)
W_index = mat_index * self.n_W + lin_layer_id
m = mat.reshape(mat.size)
m[...] = ws[W_index].ravel()
bias = cudnn.get_rnn_lin_layer_bias_params(
handle, rnn_desc, mat_index,
x_desc, w_desc, w, lin_layer_id)
b = bias.reshape(bias.size)
b[...] = bs[W_index]
self.w = w
self.w_desc = w_desc
sections = numpy.cumsum([len(x) for x in x_list[:-1]])
y_list = cuda.cupy.split(ys, sections)
c_y_descs = _make_tensor_descriptor_array(y_list)
hy = cuda.cupy.empty_like(hx)
hy_desc = cudnn.create_tensor_nd_descriptor(hy)
work_size = libcudnn.getRNNWorkspaceSize(
handle, rnn_desc.value, length, c_x_descs.data)
workspace = cuda.cupy.empty((work_size,), dtype='b')
self.workspace = workspace
if not configuration.config.train:
libcudnn.RNNForwardInference(
handle, rnn_desc.value, length,
c_x_descs.data, xs.data.ptr, hx_desc.value, hx.data.ptr,
cx_desc_value, cx_data_ptr, w_desc.value, w.data.ptr,
c_y_descs.data, ys.data.ptr, hy_desc.value, hy.data.ptr,
cy_desc_value, cy_data_ptr, workspace.data.ptr, work_size)
else:
reserve_size = libcudnn.getRNNTrainingReserveSize(
handle, rnn_desc.value, length, c_x_descs.data)
self.reserve_space = cuda.cupy.empty((reserve_size,), dtype='b')
libcudnn.RNNForwardTraining(
handle, rnn_desc.value, length,
c_x_descs.data, xs.data.ptr, hx_desc.value, hx.data.ptr,
cx_desc_value, cx_data_ptr, w_desc.value, w.data.ptr,
c_y_descs.data, ys.data.ptr, hy_desc.value, hy.data.ptr,
cy_desc_value, cy_data_ptr,
workspace.data.ptr, work_size,
self.reserve_space.data.ptr, reserve_size)
self.c_y_descs = c_y_descs
self.ys = ys
self.c_x_descs = c_x_descs
if self.use_cell:
# LSTM
return tuple([hy, cy] + y_list)
else:
# GRU, RNN
return tuple([hy, ] + y_list)
def backward(self, inputs, grads):
if self.use_cell:
# LSTM
(hx, cx), inputs = _split(inputs, self._n_cell)
dhy, dcy = grads[:self._n_cell]
if dcy is None:
dcy = cuda.cupy.zeros_like(cx)
cx = cuda.cupy.ascontiguousarray(cx)
dcx = cuda.cupy.empty_like(cx)
cx_desc = cudnn.create_tensor_nd_descriptor(cx)
dcx_desc = cudnn.create_tensor_nd_descriptor(dcx)
dcy_desc = cudnn.create_tensor_nd_descriptor(dcy)
cx_data_ptr = cx.data.ptr
dcy_data_ptr = dcy.data.ptr
dcx_data_ptr = dcx.data.ptr
cx_desc_value = cx_desc.value
dcx_desc_value = dcx_desc.value
dcy_desc_value = dcy_desc.value
else:
# GRU, RNN
(hx, ), inputs = _split(inputs, self._n_cell)
dhy, = grads[:self._n_cell]
dcy = cx = dcx = None
cx_data_ptr = dcy_data_ptr = dcx_data_ptr = 0
cx_desc_value = dcx_desc_value = dcy_desc_value = 0
ws_size = self.n_layers * self.rnn_direction * self.n_W
ws, inputs = _split(inputs, ws_size)
bs, inputs = _split(inputs, ws_size)
x_list = inputs
hx = cuda.cupy.ascontiguousarray(hx)
if dhy is None:
dhy = cuda.cupy.zeros_like(hx)
dy_list = list(grads[self._n_cell:])
for i in six.moves.range(len(dy_list)):
if dy_list[i] is None:
dy_list[i] = cuda.cupy.zeros_like(x_list[i])
xs = cuda.cupy.concatenate(x_list, axis=0)
length = len(x_list)
dhx = cuda.cupy.empty_like(hx)
hx_desc = cudnn.create_tensor_nd_descriptor(hx)
dhy_desc = cudnn.create_tensor_nd_descriptor(dhy)
c_dy_descs = _make_tensor_descriptor_array(dy_list)
dys = cuda.cupy.concatenate(dy_list, axis=0)
rnn_desc = self.rnn_desc
handle = self.handle
work_size = libcudnn.getRNNWorkspaceSize(
handle, rnn_desc.value, length, self.c_x_descs.data)
workspace = cuda.cupy.empty((work_size,), dtype='b')
dhx_desc = cudnn.create_tensor_nd_descriptor(dhx)
dxs = cuda.cupy.empty_like(xs)
sections = numpy.cumsum([len(x) for x in x_list[:-1]])
dx_list = cuda.cupy.split(dxs, sections, 0)
c_dx_descs = _make_tensor_descriptor_array(dx_list)
libcudnn.RNNBackwardData(
handle, rnn_desc.value, length,
self.c_y_descs.data, self.ys.data.ptr,
c_dy_descs.data, dys.data.ptr, dhy_desc.value, dhy.data.ptr,
dcy_desc_value, dcy_data_ptr, self.w_desc.value, self.w.data.ptr,
hx_desc.value, hx.data.ptr, cx_desc_value, cx_data_ptr,
c_dx_descs.data, dxs.data.ptr, dhx_desc.value, dhx.data.ptr,
dcx_desc_value, dcx_data_ptr, workspace.data.ptr, work_size,
self.reserve_space.data.ptr, self.reserve_space.size)
dw = cuda.cupy.zeros_like(self.w)
dw_desc = cudnn.create_filter_descriptor(dw)
libcudnn.RNNBackwardWeights(
handle, rnn_desc.value, length,
self.c_x_descs.data, xs.data.ptr,
hx_desc.value, hx.data.ptr, self.c_y_descs.data, self.ys.data.ptr,
workspace.data.ptr, work_size, dw_desc.value, dw.data.ptr,
self.reserve_space.data.ptr, self.reserve_space.size)
dx = dx_list[0]
dx = dx.reshape(dx.shape + (1,))
dx_desc = cudnn.create_tensor_nd_descriptor(dx)
dws = []
dbs = []
for layer in six.moves.range(self.n_layers):
for di in six.moves.range(self.rnn_direction):
for lin_layer_id in six.moves.range(self.n_W):
mat_index = layer * self.rnn_direction + di
mat = cudnn.get_rnn_lin_layer_matrix_params(
handle, rnn_desc, mat_index,
dx_desc, dw_desc, dw, lin_layer_id)
W_index = mat_index * self.n_W + lin_layer_id
dws.append(mat.reshape(ws[W_index].shape))
bias = cudnn.get_rnn_lin_layer_bias_params(
handle, rnn_desc, mat_index,
dx_desc, dw_desc, dw, lin_layer_id)
dbs.append(bias.reshape(bs[W_index].shape))
if self.use_cell:
# LSTM
return tuple([dhx, dcx] + dws + dbs + dx_list)
else:
# GRU, RNN
return tuple([dhx, ] + dws + dbs + dx_list)
class NStepRNNTanh(BaseNStepRNN):
    """Uni-directional stacked RNN (tanh activation) cuDNN function."""

    def __init__(self, n_layers, states, **kwargs):
        super(NStepRNNTanh, self).__init__(
            n_layers, states, rnn_dir='uni', rnn_mode='rnn_tanh', **kwargs)
class NStepRNNReLU(BaseNStepRNN):
    """Uni-directional stacked RNN (ReLU activation) cuDNN function."""

    def __init__(self, n_layers, states, **kwargs):
        super(NStepRNNReLU, self).__init__(
            n_layers, states, rnn_dir='uni', rnn_mode='rnn_relu', **kwargs)
class NStepBiRNNTanh(BaseNStepRNN):
    """Bi-directional stacked RNN (tanh activation) cuDNN function."""

    def __init__(self, n_layers, states, **kwargs):
        super(NStepBiRNNTanh, self).__init__(
            n_layers, states, rnn_dir='bi', rnn_mode='rnn_tanh', **kwargs)
class NStepBiRNNReLU(BaseNStepRNN):
    """Bi-directional stacked RNN (ReLU activation) cuDNN function."""

    def __init__(self, n_layers, states, **kwargs):
        super(NStepBiRNNReLU, self).__init__(
            n_layers, states, rnn_dir='bi', rnn_mode='rnn_relu', **kwargs)
def n_step_rnn(
        n_layers, dropout_ratio, hx, ws, bs, xs, activation='tanh', **kwargs):
    """n_step_rnn(n_layers, dropout_ratio, hx, ws, bs, xs, activation='tanh')

    Stacked uni-directional RNN function for sequence inputs.

    Given an initial hidden state :math:`h_0`, an input sequence :math:`x`,
    weight matrices :math:`W` and bias vectors :math:`b`, this function
    computes the hidden state at every time step:

    .. math::
       h_t = f(W_0 x_t + W_1 h_{t-1} + b_0 + b_1)

    where :math:`f` is the activation function. :math:`W_0`/:math:`b_0`
    act on the input sequence and :math:`W_1`/:math:`b_1` on the previous
    hidden state. Each of the ``n_layers`` layers therefore needs two
    weight matrices and two bias vectors. When ``n_layers`` is greater
    than one, the input of the ``k``-th layer is the hidden state of the
    ``k-1``-th layer.

    .. warning::

       The ``train`` and ``use_cudnn`` arguments are not supported anymore
       since v2. Use ``chainer.using_config('train', train)`` and
       ``chainer.using_config('use_cudnn', use_cudnn)`` instead.
       See :func:`chainer.using_config`.

    Args:
        n_layers(int): Number of layers.
        dropout_ratio(float): Dropout ratio.
        hx (chainer.Variable): Variable holding stacked hidden states with
            shape ``(S, B, N)``: ``S`` is the number of layers (equal to
            ``n_layers``), ``B`` the mini-batch size and ``N`` the number
            of hidden units.
        ws (list of list of chainer.Variable): Weight matrices. ``ws[i]``
            holds the two matrices of the i-th layer; ``ws[i][j]``
            corresponds to ``W_j`` in the equation. Only ``ws[0][j]`` with
            ``0 <= j < 1`` has shape ``(I, N)`` (it multiplies the input);
            all other matrices have shape ``(N, N)``.
        bs (list of list of chainer.Variable): Bias vectors. ``bs[i]``
            holds the two vectors of the i-th layer; ``bs[i][j]``
            corresponds to ``b_j`` in the equation. Each vector has shape
            ``(N,)``.
        xs (list of chainer.Variable): Input sequence. ``xs[t]`` holds the
            value at time ``t`` with shape ``(B_t, I)``, where ``B_t`` is
            the mini-batch size at time ``t`` and ``I`` the input size.
            Variable-length sequences are supported: sort sequences by
            descending length and transpose them (see
            :func:`~chainer.functions.transpose_sequence`), so that
            ``xs[t].shape[0] >= xs[t + 1].shape[0]`` holds.
        activation (str): Activation function name, ``tanh`` or ``relu``.

    Returns:
        tuple: A pair ``(hy, ys)``:

            - ``hy`` is the updated hidden states with the same shape
              as ``hx``.
            - ``ys`` is a list of :class:`~chainer.Variable`; ``ys[t]``
              holds the last layer's hidden state for input ``xs[t]``,
              with shape ``(B_t, N)``.

    """
    return n_step_rnn_base(
        n_layers, dropout_ratio, hx, ws, bs, xs, activation,
        use_bi_direction=False, **kwargs)
def n_step_birnn(
        n_layers, dropout_ratio, hx, ws, bs, xs, activation='tanh', **kwargs):
    """n_step_birnn(n_layers, dropout_ratio, hx, ws, bs, xs, activation='tanh')

    Stacked Bi-directional RNN function for sequence inputs.

    This function calculates stacked Bi-directional RNN with sequences.
    This function gets an initial hidden state :math:`h_0`, an initial
    cell state :math:`c_0`, an input sequence :math:`x`,
    weight matrices :math:`W`, and bias vectors :math:`b`.
    This function calculates hidden states :math:`h_t` and :math:`c_t` for each
    time :math:`t` from input :math:`x_t`.

    .. math::
        h^{f}_t &=& f(W^{f}_0 x_t + W^{f}_1 h_{t-1} + b^{f}_0 + b^{f}_1), \\\\
        h^{b}_t &=& f(W^{b}_0 x_t + W^{b}_1 h_{t-1} + b^{b}_0 + b^{b}_1), \\\\
        h_t  &=& [h^{f}_t; h^{f}_t], \\\\

    where :math:`f` is an activation function.

    Weight matrices :math:`W` contain two matrices :math:`W^{f}` and
    :math:`W^{b}`. :math:`W^{f}` is weight matrices for forward directional
    RNN. :math:`W^{b}` is weight matrices for backward directional RNN.

    :math:`W^{f}` contains :math:`W^{f}_0` for an input sequence and
    :math:`W^{f}_1` for a hidden state.
    :math:`W^{b}` contains :math:`W^{b}_0` for an input sequence and
    :math:`W^{b}_1` for a hidden state.

    Bias matrices :math:`b` contain two matrices :math:`b^{f}` and
    :math:`b^{f}`. :math:`b^{f}` contains :math:`b^{f}_0` for an input sequence
    and :math:`b^{f}_1` for a hidden state.
    :math:`b^{b}` contains :math:`b^{b}_0` for an input sequence and
    :math:`b^{b}_1` for a hidden state.

    As the function accepts a sequence, it calculates :math:`h_t` for all
    :math:`t` with one call. Two weight matrices and two bias vectors are
    required for each layer. So, when :math:`S` layers exist, you need to
    prepare :math:`2S` weight matrices and :math:`2S` bias vectors.

    If the number of layers ``n_layers`` is greater than :math:`1`, input
    of ``k``-th layer is hidden state ``h_t`` of ``k-1``-th layer.
    Note that all input variables except first layer may have different shape
    from the first layer.

    .. warning::

       ``train`` and ``use_cudnn`` arguments are not supported anymore since
       v2.
       Instead, use ``chainer.using_config('train', train)`` and
       ``chainer.using_config('use_cudnn', use_cudnn)`` respectively.
       See :func:`chainer.using_config`.

    Args:
        n_layers(int): Number of layers.
        dropout_ratio(float): Dropout ratio.
        hx (chainer.Variable): Variable holding stacked hidden states.
            Its shape is ``(S, B, N)`` where ``S`` is number of layers and is
            equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
            dimension of hidden units.
        ws (list of list of chainer.Variable): Weight matrices. ``ws[i + di]``
            represents weights for i-th layer.
            Note that ``di = 0`` for forward-RNN and ``di = 1`` for
            backward-RNN.
            Each ``ws[i + di]`` is a list containing two matrices.
            ``ws[i + di][j]`` is corresponding with ``W^{f}_j`` if ``di = 0``
            and corresponding with ``W^{b}_j`` if ``di = 1`` in the equation.
            Only ``ws[0][j]`` and ``ws[1][j]`` where ``0 <= j < 1`` are
            ``(I, N)`` shape as they are multiplied with input variables.
            All other matrices has ``(N, N)`` shape.
        bs (list of list of chainer.Variable): Bias vectors. ``bs[i + di]``
            represents biases for i-th layer.
            Note that ``di = 0`` for forward-RNN and ``di = 1`` for
            backward-RNN.
            Each ``bs[i + di]`` is a list containing two vectors.
            ``bs[i + di][j]`` is corresponding with ``b^{f}_j`` if ``di = 0``
            and corresponding with ``b^{b}_j`` if ``di = 1`` in the equation.
            Shape of each matrix is ``(N,)`` where ``N`` is dimension of
            hidden units.
        xs (list of chainer.Variable): A list of :class:`~chainer.Variable`
            holding input values. Each element ``xs[t]`` holds input value
            for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
            mini-batch size for time ``t``, and ``I`` is size of input units.
            Note that this functions supports variable length sequences.
            When sequences has different lengths, sort sequences in descending
            order by length, and transpose the sorted sequence.
            :func:`~chainer.functions.transpose_sequence` transpose a list
            of :func:`~chainer.Variable` holding sequence.
            So ``xs`` needs to satisfy
            ``xs[t].shape[0] >= xs[t + 1].shape[0]``.
        activation (str): Activation function name.
            Please select ``tanh`` or ``relu``.

    Returns:
        tuple: This functions returns a tuple containing three elements,
            ``hy`` and ``ys``.

            - ``hy`` is an updated hidden states whose shape is same as ``hx``.
            - ``ys`` is a list of :class:`~chainer.Variable` . Each element
              ``ys[t]`` holds hidden states of the last layer corresponding
              to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t``
              is mini-batch size for time ``t``, and ``N`` is size of hidden
              units. Note that ``B_t`` is the same value as ``xs[t]``.

    """
    # Forward **kwargs so that n_step_rnn_base can reject the deprecated
    # ``train``/``use_cudnn`` keyword arguments with a proper error message.
    # Previously kwargs were accepted here but silently dropped, which made
    # this function inconsistent with n_step_rnn().
    return n_step_rnn_base(n_layers, dropout_ratio, hx, ws, bs, xs,
                           activation, use_bi_direction=True, **kwargs)
def n_step_rnn_base(n_layers, dropout_ratio, hx, ws, bs, xs,
                    activation, use_bi_direction, **kwargs):
    """n_step_rnn_base(n_layers, dropout_ratio, hx, ws, bs, xs, activation, use_bi_direction)

    Base function for Stack RNN/BiRNN functions.

    This function is used at :func:`chainer.functions.n_step_birnn` and
    :func:`chainer.functions.n_step_rnn`.
    This function's behavior depends on following arguments,
    ``activation`` and ``use_bi_direction``.

    .. warning::

       ``train`` and ``use_cudnn`` arguments are not supported anymore since
       v2.
       Instead, use ``chainer.using_config('train', train)`` and
       ``chainer.using_config('use_cudnn', use_cudnn)`` respectively.
       See :func:`chainer.using_config`.

    Args:
        n_layers(int): Number of layers.
        dropout_ratio(float): Dropout ratio.
        hx (chainer.Variable): Variable holding stacked hidden states.
            Its shape is ``(S, B, N)`` where ``S`` is number of layers and is
            equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
            dimension of hidden units.
        ws (list of list of chainer.Variable): Weight matrices. ``ws[i]``
            represents weights for i-th layer.
            Each ``ws[i]`` is a list containing two matrices.
            ``ws[i][j]`` is corresponding with ``W_j`` in the equation.
            Only ``ws[0][j]`` where ``0 <= j < 1`` is ``(I, N)`` shape as they
            are multiplied with input variables. All other matrices has
            ``(N, N)`` shape.
        bs (list of list of chainer.Variable): Bias vectors. ``bs[i]``
            represents biases for i-th layer.
            Each ``bs[i]`` is a list containing two vectors.
            ``bs[i][j]`` is corresponding with ``b_j`` in the equation.
            Shape of each matrix is ``(N,)`` where ``N`` is dimension of
            hidden units.
        xs (list of chainer.Variable): A list of :class:`~chainer.Variable`
            holding input values. Each element ``xs[t]`` holds input value
            for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
            mini-batch size for time ``t``, and ``I`` is size of input units.
            Note that this functions supports variable length sequences.
            When sequences has different lengths, sort sequences in descending
            order by length, and transpose the sorted sequence.
            :func:`~chainer.functions.transpose_sequence` transpose a list
            of :func:`~chainer.Variable` holding sequence.
            So ``xs`` needs to satisfy
            ``xs[t].shape[0] >= xs[t + 1].shape[0]``.
        activation (str): Activation function name.
            Please select ``tanh`` or ``relu``.
        use_bi_direction (bool): If ``True``, this function uses
            Bi-directional RNN.

    Returns:
        tuple: This functions returns a tuple containing three elements,
            ``hy`` and ``ys``.

            - ``hy`` is an updated hidden states whose shape is same as ``hx``.
            - ``ys`` is a list of :class:`~chainer.Variable` . Each element
              ``ys[t]`` holds hidden states of the last layer corresponding
              to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t``
              is mini-batch size for time ``t``, and ``N`` is size of hidden
              units. Note that ``B_t`` is the same value as ``xs[t]``.

    .. seealso::
       :func:`chainer.functions.n_step_rnn`
       :func:`chainer.functions.n_step_birnn`

    """  # NOQA
    # Reject the deprecated v1-style keyword arguments with a helpful message.
    argument.check_unexpected_kwargs(
        kwargs, train='train argument is not supported anymore. '
        'Use chainer.using_config',
        use_cudnn='use_cudnn argument is not supported anymore. '
        'Use chainer.using_config')
    argument.assert_kwargs_empty(kwargs)

    # Validate the activation name before any expensive work.
    activation_list = ['tanh', 'relu']
    if activation not in activation_list:
        candidate = ','.join(activation_list)
        raise ValueError('Invalid activation: "%s". Please select from [%s]'
                         % (activation, candidate))

    xp = cuda.get_array_module(hx)

    if xp is not numpy and chainer.should_use_cudnn('>=auto', 5000):
        # GPU + cuDNN (>= v5) path: delegate the whole computation to a
        # single fused cuDNN RNN function.
        states = get_random_state().create_dropout_states(dropout_ratio)
        # flatten all input variables
        inputs = tuple(itertools.chain(
            (hx, ),
            itertools.chain.from_iterable(ws),
            itertools.chain.from_iterable(bs),
            xs))
        if use_bi_direction:
            # Bi-directional RNN
            if activation == 'tanh':
                rnn = NStepBiRNNTanh(n_layers, states)
            elif activation == 'relu':
                rnn = NStepBiRNNReLU(n_layers, states)
        else:
            # Uni-directional RNN
            if activation == 'tanh':
                rnn = NStepRNNTanh(n_layers, states)
            elif activation == 'relu':
                rnn = NStepRNNReLU(n_layers, states)

        # ret is (hy, y_0, y_1, ...): first element is the hidden state
        # stack, the rest are the per-time-step outputs.
        ret = rnn(*inputs)
        hy, = ret[:1]
        ys = ret[1:]
        return hy, ys

    else:
        # CPU (or no-cuDNN) fallback: compose the RNN from elementary
        # functions, layer by layer and time step by time step.
        direction = 2 if use_bi_direction else 1
        # Split the stacked hidden state into one (B, N) array per
        # layer/direction pair.
        hx = split_axis.split_axis(hx, n_layers * direction, axis=0,
                                   force_tuple=True)
        hx = [reshape.reshape(h, h.shape[1:]) for h in hx]

        # Stack input-side and hidden-side weights/biases per layer.
        xws = [_stack_weight([w[0]]) for w in ws]
        hws = [_stack_weight([w[1]]) for w in ws]
        xbs = [_stack_weight([b[0]]) for b in bs]
        hbs = [_stack_weight([b[1]]) for b in bs]

        xs_next = xs
        hy = []
        for layer in six.moves.range(n_layers):

            def _one_directional_loop(di):
                # di=0, forward RNN
                # di=1, backward RNN
                xs_list = xs_next if di == 0 else reversed(xs_next)
                layer_idx = direction * layer + di
                h = hx[layer_idx]
                h_list = []
                for x in xs_list:
                    batch = x.shape[0]
                    if h.shape[0] > batch:
                        # Variable-length sequences: shorter sequences have
                        # already ended, so keep their final states aside in
                        # h_rest and only advance the still-active rows.
                        h, h_rest = split_axis.split_axis(h, [batch], axis=0)
                    else:
                        h_rest = None

                    if layer > 0:
                        # Inter-layer dropout (not applied to the input
                        # of the first layer).
                        x = dropout.dropout(x, ratio=dropout_ratio)

                    rnn_in = (linear.linear(x, xws[layer_idx],
                                            xbs[layer_idx]) +
                              linear.linear(h, hws[layer_idx], hbs[layer_idx]))
                    if activation == 'tanh':
                        h_bar = tanh.tanh(rnn_in)
                    elif activation == 'relu':
                        h_bar = relu.relu(rnn_in)

                    if h_rest is not None:
                        # Re-attach the frozen states of finished sequences.
                        h = concat.concat([h_bar, h_rest], axis=0)
                    else:
                        h = h_bar
                    h_list.append(h_bar)
                return h, h_list

            # Forward RNN
            h, h_forward = _one_directional_loop(di=0)
            hy.append(h)

            if use_bi_direction:
                # Backward RNN
                h, h_backward = _one_directional_loop(di=1)
                # h_backward was collected in reversed time order; restore
                # chronological order before concatenating.
                h_backward.reverse()
                # Concat
                xs_next = [concat.concat([hfi, hbi], axis=1) for (hfi, hbi) in
                           six.moves.zip(h_forward, h_backward)]
                hy.append(h)
            else:
                # Uni-directional RNN
                xs_next = h_forward

        ys = xs_next
        hy = stack.stack(hy)
        return hy, tuple(ys)
| [
"numpy.uint64",
"chainer.functions.array.reshape.reshape",
"chainer.cuda.cupy.split",
"chainer.cuda.cupy.zeros_like",
"six.moves.zip",
"chainer.functions.array.split_axis.split_axis",
"chainer.functions.noise.dropout.dropout",
"chainer.functions.activation.relu.relu",
"chainer.functions.array.stack.... | [((843, 866), 'chainer.functions.array.stack.stack', 'stack.stack', (['ws'], {'axis': '(1)'}), '(ws, axis=1)\n', (854, 866), False, 'from chainer.functions.array import stack\n'), ((898, 952), 'chainer.functions.array.reshape.reshape', 'reshape.reshape', (['w', '((shape[0] * shape[1],) + shape[2:])'], {}), '(w, (shape[0] * shape[1],) + shape[2:])\n', (913, 952), False, 'from chainer.functions.array import reshape\n'), ((3213, 3226), 'chainer.cuda.Device', 'cuda.Device', ([], {}), '()\n', (3224, 3226), False, 'from chainer import cuda\n'), ((32425, 32637), 'chainer.utils.argument.check_unexpected_kwargs', 'argument.check_unexpected_kwargs', (['kwargs'], {'train': '"""train argument is not supported anymore. Use chainer.using_config"""', 'use_cudnn': '"""use_cudnn argument is not supported anymore. Use chainer.using_config"""'}), "(kwargs, train=\n 'train argument is not supported anymore. Use chainer.using_config',\n use_cudnn=\n 'use_cudnn argument is not supported anymore. Use chainer.using_config')\n", (32457, 32637), False, 'from chainer.utils import argument\n'), ((32667, 32703), 'chainer.utils.argument.assert_kwargs_empty', 'argument.assert_kwargs_empty', (['kwargs'], {}), '(kwargs)\n', (32695, 32703), False, 'from chainer.utils import argument\n'), ((32971, 32996), 'chainer.cuda.get_array_module', 'cuda.get_array_module', (['hx'], {}), '(hx)\n', (32992, 32996), False, 'from chainer import cuda\n'), ((1049, 1083), 'numpy.array', 'numpy.array', (['lst'], {'dtype': 'numpy.intp'}), '(lst, dtype=numpy.intp)\n', (1060, 1083), False, 'import numpy\n'), ((4384, 4504), 'chainer.utils.argument.check_unexpected_kwargs', 'argument.check_unexpected_kwargs', (['kwargs'], {'train': '"""train argument is not supported anymore. Use chainer.using_config"""'}), "(kwargs, train=\n 'train argument is not supported anymore. 
Use chainer.using_config')\n", (4416, 4504), False, 'from chainer.utils import argument\n'), ((4536, 4572), 'chainer.utils.argument.assert_kwargs_empty', 'argument.assert_kwargs_empty', (['kwargs'], {}), '(kwargs)\n', (4564, 4572), False, 'from chainer.utils import argument\n'), ((6948, 6983), 'six.moves.zip', 'six.moves.zip', (['x_types', 'x_types[1:]'], {}), '(x_types, x_types[1:])\n', (6961, 6983), False, 'import six\n'), ((7283, 7313), 'six.moves.range', 'six.moves.range', (['self.n_layers'], {}), '(self.n_layers)\n', (7298, 7313), False, 'import six\n'), ((9502, 9533), 'chainer.cuda.cupy.ascontiguousarray', 'cuda.cupy.ascontiguousarray', (['hx'], {}), '(hx)\n', (9529, 9533), False, 'from chainer import cuda\n'), ((9681, 9718), 'chainer.cuda.cupy.concatenate', 'cuda.cupy.concatenate', (['x_list'], {'axis': '(0)'}), '(x_list, axis=0)\n', (9702, 9718), False, 'from chainer import cuda\n'), ((10401, 10464), 'chainer.cuda.cupy.empty', 'cuda.cupy.empty', (['(weights_size // 4, 1, 1)'], {'dtype': 'numpy.float32'}), '((weights_size // 4, 1, 1), dtype=numpy.float32)\n', (10416, 10464), False, 'from chainer import cuda\n'), ((10538, 10568), 'six.moves.range', 'six.moves.range', (['self.n_layers'], {}), '(self.n_layers)\n', (10553, 10568), False, 'import six\n'), ((11531, 11560), 'chainer.cuda.cupy.split', 'cuda.cupy.split', (['ys', 'sections'], {}), '(ys, sections)\n', (11546, 11560), False, 'from chainer import cuda\n'), ((11633, 11657), 'chainer.cuda.cupy.empty_like', 'cuda.cupy.empty_like', (['hx'], {}), '(hx)\n', (11653, 11657), False, 'from chainer import cuda\n'), ((11845, 11885), 'chainer.cuda.cupy.empty', 'cuda.cupy.empty', (['(work_size,)'], {'dtype': '"""b"""'}), "((work_size,), dtype='b')\n", (11860, 11885), False, 'from chainer import cuda\n'), ((14555, 14586), 'chainer.cuda.cupy.ascontiguousarray', 'cuda.cupy.ascontiguousarray', (['hx'], {}), '(hx)\n', (14582, 14586), False, 'from chainer import cuda\n'), ((14859, 14896), 'chainer.cuda.cupy.concatenate', 
'cuda.cupy.concatenate', (['x_list'], {'axis': '(0)'}), '(x_list, axis=0)\n', (14880, 14896), False, 'from chainer import cuda\n'), ((14941, 14965), 'chainer.cuda.cupy.empty_like', 'cuda.cupy.empty_like', (['hx'], {}), '(hx)\n', (14961, 14965), False, 'from chainer import cuda\n'), ((15156, 15194), 'chainer.cuda.cupy.concatenate', 'cuda.cupy.concatenate', (['dy_list'], {'axis': '(0)'}), '(dy_list, axis=0)\n', (15177, 15194), False, 'from chainer import cuda\n'), ((15393, 15433), 'chainer.cuda.cupy.empty', 'cuda.cupy.empty', (['(work_size,)'], {'dtype': '"""b"""'}), "((work_size,), dtype='b')\n", (15408, 15433), False, 'from chainer import cuda\n'), ((15508, 15532), 'chainer.cuda.cupy.empty_like', 'cuda.cupy.empty_like', (['xs'], {}), '(xs)\n', (15528, 15532), False, 'from chainer import cuda\n'), ((15614, 15647), 'chainer.cuda.cupy.split', 'cuda.cupy.split', (['dxs', 'sections', '(0)'], {}), '(dxs, sections, 0)\n', (15629, 15647), False, 'from chainer import cuda\n'), ((16283, 16311), 'chainer.cuda.cupy.zeros_like', 'cuda.cupy.zeros_like', (['self.w'], {}), '(self.w)\n', (16303, 16311), False, 'from chainer import cuda\n'), ((16885, 16915), 'six.moves.range', 'six.moves.range', (['self.n_layers'], {}), '(self.n_layers)\n', (16900, 16915), False, 'import six\n'), ((33025, 33065), 'chainer.should_use_cudnn', 'chainer.should_use_cudnn', (['""">=auto"""', '(5000)'], {}), "('>=auto', 5000)\n", (33049, 33065), False, 'import chainer\n'), ((33994, 34067), 'chainer.functions.array.split_axis.split_axis', 'split_axis.split_axis', (['hx', '(n_layers * direction)'], {'axis': '(0)', 'force_tuple': '(True)'}), '(hx, n_layers * direction, axis=0, force_tuple=True)\n', (34015, 34067), False, 'from chainer.functions.array import split_axis\n'), ((34422, 34447), 'six.moves.range', 'six.moves.range', (['n_layers'], {}), '(n_layers)\n', (34437, 34447), False, 'import six\n'), ((36345, 36360), 'chainer.functions.array.stack.stack', 'stack.stack', (['hy'], {}), '(hy)\n', (36356, 
36360), False, 'from chainer.functions.array import stack\n'), ((2722, 2740), 'numpy.uint64', 'numpy.uint64', (['seed'], {}), '(seed)\n', (2734, 2740), False, 'import numpy\n'), ((3321, 3346), 'os.getenv', 'os.getenv', (['"""CHAINER_SEED"""'], {}), "('CHAINER_SEED')\n", (3330, 3346), False, 'import os\n'), ((5860, 6115), 'chainer.utils.type_check.expect', 'type_check.expect', (['(h_type.dtype == numpy.float32)', '(c_type.dtype == numpy.float32)', '(h_type.ndim == 3)', '(h_type.shape[0] == h_size)', '(c_type.ndim == 3)', '(c_type.shape[0] == h_size)', '(h_type.shape[1] == c_type.shape[1])', '(h_type.shape[2] == c_type.shape[2])'], {}), '(h_type.dtype == numpy.float32, c_type.dtype == numpy.\n float32, h_type.ndim == 3, h_type.shape[0] == h_size, c_type.ndim == 3,\n c_type.shape[0] == h_size, h_type.shape[1] == c_type.shape[1], h_type.\n shape[2] == c_type.shape[2])\n', (5877, 6115), False, 'from chainer.utils import type_check\n'), ((6450, 6548), 'chainer.utils.type_check.expect', 'type_check.expect', (['(h_type.dtype == numpy.float32)', '(h_type.ndim == 3)', '(h_type.shape[0] == h_size)'], {}), '(h_type.dtype == numpy.float32, h_type.ndim == 3, h_type.\n shape[0] == h_size)\n', (6467, 6548), False, 'from chainer.utils import type_check\n'), ((6802, 6868), 'chainer.utils.type_check.expect', 'type_check.expect', (['(x_type.dtype == numpy.float32)', '(x_type.ndim == 2)'], {}), '(x_type.dtype == numpy.float32, x_type.ndim == 2)\n', (6819, 6868), False, 'from chainer.utils import type_check\n'), ((6997, 7094), 'chainer.utils.type_check.expect', 'type_check.expect', (['(x1_type.shape[0] >= x2_type.shape[0])', '(x1_type.shape[1] == x2_type.shape[1])'], {}), '(x1_type.shape[0] >= x2_type.shape[0], x1_type.shape[1] ==\n x2_type.shape[1])\n', (7014, 7094), False, 'from chainer.utils import type_check\n'), ((7336, 7361), 'six.moves.range', 'six.moves.range', (['self.n_W'], {}), '(self.n_W)\n', (7351, 7361), False, 'import six\n'), ((8793, 8824), 
'chainer.cuda.cupy.ascontiguousarray', 'cuda.cupy.ascontiguousarray', (['cx'], {}), '(cx)\n', (8820, 8824), False, 'from chainer import cuda\n'), ((8903, 8927), 'chainer.cuda.cupy.empty_like', 'cuda.cupy.empty_like', (['cx'], {}), '(cx)\n', (8923, 8927), False, 'from chainer import cuda\n'), ((10592, 10627), 'six.moves.range', 'six.moves.range', (['self.rnn_direction'], {}), '(self.rnn_direction)\n', (10607, 10627), False, 'import six\n'), ((12521, 12564), 'chainer.cuda.cupy.empty', 'cuda.cupy.empty', (['(reserve_size,)'], {'dtype': '"""b"""'}), "((reserve_size,), dtype='b')\n", (12536, 12564), False, 'from chainer import cuda\n'), ((13563, 13594), 'chainer.cuda.cupy.ascontiguousarray', 'cuda.cupy.ascontiguousarray', (['cx'], {}), '(cx)\n', (13590, 13594), False, 'from chainer import cuda\n'), ((13613, 13637), 'chainer.cuda.cupy.empty_like', 'cuda.cupy.empty_like', (['cx'], {}), '(cx)\n', (13633, 13637), False, 'from chainer import cuda\n'), ((14630, 14654), 'chainer.cuda.cupy.zeros_like', 'cuda.cupy.zeros_like', (['hx'], {}), '(hx)\n', (14650, 14654), False, 'from chainer import cuda\n'), ((16939, 16974), 'six.moves.range', 'six.moves.range', (['self.rnn_direction'], {}), '(self.rnn_direction)\n', (16954, 16974), False, 'import six\n'), ((34117, 34148), 'chainer.functions.array.reshape.reshape', 'reshape.reshape', (['h', 'h.shape[1:]'], {}), '(h, h.shape[1:])\n', (34132, 34148), False, 'from chainer.functions.array import reshape\n'), ((7389, 7424), 'six.moves.range', 'six.moves.range', (['self.rnn_direction'], {}), '(self.rnn_direction)\n', (7404, 7424), False, 'import six\n'), ((10712, 10737), 'six.moves.range', 'six.moves.range', (['self.n_W'], {}), '(self.n_W)\n', (10727, 10737), False, 'import six\n'), ((13520, 13544), 'chainer.cuda.cupy.zeros_like', 'cuda.cupy.zeros_like', (['cx'], {}), '(cx)\n', (13540, 13544), False, 'from chainer import cuda\n'), ((14813, 14844), 'chainer.cuda.cupy.zeros_like', 'cuda.cupy.zeros_like', (['x_list[i]'], {}), '(x_list[i])\n', 
(14833, 14844), False, 'from chainer import cuda\n'), ((17012, 17037), 'six.moves.range', 'six.moves.range', (['self.n_W'], {}), '(self.n_W)\n', (17027, 17037), False, 'import six\n'), ((33250, 33283), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['ws'], {}), '(ws)\n', (33279, 33283), False, 'import itertools\n'), ((33297, 33330), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['bs'], {}), '(bs)\n', (33326, 33330), False, 'import itertools\n'), ((2519, 2532), 'os.urandom', 'os.urandom', (['(8)'], {}), '(8)\n', (2529, 2532), False, 'import os\n'), ((8248, 8456), 'chainer.utils.type_check.expect', 'type_check.expect', (['(w_type.dtype == numpy.float32)', '(w_type.ndim == 2)', '(w_type.shape[0] == out_size)', '(w_type.shape[1] == w_in)', '(b_type.dtype == numpy.float32)', '(b_type.ndim == 1)', '(b_type.shape[0] == out_size)'], {}), '(w_type.dtype == numpy.float32, w_type.ndim == 2, w_type.\n shape[0] == out_size, w_type.shape[1] == w_in, b_type.dtype == numpy.\n float32, b_type.ndim == 1, b_type.shape[0] == out_size)\n', (8265, 8456), False, 'from chainer.utils import type_check\n'), ((36072, 36105), 'chainer.functions.array.concat.concat', 'concat.concat', (['[hfi, hbi]'], {'axis': '(1)'}), '([hfi, hbi], axis=1)\n', (36085, 36105), False, 'from chainer.functions.array import concat\n'), ((34899, 34940), 'chainer.functions.array.split_axis.split_axis', 'split_axis.split_axis', (['h', '[batch]'], {'axis': '(0)'}), '(h, [batch], axis=0)\n', (34920, 34940), False, 'from chainer.functions.array import split_axis\n'), ((35068, 35107), 'chainer.functions.noise.dropout.dropout', 'dropout.dropout', (['x'], {'ratio': 'dropout_ratio'}), '(x, ratio=dropout_ratio)\n', (35083, 35107), False, 'from chainer.functions.noise import dropout\n'), ((35139, 35187), 'chainer.functions.connection.linear.linear', 'linear.linear', (['x', 'xws[layer_idx]', 'xbs[layer_idx]'], {}), '(x, xws[layer_idx], xbs[layer_idx])\n', (35152, 35187), False, 'from 
chainer.functions.connection import linear\n'), ((35264, 35312), 'chainer.functions.connection.linear.linear', 'linear.linear', (['h', 'hws[layer_idx]', 'hbs[layer_idx]'], {}), '(h, hws[layer_idx], hbs[layer_idx])\n', (35277, 35312), False, 'from chainer.functions.connection import linear\n'), ((35391, 35408), 'chainer.functions.activation.tanh.tanh', 'tanh.tanh', (['rnn_in'], {}), '(rnn_in)\n', (35400, 35408), False, 'from chainer.functions.activation import tanh\n'), ((35578, 35616), 'chainer.functions.array.concat.concat', 'concat.concat', (['[h_bar, h_rest]'], {'axis': '(0)'}), '([h_bar, h_rest], axis=0)\n', (35591, 35616), False, 'from chainer.functions.array import concat\n'), ((36151, 36187), 'six.moves.zip', 'six.moves.zip', (['h_forward', 'h_backward'], {}), '(h_forward, h_backward)\n', (36164, 36187), False, 'import six\n'), ((2665, 2677), 'time.clock', 'time.clock', ([], {}), '()\n', (2675, 2677), False, 'import time\n'), ((35488, 35505), 'chainer.functions.activation.relu.relu', 'relu.relu', (['rnn_in'], {}), '(rnn_in)\n', (35497, 35505), False, 'from chainer.functions.activation import relu\n')] |
import logging
import warnings
import os
import re
import os
import numpy as np
import pandas as pd
from copy import deepcopy
from lib_utils_io import read_file_tif, read_obj, write_obj, write_file_tif, create_darray_3d
from lib_utils_system import fill_tags2string, make_folder
from lib_utils_statistics import filter_data, compute_moments_data_gamma_distribution, compute_norm_data
import matplotlib.pylab as plt
class DriverStatistics:
    def __init__(self, time_run, src_dict, dest_dict, ancillary_dict, template_tags, time_frequency='3H', time_offset=0,
                 data_geo=None, data_proj=None, data_transform=None,
                 flag_cleaning_statistics=True):
        """Configure the statistics driver.

        :param time_run: reference run time (anything accepted by pd.Timestamp)
        :param src_dict: source dataset settings with 'folder' and 'filename' keys
        :param dest_dict: destination dataset settings with 'folder' and 'filename' keys
        :param ancillary_dict: ancillary file settings with 'folder' and 'filename' keys
        :param template_tags: tag definitions used to fill filename templates
        :param time_frequency: expected time step of the source files (default '3H')
        :param time_offset: time offset (stored, not used in this view of the class)
        :param data_geo: geographical datasets (values/mask/field capacity/wilting point)
        :param data_proj: geographical projection information
        :param data_transform: geographical geotransform information
        :param flag_cleaning_statistics: if True, remove a pre-existing ancillary file
        :raises IOError: when no source file matches the filename template
        """
        self.time_run = pd.Timestamp(time_run)

        # Dictionary keys used to read folder/filename from the settings dicts.
        self.tag_folder = 'folder'
        self.tag_filename = 'filename'

        self.folder_name_src = src_dict[self.tag_folder]
        self.file_name_src = src_dict[self.tag_filename]
        self.folder_name_dest = dest_dict[self.tag_folder]
        self.file_name_dest = dest_dict[self.tag_filename]
        self.folder_name_ancillary = ancillary_dict[self.tag_folder]
        self.file_name_ancillary = ancillary_dict[self.tag_filename]

        self.time_frequency = time_frequency
        self.time_offset = time_offset
        # Matches timestamps embedded in filenames, e.g. 20200101T0000
        # (8 digits, one word character separator, 4 digits).
        self.time_regexp = r'\d{4}\d{2}\d{2}\w\d{2}\d{2}'

        self.template_tags = template_tags

        # Keys used to access the geographical data dictionary.
        self.geo_values_tag = 'geo_values'
        self.geo_mask_tag = 'geo_mask'
        self.geo_field_capacity_tag = 'geo_field_capacity'
        self.geo_wilting_point_tag = 'geo_wilting_point'

        # Keys used for the gamma-distribution statistics outputs.
        self.gamma_k_tag = 'k'
        self.gamma_theta_tag = 'theta'
        self.gamma_count_ratio_tag = 'count_ratio'

        self.drought_index_tag = 'sspi'

        # Reference aggregation windows in months.
        self.month_n_ref = [1, 2, 3, 6]

        # NOTE: order matters — search_filename() uses the prefix/suffix
        # computed by define_filepart(), and search_filetime() scans the
        # resulting file list.
        self.file_prefix_src, self.file_suffix_src = self.define_filepart()
        self.file_list_src = self.search_filename()

        if not self.file_list_src:
            logging.error(' ==> File list for statistics is empty')
            raise IOError('Check folder and filename used for searching file(s)')

        self.time_idx_expected, self.time_idx_start, self.time_idx_end = self.search_filetime()

        self.data_geo = data_geo
        self.data_proj = data_proj
        self.data_transform = data_transform

        self.file_ancillary = os.path.join(self.folder_name_ancillary, self.file_name_ancillary)
        make_folder(self.folder_name_ancillary)

        # Optionally discard a previously computed ancillary workspace so
        # the statistics are recomputed from scratch.
        self.flag_cleaning_statistics = flag_cleaning_statistics
        if self.flag_cleaning_statistics:
            if os.path.exists(self.file_ancillary):
                os.remove(self.file_ancillary)
def define_filepart(self, tag_null='*'):
template_file_name = self.file_name_src
template_values = {"statistics_datetime": tag_null}
file_name_tmp = fill_tags2string(template_file_name, self.template_tags, template_values)
file_prefix_src = file_name_tmp.split(tag_null)[0]
file_suffix_src = file_name_tmp.split(tag_null)[-1]
return file_prefix_src, file_suffix_src
def search_filename(self, ):
file_path_list = []
for dirpath, dirnames, filenames in os.walk(self.folder_name_src):
for filename in [f for f in filenames if f.startswith(
self.file_prefix_src) and f.endswith(self.file_suffix_src)]:
file_path = os.path.join(dirpath, filename)
file_path_list.append(file_path)
file_path_list_src = sorted(file_path_list)
return file_path_list_src
def search_filetime(self):
time_stamp_found = []
for file_name in self.file_list_src:
match_time = re.search(self.time_regexp, file_name)
time_str = match_time.group()
time_stamp = pd.Timestamp(time_str)
time_stamp_found.append(time_stamp)
time_idx_found = pd.DatetimeIndex(time_stamp_found)
time_start = time_stamp_found[0]
time_end = time_stamp_found[-1]
time_idx_expected = pd.date_range(start=time_start, end=time_end, freq=self.time_frequency)
time_idx_missed = time_idx_expected.difference(time_idx_found)
if time_idx_missed.__len__() > 0:
file_n_missed = time_idx_missed.__len__()
file_n_expected = time_idx_expected.__len__()
logging.warning(' ===> {0}/{1} files are unavailable'.format(file_n_missed, file_n_expected))
logging.warning(' ===> {0} time(s)'.format(list(time_idx_missed.values)))
time_idx_start = pd.date_range(start=time_idx_expected[0], end=time_idx_expected[-1], freq='MS')
time_stamp_end = []
for time_idx_step in time_idx_start:
time_idx_tmp = pd.date_range(start=time_idx_step, periods=time_idx_step.days_in_month * 8, freq='3H')[-1]
time_stamp_end.append(time_idx_tmp)
time_idx_end = pd.DatetimeIndex(time_stamp_end)
return time_idx_expected, time_idx_start, time_idx_end
def reader_data(self, data_threshold: float = 0.0):
    """Read all source rasters, aggregate them per monthly period and cache
    the workspace in the ancillary file.

    For each period delimited by ``self.time_idx_start``/``self.time_idx_end``
    the corresponding GeoTIFF files are loaded, masked with the geographic
    mask, valid (finite) and above-threshold samples are counted per cell,
    and the period mean of the daily-resampled data is stored. Periods whose
    sample counter is entirely zero are dropped with a warning. If the
    ancillary file already exists, the cached workspace is returned instead.

    :param data_threshold: lower bound used for the "filtered" counter
        (cells with finite value strictly greater than this threshold)
    :return: tuple (ws_data, ws_counter_all, ws_counter_filtered), each a
        dict keyed by the period label 'YYYY-MM'
    """
    logging.info(' ---> Reading data ... ')
    data_geo = self.data_geo[self.geo_values_tag]
    mask_geo = self.data_geo[self.geo_mask_tag]
    # geographic coordinates are read off the values DataArray
    latitude_geo = data_geo['south_north'].values
    longitude_geo = data_geo['west_east'].values
    if not os.path.exists(self.file_ancillary):
        # no cached workspace: compute everything from the source files
        ws_data = {}
        ws_counter_all = {}
        ws_counter_filtered = {}
        for file_time_step_start, file_time_step_end in zip(self.time_idx_start, self.time_idx_end):
            # locate the slice of source files belonging to this period
            file_loc_step_start = self.time_idx_expected.get_loc(file_time_step_start)
            file_loc_step_end = self.time_idx_expected.get_loc(file_time_step_end)
            # NOTE(review): the slice end is exclusive, so the file at the
            # period-end timestamp is not included — confirm this is intended
            file_name_step_list = self.file_list_src[file_loc_step_start: file_loc_step_end]
            file_time_key = file_time_step_start.strftime('%Y-%m')
            logging.info(' ----> Analyze period ' + file_time_key + ' ... ')
            file_time_n = file_name_step_list.__len__()
            file_time_stamp_tmp = []
            # per-cell counters and a (rows, cols, time) stack for this period
            file_sample_all_tmp = np.zeros(shape=[data_geo.shape[0], data_geo.shape[1]])
            file_sample_filtered_tmp = np.zeros(shape=[data_geo.shape[0], data_geo.shape[1]])
            file_data_tmp = np.zeros(shape=[data_geo.shape[0], data_geo.shape[1], file_time_n])
            for file_id, file_name_step in enumerate(file_name_step_list):
                if os.path.exists(file_name_step):
                    try:
                        # read_file_tif returns (values, projection, geotransform)
                        file_data_step, file_proj_step, file_geotrans_step = read_file_tif(file_name_step)
                        file_data_tmp[:, :, file_id] = file_data_step * mask_geo
                        # timestamp of this map, parsed from the file name
                        match_time = re.search(self.time_regexp, file_name_step)
                        time_str = match_time.group()
                        time_stamp = pd.Timestamp(time_str)
                        file_time_stamp_tmp.append(time_stamp)
                        with warnings.catch_warnings():
                            warnings.simplefilter("ignore")
                            # count finite cells and finite cells above threshold
                            idx_finite = np.argwhere(np.isfinite(file_data_step))
                            idx_filter = np.argwhere(
                                (np.isfinite(file_data_step)) & (file_data_step > data_threshold))
                            file_sample_all_tmp[
                                idx_finite[:, 0], idx_finite[:, 1]] = file_sample_all_tmp[idx_finite[:, 0],
                                                                                          idx_finite[:, 1]] + 1
                            file_sample_filtered_tmp[
                                idx_filter[:, 0], idx_filter[:, 1]] = file_sample_filtered_tmp[idx_filter[:, 0],
                                                                                               idx_filter[:, 1]] + 1
                    except IOError as io_exc:
                        # best-effort: a broken file is skipped with a warning
                        logging.warning(' ===> Open ' + file_name_step + ' FAILED. Detect error ' + str(io_exc))
                else:
                    logging.warning(' ===> Open ' + file_name_step + ' FAILED. File does not exist')
            # wrap the period stack into a 3D DataArray (west_east/south_north/time)
            file_time_idx_tmp = pd.DatetimeIndex(file_time_stamp_tmp)
            da_tmp = create_darray_3d(
                file_data_tmp, file_time_idx_tmp, longitude_geo, latitude_geo,
                geo_1d=True, coord_name_x='west_east', coord_name_y='south_north', coord_name_time='time',
                dim_name_x='west_east', dim_name_y='south_north', dim_name_time='time')
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                # daily mean over the period
                da_mean = da_tmp.resample(time="1D").mean()
            value_max = da_mean.max().values
            value_min = da_mean.min().values
            logging.info(' ----> Value MIN: ' + str(value_min) + ' Value MAX: ' + str(value_max))
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                # period mean, ignoring NaNs
                # NOTE(review): axis=2 assumes time is the last axis of
                # da_mean.values — verify against create_darray_3d
                ws_data[file_time_key] = np.nanmean(da_mean.values, axis=2)
            ws_counter_all[file_time_key] = file_sample_all_tmp
            ws_counter_filtered[file_time_key] = file_sample_filtered_tmp
            logging.info(' ----> Analyze period ' + file_time_key + ' ... DONE')
        # drop periods with no valid samples at all (iterate over a copy
        # because the dicts are mutated inside the loop)
        ws_counter_tmp = deepcopy(ws_counter_all)
        for ws_key, ws_values in ws_counter_tmp.items():
            if ws_values.max() == 0.0:
                ws_counter_all.pop(ws_key)
                ws_counter_filtered.pop(ws_key)
                ws_data.pop(ws_key)
                logging.warning(' ===> Analyze period ' + ws_key + ' is empty')
        # cache the workspace for later runs
        data_obj = {'data': ws_data, 'count_all': ws_counter_all, 'count_filtered': ws_counter_filtered}
        write_obj(self.file_ancillary, data_obj)
    else:
        # cached workspace available: reload it
        data_obj = read_obj(self.file_ancillary)
        ws_data = data_obj['data']
        ws_counter_all = data_obj['count_all']
        ws_counter_filtered = data_obj['count_filtered']
    logging.info(' ---> Reading data ... DONE')
    return ws_data, ws_counter_all, ws_counter_filtered
def composer_data(self, ws_data, ws_counter_all, ws_counter_filtered):
    """Compute (or reload) the statistical moments of the drought index.

    The moments are stored in the ancillary workspace file: when that file
    already contains a 'moments' entry it is reused, otherwise the data are
    filtered and the gamma-distribution moments are computed and the
    workspace file is rewritten.

    :param ws_data: dict of period data produced by :meth:`reader_data`
    :param ws_counter_all: dict of per-cell counters of all valid samples
    :param ws_counter_filtered: dict of per-cell counters of filtered samples
    :return: computed (or cached) statistical moments workspace
    :raises IOError: if the ancillary file is unavailable, or if the
        configured drought index has no moments implementation
    """
    logging.info(' ---> Computing statistics ... ')
    if os.path.exists(self.file_ancillary):
        data_obj = read_obj(self.file_ancillary)
        # idiomatic membership test (was: 'moments' not in list(data_obj.keys()))
        if 'moments' not in data_obj:
            # geographic reference fields needed by the filtering step
            # (the mask layer was read here before, but never used)
            geo_fc = self.data_geo[self.geo_field_capacity_tag].values
            geo_wp = self.data_geo[self.geo_wilting_point_tag].values
            ws_filter = filter_data(ws_data, geo_fc, geo_wp, index_name=self.drought_index_tag)
            if self.drought_index_tag == 'sspi':
                ws_moments = compute_moments_data_gamma_distribution(
                    ws_filter, ws_counter_all, ws_counter_filtered,
                    tag_gamma_k=self.gamma_k_tag, tag_gamma_theta=self.gamma_theta_tag,
                    tag_gamma_count_ratio=self.gamma_count_ratio_tag)
            else:
                logging.error(' ===> Statistical moments for index type are not available')
                raise IOError('Statistical moments not implemented yet')
            # extend the cached workspace and rewrite it atomically enough:
            # remove the old file before dumping the new one
            data_obj['filter'] = ws_filter
            data_obj['moments'] = ws_moments
            if os.path.exists(self.file_ancillary):
                os.remove(self.file_ancillary)
            write_obj(self.file_ancillary, data_obj)
            logging.info(' ---> Computing statistics ... DONE')
        else:
            ws_moments = data_obj['moments']
            logging.info(' ---> Computing statistics ... PREVIOUSLY DONE')
    else:
        logging.info(' ---> Computing statistics ... FAILED')
        logging.error(' ===> File ancillary for statistics part is unavailable')
        # fixed grammar of the error message (was: 'File does not exists')
        raise IOError('File does not exist')
    return ws_moments
def writer_data(self, ws_moments):
    """Write the computed statistical moments to GeoTIFF files.

    For every reference month and every calendar month (1..12), one
    multi-band GeoTIFF is written with the gamma parameters (k, theta) and
    the sample-count ratio. Existing output files are removed first when
    ``self.flag_cleaning_statistics`` is set; otherwise existing files are
    kept and the dump is skipped.

    :param ws_moments: moments workspace produced by :meth:`composer_data`
    """
    logging.info(' ---> Writing results ... ')
    variable_tags = [self.gamma_k_tag, self.gamma_theta_tag, self.gamma_count_ratio_tag]
    # raster geometry of the geographic reference (rows, cols)
    n_rows, n_cols = self.data_geo[self.geo_values_tag].values.shape
    for period_id, (_period_key, period_ds) in zip(self.month_n_ref, ws_moments.items()):
        for month_id in range(1, 13):
            # one band per statistical variable, with its metadata entry
            bands, band_meta = [], []
            for tag in variable_tags:
                bands.append(period_ds[tag][month_id])
                band_meta.append({'description_field': tag})
            tags_filled = {"month_reference": f'{month_id:02d}',
                           "month_period": f'{period_id:02d}'}
            folder_out = fill_tags2string(self.folder_name_dest, self.template_tags, tags_filled)
            file_out = fill_tags2string(self.file_name_dest, self.template_tags, tags_filled)
            path_out = os.path.join(folder_out, file_out)
            make_folder(folder_out)
            logging.info(' ----> Dumping filename ' + file_out + ' ... ')
            if self.flag_cleaning_statistics and os.path.exists(path_out):
                os.remove(path_out)
            if not os.path.exists(path_out):
                write_file_tif(path_out, bands,
                               n_cols, n_rows, self.data_transform, self.data_proj,
                               file_metadata=band_meta)
                logging.info(' ----> Dumping filename ' + file_out + ' ... DONE')
            else:
                logging.info(' ----> Dumping filename ' + file_out + ' ... PREVIOUSLY DONE')
    logging.info(' ---> Writing results ... DONE')
| [
"os.remove",
"os.walk",
"lib_utils_statistics.compute_moments_data_gamma_distribution",
"pandas.DatetimeIndex",
"lib_utils_statistics.filter_data",
"os.path.join",
"lib_utils_io.read_obj",
"numpy.nanmean",
"lib_utils_io.write_file_tif",
"logging.error",
"warnings.simplefilter",
"logging.warnin... | [((710, 732), 'pandas.Timestamp', 'pd.Timestamp', (['time_run'], {}), '(time_run)\n', (722, 732), True, 'import pandas as pd\n'), ((2326, 2392), 'os.path.join', 'os.path.join', (['self.folder_name_ancillary', 'self.file_name_ancillary'], {}), '(self.folder_name_ancillary, self.file_name_ancillary)\n', (2338, 2392), False, 'import os\n'), ((2401, 2440), 'lib_utils_system.make_folder', 'make_folder', (['self.folder_name_ancillary'], {}), '(self.folder_name_ancillary)\n', (2412, 2440), False, 'from lib_utils_system import fill_tags2string, make_folder\n'), ((2829, 2902), 'lib_utils_system.fill_tags2string', 'fill_tags2string', (['template_file_name', 'self.template_tags', 'template_values'], {}), '(template_file_name, self.template_tags, template_values)\n', (2845, 2902), False, 'from lib_utils_system import fill_tags2string, make_folder\n'), ((3179, 3208), 'os.walk', 'os.walk', (['self.folder_name_src'], {}), '(self.folder_name_src)\n', (3186, 3208), False, 'import os\n'), ((3888, 3922), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['time_stamp_found'], {}), '(time_stamp_found)\n', (3904, 3922), True, 'import pandas as pd\n'), ((4034, 4105), 'pandas.date_range', 'pd.date_range', ([], {'start': 'time_start', 'end': 'time_end', 'freq': 'self.time_frequency'}), '(start=time_start, end=time_end, freq=self.time_frequency)\n', (4047, 4105), True, 'import pandas as pd\n'), ((4549, 4628), 'pandas.date_range', 'pd.date_range', ([], {'start': 'time_idx_expected[0]', 'end': 'time_idx_expected[-1]', 'freq': '"""MS"""'}), "(start=time_idx_expected[0], end=time_idx_expected[-1], freq='MS')\n", (4562, 4628), True, 'import pandas as pd\n'), ((4891, 4923), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['time_stamp_end'], {}), '(time_stamp_end)\n', (4907, 4923), True, 'import pandas as pd\n'), ((5045, 5084), 'logging.info', 'logging.info', (['""" ---> Reading data ... """'], {}), "(' ---> Reading data ... 
')\n", (5057, 5084), False, 'import logging\n'), ((10225, 10268), 'logging.info', 'logging.info', (['""" ---> Reading data ... DONE"""'], {}), "(' ---> Reading data ... DONE')\n", (10237, 10268), False, 'import logging\n'), ((10415, 10462), 'logging.info', 'logging.info', (['""" ---> Computing statistics ... """'], {}), "(' ---> Computing statistics ... ')\n", (10427, 10462), False, 'import logging\n'), ((10475, 10510), 'os.path.exists', 'os.path.exists', (['self.file_ancillary'], {}), '(self.file_ancillary)\n', (10489, 10510), False, 'import os\n'), ((12266, 12308), 'logging.info', 'logging.info', (['""" ---> Writing results ... """'], {}), "(' ---> Writing results ... ')\n", (12278, 12308), False, 'import logging\n'), ((14489, 14535), 'logging.info', 'logging.info', (['""" ---> Writing results ... DONE"""'], {}), "(' ---> Writing results ... DONE')\n", (14501, 14535), False, 'import logging\n'), ((1946, 2001), 'logging.error', 'logging.error', (['""" ==> File list for statistics is empty"""'], {}), "(' ==> File list for statistics is empty')\n", (1959, 2001), False, 'import logging\n'), ((2565, 2600), 'os.path.exists', 'os.path.exists', (['self.file_ancillary'], {}), '(self.file_ancillary)\n', (2579, 2600), False, 'import os\n'), ((3686, 3724), 're.search', 're.search', (['self.time_regexp', 'file_name'], {}), '(self.time_regexp, file_name)\n', (3695, 3724), False, 'import re\n'), ((3792, 3814), 'pandas.Timestamp', 'pd.Timestamp', (['time_str'], {}), '(time_str)\n', (3804, 3814), True, 'import pandas as pd\n'), ((5315, 5350), 'os.path.exists', 'os.path.exists', (['self.file_ancillary'], {}), '(self.file_ancillary)\n', (5329, 5350), False, 'import os\n'), ((9480, 9504), 'copy.deepcopy', 'deepcopy', (['ws_counter_all'], {}), '(ws_counter_all)\n', (9488, 9504), False, 'from copy import deepcopy\n'), ((9955, 9995), 'lib_utils_io.write_obj', 'write_obj', (['self.file_ancillary', 'data_obj'], {}), '(self.file_ancillary, data_obj)\n', (9964, 9995), False, 'from 
lib_utils_io import read_file_tif, read_obj, write_obj, write_file_tif, create_darray_3d\n'), ((10035, 10064), 'lib_utils_io.read_obj', 'read_obj', (['self.file_ancillary'], {}), '(self.file_ancillary)\n', (10043, 10064), False, 'from lib_utils_io import read_file_tif, read_obj, write_obj, write_file_tif, create_darray_3d\n'), ((10535, 10564), 'lib_utils_io.read_obj', 'read_obj', (['self.file_ancillary'], {}), '(self.file_ancillary)\n', (10543, 10564), False, 'from lib_utils_io import read_file_tif, read_obj, write_obj, write_file_tif, create_darray_3d\n'), ((12001, 12054), 'logging.info', 'logging.info', (['""" ---> Computing statistics ... FAILED"""'], {}), "(' ---> Computing statistics ... FAILED')\n", (12013, 12054), False, 'import logging\n'), ((12067, 12139), 'logging.error', 'logging.error', (['""" ===> File ancillary for statistics part is unavailable"""'], {}), "(' ===> File ancillary for statistics part is unavailable')\n", (12080, 12139), False, 'import logging\n'), ((2618, 2648), 'os.remove', 'os.remove', (['self.file_ancillary'], {}), '(self.file_ancillary)\n', (2627, 2648), False, 'import os\n'), ((3386, 3417), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (3398, 3417), False, 'import os\n'), ((4729, 4819), 'pandas.date_range', 'pd.date_range', ([], {'start': 'time_idx_step', 'periods': '(time_idx_step.days_in_month * 8)', 'freq': '"""3H"""'}), "(start=time_idx_step, periods=time_idx_step.days_in_month * 8,\n freq='3H')\n", (4742, 4819), True, 'import pandas as pd\n'), ((5917, 5981), 'logging.info', 'logging.info', (["(' ----> Analyze period ' + file_time_key + ' ... ')"], {}), "(' ----> Analyze period ' + file_time_key + ' ... 
')\n", (5929, 5981), False, 'import logging\n'), ((6123, 6177), 'numpy.zeros', 'np.zeros', ([], {'shape': '[data_geo.shape[0], data_geo.shape[1]]'}), '(shape=[data_geo.shape[0], data_geo.shape[1]])\n', (6131, 6177), True, 'import numpy as np\n'), ((6221, 6275), 'numpy.zeros', 'np.zeros', ([], {'shape': '[data_geo.shape[0], data_geo.shape[1]]'}), '(shape=[data_geo.shape[0], data_geo.shape[1]])\n', (6229, 6275), True, 'import numpy as np\n'), ((6308, 6375), 'numpy.zeros', 'np.zeros', ([], {'shape': '[data_geo.shape[0], data_geo.shape[1], file_time_n]'}), '(shape=[data_geo.shape[0], data_geo.shape[1], file_time_n])\n', (6316, 6375), True, 'import numpy as np\n'), ((8296, 8333), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['file_time_stamp_tmp'], {}), '(file_time_stamp_tmp)\n', (8312, 8333), True, 'import pandas as pd\n'), ((8359, 8614), 'lib_utils_io.create_darray_3d', 'create_darray_3d', (['file_data_tmp', 'file_time_idx_tmp', 'longitude_geo', 'latitude_geo'], {'geo_1d': '(True)', 'coord_name_x': '"""west_east"""', 'coord_name_y': '"""south_north"""', 'coord_name_time': '"""time"""', 'dim_name_x': '"""west_east"""', 'dim_name_y': '"""south_north"""', 'dim_name_time': '"""time"""'}), "(file_data_tmp, file_time_idx_tmp, longitude_geo,\n latitude_geo, geo_1d=True, coord_name_x='west_east', coord_name_y=\n 'south_north', coord_name_time='time', dim_name_x='west_east',\n dim_name_y='south_north', dim_name_time='time')\n", (8375, 8614), False, 'from lib_utils_io import read_file_tif, read_obj, write_obj, write_file_tif, create_darray_3d\n'), ((9381, 9449), 'logging.info', 'logging.info', (["(' ----> Analyze period ' + file_time_key + ' ... DONE')"], {}), "(' ----> Analyze period ' + file_time_key + ' ... 
DONE')\n", (9393, 9449), False, 'import logging\n'), ((10866, 10937), 'lib_utils_statistics.filter_data', 'filter_data', (['ws_data', 'geo_fc', 'geo_wp'], {'index_name': 'self.drought_index_tag'}), '(ws_data, geo_fc, geo_wp, index_name=self.drought_index_tag)\n', (10877, 10937), False, 'from lib_utils_statistics import filter_data, compute_moments_data_gamma_distribution, compute_norm_data\n'), ((11615, 11650), 'os.path.exists', 'os.path.exists', (['self.file_ancillary'], {}), '(self.file_ancillary)\n', (11629, 11650), False, 'import os\n'), ((11720, 11760), 'lib_utils_io.write_obj', 'write_obj', (['self.file_ancillary', 'data_obj'], {}), '(self.file_ancillary, data_obj)\n', (11729, 11760), False, 'from lib_utils_io import read_file_tif, read_obj, write_obj, write_file_tif, create_darray_3d\n'), ((11777, 11828), 'logging.info', 'logging.info', (['""" ---> Computing statistics ... DONE"""'], {}), "(' ---> Computing statistics ... DONE')\n", (11789, 11828), False, 'import logging\n'), ((11912, 11974), 'logging.info', 'logging.info', (['""" ---> Computing statistics ... PREVIOUSLY DONE"""'], {}), "(' ---> Computing statistics ... 
PREVIOUSLY DONE')\n", (11924, 11974), False, 'import logging\n'), ((13386, 13461), 'lib_utils_system.fill_tags2string', 'fill_tags2string', (['folder_name_dest_raw', 'self.template_tags', 'template_values'], {}), '(folder_name_dest_raw, self.template_tags, template_values)\n', (13402, 13461), False, 'from lib_utils_system import fill_tags2string, make_folder\n'), ((13499, 13572), 'lib_utils_system.fill_tags2string', 'fill_tags2string', (['file_name_dest_raw', 'self.template_tags', 'template_values'], {}), '(file_name_dest_raw, self.template_tags, template_values)\n', (13515, 13572), False, 'from lib_utils_system import fill_tags2string, make_folder\n'), ((13610, 13664), 'os.path.join', 'os.path.join', (['folder_name_dest_def', 'file_name_dest_def'], {}), '(folder_name_dest_def, file_name_dest_def)\n', (13622, 13664), False, 'import os\n'), ((13681, 13714), 'lib_utils_system.make_folder', 'make_folder', (['folder_name_dest_def'], {}), '(folder_name_dest_def)\n', (13692, 13714), False, 'from lib_utils_system import fill_tags2string, make_folder\n'), ((13732, 13803), 'logging.info', 'logging.info', (["(' ----> Dumping filename ' + file_name_dest_def + ' ... ')"], {}), "(' ----> Dumping filename ' + file_name_dest_def + ' ... 
')\n", (13744, 13803), False, 'import logging\n'), ((6479, 6509), 'os.path.exists', 'os.path.exists', (['file_name_step'], {}), '(file_name_step)\n', (6493, 6509), False, 'import os\n'), ((8685, 8710), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (8708, 8710), False, 'import warnings\n'), ((8732, 8763), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (8753, 8763), False, 'import warnings\n'), ((9051, 9076), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (9074, 9076), False, 'import warnings\n'), ((9098, 9129), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (9119, 9129), False, 'import warnings\n'), ((9175, 9209), 'numpy.nanmean', 'np.nanmean', (['da_mean.values'], {'axis': '(2)'}), '(da_mean.values, axis=2)\n', (9185, 9209), True, 'import numpy as np\n'), ((9768, 9831), 'logging.warning', 'logging.warning', (["(' ===> Analyze period ' + ws_key + ' is empty')"], {}), "(' ===> Analyze period ' + ws_key + ' is empty')\n", (9783, 9831), False, 'import logging\n'), ((11024, 11238), 'lib_utils_statistics.compute_moments_data_gamma_distribution', 'compute_moments_data_gamma_distribution', (['ws_filter', 'ws_counter_all', 'ws_counter_filtered'], {'tag_gamma_k': 'self.gamma_k_tag', 'tag_gamma_theta': 'self.gamma_theta_tag', 'tag_gamma_count_ratio': 'self.gamma_count_ratio_tag'}), '(ws_filter, ws_counter_all,\n ws_counter_filtered, tag_gamma_k=self.gamma_k_tag, tag_gamma_theta=self\n .gamma_theta_tag, tag_gamma_count_ratio=self.gamma_count_ratio_tag)\n', (11063, 11238), False, 'from lib_utils_statistics import filter_data, compute_moments_data_gamma_distribution, compute_norm_data\n'), ((11345, 11420), 'logging.error', 'logging.error', (['""" ===> Statistical moments for index type are not available"""'], {}), "(' ===> Statistical moments for index type are not available')\n", (11358, 11420), False, 'import logging\n'), ((11672, 11702), 
'os.remove', 'os.remove', (['self.file_ancillary'], {}), '(self.file_ancillary)\n', (11681, 11702), False, 'import os\n'), ((13878, 13912), 'os.path.exists', 'os.path.exists', (['file_path_dest_def'], {}), '(file_path_dest_def)\n', (13892, 13912), False, 'import os\n'), ((13992, 14026), 'os.path.exists', 'os.path.exists', (['file_path_dest_def'], {}), '(file_path_dest_def)\n', (14006, 14026), False, 'import os\n'), ((14048, 14189), 'lib_utils_io.write_file_tif', 'write_file_tif', (['file_path_dest_def', 'dataset_list', 'data_wide', 'data_high', 'self.data_transform', 'self.data_proj'], {'file_metadata': 'metadata_list'}), '(file_path_dest_def, dataset_list, data_wide, data_high, self\n .data_transform, self.data_proj, file_metadata=metadata_list)\n', (14062, 14189), False, 'from lib_utils_io import read_file_tif, read_obj, write_obj, write_file_tif, create_darray_3d\n'), ((14275, 14350), 'logging.info', 'logging.info', (["(' ----> Dumping filename ' + file_name_dest_def + ' ... DONE')"], {}), "(' ----> Dumping filename ' + file_name_dest_def + ' ... DONE')\n", (14287, 14350), False, 'import logging\n'), ((14393, 14483), 'logging.info', 'logging.info', (["(' ----> Dumping filename ' + file_name_dest_def + ' ... PREVIOUSLY DONE')"], {}), "(' ----> Dumping filename ' + file_name_dest_def +\n ' ... PREVIOUSLY DONE')\n", (14405, 14483), False, 'import logging\n'), ((8178, 8263), 'logging.warning', 'logging.warning', (["(' ===> Open ' + file_name_step + ' FAILED. File does not exist')"], {}), "(' ===> Open ' + file_name_step + ' FAILED. 
File does not exist'\n )\n", (8193, 8263), False, 'import logging\n'), ((13938, 13967), 'os.remove', 'os.remove', (['file_path_dest_def'], {}), '(file_path_dest_def)\n', (13947, 13967), False, 'import os\n'), ((6621, 6650), 'lib_utils_io.read_file_tif', 'read_file_tif', (['file_name_step'], {}), '(file_name_step)\n', (6634, 6650), False, 'from lib_utils_io import read_file_tif, read_obj, write_obj, write_file_tif, create_darray_3d\n'), ((6778, 6821), 're.search', 're.search', (['self.time_regexp', 'file_name_step'], {}), '(self.time_regexp, file_name_step)\n', (6787, 6821), False, 'import re\n'), ((6921, 6943), 'pandas.Timestamp', 'pd.Timestamp', (['time_str'], {}), '(time_str)\n', (6933, 6943), True, 'import pandas as pd\n'), ((7044, 7069), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (7067, 7069), False, 'import warnings\n'), ((7103, 7134), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (7124, 7134), False, 'import warnings\n'), ((7193, 7220), 'numpy.isfinite', 'np.isfinite', (['file_data_step'], {}), '(file_data_step)\n', (7204, 7220), True, 'import numpy as np\n'), ((7317, 7344), 'numpy.isfinite', 'np.isfinite', (['file_data_step'], {}), '(file_data_step)\n', (7328, 7344), True, 'import numpy as np\n')] |
"""
This module formulate the FlowOCT problem in gurobipy.
"""
from gurobipy import Model, GRB, quicksum, LinExpr
import numpy as np
import pandas as pd
class FlowOPT_Robust:
    """Prescriptive-tree MIP where the objective uses either the Direct
    Method (``robust=False``) or the Doubly Robust estimator
    (``robust=True``) of the counterfactual outcomes."""

    def __init__(self, X, t, y, ipw, y_hat, robust, treatments_set, tree, X_col_labels,
                 time_limit, num_threads):
        """
        :param X: numpy matrix or pandas dataframe of covariates
        :param t: numpy array or pandas series/dataframe of treatment assignments
        :param y: numpy array or pandas series/dataframe of observed outcomes
        :param ipw: numpy array or pandas series/dataframe of inverse propensity weights
        :param y_hat: numpy matrix or pandas dataframe of counterfactual estimates
        :param robust: Boolean indicating whether or not the FlowOPT method should be Doubly Robust (True)
            or Direct Method (False)
        :param treatments_set: a list or set of all possible treatments
        :param tree: Tree object
        :param X_col_labels: a list of features in the covariate space X
        :param time_limit: The given time limit for solving the MIP
        :param num_threads: Number of threads for the solver to use
        """
        # self.mode = mode
        self.X = pd.DataFrame(X, columns=X_col_labels)
        self.y = y
        self.t = t
        self.ipw = ipw
        self.y_hat = y_hat
        self.robust = robust
        self.treatments_set = treatments_set
        self.X_col_labels = X_col_labels
        # datapoints holds the row indices of the training data
        self.datapoints = np.arange(0, self.X.shape[0])
        self.tree = tree

        # Decision Variables (placeholders until create_main_problem runs)
        self.b = 0      # b[n, f]: branch on feature f at internal node n
        self.p = 0      # p[n]: node n is a prediction (treatment) node
        self.w = 0      # w[n, k]: treatment k is assigned at node n
        self.zeta = 0   # zeta[i, n, k]: flow of point i absorbed at n with treatment k
        self.z = 0      # z[i, n]: flow of datapoint i entering node n

        # Gurobi model
        self.model = Model("FlowOPT")
        if num_threads is not None:
            self.model.params.Threads = num_threads
        self.model.params.TimeLimit = time_limit

    ###########################################################
    # Create the MIP formulation
    ###########################################################
    def create_main_problem(self):
        """
        Build the FlowOPT_Robust formulation (variables, flow constraints and
        objective) on ``self.model``.

        NOTE(review): despite the original docstring, this method populates
        ``self.model`` in place and does not return it.
        """
        # decision variables
        self.b = self.model.addVars(self.tree.Nodes, self.X_col_labels, vtype=GRB.BINARY, name='b')
        self.p = self.model.addVars(self.tree.Nodes + self.tree.Leaves, vtype=GRB.BINARY, name='p')
        self.w = self.model.addVars(self.tree.Nodes + self.tree.Leaves, self.treatments_set, vtype=GRB.CONTINUOUS,
                                    lb=0,
                                    name='w')
        self.zeta = self.model.addVars(self.datapoints, self.tree.Nodes + self.tree.Leaves, self.treatments_set,
                                       vtype=GRB.CONTINUOUS, lb=0, name='zeta')
        self.z = self.model.addVars(self.datapoints, self.tree.Nodes + self.tree.Leaves, vtype=GRB.CONTINUOUS, lb=0,
                                    name='z')

        ############################### define constraints

        # flow conservation:
        # z[i,n] = z[i,l(n)] + z[i,r(n)] + zeta[i,n]     forall i, n in Nodes
        for n in self.tree.Nodes:
            n_left = int(self.tree.get_left_children(n))
            n_right = int(self.tree.get_right_children(n))
            self.model.addConstrs(
                (self.z[i, n] == self.z[i, n_left] + self.z[i, n_right] + quicksum(
                    self.zeta[i, n, k] for k in self.treatments_set)) for i in self.datapoints)

        # flow goes left only if node n branches on a feature that is 0 for i:
        # z[i,l(n)] <= sum(b[n,f], f if x[i,f]<=0)       forall i, n in Nodes
        for i in self.datapoints:
            self.model.addConstrs((self.z[i, int(self.tree.get_left_children(n))] <= quicksum(
                self.b[n, f] for f in self.X_col_labels if self.X.at[i, f] <= 0)) for n in self.tree.Nodes)

        # flow goes right only if node n branches on a feature that is 1 for i:
        # z[i,r(n)] <= sum(b[n,f], f if x[i,f]=1)        forall i, n in Nodes
        for i in self.datapoints:
            self.model.addConstrs((self.z[i, int(self.tree.get_right_children(n))] <= quicksum(
                self.b[n, f] for f in self.X_col_labels if self.X.at[i, f] == 1)) for n in self.tree.Nodes)

        # every internal node either branches, predicts here, or lies below a
        # predicting ancestor:
        # sum(b[n,f], f) + p[n] + sum(p[m], m in A(n)) = 1   forall n in Nodes
        self.model.addConstrs(
            (quicksum(self.b[n, f] for f in self.X_col_labels) + self.p[n] + quicksum(
                self.p[m] for m in self.tree.get_ancestors(n)) == 1) for n in
            self.tree.Nodes)

        # each leaf predicts unless an ancestor already does:
        # p[n] + sum(p[m], m in A(n)) = 1                forall n in Leaves
        self.model.addConstrs(
            (self.p[n] + quicksum(
                self.p[m] for m in self.tree.get_ancestors(n)) == 1) for n in
            self.tree.Leaves)

        # absorbed flow is bounded by the treatment assigned at the node:
        # zeta[i,n,k] <= w[n,k]                          for all n in N+L, i, k
        for n in self.tree.Nodes + self.tree.Leaves:
            for k in self.treatments_set:
                self.model.addConstrs(
                    self.zeta[i, n, k] <= self.w[n, k] for i in self.datapoints)

        # a predicting node assigns exactly one treatment (in total):
        # sum(w[n,k], k in treatments) = p[n]
        self.model.addConstrs(
            (quicksum(self.w[n, k] for k in self.treatments_set) == self.p[n]) for n in
            self.tree.Nodes + self.tree.Leaves)

        # all flow reaching a leaf must be absorbed there
        for n in self.tree.Leaves:
            self.model.addConstrs(
                quicksum(self.zeta[i, n, k] for k in self.treatments_set) == self.z[i, n] for i in self.datapoints)

        # unit flow enters the root (node 1) for every datapoint
        self.model.addConstrs(self.z[i, 1] == 1 for i in self.datapoints)

        # define objective function: maximize the estimated outcome of the
        # prescribed policy
        obj = LinExpr(0)
        for i in self.datapoints:
            for n in self.tree.Nodes + self.tree.Leaves:
                for k in self.treatments_set:
                    # direct-method term: counterfactual estimate y_hat[i][k]
                    obj.add(self.zeta[i, n, k] * (self.y_hat[i][int(k)]))  # we assume that each column corresponds to an ordered list t, which might be problematic
                    treat = self.t[i]
                    if self.robust:
                        # doubly-robust correction: IPW-reweighted residual,
                        # added only for the treatment actually observed
                        if int(treat) == int(k):
                            obj.add(self.zeta[i, n, k] * (
                                    self.y[i] - self.y_hat[i][int(k)]) /
                                    self.ipw[i])

        self.model.setObjective(obj, GRB.MAXIMIZE)
class FlowOPT_IPW:
    """Prescriptive-tree MIP whose objective weights each observed outcome by
    its inverse propensity weight (IPW estimator)."""

    def __init__(self, X, t, y, ipw, treatments_set, tree, X_col_labels,
                 time_limit, num_threads):
        """
        :param X: numpy matrix or pandas dataframe of covariates
        :param t: numpy array or pandas series/dataframe of treatment assignments
        :param y: numpy array or pandas series/dataframe of observed outcomes
        :param ipw: numpy array or pandas series/dataframe of inverse propensity weights
        :param treatments_set: a list or set of all possible treatments
        :param tree: Tree object
        :param X_col_labels: a list of features in the covariate space X
        :param time_limit: The given time limit for solving the MIP
        :param num_threads: Number of threads for the solver to use
        """
        self.X = pd.DataFrame(X, columns=X_col_labels)
        self.y = y
        self.t = t
        self.ipw = ipw
        self.treatments_set = treatments_set
        self.X_col_labels = X_col_labels
        # datapoints contains the indicies of our training data
        self.datapoints = np.arange(0, self.X.shape[0])
        self.tree = tree

        # Decision Variables (placeholders until create_main_problem runs)
        self.b = 0      # b[n, f]: branch on feature f at internal node n
        self.p = 0      # p[n]: node n is a prediction (treatment) node
        self.w = 0      # w[n, k]: treatment k is assigned at node n
        self.zeta = 0   # zeta[i, n]: flow of datapoint i absorbed at node n
        self.z = 0      # z[i, n]: flow of datapoint i entering node n

        # Gurobi model
        self.model = Model("IPW")
        if num_threads is not None:
            self.model.params.Threads = num_threads
        self.model.params.TimeLimit = time_limit

    ###########################################################
    # Create the MIP formulation
    ###########################################################
    def create_main_problem(self):
        """
        Build the FlowOPT_IPW formulation (variables, flow constraints and
        objective) on ``self.model``.

        NOTE(review): despite the original docstring, this method populates
        ``self.model`` in place and does not return it.
        """
        ############################### define variables
        self.b = self.model.addVars(self.tree.Nodes, self.X_col_labels, vtype=GRB.BINARY, name='b')
        self.p = self.model.addVars(self.tree.Nodes + self.tree.Leaves, vtype=GRB.BINARY, name='p')
        self.w = self.model.addVars(self.tree.Nodes + self.tree.Leaves, self.treatments_set, vtype=GRB.CONTINUOUS,
                                    lb=0,
                                    name='w')
        self.zeta = self.model.addVars(self.datapoints, self.tree.Nodes + self.tree.Leaves, vtype=GRB.CONTINUOUS,
                                       lb=0,
                                       name='zeta')
        self.z = self.model.addVars(self.datapoints, self.tree.Nodes + self.tree.Leaves, vtype=GRB.CONTINUOUS, lb=0,
                                    name='z')

        ############################### define constraints

        # flow conservation:
        # z[i,n] = z[i,l(n)] + z[i,r(n)] + zeta[i,n]     forall i, n in Nodes
        for n in self.tree.Nodes:
            n_left = int(self.tree.get_left_children(n))
            n_right = int(self.tree.get_right_children(n))
            self.model.addConstrs(
                (self.z[i, n] == self.z[i, n_left] + self.z[i, n_right] + self.zeta[i, n]) for i in self.datapoints)

        # flow goes left only if node n branches on a feature that is 0 for i:
        # z[i,l(n)] <= sum(b[n,f], f if x[i,f]<=0)       forall i, n in Nodes
        for i in self.datapoints:
            self.model.addConstrs((self.z[i, int(self.tree.get_left_children(n))] <= quicksum(
                self.b[n, f] for f in self.X_col_labels if self.X.at[i, f] <= 0)) for n in self.tree.Nodes)

        # flow goes right only if node n branches on a feature that is 1 for i:
        # z[i,r(n)] <= sum(b[n,f], f if x[i,f]=1)        forall i, n in Nodes
        for i in self.datapoints:
            self.model.addConstrs((self.z[i, int(self.tree.get_right_children(n))] <= quicksum(
                self.b[n, f] for f in self.X_col_labels if self.X.at[i, f] == 1)) for n in self.tree.Nodes)

        # every internal node either branches, predicts here, or lies below a
        # predicting ancestor:
        # sum(b[n,f], f) + p[n] + sum(p[m], m in A(n)) = 1   forall n in Nodes
        self.model.addConstrs(
            (quicksum(self.b[n, f] for f in self.X_col_labels) + self.p[n] + quicksum(
                self.p[m] for m in self.tree.get_ancestors(n)) == 1) for n in
            self.tree.Nodes)

        # each leaf predicts unless an ancestor already does:
        # p[n] + sum(p[m], m in A(n)) = 1                forall n in Leaves
        self.model.addConstrs(
            (self.p[n] + quicksum(
                self.p[m] for m in self.tree.get_ancestors(n)) == 1) for n in
            self.tree.Leaves)

        # flow can only be absorbed at n if n prescribes the treatment that
        # datapoint i actually received:
        # zeta[i,n] <= w[n,T[i]]                         for all n in N+L, i
        for n in self.tree.Nodes + self.tree.Leaves:
            self.model.addConstrs(
                self.zeta[i, n] <= self.w[n, self.t[i]] for i in self.datapoints)

        # a predicting node assigns exactly one treatment (in total):
        # sum(w[n,k], k in treatments) = p[n]
        self.model.addConstrs(
            (quicksum(self.w[n, k] for k in self.treatments_set) == self.p[n]) for n in
            self.tree.Nodes + self.tree.Leaves)

        # all flow reaching a leaf must be absorbed there
        for n in self.tree.Leaves:
            self.model.addConstrs(self.zeta[i, n] == self.z[i, n] for i in self.datapoints)

        # define objective function: IPW-weighted observed outcomes of the
        # datapoints whose observed treatment matches the prescription
        obj = LinExpr(0)
        for i in self.datapoints:
            obj.add(self.z[i, 1] * (self.y[i]) / self.ipw[i])

        self.model.setObjective(obj, GRB.MAXIMIZE)
| [
"pandas.DataFrame",
"gurobipy.Model",
"numpy.arange",
"gurobipy.quicksum",
"gurobipy.LinExpr"
] | [((1292, 1329), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': 'X_col_labels'}), '(X, columns=X_col_labels)\n', (1304, 1329), True, 'import pandas as pd\n'), ((1560, 1589), 'numpy.arange', 'np.arange', (['(0)', 'self.X.shape[0]'], {}), '(0, self.X.shape[0])\n', (1569, 1589), True, 'import numpy as np\n'), ((1789, 1805), 'gurobipy.Model', 'Model', (['"""FlowOPT"""'], {}), "('FlowOPT')\n", (1794, 1805), False, 'from gurobipy import Model, GRB, quicksum, LinExpr\n'), ((5568, 5578), 'gurobipy.LinExpr', 'LinExpr', (['(0)'], {}), '(0)\n', (5575, 5578), False, 'from gurobipy import Model, GRB, quicksum, LinExpr\n'), ((7096, 7133), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {'columns': 'X_col_labels'}), '(X, columns=X_col_labels)\n', (7108, 7133), True, 'import pandas as pd\n'), ((7372, 7401), 'numpy.arange', 'np.arange', (['(0)', 'self.X.shape[0]'], {}), '(0, self.X.shape[0])\n', (7381, 7401), True, 'import numpy as np\n'), ((7601, 7613), 'gurobipy.Model', 'Model', (['"""IPW"""'], {}), "('IPW')\n", (7606, 7613), False, 'from gurobipy import Model, GRB, quicksum, LinExpr\n'), ((11204, 11214), 'gurobipy.LinExpr', 'LinExpr', (['(0)'], {}), '(0)\n', (11211, 11214), False, 'from gurobipy import Model, GRB, quicksum, LinExpr\n'), ((5132, 5183), 'gurobipy.quicksum', 'quicksum', (['(self.w[n, k] for k in self.treatments_set)'], {}), '(self.w[n, k] for k in self.treatments_set)\n', (5140, 5183), False, 'from gurobipy import Model, GRB, quicksum, LinExpr\n'), ((10902, 10953), 'gurobipy.quicksum', 'quicksum', (['(self.w[n, k] for k in self.treatments_set)'], {}), '(self.w[n, k] for k in self.treatments_set)\n', (10910, 10953), False, 'from gurobipy import Model, GRB, quicksum, LinExpr\n'), ((3798, 3871), 'gurobipy.quicksum', 'quicksum', (['(self.b[n, f] for f in self.X_col_labels if self.X.at[i, f] <= 0)'], {}), '(self.b[n, f] for f in self.X_col_labels if self.X.at[i, f] <= 0)\n', (3806, 3871), False, 'from gurobipy import Model, GRB, quicksum, LinExpr\n'), ((4111, 
4184), 'gurobipy.quicksum', 'quicksum', (['(self.b[n, f] for f in self.X_col_labels if self.X.at[i, f] == 1)'], {}), '(self.b[n, f] for f in self.X_col_labels if self.X.at[i, f] == 1)\n', (4119, 4184), False, 'from gurobipy import Model, GRB, quicksum, LinExpr\n'), ((5342, 5399), 'gurobipy.quicksum', 'quicksum', (['(self.zeta[i, n, k] for k in self.treatments_set)'], {}), '(self.zeta[i, n, k] for k in self.treatments_set)\n', (5350, 5399), False, 'from gurobipy import Model, GRB, quicksum, LinExpr\n'), ((9613, 9686), 'gurobipy.quicksum', 'quicksum', (['(self.b[n, f] for f in self.X_col_labels if self.X.at[i, f] <= 0)'], {}), '(self.b[n, f] for f in self.X_col_labels if self.X.at[i, f] <= 0)\n', (9621, 9686), False, 'from gurobipy import Model, GRB, quicksum, LinExpr\n'), ((9926, 9999), 'gurobipy.quicksum', 'quicksum', (['(self.b[n, f] for f in self.X_col_labels if self.X.at[i, f] == 1)'], {}), '(self.b[n, f] for f in self.X_col_labels if self.X.at[i, f] == 1)\n', (9934, 9999), False, 'from gurobipy import Model, GRB, quicksum, LinExpr\n'), ((3497, 3554), 'gurobipy.quicksum', 'quicksum', (['(self.zeta[i, n, k] for k in self.treatments_set)'], {}), '(self.zeta[i, n, k] for k in self.treatments_set)\n', (3505, 3554), False, 'from gurobipy import Model, GRB, quicksum, LinExpr\n'), ((4353, 4402), 'gurobipy.quicksum', 'quicksum', (['(self.b[n, f] for f in self.X_col_labels)'], {}), '(self.b[n, f] for f in self.X_col_labels)\n', (4361, 4402), False, 'from gurobipy import Model, GRB, quicksum, LinExpr\n'), ((10168, 10217), 'gurobipy.quicksum', 'quicksum', (['(self.b[n, f] for f in self.X_col_labels)'], {}), '(self.b[n, f] for f in self.X_col_labels)\n', (10176, 10217), False, 'from gurobipy import Model, GRB, quicksum, LinExpr\n')] |
import numpy as np
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
    """Predator-prey ("tag") scenario: three slower, larger adversaries are
    rewarded for colliding with one faster good agent, which is penalized
    when caught and for leaving the arena; two landmarks act as obstacles.
    """
    def make_world(self, **kwargs):
        """Build the world: 3 adversary agents, 1 good agent, 2 landmarks."""
        self.before_make_world(**kwargs)
        world = World()
        world.np_random = self.np_random
        # set any world properties first
        world.dim_c = 2
        num_good_agents = 1
        num_adversaries = 3
        num_agents = num_adversaries + num_good_agents
        num_landmarks = 2
        # add agents: the first num_adversaries entries are the pursuers
        world.agents = [Agent() for _ in range(num_agents)]
        for i, agent in enumerate(world.agents):
            agent.name = 'agent %d' % i
            agent.collide = True
            agent.silent = True
            agent.adversary = True if i < num_adversaries else False
            # adversaries are larger, slower and accelerate less than the prey
            agent.size = 0.075 if agent.adversary else 0.05
            agent.accel = 3.0 if agent.adversary else 4.0
            #agent.accel = 20.0 if agent.adversary else 25.0
            agent.max_speed = 1.0 if agent.adversary else 1.3
            self.change_entity_attribute(agent, **kwargs)
        # add landmarks (static, collidable obstacles)
        world.landmarks = [Landmark() for _ in range(num_landmarks)]
        for i, landmark in enumerate(world.landmarks):
            landmark.name = 'landmark %d' % i
            landmark.collide = True
            landmark.movable = False
            landmark.size = 0.2
            landmark.boundary = False
            self.change_entity_attribute(landmark, **kwargs)
        # make initial conditions
        self.reset_world(world)
        return world
    def reset_world(self, world):
        """Reset entity colors and draw random initial positions/velocities."""
        # agent colors: green for good agents, red for adversaries
        for i, agent in enumerate(world.agents):
            agent.color = np.array([0.35, 0.85, 0.35]) if not agent.adversary else np.array([0.85, 0.35, 0.35])
        # landmarks are dark gray
        for i, landmark in enumerate(world.landmarks):
            landmark.color = np.array([0.25, 0.25, 0.25])
        # set random initial states
        for agent in world.agents:
            agent.state.p_pos = self.np_random.uniform(-1, +1, world.dim_p)
            agent.state.p_vel = np.zeros(world.dim_p)
            agent.state.c = np.zeros(world.dim_c)
        for i, landmark in enumerate(world.landmarks):
            if not landmark.boundary:
                # keep obstacles slightly inside the [-1, 1] arena
                landmark.state.p_pos = self.np_random.uniform(-0.9, +0.9, world.dim_p)
                landmark.state.p_vel = np.zeros(world.dim_p)
    def benchmark_data(self, agent, world):
        """Benchmarking metric: number of collisions between this adversary
        and the good agents (always 0 for good agents)."""
        if agent.adversary:
            collisions = 0
            for a in self.good_agents(world):
                if self.is_collision(a, agent):
                    collisions += 1
            return collisions
        else:
            return 0
    def is_collision(self, agent1, agent2):
        """Two agents collide when the distance between their centers is
        smaller than the sum of their radii."""
        delta_pos = agent1.state.p_pos - agent2.state.p_pos
        dist = np.sqrt(np.sum(np.square(delta_pos)))
        dist_min = agent1.size + agent2.size
        return True if dist < dist_min else False
    # return all agents that are not adversaries
    def good_agents(self, world):
        """Return the list of non-adversary (good) agents."""
        return [agent for agent in world.agents if not agent.adversary]
    # return all adversarial agents
    def adversaries(self, world):
        """Return the list of adversary agents."""
        return [agent for agent in world.agents if agent.adversary]
    def reward(self, agent, world):
        # dispatch to the adversary or good-agent reward function
        main_reward = self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world)
        return main_reward
    def agent_reward(self, agent, world):
        """Good-agent reward: -10 per collision with an adversary plus a
        growing penalty for leaving the arena."""
        # Agents are negatively rewarded if caught by adversaries
        rew = 0
        shape = False
        adversaries = self.adversaries(world)
        if shape:  # reward can optionally be shaped (increased reward for increased distance from adversary)
            for adv in adversaries:
                rew += 0.1 * np.sqrt(np.sum(np.square(agent.state.p_pos - adv.state.p_pos)))
        if agent.collide:
            for a in adversaries:
                if self.is_collision(a, agent):
                    rew -= 10
        # agents are penalized for exiting the screen, so that they can be caught by the adversaries
        def bound(x):
            # zero inside |x| < 0.9, linear ramp up to |x| = 1,
            # then exponential growth capped at 10
            if x < 0.9:
                return 0
            if x < 1.0:
                return (x - 0.9) * 10
            return min(np.exp(2 * x - 2), 10)
        for p in range(world.dim_p):
            x = abs(agent.state.p_pos[p])
            rew -= bound(x)
        return rew
    def adversary_reward(self, agent, world):
        """Adversary reward: +10 for every collision between any adversary
        and a good agent (shared team reward)."""
        # Adversaries are rewarded for collisions with agents
        rew = 0
        shape = False
        agents = self.good_agents(world)
        adversaries = self.adversaries(world)
        if shape:  # reward can optionally be shaped (decreased reward for increased distance from agents)
            for adv in adversaries:
                rew -= 0.1 * min([np.sqrt(np.sum(np.square(a.state.p_pos - adv.state.p_pos))) for a in agents])
        if agent.collide:
            for ag in agents:
                for adv in adversaries:
                    if self.is_collision(ag, adv):
                        rew += 10
        return rew
    def observation(self, agent, world):
        """Observation in the agent's own frame: own velocity and position,
        relative positions of non-boundary landmarks and of the other
        agents, and the velocities of the non-adversary agents."""
        # get positions of all entities in this agent's reference frame
        entity_pos = []
        for entity in world.landmarks:
            if not entity.boundary:
                entity_pos.append(entity.state.p_pos - agent.state.p_pos)
        # communication of all other agents
        # NOTE(review): comm is collected but not included in the returned
        # observation vector below — confirm whether this is intentional.
        comm = []
        other_pos = []
        other_vel = []
        for other in world.agents:
            if other is agent: continue
            comm.append(other.state.c)
            other_pos.append(other.state.p_pos - agent.state.p_pos)
            if not other.adversary:
                other_vel.append(other.state.p_vel)
        return np.concatenate([agent.state.p_vel] + [agent.state.p_pos] + entity_pos + other_pos + other_vel)
| [
"numpy.square",
"numpy.zeros",
"multiagent.core.Landmark",
"numpy.array",
"numpy.exp",
"multiagent.core.World",
"numpy.concatenate",
"multiagent.core.Agent"
] | [((241, 248), 'multiagent.core.World', 'World', ([], {}), '()\n', (246, 248), False, 'from multiagent.core import World, Agent, Landmark\n'), ((5981, 6079), 'numpy.concatenate', 'np.concatenate', (['([agent.state.p_vel] + [agent.state.p_pos] + entity_pos + other_pos + other_vel\n )'], {}), '([agent.state.p_vel] + [agent.state.p_pos] + entity_pos +\n other_pos + other_vel)\n', (5995, 6079), True, 'import numpy as np\n'), ((546, 553), 'multiagent.core.Agent', 'Agent', ([], {}), '()\n', (551, 553), False, 'from multiagent.core import World, Agent, Landmark\n'), ((1164, 1174), 'multiagent.core.Landmark', 'Landmark', ([], {}), '()\n', (1172, 1174), False, 'from multiagent.core import World, Agent, Landmark\n'), ((1964, 1992), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25])\n', (1972, 1992), True, 'import numpy as np\n'), ((2172, 2193), 'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (2180, 2193), True, 'import numpy as np\n'), ((2222, 2243), 'numpy.zeros', 'np.zeros', (['world.dim_c'], {}), '(world.dim_c)\n', (2230, 2243), True, 'import numpy as np\n'), ((1748, 1776), 'numpy.array', 'np.array', (['[0.35, 0.85, 0.35]'], {}), '([0.35, 0.85, 0.35])\n', (1756, 1776), True, 'import numpy as np\n'), ((1805, 1833), 'numpy.array', 'np.array', (['[0.85, 0.35, 0.35]'], {}), '([0.85, 0.35, 0.35])\n', (1813, 1833), True, 'import numpy as np\n'), ((2463, 2484), 'numpy.zeros', 'np.zeros', (['world.dim_p'], {}), '(world.dim_p)\n', (2471, 2484), True, 'import numpy as np\n'), ((2964, 2984), 'numpy.square', 'np.square', (['delta_pos'], {}), '(delta_pos)\n', (2973, 2984), True, 'import numpy as np\n'), ((4462, 4479), 'numpy.exp', 'np.exp', (['(2 * x - 2)'], {}), '(2 * x - 2)\n', (4468, 4479), True, 'import numpy as np\n'), ((4017, 4063), 'numpy.square', 'np.square', (['(agent.state.p_pos - adv.state.p_pos)'], {}), '(agent.state.p_pos - adv.state.p_pos)\n', (4026, 4063), True, 'import numpy as np\n'), ((5038, 5080), 'numpy.square', 
'np.square', (['(a.state.p_pos - adv.state.p_pos)'], {}), '(a.state.p_pos - adv.state.p_pos)\n', (5047, 5080), True, 'import numpy as np\n')] |
import os
import pytest
import numpy as np
from quantum_systems import (
BasisSet,
GeneralOrbitalSystem,
TwoDimensionalDoubleWell,
TwoDimensionalHarmonicOscillator,
)
from quantum_systems.quantum_dots.two_dim.two_dim_helper import (
get_double_well_one_body_elements,
theta_1_tilde_integral,
theta_2_tilde_integral,
)
def theta_1_tilde_integral_wolfram(m_p, m_q):
    """Closed-form (Wolfram Alpha) reference value of the theta_1 tilde
    integral between angular-momentum quantum numbers ``m_p`` and ``m_q``.

    Returns 0 when ``|m_p - m_q| == 1``, where the closed-form denominator
    vanishes and the integral is treated separately.
    """
    diff = m_q - m_p
    if abs(diff) == 1:
        return 0
    phase = np.exp(1j * np.pi * diff)
    half_phase = np.exp(1j * np.pi * diff / 2)
    numerator = -1j * (diff * phase - diff - 2j * half_phase) * (1 + phase)
    return numerator / (diff ** 2 - 1)
def theta_2_tilde_integral_wolfram(m_p, m_q):
    """Closed-form (Wolfram Alpha) reference value of the theta_2 tilde
    integral; returns 0 for the singular case ``|m_p - m_q| == 1``.
    """
    diff = m_q - m_p
    if abs(diff) == 1:
        return 0
    phase_factor = 1 + np.exp(1j * np.pi * diff)
    return -phase_factor ** 2 / (diff ** 2 - 1)
def test_theta_1_tilde_integral():
    """The implementation must agree with the Wolfram closed form on a grid."""
    for m_p in range(-100, 101):
        for m_q in range(-100, 101):
            reference = theta_1_tilde_integral_wolfram(m_p, m_q)
            assert abs(reference - theta_1_tilde_integral(m_p, m_q)) < 1e-10
def test_theta_2_tilde_integral():
    """The implementation must agree with the Wolfram closed form on a grid."""
    for m_p in range(-100, 101):
        for m_q in range(-100, 101):
            reference = theta_2_tilde_integral_wolfram(m_p, m_q)
            assert abs(reference - theta_2_tilde_integral(m_p, m_q)) < 1e-10
def test_zero_barrier():
    """Check that a double well with zero barrier strength reproduces the
    regular two-dimensional harmonic oscillator system.
    """
    # Note: the unused particle-number variable has been removed; only the
    # single-particle basis parameters matter for this comparison.
    l = 12
    radius = 10
    num_grid_points = 401
    tddw = TwoDimensionalDoubleWell(
        l, radius, num_grid_points, barrier_strength=0, axis=0
    )
    tdho = TwoDimensionalHarmonicOscillator(l, radius, num_grid_points)
    # One-body Hamiltonian, two-body interaction and single-particle
    # functions should all coincide when the barrier vanishes.
    np.testing.assert_allclose(tddw.h, tdho.h, atol=1e-7)
    np.testing.assert_allclose(tddw.u, tdho.u, atol=1e-7)
    np.testing.assert_allclose(tddw.spf, tdho.spf, atol=1e-7)
def test_spf_energies():
    """Check the lowest single-particle energies of the double-well basis
    against precomputed reference values.
    """
    test_energies = np.array(
        [0.81129823, 1.37162083, 1.93581042, 2.21403823, 2.37162083, 2.93581042]
    )
    # Unused locals (particle number, radius, grid size) removed — only the
    # parameters below enter the one-body matrix construction.
    l = 6
    omega = 1
    mass = 1
    barrier_strength = 2
    axis = 1
    h_dw = get_double_well_one_body_elements(
        l, omega, mass, barrier_strength, dtype=np.complex128, axis=axis
    )
    # Only the eigenvalues are needed here; the eigenvectors are discarded.
    epsilon, _ = np.linalg.eigh(h_dw)
    np.testing.assert_allclose(epsilon[: len(test_energies)], test_energies)
def test_change_of_basis():
    """Construct a double-well system from a harmonic-oscillator system.

    Diagonalizing the double-well one-body Hamiltonian yields a coefficient
    matrix C; applying the change-of-basis with C to both systems must make
    them agree.
    """
    n = 2
    l = 12
    omega = 1
    mass = 1
    barrier_strength = 3
    radius = 10
    num_grid_points = 401
    axis = 0
    ho_basis = TwoDimensionalHarmonicOscillator(
        l, radius, num_grid_points, omega=omega
    )
    tdho = GeneralOrbitalSystem(n, ho_basis)
    dw_basis = TwoDimensionalDoubleWell(
        l,
        radius,
        num_grid_points,
        omega=omega,
        mass=mass,
        barrier_strength=barrier_strength,
        axis=axis,
    )
    tddw = GeneralOrbitalSystem(n, dw_basis)
    h_dw = get_double_well_one_body_elements(
        l, omega, mass, barrier_strength, dtype=np.complex128, axis=axis
    )
    _, C_dw = np.linalg.eigh(h_dw)
    C = BasisSet.add_spin_one_body(C_dw, np=np)
    tdho.change_basis(C)
    tddw.change_basis(C)
    np.testing.assert_allclose(tdho.u, tddw.u, atol=1e-7)
    np.testing.assert_allclose(tdho.spf, tddw.spf, atol=1e-7)
@pytest.fixture(scope="module")
def get_tddw():
    """Module-scoped two-particle double-well general-orbital system."""
    n = 2
    l = 10
    dw_basis = TwoDimensionalDoubleWell(
        l,
        8,  # radius
        201,  # num_grid_points
        barrier_strength=3,
        omega=0.8,
        axis=0,
    )
    return GeneralOrbitalSystem(n, dw_basis)
def test_tddw(get_tddw):
    """Compare the double-well system against stored reference arrays."""
    tddw = get_tddw
    h_dw = get_double_well_one_body_elements(
        tddw.l // 2,
        tddw._basis_set.omega,
        tddw._basis_set.mass,
        tddw._basis_set.barrier_strength,
        dtype=np.complex128,
        axis=0,
    )
    _, C_dw = np.linalg.eigh(h_dw)
    C = BasisSet.add_spin_one_body(C_dw, np=np)
    tddw.change_basis(C[:, : tddw.l])

    def load_reference(name):
        # Stored reference arrays live under tests/dat.
        return np.load(os.path.join("tests", "dat", name))

    dip = load_reference("tddw_dipole_moment.npy")
    np.testing.assert_allclose(
        np.abs(dip), np.abs(tddw.dipole_moment), atol=1e-10
    )
    np.testing.assert_allclose(load_reference("tddw_h.npy"), tddw.h, atol=1e-10)
    np.testing.assert_allclose(
        np.abs(load_reference("tddw_u.npy")), np.abs(tddw.u), atol=1e-10
    )
    np.testing.assert_allclose(
        np.abs(load_reference("tddw_spf.npy")), np.abs(tddw.spf), atol=1e-10
    )
| [
"numpy.abs",
"quantum_systems.quantum_dots.two_dim.two_dim_helper.get_double_well_one_body_elements",
"pytest.fixture",
"numpy.linalg.eigh",
"quantum_systems.BasisSet.add_spin_one_body",
"numpy.array",
"numpy.exp",
"quantum_systems.TwoDimensionalHarmonicOscillator",
"quantum_systems.quantum_dots.two... | [((3950, 3980), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (3964, 3980), False, 'import pytest\n'), ((1867, 1952), 'quantum_systems.TwoDimensionalDoubleWell', 'TwoDimensionalDoubleWell', (['l', 'radius', 'num_grid_points'], {'barrier_strength': '(0)', 'axis': '(0)'}), '(l, radius, num_grid_points, barrier_strength=0, axis=0\n )\n', (1891, 1952), False, 'from quantum_systems import BasisSet, GeneralOrbitalSystem, TwoDimensionalDoubleWell, TwoDimensionalHarmonicOscillator\n'), ((1974, 2034), 'quantum_systems.TwoDimensionalHarmonicOscillator', 'TwoDimensionalHarmonicOscillator', (['l', 'radius', 'num_grid_points'], {}), '(l, radius, num_grid_points)\n', (2006, 2034), False, 'from quantum_systems import BasisSet, GeneralOrbitalSystem, TwoDimensionalDoubleWell, TwoDimensionalHarmonicOscillator\n'), ((2040, 2094), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tddw.h', 'tdho.h'], {'atol': '(1e-07)'}), '(tddw.h, tdho.h, atol=1e-07)\n', (2066, 2094), True, 'import numpy as np\n'), ((2098, 2152), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tddw.u', 'tdho.u'], {'atol': '(1e-07)'}), '(tddw.u, tdho.u, atol=1e-07)\n', (2124, 2152), True, 'import numpy as np\n'), ((2156, 2214), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tddw.spf', 'tdho.spf'], {'atol': '(1e-07)'}), '(tddw.spf, tdho.spf, atol=1e-07)\n', (2182, 2214), True, 'import numpy as np\n'), ((2261, 2348), 'numpy.array', 'np.array', (['[0.81129823, 1.37162083, 1.93581042, 2.21403823, 2.37162083, 2.93581042]'], {}), '([0.81129823, 1.37162083, 1.93581042, 2.21403823, 2.37162083, \n 2.93581042])\n', (2269, 2348), True, 'import numpy as np\n'), ((2498, 2602), 'quantum_systems.quantum_dots.two_dim.two_dim_helper.get_double_well_one_body_elements', 'get_double_well_one_body_elements', (['l', 'omega', 'mass', 'barrier_strength'], {'dtype': 'np.complex128', 'axis': 'axis'}), '(l, 
omega, mass, barrier_strength, dtype=\n np.complex128, axis=axis)\n', (2531, 2602), False, 'from quantum_systems.quantum_dots.two_dim.two_dim_helper import get_double_well_one_body_elements, theta_1_tilde_integral, theta_2_tilde_integral\n'), ((2630, 2650), 'numpy.linalg.eigh', 'np.linalg.eigh', (['h_dw'], {}), '(h_dw)\n', (2644, 2650), True, 'import numpy as np\n'), ((3293, 3397), 'quantum_systems.quantum_dots.two_dim.two_dim_helper.get_double_well_one_body_elements', 'get_double_well_one_body_elements', (['l', 'omega', 'mass', 'barrier_strength'], {'dtype': 'np.complex128', 'axis': 'axis'}), '(l, omega, mass, barrier_strength, dtype=\n np.complex128, axis=axis)\n', (3326, 3397), False, 'from quantum_systems.quantum_dots.two_dim.two_dim_helper import get_double_well_one_body_elements, theta_1_tilde_integral, theta_2_tilde_integral\n'), ((3428, 3448), 'numpy.linalg.eigh', 'np.linalg.eigh', (['h_dw'], {}), '(h_dw)\n', (3442, 3448), True, 'import numpy as np\n'), ((3457, 3496), 'quantum_systems.BasisSet.add_spin_one_body', 'BasisSet.add_spin_one_body', (['C_dw'], {'np': 'np'}), '(C_dw, np=np)\n', (3483, 3496), False, 'from quantum_systems import BasisSet, GeneralOrbitalSystem, TwoDimensionalDoubleWell, TwoDimensionalHarmonicOscillator\n'), ((3831, 3885), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tdho.u', 'tddw.u'], {'atol': '(1e-07)'}), '(tdho.u, tddw.u, atol=1e-07)\n', (3857, 3885), True, 'import numpy as np\n'), ((3889, 3947), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tdho.spf', 'tddw.spf'], {'atol': '(1e-07)'}), '(tdho.spf, tddw.spf, atol=1e-07)\n', (3915, 3947), True, 'import numpy as np\n'), ((4445, 4608), 'quantum_systems.quantum_dots.two_dim.two_dim_helper.get_double_well_one_body_elements', 'get_double_well_one_body_elements', (['(tddw.l // 2)', 'tddw._basis_set.omega', 'tddw._basis_set.mass', 'tddw._basis_set.barrier_strength'], {'dtype': 'np.complex128', 'axis': '(0)'}), '(tddw.l // 2, tddw._basis_set.omega, 
tddw.\n _basis_set.mass, tddw._basis_set.barrier_strength, dtype=np.complex128,\n axis=0)\n', (4478, 4608), False, 'from quantum_systems.quantum_dots.two_dim.two_dim_helper import get_double_well_one_body_elements, theta_1_tilde_integral, theta_2_tilde_integral\n'), ((4676, 4696), 'numpy.linalg.eigh', 'np.linalg.eigh', (['h_dw'], {}), '(h_dw)\n', (4690, 4696), True, 'import numpy as np\n'), ((4705, 4744), 'quantum_systems.BasisSet.add_spin_one_body', 'BasisSet.add_spin_one_body', (['C_dw'], {'np': 'np'}), '(C_dw, np=np)\n', (4731, 4744), False, 'from quantum_systems import BasisSet, GeneralOrbitalSystem, TwoDimensionalDoubleWell, TwoDimensionalHarmonicOscillator\n'), ((5022, 5071), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['h', 'tddw.h'], {'atol': '(1e-10)'}), '(h, tddw.h, atol=1e-10)\n', (5048, 5071), True, 'import numpy as np\n'), ((3178, 3251), 'quantum_systems.TwoDimensionalHarmonicOscillator', 'TwoDimensionalHarmonicOscillator', (['l', 'radius', 'num_grid_points'], {'omega': 'omega'}), '(l, radius, num_grid_points, omega=omega)\n', (3210, 3251), False, 'from quantum_systems import BasisSet, GeneralOrbitalSystem, TwoDimensionalDoubleWell, TwoDimensionalHarmonicOscillator\n'), ((3576, 3702), 'quantum_systems.TwoDimensionalDoubleWell', 'TwoDimensionalDoubleWell', (['l', 'radius', 'num_grid_points'], {'omega': 'omega', 'mass': 'mass', 'barrier_strength': 'barrier_strength', 'axis': 'axis'}), '(l, radius, num_grid_points, omega=omega, mass=mass,\n barrier_strength=barrier_strength, axis=axis)\n', (3600, 3702), False, 'from quantum_systems import BasisSet, GeneralOrbitalSystem, TwoDimensionalDoubleWell, TwoDimensionalHarmonicOscillator\n'), ((4167, 4283), 'quantum_systems.TwoDimensionalDoubleWell', 'TwoDimensionalDoubleWell', (['l', 'radius', 'num_grid_points'], {'barrier_strength': 'barrier_strength', 'omega': 'omega', 'axis': 'axis'}), '(l, radius, num_grid_points, barrier_strength=\n barrier_strength, omega=omega, axis=axis)\n', (4191, 
4283), False, 'from quantum_systems import BasisSet, GeneralOrbitalSystem, TwoDimensionalDoubleWell, TwoDimensionalHarmonicOscillator\n'), ((4803, 4857), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""tddw_dipole_moment.npy"""'], {}), "('tests', 'dat', 'tddw_dipole_moment.npy')\n", (4815, 4857), False, 'import os\n'), ((4899, 4910), 'numpy.abs', 'np.abs', (['dip'], {}), '(dip)\n', (4905, 4910), True, 'import numpy as np\n'), ((4912, 4938), 'numpy.abs', 'np.abs', (['tddw.dipole_moment'], {}), '(tddw.dipole_moment)\n', (4918, 4938), True, 'import numpy as np\n'), ((4974, 5016), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""tddw_h.npy"""'], {}), "('tests', 'dat', 'tddw_h.npy')\n", (4986, 5016), False, 'import os\n'), ((5089, 5131), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""tddw_u.npy"""'], {}), "('tests', 'dat', 'tddw_u.npy')\n", (5101, 5131), False, 'import os\n'), ((5164, 5173), 'numpy.abs', 'np.abs', (['u'], {}), '(u)\n', (5170, 5173), True, 'import numpy as np\n'), ((5175, 5189), 'numpy.abs', 'np.abs', (['tddw.u'], {}), '(tddw.u)\n', (5181, 5189), True, 'import numpy as np\n'), ((5222, 5266), 'os.path.join', 'os.path.join', (['"""tests"""', '"""dat"""', '"""tddw_spf.npy"""'], {}), "('tests', 'dat', 'tddw_spf.npy')\n", (5234, 5266), False, 'import os\n'), ((5299, 5310), 'numpy.abs', 'np.abs', (['spf'], {}), '(spf)\n', (5305, 5310), True, 'import numpy as np\n'), ((5312, 5328), 'numpy.abs', 'np.abs', (['tddw.spf'], {}), '(tddw.spf)\n', (5318, 5328), True, 'import numpy as np\n'), ((663, 697), 'numpy.exp', 'np.exp', (['(1.0j * np.pi * (m_q - m_p))'], {}), '(1.0j * np.pi * (m_q - m_p))\n', (669, 697), True, 'import numpy as np\n'), ((873, 907), 'numpy.exp', 'np.exp', (['(1.0j * np.pi * (m_q - m_p))'], {}), '(1.0j * np.pi * (m_q - m_p))\n', (879, 907), True, 'import numpy as np\n'), ((601, 639), 'numpy.exp', 'np.exp', (['(1.0j * np.pi * (m_q - m_p) / 2)'], {}), '(1.0j * np.pi * (m_q - m_p) / 2)\n', (607, 
639), True, 'import numpy as np\n'), ((1205, 1237), 'quantum_systems.quantum_dots.two_dim.two_dim_helper.theta_1_tilde_integral', 'theta_1_tilde_integral', (['m_p', 'm_q'], {}), '(m_p, m_q)\n', (1227, 1237), False, 'from quantum_systems.quantum_dots.two_dim.two_dim_helper import get_double_well_one_body_elements, theta_1_tilde_integral, theta_2_tilde_integral\n'), ((1526, 1558), 'quantum_systems.quantum_dots.two_dim.two_dim_helper.theta_2_tilde_integral', 'theta_2_tilde_integral', (['m_p', 'm_q'], {}), '(m_p, m_q)\n', (1548, 1558), False, 'from quantum_systems.quantum_dots.two_dim.two_dim_helper import get_double_well_one_body_elements, theta_1_tilde_integral, theta_2_tilde_integral\n'), ((545, 579), 'numpy.exp', 'np.exp', (['(1.0j * np.pi * (m_q - m_p))'], {}), '(1.0j * np.pi * (m_q - m_p))\n', (551, 579), True, 'import numpy as np\n')] |
import numpy as np
from numpy import clip, inf
from statsmodels.tsa.holtwinters import Holt
class HoltWintersPredictor(object):
    """Forecasts future counts with a damped, exponential Holt model fitted
    on the input series."""

    def __init__(self, data_in, num_prediction_periods):
        """Fit the Holt model on ``data_in`` (sequence of counts)."""
        self.__history = data_in
        self.__num_prediction_periods = num_prediction_periods
        observations = np.array(data_in).reshape(-1, 1)
        # HoltWinters doesn't like zeros, so floor the series.
        observations = np.clip(observations, 0.00001, None)
        self.__model = Holt(observations, exponential=True, damped=True)
        self.__results = self.__model.fit(optimized=True)

    @property
    def configuration(self):
        """This predictor exposes no tunable configuration."""
        return ""

    def predict_counts(self):
        """Forecast the next prediction periods, clipped at zero."""
        first = len(self.__history) + 1
        last = len(self.__history) + self.__num_prediction_periods
        forecast = self.__model.predict(self.__results.params, start=first, end=last)
        return clip(forecast.ravel().tolist(), 0, inf)
| [
"numpy.array",
"numpy.clip",
"statsmodels.tsa.holtwinters.Holt"
] | [((342, 365), 'numpy.clip', 'np.clip', (['y', '(1e-05)', 'None'], {}), '(y, 1e-05, None)\n', (349, 365), True, 'import numpy as np\n'), ((426, 464), 'statsmodels.tsa.holtwinters.Holt', 'Holt', (['y'], {'exponential': '(True)', 'damped': '(True)'}), '(y, exponential=True, damped=True)\n', (430, 464), False, 'from statsmodels.tsa.holtwinters import Holt\n'), ((837, 857), 'numpy.clip', 'clip', (['y_list', '(0)', 'inf'], {}), '(y_list, 0, inf)\n', (841, 857), False, 'from numpy import clip, inf\n'), ((297, 314), 'numpy.array', 'np.array', (['data_in'], {}), '(data_in)\n', (305, 314), True, 'import numpy as np\n')] |
"""."""
import numpy as np
import pyaccel
from siriuspy.namesys import SiriusPVName as _PVName
from siriuspy.devices import SOFB
from ..optimization import SimulAnneal
from ..utils import ThreadedMeasBaseClass as _BaseClass, \
ParamsBaseClass as _ParamsBaseClass
class Params(_ParamsBaseClass):
    """Settings for the TB-BO trajectory response-matrix measurement."""
    def __init__(self):
        """Initialize with default kick deltas and acquisition timings."""
        super().__init__()
        # kick variation applied to each corrector family
        self.deltas = dict(
            CH=0.3e-3, CV=0.15e-3, InjSept=0.3e-3, InjKckr=0.3e-3)
        # pause before resetting the SOFB buffers
        self.wait_time = 2
        # timeout while waiting for the orbit buffers to fill
        self.timeout_orb = 10
        # number of acquisitions accumulated in the SOFB buffers
        self.num_points = 10
class MeasureRespMatTBBO(_BaseClass):
    """Measure the TB-BO trajectory response matrix corrector by corrector.

    Each corrector strength is varied by a configured delta and the change
    of the concatenated TB+BO trajectories gives one line of the matrix.
    The measurement runs in a background thread (see ``_BaseClass``).
    """
    def __init__(self, all_corrs):
        """``all_corrs``: mapping of corrector PV names to objects exposing
        a ``strength`` attribute."""
        super().__init__(params=Params(), target=self._measure_matrix_thread)
        self.devices = {
            'bo_sofb': SOFB(SOFB.DEVICES.BO),
            'tb_sofb': SOFB(SOFB.DEVICES.TB),
            }
        self._all_corrs = all_corrs
        self._matrix = dict()
        self._corrs_to_measure = []
    @property
    def trajx(self):
        """Horizontal trajectory: TB BPMs followed by BO BPMs."""
        return np.hstack(
            [self.devices['tb_sofb'].trajx, self.devices['bo_sofb'].trajx])
    @property
    def trajy(self):
        """Vertical trajectory: TB BPMs followed by BO BPMs."""
        return np.hstack(
            [self.devices['tb_sofb'].trajy, self.devices['bo_sofb'].trajy])
    def wait(self, timeout=10):
        """Wait until both SOFB buffers are full."""
        self.devices['tb_sofb'].wait_buffer(timeout=timeout)
        self.devices['bo_sofb'].wait_buffer(timeout=timeout)
    def reset(self, wait=0):
        """Reset both SOFB buffers; return False if a stop was requested."""
        if self._stopevt.wait(wait):
            return False
        self.devices['tb_sofb'].cmd_reset()
        self.devices['bo_sofb'].cmd_reset()
        if self._stopevt.wait(1):
            return False
        return True
    @property
    def corr_names(self):
        """Sorted corrector names, with non-CV correctors listed first."""
        corrs = sorted([
            c for c in self._all_corrs if not c.dev.startswith('CV')])
        corrs.extend(sorted([
            c for c in self._all_corrs if c.dev.startswith('CV')]))
        return corrs
    @property
    def corrs_to_measure(self):
        """Correctors still to be measured (defaults to all unmeasured)."""
        if not self._corrs_to_measure:
            return sorted(self._all_corrs.keys() - self._matrix.keys())
        return self._corrs_to_measure
    @corrs_to_measure.setter
    def corrs_to_measure(self, value):
        """Restrict the measurement to the given corrector names."""
        self._corrs_to_measure = sorted([_PVName(n) for n in value])
    @property
    def matrix(self):
        """Assembled response matrix; unmeasured lines remain zero."""
        mat = np.zeros([len(self._all_corrs), 2*self.trajx.size], dtype=float)
        for i, cor in enumerate(self.corr_names):
            line = self._matrix.get(cor)
            if line is not None:
                mat[i, :] = line
        return mat
    @property
    def nr_points(self):
        """Smallest SOFB buffer size of the two accelerators."""
        return min(
            self.devices['tb_sofb'].nr_points,
            self.devices['bo_sofb'].nr_points)
    @nr_points.setter
    def nr_points(self, value):
        # keep both SOFB buffers the same size
        self.devices['tb_sofb'].nr_points = int(value)
        self.devices['bo_sofb'].nr_points = int(value)
    def _measure_matrix_thread(self):
        # Measurement loop executed in the background thread.
        self.nr_points = self.params.num_points
        corrs = self.corrs_to_measure
        print('Starting...')
        for i, cor in enumerate(corrs):
            print('{0:2d}|{1:2d}: {2:20s}'.format(i, len(corrs), cor), end='')
            orb = []
            delta = self.params.deltas[cor.dev]
            origkick = self._all_corrs[cor].strength
            print('orig ', end='')
            if not self.reset(self.params.wait_time):
                break
            self.wait(self.params.timeout_orb)
            # reference trajectory stored negated, so summing orb below
            # yields the trajectory difference directly
            orb.append(-np.hstack([self.trajx, self.trajy]))
            # apply the delta opposite to the current kick sign, i.e.
            # towards zero, to avoid exceeding the corrector limits
            sig = -2*int(origkick > 0) + 1
            print('pos' if sig > 0 else 'neg')
            self._all_corrs[cor].strength = origkick + sig*delta
            if not self.reset(self.params.wait_time):
                break
            self.wait(self.params.timeout_orb)
            orb.append(np.hstack([self.trajx, self.trajy]))
            self._all_corrs[cor].strength = origkick
            if self._stopevt.is_set():
                print('Stopped!')
                break
            else:
                # finite difference: (traj(kicked) - traj(orig)) / (sig*delta)
                self._matrix[cor] = np.array(orb).sum(axis=0)/(sig*delta)
        else:
            print('Finished!')
def calc_model_respmatTBBO(
        tb_mod, model, corr_names, elems, meth='middle', ishor=True):
    """Compute the model TB-BO trajectory response matrix.

    ``model`` appears to be the TB lattice followed by the BO lattice (BO
    corrector indices are shifted by ``len(tb_mod)``) — confirm with the
    callers.  ``elems`` maps corrector names to objects with
    ``model_indices``, ``magnet_type`` and ``model_length``.  The first BPM
    found in ``model`` is discarded.  Returns one line per corrector with
    ``2 * n_bpms`` columns (horizontal responses, then vertical).
    """
    # skip the first BPM
    bpms = np.array(pyaccel.lattice.find_indices(model, 'fam_name', 'BPM'))[1:]
    # accumulated 4x4 transfer matrices from the lattice start to each element
    _, cumulmat = pyaccel.tracking.find_m44(
        model, indices='open', fixed_point=[0, 0, 0, 0])
    matrix = np.zeros((len(corr_names), 2*bpms.size))
    for idx, corr in enumerate(corr_names):
        elem = elems[corr]
        indcs = np.array(elem.model_indices)
        if corr.sec == 'BO':
            print('Booster ', corr)
            # shift BO indices into the concatenated-lattice frame
            indcs += len(tb_mod)
        cortype = elem.magnet_type
        kxl = kyl = ksxl = ksyl = 0
        if corr.dev == 'InjSept':
            # kxl = tb_mod[indcs[0][1]].KxL
            # kyl = tb_mod[indcs[0][1]].KyL
            # ksxl = tb_mod[indcs[0][1]].KsxL
            # ksyl = tb_mod[indcs[0][1]].KsyL
            # accumulate the focusing of all InjSeptM66 segments
            midx = pyaccel.lattice.find_indices(
                tb_mod, 'fam_name', 'InjSeptM66')
            for m in midx:
                kxl += tb_mod[m].KxL
                kyl += tb_mod[m].KyL
                ksxl += tb_mod[m].KsxL
                ksyl += tb_mod[m].KsyL
        if not ishor and corr.dev in {'InjSept', 'InjKckr'}:
            # treat injection elements as vertical correctors when requested
            cortype = 'vertical'
        matrix[idx, :] = _get_respmat_line(
            cumulmat, indcs, bpms, length=elem.model_length,
            kxl=kxl, kyl=kyl, ksxl=ksxl, ksyl=ksyl,
            cortype=cortype, meth=meth)
    return matrix
def _get_respmat_line(
cumul_mat, indcs, bpms, length, kxl=0, kyl=0, ksxl=0, ksyl=0,
cortype='vertical', meth='middle'):
idx = 3 if cortype.startswith('vertical') else 1
cor = indcs[0]
if meth.lower().startswith('end'):
cor = indcs[-1]+1
elif meth.lower().startswith('mid'):
# create a symplectic integrator of second order
# for the last half of the element:
drift = np.eye(4, dtype=float)
drift[0, 1] = length/2 / 2
drift[2, 3] = length/2 / 2
quad = np.eye(4, dtype=float)
quad[1, 0] = -kxl/2
quad[3, 2] = -kyl/2
quad[1, 2] = -ksxl/2
quad[3, 0] = -ksyl/2
half_cor = np.dot(np.dot(drift, quad), drift)
m0c = cumul_mat[cor]
if meth.lower().startswith('mid'):
m0c = np.linalg.solve(half_cor, m0c)
mat = np.linalg.solve(m0c.T, cumul_mat[bpms].transpose((0, 2, 1)))
mat = mat.transpose(0, 2, 1)
# if meth.lower().startswith('mid'):
# mat = np.dot(mat, half_cor)
respx = mat[:, 0, idx]
respy = mat[:, 2, idx]
respx[bpms < indcs[0]] = 0
respy[bpms < indcs[0]] = 0
return np.hstack([respx, respy])
class FindSeptQuad(SimulAnneal):
    """Simulated-annealing search for the septum (K, Ks) strengths that
    best reproduce a measured TB-BO response matrix."""
    def __init__(self, tb_model, bo_model, corr_names, elems,
                 respmat, nturns=5, save=False, in_sept=True):
        """Store the lattice models and the measured matrix used as target.

        ``in_sept`` selects whether the strengths are applied to the
        InjSept elements or to the TB-04 CV-2 corrector indices.
        """
        super().__init__(save=save)
        self.tb_model = tb_model
        self.bo_model = bo_model
        self.corr_names = corr_names
        self.elems = elems
        # NOTE(review): nturns is stored but not used in this class —
        # presumably consumed elsewhere; confirm before removing.
        self.nturns = nturns
        self.respmat = respmat
        self.in_sept = in_sept
    def initialization(self):
        """Nothing to initialize beyond the base class."""
        return
    def calc_obj_fun(self):
        """RMS deviation between the model and the measured response matrix.

        NOTE(review): mutates ``self.tb_model`` in place with the candidate
        (K, Ks) values taken from ``self._position`` (set by SimulAnneal).
        """
        if self.in_sept:
            sept_idx = pyaccel.lattice.find_indices(
                self.tb_model, 'fam_name', 'InjSept')
        else:
            # apply the strengths at the CV-2 corrector indices instead
            sept_idx = self.elems['TB-04:MA-CV-2'].model_indices
        k, ks = self._position
        pyaccel.lattice.set_attribute(self.tb_model, 'K', sept_idx, k)
        pyaccel.lattice.set_attribute(self.tb_model, 'Ks', sept_idx, ks)
        respmat = calc_model_respmatTBBO(
            self.tb_model, self.bo_model, self.corr_names, self.elems)
        respmat -= self.respmat
        return np.sqrt(np.mean(respmat*respmat))
| [
"numpy.dot",
"numpy.hstack",
"pyaccel.lattice.set_attribute",
"numpy.array",
"numpy.mean",
"pyaccel.tracking.find_m44",
"pyaccel.lattice.find_indices",
"siriuspy.namesys.SiriusPVName",
"numpy.eye",
"numpy.linalg.solve",
"siriuspy.devices.SOFB"
] | [((4472, 4546), 'pyaccel.tracking.find_m44', 'pyaccel.tracking.find_m44', (['model'], {'indices': '"""open"""', 'fixed_point': '[0, 0, 0, 0]'}), "(model, indices='open', fixed_point=[0, 0, 0, 0])\n", (4497, 4546), False, 'import pyaccel\n'), ((6843, 6868), 'numpy.hstack', 'np.hstack', (['[respx, respy]'], {}), '([respx, respy])\n', (6852, 6868), True, 'import numpy as np\n'), ((1055, 1128), 'numpy.hstack', 'np.hstack', (["[self.devices['tb_sofb'].trajx, self.devices['bo_sofb'].trajx]"], {}), "([self.devices['tb_sofb'].trajx, self.devices['bo_sofb'].trajx])\n", (1064, 1128), True, 'import numpy as np\n'), ((1209, 1282), 'numpy.hstack', 'np.hstack', (["[self.devices['tb_sofb'].trajy, self.devices['bo_sofb'].trajy]"], {}), "([self.devices['tb_sofb'].trajy, self.devices['bo_sofb'].trajy])\n", (1218, 1282), True, 'import numpy as np\n'), ((4698, 4726), 'numpy.array', 'np.array', (['elem.model_indices'], {}), '(elem.model_indices)\n', (4706, 4726), True, 'import numpy as np\n'), ((6502, 6532), 'numpy.linalg.solve', 'np.linalg.solve', (['half_cor', 'm0c'], {}), '(half_cor, m0c)\n', (6517, 6532), True, 'import numpy as np\n'), ((7672, 7734), 'pyaccel.lattice.set_attribute', 'pyaccel.lattice.set_attribute', (['self.tb_model', '"""K"""', 'sept_idx', 'k'], {}), "(self.tb_model, 'K', sept_idx, k)\n", (7701, 7734), False, 'import pyaccel\n'), ((7743, 7807), 'pyaccel.lattice.set_attribute', 'pyaccel.lattice.set_attribute', (['self.tb_model', '"""Ks"""', 'sept_idx', 'ks'], {}), "(self.tb_model, 'Ks', sept_idx, ks)\n", (7772, 7807), False, 'import pyaccel\n'), ((803, 824), 'siriuspy.devices.SOFB', 'SOFB', (['SOFB.DEVICES.BO'], {}), '(SOFB.DEVICES.BO)\n', (807, 824), False, 'from siriuspy.devices import SOFB\n'), ((849, 870), 'siriuspy.devices.SOFB', 'SOFB', (['SOFB.DEVICES.TB'], {}), '(SOFB.DEVICES.TB)\n', (853, 870), False, 'from siriuspy.devices import SOFB\n'), ((4394, 4448), 'pyaccel.lattice.find_indices', 'pyaccel.lattice.find_indices', (['model', '"""fam_name"""', 
'"""BPM"""'], {}), "(model, 'fam_name', 'BPM')\n", (4422, 4448), False, 'import pyaccel\n'), ((5129, 5191), 'pyaccel.lattice.find_indices', 'pyaccel.lattice.find_indices', (['tb_mod', '"""fam_name"""', '"""InjSeptM66"""'], {}), "(tb_mod, 'fam_name', 'InjSeptM66')\n", (5157, 5191), False, 'import pyaccel\n'), ((6124, 6146), 'numpy.eye', 'np.eye', (['(4)'], {'dtype': 'float'}), '(4, dtype=float)\n', (6130, 6146), True, 'import numpy as np\n'), ((6232, 6254), 'numpy.eye', 'np.eye', (['(4)'], {'dtype': 'float'}), '(4, dtype=float)\n', (6238, 6254), True, 'import numpy as np\n'), ((7470, 7536), 'pyaccel.lattice.find_indices', 'pyaccel.lattice.find_indices', (['self.tb_model', '"""fam_name"""', '"""InjSept"""'], {}), "(self.tb_model, 'fam_name', 'InjSept')\n", (7498, 7536), False, 'import pyaccel\n'), ((7976, 8002), 'numpy.mean', 'np.mean', (['(respmat * respmat)'], {}), '(respmat * respmat)\n', (7983, 8002), True, 'import numpy as np\n'), ((2352, 2362), 'siriuspy.namesys.SiriusPVName', '_PVName', (['n'], {}), '(n)\n', (2359, 2362), True, 'from siriuspy.namesys import SiriusPVName as _PVName\n'), ((3939, 3974), 'numpy.hstack', 'np.hstack', (['[self.trajx, self.trajy]'], {}), '([self.trajx, self.trajy])\n', (3948, 3974), True, 'import numpy as np\n'), ((6395, 6414), 'numpy.dot', 'np.dot', (['drift', 'quad'], {}), '(drift, quad)\n', (6401, 6414), True, 'import numpy as np\n'), ((3600, 3635), 'numpy.hstack', 'np.hstack', (['[self.trajx, self.trajy]'], {}), '([self.trajx, self.trajy])\n', (3609, 3635), True, 'import numpy as np\n'), ((4179, 4192), 'numpy.array', 'np.array', (['orb'], {}), '(orb)\n', (4187, 4192), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn as nn
from ....ops.iou3d_nms import iou3d_nms_utils
class ProposalTargetLayer(nn.Module):
    """Samples RoIs and builds training targets for the second-stage (RCNN) head.

    For each image it subsamples a fixed number of first-stage proposals
    (foreground/background balanced), matches each sampled RoI to its best
    ground-truth box by 3D IoU, and derives regression / classification
    targets from the configured IoU thresholds.
    """
    def __init__(self, roi_sampler_cfg):
        # roi_sampler_cfg: config with ROI_PER_IMAGE, FG_RATIO, REG_FG_THRESH,
        # CLS_FG_THRESH, CLS_BG_THRESH, CLS_BG_THRESH_LO, HARD_BG_RATIO and
        # CLS_SCORE_TYPE (all read below).
        super().__init__()
        self.roi_sampler_cfg = roi_sampler_cfg
    def forward(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                roi_scores: (B, num_rois)
                gt_boxes: (B, N, 7 + C + 1)
                roi_labels: (B, num_rois)
        Returns:
            batch_dict:
                rois: (B, M, 7 + C)
                gt_of_rois: (B, M, 7 + C)
                gt_iou_of_rois: (B, M)
                roi_scores: (B, M)
                roi_labels: (B, M)
                reg_valid_mask: (B, M)
                rcnn_cls_labels: (B, M)
        """
        batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels = self.sample_rois_for_rcnn(
            batch_dict=batch_dict
        )
        # regression valid mask: only RoIs whose best GT overlap exceeds
        # REG_FG_THRESH contribute to the box-regression loss
        reg_valid_mask = (batch_roi_ious > self.roi_sampler_cfg.REG_FG_THRESH).long()
        # classification label
        if self.roi_sampler_cfg.CLS_SCORE_TYPE == 'cls':
            # hard 0/1 labels; RoIs with IoU between the BG and FG thresholds
            # are marked -1 (ignored by the classification loss)
            batch_cls_labels = (batch_roi_ious > self.roi_sampler_cfg.CLS_FG_THRESH).long()
            ignore_mask = (batch_roi_ious > self.roi_sampler_cfg.CLS_BG_THRESH) & \
                          (batch_roi_ious < self.roi_sampler_cfg.CLS_FG_THRESH)
            batch_cls_labels[ignore_mask > 0] = -1
        elif self.roi_sampler_cfg.CLS_SCORE_TYPE == 'roi_iou':
            # soft labels: 0 below the BG threshold, 1 above the FG threshold,
            # linear ramp in between
            iou_bg_thresh = self.roi_sampler_cfg.CLS_BG_THRESH
            iou_fg_thresh = self.roi_sampler_cfg.CLS_FG_THRESH
            fg_mask = batch_roi_ious > iou_fg_thresh
            bg_mask = batch_roi_ious < iou_bg_thresh
            interval_mask = (fg_mask == 0) & (bg_mask == 0)
            batch_cls_labels = (fg_mask > 0).float()
            batch_cls_labels[interval_mask] = \
                (batch_roi_ious[interval_mask] - iou_bg_thresh) / (iou_fg_thresh - iou_bg_thresh)
        elif self.roi_sampler_cfg.CLS_SCORE_TYPE == 'raw_roi_iou':
            # use the raw IoU itself as the classification target
            batch_cls_labels = batch_roi_ious
        else:
            raise NotImplementedError
        targets_dict = {'rois': batch_rois, 'gt_of_rois': batch_gt_of_rois, 'gt_iou_of_rois': batch_roi_ious,
                        'roi_scores': batch_roi_scores, 'roi_labels': batch_roi_labels,
                        'reg_valid_mask': reg_valid_mask,
                        'rcnn_cls_labels': batch_cls_labels}
        return targets_dict
    def sample_rois_for_rcnn(self, batch_dict):
        """
        Args:
            batch_dict:
                batch_size:
                rois: (B, num_rois, 7 + C)
                roi_scores: (B, num_rois)
                gt_boxes: (B, N, 7 + C + 1)
                roi_labels: (B, num_rois)
        Returns:
            (batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores,
            batch_roi_labels): per-image buffers with ROI_PER_IMAGE entries.
        """
        batch_size = batch_dict['batch_size']
        rois = batch_dict['rois']
        roi_scores = batch_dict['roi_scores']
        roi_labels = batch_dict['roi_labels']
        gt_boxes = batch_dict['gt_boxes']
        code_size = rois.shape[-1]
        # per-image output buffers, allocated on the same device/dtype as rois
        batch_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size)
        batch_gt_of_rois = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE, code_size + 1)
        batch_roi_ious = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE)
        batch_roi_scores = rois.new_zeros(batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE)
        batch_roi_labels = rois.new_zeros((batch_size, self.roi_sampler_cfg.ROI_PER_IMAGE), dtype=torch.long)
        for index in range(batch_size):
            cur_roi, cur_gt, cur_roi_labels, cur_roi_scores = \
                rois[index], gt_boxes[index], roi_labels[index], roi_scores[index]
            # gt_boxes is zero-padded per image; trim trailing all-zero rows
            k = cur_gt.__len__() - 1
            while k > 0 and cur_gt[k].sum() == 0:
                k -= 1
            cur_gt = cur_gt[:k + 1]
            # keep at least one (all-zero) GT row so downstream indexing works
            cur_gt = cur_gt.new_zeros((1, cur_gt.shape[1])) if len(cur_gt) == 0 else cur_gt
            if self.roi_sampler_cfg.get('SAMPLE_ROI_BY_EACH_CLASS', False):
                # match RoIs only against GTs of the same predicted class
                max_overlaps, gt_assignment = self.get_max_iou_with_same_class(
                    rois=cur_roi, roi_labels=cur_roi_labels,
                    gt_boxes=cur_gt[:, 0:7], gt_labels=cur_gt[:, -1].long()
                )
            else:
                iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi, cur_gt[:, 0:7])  # (M, N)
                max_overlaps, gt_assignment = torch.max(iou3d, dim=1)
            sampled_inds = self.subsample_rois(max_overlaps=max_overlaps)
            batch_rois[index] = cur_roi[sampled_inds]
            batch_roi_labels[index] = cur_roi_labels[sampled_inds]
            batch_roi_ious[index] = max_overlaps[sampled_inds]
            batch_roi_scores[index] = cur_roi_scores[sampled_inds]
            batch_gt_of_rois[index] = cur_gt[gt_assignment[sampled_inds]]
        return batch_rois, batch_gt_of_rois, batch_roi_ious, batch_roi_scores, batch_roi_labels
    def subsample_rois(self, max_overlaps):
        """Pick ROI_PER_IMAGE indices with an FG_RATIO mix of foreground and
        (hard/easy) background RoIs, based on each RoI's best GT overlap."""
        # sample fg, easy_bg, hard_bg
        fg_rois_per_image = int(np.round(self.roi_sampler_cfg.FG_RATIO * self.roi_sampler_cfg.ROI_PER_IMAGE))
        fg_thresh = min(self.roi_sampler_cfg.REG_FG_THRESH, self.roi_sampler_cfg.CLS_FG_THRESH)
        fg_inds = torch.nonzero((max_overlaps >= fg_thresh)).view(-1)
        easy_bg_inds = torch.nonzero((max_overlaps < self.roi_sampler_cfg.CLS_BG_THRESH_LO)).view(-1)
        hard_bg_inds = torch.nonzero((max_overlaps < self.roi_sampler_cfg.REG_FG_THRESH) &
                                      (max_overlaps >= self.roi_sampler_cfg.CLS_BG_THRESH_LO)).view(-1)
        fg_num_rois = fg_inds.numel()
        bg_num_rois = hard_bg_inds.numel() + easy_bg_inds.numel()
        if fg_num_rois > 0 and bg_num_rois > 0:
            # sampling fg
            fg_rois_per_this_image = min(fg_rois_per_image, fg_num_rois)
            rand_num = torch.from_numpy(np.random.permutation(fg_num_rois)).type_as(max_overlaps).long()
            fg_inds = fg_inds[rand_num[:fg_rois_per_this_image]]
            # sampling bg
            bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE - fg_rois_per_this_image
            bg_inds = self.sample_bg_inds(
                hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO
            )
        elif fg_num_rois > 0 and bg_num_rois == 0:
            # sampling fg (with replacement, to fill ROI_PER_IMAGE slots)
            rand_num = np.floor(np.random.rand(self.roi_sampler_cfg.ROI_PER_IMAGE) * fg_num_rois)
            rand_num = torch.from_numpy(rand_num).type_as(max_overlaps).long()
            fg_inds = fg_inds[rand_num]
            # NOTE(review): bg_inds is a plain Python list here while torch.cat
            # below expects tensors — confirm this branch (no background RoIs
            # at all) is reachable / handled upstream.
            bg_inds = []
        elif bg_num_rois > 0 and fg_num_rois == 0:
            # sampling bg
            bg_rois_per_this_image = self.roi_sampler_cfg.ROI_PER_IMAGE
            bg_inds = self.sample_bg_inds(
                hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, self.roi_sampler_cfg.HARD_BG_RATIO
            )
        else:
            # neither foreground nor background found: dump diagnostics and fail
            print('maxoverlaps:(min=%f, max=%f)' % (max_overlaps.min().item(), max_overlaps.max().item()))
            print('ERROR: FG=%d, BG=%d' % (fg_num_rois, bg_num_rois))
            raise NotImplementedError
        sampled_inds = torch.cat((fg_inds, bg_inds), dim=0)
        return sampled_inds
    @staticmethod
    def sample_bg_inds(hard_bg_inds, easy_bg_inds, bg_rois_per_this_image, hard_bg_ratio):
        """Sample background RoI indices, mixing hard (near-threshold) and easy
        background according to hard_bg_ratio; sampling is with replacement."""
        if hard_bg_inds.numel() > 0 and easy_bg_inds.numel() > 0:
            hard_bg_rois_num = min(int(bg_rois_per_this_image * hard_bg_ratio), len(hard_bg_inds))
            easy_bg_rois_num = bg_rois_per_this_image - hard_bg_rois_num
            # sampling hard bg
            rand_idx = torch.randint(low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)).long()
            hard_bg_inds = hard_bg_inds[rand_idx]
            # sampling easy bg
            rand_idx = torch.randint(low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)).long()
            easy_bg_inds = easy_bg_inds[rand_idx]
            bg_inds = torch.cat([hard_bg_inds, easy_bg_inds], dim=0)
        elif hard_bg_inds.numel() > 0 and easy_bg_inds.numel() == 0:
            hard_bg_rois_num = bg_rois_per_this_image
            # sampling hard bg
            rand_idx = torch.randint(low=0, high=hard_bg_inds.numel(), size=(hard_bg_rois_num,)).long()
            bg_inds = hard_bg_inds[rand_idx]
        elif hard_bg_inds.numel() == 0 and easy_bg_inds.numel() > 0:
            easy_bg_rois_num = bg_rois_per_this_image
            # sampling easy bg
            rand_idx = torch.randint(low=0, high=easy_bg_inds.numel(), size=(easy_bg_rois_num,)).long()
            bg_inds = easy_bg_inds[rand_idx]
        else:
            raise NotImplementedError
        return bg_inds
    @staticmethod
    def get_max_iou_with_same_class(rois, roi_labels, gt_boxes, gt_labels):
        """
        Args:
            rois: (N, 7)
            roi_labels: (N)
            gt_boxes: (N, 8)
            gt_labels:
        Returns:
            (max_overlaps, gt_assignment): per-RoI best IoU against GTs of the
            same class, and the index of that GT in gt_boxes; both stay 0 for
            classes with no RoI or no GT.
        """
        max_overlaps = rois.new_zeros(rois.shape[0])
        gt_assignment = roi_labels.new_zeros(roi_labels.shape[0])
        # iterate over every class id occurring in the GT labels
        for k in range(gt_labels.min().item(), gt_labels.max().item() + 1):
            roi_mask = (roi_labels == k)
            gt_mask = (gt_labels == k)
            if roi_mask.sum() > 0 and gt_mask.sum() > 0:
                cur_roi = rois[roi_mask]
                cur_gt = gt_boxes[gt_mask]
                original_gt_assignment = gt_mask.nonzero().view(-1)
                iou3d = iou3d_nms_utils.boxes_iou3d_gpu(cur_roi, cur_gt)  # (M, N)
                cur_max_overlaps, cur_gt_assignment = torch.max(iou3d, dim=1)
                max_overlaps[roi_mask] = cur_max_overlaps
                # map class-local GT index back to the global GT index
                gt_assignment[roi_mask] = original_gt_assignment[cur_gt_assignment]
        return max_overlaps, gt_assignment
| [
"torch.nonzero",
"torch.cat",
"torch.max",
"numpy.random.permutation",
"numpy.random.rand",
"numpy.round",
"torch.from_numpy"
] | [((7329, 7365), 'torch.cat', 'torch.cat', (['(fg_inds, bg_inds)'], {'dim': '(0)'}), '((fg_inds, bg_inds), dim=0)\n', (7338, 7365), False, 'import torch\n'), ((5197, 5273), 'numpy.round', 'np.round', (['(self.roi_sampler_cfg.FG_RATIO * self.roi_sampler_cfg.ROI_PER_IMAGE)'], {}), '(self.roi_sampler_cfg.FG_RATIO * self.roi_sampler_cfg.ROI_PER_IMAGE)\n', (5205, 5273), True, 'import numpy as np\n'), ((8137, 8183), 'torch.cat', 'torch.cat', (['[hard_bg_inds, easy_bg_inds]'], {'dim': '(0)'}), '([hard_bg_inds, easy_bg_inds], dim=0)\n', (8146, 8183), False, 'import torch\n'), ((4561, 4584), 'torch.max', 'torch.max', (['iou3d'], {'dim': '(1)'}), '(iou3d, dim=1)\n', (4570, 4584), False, 'import torch\n'), ((5390, 5430), 'torch.nonzero', 'torch.nonzero', (['(max_overlaps >= fg_thresh)'], {}), '(max_overlaps >= fg_thresh)\n', (5403, 5430), False, 'import torch\n'), ((5465, 5532), 'torch.nonzero', 'torch.nonzero', (['(max_overlaps < self.roi_sampler_cfg.CLS_BG_THRESH_LO)'], {}), '(max_overlaps < self.roi_sampler_cfg.CLS_BG_THRESH_LO)\n', (5478, 5532), False, 'import torch\n'), ((5567, 5696), 'torch.nonzero', 'torch.nonzero', (['((max_overlaps < self.roi_sampler_cfg.REG_FG_THRESH) & (max_overlaps >=\n self.roi_sampler_cfg.CLS_BG_THRESH_LO))'], {}), '((max_overlaps < self.roi_sampler_cfg.REG_FG_THRESH) & (\n max_overlaps >= self.roi_sampler_cfg.CLS_BG_THRESH_LO))\n', (5580, 5696), False, 'import torch\n'), ((9877, 9900), 'torch.max', 'torch.max', (['iou3d'], {'dim': '(1)'}), '(iou3d, dim=1)\n', (9886, 9900), False, 'import torch\n'), ((6556, 6606), 'numpy.random.rand', 'np.random.rand', (['self.roi_sampler_cfg.ROI_PER_IMAGE'], {}), '(self.roi_sampler_cfg.ROI_PER_IMAGE)\n', (6570, 6606), True, 'import numpy as np\n'), ((6032, 6066), 'numpy.random.permutation', 'np.random.permutation', (['fg_num_rois'], {}), '(fg_num_rois)\n', (6053, 6066), True, 'import numpy as np\n'), ((6645, 6671), 'torch.from_numpy', 'torch.from_numpy', (['rand_num'], {}), '(rand_num)\n', (6661, 6671), 
False, 'import torch\n')] |
#!/usr/bin/python3.6
########################################################
## FEDERAL UNIVERSITY OF MINAS GERAIS ##
## COMPUTER SCIENCE DEPARTMENT ##
## WIRELESS NETWORKS LABORATORY ##
## ##
## Author: <NAME> ##
## <NAME> ##
## ##
########################################################
from regression import Regression
from regression import REG_ALGORITHMS
import csv
import numpy as np
import argparse
import socket
import sys
import select
import configparser
from copa_api import APICopa
from numpy import array
#import Queue
from multiprocessing import Queue
def parse_args():
    """Assemble and parse the command-line interface of the regression module."""
    cli = argparse.ArgumentParser(description='Machine Learning Regression Module')
    cli.add_argument('--bufmg', action='store', type=str,
                     default='../tests/ufmg_norm.csv',
                     help='CSV input database path for UFMG pool')
    cli.add_argument('--bufrgs', action='store', type=str,
                     default='../tests/ufrgs_norm.csv',
                     help='CSV input database path for UFRGS pool')
    cli.add_argument('-a', '--algorithm', action='store', type=str,
                     choices=REG_ALGORITHMS, default='nnet',
                     help='Regression algorithm to be used for prediction')
    cli.add_argument('-f', '--features', action='store', type=int, default=5,
                     help='Number of features in the database')
    cli.add_argument('-x', action='store', type=float, default=0.5,
                     help='Define the tolerance')
    cli.add_argument('--fps', action='store', type=int, default=30,
                     help='Default fps')
    return cli.parse_args()
def get_locus(conf_file='pools.conf'):
    """Read the (edge, cloud) pool names from *conf_file*.

    Args:
        conf_file: path to an INI file with a [POOLS] section containing
            ``pool1`` (edge) and ``pool2`` (cloud).
    Returns:
        (pool1, pool2) on success, or None when the file is missing or
        malformed (a warning is printed, preserving prior behavior).

    Fix: ``ConfigParser.read`` silently skips missing files instead of raising
    ``FileExistsError`` (the exception previously caught); a missing file or
    section actually surfaces as a ``KeyError`` at the lookup below.
    """
    config = configparser.ConfigParser()
    try:
        config.read(conf_file)
        return config['POOLS']['pool1'], config['POOLS']['pool2']
    except (KeyError, configparser.Error):
        print("Warning: Configuration File not found. Using default values")
        return None
def is_edge(pool):
    """True when *pool* names the configured edge pool (first config entry)."""
    return pool == get_locus()[0]
def is_cloud(pool):
    """True when *pool* names the configured cloud pool (second config entry)."""
    return pool == get_locus()[1]
def migrate(pools, tolerance_predict):
    """Decide whether to migrate the container between edge and cloud pools.

    Args:
        pools: dict mapping pool name -> latest predicted processing time.
        tolerance_predict: maximum tolerated processing time per frame.

    Side effects: mutates the module-global ``pool_locus`` and, when a move
    is decided, triggers the migration through the Copa API.
    """
    #time_edge, time_cloud = pools[get_locus()[0]], pools[get_locus()[1]]
    global pool_locus
    time_edge, time_cloud = 0,0
    # Look up the latest predictions; a pool with no prediction yet is given a
    # sentinel "infinite" time so min() below never selects it.
    # NOTE(review): on KeyError the local time_edge/time_cloud stay 0 for this
    # call even though the sentinel is stored in pools — confirm intended.
    try:
        time_edge = pools[get_locus()[0]]
    except KeyError:
        pools[get_locus()[0]] = 999999999
    try:
        time_cloud = pools[get_locus()[1]]
    except KeyError:
        pools[get_locus()[1]] = 999999999
    pool_destination = pool_locus
    # On the edge and too slow -> move to the fastest known pool;
    # in the cloud while the edge would be fast enough -> move back to the edge.
    if is_edge(pool_locus) and time_edge >= tolerance_predict:
        pool_destination = min(pools, key=pools.get)
    elif is_cloud(pool_locus) and time_cloud < tolerance_predict:
        pool_destination = get_locus()[0]
    if pool_destination != pool_locus:
        print("Pool Locus", pool_locus)
        print("Pool Desti", pool_destination)
        # Destination names may arrive quoted (e.g. "'UFMG'"); strip the quotes.
        APICopa().migrateContainer(pool_locus, pool_destination.replace("\'",""))
        pool_locus = pool_destination
if __name__ == '__main__':
    # Event loop: accept monitoring samples over TCP, predict per-pool
    # processing times with the trained regressors, and record them in `pools`.
    # NOTE(review): `global` at module level is a no-op; pool_locus is simply a
    # module-level variable here.
    global pool_locus
    pool_locus = get_locus()[1]
    # Non-blocking TCP server; clients stream comma-separated feature vectors.
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setblocking(0)
    server_address = ('192.168.0.52', 10001)
    server.bind(server_address)
    server.listen(5)
    inputs = [server]
    outputs = [ ]
    pools = {}            # pool name -> latest predicted processing time
    message_queues = {}
    args = parse_args()
    # Start connection
    # Build model: one regressor per pool, trained on its own database
    model_ufmg = Regression(args.bufmg, args.algorithm, args.features)
    model_ufrgs = Regression(args.bufrgs, args.algorithm, args.features)
    model_ufmg.fit()
    model_ufrgs.fit()
    # Train model
    # Predict output
    #server = Server()
    #print(server.handleConnection())
    while inputs:
        # Wait for at least one of the sockets to be ready for processing
        #print >>sys.stderr, '\nwaiting for the next event'
        readable, writable, exceptional = select.select(inputs, outputs, inputs)
        # Handle inputs
        for s in readable:
            if s is server:
                # A "readable" server socket is ready to accept a connection
                connection, client_address = s.accept()
                #print >>sys.stderr, 'new connection from', client_address
                connection.setblocking(0)
                inputs.append(connection)
                # Give the connection a queue for data we want to send
                #message_queues[connection] = Queue.Queue()
            else:
                #data = s.recv(1024)
                data = s.recv(1024).decode('utf-8')
                if data:
                    #print(data)
                    # A readable client socket has data
                    #print >>sys.stderr, 'received "%s" from %s' % (data, s.getpeername())
                    #message_queues[s].put(data)
                    # Payload looks like "[cpu%,cpu_time,mem_avail,mem_swap,fps,...,pool]";
                    # split out the individual monitoring features.
                    cpu_percentage__ = data.split(',')[0]
                    cpu_percentage = str(cpu_percentage__).split('[')[1]
                    cpu_time = data.split(',')[1]
                    m_available = data.split(',')[2]
                    m_swap = data.split(',')[3]
                    frame_rate = data.split(',')[4]
                    #net_traffic = data.split(',')[4]
                    #transmission_capture = data.split(',')[5]
                    #transmission_observer = data.split(',')[6]
                    #t = regression.predict(array([[cpu_percentage,cpu_time,m_available,m_swap,net_traffic,
                    # transmission_capture,transmission_observer]], dtype=float))
                    #t = regression.predict(array([[cpu_percentage,cpu_time,m_available,m_swap,frame_rate]], dtype=float))
                    #print(t)
                    # Last field carries the pool name; strip the closing ']'.
                    poolSplit = str(data.split(',')[-1]).split(']')[0]
                    pool = poolSplit.split()[0]
                    #print(pool)
                    # Predict processing time with the model trained for the
                    # sample's pool of origin.
                    features = array([[cpu_percentage, cpu_time, m_available, m_swap, frame_rate]], dtype=float)
                    t = model_ufmg.predict(features) if pool == "\'UFMG\'" else model_ufrgs.predict(features)
                    #print(t)
                    pools[pool] = t
                    #pools[poolSplit]
                    print(pools)
                    #migrate(pools, args.x/args.fps)
                    # Add output channel for response
                    #print(regression.predict([[1.7,2014202.7,3940237312,89915392,0.0001804828643798828,1547732123.2172844,1547732122.9963086
                    if s not in outputs:
                        outputs.append(s)
                else:
                    # Interpret empty result as closed connection
                    #print >>sys.stderr, 'closing', client_address, 'after reading no data'
                    # Stop listening for input on the connection
                    if s in outputs:
                        outputs.remove(s)
                    inputs.remove(s)
                    s.close()
                    # Remove message queue
                    #del message_queues[s]
| [
"argparse.ArgumentParser",
"socket.socket",
"regression.Regression",
"select.select",
"numpy.array",
"configparser.ConfigParser",
"copa_api.APICopa"
] | [((811, 884), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Machine Learning Regression Module"""'}), "(description='Machine Learning Regression Module')\n", (834, 884), False, 'import argparse\n'), ((1858, 1885), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1883, 1885), False, 'import configparser\n'), ((3216, 3265), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (3229, 3265), False, 'import socket\n'), ((3564, 3617), 'regression.Regression', 'Regression', (['args.bufmg', 'args.algorithm', 'args.features'], {}), '(args.bufmg, args.algorithm, args.features)\n', (3574, 3617), False, 'from regression import Regression\n'), ((3636, 3690), 'regression.Regression', 'Regression', (['args.bufrgs', 'args.algorithm', 'args.features'], {}), '(args.bufrgs, args.algorithm, args.features)\n', (3646, 3690), False, 'from regression import Regression\n'), ((4033, 4071), 'select.select', 'select.select', (['inputs', 'outputs', 'inputs'], {}), '(inputs, outputs, inputs)\n', (4046, 4071), False, 'import select\n'), ((3008, 3017), 'copa_api.APICopa', 'APICopa', ([], {}), '()\n', (3015, 3017), False, 'from copa_api import APICopa\n'), ((6065, 6151), 'numpy.array', 'array', (['[[cpu_percentage, cpu_time, m_available, m_swap, frame_rate]]'], {'dtype': 'float'}), '([[cpu_percentage, cpu_time, m_available, m_swap, frame_rate]], dtype=\n float)\n', (6070, 6151), False, 'from numpy import array\n')] |
from __future__ import division

import copy
import gc

import matplotlib as mpl
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import scipy
import scipy.spatial.distance
import scipy.stats
from scipy.cluster import hierarchy
import seaborn as sns
import sklearn
import sklearn.decomposition
import sklearn.manifold
def calc_DE_mannwhitneyu(X, names1, names2):
    """Per-gene Mann-Whitney U differential expression between two groups.

    Args:
        X: expression DataFrame (genes x samples).
        names1, names2: column labels of the two sample groups (A and B).
    Returns:
        DataFrame indexed by gene with pvalue, medianA/B, meanA/B and a
        Bonferroni-adjusted ``pvalue_adj``, sorted by raw p-value (NaN last).
    """
    records = []
    for gene in X.index:
        grpA = X[names1].loc[gene]
        grpB = X[names2].loc[gene]
        # Genes silent in both groups are untestable: record NaN p-value.
        if np.count_nonzero(grpA) == 0 and np.count_nonzero(grpB) == 0:
            records.append((np.nan, 0, 0, 0, 0))
            continue
        _, p = scipy.stats.mannwhitneyu(grpA, grpB)
        records.append((p, np.median(grpA), np.median(grpB),
                        np.mean(grpA), np.mean(grpB)))
    df_DE = pd.DataFrame(records, index=X.index,
                         columns=["pvalue", "medianA", "medianB", "meanA", "meanB"])
    df_DE.sort_values("pvalue", inplace=True)
    # Bonferroni correction over the number of tested genes.
    df_DE["pvalue_adj"] = df_DE["pvalue"] * df_DE["pvalue"].shape[0]
    return df_DE
def call_hits_dropout(df, dropout_cutoff=1, window_size=500, N=2000, pvalue_cutoff=0.05):
    """Call genes whose dropout rate is unusually high for their expression.

    Genes are ranked by mean expression; each of the top *N* genes is compared
    against a window of ~*window_size* neighbors with similar mean, and called
    a hit when the empirical p-value (fraction of neighbors with a strictly
    higher dropout rate) falls below *pvalue_cutoff*.

    Args:
        df: expression DataFrame (genes x cells); the caller's frame is not
            modified (a deep copy is taken).
        dropout_cutoff: expression below this counts as a dropout.
        window_size: number of mean-matched neighbors forming the ensemble.
        N: number of top-expressed genes to test (capped at the gene count).
        pvalue_cutoff: empirical p-value threshold for calling a hit.
    Returns:
        List of hit gene names.

    Fixes vs. the previous version: ``df.ix`` (removed from modern pandas) is
    replaced with ``.iloc``; the dropout fraction is divided by the number of
    sample columns instead of including the helper "mean" column; the ensemble
    comparison is element-wise (``list > float`` raises TypeError in py3);
    N > gene count and empty ensembles no longer crash.
    """
    df = copy.deepcopy(df)
    n_samples = df.shape[1]
    # Calculate mean expression and rank genes by it (descending)
    df["mean"] = np.mean(df, axis=1)
    df.sort_values(by="mean", ascending=False, inplace=True)
    # Dropout fraction per gene over the sample columns only
    df["dropouts"] = np.sum(df.iloc[:, :n_samples] < dropout_cutoff, axis=1) / n_samples
    def _empirical_pvalue(df, i, window_size):
        """Fraction of genes in the mean-matched window (excluding gene i)
        whose dropout rate strictly exceeds gene i's."""
        i_upper = int(max(0, i - window_size / 2))
        i_lower = int(min(df.shape[0], i + window_size / 2))
        ensemble = [row["dropouts"]
                    for k, (_, row) in enumerate(df.iloc[i_upper:i_lower].iterrows())
                    if k + i_upper != i]
        my_dropouts = df.iloc[i]["dropouts"]
        if not ensemble:
            # degenerate window: nothing to compare against -> never a hit
            return 1.0
        return sum(d > my_dropouts for d in ensemble) / len(ensemble)
    hits = []
    for i in range(min(N, df.shape[0])):  # guard against N > gene count
        if _empirical_pvalue(df, i, window_size) < pvalue_cutoff:
            hits.append(df.iloc[i].name)
    return hits
def call_hits_corr(df, dropout_cutoff=1, N=2000, corr_cutoff=0.8, max_canonical_vector=10, canonical_vector_spacing=3):
    """Call genes whose rank-ordered expression matches a step-like profile.

    Each gene's expression is sorted, log10(x+1)-scaled and max-normalized,
    then Pearson-correlated against canonical "off...off/on...on" vectors of
    increasing on-width; genes exceeding *corr_cutoff* for any width are hits.

    Args:
        df: expression DataFrame (genes x cells); not modified.
        dropout_cutoff: values below this are zeroed before correlating.
        N: number of top-mean-expressed genes to consider.
        corr_cutoff: Pearson-r threshold for calling a hit.
        max_canonical_vector / canonical_vector_spacing: range/step of the
            canonical on-widths tried.
    Returns:
        (hits, df_hits): hit gene names, and their best rows (geneName, corr,
        pvalue) sorted by correlation.
    """
    work = copy.deepcopy(df)
    def _corr_with_profile(expr_df, profile):
        """Pearson correlation of each gene's sorted, log-scaled, max-normalized
        expression against the canonical on/off *profile*."""
        names, cors, pvals = [], [], []
        for name, expr in expr_df.iterrows():
            ranked = np.log10(expr.sort_values() + 1)
            normed = ranked / max(ranked)
            r, p = scipy.stats.pearsonr(normed, profile)
            names.append(name)
            cors.append(r)
            pvals.append(p)
        return pd.DataFrame({"geneName": names, "corr": cors, "pvalue": pvals})
    # Keep the N highest-expressed genes and zero out sub-cutoff values.
    work["mean_expr"] = np.mean(work, axis=1)
    work = work.sort_values("mean_expr", ascending=False)
    work = work.T.drop("mean_expr").T
    work = work.head(N)
    work[work < dropout_cutoff] = 0
    n_cells = work.shape[1]  # length of expression vector (num cells)
    all_corrs = pd.DataFrame()
    for width in range(1, max_canonical_vector, canonical_vector_spacing):
        profile = np.zeros(n_cells)  # canonical expression profile
        profile[-width:] = 1.
        all_corrs = pd.concat([all_corrs, _corr_with_profile(work, profile)])
    # Keep highly correlated genes, best correlation first, one row per gene.
    strong = all_corrs.loc[all_corrs["corr"] > corr_cutoff].sort_values(by="corr", ascending=False)
    strong = strong.drop_duplicates(subset="geneName")
    return list(strong["geneName"]), strong
def get_zscores(df, num_bin=20):
    """Z-score each gene's dispersion (log10 var/mean) within mean-expression bins.

    Args:
        df: expression DataFrame (genes x cells). Helper columns are added
            during the computation and removed again before returning.
        num_bin: number of equal-width mean-expression bins.
    Returns:
        (mean, z_score, df): per-gene mean expression, per-gene dispersion
        z-score, and the DataFrame with helper columns stripped.

    Fix: when all genes in a bin share the same dispersion (std == 0), their
    z-scores are now 0; the previous version computed the guard value and then
    immediately overwrote it with a divide-by-zero (NaN/inf).
    """
    myMean = np.mean(df, axis=1)
    myVar = np.var(df, axis=1)
    bins = np.linspace(min(myMean), max(myMean), num_bin)
    df["mean"] = myMean
    df["var"] = myVar
    df["mean_bin"] = pd.cut(myMean, bins, right=True, labels=range(1, len(bins)), include_lowest=True)
    for _, group in df.groupby("mean_bin"):
        # dispersion = log10(variance / mean) within this mean-expression bin
        myDispersion = np.log10(group["var"] / group["mean"])
        myDispersionStd = np.std(myDispersion)
        if myDispersionStd == 0:
            # degenerate bin: identical dispersions -> z-score 0 by definition
            z_scores = pd.Series(0.0, index=myDispersion.index)
        else:
            z_scores = (myDispersion - np.mean(myDispersion)) / myDispersionStd
        df.loc[group.index.dropna(), "z_score"] = z_scores
    mean = df["mean"]
    z_score = df["z_score"]
    df.drop(["mean", "var", "mean_bin", "z_score"], axis=1, inplace=True)  # clean up
    return mean, z_score, df
def corr(df, exclude_max=0):
    """Pairwise Pearson correlations between the columns of *df*.

    Args:
        df: DataFrame (observations x variables); the caller's frame is NOT
            modified (the previous version set NaNs directly into it).
        exclude_max: if > 0, mask out that many top values per column (set to
            NaN on a working copy) before correlating, to guard against
            correlations driven by a single extreme sample.
    Returns:
        Column-by-column correlation DataFrame (pandas handles NaNs pairwise,
        which is why this is slower than a plain np.corrcoef()).
    """
    if exclude_max > 0:
        df = df.copy()  # work on a copy so the caller's data is untouched
        for _ in range(exclude_max):
            for col, row in enumerate(np.nanargmax(np.array(df), axis=0)):
                df.iloc[row, col] = np.nan
    return df.corr()
def get_correlated_genes(df, seeds, correlation_cutoff, min_hits=1, exclude_max=0):
    """For each seed gene, find other genes with |Pearson r| above the cutoff.

    Args:
        df: expression DataFrame (genes x cells).
        seeds: gene names to expand from.
        correlation_cutoff: absolute-correlation threshold for a hit.
        min_hits: seeds with fewer validated hits are reported as uncorrelated.
        exclude_max: if > 0, re-check each hit with the top-expressing samples
            excluded (via corr()) to reject single-cell-driven correlations.
    Returns:
        (correlated_genes, uncorrelated_seeds)

    Fix: the SpikeIn1 debug trace previously used the Python-2 idiom
    ``print (...), len(...)`` and never printed the count.
    """
    correlated_genes = []
    uncorrelated_seeds = []
    allCorrelations = pd.DataFrame(np.corrcoef(df), index=df.index, columns=df.index)
    for seed in seeds:
        myCorrelations = allCorrelations.loc[seed].drop(seed)
        myHits = myCorrelations[np.abs(myCorrelations) > correlation_cutoff]
        if exclude_max > 0:
            # keep only hits that are still hits after excluding maximum expressing sample
            myValidatedHits = []
            for hit in myHits.index:
                corr_excludeMax = np.abs(corr(df.loc[[seed, hit]].T, exclude_max=exclude_max).loc[seed, hit])
                if corr_excludeMax > correlation_cutoff:
                    myValidatedHits.append(hit)
        else:
            myValidatedHits = myHits.index
        if len(myValidatedHits) < min_hits:
            if seed == "SpikeIn1":
                print("SpikeIn1 was uncorrelated seed with len(validatedHits)=%d"
                      % len(myValidatedHits))
            uncorrelated_seeds.append(seed)
        else:
            correlated_genes.extend(myValidatedHits)
    return correlated_genes, uncorrelated_seeds
def prune_singleton_correlations(df, product_cutoff=0.9):
    """Identify genes whose correlations could be driven by a single cell.

    For each gene pair the element-wise expression product is compared against
    ``product_cutoff * max(A) * max(B)``; a gene is a "singleton" when no
    partner exceeds that bar in at least two cells (i.e. any apparent
    co-expression rests on one doubly-high cell).

    Args:
        df: expression DataFrame (genes x cells).
        product_cutoff: fraction of the maximal pairwise product required.
    Returns:
        (notSingletons, singletons): now disjoint lists — the previous version
        also appended every non-singleton to the singleton list, because the
        trailing append executed even after ``break``.
    """
    notSingletons = []
    singletons = []
    for gene1 in df.index:
        for gene2 in df.index:
            if gene1 == gene2:
                continue
            A = df.loc[gene1]
            B = df.loc[gene2]
            AB = A * B
            myCutoff = product_cutoff * max(A) * max(B)
            x = sum(AB > myCutoff)
            if x > 1:
                notSingletons.append(gene1)
                break
        else:
            # only reached when no partner passed: gene1 is a true singleton
            singletons.append(gene1)
    return notSingletons, singletons
def filter_genes_overdispersed_correlates_dropouts(X, TFs, CSMs=None, exclude=None, N=50, correlation_cutoff=0.5, min_hits=1, exclude_max=0, dropout_rate_low=0.0, dropout_rate_high=1.0):
    """Filter for informative genes for cell type identification.
    (1) Drop genes that are not expressed.
    (2) Find overdispersed genes (genome-wide and among TFs / CSMs).
    (3) Expand gene set by finding correlated genes.
    (4) Drop genes with no correlates.
    (5) Drop genes with high or low dropout rate. """
    # Drop genes with low max expression
    X = X.loc[np.max(X, axis=1) > 2]
    # Find overdispersed genes
    myDispersion = dispersion(X)
    myDispersion.calc_dispersion(num_bin=20)  # calculate overdispersion
    hits_genome = myDispersion.get_hits(N=N)
    hits_TF = myDispersion.get_hits(N=N, candidates=TFs)  # get hits among TFs
    # Series.append was removed in pandas 2.0; pd.concat is the equivalent.
    if CSMs is not None:
        hits_CSM = myDispersion.get_hits(N=N, candidates=CSMs)  # get hits among CSMs
        hits = pd.concat([hits_genome, hits_TF, hits_CSM]).drop_duplicates().sort_values(ascending=False)
    else:
        hits = pd.concat([hits_genome, hits_TF]).drop_duplicates().sort_values(ascending=False)
    if exclude is not None:
        hits = list(set(hits.index) - set(exclude))
    else:
        hits = list(hits.index)
    # Expand gene set by finding correlated genes
    # Remove genes that have no correlates (presumably noise -- they don't belong to a "module")
    if len(hits) > 1000:
        print("Warning: calculating correlations between all genes and >1000 hits")
    correlated_genes, uncorrelated_seeds = get_correlated_genes(X, hits, correlation_cutoff=correlation_cutoff, min_hits=min_hits, exclude_max=exclude_max)
    hits_pruned = list(set(list(hits)) - set(list(uncorrelated_seeds)))
    hits_pruned_expanded = list(set(hits_pruned + list(set(correlated_genes))))
    Y = X.loc[hits_pruned_expanded]
    # Filter genes by dropout rate
    dropout_rate = np.sum(Y < 2, axis=1) / Y.shape[1]
    Y = Y.loc[(dropout_rate > dropout_rate_low) & (dropout_rate < dropout_rate_high)]
    return Y
class dispersion():
    """Ranks genes by overdispersion (variance/mean, z-scored in mean bins).

    Note: the constructor sorts the passed DataFrame's rows in place (by
    maximum expression, descending).
    """
    def __init__(self, X):
        self.X = X
        # Order genes by maximum expression, then drop the helper column again.
        self.X["max"] = np.max(self.X, axis=1)
        self.X.sort_values(by="max", ascending=False, inplace=True)
        self.X.drop("max", axis=1, inplace=True)
        self.mean = np.mean(self.X, axis=1)
    def calc_dispersion(self, num_bin=20):
        """Compute the binned dispersion z-scores via get_zscores()."""
        _, dispersion, _ = get_zscores(self.X, num_bin)
        self.dispersion = dispersion
    def plot(self, ax):
        """Scatter mean expression vs. dispersion z-score on *ax*."""
        ax.scatter(self.mean, self.dispersion)
        ax.set_xlabel("Mean expression (log2(CPM+1))")
        ax.set_ylabel("Dispersion (Z-score of log2(variance/mean))")
        ax.set_xlim(left=-0.5)
    def get_hits(self, N=None, dispersion_cutoff=None, mean_cutoff=None, candidates=None):
        """Return the most overdispersed genes.

        Either pass *N* (top-N), or both *dispersion_cutoff* and
        *mean_cutoff*; optionally restrict the ranking to *candidates*.
        Returns a Series (dispersion indexed by gene, descending), or None
        with an error message when the arguments are insufficient.
        """
        # `is not None` instead of `!= None` (PEP 8 E711; same behavior here)
        if candidates is not None:
            # filter genes by candidates
            dispersion_sorted = self.dispersion.loc[candidates].sort_values(ascending=False)
        else:
            dispersion_sorted = self.dispersion.sort_values(ascending=False)
        if N is not None:
            # return top N hits
            hits = dispersion_sorted[:N]
        elif dispersion_cutoff is not None and mean_cutoff is not None:
            # return hits with dispersion and mean greater than cutoffs
            hits = dispersion_sorted.loc[self.X.loc[self.dispersion > dispersion_cutoff].loc[self.mean > mean_cutoff].index].sort_values(ascending=False)
        else:
            print("Error: N, or dispersion_cutoff and mean_cutoff must be specified")
            hits = None
        return hits
class PCA():
    """Thin wrapper around sklearn PCA for an expression matrix.

    X is genes x cells; samples (cells) are projected (X.T is fit), so
    X_pca is (cells x components) and loadings is (genes x components).
    # NOTE(review): requires `sklearn.decomposition` to be importable; a bare
    # `import sklearn` does not guarantee the submodule is loaded — confirm
    # the module-level imports cover it.
    """
    def __init__(self, X, df, n_components):
        self.X = X                      # matrix used for the decomposition
        self.df = df                    # companion frame used for point colors
        self.n_components = n_components
    def pca(self):
        """Fit the decomposition and cache projection and loadings.

        Caution: assigning self.pca here shadows this method on the instance;
        pca() can only be called once per object.
        """
        self.pca = sklearn.decomposition.PCA(n_components=self.n_components, random_state=1)
        self.X_pca = self.pca.fit_transform(self.X.T)
        self.loadings = pd.DataFrame(self.pca.components_.T)
        self.loadings.set_index(self.X.index, inplace=True)
    def explained_variance_ratio_(self):
        # delegate to the fitted sklearn estimator (requires pca() first)
        return self.pca.explained_variance_ratio_
    def plot(self, ax, component_x=0, component_y=1, color_by=None):
        """Scatter two PCA components; optionally color by a row of self.df."""
        if color_by is not None:
            c = self.df.loc[color_by]
        else:
            c = None
        ax.scatter(self.X_pca[:,component_x], self.X_pca[:,component_y], c=c)
        ax.set_xlabel("PCA " + str(component_x + 1) + " ({0:.2%} variance)".format(self.pca.explained_variance_ratio_[component_x]))
        ax.set_ylabel("PCA " + str(component_y + 1) + " ({0:.2%} variance)".format(self.pca.explained_variance_ratio_[component_y]))
        plt.tight_layout()
    def plot_loadings(self, ax, component=0, num_genes=20):
        """Horizontal bar plot of the top and bottom num_genes/2 loadings."""
        myLoadings = self.loadings[component].sort_values(inplace=False, ascending=False)
        plot_data = pd.concat([myLoadings.iloc[0:int(num_genes/2)], myLoadings.iloc[-int(num_genes/2):]]).sort_values(ascending=True)
        ax.barh(range(len(plot_data)), plot_data, align="center")
        ax.set_xlabel("PC " + str(component + 1) + " Loading")
        yticklabels = plot_data.index
        ax.set_yticks(range(len(plot_data)))
        ax.set_yticklabels(yticklabels)
        plt.tight_layout()
    def top_loaded_genes(self, z_score_cutoff=2, max_component=30, plot=False):
        """Genes whose loading z-score exceeds the cutoff on any component."""
        max_component = min(self.n_components, max_component)
        hits = []
        for component in range(0, max_component):
            myLoadings = self.loadings[component].sort_values(inplace=False, ascending=False)
            myMean = np.mean(myLoadings)
            myStd = np.std(myLoadings)
            myZScores = (myLoadings - myMean) / myStd
            myHits = list(myZScores.loc[abs(myZScores) > z_score_cutoff].index)
            hits.extend(myHits)
            if plot:
                plt.hist(myZScores, bins=np.linspace(-10, 10, 100))
                plt.xlabel("Z-score of PCA loading")
                plt.ylabel("Genes")
        hits = list(set(hits))  # deduplicate across components
        return hits
class TSNE():
    """t-SNE embedding of cells from a precomputed distance matrix.

    X is genes x cells (distances are computed between columns); df supplies
    per-gene expression for coloring; df_libs supplies per-cell metadata
    ("color", "Tissue.type.color") for the non-gene color modes.
    """
    def __init__(self, X, df, df_libs, n_components=2):
        self.X = X
        self.df = df
        self.n_components = n_components
        self.df_libs = df_libs
    def calc_TSNE(self, perplexity=30, random_state=0, learning_rate=500.0, early_exaggeration=4.0, metric="correlation", n_iter=1000, method="barnes_hut"):
        """Build the cell-cell distance matrix and fit sklearn's TSNE on it.

        metric is "correlation" (1 - Pearson r, clipped at 0) or "cosine".
        # NOTE(review): requires `sklearn.manifold` to be importable; a bare
        # `import sklearn` does not guarantee the submodule is loaded.
        """
        if metric=="correlation":
            self.dist = 1-self.X.corr()
            self.dist = np.clip(self.dist, 0.0, max(np.max(self.dist))) # clip negative values to 0 (small negative values can occur due to floating point imprecision)
            # self.D = self.dist / sum(np.sum(self.dist))
            self.D = self.dist
        elif metric=="cosine":
            self.dist = scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(self.X.T, metric="cosine"))
            self.D = self.dist
        self.TSNE = sklearn.manifold.TSNE(n_components=self.n_components, metric="precomputed",
                        perplexity=perplexity, early_exaggeration=early_exaggeration,
                        learning_rate=learning_rate, n_iter=n_iter,
                        method=method, verbose=2, random_state=random_state)
        self.X_tsne = self.TSNE.fit_transform(self.D)
    def plot(self, fig, ax, colorBy=None, colorMode="gene", **kwargs):
        """Scatter the embedding, colored by gene expression or cell metadata.

        colorMode: "gene" (colorBy is one gene name or a list of two),
        "genotype" / "Tissue.type" (colors from df_libs), or "custom"
        (colorBy passed straight through to matplotlib).
        # NOTE(review): with colorMode=="gene" and len(colorBy) > 2, C (and
        # singleColor) are never assigned and the scatter below raises
        # NameError — confirm that path is unused.
        """
        if colorMode == "gene":
            if colorBy == None:
                print ("Error: colorBy must be a list with 1 or 2 gene names")
                return None
            elif isinstance(colorBy, (bytes, str)):
                # single gene: color directly by its expression values
                singleColor = True
                Z = self.df.loc[colorBy]
                # Z = Z / max(Z) # normalize to max
                C = Z
            elif len(colorBy) == 2:
                # two genes: encode each in its own RGB channel (white = low)
                singleColor = False
                # c = [[0., 0., 0.] for _ in range(self.X_tsne.shape[0])] # initialize to black
                c = [[1., 1., 1.] for _ in range(self.X_tsne.shape[0])] # initialize to white
                color_tuple_indexes = [0, 2] # choose color channels
                for i, feature in zip(color_tuple_indexes[:len(colorBy[:2])], colorBy[:2]):
                    Z = self.df.loc[feature]
                    Z = Z / max(Z) # normalize to max
                    # Z = Z / 2 + 0.5 # compress range to 0.5 to 1 to make brighter
                    for myColor, myIntensity in zip(c, Z):
                        # myColor[i] = myIntensity
                        myColor[i] = 1. - myIntensity
                # NOTE(review): map() is a lazy iterator here, consumed once by
                # ax.scatter below — confirm no second consumer relies on C.
                C = map(tuple, c)
            elif len(colorBy) > 2:
                print ("Warning: colorBy uses a maximum of 2 colors")
            """
            # set uncolored points to white
            # not relevant when initialized to white
            c_whiteDefault = []
            color_intensity_cutoff = 0.1
            for color in c:
                if ((color[0] < color_intensity_cutoff) and
                    (color[1] < color_intensity_cutoff) and
                    (color[2] < color_intensity_cutoff)):
                    c_whiteDefault.append([1., 1., 1.])
                else:
                    c_whiteDefault.append(color)
            # C = map(tuple, c_whiteDefault)
            """
        elif colorMode == "genotype":
            C = self.df_libs.loc[self.df.columns]["color"]
        elif colorMode == "Tissue.type":
            C = self.df_libs.loc[self.df.columns]["Tissue.type.color"]
        elif colorMode == "custom":
            C = colorBy
        if colorMode == "gene":
            # fixed normalization so expression colors are comparable
            norm = mpl.colors.Normalize(vmin=0, vmax=16)
            sc = ax.scatter(self.X_tsne[:,0], self.X_tsne[:,1], s=30, edgecolor="k", linewidths=0.05, c=C, norm=norm, **kwargs)
        else:
            sc = ax.scatter(self.X_tsne[:,0], self.X_tsne[:,1], s=30, edgecolor="k", linewidths=0.05, c=C, **kwargs)
        ax.set_xlabel("tSNE 1")
        ax.set_ylabel("tSNE 2")
        # make legend (two-gene mode): dummy off-screen points carry the labels
        if colorMode == "gene" and singleColor == False:
            (left, right) = ax.get_xlim() # get current xlim
            for i, feature in zip(color_tuple_indexes, colorBy):
                c = [1., 1., 1.]
                c[i] = 0.
                ax.scatter(max(self.X_tsne[:,0]) + 1e6, 0, s=30, c=c, linewidths=0.05,label=feature)
            ax.set_xlim(left, right)
            ax.legend(loc="center left", bbox_to_anchor=(1.05, 0.5)) # legend outside plot
        # make colorbar
        if colorMode == "gene" and singleColor == True:
            # cbar = fig.colorbar(sc, label="Log2(CPM+1)")
            plt.tight_layout()
            ax.set_aspect("equal")
            # return sc, cbar
            return sc
        plt.tight_layout()
        ax.set_aspect("equal")
        return sc
class hclust:
    """Hierarchical clustering of an expression matrix (genes x cells).

    Fixes vs. the previous version: cut() used ``map(bool, ...)`` iterators as
    pandas indexers (a Python-2 relic that fails under Python 3); get_labels()
    returned an unbound name on bad arguments.
    """
    def __init__(self, X, df):
        self.X = X    # matrix used for clustering (genes x cells)
        self.df = df  # companion DataFrame sliced alongside X in cut()
    def cluster(self, method="average", metric="correlation"):
        """Build row (gene) and column (cell) linkages."""
        pdist = scipy.spatial.distance.pdist(self.X, metric=metric)
        self.row_linkage = hierarchy.linkage(pdist, metric=metric, method=method)  # cluster on gene vectors
        pdist = scipy.spatial.distance.pdist(self.X.T, metric=metric)
        # constant columns give NaN correlation distances; treat as maximally distant
        pdist[np.isnan(pdist)] = 1.0
        self.col_linkage = hierarchy.linkage(pdist, metric=metric, method=method)
    def plot(self, figsize=(9,9), cmap="YlGnBu_r", **kwargs):
        """Seaborn clustermap drawn with the precomputed linkages."""
        cm = sns.clustermap(self.X, row_linkage=self.row_linkage, col_linkage=self.col_linkage,
                       figsize=figsize, cmap=cmap, **kwargs)
        plt.setp(cm.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
        return cm
    def get_labels(self, what=None, n_clusters=2):
        """Flat cluster labels for rows ("row") or columns ("col"); None on error."""
        labels = None
        if what == "row":
            labels = hierarchy.cut_tree(self.row_linkage, n_clusters)
        elif what == "col":
            labels = hierarchy.cut_tree(self.col_linkage, n_clusters)
        else:
            print('Error: what must be "row" or "col"')
        return labels
    def cut(self, n_clusters_genes=2, n_clusters_cells=2):
        """Split into two cell sets and two gene sets (2-cluster cuts only).

        Returns:
            (X1, df1, genes1, X2, df2, genes2): X/df restricted to each cell
            cluster, plus the gene names of each gene cluster.
        """
        gene_labels = hierarchy.cut_tree(self.row_linkage, n_clusters_genes).ravel()
        cell_labels = hierarchy.cut_tree(self.col_linkage, n_clusters_cells).ravel()
        # boolean masks (label 0 = cluster of the first gene/cell)
        gene_mask = gene_labels.astype(bool)
        cell_mask = cell_labels.astype(bool)
        X1 = self.X.loc[:, ~cell_mask]
        X2 = self.X.loc[:, cell_mask]
        genes1 = list(self.X.index[~gene_mask])
        genes2 = list(self.X.index[gene_mask])
        df1 = self.df[X1.columns]
        df2 = self.df[X2.columns]
        return X1, df1, genes1, X2, df2, genes2
def get_terminal_branch_lengths(Z, max_label):
    """Collect the heights of terminal (leaf) branches from a linkage matrix.

    A merge row of ``Z`` whose child id (column 0 or 1) is below
    ``max_label`` joins an original observation, so its height (column 2)
    is a terminal branch length.

    :param Z: scipy-style linkage matrix, one merge per row.
    :param max_label: number of original observations; ids below it are leaves.
    :return: list of heights — left-child matches first, then right-child.
    """
    lengths = []
    for child_col in (0, 1):
        for row in Z:
            if row[child_col] < max_label:
                lengths.append(row[2])
    return lengths
class ICIM:
    """Iterative clustering by repeated bisection of a cell population.

    Populations live in ``self.population``, keyed by a binary path
    string ("0" is the root; the children of key ``k`` are ``k + "0"``
    and ``k + "1"``).  ``self.markers`` records, per population, the
    genes that drove its split.  The constructor arguments become the
    defaults for every call to :meth:`step`.
    """

    def __init__(self, X, df, TFs, CSMs, exclude, N, correlation_cutoff,
                 min_hits, exclude_max, dropout_rate_low, dropout_rate_high,
                 metric, stop_condition, N_stop, linkage_dist_stop):
        self.df = df
        self.population = {}
        self.population["0"] = X          # root population
        self.markers = {}
        self.markers["0"] = []            # the root has no split markers
        self.hclust = {}                  # hclust object used to split each parent
        # defaults for step()
        self.TFs = TFs
        self.CSMs = CSMs
        self.exclude = exclude
        self.N = N
        self.correlation_cutoff = correlation_cutoff
        self.min_hits = min_hits
        self.exclude_max = exclude_max
        self.dropout_rate_low = dropout_rate_low
        self.dropout_rate_high = dropout_rate_high
        self.metric = metric
        self.N_stop = N_stop
        self.stop_condition = stop_condition
        self.linkage_dist_stop = linkage_dist_stop

    def step(self, parent, TFs=None, CSMs=None, exclude=None,
             N=None, correlation_cutoff=None, min_hits=None,
             exclude_max=None, dropout_rate_low=None,
             dropout_rate_high=None, metric=None, stop_condition=None,
             N_stop=None, linkage_dist_stop=None, verbose=False):
        """Split the population keyed by *parent* into two children.

        :return: list of child keys that should be split further (the
            work queue for :meth:`calc`); an empty list stops this branch.
        """
        # Fall back to the instance-wide defaults for any unset argument.
        if TFs is None: TFs = self.TFs
        if CSMs is None: CSMs = self.CSMs
        if exclude is None: exclude = self.exclude
        if N is None: N = self.N
        if correlation_cutoff is None: correlation_cutoff = self.correlation_cutoff
        if min_hits is None: min_hits = self.min_hits
        if exclude_max is None: exclude_max = self.exclude_max
        if dropout_rate_low is None: dropout_rate_low = self.dropout_rate_low
        if dropout_rate_high is None: dropout_rate_high = self.dropout_rate_high
        if metric is None: metric = self.metric
        if stop_condition is None: stop_condition = self.stop_condition
        if N_stop is None: N_stop = self.N_stop
        if linkage_dist_stop is None: linkage_dist_stop = self.linkage_dist_stop
        myPop = self.population[parent]
        # Select informative genes for this population (helper defined
        # elsewhere in this module).
        Y = filter_genes_overdispersed_correlates_dropouts(myPop, TFs, CSMs, exclude,
                N, correlation_cutoff, min_hits, exclude_max,
                dropout_rate_low, dropout_rate_high)
        if verbose:
            print("Found", Y.shape[0], "genes")
        if Y.shape[0] <= 5: return []  # too few genes to cluster on
        myClust = hclust(Y, self.df)
        myClust.cluster(method="average", metric=metric)
        # hclust.cut() returns (X1, df1, genes1, X2, df2, genes2); the df
        # subsets (all genes, each child's cells) become the new populations.
        _, X1, markerGenes1, _, X2, markerGenes2 = myClust.cut()
        self.hclust[parent] = myClust
        if stop_condition == "linkage_dist":
            Z = myClust.col_linkage
            L_terminal = get_terminal_branch_lengths(Z, myClust.X.shape[1]) # terminal branch lengths
            min_linkage_dist = min(L_terminal) # smallest terminal branch length (most similar to neighbor)
            if min_linkage_dist > linkage_dist_stop:
                # The split is not supported by the dendrogram: discard the
                # child populations and markers and stop this branch.
                if verbose:
                    print("Failed linkage distance condition. Stopping.")
                return []
        child1 = parent + "0"
        self.population[child1] = X1
        self.markers[child1] = markerGenes1
        child2 = parent + "1"
        self.population[child2] = X2
        self.markers[child2] = markerGenes2
        if verbose:
            print("Child populations", X1.shape[1], X2.shape[1])
        queue = []
        if stop_condition == "num_cells":
            # keep splitting children that still hold enough cells
            if X1.shape[1] >= N_stop:
                queue.append(child1)
            if X2.shape[1] >= N_stop:
                queue.append(child2)
        elif stop_condition == "linkage_dist":
            if X1.shape[1] >= 20:
                queue.append(child1)
            if X2.shape[1] >= 20:
                queue.append(child2)
        else:
            print('Error: stop_condition must be "num_cells" or "linkage_dist"')
            return []
        gc.collect()  # free intermediate matrices before the next split
        return queue

    def calc(self, skip=None, verbose=False):
        """Split populations depth-first until every branch stops.

        :param skip: optional collection of population keys that must not
            be split further.  (Was a mutable default ``[]``; it is only
            read, but ``None`` is the safe idiom.)
        """
        if skip is None:
            skip = []
        if verbose:
            print("Initial step")
        queue = []
        queue.extend(self.step("0", verbose=verbose))
        if verbose:
            # Blank separator line.  The original used a bare ``print``
            # expression, which is a no-op in Python 3.
            print()
        while len(queue) != 0:
            parent = queue.pop()  # depth-first: take the most recent child
            if verbose:
                print(parent)
            myQueue = self.step(parent, verbose=verbose)
            myQueue = list(set(myQueue) - set(skip))
            queue.extend(myQueue)
            if verbose:
                print()
        return None

    def get_all_markers(self):
        """Return the deduplicated union of marker genes over all populations."""
        allMarkers = list(set([item for sublist in self.markers.values() for item in sublist]))
        return allMarkers
| [
"numpy.sum",
"numpy.abs",
"scipy.cluster.hierarchy.linkage",
"numpy.isnan",
"gc.collect",
"numpy.mean",
"scipy.spatial.distance.pdist",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"matplotlib.colors.Normalize",
"seaborn.clustermap",
"numpy.std",
"scipy.stats.mannwhitneyu",
"numpy... | [((971, 1095), 'pandas.DataFrame', 'pd.DataFrame', (["{'pvalue': pvalues, 'medianA': medianA, 'medianB': medianB, 'meanA': meanA,\n 'meanB': meanB}"], {'index': 'X.index'}), "({'pvalue': pvalues, 'medianA': medianA, 'medianB': medianB,\n 'meanA': meanA, 'meanB': meanB}, index=X.index)\n", (983, 1095), True, 'import pandas as pd\n'), ((1359, 1376), 'copy.deepcopy', 'copy.deepcopy', (['df'], {}), '(df)\n', (1372, 1376), False, 'import copy\n'), ((1419, 1438), 'numpy.mean', 'np.mean', (['df'], {'axis': '(1)'}), '(df, axis=1)\n', (1426, 1438), True, 'import numpy as np\n'), ((2794, 2811), 'copy.deepcopy', 'copy.deepcopy', (['df'], {}), '(df)\n', (2807, 2811), False, 'import copy\n'), ((3493, 3512), 'numpy.mean', 'np.mean', (['df'], {'axis': '(1)'}), '(df, axis=1)\n', (3500, 3512), True, 'import numpy as np\n'), ((3736, 3750), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3748, 3750), True, 'import pandas as pd\n'), ((4308, 4327), 'numpy.mean', 'np.mean', (['df'], {'axis': '(1)'}), '(df, axis=1)\n', (4315, 4327), True, 'import numpy as np\n'), ((4341, 4359), 'numpy.var', 'np.var', (['df'], {'axis': '(1)'}), '(df, axis=1)\n', (4347, 4359), True, 'import numpy as np\n'), ((751, 781), 'scipy.stats.mannwhitneyu', 'scipy.stats.mannwhitneyu', (['A', 'B'], {}), '(A, B)\n', (775, 781), False, 'import scipy\n'), ((1560, 1606), 'numpy.sum', 'np.sum', (['(df.ix[:, :-1] < dropout_cutoff)'], {'axis': '(1)'}), '(df.ix[:, :-1] < dropout_cutoff, axis=1)\n', (1566, 1606), True, 'import numpy as np\n'), ((3372, 3443), 'pandas.DataFrame', 'pd.DataFrame', (["{'geneName': geneNames, 'corr': corrs, 'pvalue': pvalues}"], {}), "({'geneName': geneNames, 'corr': corrs, 'pvalue': pvalues})\n", (3384, 3443), True, 'import pandas as pd\n'), ((3840, 3851), 'numpy.zeros', 'np.zeros', (['L'], {}), '(L)\n', (3848, 3851), True, 'import numpy as np\n'), ((3956, 3988), 'pandas.concat', 'pd.concat', (['[df_corrs, df_myCorr]'], {}), '([df_corrs, df_myCorr])\n', (3965, 3988), True, 
'import pandas as pd\n'), ((4645, 4683), 'numpy.log10', 'np.log10', (["(group['var'] / group['mean'])"], {}), "(group['var'] / group['mean'])\n", (4653, 4683), True, 'import numpy as np\n'), ((4711, 4731), 'numpy.std', 'np.std', (['myDispersion'], {}), '(myDispersion)\n', (4717, 4731), True, 'import numpy as np\n'), ((5713, 5728), 'numpy.corrcoef', 'np.corrcoef', (['df'], {}), '(df)\n', (5724, 5728), True, 'import numpy as np\n'), ((9353, 9374), 'numpy.sum', 'np.sum', (['(Y < 2)'], {'axis': '(1)'}), '(Y < 2, axis=1)\n', (9359, 9374), True, 'import numpy as np\n'), ((9589, 9611), 'numpy.max', 'np.max', (['self.X'], {'axis': '(1)'}), '(self.X, axis=1)\n', (9595, 9611), True, 'import numpy as np\n'), ((9752, 9775), 'numpy.mean', 'np.mean', (['self.X'], {'axis': '(1)'}), '(self.X, axis=1)\n', (9759, 9775), True, 'import numpy as np\n'), ((11241, 11314), 'sklearn.decomposition.PCA', 'sklearn.decomposition.PCA', ([], {'n_components': 'self.n_components', 'random_state': '(1)'}), '(n_components=self.n_components, random_state=1)\n', (11266, 11314), False, 'import sklearn\n'), ((11395, 11431), 'pandas.DataFrame', 'pd.DataFrame', (['self.pca.components_.T'], {}), '(self.pca.components_.T)\n', (11407, 11431), True, 'import pandas as pd\n'), ((12126, 12144), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12142, 12144), True, 'from matplotlib import pyplot as plt\n'), ((12700, 12718), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12716, 12718), True, 'from matplotlib import pyplot as plt\n'), ((14405, 14651), 'sklearn.manifold.TSNE', 'sklearn.manifold.TSNE', ([], {'n_components': 'self.n_components', 'metric': '"""precomputed"""', 'perplexity': 'perplexity', 'early_exaggeration': 'early_exaggeration', 'learning_rate': 'learning_rate', 'n_iter': 'n_iter', 'method': 'method', 'verbose': '(2)', 'random_state': 'random_state'}), "(n_components=self.n_components, metric='precomputed',\n perplexity=perplexity, 
early_exaggeration=early_exaggeration,\n learning_rate=learning_rate, n_iter=n_iter, method=method, verbose=2,\n random_state=random_state)\n", (14426, 14651), False, 'import sklearn\n'), ((18311, 18329), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18327, 18329), True, 'from matplotlib import pyplot as plt\n'), ((18559, 18610), 'scipy.spatial.distance.pdist', 'scipy.spatial.distance.pdist', (['self.X'], {'metric': 'metric'}), '(self.X, metric=metric)\n', (18587, 18610), False, 'import scipy\n'), ((18639, 18693), 'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['pdist'], {'metric': 'metric', 'method': 'method'}), '(pdist, metric=metric, method=method)\n', (18656, 18693), False, 'from scipy.cluster import hierarchy\n'), ((18737, 18790), 'scipy.spatial.distance.pdist', 'scipy.spatial.distance.pdist', (['self.X.T'], {'metric': 'metric'}), '(self.X.T, metric=metric)\n', (18765, 18790), False, 'import scipy\n'), ((18857, 18911), 'scipy.cluster.hierarchy.linkage', 'hierarchy.linkage', (['pdist'], {'metric': 'metric', 'method': 'method'}), '(pdist, metric=metric, method=method)\n', (18874, 18911), False, 'from scipy.cluster import hierarchy\n'), ((18991, 19116), 'seaborn.clustermap', 'sns.clustermap', (['self.X'], {'row_linkage': 'self.row_linkage', 'col_linkage': 'self.col_linkage', 'figsize': 'figsize', 'cmap': 'cmap'}), '(self.X, row_linkage=self.row_linkage, col_linkage=self.\n col_linkage, figsize=figsize, cmap=cmap, **kwargs)\n', (19005, 19116), True, 'import seaborn as sns\n'), ((19717, 19771), 'scipy.cluster.hierarchy.cut_tree', 'hierarchy.cut_tree', (['self.row_linkage', 'n_clusters_genes'], {}), '(self.row_linkage, n_clusters_genes)\n', (19735, 19771), False, 'from scipy.cluster import hierarchy\n'), ((19795, 19849), 'scipy.cluster.hierarchy.cut_tree', 'hierarchy.cut_tree', (['self.col_linkage', 'n_clusters_cells'], {}), '(self.col_linkage, n_clusters_cells)\n', (19813, 19849), False, 'from scipy.cluster import hierarchy\n'), 
((24746, 24758), 'gc.collect', 'gc.collect', ([], {}), '()\n', (24756, 24758), False, 'import gc\n'), ((838, 850), 'numpy.median', 'np.median', (['A'], {}), '(A)\n', (847, 850), True, 'import numpy as np\n'), ((876, 888), 'numpy.median', 'np.median', (['B'], {}), '(B)\n', (885, 888), True, 'import numpy as np\n'), ((912, 922), 'numpy.mean', 'np.mean', (['A'], {}), '(A)\n', (919, 922), True, 'import numpy as np\n'), ((946, 956), 'numpy.mean', 'np.mean', (['B'], {}), '(B)\n', (953, 956), True, 'import numpy as np\n'), ((3104, 3130), 'numpy.log10', 'np.log10', (['(myRankedExpr + 1)'], {}), '(myRankedExpr + 1)\n', (3112, 3130), True, 'import numpy as np\n'), ((3211, 3239), 'scipy.stats.pearsonr', 'scipy.stats.pearsonr', (['myY', 'C'], {}), '(myY, C)\n', (3231, 3239), False, 'import scipy\n'), ((7953, 7970), 'numpy.max', 'np.max', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (7959, 7970), True, 'import numpy as np\n'), ((13052, 13071), 'numpy.mean', 'np.mean', (['myLoadings'], {}), '(myLoadings)\n', (13059, 13071), True, 'import numpy as np\n'), ((13093, 13111), 'numpy.std', 'np.std', (['myLoadings'], {}), '(myLoadings)\n', (13099, 13111), True, 'import numpy as np\n'), ((17156, 17193), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(0)', 'vmax': '(16)'}), '(vmin=0, vmax=16)\n', (17176, 17193), True, 'import matplotlib as mpl\n'), ((18191, 18209), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18207, 18209), True, 'from matplotlib import pyplot as plt\n'), ((18806, 18821), 'numpy.isnan', 'np.isnan', (['pdist'], {}), '(pdist)\n', (18814, 18821), True, 'import numpy as np\n'), ((19336, 19384), 'scipy.cluster.hierarchy.cut_tree', 'hierarchy.cut_tree', (['self.row_linkage', 'n_clusters'], {}), '(self.row_linkage, n_clusters)\n', (19354, 19384), False, 'from scipy.cluster import hierarchy\n'), ((497, 516), 'numpy.count_nonzero', 'np.count_nonzero', (['A'], {}), '(A)\n', (513, 516), True, 'import numpy as np\n'), ((526, 545), 
'numpy.count_nonzero', 'np.count_nonzero', (['B'], {}), '(B)\n', (542, 545), True, 'import numpy as np\n'), ((4838, 4859), 'numpy.mean', 'np.mean', (['myDispersion'], {}), '(myDispersion)\n', (4845, 4859), True, 'import numpy as np\n'), ((5888, 5910), 'numpy.abs', 'np.abs', (['myCorrelations'], {}), '(myCorrelations)\n', (5894, 5910), True, 'import numpy as np\n'), ((13389, 13425), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Z-score of PCA loading"""'], {}), "('Z-score of PCA loading')\n", (13399, 13425), True, 'from matplotlib import pyplot as plt\n'), ((13443, 13462), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Genes"""'], {}), "('Genes')\n", (13453, 13462), True, 'from matplotlib import pyplot as plt\n'), ((19436, 19484), 'scipy.cluster.hierarchy.cut_tree', 'hierarchy.cut_tree', (['self.col_linkage', 'n_clusters'], {}), '(self.col_linkage, n_clusters)\n', (19454, 19484), False, 'from scipy.cluster import hierarchy\n'), ((5443, 5455), 'numpy.array', 'np.array', (['df'], {}), '(df)\n', (5451, 5455), True, 'import numpy as np\n'), ((13997, 14014), 'numpy.max', 'np.max', (['self.dist'], {}), '(self.dist)\n', (14003, 14014), True, 'import numpy as np\n'), ((14295, 14350), 'scipy.spatial.distance.pdist', 'scipy.spatial.distance.pdist', (['self.X.T'], {'metric': '"""cosine"""'}), "(self.X.T, metric='cosine')\n", (14323, 14350), False, 'import scipy\n'), ((13345, 13370), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(100)'], {}), '(-10, 10, 100)\n', (13356, 13370), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Example workflow for the geoist seismic-catalog tools: download CENC and
USGS catalogs, filter and plot them, fit a Gutenberg-Richter b-value,
decluster, smooth the catalog on a grid, and run catalog quality-control
(QC) reports.

Created on Tue Mar 24 17:11:22 2020

@author: chens
"""
#from os.path import dirname
import numpy as np
import geoist as gi
import geoist.others.fetch_data as data
from geoist.others.fetch_data import _retrieve_file as downloadurl
from geoist.others.fetch_data import usgs_catalog
from geoist.catalog import QCreport as qc
from geoist.catalog import QCmulti as cp
from geoist.catalog import Catalogue as cat
from geoist.catalog import Exploration as exp
from geoist.catalog import MapTools as mapt
from geoist.catalog import Selection as sel
from geoist.catalog import Seismicity as sem
from geoist.catalog import Declusterer as declus
from geoist.catalog import Smoothing as sm
from geoist.catalog import CatUtils as ct
## Download the CENC earthquake catalog
# pathname = dirname(__file__)
# print(pathname)
url = data.ispec_catalog_url
print(url)
filename = '2020-03-25CENC-M4.7.dat'
localpath = downloadurl(url+filename, filename)
print(localpath) # local file path
## Download the USGS earthquake catalog
## Reference: https://earthquake.usgs.gov/fdsnws/event/1/
usgsfile = 'usgscat2.csv'
localpath2 = usgs_catalog(usgsfile, '2014-01-01', '2014-01-02') #, '-90','90','-180','180',minmag = '5')
print(localpath2)
dbusgs = cat.Database(usgsfile)
dbusgs.Import0(localpath2)
dbusgs.Info()
## Build the earthquake-catalog database
catname = 'CENCM4.7'
db2 = cat.Database(catname)
header = ['Year', 'Month','Day','Hour','Minute','Second','Latitude','Longitude', 'Depth','MagType','MagSize','Log']
db2.Import0(localpath, Header = header, Delimiter= ' ', flag = False)
db2.Info()
db2.SetField('LocCode','CENC')
db2.SetField('MagCode','CENC4.7')
# Filter events
# Search Area (China) using internal filter
lon = [70, 135]
lat = [15, 55]
db2.Filter('Latitude',lat[0],Opr='>=')
db2.Filter('Latitude',lat[1],Opr='<=')
db2.Filter('Longitude',lon[0],Opr='>=')
db2.Filter('Longitude',lon[1],Opr='<=')
db2.Info()
# Magnitude-time plots
exp.MagTimePlot(db2)
exp.MagTimeBars(db2)
exp.RateDensityPlot(db2)
# Gutenberg-Richter relation
enum, mbin =exp.GetKeyHisto(db2,'MagSize',Bnum=20, Norm=False)
minc= (max(mbin)-min(mbin))/10.
# fit the b-value
a,b = sem.MfdOptimize(enum, mbin, minc, max(mbin))
print('b-value=',b)
# magnitude-frequency distribution (recurrence) plot
sem.MfdPlot(a,b, max(mbin),Enum=enum, Ecum=np.cumsum(enum[::-1])[::-1], Mbin=mbin, Minc=[minc])
## Decluster (remove aftershocks)
dbm, log1 = declus.WindowSearch(db2, WinFun= declus.GardnerKnopoff, WinScale=1)
dbm.Info()
## Epicenter map
x1,y1,z1 = exp.GetHypocenter(db2)
x2,y2,z2 = exp.GetHypocenter(dbm)
cfg = {'Bounds': [70., 15., 135., 55.],
       'FigSize': [16., 12.],
       'Background': ['none',[0.9,0.8,0.6],[0.5,0.8,1.]],
       'Grid': [10., 10.]}
M = mapt.GeoMap(cfg)
M.BasePlot()
M.DrawBounds()
M.DrawGrid()
# plot epicenters: full catalog vs. declustered catalog
M.PointPlot(x1, y1, Set=['o','g',5,1], Label='全部')
M.PointPlot(x2, y2, Set=['*','r',2,1], Label='去余震')
M.Legend()
M.Title('中国及邻区震中分布图')
M.Show()
# Smooth the catalog on a grid inside a polygon
p = [(90.,20.),(90.,40.),(105.,40.),(105.,20.),(90.,20.)]
db3 = sel.AreaSelect(db2,p)
P = ct.Polygon()
P.Load(p)
db3.Info()
wkt = ct.XYToWkt(P.x, P.y)
xsm, ysm, asm = sm.SmoothMFD(db3, 1., wkt, Delta=0.5)
cfg1 = {'Bounds': [90., 20., 105., 40.],
        'FigSize': [10., 12.],
        'Background': ['none',[0.9,0.8,0.6],[0.5,0.8,1.]],
        'Grid': [5., 5.]}
m1 = mapt.GeoMap(cfg1)
m1.BasePlot()
m1.MeshPlot(xsm, ysm, asm)
#m1.AreaPlot(P.x, P.y, Set=['y',0.5,'k',1])
#m1.PointPlot(xsm, ysm, Set=['o','b',2,1], Label='Grid')
m1.PointPlot(x1, y1, Set=['o','g',5,1], Label='全部')
m1.DrawGrid()
m1.Title('川滇地区地震目录高斯平滑')
m1.Show()
## Get system paths and configure the QC run
#print(gi.EXAMPLES_PATH, gi.DATA_PATH, gi.TEMP_PATH)
nwcat = qc.pathname+'\\mwcat1900utc.csv'
print(qc.pathname+'\\cn-cat-mw.txt')
qc.pathname = gi.TEMP_PATH
qc.network = catname
qc.start_year = '1970'
qc.end_year = '2020'
qc.time_window = 2.0
qc.dist_window = 15.0
##gi.__verison__
gi.log.info(catname+'/catalog qctest')
## Catalog quality-control checks
pathname,prefix = qc.create_figures_new(qc.qcinit(), db2)
## Generate an HTML report
qc.generate_html(pathname,prefix,qc.to_show)
## Compare two catalogs
dbm.Header['Name'] = 'CENCm'
db2.Header['Name'] = 'CENC4.7'
## Build the second catalog database (Mw reference catalog)
catname = 'cnmw'
localpath = qc.pathname+'\\mwcat1900utc.csv'
db6 = cat.Database(catname)
header = ['Year', 'Month','Day','Hour','Minute','Second','Latitude','Longitude','MagSize','Depth','Log']
db6.Import0(localpath, Header = header, Delimiter= ',', flag = False)
db6.SetField('MagType', 'Mw')
db6.Info()
outputname = cp.create_figures_new(db = [db2, db6], pathname = gi.TEMP_PATH,
                       startyear = 1970 , endyear = 2015, dhrs = 8)
## Generate the comparison HTML report
no_output_matches = True
cp.generate_html(outputname, no_output_matches)
| [
"geoist.catalog.QCreport.qcinit",
"geoist.log.info",
"geoist.catalog.Smoothing.SmoothMFD",
"geoist.catalog.Selection.AreaSelect",
"numpy.cumsum",
"geoist.others.fetch_data._retrieve_file",
"geoist.catalog.QCreport.generate_html",
"geoist.catalog.Exploration.RateDensityPlot",
"geoist.catalog.Explorat... | [((905, 942), 'geoist.others.fetch_data._retrieve_file', 'downloadurl', (['(url + filename)', 'filename'], {}), '(url + filename, filename)\n', (916, 942), True, 'from geoist.others.fetch_data import _retrieve_file as downloadurl\n'), ((1069, 1119), 'geoist.others.fetch_data.usgs_catalog', 'usgs_catalog', (['usgsfile', '"""2014-01-01"""', '"""2014-01-02"""'], {}), "(usgsfile, '2014-01-01', '2014-01-02')\n", (1081, 1119), False, 'from geoist.others.fetch_data import usgs_catalog\n'), ((1189, 1211), 'geoist.catalog.Catalogue.Database', 'cat.Database', (['usgsfile'], {}), '(usgsfile)\n', (1201, 1211), True, 'from geoist.catalog import Catalogue as cat\n'), ((1295, 1316), 'geoist.catalog.Catalogue.Database', 'cat.Database', (['catname'], {}), '(catname)\n', (1307, 1316), True, 'from geoist.catalog import Catalogue as cat\n'), ((1843, 1863), 'geoist.catalog.Exploration.MagTimePlot', 'exp.MagTimePlot', (['db2'], {}), '(db2)\n', (1858, 1863), True, 'from geoist.catalog import Exploration as exp\n'), ((1864, 1884), 'geoist.catalog.Exploration.MagTimeBars', 'exp.MagTimeBars', (['db2'], {}), '(db2)\n', (1879, 1884), True, 'from geoist.catalog import Exploration as exp\n'), ((1885, 1909), 'geoist.catalog.Exploration.RateDensityPlot', 'exp.RateDensityPlot', (['db2'], {}), '(db2)\n', (1904, 1909), True, 'from geoist.catalog import Exploration as exp\n'), ((1930, 1982), 'geoist.catalog.Exploration.GetKeyHisto', 'exp.GetKeyHisto', (['db2', '"""MagSize"""'], {'Bnum': '(20)', 'Norm': '(False)'}), "(db2, 'MagSize', Bnum=20, Norm=False)\n", (1945, 1982), True, 'from geoist.catalog import Exploration as exp\n'), ((2216, 2282), 'geoist.catalog.Declusterer.WindowSearch', 'declus.WindowSearch', (['db2'], {'WinFun': 'declus.GardnerKnopoff', 'WinScale': '(1)'}), '(db2, WinFun=declus.GardnerKnopoff, WinScale=1)\n', (2235, 2282), True, 'from geoist.catalog import Declusterer as declus\n'), ((2315, 2337), 'geoist.catalog.Exploration.GetHypocenter', 
'exp.GetHypocenter', (['db2'], {}), '(db2)\n', (2332, 2337), True, 'from geoist.catalog import Exploration as exp\n'), ((2349, 2371), 'geoist.catalog.Exploration.GetHypocenter', 'exp.GetHypocenter', (['dbm'], {}), '(dbm)\n', (2366, 2371), True, 'from geoist.catalog import Exploration as exp\n'), ((2536, 2552), 'geoist.catalog.MapTools.GeoMap', 'mapt.GeoMap', (['cfg'], {}), '(cfg)\n', (2547, 2552), True, 'from geoist.catalog import MapTools as mapt\n'), ((2819, 2841), 'geoist.catalog.Selection.AreaSelect', 'sel.AreaSelect', (['db2', 'p'], {}), '(db2, p)\n', (2833, 2841), True, 'from geoist.catalog import Selection as sel\n'), ((2845, 2857), 'geoist.catalog.CatUtils.Polygon', 'ct.Polygon', ([], {}), '()\n', (2855, 2857), True, 'from geoist.catalog import CatUtils as ct\n'), ((2886, 2906), 'geoist.catalog.CatUtils.XYToWkt', 'ct.XYToWkt', (['P.x', 'P.y'], {}), '(P.x, P.y)\n', (2896, 2906), True, 'from geoist.catalog import CatUtils as ct\n'), ((2923, 2961), 'geoist.catalog.Smoothing.SmoothMFD', 'sm.SmoothMFD', (['db3', '(1.0)', 'wkt'], {'Delta': '(0.5)'}), '(db3, 1.0, wkt, Delta=0.5)\n', (2935, 2961), True, 'from geoist.catalog import Smoothing as sm\n'), ((3127, 3144), 'geoist.catalog.MapTools.GeoMap', 'mapt.GeoMap', (['cfg1'], {}), '(cfg1)\n', (3138, 3144), True, 'from geoist.catalog import MapTools as mapt\n'), ((3687, 3727), 'geoist.log.info', 'gi.log.info', (["(catname + '/catalog qctest')"], {}), "(catname + '/catalog qctest')\n", (3698, 3727), True, 'import geoist as gi\n'), ((3810, 3856), 'geoist.catalog.QCreport.generate_html', 'qc.generate_html', (['pathname', 'prefix', 'qc.to_show'], {}), '(pathname, prefix, qc.to_show)\n', (3826, 3856), True, 'from geoist.catalog import QCreport as qc\n'), ((4008, 4029), 'geoist.catalog.Catalogue.Database', 'cat.Database', (['catname'], {}), '(catname)\n', (4020, 4029), True, 'from geoist.catalog import Catalogue as cat\n'), ((4260, 4361), 'geoist.catalog.QCmulti.create_figures_new', 'cp.create_figures_new', ([], {'db': 
'[db2, db6]', 'pathname': 'gi.TEMP_PATH', 'startyear': '(1970)', 'endyear': '(2015)', 'dhrs': '(8)'}), '(db=[db2, db6], pathname=gi.TEMP_PATH, startyear=1970,\n endyear=2015, dhrs=8)\n', (4281, 4361), True, 'from geoist.catalog import QCmulti as cp\n'), ((4444, 4491), 'geoist.catalog.QCmulti.generate_html', 'cp.generate_html', (['outputname', 'no_output_matches'], {}), '(outputname, no_output_matches)\n', (4460, 4491), True, 'from geoist.catalog import QCmulti as cp\n'), ((3779, 3790), 'geoist.catalog.QCreport.qcinit', 'qc.qcinit', ([], {}), '()\n', (3788, 3790), True, 'from geoist.catalog import QCreport as qc\n'), ((2143, 2164), 'numpy.cumsum', 'np.cumsum', (['enum[::-1]'], {}), '(enum[::-1])\n', (2152, 2164), True, 'import numpy as np\n')] |
import numpy as np


def generate_frame(num_datapoints, size_side, thickness):
    """
    Generate a square frame of points centered at the origin.

    Each of the four edges carries ``num_datapoints // 4`` points whose
    off-edge coordinate is jittered uniformly within +/- thickness / 2
    (any remainder of ``num_datapoints`` modulo 4 is dropped).

    :param int num_datapoints: total number of points on the square
    :param int size_side: length of the side of the square
    :param float thickness: width of the band the points are drawn from
    :return: - x axis values (numpy array), and
             - y axis values (numpy array)
    """
    n = num_datapoints // 4
    half = size_side / 2
    edge = np.linspace(-half, half, num=n)
    # Each jitter array is reused twice: low_band is both the bottom-edge
    # y values and the left-edge x values; high_band likewise for top/right.
    low_band = np.random.uniform(low=-thickness / 2., high=thickness / 2., size=n) - half
    high_band = np.random.uniform(low=-thickness / 2., high=thickness / 2., size=n) + half
    both_edges = np.tile(edge, 2)
    x = np.concatenate((both_edges, low_band, high_band))
    y = np.concatenate((low_band, high_band, both_edges))
    return x, y
| [
"numpy.tile",
"numpy.linspace"
] | [((622, 678), 'numpy.linspace', 'np.linspace', (['(-size_side / 2)', '(size_side / 2)'], {'num': 'num_side'}), '(-size_side / 2, size_side / 2, num=num_side)\n', (633, 678), True, 'import numpy as np\n'), ((936, 955), 'numpy.tile', 'np.tile', (['avgline', '(2)'], {}), '(avgline, 2)\n', (943, 955), True, 'import numpy as np\n'), ((1038, 1057), 'numpy.tile', 'np.tile', (['avgline', '(2)'], {}), '(avgline, 2)\n', (1045, 1057), True, 'import numpy as np\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
sys.path.append("..")
import unittest
import numpy as np
from op_test_xpu import XPUOpTest
import paddle
from paddle import enable_static
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.op import Operator
from paddle.fluid.tests.unittests.op_test import (
OpTest, convert_float_to_uint16, convert_uint16_to_float)
from paddle import _C_ops
paddle.enable_static()
class TestSumOp(XPUOpTest):
    """Checks the XPU ``sum`` op: Out = x0 + x1 + x2 for three random inputs."""

    def setUp(self):
        self.op_type = "sum"
        # Pick the dtype once (the fp16 subclass overrides init_kernel_type).
        # The original called init_kernel_type() twice back-to-back.
        self.init_kernel_type()
        x0 = np.random.random((3, 40)).astype(self.dtype)
        x1 = np.random.random((3, 40)).astype(self.dtype)
        x2 = np.random.random((3, 40)).astype(self.dtype)
        self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
        y = x0 + x1 + x2
        self.outputs = {'Out': y}

    def init_kernel_type(self):
        self.dtype = np.float32

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['x0'], 'Out')
#----------- test fp16 -----------
class TestFP16SumOp(TestSumOp):
    """fp16 variant of the sum-op test, run on an XPU place."""

    def init_kernel_type(self):
        self.dtype = np.float16

    def test_check_output(self):
        xpu_place = core.XPUPlace(0)
        # fp16 needs a looser absolute tolerance.
        self.check_output_with_place(xpu_place, atol=2e-2)

    def test_check_grad(self):
        # FIXME: because of fp16 precision, max_relative_error is 0.15 here.
        xpu_place = core.XPUPlace(0)
        self.check_grad_with_place(
            xpu_place, ['x0'], 'Out', max_relative_error=0.15)
def create_test_sum_fp16_class(parent):
    """Derive an fp16 selected-rows sum test from *parent* and register it
    in this module's globals under the name "<parent>_SumFp16Test"."""

    class TestSumFp16Case(parent):
        def init_kernel_type(self):
            self.dtype = np.float16

        def test_w_is_selected_rows(self):
            xpu_place = core.XPUPlace(0)
            for use_inplace in (True, False):
                self.check_with_place(xpu_place, use_inplace)

    new_name = parent.__name__ + "_SumFp16Test"
    TestSumFp16Case.__name__ = new_name
    globals()[new_name] = TestSumFp16Case
class API_Test_Add_n(unittest.TestCase):
    """API-level checks for paddle.add_n in static-graph and dygraph modes."""

    def test_api(self):
        # Static-graph mode: elementwise 5 + 3 must equal 8 everywhere.
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            lhs = fluid.layers.fill_constant(
                shape=[2, 3], dtype='int64', value=5)
            rhs = fluid.layers.fill_constant(
                shape=[2, 3], dtype='int64', value=3)
            want = np.full((2, 3), 8.0)
            total = paddle.add_n([lhs, rhs])
            executor = fluid.Executor(fluid.XPUPlace(0))
            got = executor.run(fetch_list=[total])
            self.assertEqual((got == want).all(), True)
        # Dygraph mode: ones + ones must equal twos.
        with fluid.dygraph.guard():
            ones = paddle.ones(shape=[2, 3], dtype='float32')
            want = np.full((2, 3), 2.0)
            total = paddle.add_n([ones, ones])
            self.assertEqual((total.numpy() == want).all(), True)
class TestRaiseSumError(unittest.TestCase):
    """fluid.layers.sum must reject plain Python ints, int8 tensors, and a
    bare (non-list) variable argument with TypeError."""

    def test_errors(self):
        def call_with_plain_ints():
            fluid.layers.sum([11, 22])

        def call_with_int8_pair():
            a = fluid.data(name="input1", shape=[10], dtype="int8")
            b = fluid.data(name="input2", shape=[10], dtype="int8")
            fluid.layers.sum([a, b])

        def call_with_bare_variable():
            a = fluid.data(name="input1", shape=[10], dtype="int8")
            fluid.layers.sum(a)

        for bad_call in (call_with_plain_ints, call_with_int8_pair,
                         call_with_bare_variable):
            self.assertRaises(TypeError, bad_call)
class TestRaiseSumsError(unittest.TestCase):
    """fluid.layers.sums must raise TypeError for bad inputs and bad ``out``."""

    def test_errors(self):
        def call_with_plain_ints():
            fluid.layers.sums([11, 22])

        def call_with_int8_pair():
            a = fluid.data(name="input1", shape=[10], dtype="int8")
            b = fluid.data(name="input2", shape=[10], dtype="int8")
            fluid.layers.sums([a, b])

        def call_with_bare_variable():
            a = fluid.data(name="input1", shape=[10], dtype="int8")
            fluid.layers.sums(a)

        # NOTE(review): "flaot32" below looks like a typo for "float32",
        # reproduced from the original test — confirm whether it is intended.
        def call_with_list_out():
            a = fluid.data(name="input1", shape=[10], dtype="flaot32")
            b = fluid.data(name="input2", shape=[10], dtype="float32")
            fluid.layers.sums([a, b], out=[10])

        def call_with_int8_out():
            a = fluid.data(name="input1", shape=[10], dtype="flaot32")
            b = fluid.data(name="input2", shape=[10], dtype="float32")
            out = fluid.data(name="out", shape=[10], dtype="int8")
            fluid.layers.sums([a, b], out=out)

        for bad_call in (call_with_plain_ints, call_with_int8_pair,
                         call_with_bare_variable, call_with_list_out,
                         call_with_int8_out):
            self.assertRaises(TypeError, bad_call)
class TestSumOpError(unittest.TestCase):
    """The low-level sum op must reject an empty list and a list of None."""

    def test_errors(self):
        def call_with_empty_list():
            with fluid.dygraph.guard():
                fluid._C_ops.sum([])

        def call_with_none_entry():
            with fluid.dygraph.guard():
                fluid._C_ops.sum([None])

        self.assertRaises(Exception, call_with_empty_list)
        self.assertRaises(Exception, call_with_none_entry)
if __name__ == "__main__":
    # Switch paddle to static-graph mode, then run all tests in this module.
    enable_static()
    unittest.main()
| [
"sys.path.append",
"unittest.main",
"paddle.fluid.data",
"paddle.fluid._C_ops.sum",
"paddle.fluid.core.XPUPlace",
"paddle.enable_static",
"numpy.empty",
"paddle.fluid.dygraph.guard",
"paddle.add_n",
"paddle.ones",
"numpy.random.random",
"paddle.fluid.layers.fill_constant",
"paddle.fluid.XPUP... | [((662, 683), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (677, 683), False, 'import sys\n'), ((1039, 1061), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (1059, 1061), False, 'import paddle\n'), ((6191, 6206), 'paddle.enable_static', 'enable_static', ([], {}), '()\n', (6204, 6206), False, 'from paddle import enable_static\n'), ((6211, 6226), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6224, 6226), False, 'import unittest\n'), ((1886, 1902), 'paddle.fluid.core.XPUPlace', 'core.XPUPlace', (['(0)'], {}), '(0)\n', (1899, 1902), True, 'import paddle.fluid.core as core\n'), ((2143, 2159), 'paddle.fluid.core.XPUPlace', 'core.XPUPlace', (['(0)'], {}), '(0)\n', (2156, 2159), True, 'import paddle.fluid.core as core\n'), ((2515, 2531), 'paddle.fluid.core.XPUPlace', 'core.XPUPlace', (['(0)'], {}), '(0)\n', (2528, 2531), True, 'import paddle.fluid.core as core\n'), ((2982, 3046), 'paddle.fluid.layers.fill_constant', 'fluid.layers.fill_constant', ([], {'shape': '[2, 3]', 'dtype': '"""int64"""', 'value': '(5)'}), "(shape=[2, 3], dtype='int64', value=5)\n", (3008, 3046), True, 'import paddle.fluid as fluid\n'), ((3085, 3149), 'paddle.fluid.layers.fill_constant', 'fluid.layers.fill_constant', ([], {'shape': '[2, 3]', 'dtype': '"""int64"""', 'value': '(3)'}), "(shape=[2, 3], dtype='int64', value=3)\n", (3111, 3149), True, 'import paddle.fluid as fluid\n'), ((3197, 3213), 'numpy.empty', 'np.empty', (['(2, 3)'], {}), '((2, 3))\n', (3205, 3213), True, 'import numpy as np\n'), ((3274, 3304), 'paddle.add_n', 'paddle.add_n', (['[input0, input1]'], {}), '([input0, input1])\n', (3286, 3304), False, 'import paddle\n'), ((3495, 3516), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (3514, 3516), True, 'import paddle.fluid as fluid\n'), ((3539, 3581), 'paddle.ones', 'paddle.ones', ([], {'shape': '[2, 3]', 'dtype': '"""float32"""'}), "(shape=[2, 3], dtype='float32')\n", (3550, 3581), False, 
'import paddle\n'), ((3612, 3628), 'numpy.empty', 'np.empty', (['(2, 3)'], {}), '((2, 3))\n', (3620, 3628), True, 'import numpy as np\n'), ((3689, 3719), 'paddle.add_n', 'paddle.add_n', (['[input0, input0]'], {}), '([input0, input0])\n', (3701, 3719), False, 'import paddle\n'), ((3912, 3938), 'paddle.fluid.layers.sum', 'fluid.layers.sum', (['[11, 22]'], {}), '([11, 22])\n', (3928, 3938), True, 'import paddle.fluid as fluid\n'), ((4035, 4086), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""input1"""', 'shape': '[10]', 'dtype': '"""int8"""'}), "(name='input1', shape=[10], dtype='int8')\n", (4045, 4086), True, 'import paddle.fluid as fluid\n'), ((4107, 4158), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""input2"""', 'shape': '[10]', 'dtype': '"""int8"""'}), "(name='input2', shape=[10], dtype='int8')\n", (4117, 4158), True, 'import paddle.fluid as fluid\n'), ((4171, 4203), 'paddle.fluid.layers.sum', 'fluid.layers.sum', (['[data1, data2]'], {}), '([data1, data2])\n', (4187, 4203), True, 'import paddle.fluid as fluid\n'), ((4302, 4353), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""input1"""', 'shape': '[10]', 'dtype': '"""int8"""'}), "(name='input1', shape=[10], dtype='int8')\n", (4312, 4353), True, 'import paddle.fluid as fluid\n'), ((4366, 4389), 'paddle.fluid.layers.sum', 'fluid.layers.sum', (['data1'], {}), '(data1)\n', (4382, 4389), True, 'import paddle.fluid as fluid\n'), ((4552, 4579), 'paddle.fluid.layers.sums', 'fluid.layers.sums', (['[11, 22]'], {}), '([11, 22])\n', (4569, 4579), True, 'import paddle.fluid as fluid\n'), ((4676, 4727), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""input1"""', 'shape': '[10]', 'dtype': '"""int8"""'}), "(name='input1', shape=[10], dtype='int8')\n", (4686, 4727), True, 'import paddle.fluid as fluid\n'), ((4748, 4799), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""input2"""', 'shape': '[10]', 'dtype': '"""int8"""'}), "(name='input2', shape=[10], dtype='int8')\n", (4758, 4799), True, 'import 
paddle.fluid as fluid\n'), ((4812, 4845), 'paddle.fluid.layers.sums', 'fluid.layers.sums', (['[data1, data2]'], {}), '([data1, data2])\n', (4829, 4845), True, 'import paddle.fluid as fluid\n'), ((4944, 4995), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""input1"""', 'shape': '[10]', 'dtype': '"""int8"""'}), "(name='input1', shape=[10], dtype='int8')\n", (4954, 4995), True, 'import paddle.fluid as fluid\n'), ((5008, 5032), 'paddle.fluid.layers.sums', 'fluid.layers.sums', (['data1'], {}), '(data1)\n', (5025, 5032), True, 'import paddle.fluid as fluid\n'), ((5134, 5188), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""input1"""', 'shape': '[10]', 'dtype': '"""flaot32"""'}), "(name='input1', shape=[10], dtype='flaot32')\n", (5144, 5188), True, 'import paddle.fluid as fluid\n'), ((5209, 5263), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""input2"""', 'shape': '[10]', 'dtype': '"""float32"""'}), "(name='input2', shape=[10], dtype='float32')\n", (5219, 5263), True, 'import paddle.fluid as fluid\n'), ((5276, 5319), 'paddle.fluid.layers.sums', 'fluid.layers.sums', (['[data1, data2]'], {'out': '[10]'}), '([data1, data2], out=[10])\n', (5293, 5319), True, 'import paddle.fluid as fluid\n'), ((5424, 5478), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""input1"""', 'shape': '[10]', 'dtype': '"""flaot32"""'}), "(name='input1', shape=[10], dtype='flaot32')\n", (5434, 5478), True, 'import paddle.fluid as fluid\n'), ((5499, 5553), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""input2"""', 'shape': '[10]', 'dtype': '"""float32"""'}), "(name='input2', shape=[10], dtype='float32')\n", (5509, 5553), True, 'import paddle.fluid as fluid\n'), ((5572, 5620), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""out"""', 'shape': '[10]', 'dtype': '"""int8"""'}), "(name='out', shape=[10], dtype='int8')\n", (5582, 5620), True, 'import paddle.fluid as fluid\n'), ((5633, 5675), 'paddle.fluid.layers.sums', 'fluid.layers.sums', (['[data1, data2]'], {'out': 'out'}), 
'([data1, data2], out=out)\n', (5650, 5675), True, 'import paddle.fluid as fluid\n'), ((1219, 1244), 'numpy.random.random', 'np.random.random', (['(3, 40)'], {}), '((3, 40))\n', (1235, 1244), True, 'import numpy as np\n'), ((1277, 1302), 'numpy.random.random', 'np.random.random', (['(3, 40)'], {}), '((3, 40))\n', (1293, 1302), True, 'import numpy as np\n'), ((1335, 1360), 'numpy.random.random', 'np.random.random', (['(3, 40)'], {}), '((3, 40))\n', (1351, 1360), True, 'import numpy as np\n'), ((2926, 2941), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (2939, 2941), True, 'import paddle.fluid as fluid\n'), ((2943, 2958), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (2956, 2958), True, 'import paddle.fluid as fluid\n'), ((3338, 3355), 'paddle.fluid.XPUPlace', 'fluid.XPUPlace', (['(0)'], {}), '(0)\n', (3352, 3355), True, 'import paddle.fluid as fluid\n'), ((5854, 5875), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (5873, 5875), True, 'import paddle.fluid as fluid\n'), ((5893, 5913), 'paddle.fluid._C_ops.sum', 'fluid._C_ops.sum', (['[]'], {}), '([])\n', (5909, 5913), True, 'import paddle.fluid as fluid\n'), ((5971, 5992), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (5990, 5992), True, 'import paddle.fluid as fluid\n'), ((6010, 6034), 'paddle.fluid._C_ops.sum', 'fluid._C_ops.sum', (['[None]'], {}), '([None])\n', (6026, 6034), True, 'import paddle.fluid as fluid\n')] |
import torch
import datetime
import numpy
import random
from .opt import *
from .visualization import *
def initialize(args):
    """
    Prepare the runtime: seed RNGs for reproducibility (when requested),
    select the compute device, and construct the visualizer.

    Returns a `(device, visualizer)` tuple.
    """
    # Report the runtime environment up front.
    print(f"{datetime.datetime.now()} | Torch Version: {torch.__version__}")

    # A positive seed switches every RNG into deterministic mode.
    if args.seed > 0:
        print(f"Set to reproducibility mode with seed: {args.seed}")
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed_all(args.seed)
        numpy.random.seed(args.seed)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        random.seed(args.seed)

    # Parse the comma-separated GPU id list, keeping only non-negative ids;
    # fall back to CPU when CUDA is unavailable or no GPU was requested.
    gpu_ids = [int(token) for token in args.gpu.split(',') if int(token) >= 0]
    use_cuda = torch.cuda.is_available() and len(gpu_ids) > 0 and gpu_ids[0] >= 0
    device = torch.device(f"cuda:{gpu_ids[0]}" if use_cuda else "cpu")
    print(f"Training {args.name} for {args.epochs} epochs using a batch size of {args.batch_size} on {device}")

    # Create the visualizer; without a visdom server, visualisation is a no-op.
    if args.visdom is None:
        visualizer = NullVisualizer()
        args.visdom_iters = 0
    else:
        visualizer = VisdomVisualizer(args.name, args.visdom,
            count=min(4, args.batch_size))
    return device, visualizer
def init_optimizer(model, args):
    """
    Build the optimizer for `model` from the CLI arguments, optionally
    restoring a previously saved optimizer state from disk.

    Returns the ready-to-use optimizer.
    """
    params = OptimizerParameters(
        learning_rate=args.lr,
        momentum=args.momentum,
        momentum2=args.momentum2,
        epsilon=args.epsilon,
    )
    optimizer = get_optimizer(args.optimizer, model.parameters(), params)
    # Resume the optimizer (e.g. Adam moments) when a checkpoint was given.
    if args.opt_state is not None:
        saved = torch.load(args.opt_state)
        print(f"Loading previously saved optimizer state from {args.opt_state}")
        optimizer.load_state_dict(saved["optimizer_state_dict"])
    return optimizer
| [
"numpy.random.seed",
"torch.manual_seed",
"torch.load",
"torch.cuda.manual_seed_all",
"random.seed",
"torch.cuda.is_available",
"datetime.datetime.now"
] | [((370, 398), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (387, 398), False, 'import torch\n'), ((408, 445), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (434, 445), False, 'import torch\n'), ((455, 483), 'numpy.random.seed', 'numpy.random.seed', (['args.seed'], {}), '(args.seed)\n', (472, 483), False, 'import numpy\n'), ((603, 625), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (614, 625), False, 'import random\n'), ((1603, 1629), 'torch.load', 'torch.load', (['args.opt_state'], {}), '(args.opt_state)\n', (1613, 1629), False, 'import torch\n'), ((211, 234), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (232, 234), False, 'import datetime\n'), ((759, 784), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (782, 784), False, 'import torch\n')] |
"""
Functions for loading and handling Digital Earth Africa data.
"""
# Import required packages
import os
from osgeo import gdal
import requests
import zipfile
import warnings
import numpy as np
import xarray as xr
import pandas as pd
import datetime
import pytz
from collections import Counter
from datacube.utils import masking
from scipy.ndimage import binary_dilation
from odc.algo import mask_cleanup
from copy import deepcopy
import odc.algo
def _dc_query_only(**kw):
"""
Remove load-only parameters, the rest
can be passed to Query
Returns
=======
dict of query parameters
"""
def _impl(
measurements=None,
output_crs=None,
resolution=None,
resampling=None,
skip_broken_datasets=None,
dask_chunks=None,
fuse_func=None,
align=None,
datasets=None,
progress_cbk=None,
group_by=None,
**query,
):
return query
return _impl(**kw)
def _common_bands(dc, products):
"""
Takes a list of products and returns a list of measurements/bands
that are present in all products
Returns
-------
List of band names
"""
common = None
bands = None
for p in products:
p = dc.index.products.get_by_name(p)
if common is None:
common = set(p.measurements)
bands = list(p.measurements)
else:
common = common.intersection(set(p.measurements))
return [band for band in bands if band in common]
def load_ard(
    dc,
    products=None,
    min_gooddata=0.0,
    categories_to_mask_ls=dict(
        cloud="high_confidence", cloud_shadow="high_confidence"
    ),
    categories_to_mask_s2=[
        "cloud high probability",
        "cloud medium probability",
        "thin cirrus",
        "cloud shadows",
        "saturated or defective",
    ],
    categories_to_mask_s1=["invalid data"],
    mask_filters=None,
    mask_pixel_quality=True,
    ls7_slc_off=True,
    predicate=None,
    dtype="auto",
    verbose=True,
    **kwargs,
):
    """
    Loads analysis ready data.

    Loads and combines Landsat USGS Collections 2, Sentinel-2, and Sentinel-1 for
    multiple sensors (i.e. ls5t, ls7e, ls8c and ls9 for Landsat; s2a and s2b for Sentinel-2),
    optionally applies pixel quality masks, and drops time steps that
    contain greater than a minimum proportion of good quality (e.g. non-
    cloudy or shadowed) pixels.

    The function supports loading the following DE Africa products:

    Landsat:
        * ls5_sr ('sr' denotes surface reflectance)
        * ls7_sr
        * ls8_sr
        * ls9_sr
        * ls5_st ('st' denotes surface temperature)
        * ls7_st
        * ls8_st
        * ls9_st

    Sentinel-2:
        * s2_l2a

    Sentinel-1:
        * s1_rtc

    Last modified: Feb 2021

    Parameters
    ----------
    dc : datacube Datacube object
        The Datacube to connect to, i.e. `dc = datacube.Datacube()`.
        This allows you to also use development datacubes if required.
    products : list
        A list of product names to load data from. For example:

        * Landsat C2: ``['ls5_sr', 'ls7_sr', 'ls8_sr', 'ls9_sr']``
        * Sentinel-2: ``['s2_l2a']``
        * Sentinel-1: ``['s1_rtc']``
    min_gooddata : float, optional
        An optional float giving the minimum percentage of good quality
        pixels required for a satellite observation to be loaded.
        Defaults to 0.0 which will return all observations regardless of
        pixel quality (set to e.g. 0.99 to return only observations with
        more than 99% good quality pixels).
    categories_to_mask_ls : dict, optional
        An optional dictionary that is used to identify poor quality pixels
        for masking. This mask is used for both masking out low
        quality pixels (e.g. cloud or shadow), and for dropping
        observations entirely based on the `min_gooddata` calculation.
    categories_to_mask_s2 : list, optional
        An optional list of Sentinel-2 Scene Classification Layer (SCL) names
        that identify poor quality pixels for masking.
    categories_to_mask_s1 : list, optional
        An optional list of Sentinel-1 mask names that identify poor
        quality pixels for masking.
    mask_filters : iterable of tuples, optional
        Iterable tuples of morphological operations - ("<operation>", <radius>)
        to apply on mask, where:

        operation: string, can be one of these morphological operations:
            * ``'closing'`` = remove small holes in cloud - morphological closing
            * ``'opening'`` = shrinks away small areas of the mask
            * ``'dilation'`` = adds padding to the mask
            * ``'erosion'`` = shrinks bright regions and enlarges dark regions
        radius: int
        e.g. ``mask_filters=[('erosion', 5),("opening", 2),("dilation", 2)]``
    mask_pixel_quality : bool, optional
        An optional boolean indicating whether to apply the poor data
        mask to all observations that were not filtered out for having
        less good quality pixels than ``min_gooddata``. E.g. if
        ``min_gooddata=0.99``, the filtered observations may still contain
        up to 1% poor quality pixels. The default of ``False`` simply
        returns the resulting observations without masking out these
        pixels; ``True`` masks them and sets them to NaN using the poor data
        mask. This will convert numeric values to floating point values
        which can cause memory issues, set to False to prevent this.
    ls7_slc_off : bool, optional
        An optional boolean indicating whether to include data from
        after the Landsat 7 SLC failure (i.e. SLC-off). Defaults to
        ``True``, which keeps all Landsat 7 observations > May 31 2003.
    predicate : function, optional
        An optional function that can be passed in to restrict the
        datasets that are loaded by the function. A filter function
        should take a `datacube.model.Dataset` object as an input (i.e.
        as returned from `dc.find_datasets`), and return a boolean.
        For example, a filter function could be used to return True on
        only datasets acquired in January:
        ``dataset.time.begin.month == 1``
    dtype : string, optional
        An optional parameter that controls the data type/dtype that
        layers are coerced to after loading. Valid values: ''`native`'',
        ``'auto'``, ``'float{16|32|64}'``.
        When ``'auto'`` is used, the data will be
        converted to ``'float32'`` if masking is used, otherwise data will
        be returned in the native data type of the data. Be aware that
        if data is loaded in its native dtype, nodata and masked
        pixels will be returned with the data's native nodata value
        (typically ``-999``), not ``NaN``.
        NOTE: If loading Landsat, the data is automatically rescaled so
        'native' dtype will return a value error.
    verbose : bool, optional
        If True, print progress statements during loading
    **kwargs : dict, optional
        A set of keyword arguments to ``dc.load`` that define the
        spatiotemporal query used to extract data. This typically
        includes ``measurements``, ``x`, ``y``, ``time``, ``resolution``,
        ``resampling``, ``group_by`` and ``crs``. Keyword arguments can
        either be listed directly in the ``load_ard`` call like any
        other parameter (e.g. ``measurements=['red']``), or by
        passing in a query kwarg dictionary (e.g. ``**query``). For a
        list of possible options, see the ``dc.load`` documentation:
        https://datacube-core.readthedocs.io/en/latest/dev/api/generate/datacube.Datacube.load.html

    Returns
    -------
    combined_ds : xarray Dataset
        An xarray dataset containing only satellite observations that
        contains greater than `min_gooddata` proportion of good quality
        pixels.
    """

    #########
    # Setup #
    #########
    # prevent function altering original query object
    kwargs = deepcopy(kwargs)

    # We deal with `dask_chunks` separately
    dask_chunks = kwargs.pop("dask_chunks", None)
    requested_measurements = kwargs.pop("measurements", None)

    # Warn user if they combine lazy load with min_gooddata
    if verbose:
        if (min_gooddata > 0.0) and dask_chunks is not None:
            warnings.warn(
                "Setting 'min_gooddata' percentage to > 0.0 "
                "will cause dask arrays to compute when "
                "loading pixel-quality data to calculate "
                "'good pixel' percentage. This can "
                "slow the return of your dataset."
            )

    # Verify that products were provided and determine if Sentinel-2
    # or Landsat data is being loaded
    if not products:
        raise ValueError(
            "Please provide a list of product names to load data from. "
            "Valid options are: Landsat C2 SR: ['ls5_sr', 'ls7_sr', 'ls8_sr', 'ls9_sr'], or "
            "Landsat C2 ST: ['ls5_st', 'ls7_st', 'ls8_st', 'ls9_st'], or "
            "Sentinel-2: ['s2_l2a'], or"
            "Sentinel-1: ['s1_rtc'], or"
        )

    # convert products to list if user passed as a string
    if isinstance(products, str):
        products = [products]

    if all(["ls" in product for product in products]):
        product_type = "ls"
    elif all(["s2" in product for product in products]):
        product_type = "s2"
    elif all(["s1" in product for product in products]):
        product_type = "s1"
    else:
        # Previously a mixed product list fell through and raised a
        # confusing NameError further down; fail fast with a clear message.
        raise ValueError(
            "load_ard cannot load a mix of Landsat, Sentinel-2 and "
            "Sentinel-1 products in a single call; please pass products "
            "from a single sensor family"
        )

    # check if the landsat product is surface temperature
    st = False
    if (product_type == "ls") & (all(["st" in product for product in products])):
        st = True

    # Check some parameters before proceeding
    if (product_type == "ls") & (dtype == "native"):
        raise ValueError(
            "Cannot load Landsat bands in native dtype "
            "as values require rescaling which converts dtype to float"
        )

    if product_type == "ls":
        if any(k in categories_to_mask_ls for k in ("cirrus", "cirrus_confidence")):
            raise ValueError(
                "'cirrus' categories for the pixel quality mask"
                " are not supported by load_ard"
            )

    # If `measurements` are specified but do not include pixel quality bands,
    # add these to `measurements` according to collection
    if product_type == "ls":
        if verbose:
            print("Using pixel quality parameters for USGS Collection 2")
        fmask_band = "pixel_quality"
    elif product_type == "s2":
        if verbose:
            print("Using pixel quality parameters for Sentinel 2")
        fmask_band = "SCL"
    elif product_type == "s1":
        if verbose:
            print("Using pixel quality parameters for Sentinel 1")
        fmask_band = "mask"

    measurements = requested_measurements.copy() if requested_measurements else None

    # define a list of acceptable aliases to load landsat. We can't rely on 'common'
    # measurements as native band names have the same name for different measurements.
    ls_aliases = ["pixel_quality", "radiometric_saturation"]
    if st:
        ls_aliases = [
            "surface_temperature",
            "surface_temperature_quality",
            "atmospheric_transmittance",
            "thermal_radiance",
            "emissivity",
            "emissivity_stddev",
            "cloud_distance",
            "upwell_radiance",
            "downwell_radiance",
        ] + ls_aliases
    else:
        ls_aliases = ["red", "green", "blue", "nir", "swir_1", "swir_2"] + ls_aliases

    if measurements is not None:
        if product_type == "ls":
            # check we aren't loading aerosol bands from LS8
            aerosol_bands = [
                "aerosol_qa",
                "qa_aerosol",
                "atmos_opacity",
                "coastal_aerosol",
                "SR_QA_AEROSOL",
            ]
            if any(b in aerosol_bands for b in measurements):
                raise ValueError(
                    "load_ard doesn't support loading aerosol or "
                    "atmospeheric opacity related bands "
                    "for Landsat, instead use dc.load()"
                )

            # check measurements are in acceptable aliases list for landsat
            if set(measurements).issubset(ls_aliases):
                pass
            else:
                raise ValueError(
                    "load_ard does not support all band aliases for Landsat, "
                    "use only the following band names to load Landsat data: "
                    + str(ls_aliases)
                )

    # Deal with "load all" case: pick a set of bands common across
    # all products
    if measurements is None:
        if product_type == "ls":
            measurements = ls_aliases
        else:
            measurements = _common_bands(dc, products)

    # If `measurements` are specified but do not include pq, add.
    if measurements:
        if fmask_band not in measurements:
            measurements.append(fmask_band)

    # Get list of data and mask bands so that we can later exclude
    # mask bands from being masked themselves (also handle the case of rad_sat)
    data_bands = [
        band
        for band in measurements
        if band not in (fmask_band, "radiometric_saturation")
    ]
    mask_bands = [band for band in measurements if band not in data_bands]

    #################
    # Find datasets #
    #################

    # Pull out query params only to pass to dc.find_datasets
    query = _dc_query_only(**kwargs)

    # Extract datasets for each product using subset of dcload_kwargs
    dataset_list = []

    # Get list of datasets for each product
    if verbose:
        print("Finding datasets")
    for product in products:

        # Obtain list of datasets for product
        if verbose:
            print(f"    {product}")

        if product_type == "ls":
            # handle LS separately to S2/S1 due to collection_category
            # force the user to load Tier 1
            datasets = dc.find_datasets(
                product=product, collection_category='T1', **query
            )
        else:
            datasets = dc.find_datasets(product=product, **query)

        # Remove Landsat 7 SLC-off observations if ls7_slc_off=False
        if not ls7_slc_off and product in ["ls7_sr"]:
            if verbose:
                print("    Ignoring SLC-off observations for ls7")
            datasets = [
                i
                for i in datasets
                if i.time.begin < datetime.datetime(2003, 5, 31, tzinfo=pytz.UTC)
            ]

        # Add any returned datasets to list
        dataset_list.extend(datasets)

    # Raise exception if no datasets are returned
    if len(dataset_list) == 0:
        raise ValueError(
            "No data available for query: ensure that "
            "the products specified have data for the "
            "time and location requested"
        )

    # If predicate is specified, use this function to filter the list
    # of datasets prior to load (this now redundant as dc.load now supports
    # a predicate filter)
    if predicate:
        if verbose:
            print("Filtering datasets using filter function")
        dataset_list = [ds for ds in dataset_list if predicate(ds)]

    # Raise exception if filtering removes all datasets
    if len(dataset_list) == 0:
        raise ValueError("No data available after filtering with " "filter function")

    #############
    # Load data #
    #############
    # Note we always load using dask here so that
    # we can lazy load data before filtering by good data
    ds = dc.load(
        datasets=dataset_list,
        measurements=measurements,
        dask_chunks={} if dask_chunks is None else dask_chunks,
        **kwargs,
    )

    ####################
    # Filter good data #
    ####################

    # need to distinguish between products due to different
    # pq band properties

    # collection 2 USGS
    if product_type == "ls":
        mask, _ = masking.create_mask_value(
            ds[fmask_band].attrs["flags_definition"], **categories_to_mask_ls
        )

        pq_mask = (ds[fmask_band] & mask) != 0

        # only run if data bands are present
        if len(data_bands) > 0:
            # identify pixels that will become negative after rescaling (but not 0 values)
            invalid = (
                ((ds[data_bands] < (-1.0 * -0.2 / 0.0000275)) & (ds[data_bands] > 0))
                .to_array(dim="band")
                .any(dim="band")
            )

            # Merge masks. BUG FIX: the second argument was previously
            # `pq_mask` (a no-op self-OR), which silently dropped the
            # `invalid` negative-pixel mask computed above.
            pq_mask = pq_mask | invalid

    # sentinel 2
    if product_type == "s2":
        pq_mask = odc.algo.enum_to_bool(mask=ds[fmask_band],
                                        categories=categories_to_mask_s2)

    # sentinel 1
    if product_type == "s1":
        pq_mask = odc.algo.enum_to_bool(mask=ds[fmask_band],
                                        categories=categories_to_mask_s1)

    # The good data percentage calculation has to load in all `fmask`
    # data, which can be slow. If the user has chosen no filtering
    # by using the default `min_gooddata = 0`, we can skip this step
    # completely to save processing time
    if min_gooddata > 0.0:

        # Compute good data for each observation as % of total pixels.
        # Inverting the pq_mask for this because cloud=True in pq_mask
        # and we want to sum good pixels
        if verbose:
            print("Counting good quality pixels for each time step")
        data_perc = (~pq_mask).sum(axis=[1, 2], dtype="int32") / (
            pq_mask.shape[1] * pq_mask.shape[2]
        )

        keep = (data_perc >= min_gooddata).persist()

        # Filter by `min_gooddata` to drop low quality observations
        total_obs = len(ds.time)
        ds = ds.sel(time=keep)
        pq_mask = pq_mask.sel(time=keep)

        if verbose:
            print(
                f"Filtering to {len(ds.time)} out of {total_obs} "
                f"time steps with at least {min_gooddata:.1%} "
                f"good quality pixels"
            )

    # morphological filtering on cloud masks
    if (mask_filters is not None) and mask_pixel_quality:
        if verbose:
            print(f"Applying morphological filters to pq mask {mask_filters}")
        pq_mask = mask_cleanup(pq_mask, mask_filters=mask_filters)

    ###############
    # Apply masks #
    ###############

    # Generate good quality data mask
    mask = None
    if mask_pixel_quality:
        if verbose:
            print("Applying pixel quality/cloud mask")
        mask = pq_mask

    # Split into data/masks bands, as conversion to float and masking
    # should only be applied to data bands
    ds_data = ds[data_bands]
    ds_masks = ds[mask_bands]

    # Mask data if either of the above masks were generated
    if mask is not None:
        ds_data = odc.algo.erase_bad(ds_data, where=mask)

    # Automatically set dtype to either native or float32 depending
    # on whether masking was requested
    if dtype == "auto":
        dtype = "native" if mask is None else "float32"

    # Set nodata values using odc.algo tools to reduce peak memory
    # use when converting data dtype
    if dtype != "native":
        ds_data = odc.algo.to_float(ds_data, dtype=dtype)

    # Put data and mask bands back together
    attrs = ds.attrs
    ds = xr.merge([ds_data, ds_masks])
    ds.attrs.update(attrs)

    ###############
    # Return data #
    ###############

    # Drop bands not originally requested by user
    if requested_measurements:
        ds = ds[requested_measurements]

    # Apply the scale and offset factors to Collection 2 Landsat. We need
    # different factors for different bands. Also handle the case where
    # masking_pixel_quaity = False, in which case the dtype is still
    # in int, so we convert it to float
    if product_type == "ls":
        if verbose:
            print("Re-scaling Landsat C2 data")

        sr_bands = ["red", "green", "blue", "nir", "swir_1", "swir_2"]
        radiance_bands = ["thermal_radiance", "upwell_radiance", "downwell_radiance"]
        trans_emiss = ["atmospheric_transmittance", "emissivity", "emissivity_stddev"]
        qa = ["pixel_quality", "radiometric_saturation"]

        if not mask_pixel_quality:
            # set nodata to NaNs before rescaling
            # in the case where masking hasn't already done this
            for band in ds.data_vars:
                if band not in qa:
                    ds[band] = odc.algo.to_f32(ds[band])

        for band in ds.data_vars:
            if band == "cloud_distance":
                ds[band] = 0.01 * ds[band]

            if band == "surface_temperature_quality":
                ds[band] = 0.01 * ds[band]

            if band in radiance_bands:
                ds[band] = 0.001 * ds[band]

            if band in trans_emiss:
                ds[band] = 0.0001 * ds[band]

            if band in sr_bands:
                ds[band] = 2.75e-5 * ds[band] - 0.2

            if band == "surface_temperature":
                ds[band] = ds[band] * 0.00341802 + 149.0

        # add back attrs that are lost during scaling calcs
        for band in ds.data_vars:
            ds[band].attrs.update(attrs)

    # If user supplied dask_chunks, return data as a dask array without
    # actually loading it in
    if dask_chunks is not None:
        if verbose:
            print(f"Returning {len(ds.time)} time steps as a dask array")
        return ds

    else:
        if verbose:
            print(f"Loading {len(ds.time)} time steps")
        return ds.compute()
def array_to_geotiff(
    fname, data, geo_transform, projection, nodata_val=0, dtype=gdal.GDT_Float32
):
    """
    Create a single band GeoTIFF file with data from an array.

    Because this works with simple arrays rather than xarray datasets
    from DEA, it requires geotransform info (`(upleft_x, x_size,
    x_rotation, upleft_y, y_rotation, y_size)`) and projection data
    (in "WKT" format) for the output raster. These are typically
    obtained from an existing raster using the following GDAL calls:

    >>> from osgeo import gdal
    >>> gdal_dataset = gdal.Open(raster_path)
    >>> geotrans = gdal_dataset.GetGeoTransform()
    >>> prj = gdal_dataset.GetProjection()

    or alternatively, directly from an xarray dataset:

    >>> geotrans = xarraydataset.geobox.transform.to_gdal()
    >>> prj = xarraydataset.geobox.crs.wkt

    Parameters
    ----------
    fname : str
        Output geotiff file path including extension
    data : numpy array
        Input array to export as a geotiff
    geo_transform : tuple
        Geotransform for output raster; e.g. `(upleft_x, x_size,
        x_rotation, upleft_y, y_rotation, y_size)`
    projection : str
        Projection for output raster (in "WKT" format)
    nodata_val : int, optional
        Value to convert to nodata in the output raster; default 0
    dtype : gdal dtype object, optional
        Optionally set the dtype of the output raster; can be
        useful when exporting an array of float or integer values.
        Defaults to `gdal.GDT_Float32`
    """
    # Create a single-band raster sized to match the input array.
    rows, cols = data.shape
    gtiff_driver = gdal.GetDriverByName("GTiff")
    raster = gtiff_driver.Create(fname, cols, rows, 1, dtype)
    raster.SetGeoTransform(geo_transform)
    raster.SetProjection(projection)

    # Write the array into band 1 and flag the nodata value.
    out_band = raster.GetRasterBand(1)
    out_band.WriteArray(data)
    out_band.SetNoDataValue(nodata_val)

    # Dereferencing the dataset closes the file and flushes it to disk.
    raster = None
def mostcommon_crs(dc, product, query):
    """
    Takes a given query and returns the most common CRS for observations
    returned for that spatial extent. This can be useful when your study
    area lies on the boundary of two UTM zones, forcing you to decide
    which CRS to use for your `output_crs` in `dc.load`.

    Parameters
    ----------
    dc : datacube Datacube object
        The Datacube to connect to, i.e. `dc = datacube.Datacube()`.
        This allows you to also use development datacubes if required.
    product : str
        A product name to load CRSs from
    query : dict
        A datacube query including x, y and time range to assess for the
        most common CRS

    Returns
    -------
    str
        A EPSG string giving the most common CRS from all datasets returned
        by the query above
    """
    # Work on a copy so the caller's query dict is never mutated, and
    # strip load-only keys that find_datasets would reject.
    search_terms = deepcopy(query)
    search_terms.pop("dask_chunks", None)
    search_terms.pop("align", None)

    # Tally the CRS of every dataset matching the query.
    matching_datasets = dc.find_datasets(product=product, **search_terms)
    crs_counts = Counter(str(ds.crs) for ds in matching_datasets)
    crs_mostcommon = crs_counts.most_common(1)[0][0]

    # Warn user if multiple CRSs are encountered
    if len(crs_counts) > 1:
        warnings.warn(
            f"Multiple UTM zones {list(crs_counts.keys())} "
            f"were returned for this query. Defaulting to "
            f"the most common zone: {crs_mostcommon}",
            UserWarning,
        )

    return crs_mostcommon
def download_unzip(url, output_dir=None, remove_zip=True):
    """
    Downloads and unzips a .zip file from an external URL to a local
    directory.

    Parameters
    ----------
    url : str
        A string giving a URL path to the zip file you wish to download
        and unzip
    output_dir : str, optional
        An optional string giving the directory to unzip files into.
        Defaults to None, which will unzip files in the current working
        directory
    remove_zip : bool, optional
        An optional boolean indicating whether to remove the downloaded
        .zip file after files are unzipped. Defaults to True, which will
        delete the .zip file.
    """
    # The local file name mirrors the final URL path component.
    zip_name = os.path.basename(url)

    # Only .zip archives are supported.
    if not zip_name.endswith(".zip"):
        raise ValueError(
            f"The URL provided does not point to a .zip "
            f"file (e.g. {zip_name}). Please specify a "
            f"URL path to a valid .zip file"
        )

    # Fetch the archive and write it to the working directory.
    print(f"Downloading {zip_name}")
    response = requests.get(url)
    with open(zip_name, "wb") as local_file:
        local_file.write(response.content)

    # Extract the archive contents into output_dir (or the cwd).
    with zipfile.ZipFile(zip_name, "r") as archive:
        archive.extractall(output_dir)
        print(
            f"Unzipping output files to: "
            f"{output_dir if output_dir else os.getcwd()}"
        )

    # Optionally delete the downloaded archive once extracted.
    if remove_zip:
        os.remove(zip_name)
def wofs_fuser(dest, src):
    """
    Fuse two WOfS water measurements represented as `ndarray` objects,
    updating `dest` in place.

    Bit 0 of a WOfS measurement flags "no data": where `dest` is empty
    its pixels are copied from `src`, and where both arrays hold valid
    observations the bit flags are combined with bitwise OR.

    Note: this is a copy of the function located here:
    https://github.com/GeoscienceAustralia/digitalearthau/blob/develop/digitalearthau/utils.py
    """
    # BUG FIX: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `bool` is the documented, behaviour-identical replacement.
    empty = (dest & 1).astype(bool)
    both = ~empty & ~((src & 1).astype(bool))
    dest[empty] = src[empty]
    dest[both] |= src[both]
def dilate(array, dilation=10, invert=True):
    """
    Dilate a binary array by a specified nummber of pixels using a
    disk-like radial dilation.

    By default, invalid (e.g. False or 0) values are dilated. This is
    suitable for applications such as cloud masking (e.g. creating a
    buffer around cloudy or shadowed pixels). This functionality can
    be reversed by specifying `invert=False`.

    Parameters
    ----------
    array : array
        The binary array to dilate.
    dilation : int, optional
        An optional integer specifying the number of pixels to dilate
        by. Defaults to 10, which will dilate `array` by 10 pixels.
    invert : bool, optional
        An optional boolean specifying whether to invert the binary
        array prior to dilation. The default is True, which dilates the
        invalid values in the array (e.g. False or 0 values).

    Returns
    -------
    array
        An array of the same shape as `array`, with valid data pixels
        dilated by the number of pixels specified by `dilation`.
    """
    y, x = np.ogrid[
        -dilation : (dilation + 1),
        -dilation : (dilation + 1),
    ]

    # disk-like radial dilation
    kernel = (x * x) + (y * y) <= (dilation + 0.5) ** 2

    # If invert=True, invert True values to False etc
    if invert:
        array = ~array

    # BUG FIX: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
    # cast with the builtin `bool` instead. The kernel is reshaped to
    # (1, k, k) so no dilation occurs across the leading (time) axis.
    return ~binary_dilation(
        array.astype(bool), structure=kernel.reshape((1,) + kernel.shape)
    )
def _select_along_axis(values, idx, axis):
other_ind = np.ix_(*[np.arange(s) for s in idx.shape])
sl = other_ind[:axis] + (idx,) + other_ind[axis:]
return values[sl]
def first(array: xr.DataArray, dim: str, index_name: str = None) -> xr.DataArray:
    """
    Reduce `array` along `dim` to the first occurring non-null value.

    Parameters
    ----------
    array : xr.DataArray
        The array to search.
    dim : str
        Name of the dimension reduced by locating the first non-null value.
    index_name : str, optional
        If given, a coordinate of this name is added recording the integer
        position along `dim` where each value was found.

    Returns
    -------
    reduced : xr.DataArray
        The first non-null values. The `dim` dimension is removed and
        replaced by a same-named coordinate holding the `dim` value at
        which each result was found.
    """
    axis = array.get_axis_num(dim)
    valid = ~pd.isnull(array)
    # argmax over booleans returns the position of the first True
    first_idx = np.argmax(valid, axis=axis)
    reduced = array.reduce(_select_along_axis, idx=first_idx, axis=axis)
    idx_coord = xr.DataArray(first_idx, dims=reduced.dims)
    reduced[dim] = array[dim].isel({dim: idx_coord})
    if index_name is not None:
        reduced[index_name] = xr.DataArray(first_idx, dims=reduced.dims)
    return reduced
def last(array: xr.DataArray, dim: str, index_name: str = None) -> xr.DataArray:
    """
    Reduce `array` along `dim` to the last occurring non-null value.

    Parameters
    ----------
    array : xr.DataArray
        The array to search.
    dim : str
        Name of the dimension reduced by locating the last non-null value.
    index_name : str, optional
        If given, a coordinate of this name is added recording the integer
        position along `dim` where each value was found.

    Returns
    -------
    reduced : xr.DataArray
        The last non-null values. The `dim` dimension is removed and
        replaced by a same-named coordinate holding the `dim` value at
        which each result was found.
    """
    axis = array.get_axis_num(dim)
    # view the validity mask reversed along `axis` so argmax finds the
    # LAST True; the -1 - i form converts it back to a negative index
    reverse_slices = (slice(None),) * axis + (slice(None, None, -1),)
    valid = ~pd.isnull(array)[reverse_slices]
    last_idx = -1 - np.argmax(valid, axis=axis)
    reduced = array.reduce(_select_along_axis, idx=last_idx, axis=axis)
    idx_coord = xr.DataArray(last_idx, dims=reduced.dims)
    reduced[dim] = array[dim].isel({dim: idx_coord})
    if index_name is not None:
        reduced[index_name] = xr.DataArray(last_idx, dims=reduced.dims)
    return reduced
def nearest(
    array: xr.DataArray, dim: str, target, index_name: str = None
) -> xr.DataArray:
    """
    Find, per pixel, the non-null value nearest to a target label along
    the given dimension.

    E.g. for a DataArray with dimensions ('time', 'x', 'y'),
    ``nearest(array, 'time', '2017-03-12')`` returns an ('x', 'y') array
    whose values are the non-null data closest in time to that date for
    each pixel. The returned array carries a `dim` coordinate giving, per
    pixel, where the nearest value was found.

    Parameters
    ----------
    array : xr.DataArray
        The array to search.
    dim : str
        Name of the dimension to search for the target label.
    target : same type as array[dim]
        The label to look up along `dim`.
    index_name : str, optional
        If given, a coordinate of this name is added recording the index
        along `dim` where the nearest value was found.

    Returns
    -------
    nearest_array : xr.DataArray
        The nearest non-null values, with `dim` replaced by a same-named
        coordinate of the labels actually selected.
    """
    # split the array at the target label and reduce each half towards it;
    # an empty half is represented as None
    da_before = array.sel({dim: slice(None, target)})
    da_after = array.sel({dim: slice(target, None)})
    da_before = last(da_before, dim, index_name) if da_before[dim].shape[0] else None
    da_after = first(da_after, dim, index_name) if da_after[dim].shape[0] else None
    if da_before is None and da_after is not None:
        return da_after
    if da_after is None and da_before is not None:
        return da_before
    # cast the label to the coordinate dtype so the distance arithmetic works
    target = array[dim].dtype.type(target)
    before_is_closer = abs(target - da_before[dim]) < abs(target - da_after[dim])
    nearest_array = xr.where(before_is_closer, da_before, da_after)
    nearest_array[dim] = xr.where(before_is_closer, da_before[dim], da_after[dim])
    if index_name is not None:
        nearest_array[index_name] = xr.where(
            before_is_closer, da_before[index_name], da_after[index_name]
        )
    return nearest_array
| [
"copy.deepcopy",
"datacube.utils.masking.create_mask_value",
"zipfile.ZipFile",
"os.remove",
"os.path.basename",
"os.getcwd",
"warnings.warn",
"pandas.isnull",
"xarray.merge",
"odc.algo.mask_cleanup",
"datetime.datetime",
"numpy.arange",
"xarray.DataArray",
"requests.get",
"collections.C... | [((8105, 8121), 'copy.deepcopy', 'deepcopy', (['kwargs'], {}), '(kwargs)\n', (8113, 8121), False, 'from copy import deepcopy\n'), ((19625, 19654), 'xarray.merge', 'xr.merge', (['[ds_data, ds_masks]'], {}), '([ds_data, ds_masks])\n', (19633, 19654), True, 'import xarray as xr\n'), ((23457, 23486), 'osgeo.gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (23477, 23486), False, 'from osgeo import gdal\n'), ((24853, 24868), 'copy.deepcopy', 'deepcopy', (['query'], {}), '(query)\n', (24861, 24868), False, 'from copy import deepcopy\n'), ((25226, 25243), 'collections.Counter', 'Counter', (['crs_list'], {}), '(crs_list)\n', (25233, 25243), False, 'from collections import Counter\n'), ((26386, 26407), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (26402, 26407), False, 'import os\n'), ((26767, 26784), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (26779, 26784), False, 'import requests\n'), ((33457, 33504), 'xarray.where', 'xr.where', (['is_before_closer', 'da_before', 'da_after'], {}), '(is_before_closer, da_before, da_after)\n', (33465, 33504), True, 'import xarray as xr\n'), ((33530, 33587), 'xarray.where', 'xr.where', (['is_before_closer', 'da_before[dim]', 'da_after[dim]'], {}), '(is_before_closer, da_before[dim], da_after[dim])\n', (33538, 33587), True, 'import xarray as xr\n'), ((16191, 16288), 'datacube.utils.masking.create_mask_value', 'masking.create_mask_value', (["ds[fmask_band].attrs['flags_definition']"], {}), "(ds[fmask_band].attrs['flags_definition'], **\n categories_to_mask_ls)\n", (16216, 16288), False, 'from datacube.utils import masking\n'), ((16805, 16843), 'xarray.ufuncs.logical_or', 'xr.ufuncs.logical_or', (['pq_mask', 'pq_mask'], {}), '(pq_mask, pq_mask)\n', (16825, 16843), True, 'import xarray as xr\n'), ((18562, 18610), 'odc.algo.mask_cleanup', 'mask_cleanup', (['pq_mask'], {'mask_filters': 'mask_filters'}), '(pq_mask, mask_filters=mask_filters)\n', (18574, 
18610), False, 'from odc.algo import mask_cleanup\n'), ((26888, 26918), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_name', '"""r"""'], {}), "(zip_name, 'r')\n", (26903, 26918), False, 'import zipfile\n'), ((27150, 27169), 'os.remove', 'os.remove', (['zip_name'], {}), '(zip_name)\n', (27159, 27169), False, 'import os\n'), ((30163, 30205), 'xarray.DataArray', 'xr.DataArray', (['idx_first'], {'dims': 'reduced.dims'}), '(idx_first, dims=reduced.dims)\n', (30175, 30205), True, 'import xarray as xr\n'), ((31395, 31436), 'xarray.DataArray', 'xr.DataArray', (['idx_last'], {'dims': 'reduced.dims'}), '(idx_last, dims=reduced.dims)\n', (31407, 31436), True, 'import xarray as xr\n'), ((33655, 33726), 'xarray.where', 'xr.where', (['is_before_closer', 'da_before[index_name]', 'da_after[index_name]'], {}), '(is_before_closer, da_before[index_name], da_after[index_name])\n', (33663, 33726), True, 'import xarray as xr\n'), ((8429, 8644), 'warnings.warn', 'warnings.warn', (['"""Setting \'min_gooddata\' percentage to > 0.0 will cause dask arrays to compute when loading pixel-quality data to calculate \'good pixel\' percentage. This can slow the return of your dataset."""'], {}), '(\n "Setting \'min_gooddata\' percentage to > 0.0 will cause dask arrays to compute when loading pixel-quality data to calculate \'good pixel\' percentage. 
This can slow the return of your dataset."\n )\n', (8442, 8644), False, 'import warnings\n'), ((29914, 29930), 'pandas.isnull', 'pd.isnull', (['array'], {}), '(array)\n', (29923, 29930), True, 'import pandas as pd\n'), ((30057, 30099), 'xarray.DataArray', 'xr.DataArray', (['idx_first'], {'dims': 'reduced.dims'}), '(idx_first, dims=reduced.dims)\n', (30069, 30099), True, 'import xarray as xr\n'), ((31290, 31331), 'xarray.DataArray', 'xr.DataArray', (['idx_last'], {'dims': 'reduced.dims'}), '(idx_last, dims=reduced.dims)\n', (31302, 31331), True, 'import xarray as xr\n'), ((29115, 29127), 'numpy.arange', 'np.arange', (['s'], {}), '(s)\n', (29124, 29127), True, 'import numpy as np\n'), ((31143, 31159), 'pandas.isnull', 'pd.isnull', (['array'], {}), '(array)\n', (31152, 31159), True, 'import pandas as pd\n'), ((14688, 14735), 'datetime.datetime', 'datetime.datetime', (['(2003)', '(5)', '(31)'], {'tzinfo': 'pytz.UTC'}), '(2003, 5, 31, tzinfo=pytz.UTC)\n', (14705, 14735), False, 'import datetime\n'), ((27073, 27084), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (27082, 27084), False, 'import os\n')] |
import glfw
from OpenGL.GL import *
import numpy as np
from OpenGL.GLU import *
def render():
    """Draw one frame: a blue base box carrying a red arm which in turn
    carries a green arm, linked through nested model transformations.

    The glPushMatrix/glPopMatrix stack makes each child inherit its
    parent's transform, so the arms translate with the base and the green
    arm rotates about the red arm's tip.
    """
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glEnable(GL_DEPTH_TEST)
    # orthographic camera covering [-2, 2] in x and y
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    glOrtho(-2,2, -2,2, -1,1)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    drawFrame()
    # animation parameter: seconds elapsed since glfw.init()
    t = glfw.get_time()
    # blue base transformation: oscillate left/right with sin(t)
    glPushMatrix()
    glTranslatef(np.sin(t), 0, 0)
    drawFrame()
    # blue base drawing
    glPushMatrix()
    glScalef(.2, .2, .2)
    glColor3ub(0, 0, 255)
    drawBox()
    glPopMatrix()
    # red arm transformation: spin about z (t radians -> degrees), then
    # offset along x; .01 in z keeps it drawn above the base
    glPushMatrix()
    glRotatef(t*(180/np.pi), 0, 0, 1)
    glTranslatef(.5, 0, .01)
    drawFrame()
    # red arm drawing
    glPushMatrix()
    glScalef(.5, .1, .1)
    glColor3ub(255, 0, 0)
    drawBox()
    glPopMatrix()
    # green arm transformation: move to the red arm's tip, then spin
    glPushMatrix()
    glTranslatef(.5, 0, .01)
    glRotatef(t*(180/np.pi), 0, 0, 1)
    drawFrame()
    # green arm drawing
    glPushMatrix()
    glScalef(.2, .2, .2)
    glColor3ub(0, 255, 0)
    drawBox()
    glPopMatrix()
    glPopMatrix()
    glPopMatrix()
    glPopMatrix()
def drawBox():
    """Draw a 2x2 square quad centred on the origin in the z=0 plane."""
    corners = ((1, 1, 0.), (-1, 1, 0.), (-1, -1, 0.), (1, -1, 0.))
    glBegin(GL_QUADS)
    for corner in corners:
        glVertex3fv(np.array(corner))
    glEnd()
def drawFrame():
    """Draw the local coordinate axes: x in red, y in green, z in blue."""
    axes = (
        ((255, 0, 0), (1., 0., 0.)),
        ((0, 255, 0), (0., 1., 0.)),
        ((0, 0, 255), (0., 0., 1.)),
    )
    origin = np.array([0., 0., 0.])
    glBegin(GL_LINES)
    for color, tip in axes:
        glColor3ub(*color)
        glVertex3fv(origin)
        glVertex3fv(np.array(tip))
    glEnd()
def main():
    """Create a 480x480 GLFW window and run the render loop until closed."""
    if not glfw.init():
        return
    window = glfw.create_window(480,480,'2018008659', None,None)
    if not window:
        # window creation failed: release GLFW resources before bailing out
        glfw.terminate()
        return
    glfw.make_context_current(window)
    # vsync: swap at most once per display refresh
    glfw.swap_interval(1)
    while not glfw.window_should_close(window):
        glfw.poll_events()
        render()
        glfw.swap_buffers(window)
    glfw.terminate()
# Script entry point: launch the viewer when executed directly.
if __name__ == "__main__":
    main()
| [
"glfw.swap_interval",
"glfw.poll_events",
"glfw.make_context_current",
"glfw.get_time",
"glfw.window_should_close",
"glfw.init",
"numpy.sin",
"numpy.array",
"glfw.terminate",
"glfw.swap_buffers",
"glfw.create_window"
] | [((344, 359), 'glfw.get_time', 'glfw.get_time', ([], {}), '()\n', (357, 359), False, 'import glfw\n'), ((1836, 1890), 'glfw.create_window', 'glfw.create_window', (['(480)', '(480)', '"""2018008659"""', 'None', 'None'], {}), "(480, 480, '2018008659', None, None)\n", (1854, 1890), False, 'import glfw\n'), ((1951, 1984), 'glfw.make_context_current', 'glfw.make_context_current', (['window'], {}), '(window)\n', (1976, 1984), False, 'import glfw\n'), ((1989, 2010), 'glfw.swap_interval', 'glfw.swap_interval', (['(1)'], {}), '(1)\n', (2007, 2010), False, 'import glfw\n'), ((2151, 2167), 'glfw.terminate', 'glfw.terminate', ([], {}), '()\n', (2165, 2167), False, 'import glfw\n'), ((428, 437), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (434, 437), True, 'import numpy as np\n'), ((1215, 1236), 'numpy.array', 'np.array', (['[1, 1, 0.0]'], {}), '([1, 1, 0.0])\n', (1223, 1236), True, 'import numpy as np\n'), ((1251, 1273), 'numpy.array', 'np.array', (['[-1, 1, 0.0]'], {}), '([-1, 1, 0.0])\n', (1259, 1273), True, 'import numpy as np\n'), ((1288, 1311), 'numpy.array', 'np.array', (['[-1, -1, 0.0]'], {}), '([-1, -1, 0.0])\n', (1296, 1311), True, 'import numpy as np\n'), ((1326, 1348), 'numpy.array', 'np.array', (['[1, -1, 0.0]'], {}), '([1, -1, 0.0])\n', (1334, 1348), True, 'import numpy as np\n'), ((1496, 1521), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1504, 1521), True, 'import numpy as np\n'), ((1534, 1559), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (1542, 1559), True, 'import numpy as np\n'), ((1598, 1623), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1606, 1623), True, 'import numpy as np\n'), ((1636, 1661), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (1644, 1661), True, 'import numpy as np\n'), ((1700, 1723), 'numpy.array', 'np.array', (['[0.0, 0.0, 0]'], {}), '([0.0, 0.0, 0])\n', (1708, 1723), True, 'import numpy as np\n'), ((1737, 
1762), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (1745, 1762), True, 'import numpy as np\n'), ((1795, 1806), 'glfw.init', 'glfw.init', ([], {}), '()\n', (1804, 1806), False, 'import glfw\n'), ((1915, 1931), 'glfw.terminate', 'glfw.terminate', ([], {}), '()\n', (1929, 1931), False, 'import glfw\n'), ((2026, 2058), 'glfw.window_should_close', 'glfw.window_should_close', (['window'], {}), '(window)\n', (2050, 2058), False, 'import glfw\n'), ((2068, 2086), 'glfw.poll_events', 'glfw.poll_events', ([], {}), '()\n', (2084, 2086), False, 'import glfw\n'), ((2112, 2137), 'glfw.swap_buffers', 'glfw.swap_buffers', (['window'], {}), '(window)\n', (2129, 2137), False, 'import glfw\n')] |
import numpy as np
import heapq
from typing import Union
class Graph:
    """Undirected weighted graph backed by an adjacency matrix, with Prim's
    algorithm for minimum-spanning-tree construction.

    A zero entry in the adjacency matrix means "no edge"; the matrix is
    assumed symmetric.
    """

    def __init__(self, adjacency_mat: Union[np.ndarray, str]):
        """ Unlike project 2, this Graph class takes an adjacency matrix as input.

        `adjacency_mat` can either be a 2D numpy array of floats or the path
        to a CSV file containing such an array. We assume `adjacency_mat`
        corresponds to the adjacency matrix of an undirected graph.
        """
        if isinstance(adjacency_mat, str):
            self.adj_mat = self._load_adjacency_matrix_from_csv(adjacency_mat)
        elif isinstance(adjacency_mat, np.ndarray):
            self.adj_mat = adjacency_mat
        else:
            raise TypeError('Input must be a valid path or an adjacency matrix')
        # populated by construct_mst()
        self.mst = None

    def _load_adjacency_matrix_from_csv(self, path: str) -> np.ndarray:
        """Read a comma-separated adjacency matrix from `path`."""
        with open(path) as f:
            return np.loadtxt(f, delimiter=',')

    def construct_mst(self):
        """ Given `self.adj_mat`, the adjacency matrix of a connected undirected
        graph, run Prim's algorithm and store the MST's adjacency matrix in
        `self.mst`.

        Bug fix: the previous version pushed *every* adjacency entry of a
        newly visited vertex onto the heap, including zeros (absent edges
        and self-loops). Those zero tuples popped first and could attach an
        unvisited vertex through a non-existent zero-weight "edge",
        yielding an incorrect tree. Only real edges to unvisited vertices
        are pushed now, and the loop also terminates cleanly instead of
        popping an empty heap if the graph is disconnected.
        """
        self.mst = np.zeros_like(self.adj_mat)
        n_nodes = self.adj_mat.shape[0]
        start_node = 0
        visited = {start_node}
        # candidate edges as (weight, from_node, to_node); heapq orders by weight
        frontier = [(weight, start_node, node)
                    for node, weight in enumerate(self.adj_mat[start_node])
                    if weight != 0]
        heapq.heapify(frontier)
        while frontier and len(visited) < n_nodes:
            weight, u, v = heapq.heappop(frontier)
            if v in visited:
                continue
            # record the edge symmetrically: the MST is undirected
            self.mst[u][v] = weight
            self.mst[v][u] = weight
            visited.add(v)
            # extend the frontier with v's real edges to unvisited vertices
            for nxt, w in enumerate(self.adj_mat[v]):
                if w != 0 and nxt not in visited:
                    heapq.heappush(frontier, (w, v, nxt))
| [
"numpy.zeros_like",
"heapq.heappush",
"heapq.heapify",
"heapq.heappop",
"numpy.loadtxt"
] | [((1599, 1626), 'numpy.zeros_like', 'np.zeros_like', (['self.adj_mat'], {}), '(self.adj_mat)\n', (1612, 1626), True, 'import numpy as np\n'), ((2124, 2151), 'heapq.heapify', 'heapq.heapify', (['logged_edges'], {}), '(logged_edges)\n', (2137, 2151), False, 'import heapq\n'), ((925, 953), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (935, 953), True, 'import numpy as np\n'), ((2345, 2372), 'heapq.heappop', 'heapq.heappop', (['logged_edges'], {}), '(logged_edges)\n', (2358, 2372), False, 'import heapq\n'), ((3256, 3330), 'heapq.heappush', 'heapq.heappush', (['logged_edges', '(self.adj_mat[end, new_node], end, new_node)'], {}), '(logged_edges, (self.adj_mat[end, new_node], end, new_node))\n', (3270, 3330), False, 'import heapq\n')] |
from .common_setup import *
import os
from ..callbacks import DatapackStoreCallback, GetLearnIndices, StoreHyperparameters, callback_sequence, PlotRhat, PlotEss
from ..misc import make_example_datapack
from ..settings import float_type,dist_type
from threading import Lock
import tensorflow as tf
import numpy as np
def test_PlotRhat(tf_session):
    """Smoke test: the PlotRhat callback builds an op that runs cleanly."""
    with tf_session.graph.as_default():
        out_dir = os.path.join(TEST_FOLDER, 'test_callbacks')
        cb = PlotRhat(lock=Lock(), plot_directory=out_dir)
        rhat_sample = (np.random.uniform(size=1000) + 1) ** 2.
        res = cb(0, 1, rhat_sample, 'test')
        print(tf_session.run(res))
def test_PlotEss(tf_session):
    """Smoke test: the PlotEss callback builds an op that runs cleanly."""
    with tf_session.graph.as_default():
        out_dir = os.path.join(TEST_FOLDER, 'test_callbacks')
        cb = PlotEss(lock=Lock(), plot_directory=out_dir)
        ess_sample = (np.random.uniform(size=1000) + 1) ** 2.
        res = cb(0, 1, ess_sample, 'test')
        print(tf_session.run(res))
def test_callback_sequence(tf_session):
    """callback_sequence should chain GetLearnIndices and
    StoreHyperparameters into one runnable group of ops."""
    datapack = make_example_datapack(
        10, 2, 2, clobber=True,
        name=os.path.join(TEST_FOLDER, "test_callbacks_data.h5"))
    antenna_labels, _ = datapack.antennas
    _, antennas = datapack.get_antennas(antenna_labels)
    Xa = antennas.cartesian.xyz.to(dist_type).value.T.astype(np.float64)
    with tf_session.graph.as_default():
        # start from a clean hyperparameter store
        store_file = os.path.join(TEST_FOLDER, 'test_hyperparam_store.npz')
        if os.path.exists(store_file):
            os.unlink(store_file)
        callbacks = [GetLearnIndices(dist_cutoff=0.3),
                     StoreHyperparameters(store_file=store_file)]
        args = [[tf.convert_to_tensor(Xa, dtype=float_type)],
                (1000., [0., 1., 2., 3., 4.], [0.], [0.])]
        tf_session.run(callback_sequence(callbacks, args))
def test_datapack_store(tf_session):
    """DatapackStoreCallback should write the supplied array back into the
    datapack's phase table and report the number of bytes handled."""
    datapack = make_example_datapack(10,2,2,clobber=True, name=os.path.join(TEST_FOLDER,"test_callbacks_data.h5"))
    datapack.switch_solset('sol000')
    phase, axes = datapack.phase
    # constant array so the round-trip comparison below is deterministic
    store_array = np.ones_like(phase)
    datapack.phase = store_array
    with tf_session.graph.as_default():
        # no dir/ant/freq/pol slicing: store across the full axes
        callback = DatapackStoreCallback(datapack,'sol000', 'phase',
                             dir=None,
                             ant=None,
                             freq=None,
                             pol=None)
        array = tf.convert_to_tensor(store_array, tf.float64)
        store_op = callback(0, 2, array)
        # the op's first output is the stored object's size in bytes
        assert tf_session.run(store_op)[0] == store_array.__sizeof__()
    with datapack:
        stored, _ = datapack.phase
    # the datapack should now hold exactly what the callback was given
    assert np.all(np.isclose(stored, store_array))
def test_get_learn_indices(tf_session):
    """GetLearnIndices should select antenna indices whose pairwise
    separations fall under the dist_cutoff."""
    datapack = make_example_datapack(
        10, 2, 2, clobber=True,
        name=os.path.join(TEST_FOLDER, "test_callbacks_data.h5"))
    antenna_labels, _ = datapack.antennas
    _, antennas = datapack.get_antennas(antenna_labels)
    Xa = antennas.cartesian.xyz.to(dist_type).value.T.astype(np.float64)
    with tf_session.graph.as_default():
        callback = GetLearnIndices(dist_cutoff=0.3)
        result = callback(tf.convert_to_tensor(Xa, dtype=float_type))
        keep = tf_session.run(result)[0]
    Xa = Xa[keep, :]
    # every pairwise distance among the kept antennas stays below the cutoff
    for i in range(Xa.shape[0]):
        separations = np.linalg.norm(Xa[i:i + 1, :] - Xa[i + 1:, :], axis=1)
        assert np.all(separations < 0.3)
def test_store_hyperparams(tf_session):
    """StoreHyperparameters should create a fresh store file holding one
    hyperparameter entry."""
    store_file = os.path.join(TEST_FOLDER, 'test_hyperparam_store.npz')
    with tf_session.graph.as_default():
        # remove any store left over from a previous run
        if os.path.exists(store_file):
            os.unlink(store_file)
        callback = StoreHyperparameters(store_file=store_file)
        size = callback(1000., [0., 1., 2., 3., 4.], [0.], [0.])
        assert tf_session.run(size)[0] == 1
| [
"numpy.random.uniform",
"numpy.ones_like",
"tensorflow.convert_to_tensor",
"threading.Lock",
"numpy.isclose",
"numpy.linalg.norm",
"os.path.join"
] | [((2019, 2038), 'numpy.ones_like', 'np.ones_like', (['phase'], {}), '(phase)\n', (2031, 2038), True, 'import numpy as np\n'), ((2409, 2454), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['store_array', 'tf.float64'], {}), '(store_array, tf.float64)\n', (2429, 2454), True, 'import tensorflow as tf\n'), ((959, 1010), 'os.path.join', 'os.path.join', (['TEST_FOLDER', '"""test_callbacks_data.h5"""'], {}), "(TEST_FOLDER, 'test_callbacks_data.h5')\n", (971, 1010), False, 'import os\n'), ((1408, 1462), 'os.path.join', 'os.path.join', (['TEST_FOLDER', '"""test_hyperparam_store.npz"""'], {}), "(TEST_FOLDER, 'test_hyperparam_store.npz')\n", (1420, 1462), False, 'import os\n'), ((1879, 1930), 'os.path.join', 'os.path.join', (['TEST_FOLDER', '"""test_callbacks_data.h5"""'], {}), "(TEST_FOLDER, 'test_callbacks_data.h5')\n", (1891, 1930), False, 'import os\n'), ((2688, 2719), 'numpy.isclose', 'np.isclose', (['stored', 'store_array'], {}), '(stored, store_array)\n', (2698, 2719), True, 'import numpy as np\n'), ((2825, 2876), 'os.path.join', 'os.path.join', (['TEST_FOLDER', '"""test_callbacks_data.h5"""'], {}), "(TEST_FOLDER, 'test_callbacks_data.h5')\n", (2837, 2876), False, 'import os\n'), ((3167, 3209), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Xa'], {'dtype': 'float_type'}), '(Xa, dtype=float_type)\n', (3187, 3209), True, 'import tensorflow as tf\n'), ((3497, 3551), 'os.path.join', 'os.path.join', (['TEST_FOLDER', '"""test_hyperparam_store.npz"""'], {}), "(TEST_FOLDER, 'test_hyperparam_store.npz')\n", (3509, 3551), False, 'import os\n'), ((417, 423), 'threading.Lock', 'Lock', ([], {}), '()\n', (421, 423), False, 'from threading import Lock\n'), ((440, 483), 'os.path.join', 'os.path.join', (['TEST_FOLDER', '"""test_callbacks"""'], {}), "(TEST_FOLDER, 'test_callbacks')\n", (452, 483), False, 'import os\n'), ((683, 689), 'threading.Lock', 'Lock', ([], {}), '()\n', (687, 689), False, 'from threading import Lock\n'), ((706, 749), 'os.path.join', 
'os.path.join', (['TEST_FOLDER', '"""test_callbacks"""'], {}), "(TEST_FOLDER, 'test_callbacks')\n", (718, 749), False, 'import os\n'), ((1337, 1379), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['Xa'], {'dtype': 'float_type'}), '(Xa, dtype=float_type)\n', (1357, 1379), True, 'import tensorflow as tf\n'), ((1486, 1540), 'os.path.join', 'os.path.join', (['TEST_FOLDER', '"""test_hyperparam_store.npz"""'], {}), "(TEST_FOLDER, 'test_hyperparam_store.npz')\n", (1498, 1540), False, 'import os\n'), ((3334, 3388), 'numpy.linalg.norm', 'np.linalg.norm', (['(Xa[i:i + 1, :] - Xa[i + 1:, :])'], {'axis': '(1)'}), '(Xa[i:i + 1, :] - Xa[i + 1:, :], axis=1)\n', (3348, 3388), True, 'import numpy as np\n'), ((3575, 3629), 'os.path.join', 'os.path.join', (['TEST_FOLDER', '"""test_hyperparam_store.npz"""'], {}), "(TEST_FOLDER, 'test_hyperparam_store.npz')\n", (3587, 3629), False, 'import os\n'), ((3681, 3735), 'os.path.join', 'os.path.join', (['TEST_FOLDER', '"""test_hyperparam_store.npz"""'], {}), "(TEST_FOLDER, 'test_hyperparam_store.npz')\n", (3693, 3735), False, 'import os\n'), ((506, 534), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1000)'}), '(size=1000)\n', (523, 534), True, 'import numpy as np\n'), ((772, 800), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1000)'}), '(size=1000)\n', (789, 800), True, 'import numpy as np\n'), ((1598, 1652), 'os.path.join', 'os.path.join', (['TEST_FOLDER', '"""test_hyperparam_store.npz"""'], {}), "(TEST_FOLDER, 'test_hyperparam_store.npz')\n", (1610, 1652), False, 'import os\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.