code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from djitellopy import tello
import keypress as kp
from time import sleep
import numpy as np
import cv2
import math
#####Parameters#####
fSpeed = 117/10 #Forward Speed in cm/s (15cm/s)
aSpeed = 360/10 #Angular Speed Degree/s (50d/s)
interval = 0.25  # control-loop period in seconds
# Distance / angle covered during one control interval (dead-reckoning step).
dInterval = fSpeed*interval
aInterval = aSpeed*interval
####################
# Estimated pose: map position in pixels (start at the centre of the
# 1000x1000 canvas), movement direction `a` in degrees, accumulated yaw.
x, y = 500, 500
a = 0
yaw = 0
kp.init()  # initialise the keyboard-polling module
me=tello.Tello()
me.connect()
print(me.get_battery())
# Trail of visited map points; pre-seeded so points[-1] is always valid.
points = [(0, 0), (0, 0)]
def getKeyboardInput():
    """Poll the keyboard, update the dead-reckoned pose, and build an RC command.

    Returns [lr, fb, ud, yv, x, y]: the four RC channel values plus the
    current estimated map position in pixels (500, 500 is the start point).

    Fixes over the original: the land/takeoff branches used a tuple
    expression (``me.land(), print(...)``) purely to sequence calls, and the
    local ``aSpeed`` shadowed the module-level constant of the same name.
    """
    global x, y, yaw, a
    lr, fb, ud, yv = 0, 0, 0, 0
    speed = 15        # translation speed sent to the drone
    turn_speed = 50   # yaw speed; local, distinct from the module-level aSpeed
    d = 0             # distance moved during this interval (map units)

    if kp.getKey("LEFT"):
        lr = -speed
        d = dInterval
        a = -180
    elif kp.getKey("RIGHT"):
        lr = speed
        d = -dInterval
        a = 180

    if kp.getKey("UP"):
        fb = speed
        d = dInterval
        a = 270
    elif kp.getKey("DOWN"):
        fb = -speed
        d = -dInterval
        a = -90

    if kp.getKey("w"):
        ud = speed
    elif kp.getKey("s"):
        ud = -speed

    if kp.getKey("a"):
        yv = turn_speed
        yaw -= aInterval
    elif kp.getKey("d"):
        yv = -turn_speed
        yaw += aInterval

    if kp.getKey("l"):
        # Land and pause so a held key does not spam the command.
        me.land()
        print('Land!')
        sleep(3)
    elif kp.getKey("RETURN"):
        me.takeoff()
        print('Take Off!')

    sleep(interval)
    # Convert heading + step distance into a map-coordinate update.
    a += yaw
    x += int(d*math.cos(math.radians(a)))
    y += int(d*math.sin(math.radians(a)))
    return [lr, fb, ud, yv, x, y]
def drawPoints(img, points):
    """Draw the recorded flight trail and annotate the newest point in metres."""
    for px, py in points:
        # The radius-8 green disc is drawn after (and over) the radius-5 red one,
        # matching the original drawing order.
        cv2.circle(img, (px, py), 5, (0, 0, 255), cv2.FILLED)
        cv2.circle(img, (px, py), 8, (0, 225, 0), cv2.FILLED)
    # Label the latest position, converted from map pixels to metres (100 px = 1 m).
    anchor = (points[-1][0] + 10, points[-1][1] + 30)
    cv2.putText(img, f'({(points[-1][0]- 500)/100}, {(points[-1][1]- 500)/100})m',
                anchor, cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 255), 1)
# Main control loop: poll the keyboard, send RC commands to the drone, and
# redraw the dead-reckoned flight path on a fresh 1000x1000 canvas.
while True:
    values=getKeyboardInput()
    me.send_rc_control(values[0], values[1], values[2], values[3])
    img = np.zeros((1000, 1000, 3), np.uint8)
    # Append a trail point only when the estimated position actually moved.
    if (points[-1][0] != values[4] or points[-1][1] != values[5]):
        points.append((values[4], values[5]))
    drawPoints(img, points)
    cv2.imshow("OutPut", img)
cv2.waitKey(1) | [
"cv2.circle",
"cv2.putText",
"keypress.init",
"cv2.waitKey",
"math.radians",
"keypress.getKey",
"numpy.zeros",
"time.sleep",
"cv2.imshow",
"djitellopy.tello.Tello"
] | [((386, 395), 'keypress.init', 'kp.init', ([], {}), '()\n', (393, 395), True, 'import keypress as kp\n'), ((400, 413), 'djitellopy.tello.Tello', 'tello.Tello', ([], {}), '()\n', (411, 413), False, 'from djitellopy import tello\n'), ((622, 639), 'keypress.getKey', 'kp.getKey', (['"""LEFT"""'], {}), "('LEFT')\n", (631, 639), True, 'import keypress as kp\n'), ((806, 821), 'keypress.getKey', 'kp.getKey', (['"""UP"""'], {}), "('UP')\n", (815, 821), True, 'import keypress as kp\n'), ((988, 1002), 'keypress.getKey', 'kp.getKey', (['"""w"""'], {}), "('w')\n", (997, 1002), True, 'import keypress as kp\n'), ((1085, 1099), 'keypress.getKey', 'kp.getKey', (['"""a"""'], {}), "('a')\n", (1094, 1099), True, 'import keypress as kp\n'), ((1236, 1250), 'keypress.getKey', 'kp.getKey', (['"""l"""'], {}), "('l')\n", (1245, 1250), True, 'import keypress as kp\n'), ((1363, 1378), 'time.sleep', 'sleep', (['interval'], {}), '(interval)\n', (1368, 1378), False, 'from time import sleep\n'), ((1676, 1741), 'cv2.circle', 'cv2.circle', (['img', '(point[0], point[1])', '(8)', '(0, 225, 0)', 'cv2.FILLED'], {}), '(img, (point[0], point[1]), 8, (0, 225, 0), cv2.FILLED)\n', (1686, 1741), False, 'import cv2\n'), ((1765, 1950), 'cv2.putText', 'cv2.putText', (['img', 'f"""({(points[-1][0] - 500) / 100}, {(points[-1][1] - 500) / 100})m"""', '(points[-1][0] + 10, points[-1][1] + 30)', 'cv2.FONT_HERSHEY_PLAIN', '(1)', '(255, 0, 255)', '(1)'], {}), "(img,\n f'({(points[-1][0] - 500) / 100}, {(points[-1][1] - 500) / 100})m', (\n points[-1][0] + 10, points[-1][1] + 30), cv2.FONT_HERSHEY_PLAIN, 1, (\n 255, 0, 255), 1)\n", (1776, 1950), False, 'import cv2\n'), ((2098, 2133), 'numpy.zeros', 'np.zeros', (['(1000, 1000, 3)', 'np.uint8'], {}), '((1000, 1000, 3), np.uint8)\n', (2106, 2133), True, 'import numpy as np\n'), ((2283, 2308), 'cv2.imshow', 'cv2.imshow', (['"""OutPut"""', 'img'], {}), "('OutPut', img)\n", (2293, 2308), False, 'import cv2\n'), ((2314, 2328), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), 
'(1)\n', (2325, 2328), False, 'import cv2\n'), ((715, 733), 'keypress.getKey', 'kp.getKey', (['"""RIGHT"""'], {}), "('RIGHT')\n", (724, 733), True, 'import keypress as kp\n'), ((896, 913), 'keypress.getKey', 'kp.getKey', (['"""DOWN"""'], {}), "('DOWN')\n", (905, 913), True, 'import keypress as kp\n'), ((1037, 1051), 'keypress.getKey', 'kp.getKey', (['"""s"""'], {}), "('s')\n", (1046, 1051), True, 'import keypress as kp\n'), ((1161, 1175), 'keypress.getKey', 'kp.getKey', (['"""d"""'], {}), "('d')\n", (1170, 1175), True, 'import keypress as kp\n'), ((1279, 1287), 'time.sleep', 'sleep', (['(3)'], {}), '(3)\n', (1284, 1287), False, 'from time import sleep\n'), ((1298, 1317), 'keypress.getKey', 'kp.getKey', (['"""RETURN"""'], {}), "('RETURN')\n", (1307, 1317), True, 'import keypress as kp\n'), ((1587, 1652), 'cv2.circle', 'cv2.circle', (['img', '(point[0], point[1])', '(5)', '(0, 0, 255)', 'cv2.FILLED'], {}), '(img, (point[0], point[1]), 5, (0, 0, 255), cv2.FILLED)\n', (1597, 1652), False, 'import cv2\n'), ((1418, 1433), 'math.radians', 'math.radians', (['a'], {}), '(a)\n', (1430, 1433), False, 'import math\n'), ((1461, 1476), 'math.radians', 'math.radians', (['a'], {}), '(a)\n', (1473, 1476), False, 'import math\n')] |
import numpy as np
from collections import defaultdict as dd
from scipy import sparse as sp
import cnn_rnn
import sample
# Penn-Treebank-style POS tag inventory; label ids used in the y arrays are
# positions in this list (via LABEL_INDEX.index(tag) in read_data).
LABEL_INDEX = ['PRP$', 'VBG', 'VBD', '``', 'VBN', 'POS', "''", 'VBP', 'WDT', 'JJ',\
'WP', 'VBZ', 'DT', '#', 'RP', '$', 'NN', 'FW', ',', '.', 'TO', 'PRP', 'RB', '-LRB-',\
':', 'NNS', 'NNP', 'VB', 'WRB', 'CC', 'LS', 'PDT', 'RBS', 'RBR', 'CD', 'EX', 'IN', 'WP$',\
'MD', 'NNPS', '-RRB-', 'JJS', 'JJR', 'SYM', 'UH']
MAX_LEN = 176       # padded sentence length (tokens)
MAX_CHAR_LEN = 35   # padded word length (characters), incl. begin/end markers
DIR = 'biomed/genia/'
TRAIN_DATA = DIR + 'train.txt'
# NOTE(review): DEV_DATA points at test.txt while TEST_DATA points at dev.txt
# -- the names look swapped; confirm this is intentional.
DEV_DATA = DIR + 'test.txt'
TEST_DATA = DIR + 'dev.txt'
HASH_FILE = 'words.lst'      # one word per line, row-aligned with EMB_FILE
EMB_FILE = 'embeddings.txt'  # whitespace-separated embedding vectors
USE_DEV = True               # if True, fold the dev split into training data
LABELING_RATE = 0.001        # fraction of training sentences kept as labeled
def process(word):
    """Normalize a token: lowercase it and map every digit to '0'."""
    normalized = ['0' if ch.isdigit() else ch for ch in word.lower()]
    return "".join(normalized)
def trans_label(label):
    """Map corpus tag variants onto the canonical tags used in LABEL_INDEX.

    Unknown tags pass through unchanged.
    """
    canonical = {
        '(': '-LRB-',
        ')': '-RRB-',
        '': '-RRB-',
        'PP': 'PRP',
        'N': 'NN',
        '-': 'SYM',
        'XT': 'DT',
        'CT': 'DT',
    }
    return canonical.get(label, label)
def create_word_index(filenames):
    """Assign ids (starting at 1) to every normalized word across the files.

    Lines without a '/' separator (sentence boundaries) are skipped.
    Returns (word_index, next_free_id).
    """
    word_index = {}
    next_id = 1
    for filename in filenames:
        for line in open(filename):
            if '/' not in line:
                continue
            # Everything before the final '/' is the token; the tag follows it.
            token = process("/".join(line.strip().split('/')[:-1]))
            if token not in word_index:
                word_index[token] = next_id
                next_id += 1
    return word_index, next_id
def create_char_index(filenames):
    """Assign ids to every character seen in the token part of word/tag lines.

    Ids start at 3; ids 1 and 2 are reserved as word begin/end markers by the
    char-level reader. Returns (char_index, next_free_id).
    """
    char_index = {}
    next_id = 3
    for filename in filenames:
        for line in open(filename):
            if '/' not in line:
                continue
            token = "/".join(line.strip().split('/')[:-1])
            for ch in token:
                if ch not in char_index:
                    char_index[ch] = next_id
                    next_id += 1
    return char_index, next_id
def cnt_line(filename):
    """Count sentence-boundary lines, i.e. lines containing no '/' separator."""
    return sum(1 for line in open(filename) if '/' not in line)
def read_data(filename, word_index):
    """Read word/tag lines into padded (x, y, mask) arrays.

    Each row is one sentence (sentences are separated by lines without '/');
    x holds word ids, y holds LABEL_INDEX positions, mask marks real tokens.
    """
    n_sent = cnt_line(filename)
    x = np.zeros((n_sent, MAX_LEN), dtype=np.int32)
    y = np.zeros((n_sent, MAX_LEN), dtype=np.int32)
    mask = np.zeros((n_sent, MAX_LEN), dtype=np.float32)
    sent, pos = 0, 0
    for line in open(filename):
        if '/' not in line:
            # Sentence boundary: advance to the next row.
            sent += 1
            pos = 0
            continue
        parts = line.strip().split('/')
        tag = trans_label(parts[-1].split('|')[0])
        token = process("/".join(parts[:-1]))
        x[sent, pos] = word_index[token]
        y[sent, pos] = LABEL_INDEX.index(tag)
        mask[sent, pos] = 1.0
        pos += 1
    return x, y, mask
def read_char_data(filename, char_index):
    """Read char-level input arrays of shape (n_sentences, MAX_LEN, MAX_CHAR_LEN).

    Slot 0 of every word holds begin-marker id 1; the slot right after the
    last character holds end-marker id 2 (when it fits). The characters
    themselves are written starting at offset 1, truncated to MAX_CHAR_LEN.
    Returns (x, mask).
    """
    line_cnt = cnt_line(filename)
    x = np.zeros((line_cnt, MAX_LEN, MAX_CHAR_LEN), dtype = np.int32)
    mask = np.zeros((line_cnt, MAX_LEN, MAX_CHAR_LEN), dtype = np.float32)
    i, j = 0, 0  # sentence index, word index within the sentence
    for line in open(filename):
        if not '/' in line:
            # Sentence boundary: move on to the next row.
            i += 1
            j = 0
            continue
        inputs = line.strip().split('/')
        label = inputs[-1]  # tag; unused in the char-level reader
        word = "/".join(inputs[: -1])
        for k, c in enumerate(word):
            # Characters occupy offsets 1..MAX_CHAR_LEN-1 (offset 0 is the marker).
            if k + 1 >= MAX_CHAR_LEN: break
            x[i, j, k + 1] = char_index[c]
            mask[i, j, k + 1] = 1.0
        x[i, j, 0] = 1       # word-begin marker
        mask[i, j, 0] = 1.0
        if len(word) + 1 < MAX_CHAR_LEN:
            x[i, j, len(word) + 1] = 2   # word-end marker
            mask[i, j, len(word) + 1] = 1.0
        j += 1
    return x, mask
def read_word2embedding():
    """Load pretrained embeddings keyed by the word list in HASH_FILE.

    Row i of EMB_FILE is the vector for line i of HASH_FILE; the first
    occurrence of a duplicated word wins.
    """
    words = [line.strip() for line in open(HASH_FILE)]
    word2embedding = {}
    for i, line in enumerate(open(EMB_FILE)):
        word = words[i]
        if word in word2embedding:
            continue
        values = line.strip().split()
        word2embedding[word] = np.array([float(v) for v in values], dtype=np.float32)
    return word2embedding
def evaluate(py, y_, m_, full=False):
    """Masked token accuracy; the value is returned four times so the result
    tuple matches the (acc, precision, recall, f1)-shaped interface expected
    by the caller."""
    predictions = np.argmax(py, axis=1) if py.ndim > 1 else py
    gold = y_.flatten()
    mask = m_.flatten()
    correct = (np.array(gold == predictions, dtype=np.int32) * mask).sum()
    acc = 1.0 * correct / mask.sum()
    return acc, acc, acc, acc
if __name__ == '__main__':
    # Build vocabularies over all splits so indices are consistent everywhere.
    word_index, word_cnt = create_word_index([TRAIN_DATA, DEV_DATA, TEST_DATA])
    wx, y, m = read_data(TRAIN_DATA, word_index)
    if USE_DEV:
        # Fold the dev split into the training arrays.
        dev_wx, dev_y, dev_m = read_data(TEST_DATA, word_index)
        wx, y, m = np.vstack((wx, dev_wx)), np.vstack((y, dev_y)), np.vstack((m, dev_m))
    twx, ty, tm = read_data(DEV_DATA, word_index)
    char_index, char_cnt= create_char_index([TRAIN_DATA, DEV_DATA, TEST_DATA])
    x, cm = read_char_data(TRAIN_DATA, char_index)
    if USE_DEV:
        dev_x, dev_cm = read_char_data(TEST_DATA, char_index)
        x, cm = np.vstack((x, dev_x)), np.vstack((cm, dev_cm))
    tx, tcm = read_char_data(DEV_DATA, char_index)
    model = cnn_rnn.cnn_rnn(char_cnt, len(LABEL_INDEX), word_cnt)
    if LABELING_RATE < 1.0:
        # Subsample the labeled training set to simulate scarce annotations.
        ind = sample.create_sample_index(LABELING_RATE, x.shape[0])
        x, y, m, wx, cm = sample.sample_arrays((x, y, m, wx, cm), ind)
    model.add_data(x, y, m, wx, cm, None, tx, ty, tm, twx, tcm, None)
    model.build()
    word2embedding = read_word2embedding()
    model.set_embedding(word2embedding, word_index)
    model.train(evaluate)
| [
"numpy.argmax",
"numpy.zeros",
"sample.create_sample_index",
"numpy.array",
"sample.sample_arrays",
"numpy.vstack"
] | [((2319, 2366), 'numpy.zeros', 'np.zeros', (['(line_cnt, MAX_LEN)'], {'dtype': 'np.float32'}), '((line_cnt, MAX_LEN), dtype=np.float32)\n', (2327, 2366), True, 'import numpy as np\n'), ((2962, 3021), 'numpy.zeros', 'np.zeros', (['(line_cnt, MAX_LEN, MAX_CHAR_LEN)'], {'dtype': 'np.int32'}), '((line_cnt, MAX_LEN, MAX_CHAR_LEN), dtype=np.int32)\n', (2970, 3021), True, 'import numpy as np\n'), ((3035, 3096), 'numpy.zeros', 'np.zeros', (['(line_cnt, MAX_LEN, MAX_CHAR_LEN)'], {'dtype': 'np.float32'}), '((line_cnt, MAX_LEN, MAX_CHAR_LEN), dtype=np.float32)\n', (3043, 3096), True, 'import numpy as np\n'), ((2211, 2256), 'numpy.zeros', 'np.zeros', (['(line_cnt, MAX_LEN)'], {'dtype': 'np.int32'}), '((line_cnt, MAX_LEN), dtype=np.int32)\n', (2219, 2256), True, 'import numpy as np\n'), ((2260, 2305), 'numpy.zeros', 'np.zeros', (['(line_cnt, MAX_LEN)'], {'dtype': 'np.int32'}), '((line_cnt, MAX_LEN), dtype=np.int32)\n', (2268, 2305), True, 'import numpy as np\n'), ((4173, 4194), 'numpy.argmax', 'np.argmax', (['py'], {'axis': '(1)'}), '(py, axis=1)\n', (4182, 4194), True, 'import numpy as np\n'), ((5147, 5200), 'sample.create_sample_index', 'sample.create_sample_index', (['LABELING_RATE', 'x.shape[0]'], {}), '(LABELING_RATE, x.shape[0])\n', (5173, 5200), False, 'import sample\n'), ((5227, 5271), 'sample.sample_arrays', 'sample.sample_arrays', (['(x, y, m, wx, cm)', 'ind'], {}), '((x, y, m, wx, cm), ind)\n', (5247, 5271), False, 'import sample\n'), ((4597, 4620), 'numpy.vstack', 'np.vstack', (['(wx, dev_wx)'], {}), '((wx, dev_wx))\n', (4606, 4620), True, 'import numpy as np\n'), ((4622, 4643), 'numpy.vstack', 'np.vstack', (['(y, dev_y)'], {}), '((y, dev_y))\n', (4631, 4643), True, 'import numpy as np\n'), ((4645, 4666), 'numpy.vstack', 'np.vstack', (['(m, dev_m)'], {}), '((m, dev_m))\n', (4654, 4666), True, 'import numpy as np\n'), ((4941, 4962), 'numpy.vstack', 'np.vstack', (['(x, dev_x)'], {}), '((x, dev_x))\n', (4950, 4962), True, 'import numpy as np\n'), ((4964, 4987), 
'numpy.vstack', 'np.vstack', (['(cm, dev_cm)'], {}), '((cm, dev_cm))\n', (4973, 4987), True, 'import numpy as np\n'), ((4252, 4285), 'numpy.array', 'np.array', (['(y == py)'], {'dtype': 'np.int32'}), '(y == py, dtype=np.int32)\n', (4260, 4285), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
from scipy.special import comb
from mcse.libmpi.base import _NaiveParallel
from mpi4py import MPI
class DupCheck(_NaiveParallel):
    """
    High-performance duplicate check for a dictionary of Structures.

    While some parts of the implementation may not look well optimized, it
    alleviates the major bottleneck, which is for-loops in Python over
    millions or tens of millions of entries. For example, it is much better
    to communicate the entire Structure list (which may be 1,000-10,000) at
    the beginning of the calculation than try to send a minimal list from
    rank 0.
    """

    def calc(self, compare_fn, struct_dict, compare_fn_kw=None):
        """
        Run the pairwise duplicate check, splitting pairs across MPI ranks.

        Arguments
        ---------
        compare_fn: callable
            Function that will compare structures and return True or False.
        struct_dict: StructDict
            Structure dictionary.
        compare_fn_kw: dict
            Dictionary of keyword arguments for the comparison function.
            (Default changed from a shared mutable ``{}`` to None.)

        Returns
        -------
        dict
            On rank 0, the dictionary of unique structures keyed by
            struct_id; on every other rank, an empty dictionary.
        """
        if compare_fn_kw is None:
            compare_fn_kw = {}
        self.dup_dict = {}
        if isinstance(compare_fn, dict):
            # A dict of comparison functions is not supported here; the
            # original raised a bare, message-less Exception.
            raise TypeError("compare_fn must be a callable, not a dict")

        ## Make sure struct_dict is identical on all ranks
        struct_dict = self.comm.bcast(struct_dict, root=0)

        ## Performing pairwise check.
        ## Will be faster to build on each rank than communicate.
        I, J = np.triu_indices(len(struct_dict), 1)
        pairwise = np.hstack([I[:, None], J[:, None]])
        keys = [x for x in struct_dict.keys()]
        my_list = self.get_list(pairwise)  # this rank's share of the pairs

        dup_dict = {}
        for row in my_list:
            struct1_id = keys[row[0]]
            struct2_id = keys[row[1]]
            struct1 = struct_dict[struct1_id]
            struct2 = struct_dict[struct2_id]
            kw = {"struct1": struct1, "struct2": struct2}
            result = compare_fn(**kw, **compare_fn_kw)
            # Every structure is trivially a duplicate of itself.
            if struct1_id not in dup_dict:
                dup_dict[struct1_id] = {struct1_id: True}
            if struct2_id not in dup_dict:
                dup_dict[struct2_id] = {struct2_id: True}
            if result:
                dup_dict[struct1_id][struct2_id] = True
                dup_dict[struct2_id][struct1_id] = True

        self.dup_dict = dup_dict
        all_dup_dict = self.comm.gather(dup_dict, root=0)

        ## Finish by parsing results on rank 0
        if self.rank == 0:
            self.master_dup_dict = {}
            for struct_id in struct_dict:
                self.master_dup_dict[struct_id] = {struct_id: True}
            # Merge the per-rank duplicate tables into the master table.
            for key, value in self.master_dup_dict.items():
                for entry_dict in all_dup_dict:
                    if key in entry_dict:
                        temp_dup_dict = entry_dict[key]
                        for temp_value in temp_dup_dict:
                            value[temp_value] = True
                self.master_dup_dict[key] = value

            ## Collect unique
            ## Sort through duplicate check to identify unique entries
            used_id = {}
            unique_dict = {}
            for key, value in self.master_dup_dict.items():
                if key in used_id:
                    continue
                if len(value) == 1:
                    # No duplicates found for this structure: keep it.
                    used_id[key] = True
                    dimer = struct_dict[key]
                    unique_dict[dimer.struct_id] = dimer
                    continue
                else:
                    # Keep the first member of the duplicate group and mark
                    # every member of the group as consumed.
                    dimer = struct_dict[key]
                    unique_dict[dimer.struct_id] = dimer
                    for dimer_id in value:
                        used_id[dimer_id] = True

            return unique_dict
        else:
            return {}
if __name__ == "__main__":
pass
| [
"numpy.hstack"
] | [((1490, 1525), 'numpy.hstack', 'np.hstack', (['[I[:, None], J[:, None]]'], {}), '([I[:, None], J[:, None]])\n', (1499, 1525), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Filename: siamese_thawslump_cd
"""
introduction: conduct change detection using siamese neural network
authors: <NAME>
email:<EMAIL>
add time: 05 November, 2019
"""
import sys,os
from optparse import OptionParser
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import transforms
from torch import optim
import numpy as np
# add the upper level path for importing dataTools model
script_folder = os.path.dirname(sys.argv[0])
upper_folder = os.path.dirname(script_folder)
sys.path.insert(0, upper_folder)
import dataTools.img_pairs as img_pairs
from dataTools.img_pairs import two_images_pixel_pair
from dataTools.img_pairs import read_img_pair_paths
from dataTools.img_pairs import save_image_oneband_8bit
sys.path.insert(0,os.path.expanduser('~/codes/PycharmProjects/DeeplabforRS'))
import split_image
import basic_src.RSImageProcess as RSImageProcess
class ToTensor(object):
    """Convert ndarrays read by rasterio into float torch Tensors."""

    def __call__(self, image):
        # rasterio already delivers channel-first (C x H x W) arrays, which is
        # the layout torch expects, so no axis swap is needed here.
        tensor = torch.from_numpy(image)
        return tensor.float()  # cast from Byte (uint8) to float32
# modified network define from
# https://becominghuman.ai/siamese-networks-algorithm-applications-and-pytorch-implementation-4ffa3304c18
class Net(nn.Module):
    """Siamese network: two inputs pass through one shared-weight CNN tower,
    and the absolute difference of the two feature vectors is classified
    into 2 classes (same / different) by a final linear layer.

    For a 28x28 input: conv7 -> 22x22, pool2 -> 11x11, conv5 -> 7x7,
    conv5 -> 3x3, hence 256 * 3 * 3 = 2304 flattened features.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 64, 7)
        self.pool1 = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(64, 128, 5)
        self.conv3 = nn.Conv2d(128, 256, 5)
        self.linear1 = nn.Linear(2304, 512)
        self.linear2 = nn.Linear(512, 2)

    def _embed(self, x):
        """Run one branch of the shared-weight tower and return its features."""
        x = F.relu(self.conv1(x))
        x = self.pool1(x)
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = x.view(x.shape[0], -1)
        return F.relu(self.linear1(x))

    def forward(self, data):
        # Both branches share the same parameters (Siamese weight sharing).
        left = self._embed(data[0])
        right = self._embed(data[1])
        # Squared/absolute feature distance works, but binary cross entropy on
        # a 2-logit head converged faster in the original author's experiments,
        # hence the extra linear layer producing the logits.
        diff = torch.abs(right - left)
        return self.linear2(diff)
def train(model, device, train_loader, epoch, optimizer, batch_size):
    """Run one training epoch; returns the mean cross-entropy loss per batch.

    ``data`` from the loader is a list of tensors; only the first two (the
    image pair) are fed to the model. ``batch_size`` is used only for the
    progress printout.
    """
    model.train()
    total_loss = 0
    iter_count = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        # Move every tensor of the batch onto the target device.
        for i in range(len(data)):
            data[i] = data[i].to(device)
        optimizer.zero_grad()
        out_target = model(data[:2])
        # target = target.type(torch.LongTensor).to(device)
        # label_target = torch.squeeze(target)
        label_target = target.type(torch.LongTensor).to(device)
        label_target = torch.squeeze(label_target)
        # https://pytorch.org/docs/stable/nn.html#torch.nn.CrossEntropyLoss
        loss = F.cross_entropy(out_target, label_target)
        loss.backward()
        optimizer.step()
        if batch_idx % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * batch_size, len(train_loader.dataset),
                100. * batch_idx * batch_size / len(train_loader.dataset),
                loss.item()))
        total_loss += loss.item()
        iter_count += 1
    # NOTE(review): divides by zero if train_loader is empty -- confirm loaders
    # are always non-empty.
    return total_loss/iter_count
def test(model, device, test_loader):
    """Evaluate on test_loader; return (accuracy in percent, summed loss).

    Bug fix: the original computed the same cross-entropy twice per batch and
    overwrote the running total on every iteration (``loss = F.cross_entropy``
    followed by ``loss = loss + loss_test``), so the reported "loss" was just
    twice the final batch's loss. The loss is now accumulated over all batches.
    """
    model.eval()
    with torch.no_grad():
        accurate_labels = 0
        all_labels = 0
        total_loss = 0.0
        for batch_idx, (data, target) in enumerate(test_loader):
            # Move every tensor of the batch onto the target device.
            for i in range(len(data)):
                data[i] = data[i].to(device)
            output_target = model(data[:2])
            label_target = target.type(torch.LongTensor).to(device)
            label_target = torch.squeeze(label_target)
            # https://pytorch.org/docs/stable/nn.html#torch.nn.CrossEntropyLoss
            total_loss += F.cross_entropy(output_target, label_target)
            batch_correct = torch.sum(torch.argmax(output_target, dim=1) == label_target).cpu()
            accurate_labels += batch_correct
            all_labels += len(label_target)
        accuracy = 100. * accurate_labels / all_labels
        print('Test accuracy: {}/{} ({:.3f}%)\tLoss: {:.6f}'.format(accurate_labels, all_labels, accuracy, total_loss))
        return accuracy, total_loss
def oneshot(model, device, data):
    """Classify a single input pair; returns the predicted class index as int."""
    model.eval()
    with torch.no_grad():
        # Move tensors to the device in place (mutates the caller's list,
        # matching the original behaviour).
        for idx, tensor in enumerate(data):
            data[idx] = tensor.to(device)
        logits = model(data)
        prediction = torch.argmax(logits, dim=1)
        return torch.squeeze(prediction).cpu().item()
def predict_small_image_or_subset(model,device,save_path, win_size,data_root,image_paths_txt,
                                  image_pair,pair_id, height, width, trans, batch_size,num_workers, subset):
    '''
    Predict the change map of a small image (< 1000 by 1000 pixels) or of one
    image subset, writing the result to save_path.
    :param model: trained siamese network (torch module)
    :param device: torch device to run inference on
    :param save_path: output path for the predicted change map
    :param win_size: side length of the square patch fed to the network
    :param data_root: root directory of the image pairs
    :param image_paths_txt: text file listing the image pairs
    :param image_pair: paths of this pair; image_pair[1] is the new image
    :param pair_id: index of the pair inside image_paths_txt
    :param height: height (rows) of the area to predict
    :param width: width (columns) of the area to predict
    :param trans: transform applied to each patch by the dataset
    :param batch_size: batch size for the prediction DataLoader
    :param num_workers: worker count for the prediction DataLoader
    :param subset: subset boundary window (presumably (xoff, yoff, xsize,
        ysize) -- see caller), or None for the whole image
    :return: True on success
    '''
    prediction_loader = torch.utils.data.DataLoader(
        two_images_pixel_pair(data_root, image_paths_txt, (win_size, win_size), train=False, transform=trans,
                              predict_pair_id=pair_id, subset_boundary=subset),
        batch_size=batch_size, num_workers=num_workers, shuffle=False)
    predicted_change_2d = np.zeros((height, width), dtype=np.uint8)
    # print('Size of DataLoader: %d'%len(prediction_loader))
    # loading data
    for batch_idx, (data, pos) in enumerate(prediction_loader):
        for i in range(len(data)):
            data[i] = data[i].to(device)
        out_prop = model(data)
        predicted_target = torch.argmax(out_prop, dim=1).cpu()
        # Scatter each patch's predicted label back to its pixel position.
        for out_label, _, row, col in zip(predicted_target, pos[0], pos[1], pos[2]):
            predicted_change_2d[row, col] = out_label
    # save_path = os.path.join(save_predict_dir, "predict_change_map_%d.tif" % pair_id)
    print('Save prediction result to %s' % save_path)
    # default, the second image is the new image, when preparing the training image
    # the new image has the same size with the change map (label), but the old image may have offset, then the old image was cropped
    # so when save the prediction result, use the new image as projection reference
    new_image_path = image_pair[1]
    img_pairs.save_image_oneband_8bit(new_image_path, predicted_change_2d, save_path,subset)
    return True
def main(options, args):
    """Train the siamese change-detection network, or run prediction.

    args[0] is the data root directory, args[1] the txt file listing image
    pairs. With --dotrain the model is trained and checkpointed; otherwise a
    trained checkpoint is loaded and a change map is predicted per pair.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    normalize = transforms.Normalize((128,128, 128), (128, 128, 128)) # mean, std # for 3 band images with 0-255 grey
    trans = transforms.Compose([ToTensor(), normalize])
    data_root = os.path.expanduser(args[0])
    image_paths_txt = os.path.expanduser(args[1])
    model = Net().to(device) #Net().double().to(device)
    # print(model)
    do_learn = options.dotrain
    batch_size = options.batch_size
    lr = options.learning_rate
    weight_decay = options.weight_decay
    num_epochs = options.num_epochs
    save_frequency = options.save_frequency
    num_workers = options.num_workers
    if options.save_model_dir is not None:
        save_model_folder = options.save_model_dir
    else:
        save_model_folder = os.getcwd()
    train_loss_list = []
    evl_loss_list = []
    evl_acc_list = []
    if do_learn:  # training mode
        # NOTE(review): train and test loaders are built from the same
        # train=True dataset -- the "test" accuracy is on training data.
        train_loader = torch.utils.data.DataLoader(
            two_images_pixel_pair(data_root, image_paths_txt, (28,28), train=True, transform=trans),
            batch_size=batch_size, num_workers=num_workers, shuffle=True)
        test_loader = torch.utils.data.DataLoader(
            two_images_pixel_pair(data_root, image_paths_txt, (28,28), train=True, transform=trans),
            batch_size=batch_size, num_workers=num_workers, shuffle=False)
        optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
        for epoch in range(num_epochs):
            t_loss = train(model, device, train_loader, epoch, optimizer,batch_size)
            eval_acc, eval_loss = test(model, device, test_loader)
            train_loss_list.append(t_loss)
            # NOTE(review): accuracy and loss appear swapped between these
            # two lists (loss list receives eval_acc and vice versa) -- confirm.
            evl_loss_list.append(eval_acc)
            evl_acc_list.append(eval_loss)
            if epoch % save_frequency == 0:
                # torch.save(model, 'siamese_{:03}.pt'.format(epoch)) # save the entire model
                model_save_path = os.path.join(save_model_folder,'siamese_{:03}.pt'.format(epoch))
                torch.save(model.state_dict(),
                           model_save_path) # save only the state dict, i.e. the weight
    else: # prediction
        img_pair_list = read_img_pair_paths(data_root, image_paths_txt)
        if options.predict_result_dir is None:
            save_predict_dir = os.getcwd()
        else:
            save_predict_dir = options.predict_result_dir
            # Create the user-specified output folder (raises if it exists).
            os.mkdir(save_predict_dir)
        with torch.no_grad():
            # loading model
            load_model_path = options.load_model_path # 'siamese_018.pt'
            if os.path.isfile(load_model_path) is False:
                raise IOError('trained model: %s does not exist'%load_model_path)
            model.load_state_dict(torch.load(load_model_path))
            model.eval()
            for pair_id, image_pair in enumerate(img_pair_list):
                print('Predict the %d th image'%pair_id)
                # handle images with large size (> 1000 by 1000 pixels)
                height, width = img_pairs.get_image_height_width(image_pair[0])
                if height*width > 1000*1000:
                    # Tile the image into overlapping subsets, predict each,
                    # then mosaic the tiles back into one change map.
                    subset_w = options.sub_width
                    subset_h = options.sub_height
                    adj_overlay_x = options.extend_dis_x
                    adj_overlay_y = options.extend_dis_y
                    subset_boundaries = split_image.sliding_window(width, height, subset_w, subset_h, adj_overlay_x, adj_overlay_y)
                    if len(subset_boundaries) > 1:
                        # go through each subset,then merge them
                        subset_file_list = []
                        save_folder = os.path.join(save_predict_dir, "predict_change_map_%d" % pair_id)
                        os.mkdir(save_folder)
                        for s_idx, subset in enumerate(subset_boundaries):
                            # subset: (xoff, yoff, xsize, ysize)
                            xsize, ysize = subset[2], subset[3] # new width and height
                            save_path = os.path.join(save_folder,'%d.tif'%s_idx)
                            predict_small_image_or_subset(model, device, save_path, 28, data_root, image_paths_txt,
                                                          image_pair, pair_id,
                                                          ysize, xsize, trans, batch_size, num_workers, subset)
                            subset_file_list.append(save_path)
                            pass
                        # mosaic
                        save_path = os.path.join(save_predict_dir, "predict_change_map_%d.tif" % pair_id)
                        RSImageProcess.mosaics_images(subset_file_list,save_path,0)
                        # skip the remaining codes
                        continue
                ####################################################################################
                # predict a small image
                save_path = os.path.join(save_predict_dir, "predict_change_map_%d.tif" % pair_id)
                predict_small_image_or_subset(model,device,save_path,28,data_root,image_paths_txt,image_pair,pair_id,
                                              height,width,trans,batch_size,num_workers,None)
if __name__ == "__main__":
usage = "usage: %prog [options] root_dir images_paths_txt"
parser = OptionParser(usage=usage, version="1.0 2019-11-05")
parser.description = 'Introduction: conduct change detection using siamese neural network '
parser.add_option("-t", "--dotrain",
action="store_true", dest="dotrain", default=False,
help="set this flag for training")
parser.add_option("-b", "--batch_size", type=int, default=32,
action="store", dest="batch_size",
help="the batch size")
parser.add_option('-l', '--learning_rate', type = float, default = 0.001,
action="store", dest='learning_rate',
help='the learning rate for training')
parser.add_option('-w', '--weight_decay', type = float, default = 0.0001,
action="store", dest='weight_decay',
help='the weight decay for training')
parser.add_option('-e', '--num_epochs', type = int, default = 20,
action="store", dest='num_epochs',
help='the number of epochs for training')
parser.add_option('-n', '--num_workers', type = int, default = 4,
action="store", dest='num_workers',
help='the number of workers for loading images')
parser.add_option('-s', '--save_frequency', type = int, default = 5,
action="store", dest = 'save_frequency',
help='the frequency for saving traned model')
parser.add_option('-m', '--load_model_path',
action="store", dest = 'load_model_path',
help='the trained model for prediction')
parser.add_option('-d', '--save_model_dir',
action="store", dest = 'save_model_dir',
help='the folder for saving model during training')
parser.add_option('-p', '--predict_result_dir',
action="store", dest = 'predict_result_dir',
help='the folder for saving prediction results')
parser.add_option("-W", "--sub_width",type = int, default = 1024,
action="store", dest="sub_width",
help="the width of wanted subsets")
parser.add_option("-H", "--sub_height", type = int, default = 1024,
action="store", dest="sub_height",
help="the height of wanted subsets")
parser.add_option("-X", "--extend_dis_x",type=int,default = 14,
action="store", dest="extend_dis_x",
help="extend distance in x direction (pixels) of the subset to adjacent subset, make subsets overlay each other")
parser.add_option("-Y", "--extend_dis_y", type=int, default=14,
action="store", dest="extend_dis_y",
help="extend distance in y direction (pixels) of the subset to adjacent subset, make subsets overlay each other")
# parser.add_option("-p", "--para",
# action="store", dest="para_file",
# help="the parameters file")
(options, args) = parser.parse_args()
if len(sys.argv) < 2:
parser.print_help()
sys.exit(2)
## set parameters files
# if options.para_file is None:
# print('error, no parameters file')
# parser.print_help()
# sys.exit(2)
# else:
# parameters.set_saved_parafile_path(options.para_file)
main(options, args)
| [
"os.mkdir",
"dataTools.img_pairs.save_image_oneband_8bit",
"optparse.OptionParser",
"torch.argmax",
"os.path.isfile",
"torchvision.transforms.Normalize",
"torch.no_grad",
"os.path.join",
"basic_src.RSImageProcess.mosaics_images",
"os.path.expanduser",
"os.path.dirname",
"torch.load",
"torch.... | [((460, 488), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (475, 488), False, 'import sys, os\n'), ((504, 534), 'os.path.dirname', 'os.path.dirname', (['script_folder'], {}), '(script_folder)\n', (519, 534), False, 'import sys, os\n'), ((535, 567), 'sys.path.insert', 'sys.path.insert', (['(0)', 'upper_folder'], {}), '(0, upper_folder)\n', (550, 567), False, 'import sys, os\n'), ((789, 847), 'os.path.expanduser', 'os.path.expanduser', (['"""~/codes/PycharmProjects/DeeplabforRS"""'], {}), "('~/codes/PycharmProjects/DeeplabforRS')\n", (807, 847), False, 'import sys, os\n'), ((6450, 6491), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'np.uint8'}), '((height, width), dtype=np.uint8)\n', (6458, 6491), True, 'import numpy as np\n'), ((7432, 7525), 'dataTools.img_pairs.save_image_oneband_8bit', 'img_pairs.save_image_oneband_8bit', (['new_image_path', 'predicted_change_2d', 'save_path', 'subset'], {}), '(new_image_path, predicted_change_2d,\n save_path, subset)\n', (7465, 7525), True, 'import dataTools.img_pairs as img_pairs\n'), ((7655, 7709), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(128, 128, 128)', '(128, 128, 128)'], {}), '((128, 128, 128), (128, 128, 128))\n', (7675, 7709), False, 'from torchvision import transforms\n'), ((7830, 7857), 'os.path.expanduser', 'os.path.expanduser', (['args[0]'], {}), '(args[0])\n', (7848, 7857), False, 'import sys, os\n'), ((7880, 7907), 'os.path.expanduser', 'os.path.expanduser', (['args[1]'], {}), '(args[1])\n', (7898, 7907), False, 'import sys, os\n'), ((12956, 13007), 'optparse.OptionParser', 'OptionParser', ([], {'usage': 'usage', 'version': '"""1.0 2019-11-05"""'}), "(usage=usage, version='1.0 2019-11-05')\n", (12968, 13007), False, 'from optparse import OptionParser\n'), ((1486, 1505), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)', '(7)'], {}), '(3, 64, 7)\n', (1495, 1505), False, 'from torch import nn\n'), ((1605, 1620), 'torch.nn.MaxPool2d', 
'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (1617, 1620), False, 'from torch import nn\n'), ((1683, 1704), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(5)'], {}), '(64, 128, 5)\n', (1692, 1704), False, 'from torch import nn\n'), ((1759, 1781), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(5)'], {}), '(128, 256, 5)\n', (1768, 1781), False, 'from torch import nn\n'), ((1865, 1885), 'torch.nn.Linear', 'nn.Linear', (['(2304)', '(512)'], {}), '(2304, 512)\n', (1874, 1885), False, 'from torch import nn\n'), ((1910, 1927), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(2)'], {}), '(512, 2)\n', (1919, 1927), False, 'from torch import nn\n'), ((2901, 2927), 'torch.abs', 'torch.abs', (['(res[1] - res[0])'], {}), '(res[1] - res[0])\n', (2910, 2927), False, 'import torch\n'), ((3508, 3535), 'torch.squeeze', 'torch.squeeze', (['label_target'], {}), '(label_target)\n', (3521, 3535), False, 'import torch\n'), ((3628, 3669), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['out_target', 'label_target'], {}), '(out_target, label_target)\n', (3643, 3669), True, 'import torch.nn.functional as F\n'), ((4172, 4187), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4185, 4187), False, 'import torch\n'), ((5319, 5334), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5332, 5334), False, 'import torch\n'), ((6170, 6329), 'dataTools.img_pairs.two_images_pixel_pair', 'two_images_pixel_pair', (['data_root', 'image_paths_txt', '(win_size, win_size)'], {'train': '(False)', 'transform': 'trans', 'predict_pair_id': 'pair_id', 'subset_boundary': 'subset'}), '(data_root, image_paths_txt, (win_size, win_size),\n train=False, transform=trans, predict_pair_id=pair_id, subset_boundary=\n subset)\n', (6191, 6329), False, 'from dataTools.img_pairs import two_images_pixel_pair\n'), ((8376, 8387), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8385, 8387), False, 'import sys, os\n'), ((9794, 9841), 'dataTools.img_pairs.read_img_pair_paths', 'read_img_pair_paths', (['data_root', 
'image_paths_txt'], {}), '(data_root, image_paths_txt)\n', (9813, 9841), False, 'from dataTools.img_pairs import read_img_pair_paths\n'), ((16151, 16162), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (16159, 16162), False, 'import sys, os\n'), ((2105, 2114), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2111, 2114), True, 'import torch.nn.functional as F\n'), ((2191, 2200), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2197, 2200), True, 'import torch.nn.functional as F\n'), ((2247, 2256), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2253, 2256), True, 'import torch.nn.functional as F\n'), ((4547, 4574), 'torch.squeeze', 'torch.squeeze', (['label_target'], {}), '(label_target)\n', (4560, 4574), False, 'import torch\n'), ((4675, 4719), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output_target', 'label_target'], {}), '(output_target, label_target)\n', (4690, 4719), True, 'import torch.nn.functional as F\n'), ((4745, 4789), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['output_target', 'label_target'], {}), '(output_target, label_target)\n', (4760, 4789), True, 'import torch.nn.functional as F\n'), ((7601, 7626), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7624, 7626), False, 'import torch\n'), ((8558, 8650), 'dataTools.img_pairs.two_images_pixel_pair', 'two_images_pixel_pair', (['data_root', 'image_paths_txt', '(28, 28)'], {'train': '(True)', 'transform': 'trans'}), '(data_root, image_paths_txt, (28, 28), train=True,\n transform=trans)\n', (8579, 8650), False, 'from dataTools.img_pairs import two_images_pixel_pair\n'), ((8786, 8878), 'dataTools.img_pairs.two_images_pixel_pair', 'two_images_pixel_pair', (['data_root', 'image_paths_txt', '(28, 28)'], {'train': '(True)', 'transform': 'trans'}), '(data_root, image_paths_txt, (28, 28), train=True,\n transform=trans)\n', (8807, 8878), False, 'from dataTools.img_pairs import two_images_pixel_pair\n'), ((9920, 9931), 
'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9929, 9931), False, 'import sys, os\n'), ((10016, 10042), 'os.mkdir', 'os.mkdir', (['save_predict_dir'], {}), '(save_predict_dir)\n', (10024, 10042), False, 'import sys, os\n'), ((10058, 10073), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10071, 10073), False, 'import torch\n'), ((1200, 1223), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (1216, 1223), False, 'import torch\n'), ((2352, 2361), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2358, 2361), True, 'import torch.nn.functional as F\n'), ((6773, 6802), 'torch.argmax', 'torch.argmax', (['out_prop'], {'dim': '(1)'}), '(out_prop, dim=1)\n', (6785, 6802), False, 'import torch\n'), ((10191, 10222), 'os.path.isfile', 'os.path.isfile', (['load_model_path'], {}), '(load_model_path)\n', (10205, 10222), False, 'import sys, os\n'), ((10349, 10376), 'torch.load', 'torch.load', (['load_model_path'], {}), '(load_model_path)\n', (10359, 10376), False, 'import torch\n'), ((10631, 10678), 'dataTools.img_pairs.get_image_height_width', 'img_pairs.get_image_height_width', (['image_pair[0]'], {}), '(image_pair[0])\n', (10663, 10678), True, 'import dataTools.img_pairs as img_pairs\n'), ((12568, 12637), 'os.path.join', 'os.path.join', (['save_predict_dir', "('predict_change_map_%d.tif' % pair_id)"], {}), "(save_predict_dir, 'predict_change_map_%d.tif' % pair_id)\n", (12580, 12637), False, 'import sys, os\n'), ((10977, 11072), 'split_image.sliding_window', 'split_image.sliding_window', (['width', 'height', 'subset_w', 'subset_h', 'adj_overlay_x', 'adj_overlay_y'], {}), '(width, height, subset_w, subset_h, adj_overlay_x,\n adj_overlay_y)\n', (11003, 11072), False, 'import split_image\n'), ((11270, 11335), 'os.path.join', 'os.path.join', (['save_predict_dir', "('predict_change_map_%d' % pair_id)"], {}), "(save_predict_dir, 'predict_change_map_%d' % pair_id)\n", (11282, 11335), False, 'import sys, os\n'), ((11360, 11381), 'os.mkdir', 
'os.mkdir', (['save_folder'], {}), '(save_folder)\n', (11368, 11381), False, 'import sys, os\n'), ((12167, 12236), 'os.path.join', 'os.path.join', (['save_predict_dir', "('predict_change_map_%d.tif' % pair_id)"], {}), "(save_predict_dir, 'predict_change_map_%d.tif' % pair_id)\n", (12179, 12236), False, 'import sys, os\n'), ((12261, 12322), 'basic_src.RSImageProcess.mosaics_images', 'RSImageProcess.mosaics_images', (['subset_file_list', 'save_path', '(0)'], {}), '(subset_file_list, save_path, 0)\n', (12290, 12322), True, 'import basic_src.RSImageProcess as RSImageProcess\n'), ((4872, 4906), 'torch.argmax', 'torch.argmax', (['output_target'], {'dim': '(1)'}), '(output_target, dim=1)\n', (4884, 4906), False, 'import torch\n'), ((5471, 5498), 'torch.argmax', 'torch.argmax', (['output'], {'dim': '(1)'}), '(output, dim=1)\n', (5483, 5498), False, 'import torch\n'), ((11653, 11696), 'os.path.join', 'os.path.join', (['save_folder', "('%d.tif' % s_idx)"], {}), "(save_folder, '%d.tif' % s_idx)\n", (11665, 11696), False, 'import sys, os\n')] |
import time
from utils import deserialize, serialize, print_percentage, hms_string, get_clean_tokens
import numpy as np
def error(pi, pre_pi):
    """Return the L1 distance between two rank vectors of equal length."""
    return sum(abs(after - before) for before, after in zip(pi, pre_pi))
def create_pagerank(C, L, I, k=1):
    """
    Compute PageRank scores from a compressed sparse link structure.

    :param C: per-link transition weights (parallel to I)
    :param L: CSR-style offset array; links of page i live at indices L[i]:L[i+1]
    :param I: destination page id of every link
    :param k: iteration nb
    :return: List of page rank (indices are pages ids)
    """
    start_time = time.time()
    n = len(L) - 1
    Pi = [1 / n for _ in range(n)]
    P = [0] * n
    # NOTE(review): P accumulates across all k iterations and Pi is never
    # refreshed from P, so k > 1 only rescales the result -- confirm intent.
    for _ in range(k):
        for i in range(n):
            # The original guard `if i + 1 < n + 1` was always true for
            # i in range(n) and has been removed (dead condition).
            if L[i] == L[i + 1]:  # page i has no out-links: spread rank evenly
                for j in range(n):
                    P[j] += 1 / n * Pi[i]
            else:
                for j in range(L[i], L[i + 1]):
                    P[I[j]] += C[j] * Pi[i]
            print_percentage(i, n)
    print(" ** Finish create_pagerank()")
    elapsed_time = time.time() - start_time
    print(" Elapsed time create_pagerank() : {}".format(hms_string(elapsed_time)))
    return P
def sort_page_by_score(request, dic_word_page, page_rank, alpha=1e-3, beta=.999):
    """Rank candidate pages for a query by mixing text relevance and PageRank.

    :param request: list of query words
    :param dic_word_page: word -> ({page_id: tf_normalized}, idf)
    :param page_rank: per-page PageRank scores, indexable by page id
    :param alpha: weight of the tf-idf relevance term
    :param beta: weight of the PageRank term
    :return: list of (page_id, score) tuples, highest score first
    """
    # TODO: use the WAND algorithm instead of scoring every candidate.
    # Keep only the index entries for words that appear in the query.
    query_index = {word: entry
                   for word, entry in dic_word_page.items()
                   if word in request}
    # Every page containing at least one query word is a candidate.
    candidates = set()
    for word in request:
        if word in dic_word_page:
            pages, _ = dic_word_page[word]
            candidates.update(pages.keys())
    scored = []
    for page_id in candidates:
        freq = fd(page_id, request, query_index)
        pr = page_rank[page_id]
        calcul = alpha * freq + beta * pr
        print("Frequence : ", freq, "| Pagerank : ", pr, " | Score : ",calcul)
        scored.append((page_id, calcul))
    scored.sort(key=lambda pair: pair[1], reverse=True)
    return scored
def fd(d, r, dic_word_page):
    """Tf-idf relevance of document `d` for the query words `r`,
    normalized by the L2 norm of the idf values in `dic_word_page`.

    :param d: page id
    :param r: list of query words
    :param dic_word_page: word -> ({page_id: tf_normalized}, idf)
    """
    import math
    denom = math.sqrt(sum(idf ** 2 for _, idf in dic_word_page.values()))
    score = 0
    for word in r:
        if word in dic_word_page:
            pages, idf = dic_word_page[word]
            if d in pages:
                score += idf * pages[d]
    return score / denom
if __name__ == '__main__':
    # Interactive search REPL: load precomputed artifacts, then score each
    # typed query over a sweep of alpha/beta mixing weights.
    print("deserialize pagerank")
    P = deserialize("../data/pagerank.serialized")
    print("deserialize dico")
    dicto = deserialize("../data/dico.serialized")
    print("deserialize page list")
    page_list = deserialize("../data/pagelist_noclean.serialized")
    while True:
        req = input("Req : ")
        if req == "exit 0":  # sentinel to quit the REPL
            break
        # NOTE(review): clean_req is computed but req.split() is what is
        # actually scored below -- confirm which tokenization is intended.
        clean_req = get_clean_tokens(req)
        print("clean req = ", req.split())
        print("sort page by score")
        # Sweep the relevance/PageRank mixing weight to compare rankings.
        for alpha in np.arange(0, 0.2, 0.001):
            # alpha = 0.001
            beta = 1 - alpha
            if beta > 0:
                res = sort_page_by_score(req.split(), dicto, P, alpha, beta)
                print("\nVOICI LE RESULSTAT pour alpha = ", alpha, "beta = ", beta)
                # Show the top-5 pages with their Wikipedia URLs.
                for i in res[:5]:
                    link = "https://fr.wikipedia.org/?curid={0}"
                    # print(page_list[i[0]][1], " ", link.format(page_list[i[0]][1].replace(" ", "_").replace("'", "%27")))
                    print(page_list[i[0]][1], " ", link.format(page_list[i[0]][0]))
| [
"math.sqrt",
"utils.hms_string",
"utils.deserialize",
"utils.print_percentage",
"time.time",
"numpy.arange",
"utils.get_clean_tokens"
] | [((414, 425), 'time.time', 'time.time', ([], {}), '()\n', (423, 425), False, 'import time\n'), ((2696, 2711), 'math.sqrt', 'math.sqrt', (['norm'], {}), '(norm)\n', (2705, 2711), False, 'import math\n'), ((2971, 3013), 'utils.deserialize', 'deserialize', (['"""../data/pagerank.serialized"""'], {}), "('../data/pagerank.serialized')\n", (2982, 3013), False, 'from utils import deserialize, serialize, print_percentage, hms_string, get_clean_tokens\n'), ((3057, 3095), 'utils.deserialize', 'deserialize', (['"""../data/dico.serialized"""'], {}), "('../data/dico.serialized')\n", (3068, 3095), False, 'from utils import deserialize, serialize, print_percentage, hms_string, get_clean_tokens\n'), ((3148, 3198), 'utils.deserialize', 'deserialize', (['"""../data/pagelist_noclean.serialized"""'], {}), "('../data/pagelist_noclean.serialized')\n", (3159, 3198), False, 'from utils import deserialize, serialize, print_percentage, hms_string, get_clean_tokens\n'), ((937, 948), 'time.time', 'time.time', ([], {}), '()\n', (946, 948), False, 'import time\n'), ((3315, 3336), 'utils.get_clean_tokens', 'get_clean_tokens', (['req'], {}), '(req)\n', (3331, 3336), False, 'from utils import deserialize, serialize, print_percentage, hms_string, get_clean_tokens\n'), ((3438, 3462), 'numpy.arange', 'np.arange', (['(0)', '(0.2)', '(0.001)'], {}), '(0, 0.2, 0.001)\n', (3447, 3462), True, 'import numpy as np\n'), ((848, 870), 'utils.print_percentage', 'print_percentage', (['i', 'n'], {}), '(i, n)\n', (864, 870), False, 'from utils import deserialize, serialize, print_percentage, hms_string, get_clean_tokens\n'), ((1022, 1046), 'utils.hms_string', 'hms_string', (['elapsed_time'], {}), '(elapsed_time)\n', (1032, 1046), False, 'from utils import deserialize, serialize, print_percentage, hms_string, get_clean_tokens\n')] |
from pygco import cut_from_graph
import numpy as np
import sys
class DiscreteEnergyMinimize:
    def __init__(self, nlabel, lamb, value1=100, value2=10000, niter = 10):
        '''
        Graph-cut based discrete energy minimizer (alpha-beta swap via pygco).

        Args:
            nlabel - int
                number of discrete labels
            lamb - float
                should be positive; strength of the label-agreement prior
            value1 - int
                integer scale for the pairwise cost, defaults to be 100
            value2 - int
                integer scale for the unary cost, defaults to be 10000
            niter - int
                number of swap iterations per cut, defaults to 10
        '''
        self.nlabel = nlabel
        self.lamb = lamb
        self.value1 = value1
        self.value2 = value2
        self.niter = niter
        # pygco expects int32 costs, so the float prior is scaled then truncated.
        self.pairwise_cost = -self.value1*self.lamb*np.eye(self.nlabel)
        self.pairwise_cost = self.pairwise_cost.astype(np.int32)

    def solve(self, unary_term, pairwise_term, k):
        '''
        Run k rounds of graph cuts; after each round the selected
        (vertex, label) pairs are masked out of the unary cost so the next
        round picks the next-best label per vertex.

        Args :
            unary_term - Numpy 2d array [nvertex, nlabel]
                unary term to be minimized
            pairwise_term - Numpy 2d array [nvertex, nvertex]
                pairwise term to be minimized
            k - int
                number of labels to select per vertex

        Returns:
            Numpy 2d array [nvertex, nlabel] with 1.0 at every selected
            (vertex, label) pair.
        '''
        assert unary_term.shape[1]==self.nlabel, "Unary term has wrong labels"
        nvertex = unary_term.shape[0]
        assert pairwise_term.shape==(nvertex, nvertex), "Pairwise term has wrong shape"
        unary_term = unary_term*self.value1*self.value2
        unary_term = unary_term.astype(np.int32)
        # One undirected edge per unordered vertex pair.
        nedges = nvertex*(nvertex-1)//2
        self.edges = np.zeros([nedges, 3], dtype=np.float32)
        idx = 0
        for i in range(nvertex):
            for j in range(i+1, nvertex):
                self.edges[idx] = [i, j, -self.value2*pairwise_term[i][j]]
                idx+=1
        self.edges = self.edges.astype(np.int32)
        binary_vector = np.zeros([nvertex, self.nlabel], dtype=np.float32)
        for _ in range(k):
            results = cut_from_graph(edges=self.edges, unary_cost=unary_term, pairwise_cost=self.pairwise_cost, n_iter=self.niter, algorithm='swap')
            for i, j in enumerate(results):
                binary_vector[i][j] = 1
                # Mask the chosen label so the next round cannot pick it again.
                unary_term[i][j] = np.iinfo(np.int32).max//2
        return binary_vector
| [
"numpy.eye",
"numpy.zeros",
"numpy.iinfo",
"pygco.cut_from_graph"
] | [((1442, 1481), 'numpy.zeros', 'np.zeros', (['[nedges, 3]'], {'dtype': 'np.float32'}), '([nedges, 3], dtype=np.float32)\n', (1450, 1481), True, 'import numpy as np\n'), ((1756, 1806), 'numpy.zeros', 'np.zeros', (['[nvertex, self.nlabel]'], {'dtype': 'np.float32'}), '([nvertex, self.nlabel], dtype=np.float32)\n', (1764, 1806), True, 'import numpy as np\n'), ((606, 625), 'numpy.eye', 'np.eye', (['self.nlabel'], {}), '(self.nlabel)\n', (612, 625), True, 'import numpy as np\n'), ((1902, 2033), 'pygco.cut_from_graph', 'cut_from_graph', ([], {'edges': 'self.edges', 'unary_cost': 'unary_term', 'pairwise_cost': 'self.pairwise_cost', 'n_iter': 'self.niter', 'algorithm': '"""swap"""'}), "(edges=self.edges, unary_cost=unary_term, pairwise_cost=self.\n pairwise_cost, n_iter=self.niter, algorithm='swap')\n", (1916, 2033), False, 'from pygco import cut_from_graph\n'), ((2150, 2168), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (2158, 2168), True, 'import numpy as np\n')] |
#! /usr/bin/env python
import os
import sys
import itertools
import numpy as np
import numpy.linalg as linalg
# Build the input file list: one "<name>.txt" under argv[1] for each line of
# the list file argv[2] (x[:-1] drops the trailing newline).
# NOTE(review): this is Python 2 code -- itertools.ifilter does not exist in
# Python 3, and the open() handles are never closed.
IN = [os.path.join(sys.argv[1], x[:-1] + '.txt') for x in open(sys.argv[2])]
skipComments = lambda path: itertools.ifilter(lambda x: not x.startswith('#'), open(path))
# One kernel-matrix slot per non-comment line of the first input file.
ks = [None]*len(list(skipComments(IN[0])))
# Parallel line iterators over every input file.
streams = map(skipComments, IN)
def fv_norm(fv):
    """Clip, signed-square-root, then L2-normalize a feature vector."""
    clipped = np.clip(fv, -1000, 1000)
    rooted = np.sign(clipped) * np.sqrt(np.abs(clipped))
    # Small epsilon guards against division by zero for all-zero vectors.
    return rooted / (1e-4 + linalg.norm(rooted))
# For each line index, stack one normalized feature vector per input file and
# accumulate the linear kernel (Gram matrix) of that stack.
# NOTE(review): Python 2 only -- `s.next()` is `next(s)` in Python 3 and
# `reduce` moved to functools.
for i in range(len(ks)):
    x = np.vstack(tuple([fv_norm(np.fromstring(s.next(), dtype = np.float32, sep = '\t')) for s in streams]))
    ks[i] = np.dot(x, x.T)
# Sum the per-line kernels and print as a tab-separated matrix on stdout.
res = reduce(np.add, ks)
np.savetxt(sys.stdout, res, fmt = '%.6f', delimiter = '\t')
| [
"numpy.abs",
"numpy.savetxt",
"numpy.clip",
"numpy.linalg.norm",
"numpy.sign",
"numpy.dot",
"os.path.join"
] | [((671, 726), 'numpy.savetxt', 'np.savetxt', (['sys.stdout', 'res'], {'fmt': '"""%.6f"""', 'delimiter': '"""\t"""'}), "(sys.stdout, res, fmt='%.6f', delimiter='\\t')\n", (681, 726), True, 'import numpy as np\n'), ((118, 160), 'os.path.join', 'os.path.join', (['sys.argv[1]', "(x[:-1] + '.txt')"], {}), "(sys.argv[1], x[:-1] + '.txt')\n", (130, 160), False, 'import os\n'), ((380, 404), 'numpy.clip', 'np.clip', (['fv', '(-1000)', '(1000)'], {}), '(fv, -1000, 1000)\n', (387, 404), True, 'import numpy as np\n'), ((630, 644), 'numpy.dot', 'np.dot', (['x', 'x.T'], {}), '(x, x.T)\n', (636, 644), True, 'import numpy as np\n'), ((411, 422), 'numpy.sign', 'np.sign', (['fv'], {}), '(fv)\n', (418, 422), True, 'import numpy as np\n'), ((460, 475), 'numpy.linalg.norm', 'linalg.norm', (['fv'], {}), '(fv)\n', (471, 475), True, 'import numpy.linalg as linalg\n'), ((433, 443), 'numpy.abs', 'np.abs', (['fv'], {}), '(fv)\n', (439, 443), True, 'import numpy as np\n')] |
'''
Determine the shift between two spectra
'''
import math
import numpy as np
def find_row_of_max(A):
    """Return the index of the row whose last-column (intensity) value is largest.

    Args:
        A: 2-D numpy array of spectrum data; intensity is the last column.

    Returns:
        Index of the first row attaining the maximum of A[:, -1],
        or -1 if A has no rows (preserves the original sentinel).
    """
    if A.shape[0] == 0:
        return -1
    # np.argmax returns the first occurrence on ties, matching the
    # original strict '>' scan.
    return int(np.argmax(A[:, -1]))
def ev_energy(A, row):
    """Return the eV value (column 0) stored at the given row of A."""
    return A[row][0]
def nm_energy(A, row):
    """Return the nm value (column 1) stored at the given row of A."""
    return A[row][1]
def find_shift(file1, file2, units):
    '''
    Return the peak-position shift (file2 minus file1) between two spectra.

    :param file1: path to the reference spectrum (whitespace-delimited;
        column 0 = eV, column 1 = nm, last column = intensity)
    :param file2: path to the shifted spectrum, same layout
    :param units: "ev" or "nm" -- which energy column to compare
    :return: energy of file2's peak minus energy of file1's peak
    :raises ValueError: if units is neither "ev" nor "nm" (previously this
        fell through and crashed later with a TypeError on None arithmetic)
    '''
    DATA1 = np.loadtxt(file1)
    DATA2 = np.loadtxt(file2)
    max1 = find_row_of_max(DATA1)
    max2 = find_row_of_max(DATA2)
    if units == "ev":
        energy1 = ev_energy(DATA1, max1)
        energy2 = ev_energy(DATA2, max2)
    elif units == "nm":
        energy1 = nm_energy(DATA1, max1)
        energy2 = nm_energy(DATA2, max2)
    else:
        raise ValueError("units must be 'ev' or 'nm', got %r" % (units,))
    return energy2 - energy1
| [
"numpy.loadtxt"
] | [((619, 636), 'numpy.loadtxt', 'np.loadtxt', (['file1'], {}), '(file1)\n', (629, 636), True, 'import numpy as np\n'), ((649, 666), 'numpy.loadtxt', 'np.loadtxt', (['file2'], {}), '(file2)\n', (659, 666), True, 'import numpy as np\n')] |
import torch
from torch import nn
import numpy as np
from collections import defaultdict
import polyscope as ps
def to_numpy_cpu(a):
    """Return `a` as a CPU numpy array.

    ndarrays pass through unchanged; torch tensors are detached, moved to
    CPU, and converted. Anything else raises ValueError.
    """
    if isinstance(a, np.ndarray):
        return a
    if isinstance(a, torch.Tensor):
        return a.detach().cpu().numpy()
    raise ValueError("Requiring Numpy or torch.Tensor")
class PolyScopeVisualizer(nn.Module):
def __init__(self, model_cfg, **kwargs):
super().__init__()
self.model_cfg = model_cfg
self.enabled = model_cfg.get('ENABLED', False)
if self.enabled:
self.point_cloud_vis = model_cfg.get("POINT_CLOUD", None)
self.box_vis = model_cfg.get("BOX", None)
self.lidar_origin_vis = model_cfg.get("LIDAR_ORIGIN", None)
self.graph_vis = model_cfg.get("GRAPH", None)
self.primitive_vis = model_cfg.get("PRIMITIVE", None)
self.shared_color_dict = model_cfg.get("SHARED_COLOR", None)
self.output = model_cfg.get("OUTPUT", None)
self.voxel_size = model_cfg.get('voxel_size', None)
self.pc_range = model_cfg.get('pc_range', None)
self.size_factor = model_cfg.get('size_factor', None)
self.radius = model_cfg.get('radius', 2e-4)
self.ground_plane = model_cfg.get("ground_plane", False)
self.init()
def color(self, color_name):
if not hasattr(self, "_shared_color"):
raise ValueError("Color Dictionary not initialized")
return self._shared_color[color_name]
def init(self):
ps.set_up_dir('z_up')
ps.init()
if not self.ground_plane:
ps.set_ground_plane_mode('none')
if self.shared_color_dict is not None:
color_dict = {}
for color_name, color in self.shared_color_dict.items():
if isinstance(color, list) and len(color) == 2:
color_dict[color_name] = np.random.uniform(size=color)
else:
color_dict[color_name] = np.array(color)
self._shared_color = color_dict
def visualize(self, monitor=None):
if monitor is None:
return
if monitor == 'screen':
self.show()
elif isinstance(monitor, str):
self.screenshot(monitor)
else:
raise ValueError(f"Unrecognized Monitor Option {monitor}")
def forward(self, batch_dict):
if not self.enabled:
return
for i in range(batch_dict['batch_size']):
if self.lidar_origin_vis is not None:
for lo_key, vis_cfg_this in self.lidar_origin_vis.items():
vis_cfg = {}; vis_cfg.update(vis_cfg_this)
origins = batch_dict[lo_key]
origin = to_numpy_cpu(origins)[i]
if 'name' in vis_cfg:
lo_name = vis_cfg.pop('name')
else:
lo_name = lo_key
self.pointcloud(lo_name, origin[np.newaxis, :], None, None, **vis_cfg)
if self.point_cloud_vis is not None:
for pc_key, vis_cfg_this in self.point_cloud_vis.items():
if pc_key not in batch_dict:
continue
vis_cfg = {}; vis_cfg.update(vis_cfg_this)
pointcloud = batch_dict[pc_key]
batch_key = vis_cfg.pop('batch') if 'batch' in vis_cfg else None
if batch_key is None:
batch_idx = pointcloud[:, 0]
pointcloud = pointcloud[:, 1:]
else:
batch_idx = batch_dict[batch_key][:, 0]
batch_mask = batch_idx == i
pointcloud = to_numpy_cpu(pointcloud[batch_mask, :3])
if 'name' in vis_cfg:
pc_name = vis_cfg.pop('name')
else:
pc_name = pc_key
self.pointcloud(pc_name, pointcloud, batch_dict, batch_mask, **vis_cfg)
if self.box_vis is not None:
for box_key, vis_cfg_this in self.box_vis.items():
if box_key not in batch_dict:
continue
vis_cfg = {}; vis_cfg.update(vis_cfg_this)
boxes = to_numpy_cpu(batch_dict[box_key][i])
mask = (boxes[:, 3:6] ** 2).sum(axis=-1) > 1e-1
boxes = boxes[mask]
if boxes.shape[1] > 7:
labels = boxes[:, 7]
else:
labels = np.zeros(boxes.shape[0]).astype(np.int32)
boxes = boxes[:, :7]
if 'name' in vis_cfg:
box_name = vis_cfg.pop('name')
else:
box_name = box_key
self.boxes_from_attr(box_name, boxes, batch_dict, i, mask, labels, **vis_cfg)
if self.graph_vis is not None:
for graph_key, vis_cfg_this in self.graph_vis.items():
if graph_key not in batch_dict:
continue
vis_cfg = {}; vis_cfg.update(vis_cfg_this)
e_query, e_ref = to_numpy_cpu(batch_dict[graph_key])
query_key = vis_cfg.pop('query')
query_points = to_numpy_cpu(batch_dict[query_key])
ref_key = vis_cfg.pop('ref')
ref_points = to_numpy_cpu(batch_dict[ref_key])
valid_mask = (query_points[e_query, 0].round().astype(np.int32) == i) & (ref_points[e_ref, 0].round().astype(np.int32) == i)
e_query, e_ref = e_query[valid_mask], e_ref[valid_mask]
# take this batch
query_batch_idx = np.where(query_points[:, 0].round().astype(np.int32) == i)[0]
query_idx_map = np.zeros(query_points.shape[0]).round().astype(np.int32)
query_idx_map[query_batch_idx] = np.arange(query_batch_idx.shape[0])
query_points = to_numpy_cpu(query_points[query_batch_idx, 1:])
e_query = query_idx_map[e_query]
ref_batch_idx = np.where(ref_points[:, 0].round().astype(np.int32) == i)[0]
ref_idx_map = np.zeros(ref_points.shape[0]).round().astype(np.int32)
ref_idx_map[ref_batch_idx] = np.arange(ref_batch_idx.shape[0])
ref_points = to_numpy_cpu(ref_points[ref_batch_idx, 1:])
e_ref = ref_idx_map[e_ref]
edge_indices = to_numpy_cpu(np.stack([e_query, e_ref+query_points.shape[0]], axis=-1))
if 'name' in vis_cfg:
graph_name = vis_cfg.pop('name')
else:
graph_name = graph_key
all_points = np.concatenate([query_points[:, :3], ref_points[:, :3]], axis=0)
self.curvenetwork(graph_name, all_points, edge_indices, batch_dict, valid_mask, **vis_cfg)
if self.primitive_vis is not None:
for primitive_key, vis_cfg in self.primitive_vis.items():
if primitive_key not in batch_dict:
continue
vis_cfg_this = {}; vis_cfg_this.update(vis_cfg)
primitives = to_numpy_cpu(batch_dict[primitive_key])
batch_index = primitives[:, 0].round().astype(np.int32)
batch_mask = batch_index == i
primitives = primitives[batch_mask, 1:]
centers = primitives[:, :3]
cov = primitives[:, -10:-1].reshape(-1, 3, 3)
S, R = np.linalg.eigh(cov)
R = R * np.sqrt(S[:, None, :])
fitness = primitives[:, -1].reshape(-1)
corners = []
if False:
shell = np.random.randn(20, 3)
shell = shell / shell.norm(p=2, dim=-1)[:, None]
point_balls = (R @ shell.T).transpose(1, 2) + centers[:, None, :]
point_balls = point_balls.reshape(-1, 3)
self.pointcloud(primitive_key, point_balls, None, None, **vis_cfg_this)
else:
for dx in [-1, 1]:
for dy, dz in [(-1, -1), (-1, 1), (1, 1), (1, -1)]:
dvec = np.array([dx, dy, dz]).astype(np.float32)
corner = centers + (R * dvec).sum(-1)
corners.append(corner)
corners = np.stack(corners, axis=1)
hexes = np.arange(corners.shape[0]*8).reshape(-1, 8)
scalars = vis_cfg_this.pop("scalars") if "scalars" in vis_cfg else None
class_labels = vis_cfg_this.pop("class_labels") if "class_labels" in vis_cfg_this else None
ps_v = ps.register_volume_mesh(primitive_key, to_numpy_cpu(corners.reshape(-1, 3)), hexes=hexes, **vis_cfg_this)
ps_v.add_scalar_quantity('fitness', to_numpy_cpu(fitness), defined_on='cells')
if scalars:
for scalar_name, scalar_cfg in scalars.items():
ps_v.add_scalar_quantity('scalars/'+scalar_name, to_numpy_cpu(batch_dict[scalar_name][batch_mask]), defined_on='cells', **scalar_cfg)
if class_labels:
for label_name, label_cfg in class_labels.items():
label = to_numpy_cpu(batch_dict[label_name][batch_mask]).astype(np.int32)
label_cfg_this = {}
for key, val in label_cfg.items():
if (key == 'values') and isinstance(val, str):
label_cfg_this[key] = self.color(val)[label]
invalid_mask = label < 0
label_cfg_this[key][invalid_mask] = np.array([75./255, 75./255, 75/255.])
else:
label_cfg_this[key] = val
ps_v.add_color_quantity('class_labels/'+label_name, defined_on='cells', **label_cfg_this)
self.visualize(monitor=self.output)
def clear(self):
ps.remove_all_structures()
self.logs = []
def pc_scalar(self, pc_name, name, quantity, enabled=False):
if not self.enabled:
raise ValueError(f"Visualizer {self.__class__} is not Enabled")
ps.get_point_cloud(pc_name).add_scalar_quantity(name, quantity, enabled=enabled)
def pc_color(self, pc_name, name, color, enabled=False):
if not self.enabled:
raise ValueError(f"Visualizer {self.__class__} is not Enabled")
ps.get_point_cloud(pc_name).add_color_quantity(name, color, enabled=enabled)
def corres(self, name, src, tgt):
if not self.enabled:
raise ValueError(f"Visualizer {self.__class__} is not Enabled")
points = np.concatenate([src, tgt], axis=0)
edges = np.stack([np.arange(src.shape[0]),
np.arange(tgt.shape[0]) + src.shape[0]], axis=-1)
return ps.register_curve_network(name, points, edges, radius=self.radius)
def trace(self, name, points, **kwargs):
if not self.enabled:
raise ValueError(f"Visualizer {self.__class__} is not Enabled")
num_points = points.shape[0]
edges = np.stack([np.arange(num_points-1),
np.arange(num_points-1)+1], axis=-1)
return ps.register_curve_network(name, points, edges, **kwargs)
def curvenetwork(self, name, nodes, edges, data_dict, batch_mask, **kwargs):
if not self.enabled:
raise ValueError(f"Visualizer {self.__class__} is not Enabled")
edge_scalars = kwargs.pop("edge_scalars") if "edge_scalars" in kwargs else None
radius = kwargs.pop('radius', self.radius)
ps_c = ps.register_curve_network(name, nodes, edges, radius=radius, **kwargs)
if edge_scalars:
for scalar_name, scalar_cfg in edge_scalars.items():
scalar = to_numpy_cpu(data_dict[scalar_name][batch_mask])
ps_c.add_scalar_quantity('edge-scalars/'+scalar_name, scalar, defined_on='edges', **scalar_cfg)
return ps_c
def pointcloud(self, name, pointcloud, data_dict, batch_mask, color=None, radius=None, **kwargs):
"""Visualize non-zero entries of heat map on 3D point cloud.
point cloud (torch.Tensor, [N, 3])
"""
if not self.enabled:
raise ValueError(f"Visualizer {self.__class__} is not Enabled")
if radius is None:
radius = self.radius
scalars = kwargs.pop("scalars") if "scalars" in kwargs else None
class_labels = kwargs.pop("class_labels") if "class_labels" in kwargs else None
if color is None:
ps_p = ps.register_point_cloud(name, pointcloud, radius=radius, **kwargs)
else:
ps_p = ps.register_point_cloud(
name, pointcloud, radius=radius, color=tuple(color), **kwargs
)
if scalars:
for scalar_name, scalar_cfg in scalars.items():
if scalar_name not in data_dict:
continue
scalar = to_numpy_cpu(data_dict[scalar_name][batch_mask])
ps_p.add_scalar_quantity('scalars/'+scalar_name, scalar.reshape(-1), **scalar_cfg)
if class_labels:
for label_name, label_cfg in class_labels.items():
if label_name not in data_dict:
continue
label = to_numpy_cpu(data_dict[label_name][batch_mask]).astype(np.int32)
if label.shape[0] == 0:
continue
label_cfg_this = {}
for key, val in label_cfg.items():
if (key == 'values') and isinstance(val, str):
label_cfg_this[key] = self.color(val)[label]
invalid_mask = label < 0
label_cfg_this[key][invalid_mask] = np.array([75./255, 75./255, 75/255.])
else:
label_cfg_this[key] = val
if label_cfg_this.get('values', None) is None:
print(label.shape, label_name)
label_cfg_this['values'] = np.random.randn(label.max()+100, 3)[label]
ps_p.add_color_quantity('class_labels/'+label_name, **label_cfg_this)
return ps_p
def get_meshes(self, centers, eigvals, eigvecs):
""" Prepare corners and faces (for visualization only). """
if not self.enabled:
raise ValueError(f"Visualizer {self.__class__} is not Enabled")
v1 = eigvecs[:, :3]
v2 = eigvecs[:, 3:]
e1 = np.sqrt(eigvals[:, 0:1])
e2 = np.sqrt(eigvals[:, 1:2])
corners = []
for d1 in [-1, 1]:
for d2 in [-1, 1]:
corners.append(centers + d1*v1*e1 + d2*v2*e2)
num_voxels = centers.shape[0]
corners = np.stack(corners, axis=1) # [M, 4, 3]
faces = [0, 1, 3, 2]
faces = np.array(faces, dtype=np.int32)
faces = np.repeat(faces[np.newaxis, np.newaxis, ...], num_voxels, axis=0)
faces += np.arange(num_voxels)[..., np.newaxis, np.newaxis]*4
return corners.reshape(-1, 3), faces.reshape(-1, 4)
def planes(self, name, planes):
if not self.enabled:
raise ValueError(f"Visualizer {self.__class__} is not Enabled")
corners, faces = self.get_meshes(planes[:, :3], planes[:, 6:8], planes[:, 8:14])
return ps.register_surface_mesh(name, corners, faces)
def boxes_from_attr(self, name, attr, data_dict=None, batch_mask=None, data_mask=None, labels=None, **kwargs):
if not self.enabled:
raise ValueError(f"Visualizer {self.__class__} is not Enabled")
from pcdet.utils.box_utils import boxes_to_corners_3d
corners = boxes_to_corners_3d(attr)
if 'with_ori' in kwargs:
with_ori = kwargs.pop('with_ori')
else:
with_ori = False
ps_box = self.boxes(name, corners, data_dict, batch_mask, data_mask, labels, **kwargs)
#if with_ori:
# ori = attr[:, -1]
# sint, cost = np.sin(ori), np.cos(ori)
# arrow = np.stack([sint, cost, np.zeros_like(cost)], axis=-1)[:, np.newaxis, :].repeat(8, 1)
# ps_box.add_vector_quantity('orientation', arrow.reshape(-1, 3), enabled=True)
def boxes(self, name, corners, data_dict=None, batch_mask=None, data_mask=None, labels=None, **kwargs):
"""
corners (shape=[N, 8, 3]):
labels (shape=[N])
"""
if not self.enabled:
raise ValueError(f"Visualizer {self.__class__} is not Enabled")
# 0 1
# 3 2
# | |
# 4 5
# 7 6
#edges = [[0, 1], [0, 3], [0, 4], [1, 2],
# [1, 5], [2, 3], [2, 6], [3, 7],
# [4, 5], [4, 7], [5, 6], [6, 7]]
N = corners.shape[0]
#edges = np.array(edges) # [12, 2]
#edges = np.repeat(edges[np.newaxis, ...], N, axis=0) # [N, 12, 2]
#offset = np.arange(N)[..., np.newaxis, np.newaxis]*8 # [N, 1, 1]
#edges = edges + offset
#if kwargs.get('radius', None) is None:
# kwargs['radius'] = 2e-4
scalars = kwargs.pop("scalars") if "scalars" in kwargs else None
class_labels = kwargs.pop("class_labels") if "class_labels" in kwargs else None
corners = to_numpy_cpu(corners)
corners = corners.reshape(-1, 3)
ps_box = ps.register_volume_mesh(
name, corners,
hexes=np.arange(corners.shape[0]).reshape(-1, 8),
**kwargs
)
ps_box.set_transparency(0.2)
if scalars:
for scalar_name, scalar_cfg in scalars.items():
if scalar_name not in data_dict:
continue
scalar = to_numpy_cpu(data_dict[scalar_name][batch_mask][data_mask]).reshape(-1)
ps_box.add_scalar_quantity('scalars/'+scalar_name, scalar, defined_on='cells', **scalar_cfg)
if labels is not None:
# R->Car, G->Ped, B->Cyc
colors = np.array([[1,0,0], [1,0,0], [0,1,0], [0,0,1], [1,0,1], [1,1,0]])
labels = to_numpy_cpu(labels).astype(np.int64)
#labels = np.repeat(labels[:, np.newaxis], 8, axis=-1).reshape(-1).astype(np.int64)
ps_box.add_color_quantity('class', colors[labels], defined_on='cells', enabled=True)
ps_box.add_scalar_quantity('scalars/class', labels, defined_on='cells')
return ps_box
def wireframe(self, name, heatmap):
if not self.enabled:
raise ValueError(f"Visualizer {self.__class__} is not Enabled")
size_y, size_x = heatmap.shape
x, y = np.meshgrid(heatmap)
return x, y
def heatmap(self, name, heatmap, color=True, threshold=0.1,
**kwargs):
"""Visualize non-zero entries of heat map on 3D point cloud.
`voxel_size`, `size_factor`, `pc_range` need to be specified.
By default, the heatmap need to be transposed.
Args:
heatmap (torch.Tensor or np.ndarray, [W, H])
"""
if not self.enabled:
raise ValueError(f"Visualizer {self.__class__} is not Enabled")
if isinstance(heatmap, np.ndarray):
heatmap = torch.from_numpy(heatmap)
if self.voxel_size is None:
raise ValueError("self.voxel_size not specified")
heatmap = heatmap.T
size_x, size_y = heatmap.shape
x, y = torch.meshgrid(torch.arange(size_x),
torch.arange(size_y),
indexing="ij")
x, y = x.reshape(-1), y.reshape(-1)
z = heatmap.reshape(-1)
mask = torch.zeros(size_x+2, size_y+2, size_x+2, size_y+2, dtype=torch.bool)
for dx, dy in [[0, 1], [0, -1], [1, 0], [-1, 0]]:
mask[x+1, y+1, x+1+dx, y+1+dy] = True
x0, y0, x1, y1 = torch.where(mask)
x0, y0, x1, y1 = x0-1, y0-1, x1-1, y1-1
is_inside = ((x1 >= size_x) | (x1 < 0) | (y1 >= size_y) | (y1 < 0)) == False
e0 = (x0 * size_y + y0)[is_inside]
e1 = (x1 * size_y + y1)[is_inside]
edges = torch.stack([e0, e1], dim=-1)
x = x * self.size_factor * self.voxel_size[0] + self.pc_range[0]
y = y * self.size_factor * self.voxel_size[1] + self.pc_range[1]
nodes = torch.stack([x, y, z], dim=-1)
radius = kwargs.get("radius", self.radius*10)
ps_c = self.curvenetwork(name, nodes, edges, radius=radius)
if color:
ps_c.add_scalar_quantity("height", z, enabled=True)
return ps_c
def show(self):
if not self.enabled:
raise ValueError(f"Visualizer {self.__class__} is not Enabled")
ps.set_up_dir('z_up')
ps.init()
ps.show()
def look_at(self, center, distance=100, bev=True, **kwargs):
if not self.enabled:
raise ValueError(f"Visualizer {self.__class__} is not Enabled")
if bev:
camera_loc = center + np.array([0, 0, distance])
# look down from bird eye view
# with +y-axis being the up dir on the image
ps.look_at_dir(camera_loc, center, (0,1,0), **kwargs)
else:
raise ValueError("Not Implemented Yet, please use bev=True")
def screenshot(self, filename, **kwargs):
        """Save the current polyscope view to `filename`."""
        if self.enabled:
            ps.screenshot(filename, **kwargs)
        else:
            raise ValueError(f"Visualizer {self.__class__} is not Enabled")
| [
"numpy.arange",
"torch.arange",
"numpy.sqrt",
"pcdet.utils.box_utils.boxes_to_corners_3d",
"numpy.meshgrid",
"numpy.random.randn",
"polyscope.register_point_cloud",
"polyscope.get_point_cloud",
"polyscope.register_surface_mesh",
"torch.zeros",
"numpy.repeat",
"numpy.stack",
"polyscope.show",... | [((1572, 1593), 'polyscope.set_up_dir', 'ps.set_up_dir', (['"""z_up"""'], {}), "('z_up')\n", (1585, 1593), True, 'import polyscope as ps\n'), ((1602, 1611), 'polyscope.init', 'ps.init', ([], {}), '()\n', (1609, 1611), True, 'import polyscope as ps\n'), ((10728, 10754), 'polyscope.remove_all_structures', 'ps.remove_all_structures', ([], {}), '()\n', (10752, 10754), True, 'import polyscope as ps\n'), ((11455, 11489), 'numpy.concatenate', 'np.concatenate', (['[src, tgt]'], {'axis': '(0)'}), '([src, tgt], axis=0)\n', (11469, 11489), True, 'import numpy as np\n'), ((11632, 11698), 'polyscope.register_curve_network', 'ps.register_curve_network', (['name', 'points', 'edges'], {'radius': 'self.radius'}), '(name, points, edges, radius=self.radius)\n', (11657, 11698), True, 'import polyscope as ps\n'), ((12016, 12072), 'polyscope.register_curve_network', 'ps.register_curve_network', (['name', 'points', 'edges'], {}), '(name, points, edges, **kwargs)\n', (12041, 12072), True, 'import polyscope as ps\n'), ((12418, 12488), 'polyscope.register_curve_network', 'ps.register_curve_network', (['name', 'nodes', 'edges'], {'radius': 'radius'}), '(name, nodes, edges, radius=radius, **kwargs)\n', (12443, 12488), True, 'import polyscope as ps\n'), ((15325, 15349), 'numpy.sqrt', 'np.sqrt', (['eigvals[:, 0:1]'], {}), '(eigvals[:, 0:1])\n', (15332, 15349), True, 'import numpy as np\n'), ((15363, 15387), 'numpy.sqrt', 'np.sqrt', (['eigvals[:, 1:2]'], {}), '(eigvals[:, 1:2])\n', (15370, 15387), True, 'import numpy as np\n'), ((15585, 15610), 'numpy.stack', 'np.stack', (['corners'], {'axis': '(1)'}), '(corners, axis=1)\n', (15593, 15610), True, 'import numpy as np\n'), ((15668, 15699), 'numpy.array', 'np.array', (['faces'], {'dtype': 'np.int32'}), '(faces, dtype=np.int32)\n', (15676, 15699), True, 'import numpy as np\n'), ((15716, 15781), 'numpy.repeat', 'np.repeat', (['faces[np.newaxis, np.newaxis, ...]', 'num_voxels'], {'axis': '(0)'}), '(faces[np.newaxis, np.newaxis, 
...], num_voxels, axis=0)\n', (15725, 15781), True, 'import numpy as np\n'), ((16162, 16208), 'polyscope.register_surface_mesh', 'ps.register_surface_mesh', (['name', 'corners', 'faces'], {}), '(name, corners, faces)\n', (16186, 16208), True, 'import polyscope as ps\n'), ((16510, 16535), 'pcdet.utils.box_utils.boxes_to_corners_3d', 'boxes_to_corners_3d', (['attr'], {}), '(attr)\n', (16529, 16535), False, 'from pcdet.utils.box_utils import boxes_to_corners_3d\n'), ((19508, 19528), 'numpy.meshgrid', 'np.meshgrid', (['heatmap'], {}), '(heatmap)\n', (19519, 19528), True, 'import numpy as np\n'), ((20533, 20610), 'torch.zeros', 'torch.zeros', (['(size_x + 2)', '(size_y + 2)', '(size_x + 2)', '(size_y + 2)'], {'dtype': 'torch.bool'}), '(size_x + 2, size_y + 2, size_x + 2, size_y + 2, dtype=torch.bool)\n', (20544, 20610), False, 'import torch\n'), ((20745, 20762), 'torch.where', 'torch.where', (['mask'], {}), '(mask)\n', (20756, 20762), False, 'import torch\n'), ((21007, 21036), 'torch.stack', 'torch.stack', (['[e0, e1]'], {'dim': '(-1)'}), '([e0, e1], dim=-1)\n', (21018, 21036), False, 'import torch\n'), ((21199, 21229), 'torch.stack', 'torch.stack', (['[x, y, z]'], {'dim': '(-1)'}), '([x, y, z], dim=-1)\n', (21210, 21229), False, 'import torch\n'), ((21599, 21620), 'polyscope.set_up_dir', 'ps.set_up_dir', (['"""z_up"""'], {}), "('z_up')\n", (21612, 21620), True, 'import polyscope as ps\n'), ((21629, 21638), 'polyscope.init', 'ps.init', ([], {}), '()\n', (21636, 21638), True, 'import polyscope as ps\n'), ((21647, 21656), 'polyscope.show', 'ps.show', ([], {}), '()\n', (21654, 21656), True, 'import polyscope as ps\n'), ((22318, 22351), 'polyscope.screenshot', 'ps.screenshot', (['filename'], {}), '(filename, **kwargs)\n', (22331, 22351), True, 'import polyscope as ps\n'), ((1658, 1690), 'polyscope.set_ground_plane_mode', 'ps.set_ground_plane_mode', (['"""none"""'], {}), "('none')\n", (1682, 1690), True, 'import polyscope as ps\n'), ((13390, 13456), 
'polyscope.register_point_cloud', 'ps.register_point_cloud', (['name', 'pointcloud'], {'radius': 'radius'}), '(name, pointcloud, radius=radius, **kwargs)\n', (13413, 13456), True, 'import polyscope as ps\n'), ((18883, 18959), 'numpy.array', 'np.array', (['[[1, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 1], [1, 1, 0]]'], {}), '([[1, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 1], [1, 1, 0]])\n', (18891, 18959), True, 'import numpy as np\n'), ((20091, 20116), 'torch.from_numpy', 'torch.from_numpy', (['heatmap'], {}), '(heatmap)\n', (20107, 20116), False, 'import torch\n'), ((20322, 20342), 'torch.arange', 'torch.arange', (['size_x'], {}), '(size_x)\n', (20334, 20342), False, 'import torch\n'), ((20374, 20394), 'torch.arange', 'torch.arange', (['size_y'], {}), '(size_y)\n', (20386, 20394), False, 'import torch\n'), ((22017, 22072), 'polyscope.look_at_dir', 'ps.look_at_dir', (['camera_loc', 'center', '(0, 1, 0)'], {}), '(camera_loc, center, (0, 1, 0), **kwargs)\n', (22031, 22072), True, 'import polyscope as ps\n'), ((10957, 10984), 'polyscope.get_point_cloud', 'ps.get_point_cloud', (['pc_name'], {}), '(pc_name)\n', (10975, 10984), True, 'import polyscope as ps\n'), ((11217, 11244), 'polyscope.get_point_cloud', 'ps.get_point_cloud', (['pc_name'], {}), '(pc_name)\n', (11235, 11244), True, 'import polyscope as ps\n'), ((11516, 11539), 'numpy.arange', 'np.arange', (['src.shape[0]'], {}), '(src.shape[0])\n', (11525, 11539), True, 'import numpy as np\n'), ((11913, 11938), 'numpy.arange', 'np.arange', (['(num_points - 1)'], {}), '(num_points - 1)\n', (11922, 11938), True, 'import numpy as np\n'), ((15799, 15820), 'numpy.arange', 'np.arange', (['num_voxels'], {}), '(num_voxels)\n', (15808, 15820), True, 'import numpy as np\n'), ((21878, 21904), 'numpy.array', 'np.array', (['[0, 0, distance]'], {}), '([0, 0, distance])\n', (21886, 21904), True, 'import numpy as np\n'), ((1944, 1973), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'color'}), '(size=color)\n', 
(1961, 1973), True, 'import numpy as np\n'), ((2041, 2056), 'numpy.array', 'np.array', (['color'], {}), '(color)\n', (2049, 2056), True, 'import numpy as np\n'), ((6138, 6173), 'numpy.arange', 'np.arange', (['query_batch_idx.shape[0]'], {}), '(query_batch_idx.shape[0])\n', (6147, 6173), True, 'import numpy as np\n'), ((6545, 6578), 'numpy.arange', 'np.arange', (['ref_batch_idx.shape[0]'], {}), '(ref_batch_idx.shape[0])\n', (6554, 6578), True, 'import numpy as np\n'), ((7053, 7117), 'numpy.concatenate', 'np.concatenate', (['[query_points[:, :3], ref_points[:, :3]]'], {'axis': '(0)'}), '([query_points[:, :3], ref_points[:, :3]], axis=0)\n', (7067, 7117), True, 'import numpy as np\n'), ((7908, 7927), 'numpy.linalg.eigh', 'np.linalg.eigh', (['cov'], {}), '(cov)\n', (7922, 7927), True, 'import numpy as np\n'), ((11567, 11590), 'numpy.arange', 'np.arange', (['tgt.shape[0]'], {}), '(tgt.shape[0])\n', (11576, 11590), True, 'import numpy as np\n'), ((11964, 11989), 'numpy.arange', 'np.arange', (['(num_points - 1)'], {}), '(num_points - 1)\n', (11973, 11989), True, 'import numpy as np\n'), ((6768, 6827), 'numpy.stack', 'np.stack', (['[e_query, e_ref + query_points.shape[0]]'], {'axis': '(-1)'}), '([e_query, e_ref + query_points.shape[0]], axis=-1)\n', (6776, 6827), True, 'import numpy as np\n'), ((7956, 7978), 'numpy.sqrt', 'np.sqrt', (['S[:, None, :]'], {}), '(S[:, None, :])\n', (7963, 7978), True, 'import numpy as np\n'), ((8134, 8156), 'numpy.random.randn', 'np.random.randn', (['(20)', '(3)'], {}), '(20, 3)\n', (8149, 8156), True, 'import numpy as np\n'), ((8870, 8895), 'numpy.stack', 'np.stack', (['corners'], {'axis': '(1)'}), '(corners, axis=1)\n', (8878, 8895), True, 'import numpy as np\n'), ((14599, 14645), 'numpy.array', 'np.array', (['[75.0 / 255, 75.0 / 255, 75 / 255.0]'], {}), '([75.0 / 255, 75.0 / 255, 75 / 255.0])\n', (14607, 14645), True, 'import numpy as np\n'), ((18300, 18327), 'numpy.arange', 'np.arange', (['corners.shape[0]'], {}), '(corners.shape[0])\n', 
(18309, 18327), True, 'import numpy as np\n'), ((4696, 4720), 'numpy.zeros', 'np.zeros', (['boxes.shape[0]'], {}), '(boxes.shape[0])\n', (4704, 4720), True, 'import numpy as np\n'), ((8928, 8959), 'numpy.arange', 'np.arange', (['(corners.shape[0] * 8)'], {}), '(corners.shape[0] * 8)\n', (8937, 8959), True, 'import numpy as np\n'), ((6028, 6059), 'numpy.zeros', 'np.zeros', (['query_points.shape[0]'], {}), '(query_points.shape[0])\n', (6036, 6059), True, 'import numpy as np\n'), ((6441, 6470), 'numpy.zeros', 'np.zeros', (['ref_points.shape[0]'], {}), '(ref_points.shape[0])\n', (6449, 6470), True, 'import numpy as np\n'), ((8669, 8691), 'numpy.array', 'np.array', (['[dx, dy, dz]'], {}), '([dx, dy, dz])\n', (8677, 8691), True, 'import numpy as np\n'), ((10357, 10403), 'numpy.array', 'np.array', (['[75.0 / 255, 75.0 / 255, 75 / 255.0]'], {}), '([75.0 / 255, 75.0 / 255, 75 / 255.0])\n', (10365, 10403), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import argparse
import IPython as ipy
def main(raw_args=None):
    """Plot the reward upper bound against the exact POMDP value.

    Loads ``<problem>_results.npz`` and shows cumulative reward versus
    the observation accuracy p_correct.

    Args:
        raw_args: optional argument list; ``None`` means use sys.argv.
    """
    # Command-line interface.
    parser = argparse.ArgumentParser()
    parser.add_argument("--problem", type=str, default="lava_problem",
                        help="choose problem: lava_problem or two_lavas_problem (default: lava_problem)")
    problem = parser.parse_args(raw_args).problem

    # Saved experiment results.
    results = np.load(problem + "_results.npz")
    p_correct = results['p_correct_vals']
    upper_bounds = results['bounds']
    pomdp_values = results['opt_values']

    # Figure: bound vs. exact POMDP value.
    _, axis = plt.subplots()
    axis.plot(p_correct, upper_bounds, 'o--', color='#780e0e',
              label='Upper Bound', linewidth=1)
    axis.plot(p_correct, pomdp_values, '*--', color='#007FFF',
              label='POMDP', linewidth=0.5)
    plt.xlabel('$p_{correct}$', fontsize=15)
    plt.ylabel('Cumulative reward', fontsize=15)
    plt.legend(fontsize=12, loc='center right')
    plt.ylim([0, 5.01])
    plt.show()
#################################################################
# Run with command line arguments precisely when called directly
# (rather than when imported)
if __name__ == '__main__':
    # Executed as a script: main(None) parses the real sys.argv.
    main()
"numpy.load",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots"
] | [((174, 199), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (197, 199), False, 'import argparse\n'), ((434, 467), 'numpy.load', 'np.load', (["(problem + '_results.npz')"], {}), "(problem + '_results.npz')\n", (441, 467), True, 'import numpy as np\n'), ((586, 600), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (598, 600), True, 'import matplotlib.pyplot as plt\n'), ((785, 825), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$p_{correct}$"""'], {'fontsize': '(15)'}), "('$p_{correct}$', fontsize=15)\n", (795, 825), True, 'import matplotlib.pyplot as plt\n'), ((827, 871), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative reward"""'], {'fontsize': '(15)'}), "('Cumulative reward', fontsize=15)\n", (837, 871), True, 'import matplotlib.pyplot as plt\n'), ((873, 916), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(12)', 'loc': '"""center right"""'}), "(fontsize=12, loc='center right')\n", (883, 916), True, 'import matplotlib.pyplot as plt\n'), ((918, 937), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 5.01]'], {}), '([0, 5.01])\n', (926, 937), True, 'import matplotlib.pyplot as plt\n'), ((939, 949), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (947, 949), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 4 22:39:40 2017
@author: 74297
"""
import numpy as np
import os
from subprocess import Popen, PIPE, STDOUT
from numba import jit
from io import BytesIO
# Amino-acid letter -> integer code.  The 20 canonical residues (in
# alphabetical one-letter order ACDEFGHIKLMNPQRSTVWY) map to 1..20;
# ambiguity codes (B, J, O, U, X, Z), gaps '-' and stops '*' map to 0.
# str.find returns -1 for letters outside the canonical set, so the
# "+ 1" sends exactly those to 0.
aadic = {
    letter: 'ACDEFGHIKLMNPQRSTVWY'.find(letter) + 1
    for letter in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ-*'
}
def readsequence(seq_file):
    """Read a FASTA file and encode its sequence via `aadic`.

    Args:
        seq_file: path to a FASTA file; the first line is the header,
            the remaining (possibly wrapped) lines hold the sequence.

    Returns:
        list[int]: residue code of every sequence letter, in order.
    """
    # `with` guarantees the file is closed (the old open().readlines()
    # leaked the handle).  The unused local `seq` and the manual string
    # concatenation loop were removed as well.
    with open(seq_file) as f:
        lines = [line.strip() for line in f]
    return [aadic[aa] for aa in ''.join(lines[1:])]
def read_msa(file_path):
    """Load a multiple-sequence alignment into an integer code matrix.

    Args:
        file_path: text file with one aligned sequence per line; the
            alignment width is taken from the first line.

    Returns:
        np.ndarray [n_sequences, width] of residue codes (see `aadic`).
    """
    # `with` guarantees the file is closed; the previous
    # open().readlines() leaked the file handle.
    with open(file_path) as f:
        lines = [line.strip() for line in f]
    n = len(lines)
    d = len(lines[0])  # alignment width; strip() already removed CR/LF
    msa = np.zeros([n, d], dtype=int)
    for i in range(n):
        aline = lines[i]
        for j in range(d):
            msa[i, j] = aadic[aline[j]]
    return msa
@jit
def cal_large_matrix1(msa,weight):
    """Weighted covariance of residue occurrences across MSA columns.

    For each pair of alignment columns (i, j) and residue codes (a, b),
    cov[i*21+a, j*21+b] = pab[a,b] - pa[i,a]*pa[j,b], where pa and pab
    are weighted, pseudocount-regularized single- and pair-frequencies
    estimated from the alignment.  JIT-compiled with numba, which is
    why the loops are written in plain nested-loop style.

    Args:
        msa: integer matrix [M, N] of residue codes in 0..20
            (see `aadic`), one row per sequence.
        weight: per-sequence weights of length M.

    Returns:
        np.ndarray [N*21, N*21] covariance matrix (symmetric).
    """
    # Output layout: (N*ALPHA) x (N*ALPHA), ALPHA = 21 residue codes.
    ALPHA=21
    pseudoc=1  # pseudocount added to every frequency bin
    M=msa.shape[0]  # number of sequences
    N=msa.shape[1]  # number of alignment columns
    pab=np.zeros((ALPHA,ALPHA))  # pair frequencies, reused per (i, j)
    pa=np.zeros((N,ALPHA))  # single-column frequencies
    cov=np.zeros([N*ALPHA,N*ALPHA ])
    # --- weighted single-column frequencies pa[i, a] ---
    for i in range(N):
        for aa in range(ALPHA):
            pa[i,aa] = pseudoc
        neff=0.0  # effective number of sequences (sum of weights)
        for k in range(M):
            pa[i,msa[k,i]]+=weight[k]
            neff+=weight[k]
        for aa in range(ALPHA):
            pa[i,aa] /=pseudoc * ALPHA * 1.0 + neff
    # --- pair frequencies pab and covariance for each column pair ---
    for i in range(N):
        for j in range(i,N):
            # Initialize pab: on the diagonal (i == j) the joint equals
            # the single-column frequency; off-diagonal it starts from a
            # flat pseudocount that the counts below are added onto.
            for a in range(ALPHA):
                for b in range(ALPHA):
                    if i ==j :
                        if a==b :
                            pab[a,b]=pa[i,a]
                        else:
                            pab[a,b]=0.0
                    else:
                        pab[a,b] = pseudoc *1.0 /ALPHA
            if(i!=j):
                # Accumulate weighted co-occurrence counts, then
                # normalize by pseudocounts plus total weight.
                neff2=0;
                for k in range(M):
                    a=msa[k,i]
                    b=msa[k,j]
                    tmp=weight[k]
                    pab[a,b]+=tmp
                    neff2+=tmp
                for a in range(ALPHA):
                    for b in range(ALPHA):
                        pab[a,b] /= pseudoc*ALPHA*1.0 +neff2
            # Covariance; the symmetric entry is filled at the same time.
            # Entries with pab == 0 are left at zero.
            for a in range(ALPHA):
                for b in range(ALPHA):
                    if(i!=j or a==b):
                        if (pab[a][b] > 0.0):
                            cov[i*21+a][j*21+b]=pab[a][b] - pa[i][a] * pa[j][b]
                            cov[j*21+b][i*21+a]=cov[i*21+a][j*21+b]
    return cov
if __name__ == "__main__":
    # Library module: nothing runs standalone.
    print('Nothing')
| [
"numpy.zeros"
] | [((1142, 1169), 'numpy.zeros', 'np.zeros', (['[n, d]'], {'dtype': 'int'}), '([n, d], dtype=int)\n', (1150, 1169), True, 'import numpy as np\n'), ((1433, 1457), 'numpy.zeros', 'np.zeros', (['(ALPHA, ALPHA)'], {}), '((ALPHA, ALPHA))\n', (1441, 1457), True, 'import numpy as np\n'), ((1464, 1484), 'numpy.zeros', 'np.zeros', (['(N, ALPHA)'], {}), '((N, ALPHA))\n', (1472, 1484), True, 'import numpy as np\n'), ((1492, 1524), 'numpy.zeros', 'np.zeros', (['[N * ALPHA, N * ALPHA]'], {}), '([N * ALPHA, N * ALPHA])\n', (1500, 1524), True, 'import numpy as np\n')] |
"""
Once a model is learned, use this to play it.
It is running a policy to get its the feature expectations.
"""
from simulation import carmunk
import numpy as np
from neuralNets import net1
import sys
import time
import timeit
import random
NUM_FEATURES = 46 # number of features (length of the readings vector accumulated below)
NUM_ACTIONS = 25 # number of actions (network output size passed to net1)
GAMMA = 0.9 # discount factor for the feature-expectation sum
def play(model, weights, play_frames=10000, play_rounds=10000, scene_file_name='scenes/scene-city.txt'):
    """Run a single policy and return its discounted feature expectations.

    Thin wrapper around `play_multi_model` with one model at weight 1.
    """
    single_model = [model]
    full_weight = [1]
    return play_multi_model(single_model, full_weight, weights, play_frames,
                            play_rounds, scene_file_name)
def play_multi_model(model_list, lamda_list, weights, play_frames=10000, play_rounds=10000, scene_file_name='scenes/scene-city.txt'):
    """Run a stochastic mixture of policies and collect feature expectations.

    Each frame one model is drawn from `model_list` with probabilities
    `lamda_list`, its greedy action is executed in the carmunk simulator,
    and the discounted sum of the feature readings is accumulated
    (discounting with GAMMA starts after the first 100 frames).

    Args:
        model_list: models with a Keras-style `predict(state, batch_size)`.
        lamda_list: per-model sampling probabilities; presumably they
            sum to 1 -- TODO confirm with callers.
        weights: reward weights handed to `carmunk.GameState`.
        play_frames: stop after this many frames (<= 0 disables the limit).
        play_rounds: stop after this many rounds (<= 0 disables the limit).
        scene_file_name: scene description file for the simulator.

    Returns:
        Tuple (featureExp, average score, average distance) where
        featureExp is an np.ndarray of length NUM_FEATURES.
    """
    # init
    car_move = 0  # total number of frames simulated so far
    game_state = carmunk.GameState(weights, scene_file_name = scene_file_name)
    # The first frame_step with action 11 only serves to get a state.
    _, state, _, _, _ = game_state.frame_step((11))
    featureExp = np.zeros(NUM_FEATURES)
    round_num = 0
    score_list = []  # final score of every finished round
    dist_list = []  # distance travelled in every finished round
    dist_1round = 0
    step_1round = 0
    max_step_1round = 3000  # force-end a round after this many frames
    time_list = []  # per-frame wall-clock durations (for the fps print)
    # start to move
    while True:
        start_time = timeit.default_timer()
        car_move += 1
        step_1round += 1
        # Roulette-wheel sampling: pick a model index with probability
        # proportional to lamda_list, then act greedily on its Q-values.
        randv = random.uniform(0, 1)
        model_id = -1
        while randv>=0 and model_id<len(lamda_list)-1:
            model_id += 1
            randv -= lamda_list[model_id]
        model_id = np.clip(model_id, 0, len(lamda_list) - 1)
        model = model_list[model_id]
        qval = model.predict(state, batch_size=1)
        action = (np.argmax(qval))
        #TODO
        #action = random.randrange(0, len(qval.flatten()))
        # take the action
        reward , next_state, readings, score, dist_1step = game_state.frame_step(action)
        dist_1round += dist_1step
        #print ("reward: ", reward)
        #print ("readings: ", readings)
        # start recording feature expectations only after 100 frames
        if car_move > 100:
            featureExp += (GAMMA**(car_move-101))*np.array(readings)
        #print ("featureExp: ", featureExp)
        # End of round: readings[-1] == 1 appears to flag a terminal
        # event (the car is re-initialized below) -- confirm in carmunk.
        if readings[-1]==1 or step_1round==max_step_1round:
            step_1round = 0
            round_num += 1
            score_list.append(score)
            dist_list.append(dist_1round)
            #print("Score in this round: ", score)
            #print("Aver Score in ", round_num, "rounds: ", np.average(score_list))
            #print("Dist in this round: ", dist_1round)
            #print("Aver dist in ", round_num, "rounds: ", np.average(dist_list))
            dist_1round = 0
            game_state.reinit_car()
        if play_frames > 0 and car_move % play_frames == 0:
            #print("The car has moved %d frames" % car_move)
            # Close the still-running round before leaving the loop.
            if readings[-1] == 0:
                round_num += 1
                score_list.append(score)
                dist_list.append(dist_1round)
                #print("Score in this round: ", score)
                #print("Aver Score in ", round_num, "rounds: ", np.average(score_list))
                #print("Dist in this round: ", dist_1round)
                #print("Aver dist in ", round_num, "rounds: ", np.average(dist_list))
            break
        if play_rounds > 0 and round_num == play_rounds:
            #print("Score in this round: ", score)
            #print("Aver Score in ", round_num, "rounds: ", np.average(score_list))
            #print("Dist in this round: ", dist_1round)
            #print("Aver dist in ", round_num, "rounds: ", np.average(dist_list))
            break
        state = next_state
        time_list.append(timeit.default_timer() - start_time)
        #print("fps: ", 1 / np.average(time_list), " ")
    print("min score=", np.min(score_list))
    print("max score=", np.max(score_list))
    print("aver score=", np.average(score_list))
    print("standard deviation score=", np.std(score_list))
    print("min dist=", np.min(dist_list))
    print("max dist=", np.max(dist_list))
    print("aver dist=", np.average(dist_list))
    print("standard deviation dist=", np.std(dist_list))
    return featureExp, np.average(score_list), np.average(dist_list)
if __name__ == "__main__":
    # Experiment driver: evaluate saved checkpoints 1..9 of one behavior.
    # NOTE(review): the command-line parameters are disabled in favor of
    # the hard-coded values below.
    #BEHAVIOR = sys.argv[1]
    #ITERATION = sys.argv[2]
    #FRAME = sys.argv[3]
    BEHAVIOR = "city"
    ITERATION = 20000
    FRAME = 1  # overwritten by the loop below
    score_list = []
    dist_list = []
    for FRAME in range(1,10):
        print('***************************************************************************************************')
        print('FRAME ', FRAME)
        modelType = BEHAVIOR
        #model_dir = 'results/models-'+ modelType +'/'
        model_dir = 'results/finals/'
        saved_model = model_dir+'164-150-100-50000-'+str(ITERATION)+'-'+str(FRAME)+'.h5'
        weights = [-0.79380502 , 0.00704546 , 0.50866139 , 0.29466834, -0.07636144 , 0.09153848 ,-0.02632325 ,-0.09672041]
        model = net1(NUM_FEATURES, NUM_ACTIONS, [164, 150], saved_model)
        # NOTE(review): only the last assignment takes effect; the two
        # scene lines above it are dead (leftovers from scene switching?).
        scene_file_name = 'scenes/scene-city-car.txt'
        scene_file_name = 'scenes/scene-ground-car.txt'
        scene_file_name = 'scenes/scene-city.txt'
        featureExp, score, dist = play(model, weights, play_rounds=100, scene_file_name = scene_file_name)
        score_list.append(score)
        dist_list.append(dist)
        # Dump the discounted feature expectations for this checkpoint.
        for feature in featureExp:
            print('{:.3f}'.format(feature), end =", ")
    print('***************************************************************************************************')
    for i in range(len(score_list)):
        print(i+1, 'score', score_list[i], 'dist', dist_list[i])
| [
"numpy.average",
"numpy.argmax",
"random.uniform",
"timeit.default_timer",
"numpy.std",
"neuralNets.net1",
"numpy.zeros",
"numpy.min",
"numpy.max",
"numpy.array",
"simulation.carmunk.GameState"
] | [((717, 776), 'simulation.carmunk.GameState', 'carmunk.GameState', (['weights'], {'scene_file_name': 'scene_file_name'}), '(weights, scene_file_name=scene_file_name)\n', (734, 776), False, 'from simulation import carmunk\n'), ((848, 870), 'numpy.zeros', 'np.zeros', (['NUM_FEATURES'], {}), '(NUM_FEATURES)\n', (856, 870), True, 'import numpy as np\n'), ((1073, 1095), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1093, 1095), False, 'import timeit\n'), ((1193, 1213), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1207, 1213), False, 'import random\n'), ((1528, 1543), 'numpy.argmax', 'np.argmax', (['qval'], {}), '(qval)\n', (1537, 1543), True, 'import numpy as np\n'), ((3719, 3737), 'numpy.min', 'np.min', (['score_list'], {}), '(score_list)\n', (3725, 3737), True, 'import numpy as np\n'), ((3763, 3781), 'numpy.max', 'np.max', (['score_list'], {}), '(score_list)\n', (3769, 3781), True, 'import numpy as np\n'), ((3808, 3830), 'numpy.average', 'np.average', (['score_list'], {}), '(score_list)\n', (3818, 3830), True, 'import numpy as np\n'), ((3871, 3889), 'numpy.std', 'np.std', (['score_list'], {}), '(score_list)\n', (3877, 3889), True, 'import numpy as np\n'), ((3914, 3931), 'numpy.min', 'np.min', (['dist_list'], {}), '(dist_list)\n', (3920, 3931), True, 'import numpy as np\n'), ((3956, 3973), 'numpy.max', 'np.max', (['dist_list'], {}), '(dist_list)\n', (3962, 3973), True, 'import numpy as np\n'), ((3999, 4020), 'numpy.average', 'np.average', (['dist_list'], {}), '(dist_list)\n', (4009, 4020), True, 'import numpy as np\n'), ((4060, 4077), 'numpy.std', 'np.std', (['dist_list'], {}), '(dist_list)\n', (4066, 4077), True, 'import numpy as np\n'), ((4103, 4125), 'numpy.average', 'np.average', (['score_list'], {}), '(score_list)\n', (4113, 4125), True, 'import numpy as np\n'), ((4127, 4148), 'numpy.average', 'np.average', (['dist_list'], {}), '(dist_list)\n', (4137, 4148), True, 'import numpy as np\n'), ((4895, 4951), 
'neuralNets.net1', 'net1', (['NUM_FEATURES', 'NUM_ACTIONS', '[164, 150]', 'saved_model'], {}), '(NUM_FEATURES, NUM_ACTIONS, [164, 150], saved_model)\n', (4899, 4951), False, 'from neuralNets import net1\n'), ((1993, 2011), 'numpy.array', 'np.array', (['readings'], {}), '(readings)\n', (2001, 2011), True, 'import numpy as np\n'), ((3592, 3614), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (3612, 3614), False, 'import timeit\n')] |
##########################################################################
#
# Functions for calculating signals from share-prices and financial data.
#
##########################################################################
# SimFin - Simple financial data for Python.
# www.simfin.com - www.github.com/simfin/simfin
# See README.md for instructions and LICENSE.txt for license details.
##########################################################################
import pandas as pd
import numpy as np
from simfin.cache import cache
from simfin.derived import free_cash_flow, ncav, netnet, shares
from simfin.rel_change import rel_change
from simfin.resample import reindex
from simfin.utils import apply, add_date_offset
from simfin.names import *
##########################################################################
@cache
def price_signals(df_prices, group_index=TICKER):
    """
    Compute price-based signals (moving averages, EMA and MACD) for
    every stock in the given DataFrame.

    This function is wrapped with :obj:`~simfin.cache.cache`, so passing
    e.g. `cache_refresh` will save / reload a cache-file of the result;
    see the wrapper's documentation for details.

    .. warning:: You **MUST** use keyword arguments to this function,
        otherwise the first unnamed arguments would get passed to the
        :obj:`~simfin.cache.cache` wrapper instead.

    :param df_prices:
        Pandas DataFrame with share-prices for multiple stocks.

    :param group_index:
        Index-column used for grouping when the DataFrame has a
        MultiIndex, typically TICKER (or e.g. SIMFIN_ID).

    :return:
        Pandas DataFrame with price-signals.
    """

    def _one_stock(df):
        # Signals for a single stock, indexed like its price-series.
        signals = pd.DataFrame(index=df.index)
        close = df[CLOSE]

        # Simple moving averages over the past 20 and 200 days.
        signals[MAVG_20] = close.rolling(window=20).mean()
        signals[MAVG_200] = close.rolling(window=200).mean()

        # Exponential moving average over the past 20 days.
        signals[EMA] = close.ewm(span=20).mean()

        # MACD (12 vs. 26 days) plus a 9-day EMA smoothing of it.
        # https://en.wikipedia.org/wiki/MACD
        ema_fast = close.ewm(span=12).mean()
        ema_slow = close.ewm(span=26).mean()
        signals[MACD] = ema_fast - ema_slow
        signals[MACD_EMA] = signals[MACD].ewm(span=9).mean()

        return signals

    # Group per stock when needed, then present columns alphabetically.
    df_signals = apply(df=df_prices, func=_one_stock, group_index=group_index)
    df_signals.sort_index(axis='columns', inplace=True)

    return df_signals
##########################################################################
@cache
def trade_signals(df, signal1, signal2, group_index=TICKER):
    """
    Derive Buy / Sell / Hold signals from the crossings of two columns:

    - Hold while `df[signal1] >= df[signal2]`.
    - Buy on the day `df[signal1]` crosses above `df[signal2]`.
    - Sell on the day `df[signal1]` crosses below `df[signal2]`.

    This function is wrapped with :obj:`~simfin.cache.cache`, so passing
    e.g. `cache_refresh` will save / reload a cache-file of the result;
    see the wrapper's documentation for details.

    .. warning:: You **MUST** use keyword arguments to this function,
        otherwise the first unnamed arguments would get passed to the
        :obj:`~simfin.cache.cache` wrapper instead.

    :param df:
        Pandas DataFrame containing columns `signal1` and `signal2`,
        for one or more stocks.
    :param signal1:
        String with the name of a column in `df`.
    :param signal2:
        String with the name of a column in `df`.
    :param group_index:
        Index-column used for grouping, typically TICKER.
    :return:
        Pandas DataFrame with boolean BUY, SELL and HOLD columns.
    """

    def _one_stock(df_stock):
        signals = pd.DataFrame(index=df_stock.index)
        above = df_stock[signal1] >= df_stock[signal2]
        # Crossing up: above today but not yesterday.  The first day is
        # padded as "above", so it can never count as a fresh buy.
        signals[BUY] = above & ~above.shift(1, fill_value=True)
        # Crossing down: below today but above yesterday.  The first day
        # is padded as "below", so it can never count as a fresh sell.
        signals[SELL] = ~above & above.shift(1, fill_value=False)
        signals[HOLD] = above
        return signals

    # Group per stock when needed, then present columns alphabetically.
    df_signals = apply(df=df, func=_one_stock, group_index=group_index)
    df_signals.sort_index(axis='columns', inplace=True)

    return df_signals
##########################################################################
@cache
def volume_signals(df_prices, df_shares, window=20, fill_method='ffill',
                   offset=None, date_index=REPORT_DATE,
                   shares_index=SHARES_BASIC, group_index=TICKER):
    """
    Compute signals for the daily trading-volume of stocks:

    - REL_VOL: log-ratio of the daily volume to its moving average.
    - VOLUME_MCAP: Market-Cap of the daily volume, moving-averaged.
    - VOLUME_TURNOVER: daily volume relative to the shares outstanding,
      moving-averaged.

    This function is wrapped with :obj:`~simfin.cache.cache`, so passing
    e.g. `cache_refresh` will save / reload a cache-file of the result;
    see the wrapper's documentation for details.

    .. warning:: You **MUST** use keyword arguments to this function,
        otherwise the first unnamed arguments would get passed to the
        :obj:`~simfin.cache.cache` wrapper instead.

    :param df_prices:
        Pandas DataFrame with share-prices for multiple stocks.
    :param df_shares:
        Pandas DataFrame with both SHARES_BASIC and SHARES_DILUTED
        columns, e.g. `df_shares=df_income_ttm`.
    :param window:
        Integer number of days for the moving-average calculations.
    :param fill_method:
        String or callable fill-method used when reindexing financial
        data to daily data-points, see :obj:`~simfin.resample.reindex`.
    :param offset:
        Optional Pandas DateOffset added to the date-index of
        `df_shares`, see :obj:`~simfin.utils.add_date_offset`.
    :param date_index:
        Name of the date-column for `df_shares`, e.g. REPORT_DATE.
    :param shares_index:
        Share-count column to use: SHARES_BASIC or SHARES_DILUTED.
    :param group_index:
        Index-column used for grouping, typically TICKER.
    :return:
        Pandas DataFrame with volume-signals.
    """
    # Prefer `shares_index` counts and fill gaps from the other column.
    df_shares = shares(df=df_shares, index=shares_index)

    # Optionally lag the share-count dates, then spread the counts out
    # to the daily data-points of the share-prices.
    if offset is not None:
        df_shares = add_date_offset(df=df_shares, offset=offset,
                                     date_index=date_index)
    daily_shares = reindex(df_src=df_shares, df_target=df_prices,
                           method=fill_method, group_index=group_index)

    # One combined frame: closing price, volume and daily share-counts.
    df_combined = pd.concat([df_prices[[CLOSE, VOLUME]], daily_shares], axis=1)

    def _one_stock(df):
        # Signals for a single stock, indexed like its price-series.
        signals = pd.DataFrame(index=df.index)
        volume = df[VOLUME]

        # Daily volume relative to its own moving average (log-scale).
        mavg_volume = volume.rolling(window=window).mean()
        signals[REL_VOL] = np.log(volume / mavg_volume)

        # Market-Cap of the daily trading-volume, moving-averaged.
        signals[VOLUME_MCAP] = \
            (volume * df[CLOSE]).rolling(window=window).mean()

        # Volume relative to shares outstanding, moving-averaged.
        signals[VOLUME_TURNOVER] = \
            (volume / df[shares_index]).rolling(window=window).mean()

        return signals

    # Group per stock when needed, then present columns alphabetically.
    df_signals = apply(df=df_combined, func=_one_stock,
                       group_index=group_index)
    df_signals.sort_index(axis='columns', inplace=True)

    return df_signals
##########################################################################
@cache
def fin_signals(df_income_ttm, df_balance_ttm, df_cashflow_ttm, df_prices=None,
                offset=None, func=None, fill_method='ffill',
                date_index=REPORT_DATE, group_index=TICKER, banks=False, insurance=False):
    """
    Calculate financial signals such as Net Profit Margin, Debt Ratio, ROA,
    etc. for all stocks in the given DataFrames.

    This function can take a while to compute, so it will create a cache-file
    if you pass the arg `cache_refresh`. The next time you call this function,
    the cache-file will get loaded if it is more recent than specified by
    `cache_refresh`, otherwise the function will get computed again and the
    result saved in the cache-file for future use. See the documentation for
    the :obj:`~simfin.cache.cache` wrapper for details on its arguments.

    .. warning:: You **MUST** use keyword arguments to this function,
        otherwise the first unnamed arguments would get passed to the
        :obj:`~simfin.cache.cache` wrapper instead.

    :param df_prices:
        Optional Pandas DataFrame with share-prices for one or more stocks.
        If not `None`, then the signals will be reindexed to the same daily
        data-points as `df_prices`, otherwise the signals will be quarterly.

    :param df_income_ttm:
        Pandas DataFrame with Income Statement TTM data for one or more stocks.

    :param df_balance_ttm:
        Pandas DataFrame with Balance Sheet TTM data for one or more stocks.

    :param df_cashflow_ttm:
        Pandas DataFrame with Cash-Flow Statement TTM data for one or more stocks.

    :param func:
        Function to apply on a per-stock basis after the signals have been
        calculated, but before they have been reindexed to daily data-points.
        This is useful e.g. to calculate multi-year averages.
        For example, to calculate the 2-year averages of TTM data:
        `func = lambda df: 0.5 * (df + df.shift(4))`

    :param fill_method:
        String or callable for the method of filling in empty values when
        reindexing financial data to daily data-points.
        See :obj:`~simfin.resample.reindex` for valid options.

    :param offset:
        Pandas DateOffset added to the date-index of the Pandas DataFrames with
        the financial data. Example: `pd.DateOffset(days=60)` This is useful if
        you want to add a lag of e.g. 60 days to the dates of financial reports
        with Income Statements, Balance Sheets, and Cash-Flow Statements, because
        the REPORT_DATE is not when it was actually made available to the public,
        which can be 1, 2 or even 3 months after the REPORT_DATE.
        See :obj:`~simfin.utils.add_date_offset` for more details.

    :param date_index:
        Name of the date-column for the financial data e.g. REPORT_DATE.

    :param group_index:
        If the DataFrames have a MultiIndex then group data using this
        index-column. By default this is TICKER but it could also be e.g.
        SIMFIN_ID if you are using that as an index in your DataFrame.

    :param banks:
        Boolean whether to use the special datasets for banks.

    :param insurance:
        Boolean whether to use the special datasets for insurance
        companies.

    :return:
        Pandas DataFrame with financial signals.
    """

    # Helper-function for calculating signals for a single stock.
    def _signals(df):
        """Calculate the financial signals for a single stock's DataFrame."""
        # Create new DataFrame for the signals.
        # Setting the index improves performance.
        df_signals = pd.DataFrame(index=df.index)

        # Net Profit Margin.
        df_signals[NET_PROFIT_MARGIN] = df[NET_INCOME] / df[REVENUE]

        # Gross Profit Margin.
        # Note: Not available for banks or insurances.
        if not banks and not insurance:
            df_signals[GROSS_PROFIT_MARGIN] = df[GROSS_PROFIT] / df[REVENUE]

        # R&D / Revenue.
        # Note: RESEARCH_DEV must be negated.
        # Note: Not available for banks or insurances.
        if not banks and not insurance:
            df_signals[RD_REVENUE] = -df[RESEARCH_DEV] / df[REVENUE]

        # R&D / Gross Profit.
        # Note: RESEARCH_DEV must be negated.
        # Note: Not available for banks or insurances.
        if not banks and not insurance:
            df_signals[RD_GROSS_PROFIT] = -df[RESEARCH_DEV] / df[GROSS_PROFIT]

        # Return on Research Capital (RORC).
        # Note: RESEARCH_DEV must be negated.
        # Note: Not available for banks or insurances.
        if not banks and not insurance:
            df_signals[RORC] = df[GROSS_PROFIT] / -df[RESEARCH_DEV]

        # Interest Coverage.
        # Note: INTEREST_EXP_NET must be negated.
        # Note: Not available for banks or insurances.
        if not banks and not insurance:
            df_signals[INTEREST_COV] = df[OPERATING_INCOME] / -df[INTEREST_EXP_NET]

        # Current Ratio = Current Assets / Current Liabilities.
        # Note: Not available for banks or insurances.
        if not banks and not insurance:
            df_signals[CURRENT_RATIO] = df[TOTAL_CUR_ASSETS] / df[TOTAL_CUR_LIAB]

        # Quick Ratio = (Cash + Equiv. + ST Inv. + Recv.) / Current Liab.
        # Note: Not available for banks or insurances.
        if not banks and not insurance:
            df_signals[QUICK_RATIO] = \
                (df[CASH_EQUIV_ST_INVEST] + df[ACC_NOTES_RECV].fillna(0.0)) \
                / df[TOTAL_CUR_LIAB]

        # Debt Ratio = (Short-term Debt + Long-term Debt) / Total Assets.
        df_signals[DEBT_RATIO] = (df[ST_DEBT] + df[LT_DEBT]) / df[TOTAL_ASSETS]

        # NOTE: There are different ways of calculating ROA, ROE,
        # ASSET_TURNOVER, etc. See Tutorial 04. For example, we could use the
        # Assets or Equity from last year instead of from the current year,
        # but the resulting ROA, ROE, etc. are usually very similar, and using
        # last year's Assets or Equity would cause us to loose one year of
        # data-points for the signals we are calculating here.

        # Return on Assets = Net Income / Total Assets. See note above.
        df_signals[ROA] = df[NET_INCOME] / df[TOTAL_ASSETS]

        # Return on Equity = Net Income / Total Equity. See note above.
        df_signals[ROE] = df[NET_INCOME] / df[TOTAL_EQUITY]

        # Asset Turnover = Revenue / Total Assets. See note above.
        df_signals[ASSET_TURNOVER] = df[REVENUE] / df[TOTAL_ASSETS]

        # Inventory Turnover = Revenue / Inventory. See note above.
        # Note: Not available for banks or insurances.
        if not banks and not insurance:
            df_signals[INVENTORY_TURNOVER] = df[REVENUE] / df[INVENTORIES]

        # Payout Ratio = Dividends / Free Cash Flow
        # Note the negation because DIVIDENDS_PAID is negative.
        df_signals[PAYOUT_RATIO] = -df[DIVIDENDS_PAID].fillna(0) / df[FCF]

        # Buyback Ratio = Share Buyback / Free Cash Flow
        # Note the negation because CASH_REPURCHASE_EQUITY is negative.
        df_signals[BUYBACK_RATIO] = \
            -df[CASH_REPURCHASE_EQUITY].fillna(0) / df[FCF]

        # Payout + Buyback Ratio = (Dividends + Share Buyback) / Free Cash Flow
        # Note the negation because DIVIDENDS_PAID and CASH_REP.. are negative.
        df_signals[PAYOUT_BUYBACK_RATIO] = \
            -(df[DIVIDENDS_PAID].fillna(0) +
              df[CASH_REPURCHASE_EQUITY].fillna(0)) / df[FCF]

        # Net Acquisitions & Divestitures / Total Assets.
        # Note the negation because NET_CASH_ACQ_DIVEST is usually negative.
        # Note: Not available for insurances.
        if not insurance:
            df_signals[ACQ_ASSETS_RATIO] = \
                -df[NET_CASH_ACQ_DIVEST] / df[TOTAL_ASSETS]

        # Capital Expenditures / (Depreciation + Amortization).
        # Note the negation because CAPEX is negative.
        df_signals[CAPEX_DEPR_RATIO] = -df[CAPEX] / df[DEPR_AMOR]

        # Log10(Revenue).
        df_signals[LOG_REVENUE] = np.log10(df[REVENUE])

        return df_signals

    # Get relevant data from Income Statements.
    if banks or insurance:
        columns = [REVENUE, OPERATING_INCOME,
                   NET_INCOME]
    else:
        columns = [REVENUE, GROSS_PROFIT, OPERATING_INCOME, INTEREST_EXP_NET,
                   NET_INCOME, RESEARCH_DEV]
    df1 = df_income_ttm[columns]

    # Get relevant data from Balance Sheets.
    if banks or insurance:
        columns = [TOTAL_ASSETS, TOTAL_EQUITY,
                   ST_DEBT, LT_DEBT]
    else:
        columns = [TOTAL_ASSETS, TOTAL_CUR_ASSETS, TOTAL_CUR_LIAB, TOTAL_EQUITY,
                   ST_DEBT, LT_DEBT, INVENTORIES, CASH_EQUIV_ST_INVEST,
                   ACC_NOTES_RECV]
    df2 = df_balance_ttm[columns]

    # Get relevant data from Cash-Flow Statements.
    # The `banks` column-set was identical to the generic one, so the two
    # branches have been merged: only pure insurance companies (banks takes
    # precedence, as in the original branch order) lack NET_CASH_ACQ_DIVEST.
    if insurance and not banks:
        columns = [DIVIDENDS_PAID, CASH_REPURCHASE_EQUITY,
                   CAPEX, DEPR_AMOR]
    else:
        columns = [DIVIDENDS_PAID, CASH_REPURCHASE_EQUITY, NET_CASH_ACQ_DIVEST,
                   CAPEX, DEPR_AMOR]
    df3 = df_cashflow_ttm[columns]

    # Calculate Free Cash Flow.
    df_fcf = free_cash_flow(df_cashflow=df_cashflow_ttm)

    # Combine the data into a single DataFrame.
    df = pd.concat([df1, df2, df3, df_fcf], axis=1)

    # Add offset / lag to the index-dates of the financial data.
    if offset is not None:
        df = add_date_offset(df=df, offset=offset, date_index=date_index)

    # Calculate signals and use Pandas groupby if `df` has multiple stocks.
    df_signals = apply(df=df, func=_signals, group_index=group_index)

    # Process the signals using the supplied function e.g. to calculate averages.
    if func is not None:
        df_signals = apply(df=df_signals, func=func, group_index=group_index)

    # Reindex to the same daily data-points as the share-prices.
    if df_prices is not None:
        df_signals = reindex(df_src=df_signals, df_target=df_prices,
                             method=fill_method, group_index=group_index)

    # Sort the columns by their names.
    df_signals.sort_index(axis='columns', inplace=True)

    return df_signals
##########################################################################
@cache
def growth_signals(df_income_ttm, df_income_qrt,
                   df_balance_ttm, df_balance_qrt,
                   df_cashflow_ttm, df_cashflow_qrt,
                   df_prices=None, fill_method='ffill',
                   offset=None, func=None,
                   date_index=REPORT_DATE, group_index=TICKER):
    """
    Calculate growth-signals such as Sales Growth, Earnings Growth, etc.
    for all stocks in the given DataFrames.

    Three growth-signals are given for each type of financial data, e.g.:

    - SALES_GROWTH is calculated from the TTM Revenue divided by the
      TTM Revenue from one year ago.

    - SALES_GROWTH_YOY is calculated from the Quarterly Revenue divided by
      the Quarterly Revenue from one year ago.

    - SALES_GROWTH_QOQ is calculated from the Quarterly Revenue divided by
      the Quarterly Revenue from the previous quarter.

    This function can take a while to compute, so it will create a cache-file
    if you pass the arg `cache_refresh`. The next time you call this function,
    the cache-file will get loaded if it is more recent than specified by
    `cache_refresh`, otherwise the function will get computed again and the
    result saved in the cache-file for future use. See the documentation for
    the :obj:`~simfin.cache.cache` wrapper for details on its arguments.

    .. warning:: You **MUST** use keyword arguments to this function,
        otherwise the first unnamed arguments would get passed to the
        :obj:`~simfin.cache.cache` wrapper instead.

    :param df_prices:
        Optional Pandas DataFrame with share-prices for one or more stocks.
        If not `None`, then the signals will be reindexed to the same daily
        data-points as `df_prices`, otherwise the signals will be quarterly.

    :param df_income_ttm:
        Pandas DataFrame with Income Statement TTM data for one or more stocks.

    :param df_income_qrt:
        Pandas DataFrame with Income Statement Quarterly data for one or more
        stocks.

    :param df_balance_ttm:
        Pandas DataFrame with Balance Sheet TTM data for one or more stocks.

    :param df_balance_qrt:
        Pandas DataFrame with Balance Sheet Quarterly data for one or more
        stocks.

    :param df_cashflow_ttm:
        Pandas DataFrame with Cash-Flow Statement TTM data for one or more
        stocks.

    :param df_cashflow_qrt:
        Pandas DataFrame with Cash-Flow Statement Quarterly data for one or
        more stocks.

    :param func:
        Function to apply on a per-stock basis after the signals have been
        calculated, but before they have been reindexed to daily data-points.
        This is useful e.g. to calculate multi-year averages.
        For example, to calculate the 2-year averages of TTM data:
        `func = lambda df: 0.5 * (df + df.shift(4))`

    :param fill_method:
        String or callable for the method of filling in empty values when
        reindexing financial data to daily data-points.
        See :obj:`~simfin.resample.reindex` for valid options.

    :param offset:
        Pandas DateOffset added to the date-index of the Pandas DataFrames with
        the financial data. Example: `pd.DateOffset(days=60)` This is useful if
        you want to add a lag of e.g. 60 days to the dates of financial reports
        with Income Statements, Balance Sheets, and Cash-Flow Statements, because
        the REPORT_DATE is not when it was actually made available to the public,
        which can be 1, 2 or even 3 months after the REPORT_DATE.
        See :obj:`~simfin.utils.add_date_offset` for more details.

    :param date_index:
        Name of the date-column for the financial data e.g. REPORT_DATE.

    :param group_index:
        If the DataFrames have a MultiIndex then group data using this
        index-column. By default this is TICKER but it could also be e.g.
        SIMFIN_ID if you are using that as an index in your DataFrame.

    :return:
        Pandas DataFrame with growth signals.
    """

    # This implementation uses sf.rel_change() to calculate the growth-rates,
    # which means that several groupby operations are performed. But this is
    # easier to implement and for large DataFrames it is only about 10% slower
    # than using sf.apply() with a function like _signals() in fin_signals().

    ###############################
    # Annual growth using TTM data.

    # Select and combine the data we need.
    df_ttm1 = df_income_ttm[[REVENUE, NET_INCOME]]
    df_ttm2 = free_cash_flow(df_cashflow_ttm)
    df_ttm3 = df_balance_ttm[[TOTAL_ASSETS]]
    df_ttm = pd.concat([df_ttm1, df_ttm2, df_ttm3], axis=1)

    # Dict mapping to the new column-names.
    new_names = {REVENUE: SALES_GROWTH,
                 NET_INCOME: EARNINGS_GROWTH,
                 FCF: FCF_GROWTH,
                 TOTAL_ASSETS: ASSETS_GROWTH}

    # Calculate the growth-rates.
    df_growth = rel_change(df=df_ttm, freq='q', quarters=4,
                           future=False, annualized=False,
                           new_names=new_names)

    #############################################
    # Year-Over-Year growth using Quarterly data.

    # Select and combine the data we need.
    df_qrt1 = df_income_qrt[[REVENUE, NET_INCOME]]
    df_qrt2 = free_cash_flow(df_cashflow_qrt)
    df_qrt3 = df_balance_qrt[[TOTAL_ASSETS]]
    df_qrt = pd.concat([df_qrt1, df_qrt2, df_qrt3], axis=1)

    # Dict mapping to the new column-names.
    new_names = {REVENUE: SALES_GROWTH_YOY,
                 NET_INCOME: EARNINGS_GROWTH_YOY,
                 FCF: FCF_GROWTH_YOY,
                 TOTAL_ASSETS: ASSETS_GROWTH_YOY}

    # Calculate the growth-rates.
    df_growth_yoy = rel_change(df=df_qrt, freq='q', quarters=4,
                               future=False, annualized=False,
                               new_names=new_names)

    ########################################################
    # Quarter-Over-Quarter growth using Quarterly data.
    # Note: This uses the same Quarterly DataFrame as above.

    # Dict mapping to the new column-names.
    new_names = {REVENUE: SALES_GROWTH_QOQ,
                 NET_INCOME: EARNINGS_GROWTH_QOQ,
                 FCF: FCF_GROWTH_QOQ,
                 TOTAL_ASSETS: ASSETS_GROWTH_QOQ}

    # Calculate the growth-rates.
    df_growth_qoq = rel_change(df=df_qrt, freq='q', quarters=1,
                               future=False, annualized=False,
                               new_names=new_names)

    ##################
    # Post-processing.

    # Combine into a single DataFrame.
    df_signals = pd.concat([df_growth, df_growth_yoy, df_growth_qoq], axis=1)

    # Add offset / lag to the index-dates of the signals.
    if offset is not None:
        df_signals = add_date_offset(df=df_signals, offset=offset,
                                     date_index=date_index)

    # Process the signals using the supplied function e.g. to calculate averages.
    if func is not None:
        df_signals = apply(df=df_signals, func=func, group_index=group_index)

    # Reindex to the same daily data-points as the share-prices.
    if df_prices is not None:
        df_signals = reindex(df_src=df_signals, df_target=df_prices,
                             method=fill_method, group_index=group_index)

    # Sort the columns by their names.
    df_signals.sort_index(axis='columns', inplace=True)

    return df_signals
##########################################################################
@cache
def val_signals(df_prices, df_income_ttm, df_balance_ttm, df_cashflow_ttm,
                fill_method='ffill', offset=None, func=None,
                date_index=REPORT_DATE, shares_index=SHARES_DILUTED,
                group_index=TICKER, banks=False, insurance=False):
    """
    Calculate valuation signals such as P/E and P/Sales ratios for all stocks
    in the given DataFrames.

    This function can take a while to compute, so it will create a cache-file
    if you pass the arg `cache_refresh`. The next time you call this function,
    the cache-file will get loaded if it is more recent than specified by
    `cache_refresh`, otherwise the function will get computed again and the
    result saved in the cache-file for future use. See the documentation for
    the :obj:`~simfin.cache.cache` wrapper for details on its arguments.

    .. warning:: You **MUST** use keyword arguments to this function,
        otherwise the first unnamed arguments would get passed to the
        :obj:`~simfin.cache.cache` wrapper instead.

    :param df_prices:
        Pandas DataFrame with share-prices for one or more stocks.

    :param df_income_ttm:
        Pandas DataFrame with Income Statement TTM data for one or more stocks.

    :param df_balance_ttm:
        Pandas DataFrame with Balance Sheet TTM data for one or more stocks.

    :param df_cashflow_ttm:
        Pandas DataFrame with Cash-Flow Statement TTM data for one or more stocks.

    :param fill_method:
        String or callable for the method of filling in empty values when
        reindexing financial data to daily data-points.
        See :obj:`~simfin.resample.reindex` for valid options.

    :param offset:
        Pandas DateOffset added to the date-index of the Pandas DataFrames with
        the financial data. Example: `pd.DateOffset(days=60)` This is useful if
        you want to add a lag of e.g. 60 days to the dates of financial reports
        with Income Statements, Balance Sheets, and Cash-Flow Statements, because
        the REPORT_DATE is not when it was actually made available to the public,
        which can be 1, 2 or even 3 months after the REPORT_DATE.
        See :obj:`~simfin.utils.add_date_offset` for more details.

    :param func:
        Function to apply on a per-stock basis on the financial data, before
        calculating the valuation signals. This is useful e.g. to calculate
        multi-year averages of the Net Income and Revenue and use those when
        calculating P/E and P/Sales ratios.
        For example, to calculate the 2-year averages of TTM data:
        `func = lambda df: 0.5 * (df + df.shift(4))`

    :param date_index:
        Name of the date-column for the financial data e.g. REPORT_DATE.

    :param shares_index:
        String with the column-name for the share-counts. SHARES_DILUTED
        takes the potential diluting impact of stock-options into account, so
        it results in more conservative valuation ratios than SHARES_BASIC.

    :param group_index:
        If the DataFrames have a MultiIndex then group data using this
        index-column. By default this is TICKER but it could also be e.g.
        SIMFIN_ID if you are using that as an index in your DataFrame.

    :param banks:
        Boolean whether to use the special datasets for banks.

    :param insurance:
        Boolean whether to use the special datasets for insurance
        companies.

    :return:
        Pandas DataFrame with valuation signals.
    """

    # Get the required data from the Income Statements.
    columns = [REVENUE, NET_INCOME_COMMON, SHARES_BASIC, SHARES_DILUTED]
    df_inc = df_income_ttm[columns]

    # Get the required data from the Balance Sheets.
    if banks or insurance:
        columns = [TOTAL_ASSETS, TOTAL_LIABILITIES, TOTAL_EQUITY]
    else:
        columns = [TOTAL_CUR_ASSETS, CASH_EQUIV_ST_INVEST, ACC_NOTES_RECV,
                   INVENTORIES, TOTAL_LIABILITIES, TOTAL_EQUITY]
    df_bal = df_balance_ttm[columns]

    # Get the required data from the Cash-Flow Statements.
    columns = [DIVIDENDS_PAID]
    df_cf = df_cashflow_ttm[columns]

    # Combine all the data. This creates a new copy that we can add columns to.
    df = pd.concat([df_inc, df_bal, df_cf], axis=1)

    # Calculate derived financial data such as Free Cash Flow (FCF),
    # and add it as new columns to the DataFrame.
    # This is only TTM data with 4 data-points per year, so it is
    # faster than calculating it for the daily data-points below.
    df[FCF] = free_cash_flow(df_cashflow_ttm)

    # Net Current Asset Value (NCAV).
    # Note: Not for banks and insurances.
    if not banks and not insurance:
        df[NCAV] = ncav(df_balance_ttm)

    # Net-Net Working Capital (NetNet).
    # Note: Not for banks and insurances.
    if not banks and not insurance:
        df[NETNET] = netnet(df_balance_ttm)

    # Add offset / lag to the index-dates of the financial data.
    if offset is not None:
        df = add_date_offset(df=df, offset=offset, date_index=date_index)

    # Copy the number of shares before applying the user-supplied function,
    # which might change the number of shares in the original DataFrame df.
    # This tries to use the given share-counts (e.g. SHARES_DILUTED) and
    # fill in missing values with the other share-counts (e.g. SHARES_BASIC).
    df_shares = shares(df=df, index=shares_index)

    # Reindex the share-counts to daily data-points.
    df_shares_daily = reindex(df_src=df_shares, df_target=df_prices,
                              method=fill_method, group_index=group_index)

    # Process the financial data using the user-supplied function
    # e.g. to calculate multi-year averages of Earnings, Sales, etc.
    if func is not None:
        df = apply(df=df, func=func, group_index=group_index)

    # Calculate Per-Share numbers. It is important to use the share-count
    # from before the user-supplied function was applied.
    df_per_share = df.div(df_shares, axis=0)

    # Reindex the per-share financial data to daily data-points.
    df_daily = reindex(df_src=df_per_share, df_target=df_prices,
                       method=fill_method, group_index=group_index)

    # Create new DataFrame for the signals.
    # Setting the index improves performance.
    df_signals = pd.DataFrame(index=df_prices.index)

    # Use the closing share-price for all signals.
    df_price = df_prices[CLOSE]

    # Calculate basic signals.
    df_signals[PSALES] = df_price / df_daily[REVENUE]
    df_signals[PE] = df_price / df_daily[NET_INCOME_COMMON]
    df_signals[PFCF] = df_price / df_daily[FCF]
    df_signals[PBOOK] = df_price / df_daily[TOTAL_EQUITY]

    # Calculate Price / Net Current Asset Value (NCAV).
    # This measures the share-price relative to estimated liquidation value.
    # Note: Not for banks and insurances.
    if not banks and not insurance:
        df_signals[P_NCAV] = df_price / df_daily[NCAV]

    # Calculate Price / Net-Net Working Capital (NNWC aka. NetNet).
    # This measures the share-price relative to a more conservative estimate
    # of liquidation value, which values the Receivables and Inventories at
    # a discount to their book-value.
    # Note: Not for banks and insurances.
    if not banks and not insurance:
        df_signals[P_NETNET] = df_price / df_daily[NETNET]

    # Calculate Price / (Cash + Equivalents + Short-Term Investments)
    # This can be used to screen for companies that might be takeover targets.
    # Note: Not for banks and insurances.
    if not banks and not insurance:
        df_signals[P_CASH] = df_price / df_daily[CASH_EQUIV_ST_INVEST]

    # Calculate Earnings Yield (inverse of the P/E ratio).
    df_signals[EARNINGS_YIELD] = df_daily[NET_INCOME_COMMON] / df_price

    # Calculate FCF Yield (inverse of the P/FCF ratio).
    df_signals[FCF_YIELD] = df_daily[FCF] / df_price

    # Calculate Dividend Yield using TTM Cash-Flow data, which is easier than
    # using df_prices[DIVIDEND] because the actual payment dates may differ
    # slightly from one year to the next, making it difficult to calculate TTM.
    # Note the negation because DIVIDENDS_PAID is negative.
    df_signals[DIV_YIELD] = -df_daily[DIVIDENDS_PAID] / df_price

    # Calculate Market Capitalization.
    df_signals[MARKET_CAP] = df_shares_daily * df_price

    # Sort the columns by their names.
    df_signals.sort_index(axis='columns', inplace=True)

    return df_signals
##########################################################################
| [
"pandas.DataFrame",
"simfin.derived.shares",
"numpy.log",
"simfin.utils.apply",
"simfin.derived.netnet",
"numpy.log10",
"simfin.derived.ncav",
"simfin.utils.add_date_offset",
"pandas.concat",
"simfin.resample.reindex",
"simfin.derived.free_cash_flow",
"simfin.rel_change.rel_change"
] | [((3210, 3269), 'simfin.utils.apply', 'apply', ([], {'df': 'df_prices', 'func': '_signals', 'group_index': 'group_index'}), '(df=df_prices, func=_signals, group_index=group_index)\n', (3215, 3269), False, 'from simfin.utils import apply, add_date_offset\n'), ((5845, 5897), 'simfin.utils.apply', 'apply', ([], {'df': 'df', 'func': '_signals', 'group_index': 'group_index'}), '(df=df, func=_signals, group_index=group_index)\n', (5850, 5897), False, 'from simfin.utils import apply, add_date_offset\n'), ((8999, 9039), 'simfin.derived.shares', 'shares', ([], {'df': 'df_shares', 'index': 'shares_index'}), '(df=df_shares, index=shares_index)\n', (9005, 9039), False, 'from simfin.derived import free_cash_flow, ncav, netnet, shares\n'), ((10534, 10629), 'simfin.resample.reindex', 'reindex', ([], {'df_src': 'df_shares', 'df_target': 'df_prices', 'method': 'fill_method', 'group_index': 'group_index'}), '(df_src=df_shares, df_target=df_prices, method=fill_method,\n group_index=group_index)\n', (10541, 10629), False, 'from simfin.resample import reindex\n'), ((10779, 10801), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(1)'}), '(dfs, axis=1)\n', (10788, 10801), True, 'import pandas as pd\n'), ((10896, 10948), 'simfin.utils.apply', 'apply', ([], {'df': 'df', 'func': '_signals', 'group_index': 'group_index'}), '(df=df, func=_signals, group_index=group_index)\n', (10901, 10948), False, 'from simfin.utils import apply, add_date_offset\n'), ((20359, 20402), 'simfin.derived.free_cash_flow', 'free_cash_flow', ([], {'df_cashflow': 'df_cashflow_ttm'}), '(df_cashflow=df_cashflow_ttm)\n', (20373, 20402), False, 'from simfin.derived import free_cash_flow, ncav, netnet, shares\n'), ((20461, 20503), 'pandas.concat', 'pd.concat', (['[df1, df2, df3, df_fcf]'], {'axis': '(1)'}), '([df1, df2, df3, df_fcf], axis=1)\n', (20470, 20503), True, 'import pandas as pd\n'), ((20765, 20817), 'simfin.utils.apply', 'apply', ([], {'df': 'df', 'func': '_signals', 'group_index': 'group_index'}), 
'(df=df, func=_signals, group_index=group_index)\n', (20770, 20817), False, 'from simfin.utils import apply, add_date_offset\n'), ((25953, 25984), 'simfin.derived.free_cash_flow', 'free_cash_flow', (['df_cashflow_ttm'], {}), '(df_cashflow_ttm)\n', (25967, 25984), False, 'from simfin.derived import free_cash_flow, ncav, netnet, shares\n'), ((26043, 26089), 'pandas.concat', 'pd.concat', (['[df_ttm1, df_ttm2, df_ttm3]'], {'axis': '(1)'}), '([df_ttm1, df_ttm2, df_ttm3], axis=1)\n', (26052, 26089), True, 'import pandas as pd\n'), ((26352, 26452), 'simfin.rel_change.rel_change', 'rel_change', ([], {'df': 'df_ttm', 'freq': '"""q"""', 'quarters': '(4)', 'future': '(False)', 'annualized': '(False)', 'new_names': 'new_names'}), "(df=df_ttm, freq='q', quarters=4, future=False, annualized=False,\n new_names=new_names)\n", (26362, 26452), False, 'from simfin.rel_change import rel_change\n'), ((26713, 26744), 'simfin.derived.free_cash_flow', 'free_cash_flow', (['df_cashflow_qrt'], {}), '(df_cashflow_qrt)\n', (26727, 26744), False, 'from simfin.derived import free_cash_flow, ncav, netnet, shares\n'), ((26803, 26849), 'pandas.concat', 'pd.concat', (['[df_qrt1, df_qrt2, df_qrt3]'], {'axis': '(1)'}), '([df_qrt1, df_qrt2, df_qrt3], axis=1)\n', (26812, 26849), True, 'import pandas as pd\n'), ((27132, 27232), 'simfin.rel_change.rel_change', 'rel_change', ([], {'df': 'df_qrt', 'freq': '"""q"""', 'quarters': '(4)', 'future': '(False)', 'annualized': '(False)', 'new_names': 'new_names'}), "(df=df_qrt, freq='q', quarters=4, future=False, annualized=False,\n new_names=new_names)\n", (27142, 27232), False, 'from simfin.rel_change import rel_change\n'), ((27752, 27852), 'simfin.rel_change.rel_change', 'rel_change', ([], {'df': 'df_qrt', 'freq': '"""q"""', 'quarters': '(1)', 'future': '(False)', 'annualized': '(False)', 'new_names': 'new_names'}), "(df=df_qrt, freq='q', quarters=1, future=False, annualized=False,\n new_names=new_names)\n", (27762, 27852), False, 'from simfin.rel_change import 
rel_change\n'), ((28015, 28075), 'pandas.concat', 'pd.concat', (['[df_growth, df_growth_yoy, df_growth_qoq]'], {'axis': '(1)'}), '([df_growth, df_growth_yoy, df_growth_qoq], axis=1)\n', (28024, 28075), True, 'import pandas as pd\n'), ((33114, 33156), 'pandas.concat', 'pd.concat', (['[df_inc, df_bal, df_cf]'], {'axis': '(1)'}), '([df_inc, df_bal, df_cf], axis=1)\n', (33123, 33156), True, 'import pandas as pd\n'), ((33423, 33454), 'simfin.derived.free_cash_flow', 'free_cash_flow', (['df_cashflow_ttm'], {}), '(df_cashflow_ttm)\n', (33437, 33454), False, 'from simfin.derived import free_cash_flow, ncav, netnet, shares\n'), ((34182, 34215), 'simfin.derived.shares', 'shares', ([], {'df': 'df', 'index': 'shares_index'}), '(df=df, index=shares_index)\n', (34188, 34215), False, 'from simfin.derived import free_cash_flow, ncav, netnet, shares\n'), ((34292, 34387), 'simfin.resample.reindex', 'reindex', ([], {'df_src': 'df_shares', 'df_target': 'df_prices', 'method': 'fill_method', 'group_index': 'group_index'}), '(df_src=df_shares, df_target=df_prices, method=fill_method,\n group_index=group_index)\n', (34299, 34387), False, 'from simfin.resample import reindex\n'), ((34896, 34994), 'simfin.resample.reindex', 'reindex', ([], {'df_src': 'df_per_share', 'df_target': 'df_prices', 'method': 'fill_method', 'group_index': 'group_index'}), '(df_src=df_per_share, df_target=df_prices, method=fill_method,\n group_index=group_index)\n', (34903, 34994), False, 'from simfin.resample import reindex\n'), ((35122, 35157), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df_prices.index'}), '(index=df_prices.index)\n', (35134, 35157), True, 'import pandas as pd\n'), ((2258, 2293), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df_prices.index'}), '(index=df_prices.index)\n', (2270, 2293), True, 'import pandas as pd\n'), ((5271, 5299), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df.index'}), '(index=df.index)\n', (5283, 5299), True, 'import pandas as pd\n'), ((9248, 9276), 
'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df.index'}), '(index=df.index)\n', (9260, 9276), True, 'import pandas as pd\n'), ((9756, 9774), 'numpy.log', 'np.log', (['df_rel_vol'], {}), '(df_rel_vol)\n', (9762, 9774), True, 'import numpy as np\n'), ((10354, 10421), 'simfin.utils.add_date_offset', 'add_date_offset', ([], {'df': 'df_shares', 'offset': 'offset', 'date_index': 'date_index'}), '(df=df_shares, offset=offset, date_index=date_index)\n', (10369, 10421), False, 'from simfin.utils import apply, add_date_offset\n'), ((14667, 14695), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df.index'}), '(index=df.index)\n', (14679, 14695), True, 'import pandas as pd\n'), ((19095, 19116), 'numpy.log10', 'np.log10', (['df[REVENUE]'], {}), '(df[REVENUE])\n', (19103, 19116), True, 'import numpy as np\n'), ((20610, 20670), 'simfin.utils.add_date_offset', 'add_date_offset', ([], {'df': 'df', 'offset': 'offset', 'date_index': 'date_index'}), '(df=df, offset=offset, date_index=date_index)\n', (20625, 20670), False, 'from simfin.utils import apply, add_date_offset\n'), ((20947, 21003), 'simfin.utils.apply', 'apply', ([], {'df': 'df_signals', 'func': 'func', 'group_index': 'group_index'}), '(df=df_signals, func=func, group_index=group_index)\n', (20952, 21003), False, 'from simfin.utils import apply, add_date_offset\n'), ((21121, 21217), 'simfin.resample.reindex', 'reindex', ([], {'df_src': 'df_signals', 'df_target': 'df_prices', 'method': 'fill_method', 'group_index': 'group_index'}), '(df_src=df_signals, df_target=df_prices, method=fill_method,\n group_index=group_index)\n', (21128, 21217), False, 'from simfin.resample import reindex\n'), ((28183, 28251), 'simfin.utils.add_date_offset', 'add_date_offset', ([], {'df': 'df_signals', 'offset': 'offset', 'date_index': 'date_index'}), '(df=df_signals, offset=offset, date_index=date_index)\n', (28198, 28251), False, 'from simfin.utils import apply, add_date_offset\n'), ((28418, 28474), 'simfin.utils.apply', 'apply', ([], 
{'df': 'df_signals', 'func': 'func', 'group_index': 'group_index'}), '(df=df_signals, func=func, group_index=group_index)\n', (28423, 28474), False, 'from simfin.utils import apply, add_date_offset\n'), ((28592, 28688), 'simfin.resample.reindex', 'reindex', ([], {'df_src': 'df_signals', 'df_target': 'df_prices', 'method': 'fill_method', 'group_index': 'group_index'}), '(df_src=df_signals, df_target=df_prices, method=fill_method,\n group_index=group_index)\n', (28599, 28688), False, 'from simfin.resample import reindex\n'), ((33552, 33572), 'simfin.derived.ncav', 'ncav', (['df_balance_ttm'], {}), '(df_balance_ttm)\n', (33556, 33572), False, 'from simfin.derived import free_cash_flow, ncav, netnet, shares\n'), ((33672, 33694), 'simfin.derived.netnet', 'netnet', (['df_balance_ttm'], {}), '(df_balance_ttm)\n', (33678, 33694), False, 'from simfin.derived import free_cash_flow, ncav, netnet, shares\n'), ((33801, 33861), 'simfin.utils.add_date_offset', 'add_date_offset', ([], {'df': 'df', 'offset': 'offset', 'date_index': 'date_index'}), '(df=df, offset=offset, date_index=date_index)\n', (33816, 33861), False, 'from simfin.utils import apply, add_date_offset\n'), ((34588, 34636), 'simfin.utils.apply', 'apply', ([], {'df': 'df', 'func': 'func', 'group_index': 'group_index'}), '(df=df, func=func, group_index=group_index)\n', (34593, 34636), False, 'from simfin.utils import apply, add_date_offset\n')] |
# Script: read a multi-scene CZI microscopy image scene-by-scene into one
# container (zarr / numpy / dask, selected via `array_type`) and display it
# in the napari viewer.
from aicspylibczi import CziFile
from aicsimageio import AICSImage, imread, imread_dask
from czifiletools import czifile_tools as czt
#import tools.fileutils as czt
from czifiletools import napari_tools as nap
import numpy as np
import zarr
import dask
import dask.array as da
from dask import delayed
from itertools import product
import napari
# Test files used during development; exactly one `filename` is active.
# filename = r"testdata\Tumor_H+E_small2.czi"
#filename = r"C:\Testdata_Zeiss\CD7\Z-Stack_DCV\CellDivision_T=10_Z=15_CH=2_DCV_small.czi"
#filename = r"C:\Testdata_Zeiss\CD7\Z-Stack_DCV\CellDivision_T=15_Z=20_CH=2_DCV.czi"
#filename = r"C:\Testdata_Zeiss\CD7\Mouse Kidney_40x0.95_3CD_JK_comp.czi"
#filename = r"C:\Testdata_Zeiss\DTScan_ID4_small.czi"
#filename = r"C:\Testdata_Zeiss\DTScan_ID4.czi"
#filename = r"D:\Temp\input\DTScan_ID4-nokeeptiles.czi"
filename = r"C:\Testdata_Zeiss\CD7\testwell96.czi"
#filename = r"C:\Testdata_Zeiss\CZI_Testfiles\S=2_3x3_Z=4_CH=2.czi"
#filename = r"C:\Testdata_Zeiss\LatticeLightSheet\LS_Mitosis_T=150-300.czi"
#filename = r"C:\Users\m1srh\Downloads\Halo_CZI_small.czi"
#filename = r"C:\Testdata_Zeiss\OverViewScan.czi"
######################################################################
# get the metadata as a dictionary
md, md_add = czt.get_metadata_czi(filename)
# NOTE(review): these two flags are never read below — presumably leftovers.
use_pylibczi = True
use_dask_delayed = True
# read CZI using aicspylibczi
czi = CziFile(filename)
# for testing
# Get the shape of the data
print('czi_dims : ', czi.dims)
print('czi_dims_shape : ', czi.get_dims_shape())
print('czi_size : ', czi.size)
print('IsMoasic : ', czi.is_mosaic())
# get the required shape for all and single scenes
shape_all, shape_single, same_shape = czt.get_shape_allscenes(czi)
print('Required_Array Shape for all scenes: ', shape_all)
for sh in shape_single:
    print('Required Array Shape for single scenes: ', sh)
# Backend selection for the output container.
#array_type = 'dask'
array_type = 'zarr'
#array_type = 'numpy'
if array_type == 'zarr':
    # define array to store all channels
    print('Using aicspylibCZI to read the image (ZARR array).')
    # option 1: in-memory zarr array with automatic chunking
    all_scenes_array = zarr.create(tuple(shape_all),
                                   dtype=md['NumPy.dtype'],
                                   chunks=True)
    # option 2: persist the zarr store on disk instead
    # all_scenes_array = zarr.open(r'd:\Temp\czi_scene_all.zarr', mode='w',
    #                             shape=shape_all,
    #                             chunks=True,
    #                             dtype=md['NumPy.dtype'])
if array_type == 'numpy':
    print('Using aicspylibCZI to read the image (Numpy.Array).')
    all_scenes_array = np.empty(shape_all, dtype=md['NumPy.dtype'])
if array_type == 'zarr' or array_type == 'numpy':
    # loop over all scenes
    for s in range(md['SizeS']):
        # get the CZIscene for the current scene
        single_scene = czt.CZIScene(czi, sceneindex=s)
        out = czt.read_czi_scene(czi, single_scene, md, array_type=array_type)
        # Build index tuples that select the scene axis (posS) while keeping
        # all other leading axes whole; the last two (YX) axes are implicit.
        index_list_out = [slice(None, None, None)] * (len(all_scenes_array.shape) - 2)
        index_list_out[single_scene.posS] = 0
        index_list = [slice(None, None, None)] * (len(all_scenes_array.shape) - 2)
        index_list[single_scene.posS] = s
        all_scenes_array[tuple(index_list)] = out[tuple(index_list_out)]
        #all_scenes_array[:, s, :, :, :] = out
        #all_scenes_array[s, :, :, :, :, :] = np.squeeze(out, axis=0)
    print(all_scenes_array.shape)
elif array_type == 'dask':
    def dask_load_sceneimage(czi, s, md):
        # get the CZIscene for the current scene
        single_scene = czt.CZIScene(czi, md, sceneindex=s)
        out = czt.read_czi_scene(czi, single_scene, md)
        return out
    # shape of a single scene (drop the scene axis)
    sp = shape_all[1:]
    # create dask stack of lazy image readers
    print('Using aicspylibCZI to read the image (Dask.Array) + Delayed Reading.')
    lazy_process_image = dask.delayed(dask_load_sceneimage)  # lazy reader
    lazy_arrays = [lazy_process_image(czi, s, md) for s in range(md['SizeS'])]
    dask_arrays = [
        da.from_delayed(lazy_array, shape=sp, dtype=md['NumPy.dtype'])
        for lazy_array in lazy_arrays
    ]
    # Stack into one large dask.array
    all_scenes_array = da.stack(dask_arrays, axis=0)
    print(all_scenes_array.shape)
# show array inside napari viewer
viewer = napari.Viewer()
layers = nap.show_napari(viewer, all_scenes_array, md,
                          blending='additive',
                          adjust_contrast=True,
                          gamma=0.85,
                          add_mdtable=True,
                          rename_sliders=True)
#viewer.run()
| [
"czifiletools.czifile_tools.read_czi_scene",
"czifiletools.napari_tools.show_napari",
"dask.delayed",
"numpy.empty",
"dask.array.stack",
"czifiletools.czifile_tools.CZIScene",
"czifiletools.czifile_tools.get_shape_allscenes",
"napari.Viewer",
"czifiletools.czifile_tools.get_metadata_czi",
"aicspyl... | [((1226, 1256), 'czifiletools.czifile_tools.get_metadata_czi', 'czt.get_metadata_czi', (['filename'], {}), '(filename)\n', (1246, 1256), True, 'from czifiletools import czifile_tools as czt\n'), ((1339, 1356), 'aicspylibczi.CziFile', 'CziFile', (['filename'], {}), '(filename)\n', (1346, 1356), False, 'from aicspylibczi import CziFile\n'), ((1657, 1685), 'czifiletools.czifile_tools.get_shape_allscenes', 'czt.get_shape_allscenes', (['czi'], {}), '(czi)\n', (1680, 1685), True, 'from czifiletools import czifile_tools as czt\n'), ((4262, 4277), 'napari.Viewer', 'napari.Viewer', ([], {}), '()\n', (4275, 4277), False, 'import napari\n'), ((4288, 4431), 'czifiletools.napari_tools.show_napari', 'nap.show_napari', (['viewer', 'all_scenes_array', 'md'], {'blending': '"""additive"""', 'adjust_contrast': '(True)', 'gamma': '(0.85)', 'add_mdtable': '(True)', 'rename_sliders': '(True)'}), "(viewer, all_scenes_array, md, blending='additive',\n adjust_contrast=True, gamma=0.85, add_mdtable=True, rename_sliders=True)\n", (4303, 4431), True, 'from czifiletools import napari_tools as nap\n'), ((2566, 2610), 'numpy.empty', 'np.empty', (['shape_all'], {'dtype': "md['NumPy.dtype']"}), "(shape_all, dtype=md['NumPy.dtype'])\n", (2574, 2610), True, 'import numpy as np\n'), ((2795, 2826), 'czifiletools.czifile_tools.CZIScene', 'czt.CZIScene', (['czi'], {'sceneindex': 's'}), '(czi, sceneindex=s)\n', (2807, 2826), True, 'from czifiletools import czifile_tools as czt\n'), ((2841, 2905), 'czifiletools.czifile_tools.read_czi_scene', 'czt.read_czi_scene', (['czi', 'single_scene', 'md'], {'array_type': 'array_type'}), '(czi, single_scene, md, array_type=array_type)\n', (2859, 2905), True, 'from czifiletools import czifile_tools as czt\n'), ((3826, 3860), 'dask.delayed', 'dask.delayed', (['dask_load_sceneimage'], {}), '(dask_load_sceneimage)\n', (3838, 3860), False, 'import dask\n'), ((4153, 4182), 'dask.array.stack', 'da.stack', (['dask_arrays'], {'axis': '(0)'}), '(dask_arrays, 
axis=0)\n', (4161, 4182), True, 'import dask.array as da\n'), ((3537, 3572), 'czifiletools.czifile_tools.CZIScene', 'czt.CZIScene', (['czi', 'md'], {'sceneindex': 's'}), '(czi, md, sceneindex=s)\n', (3549, 3572), True, 'from czifiletools import czifile_tools as czt\n'), ((3587, 3628), 'czifiletools.czifile_tools.read_czi_scene', 'czt.read_czi_scene', (['czi', 'single_scene', 'md'], {}), '(czi, single_scene, md)\n', (3605, 3628), True, 'from czifiletools import czifile_tools as czt\n'), ((3985, 4047), 'dask.array.from_delayed', 'da.from_delayed', (['lazy_array'], {'shape': 'sp', 'dtype': "md['NumPy.dtype']"}), "(lazy_array, shape=sp, dtype=md['NumPy.dtype'])\n", (4000, 4047), True, 'import dask.array as da\n')] |
from functools import reduce
import torch
from tqdm import tqdm
import numpy as np
from sklearn.metrics import f1_score, precision_score, recall_score
def run_valid(model, loader, device):
    """Run inference over ``loader`` and collect per-word predictions.

    Parameters
    ----------
    model : callable (torch.nn.Module-like)
        Takes ``(text, mask)`` tensors; switched to eval mode first.
    loader : iterable
        Yields ``(text, targets)`` batches; a target value of -1 marks
        padding positions.
    device : torch.device or str
        Device the batch tensors are moved to before the forward pass.

    Returns
    -------
    list of numpy.ndarray
        One prediction array per batch, restricted to non-padding words.
    """
    model.eval()
    all_valid_preds = []
    for text, targets in tqdm(loader):
        # Padding positions are labelled -1; the model receives a 0/1 mask.
        word_mask = targets != -1
        mask = word_mask + 0  # bool -> int tensor
        with torch.no_grad():
            preds = model(text.to(device), mask.to(device))
        # Keep only predictions for real (non-padding) words.
        # (Removed: dead `valid_loss` accumulator and commented-out
        # criterion code that was never used or returned.)
        all_valid_preds.append(preds[word_mask].detach().cpu().numpy())
    return all_valid_preds
def evaluate(model, loaders, targets, device):
    """Evaluate ``model`` on one or several loaders against ``targets``.

    Prediction score arrays from all loaders are summed element-wise
    before the argmax, i.e. the loaders act as an ensemble over the
    same examples.

    Parameters
    ----------
    model : callable
        Model passed through to ``run_valid``.
    loaders : iterable or list of iterables
        A single loader or a list of loaders over the same examples.
    targets : array-like
        Ground-truth labels; classes of interest are 1 (period),
        2 (question) and 3 (comma).
    device : torch.device or str
        Device used for inference.

    Returns
    -------
    (dict, numpy.ndarray)
        Macro and per-class F1 / precision / recall, plus the summed
        prediction scores.
    """
    if not isinstance(loaders, list):
        loaders = [loaders]
    preds = []
    for loader in loaders:
        valid_preds = np.concatenate(run_valid(model, loader, device))
        preds.append(valid_preds)
    # Element-wise sum of the per-loader prediction arrays (ensembling).
    preds = reduce(lambda x, y: x + y, preds)
    labels = [1, 2, 3]
    # Hoisted: the original recomputed argmax for each of the six metrics.
    pred_labels = preds.argmax(axis=1)
    valid_f1 = f1_score(pred_labels, targets, average='macro', labels=labels)
    valid_f1_sep = f1_score(pred_labels, targets, average=None, labels=labels)
    valid_precision = precision_score(pred_labels, targets, average='macro', labels=labels)
    valid_precision_sep = precision_score(pred_labels, targets, average=None, labels=labels)
    valid_recall = recall_score(pred_labels, targets, average='macro', labels=labels)
    valid_recall_sep = recall_score(pred_labels, targets, average=None, labels=labels)
    return {'total': valid_f1, 'period': valid_f1_sep[0],
            'question': valid_f1_sep[1],
            'comma': valid_f1_sep[2],
            'total_precision': valid_precision,
            'period_precision': valid_precision_sep[0],
            'question_precision': valid_precision_sep[1],
            'comma_precision': valid_precision_sep[2],
            'total_recall': valid_recall,
            'period_recall': valid_recall_sep[0],
            'question_recall': valid_recall_sep[1],
            'comma_recall': valid_recall_sep[2]}, preds
| [
"functools.reduce",
"tqdm.tqdm",
"torch.no_grad",
"numpy.concatenate"
] | [((269, 281), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (273, 281), False, 'from tqdm import tqdm\n'), ((1068, 1101), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', 'preds'], {}), '(lambda x, y: x + y, preds)\n', (1074, 1101), False, 'from functools import reduce\n'), ((994, 1021), 'numpy.concatenate', 'np.concatenate', (['valid_preds'], {}), '(valid_preds)\n', (1008, 1021), True, 'import numpy as np\n'), ((360, 375), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (373, 375), False, 'import torch\n')] |
# Test frovedis niters and sklearn niters
# Script: fit a 2-component Gaussian mixture with both frovedis and
# scikit-learn on the same data and report "Passed" iff the number of
# EM iterations (`n_iter_`) matches.
import sys
import numpy as np
from frovedis.exrpc.server import FrovedisServer
from frovedis.matrix.dense import FrovedisRowmajorMatrix
from frovedis.mllib.gmm import GaussianMixture
import sklearn.mixture as sk
# initializing the Frovedis server
argvs = sys.argv
argc = len(argvs)
if (argc < 2):
    print ('Please give frovedis_server calling command as the first argument \n(e.g. "mpirun -np 2 /opt/nec/frovedis/ve/bin/frovedis_server")')
    quit()
FrovedisServer.initialize(argvs[1])
train_mat = np.loadtxt("./input/gmm_data.txt")
# creating spectral agglomerative object
# NOTE(review): comment above looks copy-pasted — this is a GMM, not
# spectral/agglomerative clustering.
n_components = 2
try:
    f_model = GaussianMixture(n_components=n_components)
    # fitting the training matrix on gaussian mixture object
    f_model.fit(train_mat)
    f_niter = f_model.n_iter_
except Exception as e:
    # Any failure is reported in the harness's "status=" format.
    print ("status=Exception: " + str(e))
    sys.exit(1)
try:
    sk_model = sk.GaussianMixture(n_components=n_components, random_state=0).fit(train_mat)
    s_niter = sk_model.n_iter_
except Exception as e:
    print ("status=Exception: " + str(e))
    sys.exit(1)
if(f_niter == s_niter):
    print("status=Passed")
else:
    print("status=Failed")
"frovedis.exrpc.server.FrovedisServer.initialize",
"sklearn.mixture.GaussianMixture",
"frovedis.mllib.gmm.GaussianMixture",
"numpy.loadtxt",
"sys.exit"
] | [((497, 532), 'frovedis.exrpc.server.FrovedisServer.initialize', 'FrovedisServer.initialize', (['argvs[1]'], {}), '(argvs[1])\n', (522, 532), False, 'from frovedis.exrpc.server import FrovedisServer\n'), ((546, 580), 'numpy.loadtxt', 'np.loadtxt', (['"""./input/gmm_data.txt"""'], {}), "('./input/gmm_data.txt')\n", (556, 580), True, 'import numpy as np\n'), ((660, 702), 'frovedis.mllib.gmm.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'n_components'}), '(n_components=n_components)\n', (675, 702), False, 'from frovedis.mllib.gmm import GaussianMixture\n'), ((890, 901), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (898, 901), False, 'import sys\n'), ((1100, 1111), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1108, 1111), False, 'import sys\n'), ((923, 984), 'sklearn.mixture.GaussianMixture', 'sk.GaussianMixture', ([], {'n_components': 'n_components', 'random_state': '(0)'}), '(n_components=n_components, random_state=0)\n', (941, 984), True, 'import sklearn.mixture as sk\n')] |
"""
DANN
"""
import numpy as np
import tensorflow as tf
from adapt.base import BaseAdaptDeep, make_insert_doc
from adapt.utils import check_network
EPS = np.finfo(np.float32).eps
# class SetEncoder(tf.keras.callbacks.Callback):
# def __init__(self):
# self.pretrain = True
# def on_epoch_end(self, epoch, logs=None):
# if (not logs.get("pretrain")) and self.pretrain:
# self.pretrain = False
# self.model.encoder_.set_weights(
# self.model.encoder_src_.get_weights())
@make_insert_doc(["encoder", "task", "discriminator"])
class ADDA(BaseAdaptDeep):
    """
    ADDA: Adversarial Discriminative Domain Adaptation
    ADDA is a feature-based domain adaptation method.
    The purpose of ADDA is to build a new feature representation
    in which source and target data could not be distinguished by
    any **discriminator** network. This feature representation is
    built with two **encoder** networks:
    - a **source encoder** trained to provide good features in order
      to learn the task on the source domain. The task is learned
      through a **task** network trained with the **source encoder**.
    - a **target encoder** trained to fool a **discriminator** network
      which tries to classify source and target data in the encoded space.
      The **target encoder** and the **discriminator** are trained
      in an adversarial fashion in the same way as GAN.
    The parameters of the four networks are optimized in a two stage
    algorithm where **source encoder** and **task** networks are first
    fitted according to the following optimization problem:
    .. math::
        \min_{\phi_S, F} \mathcal{L}_{task}(F(\phi_S(X_S)), y_S)
    In the second stage, **target encoder** and **discriminator**
    networks are fitted according to:
    .. math::
        \min_{\phi_T} & \; - \log(D(\phi_T(X_T)))) \\\\
        \min_{D} & \; - \log(D(\phi_S(X_S))) - \log(1 - D(\phi_T(X_T)))
    Where:
    - :math:`(X_S, y_S), (X_T)` are respectively the labeled source data
      and the unlabeled target data.
    - :math:`\phi_S, \phi_T, F, D` are respectively the **source encoder**,
      the **target encoder**, the **task** and the **discriminator** networks.
    The method has been originally introduced for **unsupervised**
    classification DA but it could be widen to other task in **supervised**
    DA straightforwardly.
    .. figure:: ../_static/images/adda.png
        :align: center
        Overview of the ADDA approach (source: [1])
    Parameters
    ----------
    pretrain : bool (default=True)
        Weither to perform pretraining of the ``encoder_src_``
        and ``task_`` networks on source data or not.
        separated compile and fit arguments for the
        pretraining can be given by using the prefix
        ``pretrain__`` as ``pretrain__epochs=10`` or
        ``pretrain__learning_rate=0.1`` for instance.
        If no pretrain arguments are given, the training
        arguments are used by default
    tol : float (default=0.001)
        Tolerance on the loss for early stopping of
        pretraining.
    Attributes
    ----------
    encoder_ : tensorflow Model
        encoder network.
    task_ : tensorflow Model
        task network.
    discriminator_ : tensorflow Model
        discriminator network.
    encoder_src_ : tensorflow Model
        Source encoder network
    history_ : dict
        history of the losses and metrics across the epochs.
        If ``yt`` is given in ``fit`` method, target metrics
        and losses are recorded too.
    Examples
    --------
    >>> import numpy as np
    >>> from adapt.feature_based import ADDA
    >>> np.random.seed(0)
    >>> Xs = np.concatenate((np.random.random((100, 1)),
    ...                      np.zeros((100, 1))), 1)
    >>> Xt = np.concatenate((np.random.random((100, 1)),
    ...                      np.ones((100, 1))), 1)
    >>> ys = 0.2 * Xs[:, 0]
    >>> yt = 0.2 * Xt[:, 0]
    >>> model = ADDA(random_state=0)
    >>> model.fit(Xs, ys, Xt, epochs=100, verbose=0)
    >>> np.abs(model.predict_task(Xt, domain="src").ravel() - yt).mean()
    0.1531...
    >>> np.abs(model.predict_task(Xt, domain="tgt").ravel() - yt).mean()
    0.0227...
    See also
    --------
    DANN
    DeepCORAL
    References
    ----------
    .. [1] `[1] <https://arxiv.org/pdf/1702.05464.pdf>`_ <NAME>, <NAME>, \
<NAME>, and <NAME>. "Adversarial discriminative domain adaptation". \
In CVPR, 2017.
    """

    def __init__(self,
                 encoder=None,
                 task=None,
                 discriminator=None,
                 Xt=None,
                 yt=None,
                 pretrain=True,
                 tol=0.001,
                 copy=True,
                 verbose=1,
                 random_state=None,
                 **params):
        # Collect the constructor arguments whose names match the estimator's
        # declared parameters and forward them to the BaseAdaptDeep __init__.
        names = self._get_param_names()
        kwargs = {k: v for k, v in locals().items() if k in names}
        kwargs.update(params)
        super().__init__(**kwargs)

    def _initialize_pretain_networks(self):
        # NOTE(review): "pretain" is a typo for "pretrain" — presumably the
        # base class calls it under this exact name, so it cannot be renamed
        # here without checking the framework hook; confirm before fixing.
        # Copy the pretrained source-encoder weights into the target encoder
        # so adversarial training starts from the source representation.
        self.encoder_.set_weights(
            self.encoder_src_.get_weights())

    def pretrain_step(self, data):
        """One pretraining step: fit source encoder + task on source data."""
        # Unpack the data.
        Xs, Xt, ys, yt = self._unpack_data(data)
        # loss
        with tf.GradientTape() as task_tape, tf.GradientTape() as enc_tape:
            # Forward pass
            Xs_enc = self.encoder_src_(Xs, training=True)
            ys_pred = self.task_(Xs_enc, training=True)
            # Reshape predictions to match the label tensor's shape.
            ys_pred = tf.reshape(ys_pred, tf.shape(ys))
            # Compute the loss value; each network also adds its own
            # regularization losses.
            loss = self.task_loss_(ys, ys_pred)
            task_loss = loss + sum(self.task_.losses)
            enc_loss = loss + sum(self.encoder_src_.losses)
        # Compute gradients
        trainable_vars_task = self.task_.trainable_variables
        trainable_vars_enc = self.encoder_src_.trainable_variables
        gradients_task = task_tape.gradient(task_loss, trainable_vars_task)
        gradients_enc = enc_tape.gradient(enc_loss, trainable_vars_enc)
        # Update weights (task and encoder use separate optimizers).
        self.optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
        self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
        # Update metrics
        self.compiled_metrics.update_state(ys, ys_pred)
        self.compiled_loss(ys, ys_pred)
        # Return a dict mapping metric names to current value
        logs = {m.name: m.result() for m in self.metrics}
        return logs

    def train_step(self, data):
        """One adversarial step (or a pretraining step while pretraining)."""
        # Pretrain
        if self.pretrain_:
            return self.pretrain_step(data)
        else:
            # Unpack the data.
            Xs, Xt, ys, yt = self._unpack_data(data)
            # loss
            with tf.GradientTape() as enc_tape, tf.GradientTape() as disc_tape:
                # Forward pass: source features come from the frozen
                # source encoder (or raw inputs if no pretraining).
                if self.pretrain:
                    Xs_enc = self.encoder_src_(Xs, training=False)
                else:
                    # encoder src is not needed if pretrain=False
                    Xs_enc = Xs
                ys_disc = self.discriminator_(Xs_enc, training=True)
                Xt_enc = self.encoder_(Xt, training=True)
                yt_disc = self.discriminator_(Xt_enc, training=True)
                # Compute the loss value: GAN-style cross-entropy, EPS
                # guards against log(0).
                disc_loss = (-tf.math.log(ys_disc + EPS)
                             -tf.math.log(1-yt_disc + EPS))
                enc_loss = -tf.math.log(yt_disc + EPS)
                disc_loss = tf.reduce_mean(disc_loss)
                enc_loss = tf.reduce_mean(enc_loss)
                disc_loss += sum(self.discriminator_.losses)
                enc_loss += sum(self.encoder_.losses)
            # Compute gradients
            trainable_vars_enc = self.encoder_.trainable_variables
            trainable_vars_disc = self.discriminator_.trainable_variables
            gradients_enc = enc_tape.gradient(enc_loss, trainable_vars_enc)
            gradients_disc = disc_tape.gradient(disc_loss, trainable_vars_disc)
            # Update weights
            self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
            self.optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))
            # Update metrics
            # self.compiled_metrics.update_state(ys, ys_pred)
            # self.compiled_loss(ys, ys_pred)
            # Return a dict mapping metric names to current value
            # logs = {m.name: m.result() for m in self.metrics}
            logs = self._get_disc_metrics(ys_disc, yt_disc)
            return logs

    def _get_disc_metrics(self, ys_disc, yt_disc):
        """Compute the discriminator loss and metrics for logging."""
        disc_dict = {}
        disc_dict["disc_loss"] = tf.reduce_mean(
            (-tf.math.log(ys_disc + EPS)
             -tf.math.log(1-yt_disc + EPS))
        )
        # Each metric is averaged over both halves: source labelled 1,
        # target labelled 0.
        for m in self.disc_metrics:
            disc_dict["disc_%s"%m.name] = tf.reduce_mean(0.5 * (
                m(tf.ones_like(ys_disc), ys_disc)+
                m(tf.zeros_like(yt_disc), yt_disc)
            ))
        return disc_dict

    def _initialize_weights(self, shape_X):
        # Init weights encoder (a dummy forward pass builds the layers).
        self(np.zeros((1,) + shape_X))
        # Set same weights to encoder_src
        if self.pretrain:
            # encoder src is not needed if pretrain=False
            self.encoder_(np.zeros((1,) + shape_X))
            self.encoder_src_ = check_network(self.encoder_,
                                              copy=True,
                                              name="encoder_src")

    def transform(self, X, domain="tgt"):
        """
        Return the encoded features of X.
        Parameters
        ----------
        X : array
            input data
        domain: str (default="tgt")
            If domain is ``"tgt"`` or ``"target"``,
            the target encoder is used.
            If domain is ``"src"`` or ``"source"``,
            the source encoder is used.
        Returns
        -------
        X_enc : array
            predictions of encoder network
        """
        if domain in ["tgt", "target"]:
            return self.encoder_.predict(X)
        elif domain in ["src", "source"]:
            return self.encoder_src_.predict(X)
        else:
            raise ValueError("`domain `argument "
                             "should be `tgt` or `src`, "
                             "got, %s"%domain)

    def predict_disc(self, X, domain="tgt"):
        """
        Return predictions of the discriminator on the encoded features.
        Parameters
        ----------
        X : array
            input data
        domain: str (default="tgt")
            If domain is ``"tgt"`` or ``"target"``,
            the target encoder is used.
            If domain is ``"src"`` or ``"source"``,
            the source encoder is used.
        Returns
        -------
        y_disc : array
            predictions of discriminator network
        """
        return self.discriminator_.predict(self.transform(X, domain=domain))

    def predict_task(self, X, domain="tgt"):
        """
        Return predictions of the task on the encoded features.
        Parameters
        ----------
        X : array
            input data
        domain: str (default="tgt")
            If domain is ``"tgt"`` or ``"target"``,
            the target encoder is used.
            If domain is ``"src"`` or ``"source"``,
            the source encoder is used.
        Returns
        -------
        y_task : array
            predictions of task network
        """
        return self.task_.predict(self.transform(X, domain=domain))
| [
"tensorflow.math.log",
"numpy.zeros",
"tensorflow.reduce_mean",
"tensorflow.zeros_like",
"adapt.base.make_insert_doc",
"tensorflow.ones_like",
"numpy.finfo",
"tensorflow.shape",
"adapt.utils.check_network",
"tensorflow.GradientTape"
] | [((563, 616), 'adapt.base.make_insert_doc', 'make_insert_doc', (["['encoder', 'task', 'discriminator']"], {}), "(['encoder', 'task', 'discriminator'])\n", (578, 616), False, 'from adapt.base import BaseAdaptDeep, make_insert_doc\n'), ((157, 177), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (165, 177), True, 'import numpy as np\n'), ((5485, 5502), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5500, 5502), True, 'import tensorflow as tf\n'), ((5517, 5534), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5532, 5534), True, 'import tensorflow as tf\n'), ((9502, 9526), 'numpy.zeros', 'np.zeros', (['((1,) + shape_X)'], {}), '((1,) + shape_X)\n', (9510, 9526), True, 'import numpy as np\n'), ((9747, 9806), 'adapt.utils.check_network', 'check_network', (['self.encoder_'], {'copy': '(True)', 'name': '"""encoder_src"""'}), "(self.encoder_, copy=True, name='encoder_src')\n", (9760, 9806), False, 'from adapt.utils import check_network\n'), ((5777, 5789), 'tensorflow.shape', 'tf.shape', (['ys'], {}), '(ys)\n', (5785, 5789), True, 'import tensorflow as tf\n'), ((7049, 7066), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (7064, 7066), True, 'import tensorflow as tf\n'), ((7080, 7097), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (7095, 7097), True, 'import tensorflow as tf\n'), ((7849, 7874), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['disc_loss'], {}), '(disc_loss)\n', (7863, 7874), True, 'import tensorflow as tf\n'), ((7902, 7926), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['enc_loss'], {}), '(enc_loss)\n', (7916, 7926), True, 'import tensorflow as tf\n'), ((9121, 9151), 'tensorflow.math.log', 'tf.math.log', (['(1 - yt_disc + EPS)'], {}), '(1 - yt_disc + EPS)\n', (9132, 9151), True, 'import tensorflow as tf\n'), ((9689, 9713), 'numpy.zeros', 'np.zeros', (['((1,) + shape_X)'], {}), '((1,) + shape_X)\n', (9697, 9713), True, 'import numpy as np\n'), ((7734, 7764), 
'tensorflow.math.log', 'tf.math.log', (['(1 - yt_disc + EPS)'], {}), '(1 - yt_disc + EPS)\n', (7745, 7764), True, 'import tensorflow as tf\n'), ((7793, 7819), 'tensorflow.math.log', 'tf.math.log', (['(yt_disc + EPS)'], {}), '(yt_disc + EPS)\n', (7804, 7819), True, 'import tensorflow as tf\n'), ((9080, 9106), 'tensorflow.math.log', 'tf.math.log', (['(ys_disc + EPS)'], {}), '(ys_disc + EPS)\n', (9091, 9106), True, 'import tensorflow as tf\n'), ((7677, 7703), 'tensorflow.math.log', 'tf.math.log', (['(ys_disc + EPS)'], {}), '(ys_disc + EPS)\n', (7688, 7703), True, 'import tensorflow as tf\n'), ((9280, 9301), 'tensorflow.ones_like', 'tf.ones_like', (['ys_disc'], {}), '(ys_disc)\n', (9292, 9301), True, 'import tensorflow as tf\n'), ((9331, 9353), 'tensorflow.zeros_like', 'tf.zeros_like', (['yt_disc'], {}), '(yt_disc)\n', (9344, 9353), True, 'import tensorflow as tf\n')] |
import numpy as np
import networkx as nx
from itertools import combinations
# TSP functions
def rearrangeTour(route, start, end):
    """Rotate *route* to begin at index ``start`` and move the element
    originally at index ``end`` to the last position.

    Parameters
    ----------
    route : list
        The tour as a list of node identifiers.
    start : int or None
        Index of the element that should become the first; ``None`` keeps
        the current origin.
    end : int or None
        Index (in the *original* route) of the element that should become
        the last; ``None`` leaves the tail unchanged.

    Returns
    -------
    list
        The rearranged route.

    Bug fixed: the original only captured ``route[end]`` inside the
    ``start is not None`` branch, so calling with ``start=None`` and a
    non-None ``end`` raised ``NameError`` on the unbound ``end_val``.
    """
    if end is not None:
        # Capture the target element before rotation shifts the indices.
        end_val = route[end]
    if start is not None:
        route = route[start:] + route[0:start]
    if end is not None:
        end_idx = route.index(end_val)
        if end_idx < len(route) - 1:
            # Move the end element to the final slot.
            route = route[0:end_idx] + route[end_idx + 1:] + [route[end_idx]]
    return route
def compute_tour_cost(graph, tour, weight='weight', is_cycle=True):
    """Sum the edge weights along *tour*.

    Parameters
    ----------
    graph : networkx-1.x style graph
        Must expose ``graph.edge[u][v][weight]`` adjacency access.
    tour : list
        Ordered node sequence.
    weight : str (default='weight')
        Edge attribute key holding the weight.
    is_cycle : bool (default=True)
        If True, also include the closing edge from the last node back
        to the first (via ``tour[-1] -> tour[0]``).

    Returns
    -------
    number
        Total tour cost.

    Fix: replaced the Python-2-only ``xrange`` with ``range``.
    """
    cost = 0
    # Starting at 0 makes the first iteration use tour[-1] -> tour[0],
    # i.e. the closing edge of the cycle.
    start = 0 if is_cycle else 1
    for idx in range(start, len(tour)):
        u = tour[idx - 1]
        v = tour[idx]
        cost += graph.edge[u][v][weight]
    return cost
def euclidean_fn(x, y, weights=None):
    """Euclidean distance between ``x`` and ``y``.

    If ``weights`` is given, each squared coordinate difference is
    scaled by the corresponding weight before summing.
    """
    squared_diff = (x - y) ** 2
    if weights is not None:
        squared_diff = weights * squared_diff
    return np.sqrt(np.sum(squared_diff))
def construct_tgraph(coordinates, distfn=None, args=()):
    """Build a complete weighted graph over ``coordinates``.

    Parameters
    ----------
    coordinates : sequence of array-like
        One coordinate vector per node.
    distfn : callable, optional
        Distance function ``distfn(a, b, *args)``; defaults to
        ``euclidean_fn``.
    args : tuple
        Extra positional arguments forwarded to ``distfn``.

    Returns
    -------
    networkx.Graph
        Complete graph with node attribute ``value`` (the coordinate)
        and edge attribute ``weight`` (the pairwise distance).

    Fix: replaced the Python-2-only ``xrange`` with ``range``.
    """
    if distfn is None:
        distfn = euclidean_fn
    num_nodes = len(coordinates)
    graph = nx.Graph()
    # Visit every unordered pair once; add_node is idempotent, so
    # re-adding a node only (re)sets its attributes.
    for i in range(num_nodes):
        for j in range(i + 1, num_nodes):
            graph.add_node(i, value=coordinates[i])
            graph.add_node(j, value=coordinates[j])
            dist = distfn(coordinates[i], coordinates[j], *args)
            graph.add_edge(i, j, weight=dist)
    return graph
def two_opt(graph, weight='weight', start=None, end=None):
    """Improve a tour over all graph nodes with the 2-opt heuristic.

    Repeatedly reverses tour segments whose endpoints can be reconnected
    more cheaply, until no improving swap exists. If ``start``/``end``
    are given, those nodes are pinned as the first/last tour positions
    and excluded from the swap range.

    NOTE(review): uses the Python-2-only ``xrange`` builtin and the
    networkx-1.x ``graph.edge`` adjacency API; ``min_cost`` below is
    computed but never used.
    """
    num_nodes = graph.number_of_nodes()
    tour = graph.nodes()
    if (start is not None) | (end is not None):
        # A fixed endpoint shrinks the index range eligible for swapping.
        if start is None:
            start = 0
            begin_i = 0
        else:
            begin_i = 1
        if end is None:
            end = num_nodes - 1
            end_j = num_nodes
        else:
            end_j = num_nodes - 1
        tour = rearrangeTour(tour, start, end)
    else:
        begin_i = 1
        end_j = num_nodes
    start_again = True
    loop_n = 0
    while start_again:
        loop_n += 1
        start_again = False
        for i in xrange(begin_i, end_j - 1):
            for k in xrange(i + 1, end_j):
                # 2-opt swap: compare edges (a,b)+(c,d) against (a,c)+(b,d)
                a, b = tour[i - 1], tour[i]
                c, d = tour[k], tour[(k + 1) % num_nodes]
                if (a == c) or (b == d):
                    continue
                ab_cd_dist = graph.edge[a][b][weight] + graph.edge[c][d][weight]
                ac_bd_dist = graph.edge[a][c][weight] + graph.edge[b][d][weight]
                if ab_cd_dist > ac_bd_dist:
                    # Reversing the segment performs the reconnection.
                    tour[i:k + 1] = reversed(tour[i:k + 1])
                    start_again = True
                if start_again:
                    # Restart the scan from scratch after any improvement.
                    break
            if start_again:
                break
    min_cost = compute_tour_cost(graph, tour)
    return tour
## Global TSP functions
def minEuclid(point, array):
    """Return ``(index, distance)`` of the element of ``array`` closest
    to ``point`` in Euclidean distance.

    ``array`` may be a 2-D array of candidate points (one per row) or a
    single 1-D point, in which case the index is always 0.
    """
    if np.ndim(array) > 1:
        distances = np.sqrt(np.sum((array - point) ** 2, axis=1))
        best = np.argmin(distances)
        return best, min(distances)
    # Single candidate: distance to that one point, index fixed at 0.
    return 0, np.sqrt(np.sum((array - point) ** 2))
def dsearchn(point, array, ignore=None):
    """Return the index of the row of ``array`` nearest to ``point``.

    Parameters
    ----------
    point : array-like
        Query point.
    array : 2-D array-like
        Candidate points, one per row.
    ignore : list of int, optional
        Row indices to exclude from the search.

    Returns
    -------
    int or None
        Index of the nearest non-ignored row, or ``None`` when the
        single available candidate is excluded.

    Fix: the original combined the booleans with bitwise ``&``; the
    logical ``and`` is the correct (and short-circuiting) operator.
    """
    if len(array) == 1 and ignore is not None:
        # Only one candidate and it is excluded: no valid neighbour.
        return None
    dist = np.sqrt(np.sum((array - point) ** 2, 1))
    if ignore is not None:
        # Push excluded rows out of contention with a large sentinel
        # distance (original magic value, kept for identical behavior).
        dist[ignore] = 10000
    idx = dist.argmin()
    return idx
def closestPoints(clusters, pairs):
    """For every cluster pair, find the pair of member points at minimal
    Euclidean distance (computed over the first six coordinates).

    Parameters
    ----------
    clusters : list
        Each entry is either a 2-D array of points (rows) or a single
        1-D point.
    pairs : 2-D int array
        Rows of ``(cluster_index_a, cluster_index_b)``.

    Returns
    -------
    numpy.ndarray of shape (len(pairs), 3)
        Rows ``[index_in_a, index_in_b, min_distance]``.

    Fix: replaced the Python-2-only ``xrange`` with ``range``.
    """
    closest_points = np.array([[0.0] * 3] * len(pairs))
    for i in range(0, len(pairs)):
        # A cluster may be a 2-D array of points or a single 1-D point;
        # 2-D slicing on a 1-D entry raises IndexError.
        try:
            cluster1 = clusters[pairs[i, 0]][:, 0:6]
        except IndexError:
            cluster1 = clusters[pairs[i, 0]][0:6]
        try:
            cluster2 = clusters[pairs[i, 1]][:, 0:6]
        except IndexError:
            cluster2 = clusters[pairs[i, 1]][0:6]
        min_dist = float('inf')
        # Brute-force nearest pair: try every point of cluster1 against
        # its nearest neighbour in cluster2.
        for idx1 in range(0, len(cluster1)):
            point = cluster1[idx1]
            idx2, dist = minEuclid(point, cluster2)
            if dist < min_dist:
                best_idx1 = idx1
                best_idx2 = idx2
                min_dist = dist
        closest_points[i, 0:3] = [best_idx1, best_idx2, min_dist]
    return closest_points
def globalTSP(clusters, home_pose):
    """Order ``clusters`` into a global tour starting and ending at
    ``home_pose``, and pick entry/exit points within each cluster.

    NOTE(review): mutates the caller's ``clusters`` list (appends two
    home-pose pseudo-clusters). The 6 zero-padded values presumably
    mirror the per-point feature layout used elsewhere — confirm.

    Returns ``(gtour, pairs, closest_points, entry_points)`` where
    ``entry_points[i]`` holds the [entry, exit] point indices for the
    i-th tour position.
    """
    # add home pose as new start and end clusters
    clusters.append(np.hstack((home_pose, [0, 0, 0, 0, 0, 0])))
    clusters.append(np.hstack((home_pose, [0, 0, 0, 0, 0, 0])))
    N = len(clusters)
    # get cluster pairings
    combs = combinations(range(0, N), 2)
    pairs = np.array(list(combs))
    # find best distance
    closest_points = closestPoints(clusters, pairs)
    # create graph
    graph = nx.Graph()
    for i in range(0, N):
        graph.add_node(i)
    for i in range(0, len(pairs)):
        graph.add_edge(pairs[i, 0], pairs[i, 1], weight=closest_points[i, 2])
    # solve TSP with the two home-pose clusters pinned as start and end
    gtour = two_opt(graph, start=N - 2, end=N - 1)
    entry_points = np.array([[0, 0]] * N)
    # extract entry and exit points
    for i in range(1, len(gtour) - 1):
        before = gtour[i - 1]
        current = gtour[i]
        after = gtour[i + 1]
        # `pairs` stores (low, high); order the lookup accordingly and pick
        # the column matching the current cluster's side of the pair.
        if before < current:
            entry_idx = np.where((pairs[:, 0] == before) & (pairs[:, 1] == current))[0]
            entry_points[i][0] = closest_points[entry_idx, 1]
        else:
            entry_idx = np.where((pairs[:, 0] == current) & (pairs[:, 1] == before))[0]
            entry_points[i][0] = closest_points[entry_idx, 0]
        if current < after:
            exit_idx = np.where((pairs[:, 0] == current) & (pairs[:, 1] == after))[0]
            exit_pt = closest_points[exit_idx, 0]
            next_entry = closest_points[exit_idx, 1]
        else:
            exit_idx = np.where((pairs[:, 0] == after) & (pairs[:, 1] == current))[0]
            exit_pt = closest_points[exit_idx, 1]
            next_entry = closest_points[exit_idx, 0]
        if exit_pt != entry_points[i][0]:
            entry_points[i][1] = exit_pt
        else:
            # Entry and exit collide: pick the nearest alternative point in
            # the current cluster, excluding the colliding one.
            try:
                exit_pt = dsearchn(clusters[after][int(next_entry), 0:6], clusters[current][:, 0:6],
                                  ignore=[int(exit_pt)])
            except IndexError:
                exit_pt = dsearchn(clusters[after][0:6], clusters[current][:, 0:6], ignore=[int(exit_pt)])
            entry_points[i][1] = exit_pt
    return gtour, pairs, closest_points, entry_points
| [
"numpy.sum",
"numpy.argmin",
"numpy.hstack",
"numpy.shape",
"numpy.where",
"numpy.array",
"networkx.Graph"
] | [((1083, 1093), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1091, 1093), True, 'import networkx as nx\n'), ((4634, 4644), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (4642, 4644), True, 'import networkx as nx\n'), ((4898, 4920), 'numpy.array', 'np.array', (['([[0, 0]] * N)'], {}), '([[0, 0]] * N)\n', (4906, 4920), True, 'import numpy as np\n'), ((3251, 3282), 'numpy.sum', 'np.sum', (['((array - point) ** 2)', '(1)'], {}), '((array - point) ** 2, 1)\n', (3257, 3282), True, 'import numpy as np\n'), ((4291, 4333), 'numpy.hstack', 'np.hstack', (['(home_pose, [0, 0, 0, 0, 0, 0])'], {}), '((home_pose, [0, 0, 0, 0, 0, 0]))\n', (4300, 4333), True, 'import numpy as np\n'), ((4355, 4397), 'numpy.hstack', 'np.hstack', (['(home_pose, [0, 0, 0, 0, 0, 0])'], {}), '((home_pose, [0, 0, 0, 0, 0, 0]))\n', (4364, 4397), True, 'import numpy as np\n'), ((815, 835), 'numpy.sum', 'np.sum', (['((x - y) ** 2)'], {}), '((x - y) ** 2)\n', (821, 835), True, 'import numpy as np\n'), ((874, 904), 'numpy.sum', 'np.sum', (['(weights * (x - y) ** 2)'], {}), '(weights * (x - y) ** 2)\n', (880, 904), True, 'import numpy as np\n'), ((2862, 2877), 'numpy.shape', 'np.shape', (['array'], {}), '(array)\n', (2870, 2877), True, 'import numpy as np\n'), ((2907, 2943), 'numpy.sum', 'np.sum', (['((array - point) ** 2)'], {'axis': '(1)'}), '((array - point) ** 2, axis=1)\n', (2913, 2943), True, 'import numpy as np\n'), ((2960, 2975), 'numpy.argmin', 'np.argmin', (['dist'], {}), '(dist)\n', (2969, 2975), True, 'import numpy as np\n'), ((3020, 3048), 'numpy.sum', 'np.sum', (['((array - point) ** 2)'], {}), '((array - point) ** 2)\n', (3026, 3048), True, 'import numpy as np\n'), ((5135, 5195), 'numpy.where', 'np.where', (['((pairs[:, 0] == before) & (pairs[:, 1] == current))'], {}), '((pairs[:, 0] == before) & (pairs[:, 1] == current))\n', (5143, 5195), True, 'import numpy as np\n'), ((5299, 5359), 'numpy.where', 'np.where', (['((pairs[:, 0] == current) & (pairs[:, 1] == before))'], {}), '((pairs[:, 
0] == current) & (pairs[:, 1] == before))\n', (5307, 5359), True, 'import numpy as np\n'), ((5476, 5535), 'numpy.where', 'np.where', (['((pairs[:, 0] == current) & (pairs[:, 1] == after))'], {}), '((pairs[:, 0] == current) & (pairs[:, 1] == after))\n', (5484, 5535), True, 'import numpy as np\n'), ((5680, 5739), 'numpy.where', 'np.where', (['((pairs[:, 0] == after) & (pairs[:, 1] == current))'], {}), '((pairs[:, 0] == after) & (pairs[:, 1] == current))\n', (5688, 5739), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""plotlib.py: Plots generators."""
__author__ = "<NAME>."
__copyright__ = "Copyright 2021, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "<NAME>."
__email__ = "<EMAIL>"
__status__ = "Research"
import matplotlib
matplotlib.use("Agg")
import cartopy
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
plt.style.use("seaborn-bright")
import numpy as np
import pandas as pd
import matplotlib.ticker as mticker
import datetime as dt
import aacgmv2
from netCDF4 import Dataset, num2date
import matplotlib.dates as mdates
import os
import sys
sys.path.append("models/")
import utils
from fetch_data import Simulation, Riometer
np.set_printoptions(formatter={"float_kind": lambda x:"{:.2f}".format(x)})
def plot_rio_locations():
""" Plot all riometers used for the study """
fig = plt.figure(figsize=(5,9),dpi=150)
ax = plt.axes(projection=ccrs.NearsidePerspective(central_longitude=-110, central_latitude=60))
kwargs = {}
kwargs["edgecolor"] = "black"
kwargs["facecolor"] = "none"
kwargs["alpha"] = 0.4
kwargs["linewidth"] = 0.3
resolution="110m"
ax.add_feature( cartopy.feature.COASTLINE, **kwargs )
ax.set_global()
g = ax.gridlines(draw_labels=True, linestyle="--", linewidth=0.3, alpha=0.4)
g.top_labels = False
g.right_labels = False
g.ylocator = mticker.FixedLocator(range(0,90,10))
g.xlocator = mticker.FixedLocator(range(-180,180,60))
ax.plot(np.ones(90)*-135.3, np.arange(90), color="r", linewidth=0.7, linestyle="--", transform=ccrs.PlateCarree())
ax.text(-135, 40., "GOES-15", fontdict={"color":"darkblue","size":5}, transform=ccrs.PlateCarree())
R = pd.read_csv("config/riometers.csv")
for _, x in R.iterrows():
if x["rio"] != "sps":
ax.scatter(x["lon"], x["lat"], s=0.2, marker="o", color="k", zorder=2, transform=ccrs.PlateCarree())
ax.scatter(x["lon"], x["lat"], s=15, marker="o", color="darkgreen", zorder=2, transform=ccrs.PlateCarree(), alpha=0.5)
ax.text(x["lon"]+0.5, x["lat"]+0.5, x["rio"].upper(), fontdict={"color":"darkblue","size":5}, transform=ccrs.PlateCarree())
ax.set_extent([-145, -55, 40, 90], ccrs.PlateCarree())
fig.savefig("figs/Figure01.png",bbox_inches="tight")
dtime = dt.datetime(2021, 1, 1)
R["lonx"] = np.mod( (R["lon"] + 180), 360 ) - 180
R["mlat"], R["mlon"], _ = aacgmv2.get_aacgm_coord_arr(R.lat.tolist(), R.lonx.tolist(), [300]*len(R), dtime)
R["rio"] = [r.upper() for r in R.rio]
R = R.round(1)
print(R[["rio","lat","lon","mlat","mlon"]].to_latex(index=False, label="tab:01", caption="List of riometers used in this study.",
column_format="ccccc"))
return
def analysis_plot():
def fit_mod(y, x=np.arange(10,500), lim=55):
yf = utils.extrap1d(x[lim:], np.log10(y[lim:]), kind="linear")
return 10**yf(x)
rio = "ott"
date = dt.datetime(2015,3,11,16,20)
sim = Simulation(date, rio)
sim.create_remote_local_dir()
sim.get_bgc_file()
sim.get_flare_file()
_ncb = Dataset(sim._dir_ + "bgc.nc")
_ncf = Dataset(sim._dir_ + "flare.nc")
alt = np.arange(10,500)
fig, axes = plt.subplots(figsize=(5,8),dpi=120,nrows=3,ncols=2,sharey="row")
ax = axes[0,0]
ax.set_ylabel("Height, km")
ax.set_xlabel(r"Density, $m^{-3}$")
ax.semilogx(fit_mod(_ncf.variables["ne"][10,:]), alt, color="r", ls="--", lw=1.)
ax.semilogx(fit_mod(_ncf.variables["ni"][10,:]), np.arange(10,500), color="g", ls="--", lw=1.)
ax.semilogx(fit_mod(_ncf.variables["ni-"][10,:]), np.arange(10,500), color="b", ls="--", lw=1.)
ax.semilogx(fit_mod(_ncf.variables["nix"][10,:]), np.arange(10,500), color="gold", ls="--", lw=1.)
ax.text(0.5,1.05,"2015-03-11 16:02 UT", ha="center", va="center", fontdict={"color":"b"}, transform=ax.transAxes)
ax.text(0.7,.9,"(a.1)", ha="center", va="center", transform=ax.transAxes)
ax.set_ylim(60,120)
ax.set_xlim(1e6,1e12)
ax = axes[0,1]
ax.set_xlabel(r"Density, $m^{-3}$")
ax.semilogx(fit_mod(_ncf.variables["ne"][20,:], lim=60), alt, color="r", ls="--", lw=1., label=r"$n_e$")
ax.semilogx(fit_mod(_ncf.variables["ni"][20,:], lim=60), np.arange(10,500), color="g", ls="--", lw=1., label=r"$n^+$")
ax.semilogx(fit_mod(_ncf.variables["ni-"][20,:], lim=60), np.arange(10,500), color="b", ls="--", lw=1., label=r"$n^-$")
ax.semilogx(fit_mod(_ncf.variables["nix"][20,:], lim=60), np.arange(10,500), color="gold", ls="--", lw=1., label=r"$n_x^+$")
ax.set_xlim(1e6,1e12)
ax.set_ylim(60,120)
ax.text(0.5,1.05,"2015-03-11 16:22 UT", ha="center", va="center", fontdict={"color":"b"}, transform=ax.transAxes)
ax.text(0.7,.9,"(a.2)", ha="center", va="center", transform=ax.transAxes)
ax.legend(bbox_to_anchor=(1.05, 0.8))
ax = axes[1,0]
ax.set_ylabel("Height, km")
ax.set_xlabel(r"Collision Frequency $(\nu)$, $s^{-1}$")
ax.semilogx(fit_mod(_ncb.variables["col.av.sn"][10,:]), alt, color="b", ls="--", lw=1.)
ax.semilogx(_ncb.variables["col.ft"][10,:], alt, color="r", ls="--", lw=1.)
ax.semilogx(_ncb.variables["col.av.cc"][10,:], alt, color="g", ls="--", lw=1.)
ax.semilogx(_ncb.variables["col.av.mb"][10,:], alt, color="g", ls="-.", lw=1.)
ax.text(0.7,.9,"(b.1)", ha="center", va="center", transform=ax.transAxes)
ax.set_ylim(60,120)
ax.set_xlim(1e2,1e8)
ax = axes[1,1]
ax.set_xlabel(r"Collision Frequency $(\nu)$, $s^{-1}$")
ax.semilogx(fit_mod(_ncb.variables["col.av.sn"][10,:]), alt, color="b", ls="--", lw=1., label=r"$\nu_{sn}$")
ax.semilogx(_ncb.variables["col.ft"][20,:], alt, color="r", ls="--", lw=1., label=r"$\nu_{me}$")
ax.semilogx(_ncb.variables["col.av.cc"][20,:], alt, color="g", ls="--", lw=1., label=r"$\nu^{cc}_{av}$")
ax.semilogx(_ncb.variables["col.av.mb"][20,:], alt, color="g", ls="-.", lw=1., label=r"$\nu^{mb}_{av}$")
ax.set_ylim(60,120)
ax.set_xlim(1e2,1e8)
ax.text(0.7,.9,"(b.2)", ha="center", va="center", transform=ax.transAxes)
ax.legend(bbox_to_anchor=(1.05, 0.8))
ax = axes[2,0]
ax.set_ylabel("Height, km")
ax.set_xlabel(r"Absorption $(\beta^h)$, $db/km$")
ax.plot(fit_mod(_ncf.variables["abs.ah.sn.o"][10,:], lim=75), alt, color="r", ls="--", lw=1.)
ax.plot(fit_mod(_ncf.variables["abs.ah.av.cc.o"][10,:], lim=75), alt, color="g", ls="--", lw=1.)
ax.plot(fit_mod(_ncf.variables["abs.ah.av.mb.o"][10,:], lim=75), alt, color="b", ls="--", lw=1.)
ax.plot(fit_mod(_ncf.variables["abs.sw.ft.o"][10,:], lim=75), alt, color="k", ls="-.", lw=1.)
ax.set_ylim(60,120)
ax.set_xlim(0,0.015)
ax.text(0.7,.9,"(c.1)", ha="center", va="center", transform=ax.transAxes)
ax = axes[2,1]
ax.set_xlabel(r"Absorption $(\beta^h)$, $db/km$")
ax.plot(fit_mod(_ncf.variables["abs.ah.sn.o"][22,:]), alt, color="r", ls="--", lw=1., label=r"$\beta_{ah}^h(\nu_{sn})$")
ax.plot(fit_mod(_ncf.variables["abs.ah.av.cc.o"][22,:]), alt, color="g", ls="--", lw=1., label=r"$\beta_{ah}^h(\nu^{cc}_{av})$")
ax.plot(fit_mod(_ncf.variables["abs.ah.av.mb.o"][22,:]), alt, color="b", ls="--", lw=1., label=r"$\beta_{ah}^h(\nu^{mb}_{av})$")
ax.plot(fit_mod(_ncf.variables["abs.sw.ft.o"][22,:]), alt, color="k", ls="-.", lw=1., label=r"$\beta_{ah}^h(\nu_{me})$")
ax.set_ylim(60,120)
ax.set_xlim(0,0.15)
ax.legend(bbox_to_anchor=(1.75, 0.8))
ax.text(0.7,.9,"(c.2)", ha="center", va="center", transform=ax.transAxes)
sim.clear_local_folders()
fig.subplots_adjust(wspace=0.2, hspace=0.3)
fig.savefig("figs/Figure03.png",bbox_inches="tight")
return
def example_event():
fontT = {"family": "serif", "color": "k", "weight": "normal", "size": 8}
font = {"family": "serif", "color": "black", "weight": "normal", "size": 10}
def coloring_axes(ax, atype="left", col="red"):
ax.spines[atype].set_color(col)
ax.tick_params(axis="y", which="both", colors=col)
ax.yaxis.label.set_color(col)
fmt = matplotlib.dates.DateFormatter("%H%M")
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(mdates.MinuteLocator(interval=10))
return ax
def coloring_twaxes(ax, atype="left", col="red", twcol="k"):
ax.spines[atype].set_color(col)
ax.tick_params(axis="y", which="both", colors=twcol)
ax.yaxis.label.set_color(twcol)
fmt = matplotlib.dates.DateFormatter("%H%M")
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(mdates.MinuteLocator(interval=10))
return ax
rio = "ott"
alts = np.arange(10,500)
date = dt.datetime(2015,3,11,16,20)
sim = Simulation(date, rio)
sim.create_remote_local_dir()
sim.get_flare_file()
file_name = Riometer().get_riometer_file(None, date, rio)
_ncf = Dataset(sim._dir_ + "flare.nc")
times = num2date(_ncf.variables["time"][:], _ncf.variables["time"].units, _ncf.variables["time"].calendar,
only_use_cftime_datetimes=False)
times = np.array([x._to_real_datetime() for x in times]).astype("datetime64[ns]")
times = [dt.datetime.utcfromtimestamp(x.astype(int) * 1e-9) for x in times]
riom = utils.read_riometer(date, "ott")
fig, ax = plt.subplots(figsize=(5,5),dpi=120,nrows=1,ncols=1)
sTime,eTime = dt.datetime(2015,3,11,16,10), dt.datetime(2015,3,11,17)
ax = coloring_axes(ax, col="gray")
ax.plot(riom.date, riom.absorp,color="gray",marker="o", markersize=1,ls="None")
ax.set_ylim(1e-6,1e-3)
font["color"] = "gray"
ax.set_ylabel(r"Observation, dB",fontdict=font)
font["color"] = "k"
ax.set_xlabel("Time (UT)",fontdict=font)
ax.grid(False, axis="y")
ax.set_xlim(sTime,eTime)
ax.set_ylim(-.1, 3.)
ax = coloring_twaxes(ax.twinx(), col="gray")
ax.plot(times, _ncf.variables["drap"][:], "darkred", label=r"$\beta_{DRAP2}$", ls="--", lw=0.8)
ax.plot(times, utils.int_absorption(_ncf.variables["abs.ah.sn.o"][:], alts, extpoint=68),
"r", label=r"$\beta_{ah}(\nu_{sn})$", ls="-", lw=1.2)
ax.plot(times, utils.int_absorption(_ncf.variables["abs.ah.av.cc.o"][:], alts, extpoint=64),
"g", label=r"$\beta_{ah}(\nu_{av}^{cc})$", ls="-", lw=0.8)
ax.plot(times, utils.int_absorption(_ncf.variables["abs.ah.av.mb.o"][:], alts, extpoint=64),
"b", label=r"$\beta_{ah}(\nu_{av}^{mb})$", ls="-", lw=1.2)
ax.plot(times, utils.int_absorption(_ncf.variables["abs.sw.ft.o"][:], alts, extpoint=64),
"k", label=r"$\beta_{sw}(\nu_{me})$", ls="-", lw=1.2)
ax.legend(loc=1, scatterpoints=2, ncol=1, fontsize=8, frameon=True)
ax.set_ylim(-.1, 3.)
font["color"] = "k"
ax.set_ylabel("Modeled HF Absorption, dB",fontdict=font)
font["color"] = "darkgreen"
ax.text(0.5,1.05,"Station - OTT, 11 March 2015",horizontalalignment="center",
verticalalignment="center", transform=ax.transAxes,fontdict=font)
font["color"] = "k"
ax.set_xlim(sTime,eTime)
fig.autofmt_xdate(rotation=70,ha="center")
sim.clear_local_folders()
os.remove(file_name)
fig.savefig("figs/Figure04.png",bbox_inches="tight")
return
if __name__ == "__main__":
###############################################
# Run one time plots for final use
###############################################
plot_rio_locations()
#analysis_plot()
#example_event()
pass | [
"os.remove",
"pandas.read_csv",
"cartopy.crs.NearsidePerspective",
"numpy.ones",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"fetch_data.Simulation",
"numpy.arange",
"utils.read_riometer",
"sys.path.append",
"netCDF4.Dataset",
"fetch_data.Riometer",
"matplotlib.dates.DateForma... | [((280, 301), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (294, 301), False, 'import matplotlib\n'), ((376, 407), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-bright"""'], {}), "('seaborn-bright')\n", (389, 407), True, 'import matplotlib.pyplot as plt\n'), ((614, 640), 'sys.path.append', 'sys.path.append', (['"""models/"""'], {}), "('models/')\n", (629, 640), False, 'import sys\n'), ((862, 897), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 9)', 'dpi': '(150)'}), '(figsize=(5, 9), dpi=150)\n', (872, 897), True, 'import matplotlib.pyplot as plt\n'), ((1711, 1746), 'pandas.read_csv', 'pd.read_csv', (['"""config/riometers.csv"""'], {}), "('config/riometers.csv')\n", (1722, 1746), True, 'import pandas as pd\n'), ((2315, 2338), 'datetime.datetime', 'dt.datetime', (['(2021)', '(1)', '(1)'], {}), '(2021, 1, 1)\n', (2326, 2338), True, 'import datetime as dt\n'), ((2940, 2972), 'datetime.datetime', 'dt.datetime', (['(2015)', '(3)', '(11)', '(16)', '(20)'], {}), '(2015, 3, 11, 16, 20)\n', (2951, 2972), True, 'import datetime as dt\n'), ((2979, 3000), 'fetch_data.Simulation', 'Simulation', (['date', 'rio'], {}), '(date, rio)\n', (2989, 3000), False, 'from fetch_data import Simulation, Riometer\n'), ((3094, 3123), 'netCDF4.Dataset', 'Dataset', (["(sim._dir_ + 'bgc.nc')"], {}), "(sim._dir_ + 'bgc.nc')\n", (3101, 3123), False, 'from netCDF4 import Dataset, num2date\n'), ((3135, 3166), 'netCDF4.Dataset', 'Dataset', (["(sim._dir_ + 'flare.nc')"], {}), "(sim._dir_ + 'flare.nc')\n", (3142, 3166), False, 'from netCDF4 import Dataset, num2date\n'), ((3177, 3195), 'numpy.arange', 'np.arange', (['(10)', '(500)'], {}), '(10, 500)\n', (3186, 3195), True, 'import numpy as np\n'), ((3211, 3280), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 8)', 'dpi': '(120)', 'nrows': '(3)', 'ncols': '(2)', 'sharey': '"""row"""'}), "(figsize=(5, 8), dpi=120, nrows=3, ncols=2, 
sharey='row')\n", (3223, 3280), True, 'import matplotlib.pyplot as plt\n'), ((8612, 8630), 'numpy.arange', 'np.arange', (['(10)', '(500)'], {}), '(10, 500)\n', (8621, 8630), True, 'import numpy as np\n'), ((8641, 8673), 'datetime.datetime', 'dt.datetime', (['(2015)', '(3)', '(11)', '(16)', '(20)'], {}), '(2015, 3, 11, 16, 20)\n', (8652, 8673), True, 'import datetime as dt\n'), ((8680, 8701), 'fetch_data.Simulation', 'Simulation', (['date', 'rio'], {}), '(date, rio)\n', (8690, 8701), False, 'from fetch_data import Simulation, Riometer\n'), ((8834, 8865), 'netCDF4.Dataset', 'Dataset', (["(sim._dir_ + 'flare.nc')"], {}), "(sim._dir_ + 'flare.nc')\n", (8841, 8865), False, 'from netCDF4 import Dataset, num2date\n'), ((8878, 9014), 'netCDF4.num2date', 'num2date', (["_ncf.variables['time'][:]", "_ncf.variables['time'].units", "_ncf.variables['time'].calendar"], {'only_use_cftime_datetimes': '(False)'}), "(_ncf.variables['time'][:], _ncf.variables['time'].units, _ncf.\n variables['time'].calendar, only_use_cftime_datetimes=False)\n", (8886, 9014), False, 'from netCDF4 import Dataset, num2date\n'), ((9195, 9227), 'utils.read_riometer', 'utils.read_riometer', (['date', '"""ott"""'], {}), "(date, 'ott')\n", (9214, 9227), False, 'import utils\n'), ((9251, 9306), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 5)', 'dpi': '(120)', 'nrows': '(1)', 'ncols': '(1)'}), '(figsize=(5, 5), dpi=120, nrows=1, ncols=1)\n', (9263, 9306), True, 'import matplotlib.pyplot as plt\n'), ((11094, 11114), 'os.remove', 'os.remove', (['file_name'], {}), '(file_name)\n', (11103, 11114), False, 'import os\n'), ((1512, 1525), 'numpy.arange', 'np.arange', (['(90)'], {}), '(90)\n', (1521, 1525), True, 'import numpy as np\n'), ((2226, 2244), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (2242, 2244), True, 'import cartopy.crs as ccrs\n'), ((2355, 2382), 'numpy.mod', 'np.mod', (["(R['lon'] + 180)", '(360)'], {}), "(R['lon'] + 180, 360)\n", (2361, 2382), True, 'import 
numpy as np\n'), ((2789, 2807), 'numpy.arange', 'np.arange', (['(10)', '(500)'], {}), '(10, 500)\n', (2798, 2807), True, 'import numpy as np\n'), ((3505, 3523), 'numpy.arange', 'np.arange', (['(10)', '(500)'], {}), '(10, 500)\n', (3514, 3523), True, 'import numpy as np\n'), ((3605, 3623), 'numpy.arange', 'np.arange', (['(10)', '(500)'], {}), '(10, 500)\n', (3614, 3623), True, 'import numpy as np\n'), ((3705, 3723), 'numpy.arange', 'np.arange', (['(10)', '(500)'], {}), '(10, 500)\n', (3714, 3723), True, 'import numpy as np\n'), ((4229, 4247), 'numpy.arange', 'np.arange', (['(10)', '(500)'], {}), '(10, 500)\n', (4238, 4247), True, 'import numpy as np\n'), ((4353, 4371), 'numpy.arange', 'np.arange', (['(10)', '(500)'], {}), '(10, 500)\n', (4362, 4371), True, 'import numpy as np\n'), ((4477, 4495), 'numpy.arange', 'np.arange', (['(10)', '(500)'], {}), '(10, 500)\n', (4486, 4495), True, 'import numpy as np\n'), ((8021, 8059), 'matplotlib.dates.DateFormatter', 'matplotlib.dates.DateFormatter', (['"""%H%M"""'], {}), "('%H%M')\n", (8051, 8059), False, 'import matplotlib\n'), ((8411, 8449), 'matplotlib.dates.DateFormatter', 'matplotlib.dates.DateFormatter', (['"""%H%M"""'], {}), "('%H%M')\n", (8441, 8449), False, 'import matplotlib\n'), ((9321, 9353), 'datetime.datetime', 'dt.datetime', (['(2015)', '(3)', '(11)', '(16)', '(10)'], {}), '(2015, 3, 11, 16, 10)\n', (9332, 9353), True, 'import datetime as dt\n'), ((9351, 9379), 'datetime.datetime', 'dt.datetime', (['(2015)', '(3)', '(11)', '(17)'], {}), '(2015, 3, 11, 17)\n', (9362, 9379), True, 'import datetime as dt\n'), ((9933, 10006), 'utils.int_absorption', 'utils.int_absorption', (["_ncf.variables['abs.ah.sn.o'][:]", 'alts'], {'extpoint': '(68)'}), "(_ncf.variables['abs.ah.sn.o'][:], alts, extpoint=68)\n", (9953, 10006), False, 'import utils\n'), ((10093, 10169), 'utils.int_absorption', 'utils.int_absorption', (["_ncf.variables['abs.ah.av.cc.o'][:]", 'alts'], {'extpoint': '(64)'}), "(_ncf.variables['abs.ah.av.cc.o'][:], 
alts, extpoint=64)\n", (10113, 10169), False, 'import utils\n'), ((10261, 10337), 'utils.int_absorption', 'utils.int_absorption', (["_ncf.variables['abs.ah.av.mb.o'][:]", 'alts'], {'extpoint': '(64)'}), "(_ncf.variables['abs.ah.av.mb.o'][:], alts, extpoint=64)\n", (10281, 10337), False, 'import utils\n'), ((10429, 10502), 'utils.int_absorption', 'utils.int_absorption', (["_ncf.variables['abs.sw.ft.o'][:]", 'alts'], {'extpoint': '(64)'}), "(_ncf.variables['abs.sw.ft.o'][:], alts, extpoint=64)\n", (10449, 10502), False, 'import utils\n'), ((925, 994), 'cartopy.crs.NearsidePerspective', 'ccrs.NearsidePerspective', ([], {'central_longitude': '(-110)', 'central_latitude': '(60)'}), '(central_longitude=-110, central_latitude=60)\n', (949, 994), True, 'import cartopy.crs as ccrs\n'), ((1492, 1503), 'numpy.ones', 'np.ones', (['(90)'], {}), '(90)\n', (1499, 1503), True, 'import numpy as np\n'), ((1579, 1597), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (1595, 1597), True, 'import cartopy.crs as ccrs\n'), ((1683, 1701), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (1699, 1701), True, 'import cartopy.crs as ccrs\n'), ((2854, 2871), 'numpy.log10', 'np.log10', (['y[lim:]'], {}), '(y[lim:])\n', (2862, 2871), True, 'import numpy as np\n'), ((8137, 8170), 'matplotlib.dates.MinuteLocator', 'mdates.MinuteLocator', ([], {'interval': '(10)'}), '(interval=10)\n', (8157, 8170), True, 'import matplotlib.dates as mdates\n'), ((8527, 8560), 'matplotlib.dates.MinuteLocator', 'mdates.MinuteLocator', ([], {'interval': '(10)'}), '(interval=10)\n', (8547, 8560), True, 'import matplotlib.dates as mdates\n'), ((8777, 8787), 'fetch_data.Riometer', 'Riometer', ([], {}), '()\n', (8785, 8787), False, 'from fetch_data import Simulation, Riometer\n'), ((1900, 1918), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (1916, 1918), True, 'import cartopy.crs as ccrs\n'), ((2020, 2038), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', 
(2036, 2038), True, 'import cartopy.crs as ccrs\n'), ((2167, 2185), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (2183, 2185), True, 'import cartopy.crs as ccrs\n')] |
import dash
from dash.dependencies import Input, Output, State, ClientsideFunction
import dash_html_components as html
import dash_core_components as dcc
import plotly.graph_objects as go
from skimage import data, img_as_ubyte, segmentation, measure
from dash_canvas.utils import array_to_data_url
import plotly.graph_objects as go
import plot_common
import image_utils
import numpy as np
from nilearn import image
import nibabel as nib
import plotly.express as px
import shape_utils
from sys import exit
import io
import base64
import skimage
import time
import os
DEBUG_MASK = False
DEFAULT_STROKE_COLOR = px.colors.qualitative.Light24[0]
DEFAULT_STROKE_WIDTH = 5
# the scales for the top and side images (they might be different)
# TODO: If the width and height scales are different, strange things happen? For
# example I have observed the masks getting scaled unevenly, maybe because the
# axes are actually scaled evenly (fixed to the x axis?) but then the mask gets
# scaled differently?
hwscales = [(2, 2), (2, 2)]
# the number of dimensions displayed
NUM_DIMS_DISPLAYED = 2 # top and side
# the color of the triangles displaying the slice number
INDICATOR_COLOR = "DarkOrange"
DISPLAY_BG_COLOR = "darkgrey"
# A string, if length non-zero, saves superpixels to this file and then exits
SAVE_SUPERPIXEL = os.environ.get("SAVE_SUPERPIXEL", default="")
# A string, if length non-zero, loads superpixels from this file
LOAD_SUPERPIXEL = os.environ.get("LOAD_SUPERPIXEL", default="")
# If not "0", debugging mode is on.
DEBUG = os.environ.get("DEBUG", default="0") != "0"
def PRINT(*vargs):
if DEBUG:
print(*vargs)
def make_seg_image(img):
""" Segment the image, then find the boundaries, then return an array that
is clear (alpha=0) where there are no boundaries. """
segb = np.zeros_like(img).astype("uint8")
seg = segmentation.slic(
img, start_label=1, multichannel=False, compactness=0.1, n_segments=300
)
# Only keep superpixels with an average intensity greater than threshold
# in order to remove superpixels of the background
superpx_avg = (
np.histogram(
seg.astype(np.float), bins=np.arange(0, 310), weights=img.astype(np.float)
)[0]
/ np.histogram(seg.astype(np.float), bins=np.arange(0, 310))[0]
> 10
)
mask_brain = superpx_avg[seg]
seg[np.logical_not(mask_brain)] = 0
seg, _, _ = segmentation.relabel_sequential(seg)
segb = segmentation.find_boundaries(seg).astype("uint8")
segl = image_utils.label_to_colors(
segb, colormap=["#000000", "#E48F72"], alpha=[0, 128], color_class_offset=0
)
return (segl, seg)
def make_default_figure(
images=[],
stroke_color=DEFAULT_STROKE_COLOR,
stroke_width=DEFAULT_STROKE_WIDTH,
img_args=dict(layer="above"),
width_scale=1,
height_scale=1,
):
fig = plot_common.dummy_fig()
plot_common.add_layout_images_to_fig(
fig,
images,
img_args=img_args,
width_scale=width_scale,
height_scale=height_scale,
update_figure_dims="height",
)
# add an empty image with the same size as the greatest of the already added
# images so that we can add computed masks clientside later
mwidth, mheight = [
max([im[sz] for im in fig["layout"]["images"]]) for sz in ["sizex", "sizey"]
]
fig.add_layout_image(
dict(
source="",
xref="x",
yref="y",
x=0,
y=0,
sizex=mwidth,
sizey=mheight,
sizing="contain",
layer="above",
)
)
fig.update_layout(
{
"dragmode": "drawopenpath",
"newshape.line.color": stroke_color,
"newshape.line.width": stroke_width,
"margin": dict(l=0, r=0, b=0, t=0, pad=4),
"plot_bgcolor": DISPLAY_BG_COLOR,
}
)
return fig
img = image.load_img("assets/BraTS19_2013_10_1_flair.nii")
img = img.get_data().transpose(2, 0, 1)[::-1].astype("float")
img = img_as_ubyte((img - img.min()) / (img.max() - img.min()))
def make_empty_found_segments():
""" fstc_slices is initialized to a bunch of images containing nothing (clear pixels) """
found_segs_tensor = np.zeros_like(img)
# convert to a colored image (but it will just be colored "clear")
fst_colored = image_utils.label_to_colors(
found_segs_tensor,
colormap=["#000000", "#8A2BE2"],
alpha=[0, 128],
color_class_offset=0,
)
fstc_slices = [
[
array_to_data_url(np.moveaxis(fst_colored, 0, j)[i])
for i in range(np.moveaxis(fst_colored, 0, j).shape[0])
]
for j in range(NUM_DIMS_DISPLAYED)
]
return fstc_slices
if len(LOAD_SUPERPIXEL) > 0:
# load partitioned image (to save time)
if LOAD_SUPERPIXEL.endswith(".gz"):
import gzip
with gzip.open(LOAD_SUPERPIXEL) as fd:
dat = np.load(fd)
segl = dat["segl"]
seg = dat["seg"]
else:
dat = np.load(LOAD_SUPERPIXEL)
segl = dat["segl"]
seg = dat["seg"]
else:
# partition image
segl, seg = make_seg_image(img)
if len(SAVE_SUPERPIXEL) > 0:
np.savez(SAVE_SUPERPIXEL, segl=segl, seg=seg)
exit(0)
seg_img = img_as_ubyte(segl)
img_slices, seg_slices = [
[
# top
[array_to_data_url(im[i, :, :]) for i in range(im.shape[0])],
# side
[array_to_data_url(im[:, i, :]) for i in range(im.shape[1])],
]
for im in [img, seg_img]
]
# initially no slices have been found so we don't draw anything
found_seg_slices = make_empty_found_segments()
# store encoded blank slices for each view to save recomputing them for slices
# containing no colored pixels
blank_seg_slices = [found_seg_slices[0][0], found_seg_slices[1][0]]
app = dash.Dash(__name__)
server = app.server
top_fig, side_fig = [
make_default_figure(
images=[img_slices[i][0], seg_slices[i][0]],
width_scale=hwscales[i][1],
height_scale=hwscales[i][0],
)
for i in range(NUM_DIMS_DISPLAYED)
]
default_3d_layout = dict(
scene=dict(
yaxis=dict(visible=False, showticklabels=False, showgrid=False, ticks=""),
xaxis=dict(visible=True, title="Side View Slice Number"),
zaxis=dict(visible=True, title="Top View Slice Number"),
camera=dict(
up=dict(x=0, y=0, z=1),
center=dict(x=0, y=0, z=0),
eye=dict(x=1.25, y=1.25, z=1.25),
),
),
height=800,
)
def make_default_3d_fig():
fig = go.Figure(data=[go.Mesh3d()])
fig.update_layout(**default_3d_layout)
return fig
def make_modal():
with open("assets/howto.md", "r") as f:
readme_md = f.read()
return html.Div(
id="markdown",
className="modal",
style={"display": "none"},
children=[
html.Div(
id="markdown-container",
className="markdown-container",
# style={
# "color": text_color["light"],
# "backgroundColor": card_color["light"],
# },
children=[
html.Div(
className="close-container",
children=html.Button(
"Close",
id="markdown_close",
n_clicks=0,
className="closeButton",
style={"color": "DarkBlue"},
),
),
html.Div(
className="markdown-text", children=dcc.Markdown(readme_md)
),
],
)
],
)
app.layout = html.Div(
id="main",
children=[
# Banner display
html.Div(
id="banner",
children=[
html.Div(
html.H1(
"3D Image Annotation",
id="title",
style={
"color": "#f9f9f9",
"display": "inline-block",
"margin": "0",
},
),
),
html.Div(
html.Button(
"Learn more",
id="learn-more-button",
n_clicks=0,
style={"width": "auto"},
),
),
# Adding the modal content here. It is only shown if the show-modal
# button is pressed
make_modal(),
html.Img(id="logo", src=app.get_asset_url("dash-logo-new.png"),),
],
style={
"display": "flex",
"position": "relative",
"margin": "10px 10px 10px 10px",
},
),
dcc.Store(id="image-slices", data=img_slices),
dcc.Store(id="seg-slices", data=seg_slices),
dcc.Store(
id="drawn-shapes",
data=[
[[] for _ in range(seg_img.shape[i])] for i in range(NUM_DIMS_DISPLAYED)
],
),
dcc.Store(id="slice-number-top", data=0),
dcc.Store(id="slice-number-side", data=0),
dcc.Store(
id="undo-data",
data=dict(
undo_n_clicks=0,
redo_n_clicks=0,
undo_shapes=[],
redo_shapes=[],
# 2 arrays, one for each image-display-graph-{top,side}
# each array contains the number of slices in that image view, and each
# item of this array contains a list of shapes
empty_shapes=[
[[] for _ in range(seg_img.shape[i])]
for i in range(NUM_DIMS_DISPLAYED)
],
),
),
# In this implementation we want to prevent needless passing of the
# large image array from client to server, so when "downloaded-button"
# is clicked, the contents of the "found-segs" store is converted to nii
# imaging format, converted to base64, and stored in the
# "found-image-tensor-data" store. When this store's contents are
# updated, they are stored, decoded, in a Blob and a url is created from
# the contents of this blob and set as the href of "download-link". Then
# somehow we need to simulate a "click" on the "download-link". The
# "found-image-tensor-data" store is necessary because we can only pass
# base64-encoded data between client and server: we let the browser
# handle how data from the browser can be written to the client's
# filesystem.
html.Div(
id="loader-wrapper",
children=[
# required so callback triggered by writing to "found-image-tensor-data"
# has an output
html.Div(id="dummy", style={"display": "none"}),
html.Div(id="dummy2", style={"display": "none"}, children=",0"),
# hidden elements so we can show/hide segmentations on 2d and 3d figures
html.Div(
id="show-hide-seg-2d", children="show", style={"display": "none"}
),
html.Div(
id="show-hide-seg-3d", children="show", style={"display": "none"}
),
dcc.Loading(
id="graph-loading",
type="circle",
children=[
html.A(id="download-link", download="found_image.nii",),
# the image data of the found segmentation is stored
# here before it is downloaded
dcc.Store(id="found-image-tensor-data", data=""),
html.Div(
children=[
html.Button(
"3D View",
id="view-select-button",
n_clicks=0,
style={"width": "25%"},
),
html.Button(
"Hide Segmentation",
id="show-seg-check",
n_clicks=0,
style={"width": "25%"},
),
html.Button(
"Download Brain Volume",
id="download-brain-button",
style={"width": "auto"},
),
html.Button(
"Download Selected Partitions",
id="download-button",
style={"width": "auto"},
),
html.Button(
"Undo",
id="undo-button",
n_clicks=0,
style={"width": "12.5%"},
),
html.Button(
"Redo",
id="redo-button",
n_clicks=0,
style={"width": "12.5%"},
),
],
style={"display": "flex", "margin": "2px 0 2px 0"},
),
html.Div(
id="2D-graphs",
style={
"display": "grid",
"grid-template-columns": "repeat(2,1fr)",
"grid-auto-rows": "auto",
"grid-gap": "0 2px",
},
children=[
html.Div(
[
html.H6(
"Top View", style={"text-align": "center",}
)
],
style={
"grid-column": "1",
"grid-row": "1",
"background-color": DISPLAY_BG_COLOR,
},
),
html.Div(
[
dcc.Graph(
id="image-display-graph-top",
figure=top_fig,
)
],
style={
"grid-column": "1",
"grid-row": "2",
"background-color": DISPLAY_BG_COLOR,
},
),
html.Div(
[
html.Div(
id="image-select-top-display",
style={"width": "125px"},
),
html.Div(
dcc.Slider(
id="image-select-top",
min=0,
max=len(img_slices[0]) - 1,
step=1,
updatemode="drag",
value=len(img_slices[0]) // 2,
),
style={"flex-grow": "1"},
),
],
style={
"grid-column": "1",
"grid-row": "3",
"display": "flex",
"background": "grey",
},
),
html.Div(
[
html.H6(
"Side View", style={"text-align": "center"}
)
],
style={
"grid-column": "2",
"grid-row": "1",
"background-color": DISPLAY_BG_COLOR,
},
),
html.Div(
[
dcc.Graph(
id="image-display-graph-side",
figure=side_fig,
)
],
style={
"grid-column": "2",
"grid-row": "2",
"background-color": DISPLAY_BG_COLOR,
},
),
html.Div(
[
html.Div(
id="image-select-side-display",
style={"width": "125px"},
),
html.Div(
[
dcc.Slider(
id="image-select-side",
min=0,
max=len(img_slices[1]) - 1,
step=1,
updatemode="drag",
value=len(img_slices[1]) // 2,
)
],
style={"flex-grow": "1"},
),
],
style={
"grid-column": "2",
"grid-row": "3",
"display": "flex",
"background": "grey",
},
),
# This store has to be put here so dcc.Loading sees that it is updating.
dcc.Store(id="found-segs", data=found_seg_slices),
],
),
html.Div(
id="3D-graphs",
children=[
dcc.Graph(
"image-display-graph-3d",
figure=make_default_3d_fig(),
config=dict(displayModeBar=False,),
)
],
style={"display": "none"},
),
],
),
],
),
dcc.Store(id="fig-3d-scene", data=default_3d_layout,),
dcc.Store(id="current-render-id", data=0),
dcc.Store(id="last-render-id", data=0),
],
)
app.clientside_callback(
"""
function (show_seg_n_clicks) {
// update show segmentation button
var show_seg_button = document.getElementById("show-seg-check");
if (show_seg_button) {
show_seg_button.textContent = show_seg_n_clicks % 2 ?
"Show Segmentation" :
"Hide Segmentation";
}
var ret = (show_seg_n_clicks % 2) ? "" : "show";
return [ret,ret];
}
""",
[Output("show-hide-seg-2d", "children"), Output("show-hide-seg-3d", "children")],
[Input("show-seg-check", "n_clicks")],
)
app.clientside_callback(
"""
function(
image_select_top_value,
image_select_side_value,
show_hide_seg_2d,
found_segs_data,
image_slices_data,
image_display_top_figure,
image_display_side_figure,
seg_slices_data,
drawn_shapes_data) {{
let show_seg_check = show_hide_seg_2d;
let image_display_figures_ = figure_display_update(
[image_select_top_value,image_select_side_value],
show_seg_check,
found_segs_data,
image_slices_data,
[image_display_top_figure,image_display_side_figure],
seg_slices_data,
drawn_shapes_data),
// slider order reversed because the image slice number is shown on the
// other figure
side_figure = image_display_figures_[1],
top_figure = image_display_figures_[0],
d=3,
sizex, sizey;
// append shapes that show what slice the other figure is in
sizex = top_figure.layout.images[0].sizex,
sizey = top_figure.layout.images[0].sizey;
// tri_shape draws the triangular shape, see assets/app_clientside.js
if (top_figure.layout.shapes) {{
top_figure.layout.shapes=top_figure.layout.shapes.concat([
tri_shape(d/2,sizey*image_select_side_value/found_segs_data[1].length,
d/2,d/2,'right'),
tri_shape(sizex-d/2,sizey*image_select_side_value/found_segs_data[1].length,
d/2,d/2,'left'),
]);
}}
sizex = side_figure.layout.images[0].sizex,
sizey = side_figure.layout.images[0].sizey;
if (side_figure.layout.shapes) {{
side_figure.layout.shapes=side_figure.layout.shapes.concat([
tri_shape(d/2,sizey*image_select_top_value/found_segs_data[0].length,
d/2,d/2,'right'),
tri_shape(sizex-d/2,sizey*image_select_top_value/found_segs_data[0].length,
d/2,d/2,'left'),
]);
}}
// return the outputs
return image_display_figures_.concat([
"Slice: " + (image_select_top_value+1) + " / {num_top_slices}",
"Slice: " + (image_select_side_value+1) + " / {num_side_slices}",
image_select_top_value,
image_select_side_value
]);
}}
""".format(
num_top_slices=len(img_slices[0]), num_side_slices=len(img_slices[1])
),
[
Output("image-display-graph-top", "figure"),
Output("image-display-graph-side", "figure"),
Output("image-select-top-display", "children"),
Output("image-select-side-display", "children"),
Output("slice-number-top", "data"),
Output("slice-number-side", "data"),
],
[
Input("image-select-top", "value"),
Input("image-select-side", "value"),
Input("show-hide-seg-2d", "children"),
Input("found-segs", "data"),
],
[
State("image-slices", "data"),
State("image-display-graph-top", "figure"),
State("image-display-graph-side", "figure"),
State("seg-slices", "data"),
State("drawn-shapes", "data"),
],
)
app.clientside_callback(
"""
function(top_relayout_data,
side_relayout_data,
undo_n_clicks,
redo_n_clicks,
top_slice_number,
side_slice_number,
drawn_shapes_data,
undo_data)
{
// Ignore if "shapes" not in any of the relayout data
let triggered = window.dash_clientside.callback_context.triggered.map(
t => t['prop_id'])[0];
if ((triggered === "image-display-graph-top.relayoutData" && !("shapes" in
top_relayout_data)) || (triggered === "image-display-graph-side.relayoutData"
&& !("shapes" in side_relayout_data))) {
return [window.dash_clientside.no_update,window.dash_clientside.no_update];
}
drawn_shapes_data = json_copy(drawn_shapes_data);
let ret = undo_track_slice_figure_shapes (
[top_relayout_data,side_relayout_data],
["image-display-graph-top.relayoutData",
"image-display-graph-side.relayoutData"],
undo_n_clicks,
redo_n_clicks,
undo_data,
drawn_shapes_data,
[top_slice_number,side_slice_number],
// a function that takes a list of shapes and returns those that we want to
// track (for example if some shapes are to show some attribute but should not
// be tracked by undo/redo)
function (shapes) { return shapes.filter(function (s) {
let ret = true;
try { ret &= (s.fillcolor == "%s"); } catch(err) { ret &= false; }
try { ret &= (s.line.color == "%s"); } catch(err) { ret &= false; }
// return !ret because we don't want to keep the indicators
return !ret;
});
});
undo_data=ret[0];
drawn_shapes_data=ret[1];
return [drawn_shapes_data,undo_data];
}
"""
% ((INDICATOR_COLOR,) * 2),
[Output("drawn-shapes", "data"), Output("undo-data", "data")],
[
Input("image-display-graph-top", "relayoutData"),
Input("image-display-graph-side", "relayoutData"),
Input("undo-button", "n_clicks"),
Input("redo-button", "n_clicks"),
],
[
State("slice-number-top", "data"),
State("slice-number-side", "data"),
State("drawn-shapes", "data"),
State("undo-data", "data"),
],
)
def shapes_to_segs(
    drawn_shapes_data, image_display_top_figure, image_display_side_figure,
):
    """Rasterize the shapes drawn on the two 2D views into a segment tensor.

    For each displayed axis, the per-slice SVG shapes are rendered into a
    binary mask scaled down to the segmentation resolution.  The mask is
    then used either directly (DEBUG_MASK) or to look up which labels of
    the precomputed segmentation ``seg`` it touches; every voxel carrying
    one of those labels is set to 1 in the returned tensor.
    """
    rasterized = np.zeros_like(img)
    figure_dicts = [image_display_top_figure, image_display_side_figure]
    for axis, (figure_dict, (hscale, wscale)) in enumerate(zip(figure_dicts, hwscales)):
        figure = go.Figure(**figure_dict)
        # the first layout image (a brain slice) defines the SVG bounding box
        # that the drawn shapes are rasterized into
        first_layout_image = figure.layout.images[0]
        width = first_layout_image["sizex"]
        height = first_layout_image["sizey"]
        for slice_index in range(seg_img.shape[axis]):
            shape_args = [
                dict(width=width, height=height, shape=drawn)
                for drawn in drawn_shapes_data[axis][slice_index]
            ]
            if shape_args:
                # single label class: the rasterized mask carries value 1
                slice_mask = shape_utils.shapes_to_mask(shape_args, 1)
                # TODO: Maybe there's a more elegant way to downsample the mask?
                np.moveaxis(rasterized, 0, axis)[slice_index, :, :] = slice_mask[
                    ::hscale, ::wscale
                ]
    found_segs_tensor = np.zeros_like(img)
    if DEBUG_MASK:
        found_segs_tensor[rasterized == 1] = 1
    else:
        # select every segment whose label lies beneath the drawn mask
        for label in set(seg[rasterized == 1]):
            found_segs_tensor[seg == label] = 1
    return found_segs_tensor
@app.callback(
    [Output("found-segs", "data"), Output("current-render-id", "data")],
    [Input("drawn-shapes", "data")],
    [
        State("image-display-graph-top", "figure"),
        State("image-display-graph-side", "figure"),
        State("current-render-id", "data"),
    ],
)
def draw_shapes_react(
    drawn_shapes_data,
    image_display_top_figure,
    image_display_side_figure,
    current_render_id,
):
    """Recompute the 2D segment overlays whenever drawn shapes change.

    Rasterizes the drawn shapes into a segment tensor, colors it, encodes
    each slice of every displayed axis as a data URL, and bumps the render
    id so the 3D view knows a fresher result exists.
    """
    required = (
        drawn_shapes_data,
        image_display_top_figure,
        image_display_side_figure,
    )
    if any(value is None for value in required):
        return dash.no_update
    t_start = time.time()
    found_segs_tensor = shapes_to_segs(
        drawn_shapes_data, image_display_top_figure, image_display_side_figure,
    )
    t_segs = time.time()
    PRINT("Time to convert shapes to segments:", t_segs - t_start)
    # convert to a colored image
    fst_colored = image_utils.label_to_colors(
        found_segs_tensor,
        colormap=["#8A2BE2"],
        alpha=[128],
        # label 0 maps to #000000 via no_map_zero, so color classes start at 1
        color_class_offset=1,
        labels_contiguous=True,
        no_map_zero=True,
    )
    t_colored = time.time()
    PRINT("Time to convert from labels to colored image:", t_colored - t_segs)
    fstc_slices = []
    for axis in range(NUM_DIMS_DISPLAYED):
        per_axis = []
        for slice_arr in np.moveaxis(fst_colored, 0, axis):
            # all-zero slices reuse the precomputed blank data URL
            if np.any(slice_arr != 0):
                per_axis.append(array_to_data_url(slice_arr))
            else:
                per_axis.append(blank_seg_slices[axis])
        fstc_slices.append(per_axis)
    t_urls = time.time()
    PRINT("Time to convert to data URLs:", t_urls - t_colored)
    PRINT("Total time to compute 2D annotations:", t_urls - t_start)
    return fstc_slices, current_render_id + 1
def _decode_b64_slice(s):
return base64.b64decode(s.encode())
def slice_image_list_to_ndarray(fstc_slices):
    """Decode a list of base64 PNG data URIs into one ndarray volume.

    Each element of ``fstc_slices`` is a ``data:image/png;base64,...`` URI
    for one slice.  The decoded slices are stacked into a preallocated
    tensor and transposed back to the original (row, col, slice[, channel])
    orientation.

    Returns a 3D array for single-channel (brain) data, 4D otherwise.
    """
    # TODO eventually make it format agnostic, right now we just assume png and
    # strip off length equal to uri_header from the uri string
    uri_header = "data:image/png;base64,"
    # preallocating the final tensor by reading the first image makes converting
    # much faster (because all the images have the same dimensions)
    n_slices = len(fstc_slices)
    first_img = plot_common.str_to_img_ndarrary(
        _decode_b64_slice(fstc_slices[0][len(uri_header) :])
    )
    fstc_ndarray = np.zeros((n_slices,) + first_img.shape, dtype=first_img.dtype)
    PRINT("first_img.dtype", first_img.dtype)
    fstc_ndarray[0] = first_img
    # BUG FIX: enumerate must start at 1 — slot 0 already holds first_img.
    # The old code wrote slice n+1 into slot n, clobbering slot 0 and
    # leaving the final slot all-zero.
    for n, img_slice in enumerate(fstc_slices[1:], start=1):
        img = plot_common.str_to_img_ndarrary(
            _decode_b64_slice(img_slice[len(uri_header) :])
        )
        fstc_ndarray[n] = img
    PRINT("fstc_ndarray.shape", fstc_ndarray.shape)
    # transpose back to original
    if len(fstc_ndarray.shape) == 3:
        # Brain data is lacking the 4th channel dimension
        # Here we allow for this function to also return an array for the 3D brain data
        return fstc_ndarray.transpose((1, 2, 0))
    return fstc_ndarray.transpose((1, 2, 0, 3))
# Converts found slices to nii file and encodes in b64 so it can be downloaded
def save_found_slices(fstc_slices):
    """Serialize the found-segment slices as a base64-encoded NIfTI blob.

    Returns ``None`` when the decoded volume is all zero (nothing was
    annotated), otherwise the b64 string of the .nii bytes.
    """
    # both views describe the same tensor, so the first view suffices
    volume = slice_image_list_to_ndarray(fstc_slices[0])
    # no partitions found -> signal "nothing to download"
    if np.all(volume == 0):
        return None
    # TODO add affine
    # technique for writing nii to bytes from here:
    # https://gist.github.com/arokem/423d915e157b659d37f4aded2747d2b3
    nii_image = nib.Nifti1Image(skimage.img_as_ubyte(volume), affine=None)
    byte_sink = io.BytesIO()
    mapping = nii_image.make_file_map({"image": byte_sink, "header": byte_sink})
    nii_image.to_file_map(mapping)
    return base64.b64encode(byte_sink.getvalue()).decode()
@app.callback(
    Output("found-image-tensor-data", "data"),
    [Input("download-button", "n_clicks"), Input("download-brain-button", "n_clicks")],
    [State("found-segs", "data"), State("image-slices", "data")],
)
def download_button_react(
    download_button_n_clicks,
    download_brain_button_n_clicks,
    found_segs_data,
    brain_data,
):
    """Serve the b64 NIfTI payload for whichever download button fired.

    Returns "" when no button has been pressed yet, the trigger is
    unrecognized, or there is nothing to download.
    """
    ctx = dash.callback_context
    if not ctx.triggered:
        # no download requested yet
        return ""
    trigger_id = ctx.triggered[0]["prop_id"].split(".")[0]
    # map each button to the data it exports
    payload_by_trigger = {
        "download-button": found_segs_data,
        "download-brain-button": brain_data,
    }
    if trigger_id not in payload_by_trigger:
        return ""
    encoded = save_found_slices(payload_by_trigger[trigger_id])
    return "" if encoded is None else encoded
app.clientside_callback(
"""
function (found_image_tensor_data) {
if (found_image_tensor_data.length <= 0) {
return "";
}
// for some reason you can't use the conversion to ascii from base64 directly
// with blob, you have to use the ascii encoded as numbers
const byte_chars = window.atob(found_image_tensor_data);
const byte_numbers = Array.from(byte_chars,(b,i)=>byte_chars.charCodeAt(i));
const byte_array = new Uint8Array(byte_numbers);
let b = new Blob([byte_array],{type: 'application/octet-stream'});
let url = URL.createObjectURL(b);
return url;
}
""",
Output("download-link", "href"),
[Input("found-image-tensor-data", "data")],
)
app.clientside_callback(
"""
function (href) {
if (href != "") {
let download_a=document.getElementById("download-link");
download_a.click();
}
return '';
}
""",
Output("dummy", "children"),
[Input("download-link", "href")],
)
app.clientside_callback(
"""
function (view_select_button_nclicks,current_render_id) {
console.log("view_select_button_nclicks");
console.log(view_select_button_nclicks);
var graphs_2d = document.getElementById("2D-graphs"),
graphs_3d = document.getElementById("3D-graphs"),
ret = "";
// update view select button
var view_select_button = document.getElementById("view-select-button");
if (view_select_button) {
view_select_button.textContent = view_select_button_nclicks % 2 ?
"2D View" :
"3D View";
}
if (graphs_2d && graphs_3d) {
if (view_select_button_nclicks % 2) {
graphs_2d.style.display = "none";
graphs_3d.style.display = "";
ret = "3d shown";
} else {
graphs_2d.style.display = "grid";
graphs_3d.style.display = "none";
ret = "2d shown";
}
}
ret += ","+current_render_id;
return ret;
}
""",
Output("dummy2", "children"),
[Input("view-select-button", "n_clicks")],
[State("current-render-id", "data")],
)
@app.callback(
    Output("fig-3d-scene", "data"),
    [Input("image-display-graph-3d", "relayoutData")],
    [State("fig-3d-scene", "data")],
)
def store_scene_data(graph_3d_relayoutData, last_3d_scene):
    """Persist the 3D graph's camera/scene state across re-renders.

    Merges any relayout keys from the 3D graph into the stored scene dict;
    returns ``dash.no_update`` when there is nothing new to store.
    """
    PRINT("graph_3d_relayoutData", graph_3d_relayoutData)
    if graph_3d_relayoutData is None:
        # guard clause: no relayout event to merge
        return dash.no_update
    # idiomatic dict.update replaces the manual key-by-key copy loop
    last_3d_scene.update(graph_3d_relayoutData)
    return last_3d_scene
@app.callback(
    [Output("image-display-graph-3d", "figure"), Output("last-render-id", "data")],
    [Input("dummy2", "children"), Input("show-hide-seg-3d", "children")],
    [
        State("drawn-shapes", "data"),
        State("fig-3d-scene", "data"),
        State("last-render-id", "data"),
        State("image-display-graph-top", "figure"),
        State("image-display-graph-side", "figure"),
    ],
)
def populate_3d_graph(
    dummy2_children,
    show_hide_seg_3d,
    drawn_shapes_data,
    last_3d_scene,
    last_render_id,
    image_display_top_figure,
    image_display_side_figure,
):
    """Rebuild the 3D mesh figure from the image and the drawn segments.

    Skips rendering unless the 3D view is shown and the drawn shapes have
    changed since the last render (tracked via render ids), except when the
    trigger is the show/hide-3D-annotation toggle, which always re-renders.
    Returns the new 3D figure and the render id it corresponds to.
    """
    # extract which graph shown and the current render id
    # (dummy2_children is "<2d|3d> shown,<render id>" from the view toggle)
    graph_shown, current_render_id = dummy2_children.split(",")
    current_render_id = int(current_render_id)
    start_time = time.time()
    cbcontext = [p["prop_id"] for p in dash.callback_context.triggered][0]
    # check that we're not toggling the display of the 3D annotation;
    # the toggle must always fall through to a re-render
    if cbcontext != "show-hide-seg-3d.children":
        PRINT(
            "might render 3D, current_id: %d, last_id: %d"
            % (current_render_id, last_render_id)
        )
        # bail out when the 3D view is hidden or already up to date
        if graph_shown != "3d shown" or current_render_id == last_render_id:
            if current_render_id == last_render_id:
                PRINT("not rendering 3D because it is up to date")
            return dash.no_update
    PRINT("rendering 3D")
    segs_ndarray = shapes_to_segs(
        drawn_shapes_data, image_display_top_figure, image_display_side_figure,
    ).transpose((1, 2, 0))
    # image, color
    images = [
        (img.transpose((1, 2, 0))[:, :, ::-1], "grey"),
    ]
    if show_hide_seg_3d == "show":
        images.append((segs_ndarray[:, :, ::-1], "purple"))
    data = []
    for im, color in images:
        im = image_utils.combine_last_dim(im)
        try:
            # extract an isosurface mesh at level 0 for each volume
            verts, faces, normals, values = measure.marching_cubes(im, 0, step_size=3)
            x, y, z = verts.T
            i, j, k = faces.T
            data.append(
                go.Mesh3d(x=x, y=y, z=z, color=color, opacity=0.5, i=i, j=j, k=k)
            )
        except RuntimeError:
            # NOTE(review): presumably marching_cubes raises when no surface
            # crosses the level (e.g. empty segment volume) — skip that mesh
            continue
    fig = go.Figure(data=data)
    # restore the camera/scene state saved by store_scene_data
    fig.update_layout(**last_3d_scene)
    end_time = time.time()
    PRINT("serverside 3D generation took: %f seconds" % (end_time - start_time,))
    return (fig, current_render_id)
# ======= Callback for modal popup =======
@app.callback(
    Output("markdown", "style"),
    [Input("learn-more-button", "n_clicks"), Input("markdown_close", "n_clicks")],
)
def update_click_output(button_click, close_click):
    """Show the markdown modal while learn-more has more clicks than close."""
    visible = button_click > close_click
    return {"display": "block" if visible else "none"}
if __name__ == "__main__":
    # start the Dash server when executed as a script (DEBUG is module-level)
    app.run_server(debug=DEBUG)
| [
"numpy.load",
"shape_utils.shapes_to_mask",
"numpy.moveaxis",
"plot_common.add_layout_images_to_fig",
"numpy.arange",
"skimage.segmentation.find_boundaries",
"numpy.zeros_like",
"dash.Dash",
"dash_html_components.Div",
"skimage.segmentation.relabel_sequential",
"numpy.logical_not",
"dash.depen... | [((1315, 1360), 'os.environ.get', 'os.environ.get', (['"""SAVE_SUPERPIXEL"""'], {'default': '""""""'}), "('SAVE_SUPERPIXEL', default='')\n", (1329, 1360), False, 'import os\n'), ((1444, 1489), 'os.environ.get', 'os.environ.get', (['"""LOAD_SUPERPIXEL"""'], {'default': '""""""'}), "('LOAD_SUPERPIXEL', default='')\n", (1458, 1489), False, 'import os\n'), ((3943, 3995), 'nilearn.image.load_img', 'image.load_img', (['"""assets/BraTS19_2013_10_1_flair.nii"""'], {}), "('assets/BraTS19_2013_10_1_flair.nii')\n", (3957, 3995), False, 'from nilearn import image\n'), ((5326, 5344), 'skimage.img_as_ubyte', 'img_as_ubyte', (['segl'], {}), '(segl)\n', (5338, 5344), False, 'from skimage import data, img_as_ubyte, segmentation, measure\n'), ((5880, 5899), 'dash.Dash', 'dash.Dash', (['__name__'], {}), '(__name__)\n', (5889, 5899), False, 'import dash\n'), ((1534, 1570), 'os.environ.get', 'os.environ.get', (['"""DEBUG"""'], {'default': '"""0"""'}), "('DEBUG', default='0')\n", (1548, 1570), False, 'import os\n'), ((1855, 1949), 'skimage.segmentation.slic', 'segmentation.slic', (['img'], {'start_label': '(1)', 'multichannel': '(False)', 'compactness': '(0.1)', 'n_segments': '(300)'}), '(img, start_label=1, multichannel=False, compactness=0.1,\n n_segments=300)\n', (1872, 1949), False, 'from skimage import data, img_as_ubyte, segmentation, measure\n'), ((2415, 2451), 'skimage.segmentation.relabel_sequential', 'segmentation.relabel_sequential', (['seg'], {}), '(seg)\n', (2446, 2451), False, 'from skimage import data, img_as_ubyte, segmentation, measure\n'), ((2524, 2632), 'image_utils.label_to_colors', 'image_utils.label_to_colors', (['segb'], {'colormap': "['#000000', '#E48F72']", 'alpha': '[0, 128]', 'color_class_offset': '(0)'}), "(segb, colormap=['#000000', '#E48F72'], alpha=[0,\n 128], color_class_offset=0)\n", (2551, 2632), False, 'import image_utils\n'), ((2872, 2895), 'plot_common.dummy_fig', 'plot_common.dummy_fig', ([], {}), '()\n', (2893, 2895), False, 
'import plot_common\n'), ((2900, 3058), 'plot_common.add_layout_images_to_fig', 'plot_common.add_layout_images_to_fig', (['fig', 'images'], {'img_args': 'img_args', 'width_scale': 'width_scale', 'height_scale': 'height_scale', 'update_figure_dims': '"""height"""'}), "(fig, images, img_args=img_args,\n width_scale=width_scale, height_scale=height_scale, update_figure_dims=\n 'height')\n", (2936, 3058), False, 'import plot_common\n'), ((4275, 4293), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (4288, 4293), True, 'import numpy as np\n'), ((4383, 4504), 'image_utils.label_to_colors', 'image_utils.label_to_colors', (['found_segs_tensor'], {'colormap': "['#000000', '#8A2BE2']", 'alpha': '[0, 128]', 'color_class_offset': '(0)'}), "(found_segs_tensor, colormap=['#000000',\n '#8A2BE2'], alpha=[0, 128], color_class_offset=0)\n", (4410, 4504), False, 'import image_utils\n'), ((5257, 5302), 'numpy.savez', 'np.savez', (['SAVE_SUPERPIXEL'], {'segl': 'segl', 'seg': 'seg'}), '(SAVE_SUPERPIXEL, segl=segl, seg=seg)\n', (5265, 5302), True, 'import numpy as np\n'), ((5307, 5314), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (5311, 5314), False, 'from sys import exit\n'), ((26657, 26675), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (26670, 26675), True, 'import numpy as np\n'), ((27746, 27764), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (27759, 27764), True, 'import numpy as np\n'), ((28766, 28777), 'time.time', 'time.time', ([], {}), '()\n', (28775, 28777), False, 'import time\n'), ((28913, 28924), 'time.time', 'time.time', ([], {}), '()\n', (28922, 28924), False, 'import time\n'), ((29034, 29184), 'image_utils.label_to_colors', 'image_utils.label_to_colors', (['found_segs_tensor'], {'colormap': "['#8A2BE2']", 'alpha': '[128]', 'color_class_offset': '(1)', 'labels_contiguous': '(True)', 'no_map_zero': '(True)'}), "(found_segs_tensor, colormap=['#8A2BE2'], alpha=\n [128], color_class_offset=1, labels_contiguous=True, 
no_map_zero=True)\n", (29061, 29184), False, 'import image_utils\n'), ((29348, 29359), 'time.time', 'time.time', ([], {}), '()\n', (29357, 29359), False, 'import time\n'), ((29654, 29665), 'time.time', 'time.time', ([], {}), '()\n', (29663, 29665), False, 'import time\n'), ((30479, 30541), 'numpy.zeros', 'np.zeros', (['((n_slices,) + first_img.shape)'], {'dtype': 'first_img.dtype'}), '((n_slices,) + first_img.shape, dtype=first_img.dtype)\n', (30487, 30541), True, 'import numpy as np\n'), ((31537, 31562), 'numpy.all', 'np.all', (['(fstc_ndarray == 0)'], {}), '(fstc_ndarray == 0)\n', (31543, 31562), True, 'import numpy as np\n'), ((31824, 31836), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (31834, 31836), False, 'import io\n'), ((32055, 32096), 'dash.dependencies.Output', 'Output', (['"""found-image-tensor-data"""', '"""data"""'], {}), "('found-image-tensor-data', 'data')\n", (32061, 32096), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((33491, 33522), 'dash.dependencies.Output', 'Output', (['"""download-link"""', '"""href"""'], {}), "('download-link', 'href')\n", (33497, 33522), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((33773, 33800), 'dash.dependencies.Output', 'Output', (['"""dummy"""', '"""children"""'], {}), "('dummy', 'children')\n", (33779, 33800), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((34840, 34868), 'dash.dependencies.Output', 'Output', (['"""dummy2"""', '"""children"""'], {}), "('dummy2', 'children')\n", (34846, 34868), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((34982, 35012), 'dash.dependencies.Output', 'Output', (['"""fig-3d-scene"""', '"""data"""'], {}), "('fig-3d-scene', 'data')\n", (34988, 35012), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((36218, 36229), 'time.time', 'time.time', ([], {}), '()\n', (36227, 36229), False, 'import time\n'), 
((37576, 37596), 'plotly.graph_objects.Figure', 'go.Figure', ([], {'data': 'data'}), '(data=data)\n', (37585, 37596), True, 'import plotly.graph_objects as go\n'), ((37651, 37662), 'time.time', 'time.time', ([], {}), '()\n', (37660, 37662), False, 'import time\n'), ((37845, 37872), 'dash.dependencies.Output', 'Output', (['"""markdown"""', '"""style"""'], {}), "('markdown', 'style')\n", (37851, 37872), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((2367, 2393), 'numpy.logical_not', 'np.logical_not', (['mask_brain'], {}), '(mask_brain)\n', (2381, 2393), True, 'import numpy as np\n'), ((5082, 5106), 'numpy.load', 'np.load', (['LOAD_SUPERPIXEL'], {}), '(LOAD_SUPERPIXEL)\n', (5089, 5106), True, 'import numpy as np\n'), ((21214, 21252), 'dash.dependencies.Output', 'Output', (['"""show-hide-seg-2d"""', '"""children"""'], {}), "('show-hide-seg-2d', 'children')\n", (21220, 21252), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((21254, 21292), 'dash.dependencies.Output', 'Output', (['"""show-hide-seg-3d"""', '"""children"""'], {}), "('show-hide-seg-3d', 'children')\n", (21260, 21292), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((21300, 21335), 'dash.dependencies.Input', 'Input', (['"""show-seg-check"""', '"""n_clicks"""'], {}), "('show-seg-check', 'n_clicks')\n", (21305, 21335), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((23662, 23705), 'dash.dependencies.Output', 'Output', (['"""image-display-graph-top"""', '"""figure"""'], {}), "('image-display-graph-top', 'figure')\n", (23668, 23705), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((23715, 23759), 'dash.dependencies.Output', 'Output', (['"""image-display-graph-side"""', '"""figure"""'], {}), "('image-display-graph-side', 'figure')\n", (23721, 23759), False, 'from dash.dependencies import Input, Output, State, 
ClientsideFunction\n'), ((23769, 23815), 'dash.dependencies.Output', 'Output', (['"""image-select-top-display"""', '"""children"""'], {}), "('image-select-top-display', 'children')\n", (23775, 23815), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((23825, 23872), 'dash.dependencies.Output', 'Output', (['"""image-select-side-display"""', '"""children"""'], {}), "('image-select-side-display', 'children')\n", (23831, 23872), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((23882, 23916), 'dash.dependencies.Output', 'Output', (['"""slice-number-top"""', '"""data"""'], {}), "('slice-number-top', 'data')\n", (23888, 23916), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((23926, 23961), 'dash.dependencies.Output', 'Output', (['"""slice-number-side"""', '"""data"""'], {}), "('slice-number-side', 'data')\n", (23932, 23961), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((23984, 24018), 'dash.dependencies.Input', 'Input', (['"""image-select-top"""', '"""value"""'], {}), "('image-select-top', 'value')\n", (23989, 24018), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((24028, 24063), 'dash.dependencies.Input', 'Input', (['"""image-select-side"""', '"""value"""'], {}), "('image-select-side', 'value')\n", (24033, 24063), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((24073, 24110), 'dash.dependencies.Input', 'Input', (['"""show-hide-seg-2d"""', '"""children"""'], {}), "('show-hide-seg-2d', 'children')\n", (24078, 24110), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((24120, 24147), 'dash.dependencies.Input', 'Input', (['"""found-segs"""', '"""data"""'], {}), "('found-segs', 'data')\n", (24125, 24147), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((24170, 24199), 
'dash.dependencies.State', 'State', (['"""image-slices"""', '"""data"""'], {}), "('image-slices', 'data')\n", (24175, 24199), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((24209, 24251), 'dash.dependencies.State', 'State', (['"""image-display-graph-top"""', '"""figure"""'], {}), "('image-display-graph-top', 'figure')\n", (24214, 24251), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((24261, 24304), 'dash.dependencies.State', 'State', (['"""image-display-graph-side"""', '"""figure"""'], {}), "('image-display-graph-side', 'figure')\n", (24266, 24304), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((24314, 24341), 'dash.dependencies.State', 'State', (['"""seg-slices"""', '"""data"""'], {}), "('seg-slices', 'data')\n", (24319, 24341), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((24351, 24380), 'dash.dependencies.State', 'State', (['"""drawn-shapes"""', '"""data"""'], {}), "('drawn-shapes', 'data')\n", (24356, 24380), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((26091, 26121), 'dash.dependencies.Output', 'Output', (['"""drawn-shapes"""', '"""data"""'], {}), "('drawn-shapes', 'data')\n", (26097, 26121), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((26123, 26150), 'dash.dependencies.Output', 'Output', (['"""undo-data"""', '"""data"""'], {}), "('undo-data', 'data')\n", (26129, 26150), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((26167, 26215), 'dash.dependencies.Input', 'Input', (['"""image-display-graph-top"""', '"""relayoutData"""'], {}), "('image-display-graph-top', 'relayoutData')\n", (26172, 26215), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((26225, 26274), 'dash.dependencies.Input', 'Input', (['"""image-display-graph-side"""', '"""relayoutData"""'], 
{}), "('image-display-graph-side', 'relayoutData')\n", (26230, 26274), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((26284, 26316), 'dash.dependencies.Input', 'Input', (['"""undo-button"""', '"""n_clicks"""'], {}), "('undo-button', 'n_clicks')\n", (26289, 26316), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((26326, 26358), 'dash.dependencies.Input', 'Input', (['"""redo-button"""', '"""n_clicks"""'], {}), "('redo-button', 'n_clicks')\n", (26331, 26358), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((26381, 26414), 'dash.dependencies.State', 'State', (['"""slice-number-top"""', '"""data"""'], {}), "('slice-number-top', 'data')\n", (26386, 26414), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((26424, 26458), 'dash.dependencies.State', 'State', (['"""slice-number-side"""', '"""data"""'], {}), "('slice-number-side', 'data')\n", (26429, 26458), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((26468, 26497), 'dash.dependencies.State', 'State', (['"""drawn-shapes"""', '"""data"""'], {}), "('drawn-shapes', 'data')\n", (26473, 26497), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((26507, 26533), 'dash.dependencies.State', 'State', (['"""undo-data"""', '"""data"""'], {}), "('undo-data', 'data')\n", (26512, 26533), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((26832, 26857), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '(**graph_figure)\n', (26841, 26857), True, 'import plotly.graph_objects as go\n'), ((28107, 28135), 'dash.dependencies.Output', 'Output', (['"""found-segs"""', '"""data"""'], {}), "('found-segs', 'data')\n", (28113, 28135), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((28137, 28172), 'dash.dependencies.Output', 'Output', 
(['"""current-render-id"""', '"""data"""'], {}), "('current-render-id', 'data')\n", (28143, 28172), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((28180, 28209), 'dash.dependencies.Input', 'Input', (['"""drawn-shapes"""', '"""data"""'], {}), "('drawn-shapes', 'data')\n", (28185, 28209), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((28226, 28268), 'dash.dependencies.State', 'State', (['"""image-display-graph-top"""', '"""figure"""'], {}), "('image-display-graph-top', 'figure')\n", (28231, 28268), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((28278, 28321), 'dash.dependencies.State', 'State', (['"""image-display-graph-side"""', '"""figure"""'], {}), "('image-display-graph-side', 'figure')\n", (28283, 28321), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((28331, 28365), 'dash.dependencies.State', 'State', (['"""current-render-id"""', '"""data"""'], {}), "('current-render-id', 'data')\n", (28336, 28365), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((31759, 31793), 'skimage.img_as_ubyte', 'skimage.img_as_ubyte', (['fstc_ndarray'], {}), '(fstc_ndarray)\n', (31779, 31793), False, 'import skimage\n'), ((32103, 32139), 'dash.dependencies.Input', 'Input', (['"""download-button"""', '"""n_clicks"""'], {}), "('download-button', 'n_clicks')\n", (32108, 32139), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((32141, 32183), 'dash.dependencies.Input', 'Input', (['"""download-brain-button"""', '"""n_clicks"""'], {}), "('download-brain-button', 'n_clicks')\n", (32146, 32183), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((32191, 32218), 'dash.dependencies.State', 'State', (['"""found-segs"""', '"""data"""'], {}), "('found-segs', 'data')\n", (32196, 32218), False, 'from dash.dependencies import Input, Output, 
State, ClientsideFunction\n'), ((32220, 32249), 'dash.dependencies.State', 'State', (['"""image-slices"""', '"""data"""'], {}), "('image-slices', 'data')\n", (32225, 32249), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((33529, 33569), 'dash.dependencies.Input', 'Input', (['"""found-image-tensor-data"""', '"""data"""'], {}), "('found-image-tensor-data', 'data')\n", (33534, 33569), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((33807, 33837), 'dash.dependencies.Input', 'Input', (['"""download-link"""', '"""href"""'], {}), "('download-link', 'href')\n", (33812, 33837), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((34875, 34914), 'dash.dependencies.Input', 'Input', (['"""view-select-button"""', '"""n_clicks"""'], {}), "('view-select-button', 'n_clicks')\n", (34880, 34914), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((34922, 34956), 'dash.dependencies.State', 'State', (['"""current-render-id"""', '"""data"""'], {}), "('current-render-id', 'data')\n", (34927, 34956), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((35019, 35066), 'dash.dependencies.Input', 'Input', (['"""image-display-graph-3d"""', '"""relayoutData"""'], {}), "('image-display-graph-3d', 'relayoutData')\n", (35024, 35066), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((35074, 35103), 'dash.dependencies.State', 'State', (['"""fig-3d-scene"""', '"""data"""'], {}), "('fig-3d-scene', 'data')\n", (35079, 35103), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((37202, 37234), 'image_utils.combine_last_dim', 'image_utils.combine_last_dim', (['im'], {}), '(im)\n', (37230, 37234), False, 'import image_utils\n'), ((35448, 35490), 'dash.dependencies.Output', 'Output', (['"""image-display-graph-3d"""', '"""figure"""'], {}), 
"('image-display-graph-3d', 'figure')\n", (35454, 35490), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((35492, 35524), 'dash.dependencies.Output', 'Output', (['"""last-render-id"""', '"""data"""'], {}), "('last-render-id', 'data')\n", (35498, 35524), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((35532, 35559), 'dash.dependencies.Input', 'Input', (['"""dummy2"""', '"""children"""'], {}), "('dummy2', 'children')\n", (35537, 35559), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((35561, 35598), 'dash.dependencies.Input', 'Input', (['"""show-hide-seg-3d"""', '"""children"""'], {}), "('show-hide-seg-3d', 'children')\n", (35566, 35598), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((35615, 35644), 'dash.dependencies.State', 'State', (['"""drawn-shapes"""', '"""data"""'], {}), "('drawn-shapes', 'data')\n", (35620, 35644), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((35654, 35683), 'dash.dependencies.State', 'State', (['"""fig-3d-scene"""', '"""data"""'], {}), "('fig-3d-scene', 'data')\n", (35659, 35683), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((35693, 35724), 'dash.dependencies.State', 'State', (['"""last-render-id"""', '"""data"""'], {}), "('last-render-id', 'data')\n", (35698, 35724), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((35734, 35776), 'dash.dependencies.State', 'State', (['"""image-display-graph-top"""', '"""figure"""'], {}), "('image-display-graph-top', 'figure')\n", (35739, 35776), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((35786, 35829), 'dash.dependencies.State', 'State', (['"""image-display-graph-side"""', '"""figure"""'], {}), "('image-display-graph-side', 'figure')\n", (35791, 35829), False, 'from dash.dependencies import Input, 
Output, State, ClientsideFunction\n'), ((37879, 37917), 'dash.dependencies.Input', 'Input', (['"""learn-more-button"""', '"""n_clicks"""'], {}), "('learn-more-button', 'n_clicks')\n", (37884, 37917), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((37919, 37954), 'dash.dependencies.Input', 'Input', (['"""markdown_close"""', '"""n_clicks"""'], {}), "('markdown_close', 'n_clicks')\n", (37924, 37954), False, 'from dash.dependencies import Input, Output, State, ClientsideFunction\n'), ((1810, 1828), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (1823, 1828), True, 'import numpy as np\n'), ((2463, 2496), 'skimage.segmentation.find_boundaries', 'segmentation.find_boundaries', (['seg'], {}), '(seg)\n', (2491, 2496), False, 'from skimage import data, img_as_ubyte, segmentation, measure\n'), ((4934, 4960), 'gzip.open', 'gzip.open', (['LOAD_SUPERPIXEL'], {}), '(LOAD_SUPERPIXEL)\n', (4943, 4960), False, 'import gzip\n'), ((4986, 4997), 'numpy.load', 'np.load', (['fd'], {}), '(fd)\n', (4993, 4997), True, 'import numpy as np\n'), ((5401, 5431), 'dash_canvas.utils.array_to_data_url', 'array_to_data_url', (['im[i, :, :]'], {}), '(im[i, :, :])\n', (5418, 5431), False, 'from dash_canvas.utils import array_to_data_url\n'), ((5486, 5516), 'dash_canvas.utils.array_to_data_url', 'array_to_data_url', (['im[:, i, :]'], {}), '(im[:, i, :])\n', (5503, 5516), False, 'from dash_canvas.utils import array_to_data_url\n'), ((9052, 9097), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""image-slices"""', 'data': 'img_slices'}), "(id='image-slices', data=img_slices)\n", (9061, 9097), True, 'import dash_core_components as dcc\n'), ((9107, 9150), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""seg-slices"""', 'data': 'seg_slices'}), "(id='seg-slices', data=seg_slices)\n", (9116, 9150), True, 'import dash_core_components as dcc\n'), ((9344, 9384), 'dash_core_components.Store', 'dcc.Store', ([], {'id': 
'"""slice-number-top"""', 'data': '(0)'}), "(id='slice-number-top', data=0)\n", (9353, 9384), True, 'import dash_core_components as dcc\n'), ((9394, 9435), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""slice-number-side"""', 'data': '(0)'}), "(id='slice-number-side', data=0)\n", (9403, 9435), True, 'import dash_core_components as dcc\n'), ((20629, 20681), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""fig-3d-scene"""', 'data': 'default_3d_layout'}), "(id='fig-3d-scene', data=default_3d_layout)\n", (20638, 20681), True, 'import dash_core_components as dcc\n'), ((20692, 20733), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""current-render-id"""', 'data': '(0)'}), "(id='current-render-id', data=0)\n", (20701, 20733), True, 'import dash_core_components as dcc\n'), ((20743, 20781), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""last-render-id"""', 'data': '(0)'}), "(id='last-render-id', data=0)\n", (20752, 20781), True, 'import dash_core_components as dcc\n'), ((37292, 37334), 'skimage.measure.marching_cubes', 'measure.marching_cubes', (['im', '(0)'], {'step_size': '(3)'}), '(im, 0, step_size=3)\n', (37314, 37334), False, 'from skimage import data, img_as_ubyte, segmentation, measure\n'), ((6632, 6643), 'plotly.graph_objects.Mesh3d', 'go.Mesh3d', ([], {}), '()\n', (6641, 6643), True, 'import plotly.graph_objects as go\n'), ((27382, 27423), 'shape_utils.shapes_to_mask', 'shape_utils.shapes_to_mask', (['shape_args', '(1)'], {}), '(shape_args, 1)\n', (27408, 27423), False, 'import shape_utils\n'), ((29494, 29508), 'numpy.any', 'np.any', (['(s != 0)'], {}), '(s != 0)\n', (29500, 29508), True, 'import numpy as np\n'), ((29470, 29490), 'dash_canvas.utils.array_to_data_url', 'array_to_data_url', (['s'], {}), '(s)\n', (29487, 29490), False, 'from dash_canvas.utils import array_to_data_url\n'), ((29555, 29585), 'numpy.moveaxis', 'np.moveaxis', (['fst_colored', '(0)', 'j'], {}), '(fst_colored, 0, j)\n', (29566, 29585), True, 
'import numpy as np\n'), ((37436, 37501), 'plotly.graph_objects.Mesh3d', 'go.Mesh3d', ([], {'x': 'x', 'y': 'y', 'z': 'z', 'color': 'color', 'opacity': '(0.5)', 'i': 'i', 'j': 'j', 'k': 'k'}), '(x=x, y=y, z=z, color=color, opacity=0.5, i=i, j=j, k=k)\n', (37445, 37501), True, 'import plotly.graph_objects as go\n'), ((4600, 4630), 'numpy.moveaxis', 'np.moveaxis', (['fst_colored', '(0)', 'j'], {}), '(fst_colored, 0, j)\n', (4611, 4630), True, 'import numpy as np\n'), ((27661, 27685), 'numpy.moveaxis', 'np.moveaxis', (['masks', '(0)', 'j'], {}), '(masks, 0, j)\n', (27672, 27685), True, 'import numpy as np\n'), ((2173, 2190), 'numpy.arange', 'np.arange', (['(0)', '(310)'], {}), '(0, 310)\n', (2182, 2190), True, 'import numpy as np\n'), ((2284, 2301), 'numpy.arange', 'np.arange', (['(0)', '(310)'], {}), '(0, 310)\n', (2293, 2301), True, 'import numpy as np\n'), ((11125, 11172), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""dummy"""', 'style': "{'display': 'none'}"}), "(id='dummy', style={'display': 'none'})\n", (11133, 11172), True, 'import dash_html_components as html\n'), ((11190, 11253), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""dummy2"""', 'style': "{'display': 'none'}", 'children': '""",0"""'}), "(id='dummy2', style={'display': 'none'}, children=',0')\n", (11198, 11253), True, 'import dash_html_components as html\n'), ((11360, 11435), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""show-hide-seg-2d"""', 'children': '"""show"""', 'style': "{'display': 'none'}"}), "(id='show-hide-seg-2d', children='show', style={'display': 'none'})\n", (11368, 11435), True, 'import dash_html_components as html\n'), ((11491, 11566), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""show-hide-seg-3d"""', 'children': '"""show"""', 'style': "{'display': 'none'}"}), "(id='show-hide-seg-3d', children='show', style={'display': 'none'})\n", (11499, 11566), True, 'import dash_html_components as html\n'), ((4662, 4692), 'numpy.moveaxis', 'np.moveaxis', 
(['fst_colored', '(0)', 'j'], {}), '(fst_colored, 0, j)\n', (4673, 4692), True, 'import numpy as np\n'), ((8016, 8132), 'dash_html_components.H1', 'html.H1', (['"""3D Image Annotation"""'], {'id': '"""title"""', 'style': "{'color': '#f9f9f9', 'display': 'inline-block', 'margin': '0'}"}), "('3D Image Annotation', id='title', style={'color': '#f9f9f9',\n 'display': 'inline-block', 'margin': '0'})\n", (8023, 8132), True, 'import dash_html_components as html\n'), ((8401, 8492), 'dash_html_components.Button', 'html.Button', (['"""Learn more"""'], {'id': '"""learn-more-button"""', 'n_clicks': '(0)', 'style': "{'width': 'auto'}"}), "('Learn more', id='learn-more-button', n_clicks=0, style={\n 'width': 'auto'})\n", (8412, 8492), True, 'import dash_html_components as html\n'), ((7338, 7450), 'dash_html_components.Button', 'html.Button', (['"""Close"""'], {'id': '"""markdown_close"""', 'n_clicks': '(0)', 'className': '"""closeButton"""', 'style': "{'color': 'DarkBlue'}"}), "('Close', id='markdown_close', n_clicks=0, className=\n 'closeButton', style={'color': 'DarkBlue'})\n", (7349, 7450), True, 'import dash_html_components as html\n'), ((7727, 7750), 'dash_core_components.Markdown', 'dcc.Markdown', (['readme_md'], {}), '(readme_md)\n', (7739, 7750), True, 'import dash_core_components as dcc\n'), ((11765, 11819), 'dash_html_components.A', 'html.A', ([], {'id': '"""download-link"""', 'download': '"""found_image.nii"""'}), "(id='download-link', download='found_image.nii')\n", (11771, 11819), True, 'import dash_html_components as html\n'), ((11978, 12026), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""found-image-tensor-data"""', 'data': '""""""'}), "(id='found-image-tensor-data', data='')\n", (11987, 12026), True, 'import dash_core_components as dcc\n'), ((12133, 12220), 'dash_html_components.Button', 'html.Button', (['"""3D View"""'], {'id': '"""view-select-button"""', 'n_clicks': '(0)', 'style': "{'width': '25%'}"}), "('3D View', id='view-select-button', 
n_clicks=0, style={'width':\n '25%'})\n", (12144, 12220), True, 'import dash_html_components as html\n'), ((12429, 12523), 'dash_html_components.Button', 'html.Button', (['"""Hide Segmentation"""'], {'id': '"""show-seg-check"""', 'n_clicks': '(0)', 'style': "{'width': '25%'}"}), "('Hide Segmentation', id='show-seg-check', n_clicks=0, style={\n 'width': '25%'})\n", (12440, 12523), True, 'import dash_html_components as html\n'), ((12731, 12825), 'dash_html_components.Button', 'html.Button', (['"""Download Brain Volume"""'], {'id': '"""download-brain-button"""', 'style': "{'width': 'auto'}"}), "('Download Brain Volume', id='download-brain-button', style={\n 'width': 'auto'})\n", (12742, 12825), True, 'import dash_html_components as html\n'), ((12997, 13092), 'dash_html_components.Button', 'html.Button', (['"""Download Selected Partitions"""'], {'id': '"""download-button"""', 'style': "{'width': 'auto'}"}), "('Download Selected Partitions', id='download-button', style={\n 'width': 'auto'})\n", (13008, 13092), True, 'import dash_html_components as html\n'), ((13264, 13339), 'dash_html_components.Button', 'html.Button', (['"""Undo"""'], {'id': '"""undo-button"""', 'n_clicks': '(0)', 'style': "{'width': '12.5%'}"}), "('Undo', id='undo-button', n_clicks=0, style={'width': '12.5%'})\n", (13275, 13339), True, 'import dash_html_components as html\n'), ((13552, 13627), 'dash_html_components.Button', 'html.Button', (['"""Redo"""'], {'id': '"""redo-button"""', 'n_clicks': '(0)', 'style': "{'width': '12.5%'}"}), "('Redo', id='redo-button', n_clicks=0, style={'width': '12.5%'})\n", (13563, 13627), True, 'import dash_html_components as html\n'), ((19937, 19986), 'dash_core_components.Store', 'dcc.Store', ([], {'id': '"""found-segs"""', 'data': 'found_seg_slices'}), "(id='found-segs', data=found_seg_slices)\n", (19946, 19986), True, 'import dash_core_components as dcc\n'), ((14486, 14537), 'dash_html_components.H6', 'html.H6', (['"""Top View"""'], {'style': "{'text-align': 
'center'}"}), "('Top View', style={'text-align': 'center'})\n", (14493, 14537), True, 'import dash_html_components as html\n'), ((15097, 15152), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""image-display-graph-top"""', 'figure': 'top_fig'}), "(id='image-display-graph-top', figure=top_fig)\n", (15106, 15152), True, 'import dash_core_components as dcc\n'), ((15756, 15821), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""image-select-top-display"""', 'style': "{'width': '125px'}"}), "(id='image-select-top-display', style={'width': '125px'})\n", (15764, 15821), True, 'import dash_html_components as html\n'), ((17139, 17191), 'dash_html_components.H6', 'html.H6', (['"""Side View"""'], {'style': "{'text-align': 'center'}"}), "('Side View', style={'text-align': 'center'})\n", (17146, 17191), True, 'import dash_html_components as html\n'), ((17750, 17807), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""image-display-graph-side"""', 'figure': 'side_fig'}), "(id='image-display-graph-side', figure=side_fig)\n", (17759, 17807), True, 'import dash_core_components as dcc\n'), ((18411, 18477), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""image-select-side-display"""', 'style': "{'width': '125px'}"}), "(id='image-select-side-display', style={'width': '125px'})\n", (18419, 18477), True, 'import dash_html_components as html\n')] |
from typing import Dict, Optional, Any, List, Set, Union
from causaldag import DAG
import itertools as itr
from causaldag.utils.ci_tests import CI_Tester
from causaldag.utils.invariance_tests import InvarianceTester
from causaldag.utils.core_utils import powerset
import random
from causaldag.structure_learning.undirected import threshold_ug
from causaldag import UndirectedGraph
import numpy as np
def perm2dag(perm, ci_tester: CI_Tester, verbose=False, fixed_adjacencies=frozenset(), fixed_gaps=frozenset(),
             node2nbrs=None, older=False):
    """
    Construct the minimal I-MAP of the permutation ``perm``: a DAG whose arcs point from
    earlier to later nodes in the permutation and whose arc set is pruned by CI tests.

    Parameters
    ----------
    perm:
        An ordering of the nodes; arcs are only ever added from earlier to later nodes.
    ci_tester:
        A conditional independence tester with an ``is_ci(i, j, cond_set)`` method.
    verbose:
        If True, print the outcome of each CI test.
    fixed_adjacencies:
        Pairs known to be adjacent; the corresponding arc is added without testing.
    fixed_gaps:
        Pairs known to be non-adjacent; the corresponding arc is skipped without testing.
    node2nbrs:
        Optional mapping from each node to a set of candidate neighbors, used to shrink
        the conditioning set of each test.
    older:
        If True, condition on the full permutation prefix instead of the (typically
        smaller) Markov blanket.

    Returns
    -------
    The estimated DAG over ``set(perm)``.
    """
    d = DAG(nodes=set(perm))
    # All ordered index pairs (i, j) with i < j, in increasing order of j.
    ixs = list(itr.chain.from_iterable(((f, s) for f in range(s)) for s in range(len(perm))))
    for i, j in ixs:
        pi_i, pi_j = perm[i], perm[j]
        # === IF FIXED, DON'T TEST
        if (pi_i, pi_j) in fixed_adjacencies or (pi_j, pi_i) in fixed_adjacencies:
            d.add_arc(pi_i, pi_j)
            continue
        if (pi_i, pi_j) in fixed_gaps or (pi_j, pi_i) in fixed_gaps:
            continue
        # === TEST MARKOV BLANKET (or the restricted / full-prefix conditioning set)
        mb = d.markov_blanket(pi_i) if node2nbrs is None else (set(perm[:j]) - {pi_i}) & (
                node2nbrs[pi_i] | node2nbrs[pi_j])
        mb = mb if not older else set(perm[:j]) - {pi_i}
        is_ci = ci_tester.is_ci(pi_i, pi_j, mb)
        if not is_ci:
            # unsafe=True skips the cycle check; safe here since arcs follow the permutation order
            d.add_arc(pi_i, pi_j, unsafe=True)
        if verbose: print("%s indep of %s given %s: %s" % (pi_i, pi_j, mb, is_ci))
    return d
def perm2dag2(perm, ci_tester, node2nbrs=None):
    """
    Variant of perm2dag: test every ordered pair against the full permutation prefix
    (optionally restricted to candidate neighbors) and keep the arcs whose CI test fails.
    """
    estimated_arcs = set()
    for (u_ix, u), (v_ix, v) in itr.combinations(enumerate(perm), 2):
        cond_set = set(perm[:v_ix])
        cond_set.discard(u)
        if node2nbrs is not None:
            cond_set &= node2nbrs[u] | node2nbrs[v]
        print(u, v, cond_set)  # debug trace of each CI test, preserved from the original
        if not ci_tester.is_ci(u, v, cond_set):
            estimated_arcs.add((u, v))
    return DAG(nodes=set(perm), arcs=estimated_arcs)
def update_minimal_imap(dag, i, j, ci_tester, fixed_adjacencies=frozenset(), fixed_gaps=frozenset()):
    """
    After reversing the covered arc i->j, find which arcs into ``i`` and ``j`` are no
    longer needed according to the CI tester, respecting fixed pairs.

    Parameters
    ----------
    dag:
        The DAG before the reversal; only ``dag.parents_of(i)`` is consulted.
    i, j:
        Endpoints of the covered arc being reversed.
    ci_tester:
        A conditional independence tester with an ``is_ci(i, j, cond_set)`` method.
    fixed_adjacencies:
        Pairs known to be adjacent; never proposed for removal.
    fixed_gaps:
        Pairs known to be non-adjacent; never proposed for removal.

    Returns
    -------
    The set of arcs (parent, i) and (parent, j) that can be removed.
    """
    # Hoisted out of the loop: the union is the same on every iteration.
    fixed_pairs = fixed_adjacencies | fixed_gaps
    removed_arcs = set()
    parents = dag.parents_of(i)
    for parent in parents:
        rest = parents - {parent}
        if (i, parent) not in fixed_pairs and (parent, i) not in fixed_pairs:
            if ci_tester.is_ci(i, parent, rest):
                removed_arcs.add((parent, i))
        if (j, parent) not in fixed_pairs and (parent, j) not in fixed_pairs:
            # after the reversal, i is a parent of j, so it joins the conditioning set
            if ci_tester.is_ci(j, parent, rest | {i}):
                removed_arcs.add((parent, j))
    return removed_arcs
# def min_degree_alg(undirected_graph, ci_tester: CI_Tester, delete=False):
# permutation = []
# curr_undirected_graph = undirected_graph.copy()
# while curr_undirected_graph._nodes:
# min_degree = min(curr_undirected_graph.degrees.values())
# min_degree_nodes = {node for node, degree in curr_undirected_graph.degrees.items() if degree == min_degree}
# k = random.choice(list(min_degree_nodes))
# nbrs_k = curr_undirected_graph._neighbors[k]
#
# curr_undirected_graph.delete_node(k)
# curr_undirected_graph.add_edges_from(itr.combinations(nbrs_k, 2))
# # for nbr1, nbr2 in itr.combinations(nbrs_k, 2):
# # # if not curr_undirected_graph.has_edge(nbr1, nbr2):
# # curr_undirected_graph.add_edge(nbr1, nbr2)
# # elif delete and ci_tester.is_ci(nbr1, nbr2, curr_undirected_graph._nodes - {nbr1, nbr2, k}):
# # curr_undirected_graph.delete_edge(nbr1, nbr2)
#
# permutation.append(k)
#
# return list(reversed(permutation))
def min_degree_alg_amat(amat, rnd=True):
    """
    Compute an elimination ordering of an undirected graph via the minimum-degree
    heuristic, returning its reversal (a topological-style ordering used to seed GSP).

    Parameters
    ----------
    amat:
        Symmetric 0/1 adjacency matrix of the undirected graph (numpy array).
    rnd:
        If True (default), break ties between minimum-degree nodes uniformly at random.
        If False, break ties deterministically by taking the first minimum-degree node.
        (Previously this parameter was accepted but ignored; default behavior unchanged.)

    Returns
    -------
    A list of node indices (into ``amat``), the reverse of the elimination order.
    """
    amat = amat.copy()  # we add fill-in edges; don't mutate the caller's matrix
    remaining_nodes = list(range(amat.shape[0]))
    permutation = []
    while remaining_nodes:
        # === PICK A NODE OF MINIMUM DEGREE
        curr_amat = amat[np.ix_(remaining_nodes, remaining_nodes)]
        degrees = curr_amat.sum(axis=0)
        min_degree_ixs = np.where(degrees == degrees.min())[0]
        min_degree_ix = random.choice(min_degree_ixs) if rnd else min_degree_ixs[0]
        # === CONNECT ITS NEIGHBORS (fill-in edges, as in the standard min-degree algorithm)
        nbrs = {remaining_nodes[ix] for ix in curr_amat[min_degree_ix].nonzero()[0]}
        for i in nbrs:
            amat[i, list(nbrs - {i})] = 1
        # === REMOVE IT
        permutation.append(remaining_nodes[min_degree_ix])
        del remaining_nodes[min_degree_ix]
    return list(reversed(permutation))
# def min_degree_alg2(undirected_graph, ci_tester: CI_Tester, delete=False):
# permutation = []
# curr_undirected_graph = undirected_graph.copy()
# while curr_undirected_graph._nodes:
# nodes2added = {node: set() for node in curr_undirected_graph._nodes}
# nodes2removed = {node: set() for node in curr_undirected_graph._nodes}
# for k in curr_undirected_graph._nodes:
# nbrs_k = curr_undirected_graph._neighbors[k]
# for nbr1, nbr2 in itr.combinations(nbrs_k, 2):
# if not curr_undirected_graph.has_edge(nbr1, nbr2):
# nodes2added[k].add((nbr1, nbr2))
# elif delete and ci_tester.is_ci(nbr1, nbr2, curr_undirected_graph._nodes - {nbr1, nbr2, k}):
# nodes2removed[k].add((nbr1, nbr2))
#
# # === PICK A NODE
# min_added = min(map(len, nodes2added.values()))
# min_added_nodes = {node for node, added in nodes2added.items() if len(added) == min_added}
# removed_node = random.choice(list(min_added_nodes))
#
# # === UPDATE GRAPH
# curr_undirected_graph.delete_node(removed_node)
# curr_undirected_graph.add_edges_from(nodes2added[removed_node])
# if delete:
# curr_undirected_graph.delete_edges_from(nodes2removed[removed_node])
#
# permutation.append(removed_node)
#
# return list(reversed(permutation))
# def min_degree_alg2(undirected_graph):
# amat = undirected_graph.to_amat(sparse=True)
# return list(reversed(list(amd.order(amat))))
def jci_gsp(
        setting_list: List[Dict],
        nodes: set,
        combined_ci_tester: CI_Tester,
        depth: int = 4,
        nruns: int = 5,
        verbose: bool = False,
        initial_undirected: Optional[Union[str, UndirectedGraph]] = 'threshold',
):
    """
    Run GSP on the JCI meta-graph: the observed variables augmented with one context node
    per setting. Returns the estimated DAG over the observed variables together with the
    intervention targets learned for each setting (the non-context children of each
    context node).
    """
    # === ONE CONTEXT NODE PER SETTING; CONTEXT NODES ARE PAIRWISE ADJACENT
    num_settings = len(setting_list)
    context_nodes = ['c%d' % ix for ix in range(num_settings)]
    context_adjacencies = set(itr.permutations(context_nodes, r=2))
    known_iv_adjacencies = set.union(*(
        {('c%s' % ix, target) for target in setting['known_interventions']}
        for ix, setting in enumerate(setting_list)
    ))
    # Context nodes are ordered among themselves (by index) and before all observed nodes.
    fixed_orders = set(itr.combinations(context_nodes, 2)) | set(itr.product(context_nodes, nodes))
    # === SMART INITIALIZATION FROM AN UNDIRECTED SKELETON, IF REQUESTED
    if isinstance(initial_undirected, str):
        if initial_undirected != 'threshold':
            raise ValueError("initial_undirected must be one of 'threshold', or an UndirectedGraph")
        initial_undirected = threshold_ug(set(nodes), combined_ci_tester)
    initial_permutations = []
    if initial_undirected:
        amat = initial_undirected.to_amat()
        for _ in range(nruns):
            initial_permutations.append(context_nodes + min_degree_alg_amat(amat))
    else:
        for _ in range(nruns):
            initial_permutations.append(context_nodes + random.sample(list(nodes), len(nodes)))
    # === RUN GSP JOINTLY OVER OBSERVED AND CONTEXT NODES
    est_meta_dag, _ = gsp(
        nodes | set(context_nodes),
        combined_ci_tester,
        depth=depth,
        nruns=nruns,
        initial_permutations=initial_permutations,
        fixed_orders=fixed_orders,
        fixed_adjacencies=context_adjacencies | known_iv_adjacencies,
        verbose=verbose
    )
    # === READ OFF TARGETS: observed-variable children of each context node
    setting2targets = {}
    for meta_node in est_meta_dag.nodes:
        if isinstance(meta_node, str):
            targets = {ch for ch in est_meta_dag.children_of(meta_node) if not isinstance(ch, str)}
            setting2targets[int(meta_node[1:])] = targets
    learned_intervention_targets = [setting2targets[ix] for ix in range(num_settings)]
    # === DROP THE CONTEXT NODES TO OBTAIN THE DAG OVER OBSERVED VARIABLES
    observed_nodes = {meta_node for meta_node in est_meta_dag.nodes if not isinstance(meta_node, str)}
    est_dag = est_meta_dag.induced_subgraph(observed_nodes)
    return est_dag, learned_intervention_targets
def gsp(
        nodes: set,
        ci_tester: CI_Tester,
        depth: Optional[int] = 4,
        nruns: int = 5,
        verbose: bool = False,
        initial_undirected: Optional[Union[str, UndirectedGraph]] = 'threshold',
        initial_permutations: Optional[List] = None,
        fixed_orders=frozenset(),
        fixed_adjacencies=frozenset(),
        fixed_gaps=frozenset(),
        use_lowest=True,
        max_iters=float('inf'),
        factor=2
) -> (DAG, List[List[Dict]]):
    """
    Use the Greedy Sparsest Permutation (GSP) algorithm to estimate the Markov equivalence class of the data-generating
    DAG.
    Parameters
    ----------
    nodes:
        Labels of nodes in the graph.
    ci_tester:
        A conditional independence tester, which has a method is_ci taking two sets A and B, and a conditioning set C,
        and returns True/False.
    depth:
        Maximum depth in depth-first search. Use None for infinite search depth.
    nruns:
        Number of runs of the algorithm. Each run starts at a random permutation and the sparsest DAG from all
        runs is returned.
    verbose:
        If True, print progress during the search.
    initial_undirected:
        Option to find the starting permutation by using the minimum degree algorithm on an undirected graph that is
        Markov to the data. You can provide the undirected graph yourself, use the default 'threshold' to do simple
        thresholding on the partial correlation matrix, or select 'None' to start at a random permutation.
    initial_permutations:
        A list of initial permutations with which to start the algorithm. This option is helpful when there is
        background knowledge on orders. This option is mutually exclusive with initial_undirected.
    fixed_orders:
        Tuples (i, j) where i is known to come before j.
    fixed_adjacencies:
        Tuples (i, j) where i and j are known to be adjacent.
    fixed_gaps:
        Tuples (i, j) where i and j are known to be non-adjacent.
    use_lowest:
        If True, only move to neighboring DAGs that remove the maximum number of arcs; otherwise move to any
        arc-removing neighbor.
    max_iters:
        Maximum number of consecutive non-improving moves before a run is cut short.
    factor:
        Multiplier on ``nruns`` for the number of candidate starting permutations generated from the
        undirected skeleton (the ``nruns`` sparsest starting DAGs are kept).
    See Also
    --------
    pcalg, igsp, unknown_target_igsp
    Return
    ------
    (est_dag, summaries)
    """
    if initial_permutations is None and isinstance(initial_undirected, str):
        if initial_undirected == 'threshold':
            initial_undirected = threshold_ug(nodes, ci_tester)
        else:
            raise ValueError("initial_undirected must be one of 'threshold', or an UndirectedGraph")
    # === GENERATE CANDIDATE STARTING PERMUTATIONS
    if initial_permutations is None:
        if initial_undirected:
            amat = initial_undirected.to_amat()
            initial_permutations = [min_degree_alg_amat(amat) for _ in range(factor * nruns)]
        else:
            # FIX: wrap in list() — random.sample() no longer accepts sets (removed in Python 3.11)
            initial_permutations = [random.sample(list(nodes), len(nodes)) for _ in range(nruns)]
    # === FIND CANDIDATE STARTING DAGS
    starting_dags = []
    for perm in initial_permutations:
        d = perm2dag(perm, ci_tester, fixed_adjacencies=fixed_adjacencies, fixed_gaps=fixed_gaps)
        starting_dags.append(d)
    starting_dags = sorted(starting_dags, key=lambda d: d.num_arcs)
    summaries = []
    min_dag = None
    for r in range(nruns):
        summary = []
        current_dag = starting_dags[r]
        if verbose: print("=== STARTING DAG:", current_dag)
        # === FIND NEXT POSSIBLE MOVES
        current_covered_arcs = current_dag.reversible_arcs() - fixed_orders
        if verbose: print(f"Current covered arcs: {current_covered_arcs}")
        # FIX: propagate fixed_adjacencies/fixed_gaps so the search never deletes a known-fixed
        # adjacency (previously only the starting DAGs respected these constraints).
        covered_arcs2removed_arcs = [
            (i, j, update_minimal_imap(current_dag, i, j, ci_tester,
                                       fixed_adjacencies=fixed_adjacencies, fixed_gaps=fixed_gaps))
            for i, j in current_covered_arcs
        ]
        covered_arcs2removed_arcs = sorted(covered_arcs2removed_arcs, key=lambda c: len(c[2]))
        # === RECORDS FOR DEPTH-FIRST SEARCH
        all_visited_dags = set()
        trace = []
        graph_counter = 0
        # === SEARCH!
        iters_since_improvement = 0
        while True:
            if iters_since_improvement > max_iters:
                break
            summary.append({'dag': current_dag, 'depth': len(trace), 'num_arcs': len(current_dag.arcs)})
            all_visited_dags.add(frozenset(current_dag.arcs))
            # the list is sorted, so the last entry removes the most arcs
            max_arcs_removed = len(covered_arcs2removed_arcs[-1][2]) if len(covered_arcs2removed_arcs) > 0 else 0
            if (len(covered_arcs2removed_arcs) > 0 and len(trace) != depth) or max_arcs_removed > 0:
                graph_counter += 1
                if max_arcs_removed > 0:  # start over at sparser DAG
                    iters_since_improvement = 0
                    trace = []
                    # === CHOOSE A SPARSER I-MAP
                    if use_lowest:
                        candidate_ixs = [
                            ix for ix, (i, j, rem) in enumerate(covered_arcs2removed_arcs)
                            if len(rem) == max_arcs_removed
                        ]
                    else:
                        candidate_ixs = [ix for ix, (i, j, rem) in enumerate(covered_arcs2removed_arcs) if len(rem) > 0]
                    selected_ix = random.choice(candidate_ixs)
                    # === FIND THE DAG CORRESPONDING TO THE SPARSER IMAP
                    i, j, rem_arcs = covered_arcs2removed_arcs.pop(selected_ix)
                    current_dag.reverse_arc(i, j, unsafe=True)
                    current_dag.remove_arcs(rem_arcs)
                    current_covered_arcs = current_dag.reversible_arcs() - fixed_orders
                    if verbose: print("=== FOUND DAG WITH FEWER ARCS:", current_dag)
                else:
                    # equal-sparsity move: walk the MEC by reversing a random covered arc
                    iters_since_improvement += 1
                    trace.append((current_dag.copy(), current_covered_arcs, covered_arcs2removed_arcs))
                    i, j, _ = covered_arcs2removed_arcs.pop(random.randrange(len(covered_arcs2removed_arcs)))
                    current_dag.reverse_arc(i, j, unsafe=True)
                    current_covered_arcs = current_dag.reversible_arcs() - fixed_orders
                # === FIND NEXT POSSIBLE MOVES
                covered_arcs2removed_arcs = [
                    (i, j, update_minimal_imap(current_dag, i, j, ci_tester,
                                               fixed_adjacencies=fixed_adjacencies, fixed_gaps=fixed_gaps))
                    for i, j in current_covered_arcs
                ]
                covered_arcs2removed_arcs = sorted(covered_arcs2removed_arcs, key=lambda c: len(c[2]))
                # === REMOVE ANY MOVES WHICH LEAD TO ALREADY-EXPLORED DAGS
                current_arcs = frozenset(current_dag.arcs)
                covered_arcs2removed_arcs = [
                    (i, j, rem_arcs) for i, j, rem_arcs in covered_arcs2removed_arcs if
                    current_arcs - {(i, j)} | {(j, i)} - rem_arcs not in all_visited_dags
                ]
            else:
                if len(trace) == 0:  # reached minimum within search depth
                    break
                else:  # backtrack
                    current_dag, current_covered_arcs, covered_arcs2removed_arcs = trace.pop()
        # === END OF RUN
        summaries.append(summary)
        if min_dag is None or len(current_dag.arcs) < len(min_dag.arcs):
            min_dag = current_dag
    return min_dag, summaries
def igsp(
        setting_list: List[Dict],
        nodes: set,
        ci_tester: CI_Tester,
        invariance_tester: InvarianceTester,
        depth: Optional[int] = 4,
        nruns: int = 5,
        initial_undirected: Optional[Union[str, UndirectedGraph]] = 'threshold',
        initial_permutations: Optional[List] = None,
        verbose: bool = False,
):
    """
    Use the Interventional Greedy Sparsest Permutation (IGSP) algorithm to estimate a DAG in the
    I-Markov equivalence class of the data-generating DAG, using data from settings with known
    intervention targets.

    Parameters
    ----------
    setting_list:
        A list of dictionaries with meta-information about each setting; each must contain an
        'interventions' entry listing the setting's known intervention targets.
    nodes:
        Labels of nodes in the graph.
    ci_tester:
        A conditional independence tester with an ``is_ci`` method.
    invariance_tester:
        An invariance tester with an ``is_invariant`` method.
    depth:
        Maximum depth in depth-first search. Use None for infinite search depth.
    nruns:
        Number of runs; the best DAG over all runs (fewest arcs, ties broken by fewest
        I-contradicting arcs) is returned.
    initial_undirected:
        Option to find starting permutations from an undirected graph that is Markov to the data:
        'threshold' for simple partial-correlation thresholding, an UndirectedGraph, or None to
        start at random permutations.
    initial_permutations:
        A list of initial permutations with which to start the algorithm; mutually exclusive with
        initial_undirected.
    verbose:
        If True, print search progress.
    """
    # True when every setting intervenes on at most one node; enables the cheaper
    # single-node I-contradiction test below.
    only_single_node = all(len(setting['interventions']) <= 1 for setting in setting_list)
    interventions2setting_nums = {
        frozenset(setting['interventions']): setting_num
        for setting_num, setting in enumerate(setting_list)
    }
    def _is_icovered(i, j):
        """
        i -> j is I-covered if:
        1) if {i} is an intervention, then f^{i}(j) = f(j)
        """
        setting_num = interventions2setting_nums.get(frozenset({i}))
        # NOTE(review): positional arguments here mean is_invariant(j, context=0, cond_set=setting_num),
        # unlike the keyword calls elsewhere in this function (context=setting_num) — confirm intended.
        if setting_num is not None and not invariance_tester.is_invariant(j, 0, setting_num):
            return False
        # for iv_nodes in samples.keys():
        #     if j in iv_nodes and i not in iv_nodes:
        #         if not _get_is_variant(iv_nodes, i, None):
        #             return False
        return True
    def _reverse_arc(dag, i, j):
        # Reverse the covered arc i->j, then greedily remove parent arcs of i and j that a CI
        # test shows to be unnecessary; return the new DAG along with its I-covered and
        # I-contradicting arcs.
        new_dag = dag.copy()
        parents = dag.parents_of(i)
        new_dag.reverse_arc(i, j)
        if parents:
            for parent in parents:
                rest = parents - {parent}
                if ci_tester.is_ci(i, parent, [*rest, j]):
                    new_dag.remove_arc(parent, i)
                if ci_tester.is_ci(j, parent, cond_set=[*rest]):
                    new_dag.remove_arc(parent, j)
        new_covered_arcs = new_dag.reversible_arcs()
        new_icovered_arcs = [(i, j) for i, j in new_covered_arcs if _is_icovered(i, j)]
        new_contradicting = _get_contradicting_arcs(new_dag)
        return new_dag, new_icovered_arcs, new_contradicting
    def _is_i_contradicting(i, j, dag):
        r"""
        i -> j is I-contradicting if either:
        1) there exists S, a subset of the neighbors of j besides i, s.t. f^I(j|S) = f(j|S) for all I
        containing i but not j
        2) there exists I with j \in I but i \not\in I, s.t. f^I(i|S) \not\eq f(i|S) for all subsets S
        of the neighbors of i besides j
        If there are only single node interventions, this condition becomes:
        1) {i} \in I and f^{i}(j) = f(j)
        or
        2) {j} \in I and f^{j}(i) \neq f(i)
        """
        if only_single_node:
            setting_num_i = interventions2setting_nums.get(frozenset({i}))
            if setting_num_i is not None and invariance_tester.is_invariant(j, context=setting_num_i):
                return True
            setting_num_j = interventions2setting_nums.get(frozenset({j}))
            if setting_num_j is not None and not invariance_tester.is_invariant(i, context=setting_num_j):
                return True
            return False
        else:
            # === TEST CONDITION 1
            neighbors_j = dag.neighbors_of(j) - {i}
            for s in powerset(neighbors_j):
                for setting_num, setting in enumerate(setting_list):
                    if i in setting['interventions'] and j not in setting['interventions']:
                        if not invariance_tester.is_invariant(j, context=setting_num, cond_set=s):
                            return True
            # === TEST CONDITION 2
            neighbors_i = dag.neighbors_of(i) - {j}
            for setting_num, setting in enumerate(setting_list):
                if j in setting['interventions'] and i not in setting['interventions']:
                    # NOTE(review): the name suggests "varies", but this checks is_invariant over
                    # all subsets — verify against condition (2) in the docstring above.
                    i_always_varies = all(
                        invariance_tester.is_invariant(i, context=setting_num, cond_set=s) for s in
                        powerset(neighbors_i)
                    )
                    if i_always_varies: return True
            return False
    def _get_contradicting_arcs(dag):
        """
        Return the set of I-contradicting arcs in the DAG dag
        """
        contradicting_arcs = {(i, j) for i, j in dag.arcs if _is_icovered(i, j) and _is_i_contradicting(i, j, dag)}
        return contradicting_arcs
    summaries = []
    # === LIST OF DAGS FOUND BY EACH RUN
    finishing_dags = []
    if initial_permutations is None and isinstance(initial_undirected, str):
        if initial_undirected == 'threshold':
            initial_undirected = threshold_ug(nodes, ci_tester)
        else:
            raise ValueError("initial_undirected must be one of 'threshold', or an UndirectedGraph")
    # === DO MULTIPLE RUNS
    for r in range(nruns):
        summary = []
        # === STARTING VALUES
        if initial_permutations is not None:
            starting_perm = initial_permutations[r]
        elif initial_undirected:
            starting_perm = min_degree_alg_amat(initial_undirected.to_amat())
        else:
            starting_perm = random.sample(nodes, len(nodes))
        current_dag = perm2dag(starting_perm, ci_tester)
        if verbose: print("=== STARTING RUN %s/%s" % (r + 1, nruns))
        current_covered_arcs = current_dag.reversible_arcs()
        current_icovered_arcs = [(i, j) for i, j in current_covered_arcs if _is_icovered(i, j)]
        current_contradicting = _get_contradicting_arcs(current_dag)
        next_dags = [_reverse_arc(current_dag, i, j) for i, j in current_icovered_arcs]
        random.shuffle(next_dags)
        # === RECORDS FOR DEPTH-FIRST SEARCH
        all_visited_dags = set()
        trace = []
        # best (DAG, contradicting-arcs) pair found so far in this run
        min_dag_run = (current_dag, current_contradicting)
        # === SEARCH
        while True:
            summary.append({
                'dag': current_dag,
                'num_arcs': len(current_dag.arcs),
                'num_contradicting': len(current_contradicting)
            })
            all_visited_dags.add(frozenset(current_dag.arcs))
            lower_dags = [
                (d, icovered_arcs, contradicting_arcs)
                for d, icovered_arcs, contradicting_arcs in next_dags
                if len(d.arcs) < len(current_dag.arcs)
            ]
            if verbose:
                desc = f'({len(current_dag.arcs)} arcs'
                desc += f', I-covered: {current_icovered_arcs}'
                desc += f', I-contradicting: {current_contradicting})'
                print('-' * len(trace), current_dag, desc)
            if (len(next_dags) > 0 and len(trace) != depth) or len(lower_dags) > 0:
                if len(lower_dags) > 0:  # restart at a lower DAG
                    all_visited_dags = set()
                    trace = []
                    current_dag, current_icovered_arcs, current_contradicting = lower_dags.pop()
                    min_dag_run = (current_dag, current_contradicting)
                    if verbose: print(f"FOUND DAG WITH {len(current_dag.arcs)}) ARCS: {current_dag}")
                else:
                    trace.append((current_dag, current_icovered_arcs, current_contradicting))
                    current_dag, current_icovered_arcs, current_contradicting = next_dags.pop()
                    if len(current_contradicting) < len(min_dag_run[1]):
                        min_dag_run = (current_dag, current_contradicting)
                        if verbose:
                            print(f"FOUND DAG WITH {current_contradicting} CONTRADICTING ARCS: {current_dag}")
                next_dags = [_reverse_arc(current_dag, i, j) for i, j in current_icovered_arcs]
                next_dags = [
                    (d, icovered_arcs, contradicting_arcs)
                    for d, icovered_arcs, contradicting_arcs in next_dags
                    if frozenset(d.arcs) not in all_visited_dags
                ]
                random.shuffle(next_dags)
            # === DEAD END
            else:
                if len(trace) == 0:
                    break
                else:  # len(lower_dags) == 0, len(next_dags) > 0, len(trace) == depth
                    current_dag, current_icovered_arcs, current_contradicting = trace.pop()
        # === END OF RUN
        summaries.append(summary)
        finishing_dags.append(min_dag_run)
    min_dag = min(finishing_dags, key=lambda dag_n: (len(dag_n[0].arcs), len(dag_n[1])))
    # print(min_dag)
    return min_dag[0]
def is_icovered(
        setting_list: List[Dict],
        i: int,
        j: int,
        dag: DAG,
        invariance_tester: InvarianceTester,
):
    """
    Tell whether the edge i->j is I-covered with respect to the invariance tests.

    The edge is I-covered when, in every setting whose intervention set contains i, the
    conditional distribution of j given its parents varies between the observational and
    interventional data.

    Parameters
    ----------
    setting_list:
        A list of dictionaries that provide meta-information about each setting.
        The first setting must be observational.
    i:
        Source of the edge being tested.
    j:
        Target of the edge being tested.
    dag:
        The DAG supplying the parent set of j.
    invariance_tester:
        Tester with an ``is_invariant(node, context, cond_set)`` method.
    """
    cond_set = list(dag.parents_of(j))
    for context, setting in enumerate(setting_list):
        if i not in setting['interventions']:
            continue
        if invariance_tester.is_invariant(j, context=context, cond_set=cond_set):
            return False
    return True
def unknown_target_igsp(
    setting_list: List[Dict],
    nodes: set,
    ci_tester: CI_Tester,
    invariance_tester: InvarianceTester,
    depth: Optional[int] = 4,
    nruns: int = 5,
    initial_undirected: Optional[Union[str, UndirectedGraph]] = 'threshold',
    initial_permutations: Optional[List] = None,
    verbose: bool = False,
    use_lowest=True,
    tup_score=True
) -> (DAG, List[Set[int]]):
    """
    Use the Unknown Target Interventional Greedy Sparsest Permutation algorithm to estimate a DAG in the I-MEC of the
    data-generating DAG.

    Parameters
    ----------
    setting_list:
        A list of dictionaries that provide meta-information about each non-observational setting.
        Each dictionary must carry a 'known_interventions' collection.
    nodes:
        Nodes in the graph.
    ci_tester:
        A conditional independence tester object, which has a method is_ci taking two sets A and B, and a conditioning
        set C, and returns True/False.
    invariance_tester:
        An invariance tester object, which has a method is_invariant taking a node, two settings, and a conditioning
        set C, and returns True/False.
    depth:
        Maximum depth in depth-first search. Use None for infinite search depth.
    nruns:
        Number of runs of the algorithm. Each run starts at a random permutation and the sparsest DAG from all
        runs is returned.
    initial_undirected:
        Option to find the starting permutation by using the minimum degree algorithm on an undirected graph that is
        Markov to the data. You can provide the undirected graph yourself, use the default 'threshold' to do simple
        thresholding on the partial correlation matrix, or select 'None' to start at a random permutation.
    initial_permutations:
        A list of initial permutations with which to start the algorithm. This option is helpful when there is
        background knowledge on orders. This option is mutually exclusive with initial_undirected.
    verbose:
        If True, print progress information at every search step.
    use_lowest:
        If True, each restart descends to the lowest-scoring candidate DAG; otherwise the first candidate is taken.
    tup_score:
        If True, a DAG's score is the tuple (number of arcs, number of variant (setting, node) pairs), compared
        lexicographically; if False, the score is their sum.

    Returns
    -------
    (DAG, List[Set[int]])
        The lowest-scoring DAG found over all runs, and the learned intervention-target set for each setting.
    """
    def _is_icovered(i, j, dag):
        """
        Check if the edge i->j is I-covered in the DAG dag
        """
        parents_j = frozenset(dag.parents_of(j))
        for setting_num, setting in enumerate(setting_list):
            if i in setting['known_interventions']:
                if invariance_tester.is_invariant(j, context=setting_num, cond_set=parents_j):
                    return False
        return True

    def _get_variants(dag):
        """
        Count the number of variances for the DAG dag

        Returns the set of (setting_num, node, parents) triples for which the
        node's conditional distribution is NOT invariant in that setting.
        """
        variants = set()
        for i in dag.nodes:
            parents_i = frozenset(dag.parents_of(i))
            for setting_num, setting in enumerate(setting_list):
                if not invariance_tester.is_invariant(i, context=setting_num, cond_set=parents_i):
                    variants.add((setting_num, i, parents_i))
        return variants

    def _reverse_arc_igsp(dag, i_covered_arcs, i, j):
        """
        Return the DAG that comes from reversing the arc i->j, as well as its I-covered arcs and its score
        """
        new_dag = dag.copy()
        parents = dag.parents_of(i)
        new_dag.reverse_arc(i, j)
        # After the reversal, drop any parent arc that a CI test now shows to be redundant.
        if parents:
            for parent in parents:
                rest = parents - {parent}
                if ci_tester.is_ci(i, parent, [*rest, j]):
                    new_dag.remove_arc(parent, i)
                if ci_tester.is_ci(j, parent, cond_set=[*rest]):
                    new_dag.remove_arc(parent, j)
        # new_i_covered_arcs = i_covered_arcs.copy() - dag.incident_arcs(i) - dag.incident_arcs(j)
        # for k, l in new_dag.incident_arcs(i) | new_dag.incident_arcs(j):
        #     if new_dag.parents_of(k) == new_dag.parents_of(l) - {k} and _is_icovered(i, j, dag):
        #         new_i_covered_arcs.add((k, l))
        new_covered_arcs = new_dag.reversible_arcs()
        new_i_covered_arcs = [(i, j) for i, j in new_covered_arcs if _is_icovered(i, j, new_dag)]
        variants = _get_variants(new_dag)
        new_score = len(new_dag.arcs) + len(variants) if not tup_score else (len(new_dag.arcs), len(variants))
        # Every variant triple marks its node as an intervention target of that setting.
        intervention_targets = [set() for _ in range(len(setting_list))]
        for setting_num, i, parents_i in variants:
            intervention_targets[setting_num].add(i)
        return new_dag, new_i_covered_arcs, new_score, intervention_targets

    # === MINIMUM DAG AND SCORE FOUND BY ANY RUN
    min_dag = None
    min_score = float('inf') if not tup_score else (float('inf'), float('inf'))
    learned_intervention_targets = None
    if initial_permutations is None and isinstance(initial_undirected, str):
        if initial_undirected == 'threshold':
            initial_undirected = threshold_ug(nodes, ci_tester)
        else:
            raise ValueError("initial_undirected must be one of 'threshold', or an UndirectedGraph")
    # === MULTIPLE RUNS
    for r in range(nruns):
        # === STARTING VALUES
        if initial_permutations is not None:
            starting_perm = initial_permutations[r]
        elif initial_undirected:
            starting_perm = min_degree_alg_amat(initial_undirected.to_amat())
        else:
            starting_perm = random.sample(nodes, len(nodes))
        current_dag = perm2dag(starting_perm, ci_tester)
        variants = _get_variants(current_dag)
        current_intervention_targets = [set() for _ in range(len(setting_list))]
        for setting_num, i, parents_i in variants:
            current_intervention_targets[setting_num].add(i)
        current_score = len(current_dag.arcs) + len(variants) if not tup_score else (
            len(current_dag.arcs), len(variants))
        if verbose: print("=== STARTING DAG:", current_dag, "== SCORE:", current_score)
        current_covered_arcs = current_dag.reversible_arcs()
        current_i_covered_arcs = [(i, j) for i, j in current_covered_arcs if _is_icovered(i, j, current_dag)]
        if verbose: print("=== STARTING I-COVERED ARCS:", current_i_covered_arcs)
        # Candidate neighbors: DAGs one arc-reversal away whose score does not increase.
        next_dags = [_reverse_arc_igsp(current_dag, current_i_covered_arcs, i, j) for i, j in current_i_covered_arcs]
        next_dags = [
            (d, i_cov_arcs, score, iv_targets) for d, i_cov_arcs, score, iv_targets in next_dags
            if score <= current_score
        ]
        random.shuffle(next_dags)
        # === RECORDS FOR DEPTH-FIRST SEARCH
        all_visited_dags = set()
        trace = []
        # === SEARCH!
        while True:
            if verbose:
                print('-' * len(trace), current_dag, '(%d arcs)' % len(current_dag.arcs), 'I-covered arcs:',
                      current_i_covered_arcs, 'score:', current_score)
            all_visited_dags.add(frozenset(current_dag.arcs))
            lower_dags = [
                (d, i_cov_arcs, score, iv_targets) for d, i_cov_arcs, score, iv_targets in next_dags
                if score < current_score
            ]
            # Continue while there is room to descend or explore within the depth budget.
            if (len(next_dags) > 0 and len(trace) != depth) or len(lower_dags) > 0:
                if len(lower_dags) > 0:  # restart at a lower DAG
                    all_visited_dags = set()
                    trace = []
                    lowest_ix = min(enumerate(lower_dags), key=lambda x: x[1][2])[0] if use_lowest else 0
                    current_dag, current_i_covered_arcs, current_score, current_intervention_targets = lower_dags.pop(
                        lowest_ix)
                    if verbose: print("FOUND DAG WITH LOWER SCORE:", current_dag, "== SCORE:", current_score)
                else:
                    # Equal-score sideways move: remember where we were so we can backtrack.
                    trace.append((current_dag, current_i_covered_arcs, next_dags, current_intervention_targets))
                    current_dag, current_i_covered_arcs, current_score, current_intervention_targets = next_dags.pop()
                next_dags = [
                    _reverse_arc_igsp(current_dag, current_i_covered_arcs, i, j)
                    for i, j in current_i_covered_arcs
                ]
                next_dags = [
                    (d, i_cov_arcs, score, iv_targets) for d, i_cov_arcs, score, iv_targets in next_dags
                    if score <= current_score
                ]
                next_dags = [
                    (d, i_cov_arcs, score, iv_targets) for d, i_cov_arcs, score, iv_targets in next_dags
                    if frozenset(d.arcs) not in all_visited_dags
                ]
                random.shuffle(next_dags)
            # === DEAD END ===
            else:
                if len(trace) == 0:  # reached minimum within search depth
                    break
                else:  # backtrack
                    current_dag, current_i_covered_arcs, next_dags, current_intervention_targets = trace.pop()
        # Keep the best (dag, targets) pair across all runs.
        if min_dag is None or current_score < min_score:
            min_dag = current_dag
            min_score = current_score
            learned_intervention_targets = current_intervention_targets
        if verbose: print("=== FINISHED RUN %s/%s ===" % (r + 1, nruns))
    return min_dag, learned_intervention_targets
if __name__ == '__main__':
    # Smoke test: build a random 10-node Erdos-Renyi DAG, run GSP with an
    # oracle d-separation CI tester, and print the skeleton SHD against the
    # true DAG (0 means perfect skeleton recovery).
    # NOTE(review): `gsp` is assumed to be defined earlier in this module.
    import causaldag as cd
    from causaldag.utils.ci_tests.ci_tester import MemoizedCI_Tester
    from causaldag.utils.ci_tests.oracle import dsep_test

    p = 10  # number of nodes
    d = cd.rand.directed_erdos(p, .2)  # ground-truth DAG, edge probability 0.2
    ci_tester = MemoizedCI_Tester(dsep_test, d)  # oracle CI tests against d
    est_dag, _ = gsp(set(range(p)), ci_tester, nruns=1, depth=float('inf'))
    print(est_dag.shd_skeleton(d))
| [
"causaldag.rand.directed_erdos",
"random.shuffle",
"itertools.permutations",
"numpy.ix_",
"random.choice",
"causaldag.utils.core_utils.powerset",
"itertools.combinations",
"numpy.where",
"causaldag.structure_learning.undirected.threshold_ug",
"itertools.product",
"causaldag.utils.ci_tests.ci_tes... | [((34523, 34553), 'causaldag.rand.directed_erdos', 'cd.rand.directed_erdos', (['p', '(0.2)'], {}), '(p, 0.2)\n', (34545, 34553), True, 'import causaldag as cd\n'), ((34569, 34600), 'causaldag.utils.ci_tests.ci_tester.MemoizedCI_Tester', 'MemoizedCI_Tester', (['dsep_test', 'd'], {}), '(dsep_test, d)\n', (34586, 34600), False, 'from causaldag.utils.ci_tests.ci_tester import MemoizedCI_Tester\n'), ((4387, 4416), 'random.choice', 'random.choice', (['min_degree_ixs'], {}), '(min_degree_ixs)\n', (4400, 4416), False, 'import random\n'), ((6860, 6896), 'itertools.permutations', 'itr.permutations', (['context_nodes'], {'r': '(2)'}), '(context_nodes, r=2)\n', (6876, 6896), True, 'import itertools as itr\n'), ((21502, 21527), 'random.shuffle', 'random.shuffle', (['next_dags'], {}), '(next_dags)\n', (21516, 21527), False, 'import random\n'), ((31589, 31614), 'random.shuffle', 'random.shuffle', (['next_dags'], {}), '(next_dags)\n', (31603, 31614), False, 'import random\n'), ((4186, 4226), 'numpy.ix_', 'np.ix_', (['remaining_nodes', 'remaining_nodes'], {}), '(remaining_nodes, remaining_nodes)\n', (4192, 4226), True, 'import numpy as np\n'), ((4328, 4359), 'numpy.where', 'np.where', (['(degrees == min_degree)'], {}), '(degrees == min_degree)\n', (4336, 4359), True, 'import numpy as np\n'), ((7081, 7115), 'itertools.combinations', 'itr.combinations', (['context_nodes', '(2)'], {}), '(context_nodes, 2)\n', (7097, 7115), True, 'import itertools as itr\n'), ((7123, 7156), 'itertools.product', 'itr.product', (['context_nodes', 'nodes'], {}), '(context_nodes, nodes)\n', (7134, 7156), True, 'import itertools as itr\n'), ((10867, 10897), 'causaldag.structure_learning.undirected.threshold_ug', 'threshold_ug', (['nodes', 'ci_tester'], {}), '(nodes, ci_tester)\n', (10879, 10897), False, 'from causaldag.structure_learning.undirected import threshold_ug\n'), ((19182, 19203), 'causaldag.utils.core_utils.powerset', 'powerset', (['neighbors_j'], {}), 
'(neighbors_j)\n', (19190, 19203), False, 'from causaldag.utils.core_utils import powerset\n'), ((20518, 20548), 'causaldag.structure_learning.undirected.threshold_ug', 'threshold_ug', (['nodes', 'ci_tester'], {}), '(nodes, ci_tester)\n', (20530, 20548), False, 'from causaldag.structure_learning.undirected import threshold_ug\n'), ((30015, 30045), 'causaldag.structure_learning.undirected.threshold_ug', 'threshold_ug', (['nodes', 'ci_tester'], {}), '(nodes, ci_tester)\n', (30027, 30045), False, 'from causaldag.structure_learning.undirected import threshold_ug\n'), ((23841, 23866), 'random.shuffle', 'random.shuffle', (['next_dags'], {}), '(next_dags)\n', (23855, 23866), False, 'import random\n'), ((33673, 33698), 'random.shuffle', 'random.shuffle', (['next_dags'], {}), '(next_dags)\n', (33687, 33698), False, 'import random\n'), ((13720, 13748), 'random.choice', 'random.choice', (['candidate_ixs'], {}), '(candidate_ixs)\n', (13733, 13748), False, 'import random\n'), ((19878, 19899), 'causaldag.utils.core_utils.powerset', 'powerset', (['neighbors_i'], {}), '(neighbors_i)\n', (19886, 19899), False, 'from causaldag.utils.core_utils import powerset\n')] |
import numpy as np
def _fix_time_units(da):
modified = False
if np.issubdtype(da.dtype, np.datetime64):
# already converted since xarray has managed to parse the time in
# CF-format
pass
elif da.attrs["units"].startswith("seconds since 2000-01-01"):
# I fixed UCLALES to CF valid output, this is output from a fixed
# version
pass
elif da.attrs["units"].startswith("seconds since 2000-00-00"):
da.attrs["units"] = da.attrs["units"].replace(
"seconds since 2000-00-00",
"seconds since 2000-01-01",
)
modified = True
elif da.attrs["units"].startswith("seconds since 0-00-00"):
# 2D fields have strange time units...
da.attrs["units"] = da.attrs["units"].replace(
"seconds since 0-00-00",
"seconds since 2000-01-01",
)
modified = True
elif da.attrs["units"].startswith("seconds since 0-0-0"):
# 2D fields have strange time units...
da.attrs["units"] = da.attrs["units"].replace(
"seconds since 0-0-0",
"seconds since 2000-01-01",
)
modified = True
elif da.attrs["units"] == "day as %Y%m%d.%f":
da = (da * 24 * 60 * 60).astype(int)
da.attrs["units"] = "seconds since 2000-01-01 00:00:00"
modified = True
else:
raise NotImplementedError(da.attrs["units"])
return da, modified
| [
"numpy.issubdtype"
] | [((74, 112), 'numpy.issubdtype', 'np.issubdtype', (['da.dtype', 'np.datetime64'], {}), '(da.dtype, np.datetime64)\n', (87, 112), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
"""
generate individual reports for data prepared using narps.py
"""
import os
import glob
import warnings
import matplotlib.pyplot as plt
import numpy
import nilearn.input_data
from narps import Narps, hypnums
from utils import get_masked_data
cut_coords = [-24, -10, 4, 18, 32, 52, 64]
bins = numpy.linspace(-5, 5)
def create_map_overlays(narps, overwrite=True):
    """
    Make report showing all orig maps with threshold overlays.

    This report includes all maps for which data were available,
    including those that were excluded from the main analysis.

    Parameters
    ----------
    narps : Narps
        Narps object supplying directory layout (``dirs``) and the set of
        teams with complete image sets (``complete_image_sets``).
    overwrite : bool
        If True (default), regenerate figures even if the PDF already exists.
    """
    figdir = os.path.join(narps.dirs.dirs['figures'], 'orig_map_overlays')
    if not os.path.exists(figdir):
        os.mkdir(figdir)
    for hyp in hypnums:
        outfile = os.path.join(
            figdir,
            'hyp%d_orig_map_overlays.pdf' % hyp)
        if not os.path.exists(outfile) or overwrite:
            print('making map overlay figure for hyp', hyp)
            # find all maps — one directory per (collection, team) submission
            hmaps = glob.glob(os.path.join(
                narps.dirs.dirs['orig'],
                '*_*'))
            collection_ids = [os.path.basename(i) for i in hmaps]
            collection_ids.sort()
            # one row per team: stat map (wide) + in-mask histogram (narrow)
            fig, ax = plt.subplots(
                len(collection_ids), 2,
                figsize=(len(collection_ids), 140),
                gridspec_kw={'width_ratios': [2, 1]})
            ctr = 0  # row counter; only advances for teams that are plotted
            for collection_id in collection_ids:
                # directory names look like <collection>_<teamID>
                teamID = collection_id.split('_')[1]
                unthresh_img = os.path.join(
                    narps.dirs.dirs['orig'],
                    '%s/hypo%d_unthresh.nii.gz' % (collection_id, hyp))
                thresh_img = os.path.join(
                    narps.dirs.dirs['thresh_mask_orig'],
                    '%s/hypo%d_thresh.nii.gz' % (
                        collection_id, hyp))
                # skip teams with neither image for this hypothesis
                if not (os.path.exists(thresh_img) or
                        os.path.exists(unthresh_img)):
                    print('skipping', teamID)
                    continue
                if teamID not in narps.complete_image_sets:
                    imagetitle = '%s (excluded)' % teamID
                else:
                    imagetitle = teamID
                display = nilearn.plotting.plot_stat_map(
                    unthresh_img,
                    display_mode="z",
                    colorbar=True, title=imagetitle,
                    cut_coords=cut_coords,
                    axes=ax[ctr, 0], cmap='gray')
                # ignore levels warning raised by add_contours
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    # outline the team's own thresholded map in blue
                    display.add_contours(
                        thresh_img, filled=False,
                        alpha=0.7, levels=[0.5],
                        colors='b')
                # histogram the unthresholded values inside the thresholded mask
                masker = nilearn.input_data.NiftiMasker(mask_img=thresh_img)
                maskdata = masker.fit_transform(unthresh_img)
                if numpy.sum(maskdata) > 0:  # check for empty mask
                    _ = ax[ctr, 1].hist(maskdata, bins=bins)
                ctr += 1
            plt.savefig(outfile)
            plt.close(fig)
def create_unthresh_histograms(narps, overwrite=True):
    """
    Create histograms of in-mask values for the unthresholded images.

    Figures are only created for images that were successfully registered
    and rectified.

    Parameters
    ----------
    narps : Narps
        Narps object supplying the directory layout and MNI mask.
    overwrite : bool
        If True (default), regenerate figures even if the PDF already exists.
    """
    figdir = os.path.join(
        narps.dirs.dirs['figures'],
        'unthresh_histograms')
    if not os.path.exists(figdir):
        os.mkdir(figdir)
    for hyp in hypnums:
        outfile = os.path.join(
            figdir,
            'hyp%d_unthresh_histogram.pdf' % hyp)
        if os.path.exists(outfile) and not overwrite:
            continue
        print('making figure for hyp', hyp)
        unthresh_data, labels = get_masked_data(
            hyp, narps.dirs.MNI_mask, narps.dirs.dirs['output'],
            imgtype='unthresh', dataset='rectified')
        # lay the per-team histograms out in a 3-column grid
        n_rows = int(numpy.ceil(len(labels) / 3))
        fig, ax = plt.subplots(n_rows, 3, figsize=(16, 50))
        for idx, label in enumerate(labels):
            row, col = divmod(idx, 3)
            panel = ax[row, col]
            panel.hist(unthresh_data[idx, :], 100)
            panel.set_title(label)
        plt.tight_layout()
        plt.savefig(outfile)
        plt.close(fig)
if __name__ == "__main__":
    # Resolve the data directory, preferring the NARPS_BASEDIR environment
    # variable and falling back to the container default.
    basedir = os.environ.get('NARPS_BASEDIR', '/data')
    # instantiate main Narps class, which loads data
    narps = Narps(basedir)
    create_map_overlays(narps)
    create_unthresh_histograms(narps)
| [
"os.mkdir",
"matplotlib.pyplot.tight_layout",
"numpy.sum",
"warnings.simplefilter",
"os.path.basename",
"matplotlib.pyplot.close",
"utils.get_masked_data",
"os.path.exists",
"narps.Narps",
"warnings.catch_warnings",
"numpy.linspace",
"os.path.join",
"matplotlib.pyplot.savefig"
] | [((336, 357), 'numpy.linspace', 'numpy.linspace', (['(-5)', '(5)'], {}), '(-5, 5)\n', (350, 357), False, 'import numpy\n'), ((603, 664), 'os.path.join', 'os.path.join', (["narps.dirs.dirs['figures']", '"""orig_map_overlays"""'], {}), "(narps.dirs.dirs['figures'], 'orig_map_overlays')\n", (615, 664), False, 'import os\n'), ((3429, 3492), 'os.path.join', 'os.path.join', (["narps.dirs.dirs['figures']", '"""unthresh_histograms"""'], {}), "(narps.dirs.dirs['figures'], 'unthresh_histograms')\n", (3441, 3492), False, 'import os\n'), ((4794, 4808), 'narps.Narps', 'Narps', (['basedir'], {}), '(basedir)\n', (4799, 4808), False, 'from narps import Narps, hypnums\n'), ((677, 699), 'os.path.exists', 'os.path.exists', (['figdir'], {}), '(figdir)\n', (691, 699), False, 'import os\n'), ((709, 725), 'os.mkdir', 'os.mkdir', (['figdir'], {}), '(figdir)\n', (717, 725), False, 'import os\n'), ((769, 826), 'os.path.join', 'os.path.join', (['figdir', "('hyp%d_orig_map_overlays.pdf' % hyp)"], {}), "(figdir, 'hyp%d_orig_map_overlays.pdf' % hyp)\n", (781, 826), False, 'import os\n'), ((3522, 3544), 'os.path.exists', 'os.path.exists', (['figdir'], {}), '(figdir)\n', (3536, 3544), False, 'import os\n'), ((3554, 3570), 'os.mkdir', 'os.mkdir', (['figdir'], {}), '(figdir)\n', (3562, 3570), False, 'import os\n'), ((3614, 3672), 'os.path.join', 'os.path.join', (['figdir', "('hyp%d_unthresh_histogram.pdf' % hyp)"], {}), "(figdir, 'hyp%d_unthresh_histogram.pdf' % hyp)\n", (3626, 3672), False, 'import os\n'), ((3135, 3155), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfile'], {}), '(outfile)\n', (3146, 3155), True, 'import matplotlib.pyplot as plt\n'), ((3168, 3182), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3177, 3182), True, 'import matplotlib.pyplot as plt\n'), ((3836, 3949), 'utils.get_masked_data', 'get_masked_data', (['hyp', 'narps.dirs.MNI_mask', "narps.dirs.dirs['output']"], {'imgtype': '"""unthresh"""', 'dataset': '"""rectified"""'}), "(hyp, 
narps.dirs.MNI_mask, narps.dirs.dirs['output'],\n imgtype='unthresh', dataset='rectified')\n", (3851, 3949), False, 'from utils import get_masked_data\n'), ((4499, 4517), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4515, 4517), True, 'import matplotlib.pyplot as plt\n'), ((4530, 4550), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outfile'], {}), '(outfile)\n', (4541, 4550), True, 'import matplotlib.pyplot as plt\n'), ((4563, 4577), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4572, 4577), True, 'import matplotlib.pyplot as plt\n'), ((868, 891), 'os.path.exists', 'os.path.exists', (['outfile'], {}), '(outfile)\n', (882, 891), False, 'import os\n'), ((1025, 1069), 'os.path.join', 'os.path.join', (["narps.dirs.dirs['orig']", '"""*_*"""'], {}), "(narps.dirs.dirs['orig'], '*_*')\n", (1037, 1069), False, 'import os\n'), ((1135, 1154), 'os.path.basename', 'os.path.basename', (['i'], {}), '(i)\n', (1151, 1154), False, 'import os\n'), ((1542, 1636), 'os.path.join', 'os.path.join', (["narps.dirs.dirs['orig']", "('%s/hypo%d_unthresh.nii.gz' % (collection_id, hyp))"], {}), "(narps.dirs.dirs['orig'], '%s/hypo%d_unthresh.nii.gz' % (\n collection_id, hyp))\n", (1554, 1636), False, 'import os\n'), ((1702, 1805), 'os.path.join', 'os.path.join', (["narps.dirs.dirs['thresh_mask_orig']", "('%s/hypo%d_thresh.nii.gz' % (collection_id, hyp))"], {}), "(narps.dirs.dirs['thresh_mask_orig'], '%s/hypo%d_thresh.nii.gz' %\n (collection_id, hyp))\n", (1714, 1805), False, 'import os\n'), ((3714, 3737), 'os.path.exists', 'os.path.exists', (['outfile'], {}), '(outfile)\n', (3728, 3737), False, 'import os\n'), ((2573, 2598), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2596, 2598), False, 'import warnings\n'), ((2620, 2651), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (2641, 2651), False, 'import warnings\n'), ((2988, 3007), 'numpy.sum', 'numpy.sum', (['maskdata'], {}), 
'(maskdata)\n', (2997, 3007), False, 'import numpy\n'), ((1893, 1919), 'os.path.exists', 'os.path.exists', (['thresh_img'], {}), '(thresh_img)\n', (1907, 1919), False, 'import os\n'), ((1947, 1975), 'os.path.exists', 'os.path.exists', (['unthresh_img'], {}), '(unthresh_img)\n', (1961, 1975), False, 'import os\n')] |
from styx_msgs.msg import TrafficLight
import rospy
import os
import numpy as np
import tensorflow as tf
from attrdict import AttrDict
import time
LIGHT_ID_TO_NAME = AttrDict({2: "Red",
3:"Yellow",
1:"Green",
4:"Unknown"})
class TLClassifier(object):
    """Traffic-light color classifier backed by a frozen SSD TensorFlow graph.

    Loads a frozen inference graph once at construction, then classifies
    incoming camera images into styx_msgs TrafficLight states.
    """

    def __init__(self, environment, model_name, thresh=0.4):
        """Load the frozen detection graph and open a persistent TF session.

        environment: label used only for the startup log message
            (e.g. site vs. simulator).
        model_name: frozen-graph filename, resolved relative to this file's
            directory.
        thresh: minimum detection score for a box to count as a light.
        """
        #TODO load classifier
        self.init_light = TrafficLight.UNKNOWN
        self.detection_threshhold = thresh
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        model_path = os.path.join(curr_dir, model_name)
        self.detection_graph = tf.Graph()
        config = tf.ConfigProto()
        # Grow GPU memory on demand instead of grabbing it all up front.
        config.gpu_options.allow_growth = True
        rospy.loginfo("Loading SSD Model for detecting traffic ligths for {}".format(environment))
        start = time.time()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(model_path, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
            self.sess = tf.Session(graph=self.detection_graph, config=config)
        # Definite input and output Tensors for detection_graph
        self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        # Each score represent how level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
        end = time.time()
        rospy.loginfo("Model load time is = {} sec".format(end - start))

    def do_infer(self, image):
        """Run one forward pass; return (boxes, scores, classes) as 1-D-batched arrays.

        NOTE(review): assumes `image` is an HxWxC array in the channel order the
        model was trained on — confirm against the camera pipeline.
        """
        image_expanded = np.expand_dims(image, axis=0)
        with self.detection_graph.as_default():
            (boxes, scores, classes, num) = self.sess.run(
                [self.detection_boxes, self.detection_scores,
                 self.detection_classes, self.num_detections],
                feed_dict={self.image_tensor: image_expanded})
        # Drop the batch dimension for convenience downstream.
        boxes = np.squeeze(boxes)
        scores = np.squeeze(scores)
        classes = np.squeeze(classes).astype(np.int32)
        return boxes, scores, classes

    def get_classification(self, image):
        """Determines the color of the traffic light in the image
        Args:
            image (cv::Mat): image containing the traffic light
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        self.current_light = self.init_light
        boxes, scores, classes = self.do_infer(image)
        # Keep the class of the single highest-scoring detection above threshold.
        highest_score = self.detection_threshhold
        for i in range(boxes.shape[0]):
            if scores is None or scores[i] > self.detection_threshhold:
                class_name = LIGHT_ID_TO_NAME[classes[i]]
                if scores[i] > highest_score:
                    highest_score = scores[i]
                    if class_name == 'Red':
                        self.current_light = TrafficLight.RED
                    elif class_name == 'Green':
                        self.current_light = TrafficLight.GREEN
                    elif class_name == 'Yellow':
                        self.current_light = TrafficLight.YELLOW
                    elif class_name == 'Unknown':
                        self.current_light = TrafficLight.UNKNOWN
                    else:
                        pass
        return self.current_light
"os.path.realpath",
"tensorflow.Session",
"numpy.expand_dims",
"time.time",
"tensorflow.ConfigProto",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"numpy.squeeze",
"tensorflow.import_graph_def",
"tensorflow.GraphDef",
"attrdict.AttrDict",
"os.path.join"
] | [((174, 241), 'attrdict.AttrDict', 'AttrDict', (["{(2): 'Red', (3): 'Yellow', (1): 'Green', (4): 'Unknown'}"], {}), "({(2): 'Red', (3): 'Yellow', (1): 'Green', (4): 'Unknown'})\n", (182, 241), False, 'from attrdict import AttrDict\n'), ((592, 626), 'os.path.join', 'os.path.join', (['curr_dir', 'model_name'], {}), '(curr_dir, model_name)\n', (604, 626), False, 'import os\n'), ((659, 669), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (667, 669), True, 'import tensorflow as tf\n'), ((688, 704), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (702, 704), True, 'import tensorflow as tf\n'), ((868, 879), 'time.time', 'time.time', ([], {}), '()\n', (877, 879), False, 'import time\n'), ((2063, 2074), 'time.time', 'time.time', ([], {}), '()\n', (2072, 2074), False, 'import time\n'), ((2207, 2236), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (2221, 2236), True, 'import numpy as np\n'), ((2549, 2566), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (2559, 2566), True, 'import numpy as np\n'), ((2584, 2602), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (2594, 2602), True, 'import numpy as np\n'), ((542, 568), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (558, 568), False, 'import os\n'), ((955, 968), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (966, 968), True, 'import tensorflow as tf\n'), ((1221, 1274), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.detection_graph', 'config': 'config'}), '(graph=self.detection_graph, config=config)\n', (1231, 1274), True, 'import tensorflow as tf\n'), ((987, 1019), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['model_path', '"""rb"""'], {}), "(model_path, 'rb')\n", (1001, 1019), True, 'import tensorflow as tf\n'), ((1153, 1195), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (1172, 1195), True, 'import tensorflow as 
tf\n'), ((2621, 2640), 'numpy.squeeze', 'np.squeeze', (['classes'], {}), '(classes)\n', (2631, 2640), True, 'import numpy as np\n')] |
import warnings
from numpy import all as npall, ascontiguousarray, clip, isfinite
def check_economic_qs(QS):
    """Validate an economic eigendecomposition ``QS = ((Q0, ...), S)``.

    ``QS`` must be a tuple whose first item is a tuple of finite matrices and
    whose second item is a finite array of values.

    Returns ``QS`` unchanged; raises ValueError on any violation.
    """
    if not isinstance(QS, tuple):
        raise ValueError("QS must be a tuple.")
    if not isinstance(QS[0], tuple):
        raise ValueError("QS[0] must be a tuple.")
    # Check the Q matrices first, then the values, mirroring the original order.
    for Q in QS[0]:
        if not npall(isfinite(Q)):
            raise ValueError("QS has non-finite values.")
    if not npall(isfinite(QS[1])):
        raise ValueError("QS has non-finite values.")
    return QS
def check_covariates(X):
    """Validate a covariate matrix: must be 2-D with only finite entries.

    Returns ``X`` unchanged; raises ValueError otherwise.
    """
    if X.ndim != 2:
        raise ValueError("Covariates must be a bidimensional array.")
    if not npall(isfinite(X)):
        raise ValueError("Covariates must have finite values only.")
    return X
def check_outcome(y, lik):
    """Validate an outcome vector against a likelihood specification.

    ``lik`` is either a likelihood name (string) or a tuple/list whose first
    item is the name and whose remaining items are numeric likelihood
    parameters. ``y`` is coerced to a contiguous float array and must be
    finite. Poisson outcomes are additionally capped (see
    ``_check_poisson_outcome``); Binomial/Normal require exactly one
    likelihood parameter.
    """
    if not isinstance(lik, (list, tuple)):
        lik = (lik,)
    if not isinstance(lik[0], str):
        raise ValueError("The first item of ``lik`` has to be a string.")
    lik_name = lik[0].lower()
    y = ascontiguousarray(y, float)
    # Coerce any extra likelihood parameters to float arrays (validates them).
    lik = lik[:1] + tuple(ascontiguousarray(param, float) for param in lik[1:])
    if not npall(isfinite(y)):
        raise ValueError("Outcome must be finite.")
    if lik_name == "poisson":
        return _check_poisson_outcome(y)
    if lik_name in ("binomial", "normal") and len(lik) != 2:
        pretty_name = lik_name[0].upper() + lik_name[1:]
        msg = "``lik`` must be a tuple of two elements for"
        msg += " {} likelihood.".format(pretty_name)
        raise ValueError(msg)
    return y
def _check_poisson_outcome(y):
poisson_lim = 25000
if y.max() > poisson_lim:
msg = "Output values of Poisson likelihood greater"
msg += " than {lim} is set to {lim} before applying GLMM."
warnings.warn(msg.format(lim=poisson_lim))
y = clip(y, 0, poisson_lim)
return y
| [
"numpy.ascontiguousarray",
"numpy.isfinite",
"numpy.clip"
] | [((990, 1017), 'numpy.ascontiguousarray', 'ascontiguousarray', (['y', 'float'], {}), '(y, float)\n', (1007, 1017), False, 'from numpy import all as npall, ascontiguousarray, clip, isfinite\n'), ((1786, 1809), 'numpy.clip', 'clip', (['y', '(0)', 'poisson_lim'], {}), '(y, 0, poisson_lim)\n', (1790, 1809), False, 'from numpy import all as npall, ascontiguousarray, clip, isfinite\n'), ((424, 439), 'numpy.isfinite', 'isfinite', (['QS[1]'], {}), '(QS[1])\n', (432, 439), False, 'from numpy import all as npall, ascontiguousarray, clip, isfinite\n'), ((627, 638), 'numpy.isfinite', 'isfinite', (['X'], {}), '(X)\n', (635, 638), False, 'from numpy import all as npall, ascontiguousarray, clip, isfinite\n'), ((1108, 1119), 'numpy.isfinite', 'isfinite', (['y'], {}), '(y)\n', (1116, 1119), False, 'from numpy import all as npall, ascontiguousarray, clip, isfinite\n'), ((1044, 1071), 'numpy.ascontiguousarray', 'ascontiguousarray', (['i', 'float'], {}), '(i, float)\n', (1061, 1071), False, 'from numpy import all as npall, ascontiguousarray, clip, isfinite\n'), ((345, 356), 'numpy.isfinite', 'isfinite', (['Q'], {}), '(Q)\n', (353, 356), False, 'from numpy import all as npall, ascontiguousarray, clip, isfinite\n')] |
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import dash
from dash.dependencies import Input, Output
from dash import dcc
from dash import html
from dash.dependencies import Input, Output, State
import dash_table
from dash_table.Format import Format, Scheme
# SolCalc
from helicalc import helicalc_dir, helicalc_data
from helicalc.solcalc import SolCalcIntegrator
from helicalc.geometry import read_solenoid_geom_combined
from helicalc.cylinders import get_thick_cylinders_padded
# Standard Dash example stylesheet for basic page styling.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# load nominal PS geom (first 3 coils of the combined geometry = the PS coils)
# paramdir = '/home/ckampa/coding/helicalc/dev/params/'
paramdir = helicalc_dir + 'dev/params/'
paramfile = 'Mu2e_V13'
df_PS_nom = read_solenoid_geom_combined(paramdir, paramfile).iloc[:3]
# calculate layer thickness
# FIXME!
# integration params: step sizes passed to the SolCalc integrator
# NOTE(review): presumably (dr, dz) in meters -- confirm against SolCalcIntegrator.
drz = np.array([5e-3, 1e-2])
# editable vs. dependent columns shown in the two geometry tables below
cols_edit = ['Ri', 'x', 'y', 'z', 'rot0', 'rot1', 'rot2', 'N_layers',
             'N_turns', 'I_turn']
cols_stat = ['Coil_Num', 'Ro', 'L', 'I_tot', 'N_turns_tot', 'helicity', 'h_cable',
             'w_cable', 'h_sc', 'w_sc', 't_gi', 't_ci', 't_il', 'phi0_deg', 'phi1_deg',
             'pitch']
# load TS+DS contribution to PS (precomputed field map with the PS coils off)
#PSoff_file = '/home/shared_data/Bmaps/SolCalc_complete/Mau13.SolCalc.PS_region.standard.PSoff.pkl'
PSoff_file = helicalc_data+'Bmaps/aux/Mau13.SolCalc.PS_region.standard.PSoff.pkl'
df_PSoff = pd.read_pickle(PSoff_file)
df_PSoff = df_PSoff.astype(float)
# keep only on-axis lines at three fixed X positions for the 1-D field plot
# NOTE(review): 3.904 etc. look like PS-axis X coordinates in meters -- confirm.
# m = (df_PSoff.Y == 0.) & (np.isin(df_PSoff.X - 3.904, [0., 0.4, 0.7]))
m = (df_PSoff.Y == 0.) & (np.isin(df_PSoff.X, [3.904, 4.304, 4.604]))
df_PSoff_lines = df_PSoff[m].copy().reset_index(drop=True, inplace=False)
# print(df_PSoff_lines)
# formatting/style constants shared by the layout below
green = 'rgb(159, 210, 128)'
plot_bg = 'rgb(240, 240, 240)'
button_style = {'fontSize': 'large',
                'backgroundColor': green,
                }
# plot globals
marker_size = 10
fsize_plot = 20
fsize_ticks = 14
# instantiate app
app = dash.Dash(name='solcalc', external_stylesheets=external_stylesheets)
# Page layout: title, 3D coil view, editable geometry table, recalculate
# button, field-plot controls, field plot, and read-only dependent parameters.
# Two hidden divs act as client-side stores for geometry and field data.
app.layout = html.Div([
    html.H1('SolCalc Magnet Builder (Production Solenoid)'),
    # html.H2('Coils Plot'),
    dcc.Graph(id='coils-plot'),
    html.H2('Coil Geometries'),
    # tables
    html.H3('Editable Parameters'),
    dash_table.DataTable(id='editable-table',
                         columns=[{'name':i, 'id': i, 'hideable':True, 'type':'numeric',
                         'format': Format(scheme=Scheme.fixed, precision=4),} for i in cols_edit],
                         data=df_PS_nom[cols_edit].to_dict('records'),
                         editable=True),
    html.Br(),
    # Triggers the update_geom_data callback below.
    html.Button('Recalculate Field', id='calc-button', style=button_style),
    # field plot
    html.H2('Field Plot'),
    html.Label('Plotting Options:'),
    html.Label('Field Component:'),
    dcc.Dropdown(
        id='yaxis-column-field',
        options=['Bx', 'By', 'Bz'],
        value='Bz',
        multi=False,
        #style=desc_style,
    ),
    html.Label('Field value or gradient?'),
    dcc.RadioItems(
        id='yaxis-type-field',
        options=[{'label': i, 'value': i} for i in ['B_i', 'grad_z(B_i)']],
        value='B_i',
        labelStyle={'display': 'inline-block'},
        #style=desc_style,
    ),
    html.Label('Include TS/DS Contribution?'),
    dcc.RadioItems(
        id='include-TS-field',
        options=[{'label': i, 'value': i} for i in ['yes', 'no']],
        value='yes',
        labelStyle={'display': 'inline-block'},
        #style=desc_style,
    ),
    html.Label('Individual coil contributions or combined field?'),
    dcc.RadioItems(
        id='indiv-contrib',
        # NOTE(review): 'individal' is misspelled but is a runtime value a
        # callback may compare against -- fix in tandem with the callback.
        options=[{'label': i, 'value': i} for i in ['combined', 'individal']],
        value='combined',
        labelStyle={'display': 'inline-block'},
        #style=desc_style,
    ),
    html.Label('Field unit:'),
    dcc.RadioItems(
        id='field-unit',
        options=[{'label': i, 'value': i} for i in ['Gauss', 'Tesla']],
        value='Gauss',
        labelStyle={'display': 'inline-block'},
        #style=desc_style,
    ),
    dcc.Graph(id='field-plot'),
    # FIXME!
    # not positive best placement for these
    html.H3('Static/Dependent Parameters'),
    dash_table.DataTable(id='static-table',
                         columns=[{'name':i, 'id': i, 'hideable':True, 'type':'numeric',
                         'format': Format(scheme=Scheme.fixed, precision=4),} for i in cols_stat],
                         data=df_PS_nom[cols_stat].to_dict('records'),
                         editable=False),
    html.H3('Notes on Dependent Parameters'),
    # dcc.Markdown('''
    # $R_o = R_i + h_{cable}*N_{layers} + 2*t_{gi} + 2*t_{ci}*N_{layers} + 2*{t_il}*(N_{layers}-1)$
    # '''),
    #html.Div(html.P(['Notes on depdendent parameters:', html.Br(),
    html.Div(html.P([
    'Ro = Ri + h_cable*N_layers + 2*t_gi + 2*t_ci*N_layers + 2*t_il*(N_layers-1)', html.Br(),
    'pitch = h_cable + 2*t_ci', html.Br(),
    'L = pitch*N_turns + 2*t_gi [note nominal seems to use (N_turns-1)]', html.Br(),
    'N_turns_tot = N_turns * N_layers', html.Br(),
    'I_tot = I_turn * N_turns_tot',])),
    # hidden divs for data
    html.Div(children=df_PS_nom[cols_edit+cols_stat].to_json(),
             id='geom-data', style={'display': 'none'}),
    html.Div(id='field-data', style={'display': 'none'}),
])
# update geom div when button is clicked
@app.callback(
[Output('geom-data', 'children'),
Output('static-table', 'data'),],
[Input('calc-button', 'n_clicks'),],
[State('static-table', 'data'),
State('static-table', 'columns'),
State('editable-table', 'data'),
State('editable-table', 'columns')],
)
def update_geom_data(n_clicks, rows_stat, cols_stat, rows_edit, cols_edit):
# load data
df_edit = pd.DataFrame(rows_edit, columns=[c['name'] for c in cols_edit], dtype=float)
print(df_edit)
print(df_edit.info())
df_stat = pd.DataFrame(rows_stat, columns=[c['name'] for c in cols_stat], dtype=float)
# calculations
df_stat.loc[:, 'Ro'] = df_edit.Ri + df_stat.h_cable * df_edit.N_layers + \
2 * df_stat.t_gi + 2*df_stat.t_ci*df_edit.N_layers +\
2*df_stat.t_il*(df_edit.N_layers - 1)
df_stat.loc[:, 'L'] = df_stat.pitch * df_edit.N_turns + 2 * df_stat.t_gi
df_stat.loc[:, 'N_turns_tot'] = df_edit.N_turns * df_edit.N_layers
df_stat.loc[:, 'I_tot'] = df_edit.I_turn + df_stat.N_turns_tot
# combine results
df = pd.concat([df_stat, df_edit], axis=1)
return df.to_json(), df_stat.to_dict('records')
# update coils plot
@app.callback(
Output('coils-plot', 'figure'),
[Input('geom-data', 'children'),],
)
def plot_coils(df):
df = pd.read_json(df)
# get cylinders PS
xs, ys, zs, cs = get_thick_cylinders_padded(df, [1, 2, 3])
# get cylinders nominal PS
xs_n, ys_n, zs_n, cs_n = get_thick_cylinders_padded(df_PS_nom, [1, 2, 3])
# FIXME! Add some of the TS coils
# return surface plot
# layout
# camera
# y up
# camera = dict(
# up=dict(x=0, y=1, z=0),
# #center=dict(x=-3.904, y=0, z=9.),
# eye=dict(x=-2, y=0., z=0.)
# )
# z up
camera = dict(
up=dict(x=0, y=0, z=1),
#center=dict(x=-3.904, y=0, z=9.),
eye=dict(x=0., y=-2., z=0.)
)
layout = go.Layout(
title='Coil Layout',
height=700,
font=dict(family="Courier New", size=fsize_plot,),
margin={'l': 60, 'b': 60, 't': 60, 'r': 60},
scene=dict(aspectmode='data', camera=camera,
xaxis={'title': 'Z [m]', 'tickfont':{'size': fsize_ticks}},
yaxis={'title': 'X [m]', 'tickfont':{'size': fsize_ticks}},
zaxis={'title': 'Y [m]', 'tickfont':{'size': fsize_ticks}},),
plot_bgcolor=plot_bg,
# autosize=True,
# width=1600,
# height=800,
)
return {'data':
#[go.Surface(x=xs, y=ys, z=zs, surfacecolor=cs,
[go.Surface(x=zs_n, y=xs_n, z=ys_n, surfacecolor=cs_n,
colorscale=[[0,'rgba(0,0,0,0)'],[1,'rgba(220, 50, 103, 0.8)']],
showscale=False,
showlegend=True,
opacity=1.0,
name='PS Coils (nominal)',),
go.Surface(x=zs, y=xs, z=ys, surfacecolor=cs,
colorscale=[[0,'rgba(0,0,0,0)'],[1,'rgba(138, 207, 103, 0.8)']],
showscale=False,
showlegend=True,
opacity=1.0,
name='PS Coils (current)',),
],
'layout': layout,
}
# recalculate field
@app.callback(
Output('field-data', 'children'),
[Input('geom-data', 'children'),],
)
def calculate_field(df):
df = pd.read_json(df)
# create dataframe with same grid as PSoff filtered dataframe
df_calc = df_PSoff_lines[['X', 'Y', 'Z', 'R']].copy()
for i in range(len(df)):
# for geom in geom_df_mu2e.itertuples():
j = int(round(df.iloc[i].Coil_Num))
# print coil number to screen for reference
#print(f'Calculating coil {i+1}/'+f'{N_coils}', file=old_stdout)
# instantiate integrator
mySolCalc = SolCalcIntegrator(df.iloc[i], drz=drz)
# integrate on grid (and update the grid df)
df_calc = mySolCalc.integrate_grid(df_calc, N_proc=1)
# save single coil results
# mySolCalc.save_grid_calc(savetype='pkl',
# savename=datadir+base_name+f'.coil_{j}',
# all_solcalc_cols=False)
# combine fields
for i in ['x', 'y', 'z']:
cols = []
for col in df_calc.columns:
if f'B{i}_solcalc' in col:
# T to G
df_calc.eval(f'{col} = {col} * 1e4', inplace=True)
cols.append(col)
eval_str = f'B{i} = '+'+'.join(cols)
df_calc.eval(eval_str, inplace=True, engine='python')
print(df_calc)
print(df_calc.info())
return df_calc.to_json()
# update plot
@app.callback(
Output('field-plot', 'figure'),
[Input('field-data', 'children'),
Input('yaxis-column-field', 'value'),
Input('yaxis-type-field', 'value'),
Input('include-TS-field', 'value'),
Input('indiv-contrib', 'value'),
Input('field-unit', 'value')],
)
def field_plot(df, ycol, ytype, incTS, plotIndiv, unit):
# save original unit
unit_ = unit
unit_print = unit
#print(df)
df = pd.read_json(df)
#print(df)
xs = df.X.unique()
#print(xs)
rs = xs - 3.904
# shared calculations
zs = df.Z.values
m1 = (df.X == xs[0])
m2 = (df.X == xs[1])
m3 = (df.X == xs[2])
ms = [m1, m2, m3]
# plotting depends most heavily on whether plotting individual coils
if plotIndiv == 'combined':
B = df[ycol].values.astype(float)
if incTS == 'yes':
B += df_PSoff_lines[ycol].values
if unit == 'Tesla':
B *= 1e-4
t_inc = ''
if ytype == 'grad_z(B_i)':
ycol = f'grad_z({ycol})'
unit_print = unit_+'/m'
t_inc = ' Gradient'
for m_ in ms:
B[m_] = np.concatenate([[np.nan],np.diff(B[m_]) / np.diff(zs[m_])])
data = [go.Scatter(x=zs[m_], y=B[m_], mode='lines+markers',
marker={'color':c, 'size': marker_size, 'opacity': 0.85,
'line': {'width':0.1, 'color': 'white'}},
line={'width':1, 'color': c},
name=f'R = {r:0.2f}'
) for m_, c, r in zip(ms, ['blue', 'green', 'red'], rs)]
else:
cs_list = [['blue', 'green', 'red'],
['purple', 'lime', 'pink'],
['cyan', 'darkgreen', 'orange']]
data = []
ycols_PS = [1, 2, 3]
ycols_TS = ['']
for yc, cs in zip(ycols_PS, cs_list):
yc_full = ycol+f'_solcalc_{yc}'
B = df[yc_full].values.astype(float)
if unit == 'Tesla':
B *= 1e-4
t_inc = ''
if ytype == 'grad_z(B_i)':
ycol_ = f'grad_z({ycol})'
unit_print = unit_+'/m'
t_inc = ' Gradient'
for m_ in ms:
B[m_] = np.concatenate([[np.nan],np.diff(B[m_]) / np.diff(zs[m_])])
else:
ycol_ = ycol
for m_, c, r in zip(ms, cs, rs):
data.append(go.Scatter(
x=zs[m_], y=B[m_], mode='lines+markers',
marker={'color':c, 'size': marker_size, 'opacity': 0.85,
'line': {'width':0.1, 'color': 'white'}},
line={'width':1, 'color': c},
name=f'R = {r:0.2f}, Coil {yc}'))
# make another trace for TS if necessary
if incTS == 'yes':
B = df_PSoff_lines[ycol].values.astype(float)
if unit == 'Tesla':
B *= 1e-4
t_inc = ''
if ytype == 'grad_z(B_i)':
ycol_ = f'grad_z({ycol})'
unit_print = unit_+'/m'
t_inc = ' Gradient'
for m_ in ms:
B[m_] = np.concatenate([[np.nan],np.diff(B[m_]) / np.diff(zs[m_])])
else:
ycol_ = ycol
for m_, c, r in zip(ms, ['black', 'brown', 'yellow'], rs):
data.append(go.Scatter(
x=zs[m_], y=B[m_], mode='lines+markers',
marker={'color':c, 'size': marker_size, 'opacity': 0.85,
'line': {'width':0.1, 'color': 'white'}},
line={'width':1, 'color': c},
name=f'R = {r:0.2f}, TS+DS Coils'))
# layout should work with all configurations
layout = go.Layout(
title=f'Field{t_inc} Plot: y==0.0 m',
height=700,
font=dict(family="Courier New", size=fsize_plot,),
margin={'l': 60, 'b': 60, 't': 60, 'r': 60},
scene=dict(aspectmode='auto',
#xaxis={'title': 'Z [m]', 'tickfont':{'size': fsize_ticks}},
#yaxis={'title': f'{ycol} [{unit}]', 'tickfont':{'size': fsize_ticks}},
),
xaxis={'title': 'Z [m]', 'tickfont':{'size': fsize_ticks}},
yaxis={'title': f'{ycol} [{unit_print}]', 'tickfont':{'size': fsize_ticks}},
plot_bgcolor=plot_bg,
showlegend=True,
)
return {'data':data, 'layout':layout}
if __name__ == '__main__':
app.run_server(debug=True, host='127.0.0.1')
| [
"numpy.isin",
"helicalc.geometry.read_solenoid_geom_combined",
"plotly.graph_objects.Surface",
"dash.dcc.Graph",
"dash.dcc.RadioItems",
"pandas.DataFrame",
"dash.Dash",
"dash_table.Format.Format",
"dash.html.Div",
"dash.html.Button",
"dash.dependencies.State",
"dash.dcc.Dropdown",
"pandas.co... | [((860, 883), 'numpy.array', 'np.array', (['[0.005, 0.01]'], {}), '([0.005, 0.01])\n', (868, 883), True, 'import numpy as np\n'), ((1441, 1467), 'pandas.read_pickle', 'pd.read_pickle', (['PSoff_file'], {}), '(PSoff_file)\n', (1455, 1467), True, 'import pandas as pd\n'), ((2012, 2080), 'dash.Dash', 'dash.Dash', ([], {'name': '"""solcalc"""', 'external_stylesheets': 'external_stylesheets'}), "(name='solcalc', external_stylesheets=external_stylesheets)\n", (2021, 2080), False, 'import dash\n'), ((1601, 1643), 'numpy.isin', 'np.isin', (['df_PSoff.X', '[3.904, 4.304, 4.604]'], {}), '(df_PSoff.X, [3.904, 4.304, 4.604])\n', (1608, 1643), True, 'import numpy as np\n'), ((5912, 5988), 'pandas.DataFrame', 'pd.DataFrame', (['rows_edit'], {'columns': "[c['name'] for c in cols_edit]", 'dtype': 'float'}), "(rows_edit, columns=[c['name'] for c in cols_edit], dtype=float)\n", (5924, 5988), True, 'import pandas as pd\n'), ((6048, 6124), 'pandas.DataFrame', 'pd.DataFrame', (['rows_stat'], {'columns': "[c['name'] for c in cols_stat]", 'dtype': 'float'}), "(rows_stat, columns=[c['name'] for c in cols_stat], dtype=float)\n", (6060, 6124), True, 'import pandas as pd\n'), ((6569, 6606), 'pandas.concat', 'pd.concat', (['[df_stat, df_edit]'], {'axis': '(1)'}), '([df_stat, df_edit], axis=1)\n', (6578, 6606), True, 'import pandas as pd\n'), ((6801, 6817), 'pandas.read_json', 'pd.read_json', (['df'], {}), '(df)\n', (6813, 6817), True, 'import pandas as pd\n'), ((6862, 6903), 'helicalc.cylinders.get_thick_cylinders_padded', 'get_thick_cylinders_padded', (['df', '[1, 2, 3]'], {}), '(df, [1, 2, 3])\n', (6888, 6903), False, 'from helicalc.cylinders import get_thick_cylinders_padded\n'), ((6964, 7012), 'helicalc.cylinders.get_thick_cylinders_padded', 'get_thick_cylinders_padded', (['df_PS_nom', '[1, 2, 3]'], {}), '(df_PS_nom, [1, 2, 3])\n', (6990, 7012), False, 'from helicalc.cylinders import get_thick_cylinders_padded\n'), ((6699, 6729), 'dash.dependencies.Output', 'Output', 
(['"""coils-plot"""', '"""figure"""'], {}), "('coils-plot', 'figure')\n", (6705, 6729), False, 'from dash.dependencies import Input, Output, State\n'), ((8805, 8821), 'pandas.read_json', 'pd.read_json', (['df'], {}), '(df)\n', (8817, 8821), True, 'import pandas as pd\n'), ((8696, 8728), 'dash.dependencies.Output', 'Output', (['"""field-data"""', '"""children"""'], {}), "('field-data', 'children')\n", (8702, 8728), False, 'from dash.dependencies import Input, Output, State\n'), ((10517, 10533), 'pandas.read_json', 'pd.read_json', (['df'], {}), '(df)\n', (10529, 10533), True, 'import pandas as pd\n'), ((10101, 10131), 'dash.dependencies.Output', 'Output', (['"""field-plot"""', '"""figure"""'], {}), "('field-plot', 'figure')\n", (10107, 10131), False, 'from dash.dependencies import Input, Output, State\n'), ((736, 784), 'helicalc.geometry.read_solenoid_geom_combined', 'read_solenoid_geom_combined', (['paramdir', 'paramfile'], {}), '(paramdir, paramfile)\n', (763, 784), False, 'from helicalc.geometry import read_solenoid_geom_combined\n'), ((2110, 2165), 'dash.html.H1', 'html.H1', (['"""SolCalc Magnet Builder (Production Solenoid)"""'], {}), "('SolCalc Magnet Builder (Production Solenoid)')\n", (2117, 2165), False, 'from dash import html\n'), ((2200, 2226), 'dash.dcc.Graph', 'dcc.Graph', ([], {'id': '"""coils-plot"""'}), "(id='coils-plot')\n", (2209, 2226), False, 'from dash import dcc\n'), ((2232, 2258), 'dash.html.H2', 'html.H2', (['"""Coil Geometries"""'], {}), "('Coil Geometries')\n", (2239, 2258), False, 'from dash import html\n'), ((2277, 2307), 'dash.html.H3', 'html.H3', (['"""Editable Parameters"""'], {}), "('Editable Parameters')\n", (2284, 2307), False, 'from dash import html\n'), ((2659, 2668), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (2666, 2668), False, 'from dash import html\n'), ((2674, 2744), 'dash.html.Button', 'html.Button', (['"""Recalculate Field"""'], {'id': '"""calc-button"""', 'style': 'button_style'}), "('Recalculate Field', 
id='calc-button', style=button_style)\n", (2685, 2744), False, 'from dash import html\n'), ((2767, 2788), 'dash.html.H2', 'html.H2', (['"""Field Plot"""'], {}), "('Field Plot')\n", (2774, 2788), False, 'from dash import html\n'), ((2794, 2825), 'dash.html.Label', 'html.Label', (['"""Plotting Options:"""'], {}), "('Plotting Options:')\n", (2804, 2825), False, 'from dash import html\n'), ((2831, 2861), 'dash.html.Label', 'html.Label', (['"""Field Component:"""'], {}), "('Field Component:')\n", (2841, 2861), False, 'from dash import html\n'), ((2867, 2962), 'dash.dcc.Dropdown', 'dcc.Dropdown', ([], {'id': '"""yaxis-column-field"""', 'options': "['Bx', 'By', 'Bz']", 'value': '"""Bz"""', 'multi': '(False)'}), "(id='yaxis-column-field', options=['Bx', 'By', 'Bz'], value=\n 'Bz', multi=False)\n", (2879, 2962), False, 'from dash import dcc\n'), ((3029, 3067), 'dash.html.Label', 'html.Label', (['"""Field value or gradient?"""'], {}), "('Field value or gradient?')\n", (3039, 3067), False, 'from dash import html\n'), ((3073, 3239), 'dash.dcc.RadioItems', 'dcc.RadioItems', ([], {'id': '"""yaxis-type-field"""', 'options': "[{'label': i, 'value': i} for i in ['B_i', 'grad_z(B_i)']]", 'value': '"""B_i"""', 'labelStyle': "{'display': 'inline-block'}"}), "(id='yaxis-type-field', options=[{'label': i, 'value': i} for\n i in ['B_i', 'grad_z(B_i)']], value='B_i', labelStyle={'display':\n 'inline-block'})\n", (3087, 3239), False, 'from dash import dcc\n'), ((3303, 3344), 'dash.html.Label', 'html.Label', (['"""Include TS/DS Contribution?"""'], {}), "('Include TS/DS Contribution?')\n", (3313, 3344), False, 'from dash import html\n'), ((3350, 3503), 'dash.dcc.RadioItems', 'dcc.RadioItems', ([], {'id': '"""include-TS-field"""', 'options': "[{'label': i, 'value': i} for i in ['yes', 'no']]", 'value': '"""yes"""', 'labelStyle': "{'display': 'inline-block'}"}), "(id='include-TS-field', options=[{'label': i, 'value': i} for\n i in ['yes', 'no']], value='yes', labelStyle={'display': 
'inline-block'})\n", (3364, 3503), False, 'from dash import dcc\n'), ((3571, 3633), 'dash.html.Label', 'html.Label', (['"""Individual coil contributions or combined field?"""'], {}), "('Individual coil contributions or combined field?')\n", (3581, 3633), False, 'from dash import html\n'), ((3639, 3810), 'dash.dcc.RadioItems', 'dcc.RadioItems', ([], {'id': '"""indiv-contrib"""', 'options': "[{'label': i, 'value': i} for i in ['combined', 'individal']]", 'value': '"""combined"""', 'labelStyle': "{'display': 'inline-block'}"}), "(id='indiv-contrib', options=[{'label': i, 'value': i} for i in\n ['combined', 'individal']], value='combined', labelStyle={'display':\n 'inline-block'})\n", (3653, 3810), False, 'from dash import dcc\n'), ((3874, 3899), 'dash.html.Label', 'html.Label', (['"""Field unit:"""'], {}), "('Field unit:')\n", (3884, 3899), False, 'from dash import html\n'), ((3905, 4059), 'dash.dcc.RadioItems', 'dcc.RadioItems', ([], {'id': '"""field-unit"""', 'options': "[{'label': i, 'value': i} for i in ['Gauss', 'Tesla']]", 'value': '"""Gauss"""', 'labelStyle': "{'display': 'inline-block'}"}), "(id='field-unit', options=[{'label': i, 'value': i} for i in\n ['Gauss', 'Tesla']], value='Gauss', labelStyle={'display': 'inline-block'})\n", (3919, 4059), False, 'from dash import dcc\n'), ((4127, 4153), 'dash.dcc.Graph', 'dcc.Graph', ([], {'id': '"""field-plot"""'}), "(id='field-plot')\n", (4136, 4153), False, 'from dash import dcc\n'), ((4216, 4254), 'dash.html.H3', 'html.H3', (['"""Static/Dependent Parameters"""'], {}), "('Static/Dependent Parameters')\n", (4223, 4254), False, 'from dash import html\n'), ((4605, 4645), 'dash.html.H3', 'html.H3', (['"""Notes on Dependent Parameters"""'], {}), "('Notes on Dependent Parameters')\n", (4612, 4645), False, 'from dash import html\n'), ((5417, 5469), 'dash.html.Div', 'html.Div', ([], {'id': '"""field-data"""', 'style': "{'display': 'none'}"}), "(id='field-data', style={'display': 'none'})\n", (5425, 5469), False, 'from dash 
import html\n'), ((5536, 5567), 'dash.dependencies.Output', 'Output', (['"""geom-data"""', '"""children"""'], {}), "('geom-data', 'children')\n", (5542, 5567), False, 'from dash.dependencies import Input, Output, State\n'), ((5574, 5604), 'dash.dependencies.Output', 'Output', (['"""static-table"""', '"""data"""'], {}), "('static-table', 'data')\n", (5580, 5604), False, 'from dash.dependencies import Input, Output, State\n'), ((5613, 5645), 'dash.dependencies.Input', 'Input', (['"""calc-button"""', '"""n_clicks"""'], {}), "('calc-button', 'n_clicks')\n", (5618, 5645), False, 'from dash.dependencies import Input, Output, State\n'), ((5654, 5683), 'dash.dependencies.State', 'State', (['"""static-table"""', '"""data"""'], {}), "('static-table', 'data')\n", (5659, 5683), False, 'from dash.dependencies import Input, Output, State\n'), ((5690, 5722), 'dash.dependencies.State', 'State', (['"""static-table"""', '"""columns"""'], {}), "('static-table', 'columns')\n", (5695, 5722), False, 'from dash.dependencies import Input, Output, State\n'), ((5729, 5760), 'dash.dependencies.State', 'State', (['"""editable-table"""', '"""data"""'], {}), "('editable-table', 'data')\n", (5734, 5760), False, 'from dash.dependencies import Input, Output, State\n'), ((5767, 5801), 'dash.dependencies.State', 'State', (['"""editable-table"""', '"""columns"""'], {}), "('editable-table', 'columns')\n", (5772, 5801), False, 'from dash.dependencies import Input, Output, State\n'), ((6736, 6766), 'dash.dependencies.Input', 'Input', (['"""geom-data"""', '"""children"""'], {}), "('geom-data', 'children')\n", (6741, 6766), False, 'from dash.dependencies import Input, Output, State\n'), ((9242, 9280), 'helicalc.solcalc.SolCalcIntegrator', 'SolCalcIntegrator', (['df.iloc[i]'], {'drz': 'drz'}), '(df.iloc[i], drz=drz)\n', (9259, 9280), False, 'from helicalc.solcalc import SolCalcIntegrator\n'), ((8735, 8765), 'dash.dependencies.Input', 'Input', (['"""geom-data"""', '"""children"""'], {}), "('geom-data', 
'children')\n", (8740, 8765), False, 'from dash.dependencies import Input, Output, State\n'), ((10138, 10169), 'dash.dependencies.Input', 'Input', (['"""field-data"""', '"""children"""'], {}), "('field-data', 'children')\n", (10143, 10169), False, 'from dash.dependencies import Input, Output, State\n'), ((10176, 10212), 'dash.dependencies.Input', 'Input', (['"""yaxis-column-field"""', '"""value"""'], {}), "('yaxis-column-field', 'value')\n", (10181, 10212), False, 'from dash.dependencies import Input, Output, State\n'), ((10219, 10253), 'dash.dependencies.Input', 'Input', (['"""yaxis-type-field"""', '"""value"""'], {}), "('yaxis-type-field', 'value')\n", (10224, 10253), False, 'from dash.dependencies import Input, Output, State\n'), ((10260, 10294), 'dash.dependencies.Input', 'Input', (['"""include-TS-field"""', '"""value"""'], {}), "('include-TS-field', 'value')\n", (10265, 10294), False, 'from dash.dependencies import Input, Output, State\n'), ((10301, 10332), 'dash.dependencies.Input', 'Input', (['"""indiv-contrib"""', '"""value"""'], {}), "('indiv-contrib', 'value')\n", (10306, 10332), False, 'from dash.dependencies import Input, Output, State\n'), ((10339, 10367), 'dash.dependencies.Input', 'Input', (['"""field-unit"""', '"""value"""'], {}), "('field-unit', 'value')\n", (10344, 10367), False, 'from dash.dependencies import Input, Output, State\n'), ((8066, 8268), 'plotly.graph_objects.Surface', 'go.Surface', ([], {'x': 'zs_n', 'y': 'xs_n', 'z': 'ys_n', 'surfacecolor': 'cs_n', 'colorscale': "[[0, 'rgba(0,0,0,0)'], [1, 'rgba(220, 50, 103, 0.8)']]", 'showscale': '(False)', 'showlegend': '(True)', 'opacity': '(1.0)', 'name': '"""PS Coils (nominal)"""'}), "(x=zs_n, y=xs_n, z=ys_n, surfacecolor=cs_n, colorscale=[[0,\n 'rgba(0,0,0,0)'], [1, 'rgba(220, 50, 103, 0.8)']], showscale=False,\n showlegend=True, opacity=1.0, name='PS Coils (nominal)')\n", (8076, 8268), True, 'import plotly.graph_objects as go\n'), ((8349, 8544), 'plotly.graph_objects.Surface', 'go.Surface', 
([], {'x': 'zs', 'y': 'xs', 'z': 'ys', 'surfacecolor': 'cs', 'colorscale': "[[0, 'rgba(0,0,0,0)'], [1, 'rgba(138, 207, 103, 0.8)']]", 'showscale': '(False)', 'showlegend': '(True)', 'opacity': '(1.0)', 'name': '"""PS Coils (current)"""'}), "(x=zs, y=xs, z=ys, surfacecolor=cs, colorscale=[[0,\n 'rgba(0,0,0,0)'], [1, 'rgba(138, 207, 103, 0.8)']], showscale=False,\n showlegend=True, opacity=1.0, name='PS Coils (current)')\n", (8359, 8544), True, 'import plotly.graph_objects as go\n'), ((11305, 11518), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'zs[m_]', 'y': 'B[m_]', 'mode': '"""lines+markers"""', 'marker': "{'color': c, 'size': marker_size, 'opacity': 0.85, 'line': {'width': 0.1,\n 'color': 'white'}}", 'line': "{'width': 1, 'color': c}", 'name': 'f"""R = {r:0.2f}"""'}), "(x=zs[m_], y=B[m_], mode='lines+markers', marker={'color': c,\n 'size': marker_size, 'opacity': 0.85, 'line': {'width': 0.1, 'color':\n 'white'}}, line={'width': 1, 'color': c}, name=f'R = {r:0.2f}')\n", (11315, 11518), True, 'import plotly.graph_objects as go\n'), ((4971, 4980), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (4978, 4980), False, 'from dash import html\n'), ((5030, 5039), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (5037, 5039), False, 'from dash import html\n'), ((5131, 5140), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (5138, 5140), False, 'from dash import html\n'), ((5198, 5207), 'dash.html.Br', 'html.Br', ([], {}), '()\n', (5205, 5207), False, 'from dash import html\n'), ((12539, 12763), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'zs[m_]', 'y': 'B[m_]', 'mode': '"""lines+markers"""', 'marker': "{'color': c, 'size': marker_size, 'opacity': 0.85, 'line': {'width': 0.1,\n 'color': 'white'}}", 'line': "{'width': 1, 'color': c}", 'name': 'f"""R = {r:0.2f}, Coil {yc}"""'}), "(x=zs[m_], y=B[m_], mode='lines+markers', marker={'color': c,\n 'size': marker_size, 'opacity': 0.85, 'line': {'width': 0.1, 'color':\n 'white'}}, line={'width': 1, 'color': c}, 
name=f'R = {r:0.2f}, Coil {yc}')\n", (12549, 12763), True, 'import plotly.graph_objects as go\n'), ((13491, 13722), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'zs[m_]', 'y': 'B[m_]', 'mode': '"""lines+markers"""', 'marker': "{'color': c, 'size': marker_size, 'opacity': 0.85, 'line': {'width': 0.1,\n 'color': 'white'}}", 'line': "{'width': 1, 'color': c}", 'name': 'f"""R = {r:0.2f}, TS+DS Coils"""'}), "(x=zs[m_], y=B[m_], mode='lines+markers', marker={'color': c,\n 'size': marker_size, 'opacity': 0.85, 'line': {'width': 0.1, 'color':\n 'white'}}, line={'width': 1, 'color': c}, name=f'R = {r:0.2f}, TS+DS Coils'\n )\n", (13501, 13722), True, 'import plotly.graph_objects as go\n'), ((2479, 2519), 'dash_table.Format.Format', 'Format', ([], {'scheme': 'Scheme.fixed', 'precision': '(4)'}), '(scheme=Scheme.fixed, precision=4)\n', (2485, 2519), False, 'from dash_table.Format import Format, Scheme\n'), ((4424, 4464), 'dash_table.Format.Format', 'Format', ([], {'scheme': 'Scheme.fixed', 'precision': '(4)'}), '(scheme=Scheme.fixed, precision=4)\n', (4430, 4464), False, 'from dash_table.Format import Format, Scheme\n'), ((11254, 11268), 'numpy.diff', 'np.diff', (['B[m_]'], {}), '(B[m_])\n', (11261, 11268), True, 'import numpy as np\n'), ((11271, 11286), 'numpy.diff', 'np.diff', (['zs[m_]'], {}), '(zs[m_])\n', (11278, 11286), True, 'import numpy as np\n'), ((12384, 12398), 'numpy.diff', 'np.diff', (['B[m_]'], {}), '(B[m_])\n', (12391, 12398), True, 'import numpy as np\n'), ((12401, 12416), 'numpy.diff', 'np.diff', (['zs[m_]'], {}), '(zs[m_])\n', (12408, 12416), True, 'import numpy as np\n'), ((13310, 13324), 'numpy.diff', 'np.diff', (['B[m_]'], {}), '(B[m_])\n', (13317, 13324), True, 'import numpy as np\n'), ((13327, 13342), 'numpy.diff', 'np.diff', (['zs[m_]'], {}), '(zs[m_])\n', (13334, 13342), True, 'import numpy as np\n')] |
from collections import defaultdict
import json
import os
from tqdm import tqdm
import numpy as np
import torch
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
from utils.logger import LOGGER
from utils.const import PAD_TOKEN
from pdb import set_trace as bp
class VizWizDataset(Dataset):
def __init__(self, data_dir, split):
self.data_dir = data_dir
self.split = split
self.img_dir = os.path.join(self.data_dir, "img", self.split)
self.ann_path = os.path.join(self.data_dir, "ann", f"{split}.json")
with open(self.ann_path, "r") as ann_file:
anns = json.load(ann_file)
self.img_names = [ann["image"].split(".")[0] for ann in anns]
self.pre_dir = os.path.join(self.data_dir, "pre", self.split)
self.pre_vis_feat_dir = os.path.join(self.pre_dir, "visual_features")
self.pre_q_tok_dir = os.path.join(self.pre_dir, "question_tokens")
self.pre_ans_tok_dir = os.path.join(self.pre_dir, "answer_tokens")
LOGGER.info("Created {} set with {:,d} examples".format(split, len(self)))
def __len__(self):
return len(self.img_names)
def __getitem__(self, idx):
img_name = self.img_names[idx]
pre_vis_feat_path = os.path.join(self.pre_vis_feat_dir, img_name + ".npy")
img_feat = np.load(pre_vis_feat_path)
pre_q_tok_path = os.path.join(self.pre_q_tok_dir, img_name + ".npy")
q_tok = np.load(pre_q_tok_path)
attn_mask = np.ones_like(q_tok, dtype=np.int32)
pre_ans_tok_path = os.path.join(self.pre_ans_tok_dir, img_name + ".npy")
ans_tok = np.load(pre_ans_tok_path)
answerable = ans_tok[0][0]
answer_type = ans_tok[0][1]
answers = ans_tok[1:, :]
pad_mask = answers == -1
answers[pad_mask] = PAD_TOKEN
return img_name, img_feat, q_tok, attn_mask, answers, answerable, answer_type
def vizwiz_collate(inputs):
img_names = [inp[0] for inp in inputs]
img_feats = [inp[1] for inp in inputs]
qs_tok = [inp[2] for inp in inputs]
attn_masks = [inp[3] for inp in inputs]
answers_tok = [inp[4] for inp in inputs]
answerables = [inp[5] for inp in inputs]
answers_type = [inp[6] for inp in inputs]
max_num_feats = max([len(img_feat) for img_feat in img_feats])
all_feat = []
for img_feat in img_feats:
num_feats = len(img_feat)
num_diff = max_num_feats - num_feats
img_feat = torch.FloatTensor(img_feat)
img_feat_dim = img_feat.shape[1]
padding = torch.zeros((num_diff, img_feat_dim), dtype=torch.float32)
img_feat = torch.cat([img_feat, padding], dim=0)
all_feat.append(img_feat)
img_feats = torch.stack(all_feat, dim=0).float()
qs_tok = pad_sequence([torch.LongTensor(q_tok) for q_tok in qs_tok], batch_first=True)
attn_masks = pad_sequence(
[torch.LongTensor(attn_mask) for attn_mask in attn_masks],
batch_first=True,
padding_value=0
)
attn_padding = torch.ones((len(attn_masks), img_feats.shape[1]), dtype=torch.long)
attn_masks = torch.cat([attn_masks, attn_padding], dim=1)
position_ids = torch.arange(0, qs_tok.size(1), dtype=torch.long)
position_ids = torch.stack([position_ids] * len(img_feats), dim=0)
all_answers = []
max_answer_len = attn_masks.shape[1]
for answer_tok in answers_tok:
answer_len = answer_tok.shape[1]
len_diff = max_answer_len - answer_len
num_answers = len(answer_tok)
assert num_answers == 10
if len_diff < 0:
tmp_answer = torch.ones((num_answers, max_answer_len), dtype=torch.long) * PAD_TOKEN
all_answers.append(tmp_answer)
else:
answer_tok = torch.LongTensor(answer_tok)
num_answers = len(answer_tok)
padding = torch.ones((num_answers, len_diff), dtype=torch.long) * PAD_TOKEN
answer_tok = torch.cat([answer_tok, padding], dim=1)
all_answers.append(answer_tok)
answers_tok = torch.stack(all_answers, dim=0).long()
answerables = torch.LongTensor(answerables)
answers_type = torch.LongTensor(answers_type)
return {
"img_names" : img_names,
"img_feats" : img_feats,
"qs_tok" : qs_tok,
"attn_masks" : attn_masks,
"position_ids" : position_ids,
"answers_tok" : answers_tok,
"answerables" : answerables,
"answers_type" : answers_type
}
| [
"torch.ones",
"numpy.load",
"json.load",
"numpy.ones_like",
"torch.stack",
"torch.LongTensor",
"torch.FloatTensor",
"torch.cat",
"torch.zeros",
"os.path.join"
] | [((3151, 3195), 'torch.cat', 'torch.cat', (['[attn_masks, attn_padding]'], {'dim': '(1)'}), '([attn_masks, attn_padding], dim=1)\n', (3160, 3195), False, 'import torch\n'), ((4141, 4170), 'torch.LongTensor', 'torch.LongTensor', (['answerables'], {}), '(answerables)\n', (4157, 4170), False, 'import torch\n'), ((4191, 4221), 'torch.LongTensor', 'torch.LongTensor', (['answers_type'], {}), '(answers_type)\n', (4207, 4221), False, 'import torch\n'), ((461, 507), 'os.path.join', 'os.path.join', (['self.data_dir', '"""img"""', 'self.split'], {}), "(self.data_dir, 'img', self.split)\n", (473, 507), False, 'import os\n'), ((532, 583), 'os.path.join', 'os.path.join', (['self.data_dir', '"""ann"""', 'f"""{split}.json"""'], {}), "(self.data_dir, 'ann', f'{split}.json')\n", (544, 583), False, 'import os\n'), ((773, 819), 'os.path.join', 'os.path.join', (['self.data_dir', '"""pre"""', 'self.split'], {}), "(self.data_dir, 'pre', self.split)\n", (785, 819), False, 'import os\n'), ((852, 897), 'os.path.join', 'os.path.join', (['self.pre_dir', '"""visual_features"""'], {}), "(self.pre_dir, 'visual_features')\n", (864, 897), False, 'import os\n'), ((927, 972), 'os.path.join', 'os.path.join', (['self.pre_dir', '"""question_tokens"""'], {}), "(self.pre_dir, 'question_tokens')\n", (939, 972), False, 'import os\n'), ((1004, 1047), 'os.path.join', 'os.path.join', (['self.pre_dir', '"""answer_tokens"""'], {}), "(self.pre_dir, 'answer_tokens')\n", (1016, 1047), False, 'import os\n'), ((1293, 1347), 'os.path.join', 'os.path.join', (['self.pre_vis_feat_dir', "(img_name + '.npy')"], {}), "(self.pre_vis_feat_dir, img_name + '.npy')\n", (1305, 1347), False, 'import os\n'), ((1367, 1393), 'numpy.load', 'np.load', (['pre_vis_feat_path'], {}), '(pre_vis_feat_path)\n', (1374, 1393), True, 'import numpy as np\n'), ((1420, 1471), 'os.path.join', 'os.path.join', (['self.pre_q_tok_dir', "(img_name + '.npy')"], {}), "(self.pre_q_tok_dir, img_name + '.npy')\n", (1432, 1471), False, 'import os\n'), 
((1488, 1511), 'numpy.load', 'np.load', (['pre_q_tok_path'], {}), '(pre_q_tok_path)\n', (1495, 1511), True, 'import numpy as np\n'), ((1532, 1567), 'numpy.ones_like', 'np.ones_like', (['q_tok'], {'dtype': 'np.int32'}), '(q_tok, dtype=np.int32)\n', (1544, 1567), True, 'import numpy as np\n'), ((1596, 1649), 'os.path.join', 'os.path.join', (['self.pre_ans_tok_dir', "(img_name + '.npy')"], {}), "(self.pre_ans_tok_dir, img_name + '.npy')\n", (1608, 1649), False, 'import os\n'), ((1668, 1693), 'numpy.load', 'np.load', (['pre_ans_tok_path'], {}), '(pre_ans_tok_path)\n', (1675, 1693), True, 'import numpy as np\n'), ((2508, 2535), 'torch.FloatTensor', 'torch.FloatTensor', (['img_feat'], {}), '(img_feat)\n', (2525, 2535), False, 'import torch\n'), ((2595, 2653), 'torch.zeros', 'torch.zeros', (['(num_diff, img_feat_dim)'], {'dtype': 'torch.float32'}), '((num_diff, img_feat_dim), dtype=torch.float32)\n', (2606, 2653), False, 'import torch\n'), ((2673, 2710), 'torch.cat', 'torch.cat', (['[img_feat, padding]'], {'dim': '(0)'}), '([img_feat, padding], dim=0)\n', (2682, 2710), False, 'import torch\n'), ((655, 674), 'json.load', 'json.load', (['ann_file'], {}), '(ann_file)\n', (664, 674), False, 'import json\n'), ((2761, 2789), 'torch.stack', 'torch.stack', (['all_feat'], {'dim': '(0)'}), '(all_feat, dim=0)\n', (2772, 2789), False, 'import torch\n'), ((2826, 2849), 'torch.LongTensor', 'torch.LongTensor', (['q_tok'], {}), '(q_tok)\n', (2842, 2849), False, 'import torch\n'), ((2931, 2958), 'torch.LongTensor', 'torch.LongTensor', (['attn_mask'], {}), '(attn_mask)\n', (2947, 2958), False, 'import torch\n'), ((3798, 3826), 'torch.LongTensor', 'torch.LongTensor', (['answer_tok'], {}), '(answer_tok)\n', (3814, 3826), False, 'import torch\n'), ((3982, 4021), 'torch.cat', 'torch.cat', (['[answer_tok, padding]'], {'dim': '(1)'}), '([answer_tok, padding], dim=1)\n', (3991, 4021), False, 'import torch\n'), ((4083, 4114), 'torch.stack', 'torch.stack', (['all_answers'], {'dim': '(0)'}), 
'(all_answers, dim=0)\n', (4094, 4114), False, 'import torch\n'), ((3644, 3703), 'torch.ones', 'torch.ones', (['(num_answers, max_answer_len)'], {'dtype': 'torch.long'}), '((num_answers, max_answer_len), dtype=torch.long)\n', (3654, 3703), False, 'import torch\n'), ((3891, 3944), 'torch.ones', 'torch.ones', (['(num_answers, len_diff)'], {'dtype': 'torch.long'}), '((num_answers, len_diff), dtype=torch.long)\n', (3901, 3944), False, 'import torch\n')] |
#!/usr/bin/env python
import numpy as np
import de421
from time import time
from jplephem import Ephemeris
from jplephem.spk import SPK
def _timed_run(fn, *args):
    """Call fn(*args) once and print the elapsed wall-clock time in seconds."""
    t0 = time()
    fn(*args)
    print(time() - t0)


def main():
    """Benchmark the legacy Ephemeris API against the SPK-based API.

    For each sample size, builds a linear grid of Julian dates and computes
    the position of Mars with both code paths, timing each path twice so the
    second run reflects warmed-up (cached) performance.
    """
    for size in 10, 1000, 100000:
        jd = np.linspace(2414992.5, 2471184.50, size)
        # NOTE(review): the kernel is reopened on every iteration and never
        # closed -- presumably deliberate so each size starts cold; confirm.
        kernel = SPK.open('de421.bsp')
        ephem = Ephemeris(de421)
        mars = kernel[0, 4]  # SPK segment for (center 0, target 4)
        print(size)
        print('-- old code (2 successive runs):')
        for _ in range(2):
            _timed_run(ephem.position, 'mars', jd)
        print('-- new SPK-powered code (2 successive runs):')
        for _ in range(2):
            _timed_run(mars.compute, jd)
        print()
if __name__ == '__main__':
    # Run the benchmark twice: the first pass pays one-time costs
    # (module loading, file reads, internal caches); the second pass
    # shows steady-state timings for comparison.
    main()
    print(' Warmed up, running again '.center(72, '-'))
    main()
| [
"jplephem.spk.SPK.open",
"jplephem.Ephemeris",
"numpy.linspace",
"time.time"
] | [((197, 236), 'numpy.linspace', 'np.linspace', (['(2414992.5)', '(2471184.5)', 'size'], {}), '(2414992.5, 2471184.5, size)\n', (208, 236), True, 'import numpy as np\n'), ((255, 276), 'jplephem.spk.SPK.open', 'SPK.open', (['"""de421.bsp"""'], {}), "('de421.bsp')\n", (263, 276), False, 'from jplephem.spk import SPK\n'), ((293, 309), 'jplephem.Ephemeris', 'Ephemeris', (['de421'], {}), '(de421)\n', (302, 309), False, 'from jplephem import Ephemeris\n'), ((422, 428), 'time.time', 'time', ([], {}), '()\n', (426, 428), False, 'from time import time\n'), ((505, 511), 'time.time', 'time', ([], {}), '()\n', (509, 511), False, 'from time import time\n'), ((651, 657), 'time.time', 'time', ([], {}), '()\n', (655, 657), False, 'from time import time\n'), ((724, 730), 'time.time', 'time', ([], {}), '()\n', (728, 730), False, 'from time import time\n'), ((478, 484), 'time.time', 'time', ([], {}), '()\n', (482, 484), False, 'from time import time\n'), ((561, 567), 'time.time', 'time', ([], {}), '()\n', (565, 567), False, 'from time import time\n'), ((697, 703), 'time.time', 'time', ([], {}), '()\n', (701, 703), False, 'from time import time\n'), ((770, 776), 'time.time', 'time', ([], {}), '()\n', (774, 776), False, 'from time import time\n')] |
import os
import sys
import numpy as np
import glob
def get_parent_dir(n=1):
    """Return the n-th parent of the directory containing this file.

    n=0 gives the directory holding this file itself; each increment of
    n climbs one level further up the tree.
    """
    path = os.path.abspath(__file__)
    remaining = n + 1  # +1: the first dirname strips the filename itself
    while remaining > 0:
        path = os.path.dirname(path)
        remaining -= 1
    return path
# Make the training sources and shared utilities importable before the
# project-local imports below.
src_path = os.path.join(get_parent_dir(1), "2_Training", "src")
utils_path = os.path.join(get_parent_dir(1), "Utils")
sys.path.append(src_path)
sys.path.append(utils_path)
import cv2
import argparse
from keras_yolo3.yolo import YOLO
from PIL import Image
from timeit import default_timer as timer
from utils import load_extractor_model, load_features, parse_input, detect_object, detect_frame
from keras_yolo3.yolo import YOLO, detect_video
import pandas as pd
from Train_Utils import get_anchors
# Silence TensorFlow's C++ logging output.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# Set up folder names for default values
data_folder = os.path.join(get_parent_dir(n=1), "Data")
model_folder = os.path.join(data_folder, "Model_Weights")
model_weights = os.path.join(model_folder, "yolo.h5")
model_classes = os.path.join(model_folder, "data_coco.txt")
# NOTE(review): class_list is a path STRING, so len(class_list) below is the
# length of the path, not a class count -- looks unintentional, confirm.
class_list=model_classes
print(class_list,len(class_list))
anchors_path = os.path.join(src_path, "keras_yolo3", "model_data", "yolo_anchors.txt")
print()
FLAGS = None
# Random BGR-ish colour triple in [0, 256) (module-level default).
color = list(np.random.random(size=3) * 256)
if __name__ == "__main__":
    # Build the CLI.  SUPPRESS drops unset options from the namespace
    # instead of inserting None defaults.
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    # Command line options
    parser.add_argument(
        "--no_save_img",
        default=False,
        action="store_true",
        help="Only save bounding box coordinates but do not save output images with annotated boxes. Default is False.",
    )
    parser.add_argument(
        "--file_types",
        "--names-list",
        nargs="*",
        default=[],
        help="Specify list of file types to include. Default is --file_types .jpg .jpeg .png .mp4",
    )
    parser.add_argument(
        "--yolo_model",
        type=str,
        dest="model_path",
        default=model_weights,
        help="Path to pre-trained weight files. Default is " + model_weights,
    )
    parser.add_argument(
        "--anchors",
        type=str,
        dest="anchors_path",
        default=anchors_path,
        help="Path to YOLO anchors. Default is " + anchors_path,
    )
    parser.add_argument(
        "--classes",
        type=str,
        dest="classes_path",
        default=model_classes,
        help="Path to YOLO class specifications. Default is " + model_classes,
    )
    parser.add_argument(
        "--gpu_num", type=int, default=1, help="Number of GPU to use. Default is 1"
    )
    parser.add_argument(
        "--confidence",
        type=float,
        dest="score",
        default=0.70,
        help="Threshold for YOLO object confidence score to show predictions. Default is 0.25.",
    )
    parser.add_argument(
        "--video",
        type=str,
        dest="video_path",
        default='./carvid.mp4',
        help="Path to the videos",
    )
    parser.add_argument(
        "--video_out",
        type=str,
        dest="out_path",
        default='./vidout/out.avi',
        help="Path to the videos",
    )
    FLAGS = parser.parse_args()
    # Split images and videos
    img_endings = (".jpg", ".jpeg", ".png")
    vid_endings = (".mp4", ".mpeg", ".mpg", ".avi")
    anchors = get_anchors(anchors_path)
    yolo = YOLO(
        **{
            "model_path": FLAGS.model_path,
            "anchors_path": anchors_path,
            "classes_path": FLAGS.classes_path,
            "score": FLAGS.score,
            "gpu_num": FLAGS.gpu_num,
            "model_image_size": (416, 416),
        }
    )
    # Labels to draw on images: one class name per line of the classes file.
    # Use a context manager so the handle is closed (the original left the
    # file open for the life of the process).
    with open(FLAGS.classes_path, "r") as class_file:
        input_labels = [line.rstrip("\n") for line in class_file.readlines()]
    FLAGS.output='vidout'
    font = cv2.FONT_HERSHEY_SIMPLEX
    fontScale = 0.5
    thickness = 1
    Path = FLAGS.video_path
    out_path=FLAGS.out_path
    # Read the input video frame by frame, draw detections, collect frames.
    cap = cv2.VideoCapture(Path)
    cap.set(cv2.CAP_PROP_FPS, 20)
    img_array = []
    while True:
        try:
            ret, img = cap.read()
            img2 = Image.fromarray(img)
            bbox, class_name, score = detect_frame(
                yolo,
                img2
            )
            c = cv2.waitKey(1)
            if c == 27:  # ESC aborts playback early
                break
            for i in range(len(bbox)):
                sc = "{:.2f}".format(score[i])
                # Per-class pseudo-colour derived from the class id.
                B = (class_name[i] * 500) % 255
                R = (class_name[i] * 255) % 45
                cv2.rectangle(img, (bbox[i][0], bbox[i][1]), (bbox[i][2], bbox[i][3]), (int(R), 255, int(B)), 2)
                cv2.putText(img, input_labels[class_name[i]] + ' Prob:' + str(sc), (bbox[i][0], bbox[i][1]),
                            font, fontScale, (int(R), 255, int(B)), thickness, cv2.LINE_AA)
            cv2.imshow('Input', img)
            img_array.append(img)
        except AttributeError:
            # cap.read() yields (False, None) at end-of-stream, and
            # Image.fromarray(None) then raises AttributeError.  The original
            # used a bare `except:` with the no-op expression `AttributeError`,
            # which silently swallowed EVERY error; catch only the expected one.
            break
    cap.release()
    cv2.destroyAllWindows()
    # Re-encode the collected frames to the output file.
    out = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'DIVX'), 15, (640, 360))
    for frame in img_array:
        out.write(frame)
    out.release()  # the original called release() twice; once is enough
    yolo.close_session()
| [
"sys.path.append",
"os.path.abspath",
"os.path.join",
"argparse.ArgumentParser",
"cv2.VideoWriter_fourcc",
"utils.detect_frame",
"timeit.default_timer",
"os.path.dirname",
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"numpy.random.random",
"PIL.Image.fromarray",
"cv2.destroyAllWindows"... | [((458, 483), 'sys.path.append', 'sys.path.append', (['src_path'], {}), '(src_path)\n', (473, 483), False, 'import sys\n'), ((485, 512), 'sys.path.append', 'sys.path.append', (['utils_path'], {}), '(utils_path)\n', (500, 512), False, 'import sys\n'), ((1010, 1052), 'os.path.join', 'os.path.join', (['data_folder', '"""Model_Weights"""'], {}), "(data_folder, 'Model_Weights')\n", (1022, 1052), False, 'import os\n'), ((1072, 1109), 'os.path.join', 'os.path.join', (['model_folder', '"""yolo.h5"""'], {}), "(model_folder, 'yolo.h5')\n", (1084, 1109), False, 'import os\n'), ((1127, 1170), 'os.path.join', 'os.path.join', (['model_folder', '"""data_coco.txt"""'], {}), "(model_folder, 'data_coco.txt')\n", (1139, 1170), False, 'import os\n'), ((1248, 1319), 'os.path.join', 'os.path.join', (['src_path', '"""keras_yolo3"""', '"""model_data"""', '"""yolo_anchors.txt"""'], {}), "(src_path, 'keras_yolo3', 'model_data', 'yolo_anchors.txt')\n", (1260, 1319), False, 'import os\n'), ((1463, 1522), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'argument_default': 'argparse.SUPPRESS'}), '(argument_default=argparse.SUPPRESS)\n', (1486, 1522), False, 'import argparse\n'), ((3546, 3571), 'Train_Utils.get_anchors', 'get_anchors', (['anchors_path'], {}), '(anchors_path)\n', (3557, 3571), False, 'from Train_Utils import get_anchors\n'), ((3586, 3780), 'keras_yolo3.yolo.YOLO', 'YOLO', ([], {}), "(**{'model_path': FLAGS.model_path, 'anchors_path': anchors_path,\n 'classes_path': FLAGS.classes_path, 'score': FLAGS.score, 'gpu_num':\n FLAGS.gpu_num, 'model_image_size': (416, 416)})\n", (3590, 3780), False, 'from keras_yolo3.yolo import YOLO, detect_video\n'), ((201, 226), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (216, 226), False, 'import os\n'), ((276, 305), 'os.path.dirname', 'os.path.dirname', (['current_path'], {}), '(current_path)\n', (291, 305), False, 'import os\n'), ((1357, 1381), 'numpy.random.random', 
'np.random.random', ([], {'size': '(3)'}), '(size=3)\n', (1373, 1381), True, 'import numpy as np\n'), ((4237, 4244), 'timeit.default_timer', 'timer', ([], {}), '()\n', (4242, 4244), True, 'from timeit import default_timer as timer\n'), ((4262, 4284), 'cv2.VideoCapture', 'cv2.VideoCapture', (['Path'], {}), '(Path)\n', (4278, 4284), False, 'import cv2\n'), ((5598, 5621), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5619, 5621), False, 'import cv2\n'), ((5663, 5694), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'DIVX'"], {}), "(*'DIVX')\n", (5685, 5694), False, 'import cv2\n'), ((4450, 4470), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (4465, 4470), False, 'from PIL import Image\n'), ((4514, 4538), 'utils.detect_frame', 'detect_frame', (['yolo', 'img2'], {}), '(yolo, img2)\n', (4526, 4538), False, 'from utils import load_extractor_model, load_features, parse_input, detect_object, detect_frame\n'), ((4623, 4637), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4634, 4637), False, 'import cv2\n'), ((5422, 5446), 'cv2.imshow', 'cv2.imshow', (['"""Input"""', 'img'], {}), "('Input', img)\n", (5432, 5446), False, 'import cv2\n'), ((5031, 5055), 'numpy.random.random', 'np.random.random', ([], {'size': '(3)'}), '(size=3)\n', (5047, 5055), True, 'import numpy as np\n')] |
import sys, os
import scipy
import math
import numpy as np
from scipy import integrate
from scipy import optimize as opt
from scipy.stats import gamma
from cell import Cell
class Simulator:
    """Stochastic simulator of cell-size dynamics.

    Cells grow exponentially and divide after completing ``steps`` discrete
    division steps; the step rate depends on cell size through the exponent
    ``lamb``.  Division-position noise (CV2div) and growth-rate noise (CV2gr)
    are drawn from beta and gamma distributions respectively.
    """

    def __init__(self, ncells, gr, sb, steps, CV2div = 0, CV2gr = 0, lamb=1, V0array=None, sample_time = 0):
        """Validate parameters, derive the step-rate constant K, create cells.

        ncells      -- number of cells to simulate
        gr          -- mean exponential growth rate
        sb          -- mean size at birth
        steps       -- division steps per cell cycle
        CV2div      -- squared CV of the division-position noise
        CV2gr       -- squared CV of the growth-rate noise
        lamb        -- size exponent of the step rate (0.5 <= lamb <= 2)
        V0array     -- optional explicit initial sizes, one per cell
        sample_time -- unused here; the run methods set the sampling period
        """
        self.check_errors(ncells, gr, sb, steps, CV2div, CV2gr, lamb)
        self.n = ncells # Number of cells to study
        self.smplt = 0 # Sampling time
        self.gr = gr # Growth rate
        self.total_steps = steps # Division steps
        self.sb = sb
        self.l = lamb
        if lamb ==1:
            # Closed form for lamb == 1; otherwise solve for k numerically.
            self.K = self.total_steps *self.gr/(self.sb)
        else:
            self.K = self.total_steps*self.getk()
        self.CV2div = CV2div
        self.CV2gr = CV2gr
        self.output = "" # string to export data in dynamic simulation
        self.output_size = "" # string to export data in divison strategy
        self.num_steps = 0 # Initial steps
        self.V = self.sb # Cell size
        self.time = 0 # Simulation time
        self.cells = [] # Array of cells
        if hasattr(V0array, "__len__"):
            self.V0arr = V0array
        else:
            self.V0arr = []
        self.initialize_cells(V0array=self.V0arr) #Initialize cells

    def check_errors(self, ncells, gr, sb, steps, CV2div, CV2gr, lamb):
        """Raise NameError when any constructor parameter is out of range."""
        if ncells <= 0:
            raise NameError('the number of cells must be positive')
        elif gr < 0:
            raise NameError('The Growth rate must be positive')
        elif sb < 0:
            raise NameError('The sb must be positive or zero')
        elif steps < 0:
            raise NameError('The number of steps must be positive or zero')
        elif CV2div < 0:
            raise NameError('The CV2div must be positive or zero')
        elif CV2gr < 0:
            raise NameError('The CV2gr must be positive or zero')
        elif lamb < 0.5 or lamb > 2:
            raise NameError('Lamb must be higher than 0.5 and less than 2')

    def newgr(self,CV2):
        """Draw a growth-rate multiplier (mean 1, squared CV = CV2)."""
        if CV2 ==0:
            return 1.
        else:
            return np.random.gamma(shape=1/CV2,scale=CV2)

    def newdivpar(self,CV2):
        """Draw a division fraction (mean 0.5, squared CV = CV2)."""
        if CV2 ==0:
            return 0.5
        else:
            # Symmetric beta with matching variance around 0.5.
            beta = 0.5*((1/CV2)-1)
            return np.random.beta(a=beta,b=beta)

    def nextt (self,s0,r,cell):
        """Time to the cell's next division step from size s0.

        r is the cell's uniform random draw; the closed form below inverts the
        cumulative rate of the size-dependent step process.
        """
        mu= (self.gr*cell.gr)
        k= self.K*cell.k
        return (1/(self.l*mu))*np.log(1-((self.l*mu)/(k*s0**self.l))*np.log(r))

    def getsb(self,k):
        """Mean birth size implied by step rate k (root of mean(sd) = 2*sb)."""
        def root(tt):
            return self.multimean(tt,k)-2*tt
        def meansb():
            return opt.bisect(root,0.00001,100000)
        sb = meansb()
        return sb

    def multimean(self,s,k):
        """Mean division size for a cell born at size s under rate constant k."""
        sb=s
        def moment(sd):
            return self.rhomulti(sb,sd,k)*sd
        v=integrate.quad(moment, sb, np.inf)[0]
        return v

    def rhomulti(self,sb,sd,k):
        """Division-size density rho(sd | sb): gamma in the accumulated steps."""
        n=self.total_steps
        lamb=self.l
        gr=self.gr
        c=n*k/gr
        x=c*((sd**lamb-sb**lamb)/lamb)
        return gamma.pdf(x, n)*c*sd**(lamb-1)

    def opti(self,k):
        """Residual between the birth size implied by k and the target sb."""
        return self.getsb(k)-self.sb

    def getk(self):
        """Solve for the step-rate constant so mean birth size equals self.sb."""
        return opt.bisect(self.opti,0.001,1.5)

    def initialize_cells(self, V0array):
        """(Re)create the cell population, optionally from explicit sizes."""
        self.cells=[]
        if len(V0array)!=0:
            idx = 0
            for v in V0array:
                gr = self.newgr(self.CV2gr)
                divpar = self.newdivpar(self.CV2div)
                cell = Cell(idx, v, num_steps=self.total_steps, gr=gr, divpar=divpar, k = gr)
                cell.nextt = self.nextt(v,cell.rv,cell)
                self.cells.append(cell)
                idx += 1
        else:
            for i in range(self.n):
                gr = self.newgr(self.CV2gr)
                divpar = self.newdivpar(self.CV2div)
                cell = Cell(i, self.sb, num_steps=self.total_steps, gr = gr, divpar = divpar, k = gr)
                cell.nextt = self.nextt(self.sb,cell.rv,cell)
                self.cells.append(cell)
        if self.n>10:
            print("Cells initialized")

    def open_file(self, nameCRM="./dataCRM.csv"):
        """Open the size-dynamics CSV and write its header plus the t=0 row."""
        self.output = ""
        self.file = open(nameCRM, "w")
        self.output += "time,"
        kk=1
        for idx in range(len(self.cells)):
            if kk<len(self.cells):
                self.output += "Cell"+str(idx+1)+","
            else:
                self.output += "Cell"+str(idx+1)
            kk+=1
        self.output += "\n"
        self.file.write(self.output)
        self.output = ""
        self.output += str(0.00)+","
        kk=1
        for cell in self.cells:
            if kk<len(self.cells):
                self.output += str(self.truncate(cell.get_size(), 4))+","
            else:
                self.output += str(self.truncate(cell.get_size(), 4))
            kk+=1
        self.output += "\n"
        self.file.write(self.output)

    def simulate(self,tmax):
        """Advance every cell by tmax, firing division steps as they come due."""
        for cell in self.cells:
            t=0
            while t<tmax:
                tt = cell.nextt
                if ((t+tt) <= tmax):
                    # The step completes inside this window.
                    cell.num_steps += 1
                    Vn=cell.V*np.exp(self.gr*cell.gr*tt)
                    if cell.num_steps >= cell.total_steps:
                        # All steps done: divide with fresh noise draws.
                        dp = self.newdivpar(self.CV2div)
                        gr = self.newgr(self.CV2gr)
                        cell.division(Vn,dp,gr,k=gr)
                    else:
                        cell.change(Vn)
                    cell.nextt = self.nextt(cell.V,cell.rv,cell)
                else:
                    # Step does not fit: grow to the window edge and carry
                    # the remaining waiting time over.
                    Vn = cell.V*np.exp(self.gr*cell.gr*(tmax-t))
                    cell.change(Vn)
                    cell.nextt = cell.nextt - (tmax-t)
                t += tt

    def divstrat(self, tmax, sample_time, nameDSM = "./dataDSM.csv"):
        """Record (birth size, division size, time) for every division event."""
        self.initialize_cells(self.V0arr) #Initialize cells
        self.file_size = open(nameDSM, "w")
        self.file_size.write("S_b,S_d,time\n")
        self.smplt = sample_time
        self.time = 0
        self.open_file()
        self.time = 0
        divarray = np.array([])
        tgt = (tmax/10)
        cnt = 0
        for i in range(len(self.cells)):
            divarray = np.concatenate((divarray,[self.get_ndiv(i)]),axis=0)
        while self.time<tmax:
            self.simulate(self.smplt)
            cnt2 = 0
            self.time += self.smplt
            line = ""
            for cell in self.cells:
                # FIX: the original tested self.get_ndiv(i), where i was the
                # stale index left over from the initialization loop above
                # (always the LAST cell), so every cell's output was gated on
                # the wrong cell's division count.  Use the current index cnt2.
                if self.get_ndiv(cnt2) > divarray[cnt2]:
                    line+=str(self.truncate(cell.Vb, 4))+","+str(self.truncate(cell.Vd, 4))+","+str(self.truncate(self.time, 4))+"\n "
                    divarray[cnt2] = self.get_ndiv(cnt2)
                cnt2+=1
            self.file_size.write(line)
            cnt +=self.smplt
            if cnt >= tgt:
                # int(): np.int was removed in NumPy 1.24.
                print(str(int(100*self.time/tmax))+"%")
                cnt = 0
        self.file_size.close()

    def szdyn(self, tmax, sample_time, nameCRM = "./dataCRM.csv"):
        """Simulate until tmax, writing every cell's size each sample period."""
        self.initialize_cells(self.V0arr) #Initialize cells
        self.open_file(nameCRM = nameCRM)
        self.smplt = sample_time
        self.time = 0
        cnt = 0
        tgt = 0
        while self.time<tmax:
            self.simulate(self.smplt)
            self.time += self.smplt
            self.output = ""
            self.output += str(self.time)+","
            kk=1
            for cell in self.cells:
                if kk<len(self.cells):
                    self.output += str(self.truncate(cell.get_size(), 4))+","
                else:
                    self.output += str(self.truncate(cell.get_size(), 4))
                kk+=1
            self.output += "\n"
            cnt +=self.smplt
            if cnt >= tgt:
                # int(): np.int was removed in NumPy 1.24.
                print(str(int(100*self.time/tmax))+"%")
                tgt += (tmax/10)
            self.file.write(self.output)
        self.file.close()

    def du(self,u,sb,t,dt):
        """One explicit-Euler increment of the step-occupancy master equation.

        u holds the probability of having completed 0..N steps for a cell born
        at sb; size at time t is sb*exp(mu*t).
        """
        mu=self.gr
        lamb=self.l
        k=self.K
        v=np.zeros_like(u)
        s=sb*np.exp(mu*t)
        for l in range(len(u)):
            if l==0:
                v[0]=(-k*(s**lamb)*u[0])*dt
            elif l==len(u)-1:
                # Last entry is absorbing (all steps completed).
                v[len(u)-1]=(k*(s**lamb)*u[len(u)-2])*dt
            elif l==len(u)-2:
                v[len(u)-2]=(-k*(s**lamb)*u[len(u)-2]+k*(s**lamb)*u[len(u)-3])*dt
            else:
                v[l]=(-k*(s**lamb)*u[l]+k*(s**lamb)*u[l-1])*dt
        return v

    def SdStat(self,sb):
        """Return (mean, CV2) of the division size for a cell born at sb,
        obtained by integrating the step master equation."""
        mu=self.gr
        tmax=5/self.gr
        dt=0.001/self.gr
        u=np.zeros(self.total_steps+1)
        t=0
        count=10
        plim=[]
        tarrayfsp=[]
        u[0]=1
        while t<tmax:
            u+=self.du(u,sb,t,dt)
            t+=dt
            count+=1
            if count>9:
                # Subsample the absorbing-state probability every 10 steps.
                plim.append(u[-1])
                tarrayfsp.append(t)
                count=0
        tt=np.array(tarrayfsp)
        h=tt[1]-tt[0]
        rhot=np.diff(plim)/h
        trho=0.5*(tt[1:] + tt[:-1])
        sarray=sb*np.exp(mu*tt)
        ds=np.diff(sarray)
        ss=0.5*(sarray[1:] + sarray[:-1])
        # NOTE(review): this double assignment overwrites rhot computed above;
        # only rhos (density over size) is used below -- confirm intent.
        rhos=rhot=np.diff(plim)/ds
        mn=np.trapz(rhos*ss,x=ss)
        var=np.trapz(rhos*(ss)**2,x=ss)
        CV2=(var-mn**2)/(mn-sb)**2
        return mn,CV2

    def szdynFSP(self, tmax, CV2sz = 0, nameFSP = "./dataFSP.csv"):
        """Compute mean and variance of size over time by finite state
        projection, optionally averaging over a gamma-distributed initial size
        (CV2sz), and write time,Meansize,VarSize rows to nameFSP."""
        file = open(nameFSP, "w")  # NOTE: shadows the builtin `file` name (py2 habit)
        output = "time,Meansize,VarSize\n"
        nsteps=self.total_steps
        gr=self.gr
        k=self.K
        lamb=self.l
        tmax=tmax
        ndivs=int(1.5*tmax*self.gr/np.log(2))
        dt=0.0001*np.log(2)/self.gr
        if CV2sz==0:
            s0arr=[self.V]
        else:
            # Gamma quadrature grid over plausible initial sizes plus weights.
            s0arr = np.linspace(gamma.ppf(0.001,a=1/CV2sz,scale=self.V*CV2sz),
                gamma.ppf(0.999, a=1/CV2sz,scale=self.V*CV2sz), 30)
            dx=(s0arr[1]-s0arr[0])
            wgs=[]
            for l in s0arr:
                wgs.append((gamma.cdf(l+dx/2,a=1/CV2sz,scale=self.V*CV2sz)-gamma.cdf(l-dx/2,a=1/CV2sz,scale=self.V*CV2sz))/dx)
        allp=np.zeros([ndivs,len(s0arr),1000])
        obj=0
        countv0=0
        for v0 in s0arr:
            if obj%3==2:
                # int(): np.int was removed in NumPy 1.24.
                print(str(int(100*obj/30))+"%")
            obj+=1
            t=0
            steps=int(np.floor(tmax/dt))
            u=np.zeros([ndivs,nsteps])#(DIVS,STEPS)
            u[0]=np.zeros(nsteps)
            u[0][0]=1#P_00
            time=[]#time array
            count=int(np.floor(tmax/(dt*1000)))-1
            count2=0
            for l in range(steps):
                utemp=u
                for n in range(len(utemp)):#n=divs,
                    for m in range(len(utemp[n])):#m=steps
                        arg=lamb*(gr*t-n*np.log(2))
                        if (m==0):#m=steps
                            if(n==0):#n=divs
                                dun=-k*v0**lamb*np.exp(lamb*gr*t)*(utemp[0][0])
                                u[n][m]+=dun*dt
                            else:
                                dun=k*v0**lamb*np.exp(arg)*(2**lamb*utemp[n-1][len(utemp[n])-1]-utemp[n][0])
                                u[n][m]+=dun*dt
                        elif(m==len(utemp[n])-1):
                            if(n==len(utemp)-1):
                                dun=k*v0**lamb*np.exp(arg)*(utemp[n][len(utemp[n])-2])
                                u[n][m]+=dun*dt
                            else:
                                dun=k*v0**lamb*np.exp(arg)*(utemp[n][m-1]-utemp[n][m])
                                u[n][m]+=dun*dt
                        else:
                            dun=k*v0**lamb*np.exp(arg)*(utemp[n][m-1]-utemp[n][m])
                            u[n][m]+=dun*dt
                t+=dt
                count=count+1
                if count==int(np.floor(tmax/(dt*1000))):
                    # Record the marginal division-number distribution.
                    time.append(t)
                    mean=0
                    for ii in range(len(allp)):
                        allp[ii][countv0][count2]=np.sum(u[ii])
                    count=0
                    count2+=1
            countv0=countv0+1
        if CV2sz==0:
            # Single initial size: plain sums over division numbers.
            fullmeansz=[]
            fullvarsz=[]
            fulltime=[]
            t=0
            dt=tmax/1000
            for ll in range(len(allp[0][0])):
                ms=0
                for ctv0 in range(len(s0arr)):
                    tempms=0
                    for ii in range(ndivs):
                        arg=gr*t-np.log(2)*ii
                        tempms+=np.exp(arg)*allp[ii][ctv0][ll]
                    ms+=s0arr[ctv0]*tempms
                fullmeansz.append(ms)
                mvar=0
                for ctv0 in range(len(s0arr)):
                    tempms=0
                    for ii in range(ndivs):
                        arg=gr*t-np.log(2)*ii
                        tempms+=(ms-s0arr[ctv0]*np.exp(arg))**2*allp[ii][ctv0][ll]
                    mvar+=tempms
                fullvarsz.append(mvar)
                fulltime.append(t)
                t+=dt
        else:
            # Gamma-distributed initial size: weight by wgs[ctv0]*dx.
            fullmeansz=[]
            fullvarsz=[]
            fulltime=[]
            t=0
            dt=tmax/1000
            for ll in range(len(allp[0][0])):
                ms=0
                for ctv0 in range(len(s0arr)):
                    tempms=0
                    for ii in range(ndivs):
                        arg=gr*t-np.log(2)*ii
                        tempms+=np.exp(arg)*allp[ii][ctv0][ll]
                    ms+=s0arr[ctv0]*tempms*wgs[ctv0]*dx
                fullmeansz.append(ms)
                mvar=0
                for ctv0 in range(len(s0arr)):
                    tempms=0
                    for ii in range(ndivs):
                        arg=gr*t-np.log(2)*ii
                        tempms+=(ms-s0arr[ctv0]*np.exp(arg))**2*allp[ii][ctv0][ll]
                    mvar+=tempms*wgs[ctv0]*dx
                fullvarsz.append(mvar)
                fulltime.append(t)
                t+=dt
        for m in range(len(fullmeansz)):
            output += str(fulltime[m])+","+str(fullmeansz[m])+","+str(fullvarsz[m])+"\n"
        file.write(output)

    def get_sz(self, n, cells=[]):
        """Size of cell n (from `cells` if given, else the internal list)."""
        if len(cells) > 0:
            return cells[n].V
        else:
            return self.cells[n].V

    def get_ndiv(self, n, cells=[]):
        """Division count of cell n."""
        if len(cells) > 0:
            return cells[n].ndiv
        else:
            return self.cells[n].ndiv

    def get_gr(self, n, cells=[]):
        """Growth-rate multiplier of cell n."""
        if len(cells) > 0:
            return cells[n].gr
        else:
            return self.cells[n].gr

    def get_dp(self, n, cells=[]):
        """Division fraction of cell n."""
        if len(cells) > 0:
            return cells[n].dp
        else:
            return self.cells[n].dp

    def get_next_t(self, n, cells=[]):
        """Pending time to the next division step of cell n."""
        if len(cells) > 0:
            return cells[n].nextt
        else:
            return self.cells[n].nextt

    def truncate(self, num, ciphers):
        """Truncate num (toward zero) to the given number of decimal places."""
        pos = pow(10.0, ciphers)
        return math.trunc(pos * num)/pos

    def __str__(self):
        """Return a readable summary of the simulation parameters and cells.

        FIX: the original referenced self.total_time, self.dt and self.alpha,
        none of which is ever set in this class, so printing a Simulator
        always raised AttributeError.  Report existing attributes instead.
        """
        out = ("Initial Params: {\n sample time: " + str(self.smplt)
               + ", \n ncells: " + str(self.n)
               + ", \n growth rate: " + str(self.gr)
               + ", \n k: " + str(self.K) + "\n}")
        for cell in self.cells:
            out += str(cell) + "\n"
        return out
| [
"numpy.sum",
"numpy.floor",
"numpy.random.gamma",
"numpy.exp",
"numpy.zeros_like",
"scipy.stats.gamma.ppf",
"numpy.int",
"math.trunc",
"cell.Cell",
"numpy.trapz",
"scipy.stats.gamma.pdf",
"scipy.stats.gamma.cdf",
"scipy.integrate.quad",
"numpy.random.beta",
"numpy.log",
"numpy.zeros",
... | [((3174, 3207), 'scipy.optimize.bisect', 'opt.bisect', (['self.opti', '(0.001)', '(1.5)'], {}), '(self.opti, 0.001, 1.5)\n', (3184, 3207), True, 'from scipy import optimize as opt\n'), ((6074, 6086), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6082, 6086), True, 'import numpy as np\n'), ((7971, 7987), 'numpy.zeros_like', 'np.zeros_like', (['u'], {}), '(u)\n', (7984, 7987), True, 'import numpy as np\n'), ((8512, 8542), 'numpy.zeros', 'np.zeros', (['(self.total_steps + 1)'], {}), '(self.total_steps + 1)\n', (8520, 8542), True, 'import numpy as np\n'), ((8847, 8866), 'numpy.array', 'np.array', (['tarrayfsp'], {}), '(tarrayfsp)\n', (8855, 8866), True, 'import numpy as np\n'), ((8997, 9012), 'numpy.diff', 'np.diff', (['sarray'], {}), '(sarray)\n', (9004, 9012), True, 'import numpy as np\n'), ((9101, 9126), 'numpy.trapz', 'np.trapz', (['(rhos * ss)'], {'x': 'ss'}), '(rhos * ss, x=ss)\n', (9109, 9126), True, 'import numpy as np\n'), ((9136, 9166), 'numpy.trapz', 'np.trapz', (['(rhos * ss ** 2)'], {'x': 'ss'}), '(rhos * ss ** 2, x=ss)\n', (9144, 9166), True, 'import numpy as np\n'), ((2117, 2158), 'numpy.random.gamma', 'np.random.gamma', ([], {'shape': '(1 / CV2)', 'scale': 'CV2'}), '(shape=1 / CV2, scale=CV2)\n', (2132, 2158), True, 'import numpy as np\n'), ((2297, 2327), 'numpy.random.beta', 'np.random.beta', ([], {'a': 'beta', 'b': 'beta'}), '(a=beta, b=beta)\n', (2311, 2327), True, 'import numpy as np\n'), ((2628, 2659), 'scipy.optimize.bisect', 'opt.bisect', (['root', '(1e-05)', '(100000)'], {}), '(root, 1e-05, 100000)\n', (2638, 2659), True, 'from scipy import optimize as opt\n'), ((2823, 2857), 'scipy.integrate.quad', 'integrate.quad', (['moment', 'sb', 'np.inf'], {}), '(moment, sb, np.inf)\n', (2837, 2857), False, 'from scipy import integrate\n'), ((8001, 8015), 'numpy.exp', 'np.exp', (['(mu * t)'], {}), '(mu * t)\n', (8007, 8015), True, 'import numpy as np\n'), ((8902, 8915), 'numpy.diff', 'np.diff', (['plim'], {}), '(plim)\n', (8909, 8915), True, 
'import numpy as np\n'), ((8972, 8987), 'numpy.exp', 'np.exp', (['(mu * tt)'], {}), '(mu * tt)\n', (8978, 8987), True, 'import numpy as np\n'), ((9073, 9086), 'numpy.diff', 'np.diff', (['plim'], {}), '(plim)\n', (9080, 9086), True, 'import numpy as np\n'), ((10246, 10271), 'numpy.zeros', 'np.zeros', (['[ndivs, nsteps]'], {}), '([ndivs, nsteps])\n', (10254, 10271), True, 'import numpy as np\n'), ((10301, 10317), 'numpy.zeros', 'np.zeros', (['nsteps'], {}), '(nsteps)\n', (10309, 10317), True, 'import numpy as np\n'), ((14833, 14854), 'math.trunc', 'math.trunc', (['(pos * num)'], {}), '(pos * num)\n', (14843, 14854), False, 'import math\n'), ((3048, 3063), 'scipy.stats.gamma.pdf', 'gamma.pdf', (['x', 'n'], {}), '(x, n)\n', (3057, 3063), False, 'from scipy.stats import gamma\n'), ((3469, 3537), 'cell.Cell', 'Cell', (['idx', 'v'], {'num_steps': 'self.total_steps', 'gr': 'gr', 'divpar': 'divpar', 'k': 'gr'}), '(idx, v, num_steps=self.total_steps, gr=gr, divpar=divpar, k=gr)\n', (3473, 3537), False, 'from cell import Cell\n'), ((3831, 3903), 'cell.Cell', 'Cell', (['i', 'self.sb'], {'num_steps': 'self.total_steps', 'gr': 'gr', 'divpar': 'divpar', 'k': 'gr'}), '(i, self.sb, num_steps=self.total_steps, gr=gr, divpar=divpar, k=gr)\n', (3835, 3903), False, 'from cell import Cell\n'), ((9510, 9519), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (9516, 9519), True, 'import numpy as np\n'), ((9539, 9548), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (9545, 9548), True, 'import numpy as np\n'), ((9651, 9702), 'scipy.stats.gamma.ppf', 'gamma.ppf', (['(0.001)'], {'a': '(1 / CV2sz)', 'scale': '(self.V * CV2sz)'}), '(0.001, a=1 / CV2sz, scale=self.V * CV2sz)\n', (9660, 9702), False, 'from scipy.stats import gamma\n'), ((9714, 9765), 'scipy.stats.gamma.ppf', 'gamma.ppf', (['(0.999)'], {'a': '(1 / CV2sz)', 'scale': '(self.V * CV2sz)'}), '(0.999, a=1 / CV2sz, scale=self.V * CV2sz)\n', (9723, 9765), False, 'from scipy.stats import gamma\n'), ((10213, 10232), 'numpy.floor', 'np.floor', 
(['(tmax / dt)'], {}), '(tmax / dt)\n', (10221, 10232), True, 'import numpy as np\n'), ((10398, 10426), 'numpy.floor', 'np.floor', (['(tmax / (dt * 1000))'], {}), '(tmax / (dt * 1000))\n', (10406, 10426), True, 'import numpy as np\n'), ((2483, 2492), 'numpy.log', 'np.log', (['r'], {}), '(r)\n', (2489, 2492), True, 'import numpy as np\n'), ((5148, 5178), 'numpy.exp', 'np.exp', (['(self.gr * cell.gr * tt)'], {}), '(self.gr * cell.gr * tt)\n', (5154, 5178), True, 'import numpy as np\n'), ((5581, 5619), 'numpy.exp', 'np.exp', (['(self.gr * cell.gr * (tmax - t))'], {}), '(self.gr * cell.gr * (tmax - t))\n', (5587, 5619), True, 'import numpy as np\n'), ((11718, 11746), 'numpy.floor', 'np.floor', (['(tmax / (dt * 1000))'], {}), '(tmax / (dt * 1000))\n', (11726, 11746), True, 'import numpy as np\n'), ((11905, 11918), 'numpy.sum', 'np.sum', (['u[ii]'], {}), '(u[ii])\n', (11911, 11918), True, 'import numpy as np\n'), ((6815, 6845), 'numpy.int', 'np.int', (['(100 * self.time / tmax)'], {}), '(100 * self.time / tmax)\n', (6821, 6845), True, 'import numpy as np\n'), ((7743, 7773), 'numpy.int', 'np.int', (['(100 * self.time / tmax)'], {}), '(100 * self.time / tmax)\n', (7749, 7773), True, 'import numpy as np\n'), ((9876, 9932), 'scipy.stats.gamma.cdf', 'gamma.cdf', (['(l + dx / 2)'], {'a': '(1 / CV2sz)', 'scale': '(self.V * CV2sz)'}), '(l + dx / 2, a=1 / CV2sz, scale=self.V * CV2sz)\n', (9885, 9932), False, 'from scipy.stats import gamma\n'), ((9923, 9979), 'scipy.stats.gamma.cdf', 'gamma.cdf', (['(l - dx / 2)'], {'a': '(1 / CV2sz)', 'scale': '(self.V * CV2sz)'}), '(l - dx / 2, a=1 / CV2sz, scale=self.V * CV2sz)\n', (9932, 9979), False, 'from scipy.stats import gamma\n'), ((10131, 10153), 'numpy.int', 'np.int', (['(100 * obj / 30)'], {}), '(100 * obj / 30)\n', (10137, 10153), True, 'import numpy as np\n'), ((12409, 12420), 'numpy.exp', 'np.exp', (['arg'], {}), '(arg)\n', (12415, 12420), True, 'import numpy as np\n'), ((13317, 13328), 'numpy.exp', 'np.exp', (['arg'], {}), 
'(arg)\n', (13323, 13328), True, 'import numpy as np\n'), ((12364, 12373), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (12370, 12373), True, 'import numpy as np\n'), ((12697, 12706), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (12703, 12706), True, 'import numpy as np\n'), ((13272, 13281), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (13278, 13281), True, 'import numpy as np\n'), ((13618, 13627), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (13624, 13627), True, 'import numpy as np\n'), ((10658, 10667), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (10664, 10667), True, 'import numpy as np\n'), ((10805, 10826), 'numpy.exp', 'np.exp', (['(lamb * gr * t)'], {}), '(lamb * gr * t)\n', (10811, 10826), True, 'import numpy as np\n'), ((10966, 10977), 'numpy.exp', 'np.exp', (['arg'], {}), '(arg)\n', (10972, 10977), True, 'import numpy as np\n'), ((11552, 11563), 'numpy.exp', 'np.exp', (['arg'], {}), '(arg)\n', (11558, 11563), True, 'import numpy as np\n'), ((12758, 12769), 'numpy.exp', 'np.exp', (['arg'], {}), '(arg)\n', (12764, 12769), True, 'import numpy as np\n'), ((13679, 13690), 'numpy.exp', 'np.exp', (['arg'], {}), '(arg)\n', (13685, 13690), True, 'import numpy as np\n'), ((11222, 11233), 'numpy.exp', 'np.exp', (['arg'], {}), '(arg)\n', (11228, 11233), True, 'import numpy as np\n'), ((11391, 11402), 'numpy.exp', 'np.exp', (['arg'], {}), '(arg)\n', (11397, 11402), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding=utf-8 -*-
import cv2 as cv
import numpy as np
"""
模板匹配:
模板匹配被称为最简单的模式识别方式,模板匹配的工作条件严苛,因为其并不是基于特征的匹配,需要光照、背景、干扰一致
的情况下才能更好的工作,在工业、屏幕内容识别上运用广泛。
cv.matchTemplate(image, templ, result, method, mask)
- image : 输入进行匹配的图像
- templ : 模板图像
- result : 匹配结果集
- method : 匹配方法
- mask : 二值图遮罩
匹配方法集如下:
- TM_SQDIFF = 0
- TM_SQDIFF_NORMED = 1 # 平方不同与其归一化,值越小相关性越高,匹配程度越高
- TM_CCORR = 2
- TM_CCORR_NORMED = 3 # 相关性匹配,值越大相关性越强,匹配程度越高;Normed表示归一化,1表示高度匹配
- TM_CCOEFF = 4
- TM_CCOEFF_NORMED = 5 # 相关因子匹配,值越大相关性越强,匹配程度越高;Normed表示归一化,1表示高度匹配
"""
def template_match(image, template, threshold=0.952):
    """Find and highlight every occurrence of `template` inside `image`.

    image     -- search image; matched regions are boxed in red IN PLACE
    template  -- template patch to look for
    threshold -- minimum normalized cross-correlation score (TM_CCORR_NORMED)
                 for a location to count as a match; the default 0.952 keeps
                 the behaviour of the original hard-coded value.

    Shows the raw score map ("result") and the annotated image ("llk-dst").
    """
    th, tw = template.shape[:2]  # template height and width
    result = cv.matchTemplate(image, template, cv.TM_CCORR_NORMED)
    cv.imshow("result", result)
    loc = np.where(result > threshold)
    # np.where returns (rows, cols); reverse to iterate (x, y) points.
    for pt in zip(*loc[::-1]):
        cv.rectangle(image, pt, (pt[0] + tw, pt[1] + th), (0, 0, 255), 1, cv.LINE_8, 0)
    cv.imshow("llk-dst", image)
def main():
    """Load the demo scene and template, run matching, and wait for a key."""
    scene = cv.imread("../../pic/sw/sw_game_duiduipen.png")
    cv.imshow("input image", scene)
    patch = cv.imread("../../pic/sw/temp_sw_game_blue.png")
    cv.imshow("find this", patch)
    template_match(scene, patch)
    cv.waitKey(0)
    cv.destroyAllWindows()
if __name__ == "__main__":
    # Run the demo only when executed as a script, not on import.
    main()
| [
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.imread",
"numpy.where",
"cv2.rectangle",
"cv2.imshow",
"cv2.matchTemplate"
] | [((895, 948), 'cv2.matchTemplate', 'cv.matchTemplate', (['image', 'template', 'cv.TM_CCORR_NORMED'], {}), '(image, template, cv.TM_CCORR_NORMED)\n', (911, 948), True, 'import cv2 as cv\n'), ((954, 981), 'cv2.imshow', 'cv.imshow', (['"""result"""', 'result'], {}), "('result', result)\n", (963, 981), True, 'import cv2 as cv\n'), ((1016, 1044), 'numpy.where', 'np.where', (['(result > threshold)'], {}), '(result > threshold)\n', (1024, 1044), True, 'import numpy as np\n'), ((1171, 1198), 'cv2.imshow', 'cv.imshow', (['"""llk-dst"""', 'image'], {}), "('llk-dst', image)\n", (1180, 1198), True, 'import cv2 as cv\n'), ((1233, 1280), 'cv2.imread', 'cv.imread', (['"""../../pic/sw/sw_game_duiduipen.png"""'], {}), "('../../pic/sw/sw_game_duiduipen.png')\n", (1242, 1280), True, 'import cv2 as cv\n'), ((1286, 1321), 'cv2.imshow', 'cv.imshow', (['"""input image"""', 'src_input'], {}), "('input image', src_input)\n", (1295, 1321), True, 'import cv2 as cv\n'), ((1338, 1385), 'cv2.imread', 'cv.imread', (['"""../../pic/sw/temp_sw_game_blue.png"""'], {}), "('../../pic/sw/temp_sw_game_blue.png')\n", (1347, 1385), True, 'import cv2 as cv\n'), ((1391, 1423), 'cv2.imshow', 'cv.imshow', (['"""find this"""', 'src_temp'], {}), "('find this', src_temp)\n", (1400, 1423), True, 'import cv2 as cv\n'), ((1470, 1483), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (1480, 1483), True, 'import cv2 as cv\n'), ((1489, 1511), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (1509, 1511), True, 'import cv2 as cv\n'), ((1086, 1165), 'cv2.rectangle', 'cv.rectangle', (['image', 'pt', '(pt[0] + tw, pt[1] + th)', '(0, 0, 255)', '(1)', 'cv.LINE_8', '(0)'], {}), '(image, pt, (pt[0] + tw, pt[1] + th), (0, 0, 255), 1, cv.LINE_8, 0)\n', (1098, 1165), True, 'import cv2 as cv\n')] |
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from tensorflow import keras
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers
from tensorflow.keras import Model
from keras.optimizers import Adam
from keras.models import load_model
# Real-time webcam classification: detect faces with a Haar cascade and feed
# 96x96 grayscale input to a pre-trained Keras model loaded from model.h5.
model = load_model('model.h5')
# Haar cascade for frontal-face detection (OpenCV XML file expected in CWD)
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
base = 'images'
train_set = os.path.join(base,'train')
# The generator is only used to recover the class-name -> index mapping
# (train.class_indices); no training happens in this script
train_generator = ImageDataGenerator(rescale = 1./255)
train = train_generator.flow_from_directory(train_set,
                                 target_size = (96,96),
                                 color_mode = 'grayscale',
                                 batch_size = 64,
                                 class_mode = 'categorical')
video = cv2.VideoCapture(0)  # default webcam
while True:
    ret, frame = video.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors= 5,
        minSize=(30, 30)
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 255), 2)
        # NOTE(review): the WHOLE grayscale frame is resized and classified,
        # not the detected face ROI (gray[y:y+h, x:x+w]) -- confirm intent
        resized = cv2.resize(gray, (96,96))
        resized = np.reshape(resized/255,(1,96,96,1))
        output = model.predict(resized)
        output = np.argmax(output.flatten())
        # Invert class_indices (name -> idx) into idx -> name for display
        # NOTE(review): rebuilt on every detection -- could be hoisted out
        labels = (train.class_indices)
        labels = dict((v,k) for k,v in labels.items())
        maximum = labels[output]
        cv2.putText(frame, maximum, (x,y-20), cv2.FONT_HERSHEY_SIMPLEX,1, (0,255,255),1 )
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.release()
cv2.destroyAllWindows()
| [
"keras.models.load_model",
"keras.preprocessing.image.ImageDataGenerator",
"cv2.putText",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.rectangle",
"numpy.reshape",
"cv2.CascadeClassifier",
"cv2.destroyAllWindows",
"os.path.join",
"cv2.resize"
] | [((343, 365), 'keras.models.load_model', 'load_model', (['"""model.h5"""'], {}), "('model.h5')\n", (353, 365), False, 'from keras.models import load_model\n'), ((380, 440), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_default.xml"""'], {}), "('haarcascade_frontalface_default.xml')\n", (401, 440), False, 'import cv2\n'), ((470, 497), 'os.path.join', 'os.path.join', (['base', '"""train"""'], {}), "(base, 'train')\n", (482, 497), False, 'import os\n'), ((515, 552), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (533, 552), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((882, 901), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (898, 901), False, 'import cv2\n'), ((1741, 1764), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1762, 1764), False, 'import cv2\n'), ((958, 997), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (970, 997), False, 'import cv2\n'), ((1641, 1667), 'cv2.imshow', 'cv2.imshow', (['"""Video"""', 'frame'], {}), "('Video', frame)\n", (1651, 1667), False, 'import cv2\n'), ((1176, 1238), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(255, 0, 255)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (255, 0, 255), 2)\n', (1189, 1238), False, 'import cv2\n'), ((1253, 1279), 'cv2.resize', 'cv2.resize', (['gray', '(96, 96)'], {}), '(gray, (96, 96))\n', (1263, 1279), False, 'import cv2\n'), ((1297, 1338), 'numpy.reshape', 'np.reshape', (['(resized / 255)', '(1, 96, 96, 1)'], {}), '(resized / 255, (1, 96, 96, 1))\n', (1307, 1338), True, 'import numpy as np\n'), ((1554, 1646), 'cv2.putText', 'cv2.putText', (['frame', 'maximum', '(x, y - 20)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 255, 255)', '(1)'], {}), '(frame, maximum, (x, y - 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, \n 255, 255), 1)\n', (1565, 1646), 
False, 'import cv2\n'), ((1675, 1689), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1686, 1689), False, 'import cv2\n')] |
import typing
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pyextremes.plotting.style import pyextremes_rc
def plot_extremes(
    ts: pd.Series,
    extremes: pd.Series,
    extremes_method: str,
    extremes_type: typing.Optional[str] = None,
    block_size: typing.Optional[typing.Union[str, pd.Timedelta]] = None,
    threshold: typing.Optional[float] = None,
    figsize: tuple = (8, 5),
    ax: typing.Optional[plt.Axes] = None,
) -> typing.Tuple[plt.Figure, plt.Axes]:
    """
    Plot extreme events.
    Parameters
    ----------
    ts : pandas.Series
        Time series from which `extremes` were extracted.
    extremes : pandas.Series
        Time series of extreme events.
    extremes_method : str
        Extreme value extraction method.
        Supported values:
            BM - Block Maxima
            POT - Peaks Over Threshold
    extremes_type : str, optional
        Type of `extremes`, used only if `extremes_method` is 'POT'
        and `threshold` is not provided.
            high - extreme high values
            low - extreme low values
    block_size : str or pandas.Timedelta, optional
        Block size, used only if `extremes_method` is 'BM'.
        If None (default), then calculated as median distance between extreme events.
    threshold : float, optional
        Threshold, used only if `extremes_method` is 'POT'.
        If None (default), then is inferred from `extremes` as
        minimum if `extremes_type` is 'high' or maximum if `extremes_type` is 'low'.
    figsize : tuple, optional
        Figure size in inches in format (width, height).
        By default it is (8, 5).
    ax : matplotlib.axes._axes.Axes, optional
        Axes onto which extremes plot is drawn.
        If None (default), a new figure and axes objects are created.
    Returns
    -------
    figure : matplotlib.figure.Figure
        Figure object.
    axes : matplotlib.axes._axes.Axes
        Axes object.
    """
    with plt.rc_context(rc=pyextremes_rc):
        # Create figure
        if ax is None:
            fig, ax = plt.subplots(figsize=figsize, dpi=96)
        else:
            try:
                fig = ax.figure
            except AttributeError as _error:
                raise TypeError(
                    f"invalid type in {type(ax)} for the 'ax' argument, "
                    f"must be matplotlib Axes object"
                ) from _error
        # Configure axes
        ax.grid(False)
        # Plot signal time series
        ax.plot(ts.index, ts.values, ls="-", color="#5199FF", lw=0.25, zorder=10)
        # Plot extreme events
        ax.scatter(
            extremes.index,
            extremes.values,
            s=20,
            lw=0.5,
            edgecolor="w",
            facecolor="#F85C50",
            zorder=20,
        )
        # Label the axes
        ax.set_xlabel(extremes.index.name or "date-time")
        ax.set_ylabel(extremes.name or "Extreme value")
        if extremes_method == "BM":
            # Infer 'block_size'
            if block_size is None:
                # Calculate 'block_size' as median of distances between extremes
                block_size = pd.to_timedelta(
                    np.quantile(
                        np.diff(extremes.index),
                        0.5,
                    )
                )
            else:
                if not isinstance(block_size, pd.Timedelta):
                    if isinstance(block_size, str):
                        block_size = pd.to_timedelta(block_size)
                    else:
                        raise TypeError(
                            f"invalid type in {type(block_size)} "
                            f"for the 'block_size' argument"
                        )
            # Plot block boundaries
            block_left_boundary = ts.index[0]
            while block_left_boundary < ts.index.max() + block_size:
                ax.axvline(
                    block_left_boundary, ls="--", lw=0.5, color="#D1D3D4", zorder=5
                )
                block_left_boundary += block_size
        elif extremes_method == "POT":
            # Parse 'threshold'
            if threshold is None:
                if extremes_type is None:
                    raise TypeError(
                        "'extremes_type' argument must be provided "
                        "for 'extremes_method' being 'POT' "
                        "when 'threshold' is not provided"
                    )
                elif extremes_type == "high":
                    threshold = extremes.min()
                elif extremes_type == "low":
                    threshold = extremes.max()
                else:
                    raise ValueError(
                        f"invalid value in '{extremes_type}' "
                        f"for the 'extremes_type' argument"
                    )
            # Plot threshold line
            ax.axhline(threshold, ls="--", lw=1, color="#FF756B", zorder=15)
        else:
            # Fixed typo: the message previously read "'extremes_method argument"
            # (missing closing apostrophe)
            raise ValueError(
                f"invalid value in '{extremes_method}' "
                f"for the 'extremes_method' argument"
            )
    return fig, ax
| [
"numpy.diff",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.rc_context",
"pandas.to_timedelta"
] | [((1983, 2015), 'matplotlib.pyplot.rc_context', 'plt.rc_context', ([], {'rc': 'pyextremes_rc'}), '(rc=pyextremes_rc)\n', (1997, 2015), True, 'import matplotlib.pyplot as plt\n'), ((2086, 2123), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize', 'dpi': '(96)'}), '(figsize=figsize, dpi=96)\n', (2098, 2123), True, 'import matplotlib.pyplot as plt\n'), ((3257, 3280), 'numpy.diff', 'np.diff', (['extremes.index'], {}), '(extremes.index)\n', (3264, 3280), True, 'import numpy as np\n'), ((3519, 3546), 'pandas.to_timedelta', 'pd.to_timedelta', (['block_size'], {}), '(block_size)\n', (3534, 3546), True, 'import pandas as pd\n')] |
#=============================================================================
# PREDICTING THE PRICE OF PREOWNED CARS
#=============================================================================
import pandas as pd
import numpy as np
import seaborn as sns
# IMPORTING OS
import os
os.chdir("C:\\Users\\indra\\Documents\\DataScience_ML\\github_linkedin")
# READ CSV
data=pd.read_csv("cars_sampled.csv");
# CREATING A COPY FOR THE DATA
df=data.copy(deep=True)
df.info()
# CHECKING MEAN, MEDIAN, MODE OF EACH COLUMN OF THE DATA
# SUMMARIZING
df.describe()
# Show floats to 3 decimal places and all columns in the summary output
pd.set_option('display.float_format',lambda x:'%.3f' %x)
pd.set_option('display.max_columns',500)
df.describe()
# ==========================================================================
# DROPPING UNWANTED COLUMNS
# ==========================================================================
# Identifier/timestamp columns carry no predictive signal for price
cols=['name','dateCrawled','dateCreated','lastSeen']
df=df.drop(columns=cols,axis=1)
print(df.head())
#============================================================================
# DROPPING DUPLICATES
#============================================================================
df.drop_duplicates(keep='first',inplace=True)
print(df.head())
print(df.shape)
# ===========================================================================
# DATA CLEANING
# ============================================================================
# NO OF MISSING VALUES IN EACH COLUMN
print(df.isnull().sum())
# VARIABLE YEAR OF REGISTRATION
# NOTE(review): the bare sum(...) expressions below are computed but never
# stored or printed -- presumably run interactively to eyeball outlier counts
yearwise_counts=df['yearOfRegistration'].value_counts().sort_index()
sum(df['yearOfRegistration']>2018)
sum(df['yearOfRegistration']<1950)
sns.regplot(x='yearOfRegistration',y='price',scatter=True,fit_reg=False,data=df)
# VARIABLE PRICE
price_counts=df['price'].value_counts().sort_index()
sns.distplot(df['price'])
sns.boxplot(y=df['price'])
df['price'].describe()
sum(df['price']>150000)
sum(df['price']<100)
#VARIABLE POWER PS
power_count=df['powerPS'].value_counts().sort_index()
sns.distplot(df['powerPS'])
sns.boxplot(y=df['powerPS'])
df['powerPS'].describe()
sns.regplot(x='powerPS',y='price',scatter=True,fit_reg=False,data=df)
sum(df['powerPS']>500)
sum(df['powerPS']<10)
#==========================================================================
# DATA CLEANING
#==========================================================================
#KEEPING ONLY THE ROWS WITHIN THE WORKING RANGES FOUND ABOVE
df=df[(df.yearOfRegistration<=2018)
      & (df.yearOfRegistration>=1950)
      & (df.powerPS<=500)
      & (df.powerPS>=10)
      & (df.price>=100)
      & (df.price<=150000)]
# Convert months to a fraction of a year so Age = whole years + month fraction
df['monthOfRegistration']/=12
# CREATING NEW COLUMN AGE
df['Age']=(2018-df['yearOfRegistration'])+df['monthOfRegistration']
df['Age']=round(df['Age'],2)
df['Age'].describe()
# DROPPING YEAR AND MONTH OF REGISTRATION
df=df.drop(columns=['yearOfRegistration','monthOfRegistration'],axis=1)
print(df.shape)
#========================================================================
# VISUALISING PARAMETERS USING PLOTS
#========================================================================
# AGE
sns.distplot(df['Age'])
sns.boxplot(y=df['Age'])
# PRICE
sns.distplot(df['price'],bins=30)
# NOTE(review): this boxplot plots powerPS under the PRICE heading --
# likely a copy-paste slip; verify the intended column
sns.boxplot(y=df['powerPS'])
# POWERPS
sns.distplot(df['powerPS'])
sns.boxplot(y=df['powerPS'])
#PLOTTING TWO DIFFERENT VARIABLES
# POWERPS VS PRICE
sns.regplot(x=df['powerPS'],y=df['price'],fit_reg=True,scatter=True)
#===========================================================================
# DATA VISUALIZATION
#===========================================================================
# VARIABLE SELLER
df['seller'].value_counts()
pd.crosstab(df['seller'],columns='count',normalize=True)
sns.countplot(x=df['seller'])
# VARIABLE OFFERTYPE
pd.crosstab(df['offerType'],columns='count',normalize=False)
sns.countplot(x=df['offerType'])
# VARIABLE ABTEST
df['abtest'].value_counts()
sns.countplot(x=df['abtest'])
# VARIABLE VEHICLE
df['vehicleType'].value_counts()
sns.countplot(x=df['vehicleType'])
sns.boxplot(x=df['vehicleType'],y=df['price'])
# VARIABLE GEARBOX
df['gearbox'].value_counts()
sns.countplot(x=df['gearbox'])
sns.boxplot(x=df['gearbox'],y=df['price'])
# VARIABLE MODEL
df['model'].value_counts()
# VARIABLE KILOMETER
df['kilometer'].value_counts()
sns.countplot(x=df['kilometer'])
sns.boxplot(x=df['kilometer'],y=df['price'])
# VARIABLE FUELTYPE
df['fuelType'].value_counts()
sns.countplot(x=df['fuelType'])
sns.boxplot(x=df['fuelType'],y=df['price'])
# VARIABLE BRAND
df['brand'].value_counts()
sns.countplot(x=df['brand'])
sns.boxplot(x=df['brand'],y=df['price'])
# VARIABLE NOT REPAIRED DAMAGE
df['notRepairedDamage'].value_counts()
sns.countplot(x=df['notRepairedDamage'])
sns.boxplot(x=df['notRepairedDamage'],y=df['price'])
#===========================================================================
# REMOVING INSIGNIFICANT VARIABLES
#===========================================================================
# NOTE(review): dropped as insignificant per the exploration above --
# verify that abtest truly adds no signal before discarding it
cols=['seller','offerType','abtest']
df=df.drop(columns=cols,axis=1)
#=================================================================
# OMITTING MISSING VALUES
#=================================================================
df_omit=df.dropna(axis=0)
# CREATING DUMMY VARIABLES FOR CATEGORICAL VARIABLES
df_omit=pd.get_dummies(df_omit,drop_first=True)
#=================================================================
# MACHINE LEARNING
#=================================================================
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score
# MODEL BUILDING
x1=df_omit.drop(['price'],axis='columns',inplace=False)
y1=df_omit['price']
# Log-transform the target so the linear model fits the skewed prices better
y1=np.log(y1)
# SPLITTING INTO TEST TRAIN SETS
x_train,x_test,y_train,y_test=train_test_split(x1,y1,test_size=0.3,random_state=3)
print(x_train.shape,x_test.shape,y_train.shape,y_test.shape)
# LINEAR REGRESSION
model=LinearRegression()
model.fit(x_train,y_train)
# NOTE(review): `pred` is computed but never used; `model.score` returns the
# R^2 coefficient of determination, not a classification accuracy -- the
# printed "Accuracy" label is misleading
pred = model.predict(x_test)
acc=model.score(x_test,y_test)
print("Accuracy of this model: ",acc*100)
| [
"pandas.crosstab",
"numpy.log",
"pandas.read_csv",
"pandas.get_dummies",
"sklearn.model_selection.train_test_split",
"seaborn.regplot",
"sklearn.linear_model.LinearRegression",
"seaborn.boxplot",
"seaborn.distplot",
"seaborn.countplot",
"pandas.set_option",
"os.chdir"
] | [((286, 358), 'os.chdir', 'os.chdir', (['"""C:\\\\Users\\\\indra\\\\Documents\\\\DataScience_ML\\\\github_linkedin"""'], {}), "('C:\\\\Users\\\\indra\\\\Documents\\\\DataScience_ML\\\\github_linkedin')\n", (294, 358), False, 'import os\n'), ((376, 407), 'pandas.read_csv', 'pd.read_csv', (['"""cars_sampled.csv"""'], {}), "('cars_sampled.csv')\n", (387, 407), True, 'import pandas as pd\n'), ((564, 623), 'pandas.set_option', 'pd.set_option', (['"""display.float_format"""', "(lambda x: '%.3f' % x)"], {}), "('display.float_format', lambda x: '%.3f' % x)\n", (577, 623), True, 'import pandas as pd\n'), ((621, 662), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(500)'], {}), "('display.max_columns', 500)\n", (634, 662), True, 'import pandas as pd\n'), ((1633, 1721), 'seaborn.regplot', 'sns.regplot', ([], {'x': '"""yearOfRegistration"""', 'y': '"""price"""', 'scatter': '(True)', 'fit_reg': '(False)', 'data': 'df'}), "(x='yearOfRegistration', y='price', scatter=True, fit_reg=False,\n data=df)\n", (1644, 1721), True, 'import seaborn as sns\n'), ((1786, 1811), 'seaborn.distplot', 'sns.distplot', (["df['price']"], {}), "(df['price'])\n", (1798, 1811), True, 'import seaborn as sns\n'), ((1812, 1838), 'seaborn.boxplot', 'sns.boxplot', ([], {'y': "df['price']"}), "(y=df['price'])\n", (1823, 1838), True, 'import seaborn as sns\n'), ((1985, 2012), 'seaborn.distplot', 'sns.distplot', (["df['powerPS']"], {}), "(df['powerPS'])\n", (1997, 2012), True, 'import seaborn as sns\n'), ((2013, 2041), 'seaborn.boxplot', 'sns.boxplot', ([], {'y': "df['powerPS']"}), "(y=df['powerPS'])\n", (2024, 2041), True, 'import seaborn as sns\n'), ((2068, 2141), 'seaborn.regplot', 'sns.regplot', ([], {'x': '"""powerPS"""', 'y': '"""price"""', 'scatter': '(True)', 'fit_reg': '(False)', 'data': 'df'}), "(x='powerPS', y='price', scatter=True, fit_reg=False, data=df)\n", (2079, 2141), True, 'import seaborn as sns\n'), ((3090, 3113), 'seaborn.distplot', 'sns.distplot', (["df['Age']"], 
{}), "(df['Age'])\n", (3102, 3113), True, 'import seaborn as sns\n'), ((3114, 3138), 'seaborn.boxplot', 'sns.boxplot', ([], {'y': "df['Age']"}), "(y=df['Age'])\n", (3125, 3138), True, 'import seaborn as sns\n'), ((3148, 3182), 'seaborn.distplot', 'sns.distplot', (["df['price']"], {'bins': '(30)'}), "(df['price'], bins=30)\n", (3160, 3182), True, 'import seaborn as sns\n'), ((3182, 3210), 'seaborn.boxplot', 'sns.boxplot', ([], {'y': "df['powerPS']"}), "(y=df['powerPS'])\n", (3193, 3210), True, 'import seaborn as sns\n'), ((3222, 3249), 'seaborn.distplot', 'sns.distplot', (["df['powerPS']"], {}), "(df['powerPS'])\n", (3234, 3249), True, 'import seaborn as sns\n'), ((3250, 3278), 'seaborn.boxplot', 'sns.boxplot', ([], {'y': "df['powerPS']"}), "(y=df['powerPS'])\n", (3261, 3278), True, 'import seaborn as sns\n'), ((3333, 3404), 'seaborn.regplot', 'sns.regplot', ([], {'x': "df['powerPS']", 'y': "df['price']", 'fit_reg': '(True)', 'scatter': '(True)'}), "(x=df['powerPS'], y=df['price'], fit_reg=True, scatter=True)\n", (3344, 3404), True, 'import seaborn as sns\n'), ((3626, 3684), 'pandas.crosstab', 'pd.crosstab', (["df['seller']"], {'columns': '"""count"""', 'normalize': '(True)'}), "(df['seller'], columns='count', normalize=True)\n", (3637, 3684), True, 'import pandas as pd\n'), ((3683, 3712), 'seaborn.countplot', 'sns.countplot', ([], {'x': "df['seller']"}), "(x=df['seller'])\n", (3696, 3712), True, 'import seaborn as sns\n'), ((3736, 3798), 'pandas.crosstab', 'pd.crosstab', (["df['offerType']"], {'columns': '"""count"""', 'normalize': '(False)'}), "(df['offerType'], columns='count', normalize=False)\n", (3747, 3798), True, 'import pandas as pd\n'), ((3797, 3829), 'seaborn.countplot', 'sns.countplot', ([], {'x': "df['offerType']"}), "(x=df['offerType'])\n", (3810, 3829), True, 'import seaborn as sns\n'), ((3878, 3907), 'seaborn.countplot', 'sns.countplot', ([], {'x': "df['abtest']"}), "(x=df['abtest'])\n", (3891, 3907), True, 'import seaborn as sns\n'), ((3962, 3996), 
'seaborn.countplot', 'sns.countplot', ([], {'x': "df['vehicleType']"}), "(x=df['vehicleType'])\n", (3975, 3996), True, 'import seaborn as sns\n'), ((3997, 4044), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': "df['vehicleType']", 'y': "df['price']"}), "(x=df['vehicleType'], y=df['price'])\n", (4008, 4044), True, 'import seaborn as sns\n'), ((4094, 4124), 'seaborn.countplot', 'sns.countplot', ([], {'x': "df['gearbox']"}), "(x=df['gearbox'])\n", (4107, 4124), True, 'import seaborn as sns\n'), ((4125, 4168), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': "df['gearbox']", 'y': "df['price']"}), "(x=df['gearbox'], y=df['price'])\n", (4136, 4168), True, 'import seaborn as sns\n'), ((4269, 4301), 'seaborn.countplot', 'sns.countplot', ([], {'x': "df['kilometer']"}), "(x=df['kilometer'])\n", (4282, 4301), True, 'import seaborn as sns\n'), ((4302, 4347), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': "df['kilometer']", 'y': "df['price']"}), "(x=df['kilometer'], y=df['price'])\n", (4313, 4347), True, 'import seaborn as sns\n'), ((4399, 4430), 'seaborn.countplot', 'sns.countplot', ([], {'x': "df['fuelType']"}), "(x=df['fuelType'])\n", (4412, 4430), True, 'import seaborn as sns\n'), ((4431, 4475), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': "df['fuelType']", 'y': "df['price']"}), "(x=df['fuelType'], y=df['price'])\n", (4442, 4475), True, 'import seaborn as sns\n'), ((4521, 4549), 'seaborn.countplot', 'sns.countplot', ([], {'x': "df['brand']"}), "(x=df['brand'])\n", (4534, 4549), True, 'import seaborn as sns\n'), ((4550, 4591), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': "df['brand']", 'y': "df['price']"}), "(x=df['brand'], y=df['price'])\n", (4561, 4591), True, 'import seaborn as sns\n'), ((4663, 4703), 'seaborn.countplot', 'sns.countplot', ([], {'x': "df['notRepairedDamage']"}), "(x=df['notRepairedDamage'])\n", (4676, 4703), True, 'import seaborn as sns\n'), ((4704, 4757), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': "df['notRepairedDamage']", 'y': "df['price']"}), 
"(x=df['notRepairedDamage'], y=df['price'])\n", (4715, 4757), True, 'import seaborn as sns\n'), ((5268, 5308), 'pandas.get_dummies', 'pd.get_dummies', (['df_omit'], {'drop_first': '(True)'}), '(df_omit, drop_first=True)\n', (5282, 5308), True, 'import pandas as pd\n'), ((5806, 5816), 'numpy.log', 'np.log', (['y1'], {}), '(y1)\n', (5812, 5816), True, 'import numpy as np\n'), ((5882, 5937), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x1', 'y1'], {'test_size': '(0.3)', 'random_state': '(3)'}), '(x1, y1, test_size=0.3, random_state=3)\n', (5898, 5937), False, 'from sklearn.model_selection import train_test_split\n'), ((6025, 6043), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (6041, 6043), False, 'from sklearn.linear_model import LinearRegression\n')] |
import numpy as np
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + e^-x), element-wise for NumPy inputs."""
    exp_neg = np.exp(-x)
    return 1 / (1 + exp_neg)
def tanh(x):
    """Hyperbolic tangent activation, element-wise via NumPy."""
    return np.tanh(x)
def identity(x):
    """Identity activation: returns its input unchanged."""
    return x
def sin(x):
    """Sine activation, element-wise via NumPy."""
    return np.sin(x)
def relu(x):
    """Rectified linear unit: element-wise maximum of x and 0."""
    return np.maximum(x, 0)
def cos(x):
    """Cosine activation, element-wise via NumPy."""
    return np.cos(x)
def gaussian(x):
    """Gaussian bump exp(-x^2), element-wise; equals 1 at x == 0."""
    return np.exp(-np.square(x))
def square(x):
    """Square activation: x squared, element-wise."""
    return x * x
def step(x):
    """Heaviside step activation: 1 where x > 0, else 0 (element-wise)."""
    return np.where(x > 0, 1, 0)
def string_to_fn(string):
    """Look up an activation function by name.

    Returns the function whose registered name matches `string`; raises
    ValueError for an unrecognized name.
    """
    known = (
        ('sigmoid', sigmoid),
        ('tanh', tanh),
        ('identity', identity),
        ('sin', sin),
        ('relu', relu),
        ('cos', cos),
        ('gaussian', gaussian),
        ('square', square),
        ('step', step),
    )
    for name, fn in known:
        if string == name:
            return fn
    raise ValueError('Unknown activation function: {}'.format(string))
def fn_to_string(fn):
    """Inverse of string_to_fn: map an activation function back to its name.

    Raises ValueError when `fn` is not one of the known activations.
    """
    known = (
        (sigmoid, 'sigmoid'),
        (tanh, 'tanh'),
        (identity, 'identity'),
        (sin, 'sin'),
        (relu, 'relu'),
        (cos, 'cos'),
        (gaussian, 'gaussian'),
        (square, 'square'),
        (step, 'step'),
    )
    for candidate, name in known:
        if fn == candidate:
            return name
    raise ValueError('Unknown activation function: {}'.format(fn))
"numpy.maximum",
"numpy.tanh",
"numpy.sin",
"numpy.where",
"numpy.exp",
"numpy.cos"
] | [((94, 104), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (101, 104), True, 'import numpy as np\n'), ((160, 169), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (166, 169), True, 'import numpy as np\n'), ((195, 211), 'numpy.maximum', 'np.maximum', (['(0)', 'x'], {}), '(0, x)\n', (205, 211), True, 'import numpy as np\n'), ((236, 245), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (242, 245), True, 'import numpy as np\n'), ((275, 290), 'numpy.exp', 'np.exp', (['(-x ** 2)'], {}), '(-x ** 2)\n', (281, 290), True, 'import numpy as np\n'), ((346, 367), 'numpy.where', 'np.where', (['(x > 0)', '(1)', '(0)'], {}), '(x > 0, 1, 0)\n', (354, 367), True, 'import numpy as np\n'), ((57, 67), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (63, 67), True, 'import numpy as np\n')] |
""" Convert "pre-ARD" to ARD
"""
from collections import defaultdict
import datetime as dt
import json
import logging
import os
from pathlib import Path
import numpy as np
import pandas as pd
import xarray as xr
from stems.gis import convert, georeference, grids
from stems.io.encoding import netcdf_encoding
from . import defaults, __version__
logger = logging.getLogger(__name__)
def process_preard(metadata, images, chunks=None):
    """ Open and process pre-ARD data to ARD
    Parameters
    ----------
    metadata : dict
        Image metadata
    images : Sequence[str or Path]
        Path(s) to pre-ARD imagery
    chunks : dict, optional
        Chunks to use when opening pre-ARD GeoTIFF files. If ``None``,
        defaults to ``{'x': 256, 'y': 256, 'band': -1}``
    Returns
    -------
    xr.Dataset
        pre-ARD processed to (in memory) ARD format that can be written to disk
    """
    image_metadata = metadata['image']
    # Read metadata and determine key attributes
    # NOTE(review): the comprehension variable `images` shadows the `images`
    # parameter; harmless here but easy to misread
    times = pd.to_datetime([_ard_image_timestamp(images)
                            for images in image_metadata['images']]).values
    bands = image_metadata['bands']
    # Create pre-ARD DataArray
    preard_da = read_preard(images, chunks=chunks)
    # Remove any attributes
    preard_da.attrs = {}
    # Convert to Dataset
    ard_ds = preard_to_ard(preard_da, times, bands)
    # Attach attribute metadata
    version = metadata['program']['version']
    order_metadata = metadata['order']
    collection = order_metadata['collection']
    submitted = order_metadata.get('submitted', 'UNKNOWN TIME')
    date_start = order_metadata['date_start']
    date_end = order_metadata['date_end']
    # Timestamp recorded in the provenance 'history' attribute below
    dt_now = dt.datetime.today().strftime("%Y%m%dT%H%M%S")
    attrs = {
        'title': f'Collection "{collection}" Analysis Ready Data',
        'history': '\n'.join([
            (f'{submitted} - Ordered pre-ARD from GEE for collection '
             f'"{collection}" between {date_start}-{date_end} using '
             f'`cedar={version}`'),
            f'{dt_now} - Converted to ARD using `cedar={__version__}`'
        ]),
        'source': f'Google Earth Engine Collection "{collection}"',
        'images': json.dumps(image_metadata['images'])
    }
    ard_ds.attrs = attrs
    # Georeference
    tile_ = grids.Tile.from_dict(metadata['tile'])
    ard_ds = georeference(ard_ds, tile_.crs, tile_.transform)
    return ard_ds
def find_preard(path, metadata_pattern='*.json'):
    """ Match pre-ARD metadata with imagery in some location
    Parameters
    ----------
    path : str or Path
        Path to a metadata file or directory of files (returning matches
        inside the directory)
    metadata_pattern : str, optional
        If ``path`` is a directory, this value is used as a glob inside
        ``path`` to locate metadata files
    Returns
    -------
    dict[str, list[str]]
        Pairs of metadata filename to image filename(s)
    """
    root = Path(path)
    # A directory is globbed for metadata files; a single file is used as-is
    if root.is_dir():
        metadata_files = list(root.glob(metadata_pattern))
    else:
        metadata_files = [root]
    pairs = {}
    for meta in metadata_files:
        # Imagery shares the metadata file's stem (e.g. scene.json -> scene*.tif)
        image_paths = sorted(meta.parent.glob(meta.stem + '*.tif'))
        if not image_paths:
            logger.debug(f'Could not find images for metadata file {meta}')
        pairs[meta] = image_paths
    return pairs
def preard_to_ard(xarr, time, bands):
    """ Convert a "pre-ARD" DataArray to an ARD xr.Dataset
    Parameters
    ----------
    xarr : xarray.DataArray
        DataArray containing observations from all bands and time
    time : np.ndarray
        Time information for each observation
    bands : Sequence[str]
        Band names
    Returns
    -------
    xr.Dataset
        Dataset containing all observations split into subdatasets
        according to band
    Raises
    ------
    ValueError
        Raised if the number of bands and times specified do not match
        the number of "bands" in the input DataArray
    """
    n_band = len(bands)
    n_time = len(time)
    n_band_time = n_band * n_time
    if n_band_time != xarr.band.size:
        # Fixed: this message was not an f-string, so it printed the literal
        # text "{xarr.band.size}" instead of the actual size
        raise ValueError('Number of bands x time specified does not match '
                         f'input data ({xarr.band.size})')
    ds_bands = {}
    for i_band, band_name in enumerate(bands):
        # Select out this band from list of band x time
        da_band = xarr[np.arange(i_band, n_band_time, n_band), ...]
        # Replace "band" for "time" in two steps
        da_band.coords['time'] = ('band', time)
        ds_bands[band_name] = da_band.swap_dims({'band': 'time'}).drop('band')
    ard_ds = xr.Dataset(ds_bands)
    return ard_ds
def ard_netcdf_encoding(ard_ds, metadata, **encoding_kwds):
    """ Return encoding for ARD NetCDF4 files
    Parameters
    ----------
    ard_ds : xr.Dataset
        ARD as a XArray Dataset
    metadata : dict
        Metadata about ARD
    Returns
    -------
    dict
        NetCDF encoding to use with :py:meth:`xarray.Dataset.to_netcdf`
    """
    # Callers must not pass 'nodata' explicitly -- it comes from the metadata
    assert 'nodata' not in encoding_kwds
    fill_value = metadata['image'].get('nodata', None)
    return netcdf_encoding(ard_ds, nodata=fill_value, **encoding_kwds)
def read_metadata(filename):
    """ Read pre-ARD image metadata from a file
    """
    with open(filename, 'r') as src:
        return json.load(src)
def read_preard(filenames, chunks=None):
    """ Read pre-ARD file(s) into a single DataArray
    Parameters
    ----------
    filenames : Sequence[str or Path]
        Pre-ARD file name(s)
    chunks : dict, optional
        Chunks to use when opening pre-ARD GeoTIFF files. If ``None``,
        defaults to ``{'x': 256, 'y': 256, 'band': -1}``
    Returns
    -------
    xr.DataArray
        Pre-ARD joined together as a single DataArray
    """
    if isinstance(filenames, (str, Path)):
        filenames = (filenames, )
    filenames = [Path(f) for f in filenames]
    if chunks is None:
        chunks = defaults.PREARD_CHUNKS.copy()
    # Shard suffix is whatever follows the common stem prefix across files
    common = os.path.commonprefix([f.stem for f in filenames])
    by_row = defaultdict(list)
    for fname in filenames:
        shard = fname.stem[len(common):]
        if not shard:  # OK if just 1 file
            assert len(filenames) == 1
            ymin, xmin = 0, 0
        else:
            # NOTE(review): assumes the suffix is exactly "<ymin>-<xmin>" with
            # integer pixel offsets -- verify against the shard writer
            ymin, xmin = map(int, shard.split('-'))
        da = xr.open_rasterio(fname, chunks=chunks)
        by_row[ymin].append(da)
    # Concat across columns
    rows = {row: xr.concat(by_row[row], dim='x') for row in by_row}
    # Concat across rows
    preard = xr.concat(rows.values(), dim='y')
    return preard
def _ard_image_timestamp(images):
    # Earliest acquisition time across the images (ms epoch -> datetime)
    return _unix2dt(min(i['system:time_start'] for i in images))
def _unix2dt(timestamp):
return dt.datetime.fromtimestamp(timestamp / 1e3)
| [
"stems.gis.georeference",
"os.path.commonprefix",
"json.load",
"datetime.datetime.today",
"stems.gis.grids.Tile.from_dict",
"stems.io.encoding.netcdf_encoding",
"xarray.open_rasterio",
"json.dumps",
"xarray.Dataset",
"collections.defaultdict",
"xarray.concat",
"pathlib.Path",
"numpy.arange",... | [((358, 385), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (375, 385), False, 'import logging\n'), ((2319, 2357), 'stems.gis.grids.Tile.from_dict', 'grids.Tile.from_dict', (["metadata['tile']"], {}), "(metadata['tile'])\n", (2339, 2357), False, 'from stems.gis import convert, georeference, grids\n'), ((2371, 2419), 'stems.gis.georeference', 'georeference', (['ard_ds', 'tile_.crs', 'tile_.transform'], {}), '(ard_ds, tile_.crs, tile_.transform)\n', (2383, 2419), False, 'from stems.gis import convert, georeference, grids\n'), ((2985, 2995), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (2989, 2995), False, 'from pathlib import Path\n'), ((4683, 4703), 'xarray.Dataset', 'xr.Dataset', (['ds_bands'], {}), '(ds_bands)\n', (4693, 4703), True, 'import xarray as xr\n'), ((5185, 5240), 'stems.io.encoding.netcdf_encoding', 'netcdf_encoding', (['ard_ds'], {'nodata': 'nodata'}), '(ard_ds, nodata=nodata, **encoding_kwds)\n', (5200, 5240), False, 'from stems.io.encoding import netcdf_encoding\n'), ((6088, 6137), 'os.path.commonprefix', 'os.path.commonprefix', (['[f.stem for f in filenames]'], {}), '([f.stem for f in filenames])\n', (6108, 6137), False, 'import os\n'), ((6152, 6169), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6163, 6169), False, 'from collections import defaultdict\n'), ((6829, 6874), 'datetime.datetime.fromtimestamp', 'dt.datetime.fromtimestamp', (['(timestamp / 1000.0)'], {}), '(timestamp / 1000.0)\n', (6854, 6874), True, 'import datetime as dt\n'), ((2219, 2255), 'json.dumps', 'json.dumps', (["image_metadata['images']"], {}), "(image_metadata['images'])\n", (2229, 2255), False, 'import json\n'), ((5398, 5410), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5407, 5410), False, 'import json\n'), ((5975, 5982), 'pathlib.Path', 'Path', (['f'], {}), '(f)\n', (5979, 5982), False, 'from pathlib import Path\n'), ((6430, 6468), 'xarray.open_rasterio', 'xr.open_rasterio', (['fname'], 
{'chunks': 'chunks'}), '(fname, chunks=chunks)\n', (6446, 6468), True, 'import xarray as xr\n'), ((6547, 6578), 'xarray.concat', 'xr.concat', (['by_row[row]'], {'dim': '"""x"""'}), "(by_row[row], dim='x')\n", (6556, 6578), True, 'import xarray as xr\n'), ((1713, 1732), 'datetime.datetime.today', 'dt.datetime.today', ([], {}), '()\n', (1730, 1732), True, 'import datetime as dt\n'), ((4448, 4486), 'numpy.arange', 'np.arange', (['i_band', 'n_band_time', 'n_band'], {}), '(i_band, n_band_time, n_band)\n', (4457, 4486), True, 'import numpy as np\n')] |
from nltk.tokenize import word_tokenize
from Categories_Data import categories
import numpy as np
import codecs
import glob
import os
import re
class Data_Preprocessor:
"""
This class contains utility methods in order to process the 20 NewsGroup DataSet
"""
""" Takes the following parameters as an input:
text : text to be tokenized (string)
Returns:
alpha : the tokenized text with non alphabetical values removed (list of strings)
"""
@staticmethod
def tokenize(text):
tokens = word_tokenize(text)
alpha = [t for t in tokens if unicode(t).isalpha()]
return alpha
""" Takes the following parameters as an input:
text : text whose header may still be present (string)
Returns:
True or False depending on the presence of part of the header
"""
@staticmethod
def header_not_fully_removed(text):
if ":" in text.splitlines()[0]:
return len(text.splitlines()[0].split(":")[0].split()) == 1
else:
return False
""" Takes the following parameters as an input:
text : text with header (string)
Returns:
after : text without header
"""
@staticmethod
def strip_newsgroup_header(text):
_before, _blankline, after = text.partition('\n\n')
if len(after) > 0 and Data_Preprocessor.header_not_fully_removed(after):
after = Data_Preprocessor.strip_newsgroup_header(after)
return after
""" Takes the following parameters as an input:
text : text with quotes (string)
Returns:
text without quotes
"""
@staticmethod
def strip_newsgroup_quoting(text):
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'r'|^In article|^Quoted from|^\||^>)')
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
""" Takes the following parameters as an input:
text : text with footer (string)
Returns:
text without footer
"""
@staticmethod
def strip_newsgroup_footer(text):
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
""" Takes the following parameters as an input:
path: path to the DataSet folder (string)
to_be_stripped: specifies which elements to remove (list of strings)
noise_threshold: specifies which document sizes to ignore (integer)
Returns:
train_data: samples in raw (text) format (numpy array of strings)
label_data: labels' data (numpy of integers)
"""
@staticmethod
def raw_to_vector(path, to_be_stripped=["header", "footer", "quoting"], noise_threshold=-1):
base_dir = os.getcwd()
train_data = []
label_data = []
for category in categories:
os.chdir(base_dir)
os.chdir(path+"/"+category[0])
for filename in glob.glob("*"):
with codecs.open(filename, 'r', encoding='utf-8', errors='replace') as target:
data = target.read()
if "quoting" in to_be_stripped:
data = Data_Preprocessor.strip_newsgroup_quoting(data)
if "header" in to_be_stripped:
data = Data_Preprocessor.strip_newsgroup_header(data)
if "footer" in to_be_stripped:
data = Data_Preprocessor.strip_newsgroup_footer(data)
data = re.sub("[^a-zA-Z]", " ", data)
if len(data) > noise_threshold:
train_data.append(data)
label_data.append(category[1])
os.chdir(base_dir)
return np.array(train_data), np.array(label_data)
""" Takes the following parameters as an input:
path: path to the saved vector folder (string)
"""
@staticmethod
def clean_saved_vector(path):
map(os.unlink, [os.path.join(path, f) for f in os.listdir(path)] )
| [
"os.listdir",
"codecs.open",
"os.getcwd",
"re.sub",
"numpy.array",
"glob.glob",
"os.path.join",
"os.chdir",
"nltk.tokenize.word_tokenize",
"re.compile"
] | [((544, 563), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['text'], {}), '(text)\n', (557, 563), False, 'from nltk.tokenize import word_tokenize\n'), ((1730, 1820), 're.compile', 're.compile', (['"""(writes in|writes:|wrote:|says:|said:|^In article|^Quoted from|^\\\\||^>)"""'], {}), "(\n '(writes in|writes:|wrote:|says:|said:|^In article|^Quoted from|^\\\\||^>)')\n", (1740, 1820), False, 'import re\n'), ((3009, 3020), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3018, 3020), False, 'import os\n'), ((3969, 3987), 'os.chdir', 'os.chdir', (['base_dir'], {}), '(base_dir)\n', (3977, 3987), False, 'import os\n'), ((3117, 3135), 'os.chdir', 'os.chdir', (['base_dir'], {}), '(base_dir)\n', (3125, 3135), False, 'import os\n'), ((3148, 3182), 'os.chdir', 'os.chdir', (["(path + '/' + category[0])"], {}), "(path + '/' + category[0])\n", (3156, 3182), False, 'import os\n'), ((3207, 3221), 'glob.glob', 'glob.glob', (['"""*"""'], {}), "('*')\n", (3216, 3221), False, 'import glob\n'), ((4003, 4023), 'numpy.array', 'np.array', (['train_data'], {}), '(train_data)\n', (4011, 4023), True, 'import numpy as np\n'), ((4025, 4045), 'numpy.array', 'np.array', (['label_data'], {}), '(label_data)\n', (4033, 4045), True, 'import numpy as np\n'), ((4238, 4259), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (4250, 4259), False, 'import os\n'), ((3244, 3306), 'codecs.open', 'codecs.open', (['filename', '"""r"""'], {'encoding': '"""utf-8"""', 'errors': '"""replace"""'}), "(filename, 'r', encoding='utf-8', errors='replace')\n", (3255, 3306), False, 'import codecs\n'), ((3775, 3805), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'data'], {}), "('[^a-zA-Z]', ' ', data)\n", (3781, 3805), False, 'import re\n'), ((4269, 4285), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (4279, 4285), False, 'import os\n')] |
__author__ = 'me'
import cv2
import math
import random
import numpy as np
def create_star_background(image,density):
h,w = image.shape[:2]
base_color = 20
for i in range(0,h,4):
for j in range(0,w,4):
rand = random.random()
if rand < density:
intensity = np.random.normal(0,.5,100)
intensity = intensity[random.randrange(0,99)] * 50
cv2.circle(image,(j,i),1,(base_color + intensity,)*3,1)
def create_contour_stars(image, contours):
for contour in contours:
center, radius = cv2.minEnclosingCircle(contour)
rad = 100 + int((math.log(radius)+1) * 25)
r = random.randrange(-15,15)
g = random.randrange(-15,15)
b = random.randrange(-15,15)
center = (int(center[0]), int(center[1]))
cv2.circle(image,center,2,(rad+ r -10, rad+g+-10,rad+b+-1), 0)
cv2.circle(image,center,1, (rad + r, rad+g, rad+b), -1)
def create_good_feature_stars(image, features):
for feature in features:
x,y = feature.ravel()
cv2.circle(image,(x,y),3,(0,)*3,-1)
| [
"cv2.circle",
"cv2.minEnclosingCircle",
"random.random",
"random.randrange",
"numpy.random.normal",
"math.log"
] | [((583, 614), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['contour'], {}), '(contour)\n', (605, 614), False, 'import cv2\n'), ((678, 703), 'random.randrange', 'random.randrange', (['(-15)', '(15)'], {}), '(-15, 15)\n', (694, 703), False, 'import random\n'), ((715, 740), 'random.randrange', 'random.randrange', (['(-15)', '(15)'], {}), '(-15, 15)\n', (731, 740), False, 'import random\n'), ((752, 777), 'random.randrange', 'random.randrange', (['(-15)', '(15)'], {}), '(-15, 15)\n', (768, 777), False, 'import random\n'), ((835, 911), 'cv2.circle', 'cv2.circle', (['image', 'center', '(2)', '(rad + r - 10, rad + g + -10, rad + b + -1)', '(0)'], {}), '(image, center, 2, (rad + r - 10, rad + g + -10, rad + b + -1), 0)\n', (845, 911), False, 'import cv2\n'), ((906, 967), 'cv2.circle', 'cv2.circle', (['image', 'center', '(1)', '(rad + r, rad + g, rad + b)', '(-1)'], {}), '(image, center, 1, (rad + r, rad + g, rad + b), -1)\n', (916, 967), False, 'import cv2\n'), ((1080, 1122), 'cv2.circle', 'cv2.circle', (['image', '(x, y)', '(3)', '((0,) * 3)', '(-1)'], {}), '(image, (x, y), 3, (0,) * 3, -1)\n', (1090, 1122), False, 'import cv2\n'), ((242, 257), 'random.random', 'random.random', ([], {}), '()\n', (255, 257), False, 'import random\n'), ((317, 346), 'numpy.random.normal', 'np.random.normal', (['(0)', '(0.5)', '(100)'], {}), '(0, 0.5, 100)\n', (333, 346), True, 'import numpy as np\n'), ((427, 489), 'cv2.circle', 'cv2.circle', (['image', '(j, i)', '(1)', '((base_color + intensity,) * 3)', '(1)'], {}), '(image, (j, i), 1, (base_color + intensity,) * 3, 1)\n', (437, 489), False, 'import cv2\n'), ((382, 405), 'random.randrange', 'random.randrange', (['(0)', '(99)'], {}), '(0, 99)\n', (398, 405), False, 'import random\n'), ((640, 656), 'math.log', 'math.log', (['radius'], {}), '(radius)\n', (648, 656), False, 'import math\n')] |
import numpy as np
import pandas as pd
from cli import get_args
from dataset import STR2ID, DATA_DIR, read_splits
from feature_extractor import FeatureExtractor
from logger import logger
def stats(type):
if type == "all":
for lang in STR2ID.keys():
data = pd.read_csv(DATA_DIR / "{}.tsv".format(lang), sep="\t")
logger.info("{lang}: {sum} samples".format(lang=lang, sum=len(data)))
feat_extractor = FeatureExtractor()
X_texts = data["Text"]
X_sents = [feat_extractor.extract_sents(text[0]) for text in X_texts]
X_words = [feat_extractor.extract_words(text) for text in X_texts]
X_sent_count = [len(sent) for sent in X_sents] # average sentences in post
X_word_count = [len(words) for words in X_words] # average words in post
logger.info(np.mean(X_sent_count))
logger.info(np.mean(X_word_count))
elif type == "experiments":
random_splits = [42, 1, 15]
for i in random_splits:
logger.info("Random seed {}".format(i))
X_train, y_train, X_dev, y_dev, X_test, y_test = read_splits(i)
logger.info("Train {}".format(len(y_train)))
logger.info("Dev {}".format(len(y_dev)))
logger.info("Test {}".format(len(y_test)))
feat_extractor = FeatureExtractor()
X_sents = [feat_extractor.extract_sents(text[0]) for text in X_train]
X_words = [feat_extractor.extract_words(text) for text in X_train]
X_sent_count = [len(sent) for sent in X_sents] # average sentences in post
X_word_count = [len(words) for words in X_words] # average words in post
logger.info(np.mean(X_sent_count))
logger.info(np.mean(X_word_count))
if __name__ == '__main__':
args = get_args()
stats(args.type)
| [
"dataset.STR2ID.keys",
"dataset.read_splits",
"feature_extractor.FeatureExtractor",
"numpy.mean",
"cli.get_args"
] | [((1851, 1861), 'cli.get_args', 'get_args', ([], {}), '()\n', (1859, 1861), False, 'from cli import get_args\n'), ((249, 262), 'dataset.STR2ID.keys', 'STR2ID.keys', ([], {}), '()\n', (260, 262), False, 'from dataset import STR2ID, DATA_DIR, read_splits\n'), ((450, 468), 'feature_extractor.FeatureExtractor', 'FeatureExtractor', ([], {}), '()\n', (466, 468), False, 'from feature_extractor import FeatureExtractor\n'), ((866, 887), 'numpy.mean', 'np.mean', (['X_sent_count'], {}), '(X_sent_count)\n', (873, 887), True, 'import numpy as np\n'), ((913, 934), 'numpy.mean', 'np.mean', (['X_word_count'], {}), '(X_word_count)\n', (920, 934), True, 'import numpy as np\n'), ((1150, 1164), 'dataset.read_splits', 'read_splits', (['i'], {}), '(i)\n', (1161, 1164), False, 'from dataset import STR2ID, DATA_DIR, read_splits\n'), ((1361, 1379), 'feature_extractor.FeatureExtractor', 'FeatureExtractor', ([], {}), '()\n', (1377, 1379), False, 'from feature_extractor import FeatureExtractor\n'), ((1741, 1762), 'numpy.mean', 'np.mean', (['X_sent_count'], {}), '(X_sent_count)\n', (1748, 1762), True, 'import numpy as np\n'), ((1788, 1809), 'numpy.mean', 'np.mean', (['X_word_count'], {}), '(X_word_count)\n', (1795, 1809), True, 'import numpy as np\n')] |
# -- python --
import cv2,tqdm,copy
import numpy as np
import unittest
import tempfile
import sys
from einops import rearrange
import shutil
from pathlib import Path
from easydict import EasyDict as edict
# -- vision --
from PIL import Image
# -- linalg --
import torch as th
import numpy as np
# -- package helper imports --
from faiss.contrib import kn3
from faiss.contrib import testing
# -- check if reordered --
from scipy import optimize
SAVE_DIR = Path("./output/tests/")
#
#
# -- Primary Testing Class --
#
#
PYTEST_OUTPUT = Path("./pytests/output/")
def save_image(burst,prefix="prefix"):
root = PYTEST_OUTPUT
if not(root.exists()): root.mkdir()
burst = rearrange(burst,'t c h w -> t h w c')
burst = np.clip(burst,0,255)
burst = burst.astype(np.uint8)
nframes = burst.shape[0]
for t in range(nframes):
fn = "%s_kn3_io_%02d.png" % (prefix,t)
img = Image.fromarray(burst[t])
path = str(root / fn)
img.save(path)
class TestIoPatches(unittest.TestCase):
#
# -- Load Data --
#
def do_load_data(self,dname,sigma,device="cuda:0"):
# -- Read Data (Image & VNLB-C++ Results) --
clean = testing.load_dataset(dname)
clean = clean[:15,:,:32,:32].to(device).type(th.float32)
# clean = th.zeros((15,3,32,32)).to(device).type(th.float32)
clean = clean * 1.0
noisy = clean + sigma * th.normal(0,1,size=clean.shape,device=device)
return clean,noisy
def do_load_flow(self,comp_flow,burst,sigma,device):
if comp_flow:
# -- TV-L1 Optical Flow --
flow_params = {"nproc":0,"tau":0.25,"lambda":0.2,"theta":0.3,
"nscales":100,"fscale":1,"zfactor":0.5,"nwarps":5,
"epsilon":0.01,"verbose":False,"testing":False,'bw':True}
fflow,bflow = vnlb.swig.runPyFlow(burst,sigma,flow_params)
else:
# -- Empty shells --
t,c,h,w = burst.shape
tf32,tfl = th.float32,th.long
fflow = th.zeros(t,2,h,w,dtype=tf32,device=device)
bflow = fflow.clone()
# -- pack --
flows = edict()
flows.fflow = fflow
flows.bflow = bflow
return flows
def get_search_inds(self,index,bsize,shape,device):
t,c,h,w = shape
start = index * bsize
stop = ( index + 1 ) * bsize
ti32 = th.int32
srch_inds = th.arange(start,stop,dtype=ti32,device=device)[:,None]
srch_inds = kn3.get_3d_inds(srch_inds,h,w)
srch_inds = srch_inds.contiguous()
return srch_inds
def init_topk_shells(self,bsize,k,pt,c,ps,device):
tf32,ti32 = th.float32,th.int32
vals = float("inf") * th.ones((bsize,k),dtype=tf32,device=device)
inds = -th.ones((bsize,k),dtype=ti32,device=device)
patches = -th.ones((bsize,k,pt,c,ps,ps),dtype=tf32,device=device)
return vals,inds,patches
def exec_kn3_search(self,K,clean,flows,sigma,args,bufs):
# -- unpack --
device = clean.device
shape = clean.shape
t,c,h,w = shape
# -- prepare kn3 search --
index,BSIZE = 0,t*h*w
args.k = K
numQueries = (BSIZE-1) // args.queryStride + 1
# -- search --
kn3.run_search(clean,0,numQueries,flows,sigma,args,bufs,pfill=True)
th.cuda.synchronize()
# -- unpack --
kn3_vals = bufs.dists
kn3_inds = bufs.inds
kn3_patches = bufs.patches
return kn3_inds,kn3_patches
#
# -- [Exec] Sim Search --
#
def run_comparison_fill_p2b(self,noisy,clean,sigma,flows,args):
# -- fixed testing params --
K = 100 # problem one
BSIZE = 50
NBATCHES = 3
shape = noisy.shape
device = noisy.device
# -- create empty bufs --
bufs = edict()
bufs.patches = None
bufs.dists = None
bufs.inds = None
clean /= 255.
clean *= 255.
args['queryStride'] = 7
args['stype'] = "faiss"
# -- exec over batches --
for index in range(NBATCHES):
# -- get new image --
noise = sigma * th.randn_like(clean)
noisy = (clean + noise).type(th.float32).contiguous()
# clean = 255.*th.rand_like(clean).type(th.float32)
fill_img = -th.ones_like(clean).contiguous()
# -- search using faiss code --
bufs = edict()
bufs.patches = None
bufs.dists = None
bufs.inds = None
_,patches = self.exec_kn3_search(K,noisy,flows,sigma,args,bufs)
# -- fill patches --
kn3.run_fill(fill_img,patches,0,args,"p2b",clock=None)
fmin,fmax = fill_img.min().item(),fill_img.max().item()
# -- cpu --
fill_img_np = fill_img.cpu().numpy()
noisy_np = noisy.cpu().numpy()
delta = 255.*(th.abs(fill_img - noisy) > 1e-6)
delta_np = delta.cpu().numpy()
save_image(fill_img_np,prefix="fill")
save_image(noisy_np,prefix="clean")
save_image(delta_np,prefix="delta")
# -- test --
np.testing.assert_array_equal(fill_img_np,noisy_np)
def run_comparison_fill_b2p(self,noisy,clean,sigma,flows,args):
# -- fixed testing params --
K = 100
BSIZE = 50
NBATCHES = 3
shape = noisy.shape
device = noisy.device
# -- create empty bufs --
bufs = edict()
bufs.patches = None
bufs.dists = None
bufs.inds = None
clean /= 255.
clean *= 255.
args['queryStride'] = 7
args['stype'] = "faiss"
# -- exec over batches --
for index in range(NBATCHES):
# -- get new image --
noise = sigma * th.randn_like(clean)
noisy = (clean + noise).type(th.float32).contiguous()
# clean = 255.*th.rand_like(clean).type(th.float32)
fill_img = -th.ones_like(clean).contiguous()
# -- search using faiss code --
inds,patches = self.exec_kn3_search(K,noisy,flows,sigma,args,bufs)
# -- fill patches --
fpatches = th.zeros_like(patches)
kn3.run_fill(noisy,fpatches,0,args,"b2p",inds=inds,clock=None)
# -- cpu --
patches_np = patches.cpu().numpy()
fpatches_np = fpatches.cpu().numpy()
# -- test --
np.testing.assert_array_equal(patches_np,fpatches_np)
def run_large_p2b(self,noisy,clean,sigma,flows,args):
# -- fixed testing params --
K = 100
BSIZE = 50
NBATCHES = 3
shape = noisy.shape
device = noisy.device
# -- create empty bufs --
bufs = edict()
bufs.patches = None
bufs.dists = None
bufs.inds = None
args['queryStride'] = 7
args['stype'] = "faiss"
noisy = th.zeros((10,3,128,128)).to(noisy.device)
# -- exec over batches --
for index in range(NBATCHES):
# -- random patches --
patches = th.rand((1024*20,26,1,3,5,5))
# -- fill patches --
kn3.run_fill(noisy,patches,0,args,"p2b")
assert True
def run_single_test(self,dname,sigma,comp_flow,pyargs):
noisy,clean = self.do_load_data(dname,sigma)
flows = self.do_load_flow(False,clean,sigma,noisy.device)
self.run_comparison_fill_p2b(noisy,clean,sigma,flows,pyargs)
self.run_comparison_fill_b2p(noisy,clean,sigma,flows,pyargs)
self.run_large_p2b(noisy,clean,sigma,flows,pyargs)
def test_sim_search(self):
# -- init save path --
np.random.seed(123)
save_dir = SAVE_DIR
if not save_dir.exists():
save_dir.mkdir(parents=True)
# -- test 1 --
sigma = 50./255.
dname = "text_tourbus_64"
comp_flow = False
args = edict({'ps':7,'pt':1,'c':3})
self.run_single_test(dname,sigma,comp_flow,args)
| [
"torch.cuda.synchronize",
"numpy.random.seed",
"numpy.clip",
"faiss.contrib.kn3.run_search",
"pathlib.Path",
"torch.arange",
"torch.ones",
"faiss.contrib.testing.load_dataset",
"easydict.EasyDict",
"torch.zeros",
"torch.zeros_like",
"torch.randn_like",
"numpy.testing.assert_array_equal",
"... | [((460, 483), 'pathlib.Path', 'Path', (['"""./output/tests/"""'], {}), "('./output/tests/')\n", (464, 483), False, 'from pathlib import Path\n'), ((539, 564), 'pathlib.Path', 'Path', (['"""./pytests/output/"""'], {}), "('./pytests/output/')\n", (543, 564), False, 'from pathlib import Path\n'), ((682, 720), 'einops.rearrange', 'rearrange', (['burst', '"""t c h w -> t h w c"""'], {}), "(burst, 't c h w -> t h w c')\n", (691, 720), False, 'from einops import rearrange\n'), ((732, 754), 'numpy.clip', 'np.clip', (['burst', '(0)', '(255)'], {}), '(burst, 0, 255)\n', (739, 754), True, 'import numpy as np\n'), ((907, 932), 'PIL.Image.fromarray', 'Image.fromarray', (['burst[t]'], {}), '(burst[t])\n', (922, 932), False, 'from PIL import Image\n'), ((1190, 1217), 'faiss.contrib.testing.load_dataset', 'testing.load_dataset', (['dname'], {}), '(dname)\n', (1210, 1217), False, 'from faiss.contrib import testing\n'), ((2172, 2179), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (2177, 2179), True, 'from easydict import EasyDict as edict\n'), ((2525, 2557), 'faiss.contrib.kn3.get_3d_inds', 'kn3.get_3d_inds', (['srch_inds', 'h', 'w'], {}), '(srch_inds, h, w)\n', (2540, 2557), False, 'from faiss.contrib import kn3\n'), ((3302, 3376), 'faiss.contrib.kn3.run_search', 'kn3.run_search', (['clean', '(0)', 'numQueries', 'flows', 'sigma', 'args', 'bufs'], {'pfill': '(True)'}), '(clean, 0, numQueries, flows, sigma, args, bufs, pfill=True)\n', (3316, 3376), False, 'from faiss.contrib import kn3\n'), ((3378, 3399), 'torch.cuda.synchronize', 'th.cuda.synchronize', ([], {}), '()\n', (3397, 3399), True, 'import torch as th\n'), ((3883, 3890), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (3888, 3890), True, 'from easydict import EasyDict as edict\n'), ((5556, 5563), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (5561, 5563), True, 'from easydict import EasyDict as edict\n'), ((6847, 6854), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (6852, 6854), True, 'from easydict import 
EasyDict as edict\n'), ((7774, 7793), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (7788, 7793), True, 'import numpy as np\n'), ((8021, 8054), 'easydict.EasyDict', 'edict', (["{'ps': 7, 'pt': 1, 'c': 3}"], {}), "({'ps': 7, 'pt': 1, 'c': 3})\n", (8026, 8054), True, 'from easydict import EasyDict as edict\n'), ((2057, 2104), 'torch.zeros', 'th.zeros', (['t', '(2)', 'h', 'w'], {'dtype': 'tf32', 'device': 'device'}), '(t, 2, h, w, dtype=tf32, device=device)\n', (2065, 2104), True, 'import torch as th\n'), ((2450, 2499), 'torch.arange', 'th.arange', (['start', 'stop'], {'dtype': 'ti32', 'device': 'device'}), '(start, stop, dtype=ti32, device=device)\n', (2459, 2499), True, 'import torch as th\n'), ((2750, 2796), 'torch.ones', 'th.ones', (['(bsize, k)'], {'dtype': 'tf32', 'device': 'device'}), '((bsize, k), dtype=tf32, device=device)\n', (2757, 2796), True, 'import torch as th\n'), ((2810, 2856), 'torch.ones', 'th.ones', (['(bsize, k)'], {'dtype': 'ti32', 'device': 'device'}), '((bsize, k), dtype=ti32, device=device)\n', (2817, 2856), True, 'import torch as th\n'), ((2873, 2934), 'torch.ones', 'th.ones', (['(bsize, k, pt, c, ps, ps)'], {'dtype': 'tf32', 'device': 'device'}), '((bsize, k, pt, c, ps, ps), dtype=tf32, device=device)\n', (2880, 2934), True, 'import torch as th\n'), ((4486, 4493), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (4491, 4493), True, 'from easydict import EasyDict as edict\n'), ((4707, 4766), 'faiss.contrib.kn3.run_fill', 'kn3.run_fill', (['fill_img', 'patches', '(0)', 'args', '"""p2b"""'], {'clock': 'None'}), "(fill_img, patches, 0, args, 'p2b', clock=None)\n", (4719, 4766), False, 'from faiss.contrib import kn3\n'), ((5233, 5285), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['fill_img_np', 'noisy_np'], {}), '(fill_img_np, noisy_np)\n', (5262, 5285), True, 'import numpy as np\n'), ((6276, 6298), 'torch.zeros_like', 'th.zeros_like', (['patches'], {}), '(patches)\n', (6289, 6298), True, 'import 
torch as th\n'), ((6311, 6379), 'faiss.contrib.kn3.run_fill', 'kn3.run_fill', (['noisy', 'fpatches', '(0)', 'args', '"""b2p"""'], {'inds': 'inds', 'clock': 'None'}), "(noisy, fpatches, 0, args, 'b2p', inds=inds, clock=None)\n", (6323, 6379), False, 'from faiss.contrib import kn3\n'), ((6533, 6587), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['patches_np', 'fpatches_np'], {}), '(patches_np, fpatches_np)\n', (6562, 6587), True, 'import numpy as np\n'), ((7187, 7223), 'torch.rand', 'th.rand', (['(1024 * 20, 26, 1, 3, 5, 5)'], {}), '((1024 * 20, 26, 1, 3, 5, 5))\n', (7194, 7223), True, 'import torch as th\n'), ((7263, 7307), 'faiss.contrib.kn3.run_fill', 'kn3.run_fill', (['noisy', 'patches', '(0)', 'args', '"""p2b"""'], {}), "(noisy, patches, 0, args, 'p2b')\n", (7275, 7307), False, 'from faiss.contrib import kn3\n'), ((1412, 1460), 'torch.normal', 'th.normal', (['(0)', '(1)'], {'size': 'clean.shape', 'device': 'device'}), '(0, 1, size=clean.shape, device=device)\n', (1421, 1460), True, 'import torch as th\n'), ((4214, 4234), 'torch.randn_like', 'th.randn_like', (['clean'], {}), '(clean)\n', (4227, 4234), True, 'import torch as th\n'), ((5887, 5907), 'torch.randn_like', 'th.randn_like', (['clean'], {}), '(clean)\n', (5900, 5907), True, 'import torch as th\n'), ((7014, 7041), 'torch.zeros', 'th.zeros', (['(10, 3, 128, 128)'], {}), '((10, 3, 128, 128))\n', (7022, 7041), True, 'import torch as th\n'), ((4973, 4997), 'torch.abs', 'th.abs', (['(fill_img - noisy)'], {}), '(fill_img - noisy)\n', (4979, 4997), True, 'import torch as th\n'), ((4389, 4408), 'torch.ones_like', 'th.ones_like', (['clean'], {}), '(clean)\n', (4401, 4408), True, 'import torch as th\n'), ((6062, 6081), 'torch.ones_like', 'th.ones_like', (['clean'], {}), '(clean)\n', (6074, 6081), True, 'import torch as th\n')] |
''' visdom related functions to print the curves
'''
import pdb
from visdom import Visdom
import numpy as np
import time
# viz = Visdom(server='http://192.168.3.11', port=4212) # aws server port
viz = None
def visdom_initialize(args):
'''
'''
global viz
if args.vis_port < 4212 or args.vis_port > 4223:
assert 0, 'Visdom port %d not supported' % (args.vis_port)
if args.vis_server == 'local':
viz = Visdom()
else:
viz = Visdom(server=args.vis_server, port=args.vis_port,
use_incoming_socket=False)
return None
def visdom_plot_curve(it, val, viz_win=None, title=''):
'''
'''
if it == 1 or viz_win == None:
return viz.line(X=np.array([it]),
Y=np.array([val]),
win=viz_win,
opts={'title': title})
else:
return viz.line(win=viz_win, update='append',
X=np.array([it]),
Y=np.array([val]))
return viz_win
def visdom_print_info(str_list, mytitle):
"""Visdom, display text info to the visdom window
input: 1. a list of strings
"""
mystr = ""
for i in range(len(str_list)):
mystr = mystr + str_list[i] + "\n" + "<br>"
opts = {'title': mytitle}
viz.text(mystr, opts=opts)
return None
def viz_line(i, vals, viz_win=None,
title='', xlabel='', ylabel='',
legend=None):
''' a more robust way to print multiple values on the same plot
NOTE:
this function only supports 2 or more valus. for plotting only one value,
refer to visdom_plot_curve()
UPDATE:
now it data input supports >= 1 plotting
'''
data_num = len(vals)
if legend != None: assert data_num == len(legend)
else: legend = ['' for _ in range(data_num)]
# make the input compatible with visdom API
X = [np.array([i]) for _ in range(data_num)]
Y = [np.array([val]) for val in vals]
if data_num != 1:
X = np.column_stack(X)
Y = np.column_stack(Y)
else:
X = X[-1]
Y = Y[-1]
if viz_win is None:
return viz.line(X=X, Y=Y,
opts=dict(title=title,
legend=legend,
xlabel=xlabel,
ylabel=ylabel))
else:
return viz.line(win=viz_win, update='append',
X=X, Y=Y,
opts=dict(title=title,
legend=legend))
def vis_stem_as_hist(samples):
''' using visdom to plot the histogram of 1 or more sample sequence
'''
raise NotImplementedError
return
def vis_hist_plot(i, samples, viz_win=None, title='', numbin=20):
''' directly use visdom histogram plot to visualize the sample sequence
'''
low, high = samples.min(), samples.max()
title = title + 'low:%.2f-high:%.2f-Gen:%d' % (float(low), float(high), i)
if viz_win == None:
return viz.histogram(X=samples,
opts=dict(numbins=numbin,
title=title))
else:
return viz.histogram(win=viz_win, X=samples,
opts=dict(numbins=numbin,
title=title))
return
if __name__ == '__main__':
win = None
# for i in range(100):
# win = visdom_plot_curve(i, i, viz_win=win, title='test')
for i in range(100):
time.sleep(1)
samples = np.random.normal(np.random.randint(5), 1, 10000)
# samples = np.random.uniform(0, 5, 10000)
win = vis_hist_plot(i, samples, viz_win=win, title='misc')
| [
"visdom.Visdom",
"time.sleep",
"numpy.random.randint",
"numpy.array",
"numpy.column_stack"
] | [((441, 449), 'visdom.Visdom', 'Visdom', ([], {}), '()\n', (447, 449), False, 'from visdom import Visdom\n'), ((474, 551), 'visdom.Visdom', 'Visdom', ([], {'server': 'args.vis_server', 'port': 'args.vis_port', 'use_incoming_socket': '(False)'}), '(server=args.vis_server, port=args.vis_port, use_incoming_socket=False)\n', (480, 551), False, 'from visdom import Visdom\n'), ((1933, 1946), 'numpy.array', 'np.array', (['[i]'], {}), '([i])\n', (1941, 1946), True, 'import numpy as np\n'), ((1982, 1997), 'numpy.array', 'np.array', (['[val]'], {}), '([val])\n', (1990, 1997), True, 'import numpy as np\n'), ((2049, 2067), 'numpy.column_stack', 'np.column_stack', (['X'], {}), '(X)\n', (2064, 2067), True, 'import numpy as np\n'), ((2080, 2098), 'numpy.column_stack', 'np.column_stack', (['Y'], {}), '(Y)\n', (2095, 2098), True, 'import numpy as np\n'), ((3555, 3568), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3565, 3568), False, 'import time\n'), ((3604, 3624), 'numpy.random.randint', 'np.random.randint', (['(5)'], {}), '(5)\n', (3621, 3624), True, 'import numpy as np\n'), ((720, 734), 'numpy.array', 'np.array', (['[it]'], {}), '([it])\n', (728, 734), True, 'import numpy as np\n'), ((762, 777), 'numpy.array', 'np.array', (['[val]'], {}), '([val])\n', (770, 777), True, 'import numpy as np\n'), ((953, 967), 'numpy.array', 'np.array', (['[it]'], {}), '([it])\n', (961, 967), True, 'import numpy as np\n'), ((995, 1010), 'numpy.array', 'np.array', (['[val]'], {}), '([val])\n', (1003, 1010), True, 'import numpy as np\n')] |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
# -*- coding: utf-8 -*-
"""CustomCNN2.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/17gmD7alNhFL8OsLS-AdxvTMgP-5wIrXd
"""
"""# Training CNN and saving the influence of every training example(for the test image with test_idx)
handling imports
"""
import numpy as np
import math
import copy
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import sklearn.linear_model as linear_model
import sklearn.preprocessing as preprocessing
import scipy
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
sns.set(color_codes=True)
import tensorflow as tf
from influence.all_CNN_c import All_CNN_C
from scripts.load_mnist import load_small_mnist, load_mnist
"""loading the dataset"""
import h5py
path_to_matrices = "data/training_set.hdf5"
#path_to_matrices = "data/default_labeled_balanced.hdf5"
dataset = h5py.File(path_to_matrices, 'r')
matrices = np.array(dataset['matrices'])
labels = np.array(dataset['label_vectors'])
print(matrices.shape)
print(labels.shape)
matrices = matrices[0:50,:,:]
labels = labels[0:50,:]
print(matrices.shape)
print(labels.shape)
randomize = np.arange(len(matrices))
np.random.shuffle(randomize)
matrices = matrices[randomize]
labels = labels[randomize]
#from one hot to integer coding
i = 0
new_labels = []
for label in labels:
index = np.argmax(label)
new_labels.append(index)
i+=1
labels = new_labels
labels = np.asarray(labels)
training_test_split = 0.5
index = int(training_test_split * len(matrices))
train_matrices = np.expand_dims(matrices[:index], axis=3)
train_labels = labels[:index]
validation_matrices = np.expand_dims(matrices[index + 1:], axis=3)
validation_labels = labels[index + 1:]
from tensorflow.contrib.learn.python.learn.datasets import base
from influence.dataset import DataSet
train = DataSet(train_matrices, train_labels)
validation = DataSet(validation_matrices, validation_labels)
test = DataSet(validation_matrices, validation_labels)
data_sets = base.Datasets(train=train, validation=validation, test=test)
data_sets2 = load_small_mnist('data')
#(data_sets.train.labels.shape)
#print(data_sets2.train.labels.shape)
#print(data_sets.train.x.shape)
#print(data_sets2.train.x.shape)
"""defining the CNN
training the CNN
"""
num_classes = 4
input_side = 128
input_channels = 1
input_dim = input_side * input_side * input_channels
weight_decay = 0.001
batch_size = 1
initial_learning_rate = 0.0001
decay_epochs = [10000, 20000]
hidden1_units = 8
hidden2_units = 8
hidden3_units = 8
conv_patch_size = 3
keep_probs = [1.0, 1.0]
model = All_CNN_C(
input_side=input_side,
input_channels=input_channels,
conv_patch_size=conv_patch_size,
hidden1_units=hidden1_units,
hidden2_units=hidden2_units,
hidden3_units=hidden3_units,
weight_decay=weight_decay,
num_classes=num_classes,
batch_size=batch_size,
data_sets=data_sets,
initial_learning_rate=initial_learning_rate,
damping=1e-2,
decay_epochs=decay_epochs,
mini_batch=False,
train_dir='output',
log_dir='log',
model_name='mnist_small_all_cnn_c')
num_steps = 5000
'''model.train(
num_steps=num_steps,
iter_to_switch_to_batch=10000,
iter_to_switch_to_sgd=10000)
iter_to_load = num_steps - 1'''
"""calculating the influence"""
test_idx = 2
CNN_predicted_loss_diffs = model.get_influence_on_test_loss(
[test_idx],
np.arange(len(model.data_sets.train.labels)),
force_refresh=True)
#("x")
"""saving the influence"""
np.savez(
'output/CNN_results',
test_idx=test_idx,
CNN_predicted_loss_diffs=CNN_predicted_loss_diffs
)
#print("y")
"""# **Loading influences and plotting the most influential pictures**
getting Xtrain of the Training Dataset
"""
X_train = data_sets.train.x
"""loading the influences"""
f = np.load('output/CNN_results.npz')
test_idx = f['test_idx']
CNN_predicted_loss_diffs = f['CNN_predicted_loss_diffs']
print("Test Image:")
plt.spy(np.reshape(X_train[test_idx, :], [128, 128]))
print("Top 5 most influenatial matrices")
#x_train = []
#for counter, train_idx in enumerate(np.argsort(CNN_predicted_loss_diffs)[-5:]):
# x_train.append(X_train[train_idx, :])
x_train = []
for counter, train_idx in enumerate(np.argsort(CNN_predicted_loss_diffs)[0:5]):
x_train.append(X_train[train_idx, :])
plt.spy(np.reshape(x_train[0], [128, 128]))
plt.spy(np.reshape(x_train[1], [128, 128]))
plt.spy(np.reshape(x_train[2], [128, 128]))
plt.spy(np.reshape(x_train[3], [128, 128]))
plt.spy(np.reshape(x_train[4], [128, 128])) | [
"h5py.File",
"numpy.load",
"tensorflow.contrib.learn.python.learn.datasets.base.Datasets",
"numpy.argmax",
"numpy.asarray",
"influence.all_CNN_c.All_CNN_C",
"numpy.expand_dims",
"influence.dataset.DataSet",
"scripts.load_mnist.load_small_mnist",
"numpy.argsort",
"numpy.array",
"numpy.reshape",... | [((798, 823), 'seaborn.set', 'sns.set', ([], {'color_codes': '(True)'}), '(color_codes=True)\n', (805, 823), True, 'import seaborn as sns\n'), ((1103, 1135), 'h5py.File', 'h5py.File', (['path_to_matrices', '"""r"""'], {}), "(path_to_matrices, 'r')\n", (1112, 1135), False, 'import h5py\n'), ((1148, 1177), 'numpy.array', 'np.array', (["dataset['matrices']"], {}), "(dataset['matrices'])\n", (1156, 1177), True, 'import numpy as np\n'), ((1187, 1221), 'numpy.array', 'np.array', (["dataset['label_vectors']"], {}), "(dataset['label_vectors'])\n", (1195, 1221), True, 'import numpy as np\n'), ((1398, 1426), 'numpy.random.shuffle', 'np.random.shuffle', (['randomize'], {}), '(randomize)\n', (1415, 1426), True, 'import numpy as np\n'), ((1653, 1671), 'numpy.asarray', 'np.asarray', (['labels'], {}), '(labels)\n', (1663, 1671), True, 'import numpy as np\n'), ((1766, 1806), 'numpy.expand_dims', 'np.expand_dims', (['matrices[:index]'], {'axis': '(3)'}), '(matrices[:index], axis=3)\n', (1780, 1806), True, 'import numpy as np\n'), ((1860, 1904), 'numpy.expand_dims', 'np.expand_dims', (['matrices[index + 1:]'], {'axis': '(3)'}), '(matrices[index + 1:], axis=3)\n', (1874, 1904), True, 'import numpy as np\n'), ((2056, 2093), 'influence.dataset.DataSet', 'DataSet', (['train_matrices', 'train_labels'], {}), '(train_matrices, train_labels)\n', (2063, 2093), False, 'from influence.dataset import DataSet\n'), ((2107, 2154), 'influence.dataset.DataSet', 'DataSet', (['validation_matrices', 'validation_labels'], {}), '(validation_matrices, validation_labels)\n', (2114, 2154), False, 'from influence.dataset import DataSet\n'), ((2162, 2209), 'influence.dataset.DataSet', 'DataSet', (['validation_matrices', 'validation_labels'], {}), '(validation_matrices, validation_labels)\n', (2169, 2209), False, 'from influence.dataset import DataSet\n'), ((2223, 2283), 'tensorflow.contrib.learn.python.learn.datasets.base.Datasets', 'base.Datasets', ([], {'train': 'train', 'validation': 
'validation', 'test': 'test'}), '(train=train, validation=validation, test=test)\n', (2236, 2283), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((2298, 2322), 'scripts.load_mnist.load_small_mnist', 'load_small_mnist', (['"""data"""'], {}), "('data')\n", (2314, 2322), False, 'from scripts.load_mnist import load_small_mnist, load_mnist\n'), ((2816, 3296), 'influence.all_CNN_c.All_CNN_C', 'All_CNN_C', ([], {'input_side': 'input_side', 'input_channels': 'input_channels', 'conv_patch_size': 'conv_patch_size', 'hidden1_units': 'hidden1_units', 'hidden2_units': 'hidden2_units', 'hidden3_units': 'hidden3_units', 'weight_decay': 'weight_decay', 'num_classes': 'num_classes', 'batch_size': 'batch_size', 'data_sets': 'data_sets', 'initial_learning_rate': 'initial_learning_rate', 'damping': '(0.01)', 'decay_epochs': 'decay_epochs', 'mini_batch': '(False)', 'train_dir': '"""output"""', 'log_dir': '"""log"""', 'model_name': '"""mnist_small_all_cnn_c"""'}), "(input_side=input_side, input_channels=input_channels,\n conv_patch_size=conv_patch_size, hidden1_units=hidden1_units,\n hidden2_units=hidden2_units, hidden3_units=hidden3_units, weight_decay=\n weight_decay, num_classes=num_classes, batch_size=batch_size, data_sets\n =data_sets, initial_learning_rate=initial_learning_rate, damping=0.01,\n decay_epochs=decay_epochs, mini_batch=False, train_dir='output',\n log_dir='log', model_name='mnist_small_all_cnn_c')\n", (2825, 3296), False, 'from influence.all_CNN_c import All_CNN_C\n'), ((3739, 3844), 'numpy.savez', 'np.savez', (['"""output/CNN_results"""'], {'test_idx': 'test_idx', 'CNN_predicted_loss_diffs': 'CNN_predicted_loss_diffs'}), "('output/CNN_results', test_idx=test_idx, CNN_predicted_loss_diffs=\n CNN_predicted_loss_diffs)\n", (3747, 3844), True, 'import numpy as np\n'), ((4051, 4084), 'numpy.load', 'np.load', (['"""output/CNN_results.npz"""'], {}), "('output/CNN_results.npz')\n", (4058, 4084), True, 'import numpy as np\n'), ((1572, 1588), 
'numpy.argmax', 'np.argmax', (['label'], {}), '(label)\n', (1581, 1588), True, 'import numpy as np\n'), ((4202, 4246), 'numpy.reshape', 'np.reshape', (['X_train[test_idx, :]', '[128, 128]'], {}), '(X_train[test_idx, :], [128, 128])\n', (4212, 4246), True, 'import numpy as np\n'), ((4577, 4611), 'numpy.reshape', 'np.reshape', (['x_train[0]', '[128, 128]'], {}), '(x_train[0], [128, 128])\n', (4587, 4611), True, 'import numpy as np\n'), ((4622, 4656), 'numpy.reshape', 'np.reshape', (['x_train[1]', '[128, 128]'], {}), '(x_train[1], [128, 128])\n', (4632, 4656), True, 'import numpy as np\n'), ((4667, 4701), 'numpy.reshape', 'np.reshape', (['x_train[2]', '[128, 128]'], {}), '(x_train[2], [128, 128])\n', (4677, 4701), True, 'import numpy as np\n'), ((4712, 4746), 'numpy.reshape', 'np.reshape', (['x_train[3]', '[128, 128]'], {}), '(x_train[3], [128, 128])\n', (4722, 4746), True, 'import numpy as np\n'), ((4757, 4791), 'numpy.reshape', 'np.reshape', (['x_train[4]', '[128, 128]'], {}), '(x_train[4], [128, 128])\n', (4767, 4791), True, 'import numpy as np\n'), ((4484, 4520), 'numpy.argsort', 'np.argsort', (['CNN_predicted_loss_diffs'], {}), '(CNN_predicted_loss_diffs)\n', (4494, 4520), True, 'import numpy as np\n')] |
# Copyright 2008-2018 pydicom authors. See LICENSE file for details.
"""Utility functions used in the pixel data handlers."""
from sys import byteorder
try:
import numpy as np
HAVE_NP = True
except ImportError:
HAVE_NP = False
def convert_color_space(arr, current, desired):
"""Convert the image(s) in `arr` from one color space to another.
Parameters
----------
arr : numpy.ndarray
The image(s) as an ndarray with shape (frames, rows, columns, planes)
or (rows, columns, planes).
current : str
The current color space, should be a valid value for (0028,0004)
*Photometric Interpretation*. One of 'RGB', 'YBR_FULL'.
desired : str
The desired color space, should be a valid value for (0028,0004)
*Photometric Interpretation*. One of 'RGB', 'YBR_FULL'.
Returns
-------
numpy.ndarray
The image(s) converted to the desired color space.
"""
if not HAVE_NP:
raise ImportError(
"Numpy is required to convert the color space."
)
# No change needed
if current == desired:
return arr
_converters = {
'YBR_FULL': {
'RGB': _convert_YBR_FULL_to_RGB
},
'RGB': {
'YBR_FULL': _convert_RGB_to_YBR_FULL,
}
}
try:
converter = _converters[current][desired]
except KeyError:
raise NotImplementedError(
"Conversion from {0} to {1} is not supported."
.format(current, desired)
)
return converter(arr)
def dtype_corrected_for_endianness(is_little_endian, numpy_dtype):
"""Return a numpy dtype corrected for system and dataset endianness.
Parameters
----------
is_little_endian : bool
The endianess of the affected dataset.
numpy_dtype : numpy.dtype
The numpy data type used for the pixel data without considering
endianess.
Raises
------
ValueError
If `is_little_endian` is None, e.g. not initialized.
Returns
-------
numpy.dtype
The numpy data type to be used for the pixel data, considering
the endianess.
"""
if is_little_endian is None:
raise ValueError("Dataset attribute 'is_little_endian' "
"has to be set before writing the dataset")
if is_little_endian != (byteorder == 'little'):
return numpy_dtype.newbyteorder('S')
return numpy_dtype
def pixel_dtype(ds):
"""Return a numpy dtype for the pixel data in dataset in `ds`.
Suitable for use with IODs containing the Image Pixel module.
+------------------------------------------+--------------+
| Element | Supported |
+-------------+---------------------+------+ values |
| Tag | Keyword | Type | |
+=============+=====================+======+==============+
| (0028,0101) | BitsAllocated | 1 | 1, 8, 16, 32 |
+-------------+---------------------+------+--------------+
| (0028,0103) | PixelRepresentation | 1 | 0, 1 |
+-------------+---------------------+------+--------------+
Parameters
----------
ds : dataset.Dataset
The DICOM dataset containing the pixel data you wish to get the
numpy dtype for.
Returns
-------
numpy.dtype
A numpy dtype suitable for containing the dataset's pixel data.
Raises
------
NotImplementedError
If the pixel data is of a type that isn't supported by either numpy
or pydicom.
"""
if not HAVE_NP:
raise ImportError("Numpy is required to determine the dtype.")
if ds.is_little_endian is None:
ds.is_little_endian = ds.file_meta.TransferSyntaxUID.is_little_endian
# (0028,0103) Pixel Representation, US, 1
# Data representation of the pixel samples
# 0x0000 - unsigned int
# 0x0001 - 2's complement (signed int)
pixel_repr = ds.PixelRepresentation
if pixel_repr == 0:
dtype_str = 'uint'
elif pixel_repr == 1:
dtype_str = 'int'
else:
raise ValueError(
"Unable to determine the data type to use to contain the "
"Pixel Data as a value of '{}' for '(0028,0103) Pixel "
"Representation' is invalid".format(pixel_repr)
)
# (0028,0100) Bits Allocated, US, 1
# The number of bits allocated for each pixel sample
# PS3.5 8.1.1: Bits Allocated shall either be 1 or a multiple of 8
# For bit packed data we use uint8
bits_allocated = ds.BitsAllocated
if bits_allocated == 1:
dtype_str = 'uint8'
elif bits_allocated > 0 and bits_allocated % 8 == 0:
dtype_str += str(bits_allocated)
else:
raise ValueError(
"Unable to determine the data type to use to contain the "
"Pixel Data as a value of '{}' for '(0028,0100) Bits "
"Allocated' is invalid".format(bits_allocated)
)
# Check to see if the dtype is valid for numpy
try:
dtype = np.dtype(dtype_str)
except TypeError:
raise NotImplementedError(
"The data type '{}' needed to contain the Pixel Data is not "
"supported by numpy".format(dtype_str)
)
# Correct for endianness of the system vs endianness of the dataset
if ds.is_little_endian != (byteorder == 'little'):
# 'S' swap from current to opposite
dtype = dtype.newbyteorder('S')
return dtype
def reshape_pixel_array(ds, arr):
"""Return a reshaped ndarray `arr`.
+------------------------------------------+-----------+----------+
| Element | Supported | |
+-------------+---------------------+------+ values | |
| Tag | Keyword | Type | | |
+=============+=====================+======+===========+==========+
| (0028,0002) | SamplesPerPixel | 1 | N > 0 | Required |
+-------------+---------------------+------+-----------+----------+
| (0028,0006) | PlanarConfiguration | 1C | 0, 1 | Optional |
+-------------+---------------------+------+-----------+----------+
| (0028,0008) | NumberOfFrames | 1C | N > 0 | Optional |
+-------------+---------------------+------+-----------+----------+
| (0028,0010) | Rows | 1 | N > 0 | Required |
+-------------+---------------------+------+-----------+----------+
| (0028,0011) | Columns | 1 | N > 0 | Required |
+-------------+---------------------+------+-----------+----------+
(0028,0008) *Number of Frames* is required when the pixel data contains
more than 1 frame. (0028,0006) *Planar Configuration* is required when
(0028,0002) *Samples per Pixel* is greater than 1. For certain
compressed transfer syntaxes it is always taken to be either 0 or 1 as
shown in the table below.
+---------------------------------------------+-----------------------+
| Transfer Syntax | Planar Configuration |
+------------------------+--------------------+ |
| UID | Name | |
+========================+====================+=======================+
| 1.2.840.10008.192.168.127.12 | JPEG Baseline | 0 |
+------------------------+--------------------+-----------------------+
| 1.2.840.10008.172.16.17.32 | JPEG Lossless, | 0 |
| | Non-hierarchical | |
+------------------------+--------------------+-----------------------+
| 1.2.840.10008.192.168.127.12 | JPEG Lossless, | 0 |
| | Non-hierarchical, | |
| | SV1 | |
+------------------------+--------------------+-----------------------+
| 1.2.840.10008.172.16.17.32 | JPEG-LS Lossless | 1 |
+------------------------+--------------------+-----------------------+
| 1.2.840.10008.172.16.17.32 | JPEG-LS Lossy | 1 |
+------------------------+--------------------+-----------------------+
| 1.2.840.10008.172.16.31.100 | JPEG 2000 Lossless | 0 |
+------------------------+--------------------+-----------------------+
| 1.2.840.10008.1.2.4.91 | JPEG 2000 Lossy | 0 |
+------------------------+--------------------+-----------------------+
| 1.2.840.10008.1.2.5 | RLE Lossless | 1 |
+------------------------+--------------------+-----------------------+
Parameters
----------
ds : dataset.Dataset
The dataset containing the Image Pixel module corresponding to the
pixel data in `arr`.
arr : numpy.ndarray
The 1D array containing the pixel data.
Returns
-------
numpy.ndarray
A reshaped array containing the pixel data. The shape of the array
depends on the contents of the dataset:
* For single frame, single sample data (rows, columns)
* For single frame, multi-sample data (rows, columns, planes)
* For multi-frame, single sample data (frames, rows, columns)
* For multi-frame, multi-sample data (frames, rows, columns, planes)
References
----------
* DICOM Standard, Part 3, Annex C.7.6.3.1
* DICOM Standard, Part 4, Sections 8.2.1-4
"""
if not HAVE_NP:
raise ImportError("Numpy is required to reshape the pixel array.")
nr_frames = getattr(ds, 'NumberOfFrames', 1)
nr_samples = ds.SamplesPerPixel
if nr_frames < 1:
raise ValueError(
"Unable to reshape the pixel array as a value of {} for "
"(0028,0008) 'Number of Frames' is invalid."
.format(nr_frames)
)
if nr_samples < 1:
raise ValueError(
"Unable to reshape the pixel array as a value of {} for "
"(0028,0002) 'Samples per Pixel' is invalid."
.format(nr_samples)
)
# Valid values for Planar Configuration are dependent on transfer syntax
if nr_samples > 1:
transfer_syntax = ds.file_meta.TransferSyntaxUID
if transfer_syntax in ['1.2.840.10008.1.2.4.50',
'1.2.840.10008.1.2.4.57',
'1.2.840.10008.1.2.4.70',
'1.2.840.10008.1.2.4.90',
'1.2.840.10008.1.2.4.91']:
planar_configuration = 0
elif transfer_syntax in ['1.2.840.10008.1.2.4.80',
'1.2.840.10008.1.2.4.81',
'1.2.840.10008.1.2.5']:
planar_configuration = 1
else:
planar_configuration = ds.PlanarConfiguration
if planar_configuration not in [0, 1]:
raise ValueError(
"Unable to reshape the pixel array as a value of {} for "
"(0028,0006) 'Planar Configuration' is invalid."
.format(planar_configuration)
)
if nr_frames > 1:
# Multi-frame
if nr_samples == 1:
# Single plane
arr = arr.reshape(nr_frames, ds.Rows, ds.Columns)
else:
# Multiple planes, usually 3
if planar_configuration == 0:
arr = arr.reshape(nr_frames, ds.Rows, ds.Columns, nr_samples)
else:
arr = arr.reshape(nr_frames, nr_samples, ds.Rows, ds.Columns)
arr = arr.transpose(0, 2, 3, 1)
else:
# Single frame
if nr_samples == 1:
# Single plane
arr = arr.reshape(ds.Rows, ds.Columns)
else:
# Multiple planes, usually 3
if planar_configuration == 0:
arr = arr.reshape(ds.Rows, ds.Columns, nr_samples)
else:
arr = arr.reshape(nr_samples, ds.Rows, ds.Columns)
arr = arr.transpose(1, 2, 0)
return arr
def get_expected_length(ds, unit='bytes'):
"""Return the expected length (in bytes or pixels) of the pixel data.
+-----------------------------------+------+-------------+
| Element | Type | Required or |
+-------------+---------------------+ | optional |
| Tag | Keyword | | |
+=============+=====================+======+=============+
| (0028,0002) | SamplesPerPixel | 1 | Required |
+-------------+---------------------+------+-------------+
| (0028,0008) | NumberOfFrames | 1C | Optional |
+-------------+---------------------+------+-------------+
| (0028,0010) | Rows | 1 | Required |
+-------------+---------------------+------+-------------+
| (0028,0011) | Columns | 1 | Required |
+-------------+---------------------+------+-------------+
| (0028,0100) | BitsAllocated | 1 | Required |
+-------------+---------------------+------+-------------+
Parameters
----------
ds : dataset.Dataset
The DICOM dataset containing the Image Pixel module and pixel data.
unit : str, optional
If 'bytes' then returns the expected length of the Pixel Data in
whole bytes and NOT including an odd length trailing NULL padding
byte. If 'pixels' then returns the expected length of the Pixel Data
in terms of the total number of pixels (default 'bytes').
Returns
-------
int
The expected length of the pixel data in either whole bytes or pixels,
excluding the NULL trailing padding byte for odd length data.
"""
length = ds.Rows * ds.Columns * ds.SamplesPerPixel
length *= getattr(ds, 'NumberOfFrames', 1)
if unit == 'pixels':
return length
# Correct for the number of bytes per pixel
bits_allocated = ds.BitsAllocated
if bits_allocated == 1:
# Determine the nearest whole number of bytes needed to contain
# 1-bit pixel data. e.g. 10 x 10 1-bit pixels is 100 bits, which
# are packed into 12.5 -> 13 bytes
length = length // 8 + (length % 8 > 0)
else:
length *= bits_allocated // 8
return length
def _convert_RGB_to_YBR_FULL(arr):
"""Return an ndarray converted from RGB to YBR_FULL color space.
Parameters
----------
arr : numpy.ndarray
An ndarray of an 8-bit per channel images in RGB color space.
Returns
-------
numpy.ndarray
The array in YBR_FULL color space.
References
----------
* DICOM Standard, Part 3, Annex C.7.6.3.1.2
* ISO/IEC 10918-5:2012, Section 7
"""
orig_dtype = arr.dtype
arr = arr.astype(np.float)
rgb_to_ybr = np.asarray(
[[+0.299, +0.587, +0.114],
[-0.299, -0.587, +0.886],
[+0.701, -0.587, -0.114]], dtype=np.float)
arr = np.dot(arr, rgb_to_ybr.T)
if len(arr.shape) == 4:
# Multi-frame
arr[:, :, :, 1] /= 1.772
arr[:, :, :, 2] /= 1.402
else:
# Single frame
arr[:, :, 1] /= 1.772
arr[:, :, 2] /= 1.402
arr += [0, 128, 128]
# Round(x) -> floor of (arr + 0.5)
arr = np.floor(arr + 0.5)
# Max(0, arr) -> 0 if 0 >= arr, arr otherwise
arr[np.where(arr < 0)] = 0
# Min(arr, 255) -> arr if arr <= 255, 255 otherwise
arr[np.where(arr > 255)] = 255
return arr.astype(orig_dtype)
def _convert_YBR_FULL_to_RGB(arr):
"""Return an ndarray converted from YBR_FULL to RGB color space.
Parameters
----------
arr : numpy.ndarray
An ndarray of an 8-bit per channel images in YBR_FULL color space.
Returns
-------
numpy.ndarray
The array in RGB color space.
References
----------
* DICOM Standard, Part 3, Annex C.7.6.3.1.2
* ISO/IEC 10918-5:2012, Section 7
"""
orig_dtype = arr.dtype
ybr_to_rgb = np.asarray(
[[1.0, +0.0, +1.402],
[1.0, -0.114 * 1.772 / 0.587, -0.299 * 1.402 / 0.587],
[1.0, +1.772, +0.0]], dtype=np.float)
arr = arr.astype(np.float)
arr -= [0, 128, 128]
arr = np.dot(arr, ybr_to_rgb.T)
# Round(x) -> floor of (arr + 0.5)
arr = np.floor(arr + 0.5)
# Max(0, arr) -> 0 if 0 >= arr, arr otherwise
arr[np.where(arr < 0)] = 0
# Min(arr, 255) -> arr if arr <= 255, 255 otherwise
arr[np.where(arr > 255)] = 255
return arr.astype(orig_dtype)
| [
"numpy.asarray",
"numpy.dtype",
"numpy.floor",
"numpy.where",
"numpy.dot"
] | [((15081, 15192), 'numpy.asarray', 'np.asarray', (['[[+0.299, +0.587, +0.114], [-0.299, -0.587, +0.886], [+0.701, -0.587, -0.114]]'], {'dtype': 'np.float'}), '([[+0.299, +0.587, +0.114], [-0.299, -0.587, +0.886], [+0.701, -\n 0.587, -0.114]], dtype=np.float)\n', (15091, 15192), True, 'import numpy as np\n'), ((15226, 15251), 'numpy.dot', 'np.dot', (['arr', 'rgb_to_ybr.T'], {}), '(arr, rgb_to_ybr.T)\n', (15232, 15251), True, 'import numpy as np\n'), ((15537, 15556), 'numpy.floor', 'np.floor', (['(arr + 0.5)'], {}), '(arr + 0.5)\n', (15545, 15556), True, 'import numpy as np\n'), ((16252, 16382), 'numpy.asarray', 'np.asarray', (['[[1.0, +0.0, +1.402], [1.0, -0.114 * 1.772 / 0.587, -0.299 * 1.402 / 0.587],\n [1.0, +1.772, +0.0]]'], {'dtype': 'np.float'}), '([[1.0, +0.0, +1.402], [1.0, -0.114 * 1.772 / 0.587, -0.299 * \n 1.402 / 0.587], [1.0, +1.772, +0.0]], dtype=np.float)\n', (16262, 16382), True, 'import numpy as np\n'), ((16472, 16497), 'numpy.dot', 'np.dot', (['arr', 'ybr_to_rgb.T'], {}), '(arr, ybr_to_rgb.T)\n', (16478, 16497), True, 'import numpy as np\n'), ((16548, 16567), 'numpy.floor', 'np.floor', (['(arr + 0.5)'], {}), '(arr + 0.5)\n', (16556, 16567), True, 'import numpy as np\n'), ((5109, 5128), 'numpy.dtype', 'np.dtype', (['dtype_str'], {}), '(dtype_str)\n', (5117, 5128), True, 'import numpy as np\n'), ((15615, 15632), 'numpy.where', 'np.where', (['(arr < 0)'], {}), '(arr < 0)\n', (15623, 15632), True, 'import numpy as np\n'), ((15702, 15721), 'numpy.where', 'np.where', (['(arr > 255)'], {}), '(arr > 255)\n', (15710, 15721), True, 'import numpy as np\n'), ((16626, 16643), 'numpy.where', 'np.where', (['(arr < 0)'], {}), '(arr < 0)\n', (16634, 16643), True, 'import numpy as np\n'), ((16713, 16732), 'numpy.where', 'np.where', (['(arr > 255)'], {}), '(arr > 255)\n', (16721, 16732), True, 'import numpy as np\n')] |
"""
Finds all samples matching a specific organism,
downloads all antimicrobial metadata from ncbi
"""
#esearch -db biosample -query SAMN03988375 | efetch -mode xml
import pandas as pd
import numpy as np
import os, sys
from Bio import Entrez
import xml.etree.ElementTree as ET
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import cpu_count
from itertools import repeat
"""
if we have a dataframe with biosamples and SRR run ids, we can add a columns of runs
I have mine at https://github.com/superphy/AMR_Predictor/blob/master/data/no_ecoli_GenotypicAMR_Master.xlsx
but just point the line below at yours and make sure it has columns 'run' and 'biosample'
Otherwise this document will only return antibiogram data with biosample ID's
"""
run_excel_path = "data/no_ecoli_GenotypicAMR_Master.xlsx"
other_available = []
def query_to_ids(query):
"""
input: NCBI query string
output: List of ids from the biosample database
"""
handle = Entrez.esearch(db='biosample', retmax = 1000000, term = query)
records = Entrez.read(handle)
handle.close()
print('Number of samples found in the NCBI database: ', len(records['IdList']))
return records['IdList']
def id_to_mic(sample, antimicrobials, mics):
"""
input: id to a BioSample
output: dictionary of metadata
"""
info = {}
headers = []
mic_rows = []
for i, name in enumerate(sample):
# BioSample Acc id
if i == 0:
#print(name[0].text)
info['BioSample'] = name[0].text
# antibiogram table
if i == 1:
# Description
try:
for header in name[2][0][1]:
headers.append(header.text)
for row in name[2][0][2]:
mic_rows.append([i.text for i in row])
except:
return 'skip'
# supplementary information such as serovar
if i == 5:
# Attributes
for attribute in name:
info[attribute.attrib['attribute_name']]=attribute.text
for drug in mic_rows:
if drug[0] not in antimicrobials:
# instead of returning the invalid drug name, it could be appended
# and the full name used as the 3 letter code
continue
mic_3l = mics[antimicrobials.index(drug[0])]
info["SIR_{}".format(mic_3l)] = drug[1]
info["MIC_{}".format(mic_3l)] = drug[2]+' '+drug[3].split('/')[0]
if "Typing Method" in info:
# check for consistency in typing method for a sample
if info["Typing Method"] != drug[8]:
# method inconsistent, set to blank
info["Typing Method"] = ' '
if "Testing Standard" in info:
# check for consistency in testing standard for a sample
if info["Testing Standard"] != drug[9]:
# standard inconsistent, set to blank
info["Testing Standard"] = ' '
info["Typing Method"] = drug[8]
info["Testing Standard"] = drug[9]
return info
def amr_row(info_dict, df_columns):
"""
If value isnt found, set it to empty
"""
vals = []
for i in df_columns:
try:
vals.append(info_dict[i])
except:
vals.append(' ')
return vals
def query_to_df(query, mics, antimicrobials):
"""
Takes in a query and antimicrobials,
returns a pandas df
"""
ids = list(query_to_ids(query))
handle = Entrez.efetch(db='biosample', id = ids)
data = handle.read()
handle.close()
root = ET.fromstring(data)
df_columns = ['BioSample']+['MIC_'+i for i in mics]+['SIR_'+i for i in mics]+[
'isolation_source', 'serovar', 'collection_date', 'collected_by',
'geo_loc_name', 'strain', 'sub_species', 'Typing Method', 'Testing Standard']
mic_data = []
with ProcessPoolExecutor(max_workers = cpu_count()-1) as ppe:
for info_dict in ppe.map(id_to_mic, root, repeat(antimicrobials), repeat(mics)):
if isinstance(info_dict, str):
if info_dict != 'skip':
other_available.append(info_dict)
else:
row = amr_row(info_dict, df_columns)
mic_data.append(amr_row(info_dict, df_columns))
if len(other_available) > 0:
print("The following antimicrobials had data but their 3 letter codes were not declared:")
print(set(other_available))
print("To use these, append their names and 3 letter codes to the lists.")
amr_df = pd.DataFrame(data = mic_data, columns = df_columns)
try:
run_df = pd.read_excel(run_excel_path)
except:
pass
#print('To add a column of runs alongside biosample, change run_excel_path')
else:
old_runs = list(run_df['run'])
old_biosamples = list(run_df['biosample'])
runs = []
for i in amr_df['BioSample']:
try:
runs.append(old_runs[old_biosamples.index(i)])
except:
runs.append(' ')
amr_df.insert(loc = 0, column = "run", value = runs)
return amr_df
def remove_non_mic(df):
"""
Certain samples having disc diffusion values instead of MIC,
with no metadata indicating this.
These need to be manually removed and are stored in
data/dd_blacklist.npy
"""
bl = np.load("data/dd_blacklist.npy", allow_pickle=True)
return df[~df['BioSample'].isin(bl)]
| [
"pandas.DataFrame",
"Bio.Entrez.esearch",
"numpy.load",
"xml.etree.ElementTree.fromstring",
"Bio.Entrez.read",
"Bio.Entrez.efetch",
"pandas.read_excel",
"itertools.repeat",
"multiprocessing.cpu_count"
] | [((984, 1042), 'Bio.Entrez.esearch', 'Entrez.esearch', ([], {'db': '"""biosample"""', 'retmax': '(1000000)', 'term': 'query'}), "(db='biosample', retmax=1000000, term=query)\n", (998, 1042), False, 'from Bio import Entrez\n'), ((1061, 1080), 'Bio.Entrez.read', 'Entrez.read', (['handle'], {}), '(handle)\n', (1072, 1080), False, 'from Bio import Entrez\n'), ((3528, 3565), 'Bio.Entrez.efetch', 'Entrez.efetch', ([], {'db': '"""biosample"""', 'id': 'ids'}), "(db='biosample', id=ids)\n", (3541, 3565), False, 'from Bio import Entrez\n'), ((3623, 3642), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['data'], {}), '(data)\n', (3636, 3642), True, 'import xml.etree.ElementTree as ET\n'), ((4592, 4639), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'mic_data', 'columns': 'df_columns'}), '(data=mic_data, columns=df_columns)\n', (4604, 4639), True, 'import pandas as pd\n'), ((5419, 5470), 'numpy.load', 'np.load', (['"""data/dd_blacklist.npy"""'], {'allow_pickle': '(True)'}), "('data/dd_blacklist.npy', allow_pickle=True)\n", (5426, 5470), True, 'import numpy as np\n'), ((4671, 4700), 'pandas.read_excel', 'pd.read_excel', (['run_excel_path'], {}), '(run_excel_path)\n', (4684, 4700), True, 'import pandas as pd\n'), ((4015, 4037), 'itertools.repeat', 'repeat', (['antimicrobials'], {}), '(antimicrobials)\n', (4021, 4037), False, 'from itertools import repeat\n'), ((4039, 4051), 'itertools.repeat', 'repeat', (['mics'], {}), '(mics)\n', (4045, 4051), False, 'from itertools import repeat\n'), ((3942, 3953), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (3951, 3953), False, 'from multiprocessing import cpu_count\n')] |
#!/usr/bin/python2.7
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import copy
import numpy as np
import cv2
class MultiStageModel(nn.Module):
    """Multi-stage refinement network.

    A feature extractor produces a 22-channel map that is refined three
    times by a shared refinement CNN (``Normal_CNN_Layer``); between
    passes, feature maps are fused by channel concatenation (22 + 22 = 44
    channels) followed by a 1x1 convolution back down to 22 channels.
    """

    def __init__(self, num_stages):
        """
        Args:
            num_stages: number of stages; sizes ``self.stages``
                (``num_stages - 1`` copies), which is currently unused
                in ``forward``.
        """
        super(MultiStageModel, self).__init__()
        # Only feature_extract1 is used in forward(); the remaining
        # extractors/stages/conv_out are kept so that existing
        # checkpoints (state dicts) still load unchanged.
        self.feature_extract1 = Feature_extraction(3, 1)
        self.feature_extract2 = Feature_extraction(3, 1)
        self.feature_extract3 = Feature_extraction(3, 1)
        self.feature_extract4 = Feature_extraction(1, 0)
        self.normal_cnn_layer = Normal_CNN_Layer(3, 66, 66)
        # Fuses two concatenated 22-channel maps (44 ch) back to 22 ch.
        self.conv_1x1_in = nn.Conv2d(44, 22, 1)
        self.stages = nn.ModuleList([copy.deepcopy(SingleStageModel()) for s in range(num_stages - 1)])
        self.conv_out = nn.Conv2d(44, 22, kernel_size=1, padding=0)
        self.num_stages = num_stages

    def forward(self, x):
        """Run the extractor plus three refinement passes.

        Args:
            x: input batch of shape (N, 3, H, W) — the extractor's first
                convolution expects 3 input channels.

        Returns:
            Tuple ``(out1, out2, out3)`` of the three successive
            refinement outputs.
        """
        # NOTE(review): removed a leftover debug print and dead
        # commented-out code from the original forward pass.
        feats = self.feature_extract1(x)
        out1 = self.normal_cnn_layer(feats)
        fused1 = self.conv_1x1_in(torch.cat((feats, out1), 1))
        out2 = self.normal_cnn_layer(fused1)
        fused2 = self.conv_1x1_in(torch.cat((out2, feats), 1))
        out3 = self.normal_cnn_layer(fused2)
        return out1, out2, out3
class Feature_extraction(nn.Module):
    """Convolutional feature extractor.

    Stacks three conv pairs (64 -> 128 -> 256 channels) with two
    max-pool downsamplings in between, then projects to 22 channels
    through two 1x1 convolutions. Note that no activation functions
    are applied between the convolutions.
    """

    def __init__(self, k, p):
        """
        Args:
            k: kernel size for the inner convolutions (conv_2..conv_6).
            p: padding for the inner convolutions.
        """
        super().__init__()
        self.conv_1 = nn.Conv2d(3, 64, kernel_size=5, padding=2)
        self.conv_2 = nn.Conv2d(64, 64, kernel_size=k, padding=p)
        self.conv_3 = nn.Conv2d(64, 128, kernel_size=k, padding=p)
        self.conv_4 = nn.Conv2d(128, 128, kernel_size=k, padding=p)
        self.conv_5 = nn.Conv2d(128, 256, kernel_size=k, padding=p)
        self.conv_6 = nn.Conv2d(256, 256, kernel_size=k, padding=p)
        # 1x1 projections: 256 -> 128 -> 22 channels.
        self.conv_7 = nn.Conv2d(256, 128, 1)
        self.conv_8 = nn.Conv2d(128, 22, 1)
        # Shared pooling layer; halves the spatial resolution.
        self.pool = nn.MaxPool2d(3, stride=2, padding=1)

    def forward(self, x):
        """Map a (N, 3, H, W) batch to a (N, 22, H/4, W/4) feature map."""
        h = self.pool(self.conv_2(self.conv_1(x)))
        h = self.pool(self.conv_4(self.conv_3(h)))
        h = self.conv_6(self.conv_5(h))
        return self.conv_8(self.conv_7(h))
class SingleStageModel(nn.Module):
    """Single refinement stage: a plain 6-convolution CNN.

    Four 3x3 convolutions (256 -> 128 -> 128 -> 256 -> 256) followed by
    two 1x1 projections (256 -> 128 -> 22). No activations are applied.
    """

    def __init__(self):
        super().__init__()
        self.conv_1 = nn.Conv2d(256, 128, kernel_size=3, padding=1)
        self.conv_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.conv_3 = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.conv_4 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.conv_5 = nn.Conv2d(256, 128, 1)
        self.conv_out = nn.Conv2d(128, 22, 1)
        # NOTE(review): these dilated layers are constructed but never
        # used in forward(); kept for state-dict compatibility.
        self.layers = nn.ModuleList([copy.deepcopy(DilatedResidualLayer(2 ** i, 66 - i, 66 - 2 * (1 + i * 2))) for i in range(4)])

    def forward(self, x):
        """Refine a (N, 256, H, W) input into a (N, 22, H, W) output."""
        h = x
        for conv in (self.conv_1, self.conv_2, self.conv_3,
                     self.conv_4, self.conv_5):
            h = conv(h)
        return self.conv_out(h)
class DilatedResidualLayer(nn.Module):
    """Stack of five dilated 1-D convolutions, each followed by ReLU.

    Dilations grow as 1, 3, 5, 7, 9; the first and last convolutions use
    no padding, so the temporal length shrinks with every layer
    (44 samples in total for kernel size 3).

    NOTE(review): the constructor arguments ``dilation``,
    ``in_channels`` and ``out_channels`` are currently ignored — every
    convolution is hard-coded to 256 channels. Honoring them would
    change parameter shapes, so they are left as-is.
    """

    def __init__(self, dilation, in_channels, out_channels):
        super().__init__()
        self.conv_dilated = nn.Conv1d(256, 256, 3, padding=0, dilation=1)
        self.conv_dilated1 = nn.Conv1d(256, 256, 3, padding=1, dilation=3)
        self.conv_dilated2 = nn.Conv1d(256, 256, 3, padding=1, dilation=5)
        self.conv_dilated3 = nn.Conv1d(256, 256, 3, padding=1, dilation=7)
        self.conv_dilated4 = nn.Conv1d(256, 256, 3, padding=0, dilation=9)
        # Unused in forward(); kept for state-dict compatibility.
        self.conv_1x1 = nn.Conv1d(256, 256, 1)
        self.dropout = nn.Dropout()

    def forward(self, x):
        """Apply the five dilated convolutions with ReLU after each."""
        h = x
        for conv in (self.conv_dilated, self.conv_dilated1,
                     self.conv_dilated2, self.conv_dilated3,
                     self.conv_dilated4):
            h = F.relu(conv(h))
        return h
class Normal_CNN_Layer(nn.Module):
    """Five-layer 2-D CNN refining 22-channel heatmaps.

    Channel path 22 -> 128 -> 128 -> 256 -> 128 -> 22; every convolution
    uses kernel 3, padding 1, dilation 1, so spatial size is preserved.
    ReLU follows each layer except the last.  The constructor arguments
    are accepted but ignored (all shapes are hard-coded).
    """

    def __init__(self, dilation, in_channels, out_channels):
        super(Normal_CNN_Layer, self).__init__()
        self.conv_dilated = nn.Conv2d(22, 128, 3, padding=1, dilation=1)
        self.conv_dilated1 = nn.Conv2d(128, 128, 3, padding=1, dilation=1)
        self.conv_dilated2 = nn.Conv2d(128, 256, 3, padding=1, dilation=1)
        self.conv_dilated3 = nn.Conv2d(256, 128, 3, padding=1, dilation=1)
        self.conv_dilated4 = nn.Conv2d(128, 22, 3, padding=1, dilation=1)
        self.conv_1x1 = nn.Conv2d(256, 256, 1)  # defined but unused in forward
        self.dropout = nn.Dropout()              # defined but unused in forward

    def forward(self, x):
        """Return the refined 22-channel map (no activation on the output)."""
        hidden = x
        for conv in (self.conv_dilated, self.conv_dilated1,
                     self.conv_dilated2, self.conv_dilated3):
            hidden = F.relu(conv(hidden))
        return self.conv_dilated4(hidden)
class Hand_Detect_layer(nn.Module):
    """Multi-stage hand keypoint heatmap network (CPM-style).

    A VGG-like backbone (``features``) produces a 256-channel feature
    map after two stride-2 max-pools (~1/4 input resolution).  Four
    refinement stages each predict a 22-channel heatmap; stages 1-3
    additionally see the backbone features concatenated with the
    previous stage's prediction (256 + 22 = 278 input channels).
    ``forward`` returns the predictions of stages 1, 2 and 3 (stage 0
    only feeds stage 1).
    """

    def __init__(self):
        super(Hand_Detect_layer, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, 5, stride=1, padding=2),
            nn.ReLU(True),
            nn.Conv2d(64, 64, 3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(64, 64, 3, stride=1, padding=1),
            nn.MaxPool2d(3, stride=2, padding=1),
            nn.ReLU(True),
            nn.Conv2d(64, 128, 3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(128, 128, 3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(128, 128, 3, stride=1, padding=1),
            nn.MaxPool2d(3, stride=2, padding=1),
            nn.ReLU(True),
            nn.Conv2d(128, 256, 3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(256, 256, 3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(256, 256, 3, stride=1, padding=1),
            nn.ReLU(True)
        )
        self.pool = nn.MaxPool2d(3, stride=2, padding=1)
        # Stage 0: backbone features only (256 input channels).
        self.stg0_conv0 = nn.Conv2d(256, 256, 3, stride=1, padding=1)
        self.stg0_conv1 = nn.Conv2d(256, 128, 3, stride=1, padding=1)
        self.stg0_conv2 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
        self.stg0_conv3 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
        self.stg0_conv4 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self.stg0_conv5 = nn.Conv2d(64, 22, 3, stride=1, padding=1)
        # Stages 1-3: features + previous heatmap (278 input channels).
        self.stg1_conv0 = nn.Conv2d(278, 256, 3, stride=1, padding=1)
        self.stg1_conv1 = nn.Conv2d(256, 128, 3, stride=1, padding=1)
        self.stg1_conv2 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
        self.stg1_conv3 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
        self.stg1_conv4 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self.stg1_conv5 = nn.Conv2d(64, 22, 3, stride=1, padding=1)
        self.stg2_conv0 = nn.Conv2d(278, 256, 3, stride=1, padding=1)
        self.stg2_conv1 = nn.Conv2d(256, 128, 3, stride=1, padding=1)
        self.stg2_conv2 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
        self.stg2_conv3 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
        self.stg2_conv4 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self.stg2_conv5 = nn.Conv2d(64, 22, 3, stride=1, padding=1)
        self.stg3_conv0 = nn.Conv2d(278, 256, 3, stride=1, padding=1)
        self.stg3_conv1 = nn.Conv2d(256, 128, 3, stride=1, padding=1)
        self.stg3_conv2 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
        self.stg3_conv3 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
        self.stg3_conv4 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
        self.stg3_conv5 = nn.Conv2d(64, 22, 3, stride=1, padding=1)
        self.dropout = nn.Dropout()  # defined but unused in forward

    def _stage(self, convs, x):
        """Run one refinement stage: each conv followed by ReLU
        (including the final 22-channel prediction, as in the original)."""
        out = x
        for conv in convs:
            out = F.relu(conv(out))
        return out

    def forward(self, x):
        """Compute stage predictions for input images ``x`` ([B,3,H,W]).

        Returns
        -------
        tuple(Tensor, Tensor, Tensor)
            22-channel heatmaps from stages 1, 2 and 3.
        """
        feat = self.features(x)
        # BUGFIX: the original applied self.stg1_conv4 inside stage 0,
        # leaving stg0_conv4 defined but never used; stage 0 now uses
        # its own fourth convolution.
        stg0 = self._stage(
            (self.stg0_conv0, self.stg0_conv1, self.stg0_conv2,
             self.stg0_conv3, self.stg0_conv4, self.stg0_conv5), feat)
        stg1 = self._stage(
            (self.stg1_conv0, self.stg1_conv1, self.stg1_conv2,
             self.stg1_conv3, self.stg1_conv4, self.stg1_conv5),
            torch.cat((feat, stg0), 1))
        stg2 = self._stage(
            (self.stg2_conv0, self.stg2_conv1, self.stg2_conv2,
             self.stg2_conv3, self.stg2_conv4, self.stg2_conv5),
            torch.cat((feat, stg1), 1))
        stg3 = self._stage(
            (self.stg3_conv0, self.stg3_conv1, self.stg3_conv2,
             self.stg3_conv3, self.stg3_conv4, self.stg3_conv5),
            torch.cat((feat, stg2), 1))
        return stg1, stg2, stg3
class Trainer:
    """Training/evaluation driver for Hand_Detect_layer.

    Optimizes the three stage heatmaps against dense targets with a
    summed MSE loss, checkpoints every 500 steps, and shows a live
    OpenCV overlay of the current prediction.
    """

    def __init__(self):
        self.model = Hand_Detect_layer()
        self.mse = nn.MSELoss(reduction='sum')

    def adjust_learning_rate(self, optimizer, lr):
        """Set ``lr`` on every parameter group of ``optimizer``."""
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

    def _overlay_heatmap(self, image_chw, heatmap_chw):
        """Blend one input crop with its summed keypoint heatmaps.

        Parameters
        ----------
        image_chw : torch.Tensor
            [3,H,W] crop; de-normalized with (x + 0.5) * 255, so it is
            assumed to lie roughly in [-0.5, 0.5] -- TODO confirm.
        heatmap_chw : torch.Tensor
            [22,h,w] heatmaps; the last channel is excluded from the sum
            (presumably a background channel -- verify).

        Returns
        -------
        tuple(np.ndarray, np.ndarray)
            (uint8 blended overlay, uint8 3-channel heatmap image).  The
            heatmap is resized to 128x128, so the crop is expected to be
            128x128 as well -- TODO confirm.
        """
        img = image_chw.permute(1, 2, 0).cpu().detach().numpy()
        img = (img + 0.5) * 255
        hm = heatmap_chw.permute(1, 2, 0).cpu().detach().numpy()
        demo = np.sum(hm[:, :, 0:-1], -1)
        demo = cv2.resize(demo, (128, 128)) * 255
        demo = demo.astype(np.uint8)
        demo = np.repeat(np.expand_dims(demo, axis=-1), 3, axis=-1)
        overlay = (0.5 * img + 0.5 * demo).astype(np.uint8)
        return overlay, demo

    def train(self, save_dir, batch_gen, num_epochs, batch_size, learning_rate, device):
        """Resume from the hard-coded epoch-40002 checkpoint and train.

        Parameters
        ----------
        save_dir : str
            Directory with ``epoch-*.pt`` checkpoints; new ``.model``,
            ``.opt`` and ``.pt`` files are written there every 500 steps.
        batch_gen : object
            Batch source exposing has_next()/next_batch()/reset().
        num_epochs : int
            Number of epochs to run.
        batch_size : int
            Batch size; also normalizes the summed MSE per stage.
        learning_rate : float
            Initial Adam learning rate, decayed by 0.99 when an epoch
            starts with ``step % 1000 == 1``.
        device : torch.device
            Device to train on.
        """
        optimizer = optim.Adam(self.model.parameters(), lr=learning_rate)
        self.model.to(device)
        # Hard-coded resume point; raises if the checkpoint is missing.
        checkpoint = torch.load(save_dir + "/epoch-" + str(40002) + ".pt")
        self.model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.lr = learning_rate
        self.step = 0
        for epoch in range(num_epochs):
            # The decay check runs once per epoch, so it only fires when
            # an epoch happens to begin at step % 1000 == 1.
            if self.step % 1000 == 1:
                self.lr = self.lr * 0.99
                # BUGFIX: the original passed the undefined name ``lr``
                # here (NameError at runtime); use the decayed self.lr.
                self.adjust_learning_rate(optimizer, self.lr)
            epoch_loss = 0
            while batch_gen.has_next():
                self.step = self.step + 1
                batch_input, batch_target = batch_gen.next_batch(batch_size)
                batch_input, batch_target = batch_input.to(device), batch_target.to(device)
                optimizer.zero_grad()
                hm1, hm2, hm3 = self.model(batch_input)
                # Per-stage summed MSE, normalized by batch size.
                loss1 = self.mse(hm1, batch_target) / batch_size
                loss2 = self.mse(hm2, batch_target) / batch_size
                loss3 = self.mse(hm3, batch_target) / batch_size
                loss = loss1 + loss2 + loss3
                print(loss1)
                print(loss2)
                print(loss3)
                print("epoch ", epoch, " step ", self.step)
                epoch_loss += loss.item()
                loss.backward()
                optimizer.step()
                # Live preview of the final-stage prediction.
                # BUGFIX: the original blended the stale *target* heatmap
                # image (hm_demo1) into this overlay; blend the
                # prediction's own heatmap instead.
                overlay, demo = self._overlay_heatmap(batch_input[0], hm3[0])
                cv2.imshow("cv_crop1", overlay)
                cv2.imshow("hm_demo1", demo)
                cv2.waitKey(1)
                if self.step % 500 == 1:
                    torch.save(self.model, save_dir + "/epoch-" + str(self.step + 1) + ".model")
                    torch.save(optimizer.state_dict(), save_dir + "/epoch-" + str(self.step + 1) + ".opt")
                    torch.save({
                        'epoch': epoch,
                        'model_state_dict': self.model.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'loss': loss,
                    }, save_dir + "/epoch-" + str(self.step + 1) + ".pt")
            batch_gen.reset()

    def predict(self, model_dir, results_dir, features_path, vid_list_file, epoch, actions_dict, device, sample_rate):
        """Run frame-level recognition over a video list and write results.

        NOTE(review): this method appears inherited from a sequence-model
        codebase -- it feeds a ``.model`` file (saved as a whole module by
        ``train``) into ``load_state_dict`` and calls the model with a
        second mask argument, neither of which matches Hand_Detect_layer's
        interface.  Confirm before relying on it.
        """
        self.model.eval()
        with torch.no_grad():
            self.model.to(device)
            self.model.load_state_dict(torch.load(model_dir + "/epoch-" + str(2002) + ".model"))
            file_ptr = open(vid_list_file, 'r')
            list_of_vids = file_ptr.read().split('\n')[:-1]
            file_ptr.close()
            for vid in list_of_vids:
                print(vid)
                features = np.load(features_path + vid.split('.')[0] + '.npy')
                features = features[:, ::sample_rate]
                input_x = torch.tensor(features, dtype=torch.float)
                input_x.unsqueeze_(0)
                input_x = input_x.to(device)
                predictions = self.model(input_x, torch.ones(input_x.size(), device=device))
                _, predicted = torch.max(predictions[-1].data, 1)
                predicted = predicted.squeeze()
                recognition = []
                # BUGFIX: dict views are not subscriptable in Python 3;
                # materialize them once to map class id -> action name.
                action_names = list(actions_dict.keys())
                action_ids = list(actions_dict.values())
                for i in range(len(predicted)):
                    recognition = np.concatenate(
                        (recognition,
                         [action_names[action_ids.index(predicted[i].item())]] * sample_rate))
                f_name = vid.split('/')[-1].split('.')[0]
                f_ptr = open(results_dir + "/" + f_name, "w")
                f_ptr.write("### Frame level recognition: ###\n")
                f_ptr.write(' '.join(recognition))
                f_ptr.close()
| [
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.nn.ReLU",
"numpy.sum",
"cv2.waitKey",
"torch.nn.Conv1d",
"torch.nn.Conv2d",
"torch.cat",
"numpy.expand_dims",
"cv2.imshow",
"numpy.shape",
"torch.max",
"torch.nn.functional.relu",
"torch.nn.MaxPool2d",
"torch.tensor",
"torch.no_grad",
"c... | [((669, 689), 'torch.nn.Conv2d', 'nn.Conv2d', (['(44)', '(22)', '(1)'], {}), '(44, 22, 1)\n', (678, 689), True, 'import torch.nn as nn\n'), ((817, 860), 'torch.nn.Conv2d', 'nn.Conv2d', (['(44)', '(22)'], {'kernel_size': '(1)', 'padding': '(0)'}), '(44, 22, kernel_size=1, padding=0)\n', (826, 860), True, 'import torch.nn as nn\n'), ((1478, 1503), 'torch.cat', 'torch.cat', (['(out, out1)', '(1)'], {}), '((out, out1), 1)\n', (1487, 1503), False, 'import torch\n'), ((1630, 1655), 'torch.cat', 'torch.cat', (['(out2, out)', '(1)'], {}), '((out2, out), 1)\n', (1639, 1655), False, 'import torch\n'), ((2903, 2945), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(3, 64, kernel_size=5, padding=2)\n', (2912, 2945), True, 'import torch.nn as nn\n'), ((2968, 3011), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': 'k', 'padding': 'p'}), '(64, 64, kernel_size=k, padding=p)\n', (2977, 3011), True, 'import torch.nn as nn\n'), ((3033, 3077), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)'], {'kernel_size': 'k', 'padding': 'p'}), '(64, 128, kernel_size=k, padding=p)\n', (3042, 3077), True, 'import torch.nn as nn\n'), ((3100, 3145), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)'], {'kernel_size': 'k', 'padding': 'p'}), '(128, 128, kernel_size=k, padding=p)\n', (3109, 3145), True, 'import torch.nn as nn\n'), ((3168, 3213), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)'], {'kernel_size': 'k', 'padding': 'p'}), '(128, 256, kernel_size=k, padding=p)\n', (3177, 3213), True, 'import torch.nn as nn\n'), ((3236, 3281), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': 'k', 'padding': 'p'}), '(256, 256, kernel_size=k, padding=p)\n', (3245, 3281), True, 'import torch.nn as nn\n'), ((3304, 3326), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)', '(1)'], {}), '(256, 128, 1)\n', (3313, 3326), True, 'import torch.nn as nn\n'), ((3348, 3369), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(22)', 
'(1)'], {}), '(128, 22, 1)\n', (3357, 3369), True, 'import torch.nn as nn\n'), ((3465, 3501), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(3)'], {'stride': '(2)', 'padding': '(1)'}), '(3, stride=2, padding=1)\n', (3477, 3501), True, 'import torch.nn as nn\n'), ((4283, 4328), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(256, 128, kernel_size=3, padding=1)\n', (4292, 4328), True, 'import torch.nn as nn\n'), ((4351, 4396), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(128, 128, kernel_size=3, padding=1)\n', (4360, 4396), True, 'import torch.nn as nn\n'), ((4419, 4464), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(128, 256, kernel_size=3, padding=1)\n', (4428, 4464), True, 'import torch.nn as nn\n'), ((4487, 4532), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(256, 256, kernel_size=3, padding=1)\n', (4496, 4532), True, 'import torch.nn as nn\n'), ((4555, 4577), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)', '(1)'], {}), '(256, 128, 1)\n', (4564, 4577), True, 'import torch.nn as nn\n'), ((4601, 4622), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(22)', '(1)'], {}), '(128, 22, 1)\n', (4610, 4622), True, 'import torch.nn as nn\n'), ((5289, 5334), 'torch.nn.Conv1d', 'nn.Conv1d', (['(256)', '(256)', '(3)'], {'padding': '(0)', 'dilation': '(1)'}), '(256, 256, 3, padding=0, dilation=1)\n', (5298, 5334), True, 'import torch.nn as nn\n'), ((5364, 5409), 'torch.nn.Conv1d', 'nn.Conv1d', (['(256)', '(256)', '(3)'], {'padding': '(1)', 'dilation': '(3)'}), '(256, 256, 3, padding=1, dilation=3)\n', (5373, 5409), True, 'import torch.nn as nn\n'), ((5438, 5483), 'torch.nn.Conv1d', 'nn.Conv1d', (['(256)', '(256)', '(3)'], {'padding': '(1)', 'dilation': '(5)'}), '(256, 256, 3, padding=1, dilation=5)\n', (5447, 5483), True, 'import torch.nn as nn\n'), ((5513, 5558), 
'torch.nn.Conv1d', 'nn.Conv1d', (['(256)', '(256)', '(3)'], {'padding': '(1)', 'dilation': '(7)'}), '(256, 256, 3, padding=1, dilation=7)\n', (5522, 5558), True, 'import torch.nn as nn\n'), ((5588, 5633), 'torch.nn.Conv1d', 'nn.Conv1d', (['(256)', '(256)', '(3)'], {'padding': '(0)', 'dilation': '(9)'}), '(256, 256, 3, padding=0, dilation=9)\n', (5597, 5633), True, 'import torch.nn as nn\n'), ((5737, 5759), 'torch.nn.Conv1d', 'nn.Conv1d', (['(256)', '(256)', '(1)'], {}), '(256, 256, 1)\n', (5746, 5759), True, 'import torch.nn as nn\n'), ((5784, 5796), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (5794, 5796), True, 'import torch.nn as nn\n'), ((6551, 6595), 'torch.nn.Conv2d', 'nn.Conv2d', (['(22)', '(128)', '(3)'], {'padding': '(1)', 'dilation': '(1)'}), '(22, 128, 3, padding=1, dilation=1)\n', (6560, 6595), True, 'import torch.nn as nn\n'), ((6625, 6670), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)'], {'padding': '(1)', 'dilation': '(1)'}), '(128, 128, 3, padding=1, dilation=1)\n', (6634, 6670), True, 'import torch.nn as nn\n'), ((6699, 6744), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(3)'], {'padding': '(1)', 'dilation': '(1)'}), '(128, 256, 3, padding=1, dilation=1)\n', (6708, 6744), True, 'import torch.nn as nn\n'), ((6774, 6819), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)', '(3)'], {'padding': '(1)', 'dilation': '(1)'}), '(256, 128, 3, padding=1, dilation=1)\n', (6783, 6819), True, 'import torch.nn as nn\n'), ((6849, 6893), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(22)', '(3)'], {'padding': '(1)', 'dilation': '(1)'}), '(128, 22, 3, padding=1, dilation=1)\n', (6858, 6893), True, 'import torch.nn as nn\n'), ((6997, 7019), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(1)'], {}), '(256, 256, 1)\n', (7006, 7019), True, 'import torch.nn as nn\n'), ((7044, 7056), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (7054, 7056), True, 'import torch.nn as nn\n'), ((8760, 8796), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', 
(['(3)'], {'stride': '(2)', 'padding': '(1)'}), '(3, stride=2, padding=1)\n', (8772, 8796), True, 'import torch.nn as nn\n'), ((8822, 8865), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(256, 256, 3, stride=1, padding=1)\n', (8831, 8865), True, 'import torch.nn as nn\n'), ((8889, 8932), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(256, 128, 3, stride=1, padding=1)\n', (8898, 8932), True, 'import torch.nn as nn\n'), ((8956, 8999), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(128, 128, 3, stride=1, padding=1)\n', (8965, 8999), True, 'import torch.nn as nn\n'), ((9023, 9065), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(64)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(128, 64, 3, stride=1, padding=1)\n', (9032, 9065), True, 'import torch.nn as nn\n'), ((9089, 9130), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(64, 64, 3, stride=1, padding=1)\n', (9098, 9130), True, 'import torch.nn as nn\n'), ((9154, 9195), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(22)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(64, 22, 3, stride=1, padding=1)\n', (9163, 9195), True, 'import torch.nn as nn\n'), ((9220, 9263), 'torch.nn.Conv2d', 'nn.Conv2d', (['(278)', '(256)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(278, 256, 3, stride=1, padding=1)\n', (9229, 9263), True, 'import torch.nn as nn\n'), ((9287, 9330), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(256, 128, 3, stride=1, padding=1)\n', (9296, 9330), True, 'import torch.nn as nn\n'), ((9354, 9397), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(128, 128, 3, stride=1, padding=1)\n', (9363, 9397), True, 'import torch.nn as nn\n'), ((9421, 9463), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(64)', '(3)'], 
{'stride': '(1)', 'padding': '(1)'}), '(128, 64, 3, stride=1, padding=1)\n', (9430, 9463), True, 'import torch.nn as nn\n'), ((9487, 9528), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(64, 64, 3, stride=1, padding=1)\n', (9496, 9528), True, 'import torch.nn as nn\n'), ((9552, 9593), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(22)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(64, 22, 3, stride=1, padding=1)\n', (9561, 9593), True, 'import torch.nn as nn\n'), ((9618, 9661), 'torch.nn.Conv2d', 'nn.Conv2d', (['(278)', '(256)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(278, 256, 3, stride=1, padding=1)\n', (9627, 9661), True, 'import torch.nn as nn\n'), ((9685, 9728), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(256, 128, 3, stride=1, padding=1)\n', (9694, 9728), True, 'import torch.nn as nn\n'), ((9752, 9795), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(128, 128, 3, stride=1, padding=1)\n', (9761, 9795), True, 'import torch.nn as nn\n'), ((9819, 9861), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(64)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(128, 64, 3, stride=1, padding=1)\n', (9828, 9861), True, 'import torch.nn as nn\n'), ((9885, 9926), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(64, 64, 3, stride=1, padding=1)\n', (9894, 9926), True, 'import torch.nn as nn\n'), ((9950, 9991), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(22)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(64, 22, 3, stride=1, padding=1)\n', (9959, 9991), True, 'import torch.nn as nn\n'), ((10016, 10059), 'torch.nn.Conv2d', 'nn.Conv2d', (['(278)', '(256)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(278, 256, 3, stride=1, padding=1)\n', (10025, 10059), True, 'import torch.nn as nn\n'), ((10083, 10126), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)', '(3)'], 
{'stride': '(1)', 'padding': '(1)'}), '(256, 128, 3, stride=1, padding=1)\n', (10092, 10126), True, 'import torch.nn as nn\n'), ((10150, 10193), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(128, 128, 3, stride=1, padding=1)\n', (10159, 10193), True, 'import torch.nn as nn\n'), ((10217, 10259), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(64)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(128, 64, 3, stride=1, padding=1)\n', (10226, 10259), True, 'import torch.nn as nn\n'), ((10283, 10324), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(64, 64, 3, stride=1, padding=1)\n', (10292, 10324), True, 'import torch.nn as nn\n'), ((10348, 10389), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(22)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(64, 22, 3, stride=1, padding=1)\n', (10357, 10389), True, 'import torch.nn as nn\n'), ((10414, 10426), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (10424, 10426), True, 'import torch.nn as nn\n'), ((10915, 10931), 'torch.nn.functional.relu', 'F.relu', (['stg0_out'], {}), '(stg0_out)\n', (10921, 10931), True, 'import torch.nn.functional as F\n'), ((10995, 11011), 'torch.nn.functional.relu', 'F.relu', (['stg0_out'], {}), '(stg0_out)\n', (11001, 11011), True, 'import torch.nn.functional as F\n'), ((11075, 11091), 'torch.nn.functional.relu', 'F.relu', (['stg0_out'], {}), '(stg0_out)\n', (11081, 11091), True, 'import torch.nn.functional as F\n'), ((11155, 11171), 'torch.nn.functional.relu', 'F.relu', (['stg0_out'], {}), '(stg0_out)\n', (11161, 11171), True, 'import torch.nn.functional as F\n'), ((11235, 11251), 'torch.nn.functional.relu', 'F.relu', (['stg0_out'], {}), '(stg0_out)\n', (11241, 11251), True, 'import torch.nn.functional as F\n'), ((11315, 11331), 'torch.nn.functional.relu', 'F.relu', (['stg0_out'], {}), '(stg0_out)\n', (11321, 11331), True, 'import torch.nn.functional as F\n'), ((11350, 11379), 'torch.cat', 
'torch.cat', (['(out, stg0_out)', '(1)'], {}), '((out, stg0_out), 1)\n', (11359, 11379), False, 'import torch\n'), ((11443, 11459), 'torch.nn.functional.relu', 'F.relu', (['stg1_out'], {}), '(stg1_out)\n', (11449, 11459), True, 'import torch.nn.functional as F\n'), ((11523, 11539), 'torch.nn.functional.relu', 'F.relu', (['stg1_out'], {}), '(stg1_out)\n', (11529, 11539), True, 'import torch.nn.functional as F\n'), ((11603, 11619), 'torch.nn.functional.relu', 'F.relu', (['stg1_out'], {}), '(stg1_out)\n', (11609, 11619), True, 'import torch.nn.functional as F\n'), ((11683, 11699), 'torch.nn.functional.relu', 'F.relu', (['stg1_out'], {}), '(stg1_out)\n', (11689, 11699), True, 'import torch.nn.functional as F\n'), ((11763, 11779), 'torch.nn.functional.relu', 'F.relu', (['stg1_out'], {}), '(stg1_out)\n', (11769, 11779), True, 'import torch.nn.functional as F\n'), ((11843, 11859), 'torch.nn.functional.relu', 'F.relu', (['stg1_out'], {}), '(stg1_out)\n', (11849, 11859), True, 'import torch.nn.functional as F\n'), ((11880, 11909), 'torch.cat', 'torch.cat', (['(out, stg1_out)', '(1)'], {}), '((out, stg1_out), 1)\n', (11889, 11909), False, 'import torch\n'), ((11973, 11989), 'torch.nn.functional.relu', 'F.relu', (['stg2_out'], {}), '(stg2_out)\n', (11979, 11989), True, 'import torch.nn.functional as F\n'), ((12053, 12069), 'torch.nn.functional.relu', 'F.relu', (['stg2_out'], {}), '(stg2_out)\n', (12059, 12069), True, 'import torch.nn.functional as F\n'), ((12133, 12149), 'torch.nn.functional.relu', 'F.relu', (['stg2_out'], {}), '(stg2_out)\n', (12139, 12149), True, 'import torch.nn.functional as F\n'), ((12213, 12229), 'torch.nn.functional.relu', 'F.relu', (['stg2_out'], {}), '(stg2_out)\n', (12219, 12229), True, 'import torch.nn.functional as F\n'), ((12293, 12309), 'torch.nn.functional.relu', 'F.relu', (['stg2_out'], {}), '(stg2_out)\n', (12299, 12309), True, 'import torch.nn.functional as F\n'), ((12373, 12389), 'torch.nn.functional.relu', 'F.relu', (['stg2_out'], {}), 
'(stg2_out)\n', (12379, 12389), True, 'import torch.nn.functional as F\n'), ((12411, 12440), 'torch.cat', 'torch.cat', (['(out, stg2_out)', '(1)'], {}), '((out, stg2_out), 1)\n', (12420, 12440), False, 'import torch\n'), ((12504, 12520), 'torch.nn.functional.relu', 'F.relu', (['stg3_out'], {}), '(stg3_out)\n', (12510, 12520), True, 'import torch.nn.functional as F\n'), ((12584, 12600), 'torch.nn.functional.relu', 'F.relu', (['stg3_out'], {}), '(stg3_out)\n', (12590, 12600), True, 'import torch.nn.functional as F\n'), ((12664, 12680), 'torch.nn.functional.relu', 'F.relu', (['stg3_out'], {}), '(stg3_out)\n', (12670, 12680), True, 'import torch.nn.functional as F\n'), ((12744, 12760), 'torch.nn.functional.relu', 'F.relu', (['stg3_out'], {}), '(stg3_out)\n', (12750, 12760), True, 'import torch.nn.functional as F\n'), ((12824, 12840), 'torch.nn.functional.relu', 'F.relu', (['stg3_out'], {}), '(stg3_out)\n', (12830, 12840), True, 'import torch.nn.functional as F\n'), ((12904, 12920), 'torch.nn.functional.relu', 'F.relu', (['stg3_out'], {}), '(stg3_out)\n', (12910, 12920), True, 'import torch.nn.functional as F\n'), ((13103, 13130), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (13113, 13130), True, 'import torch.nn as nn\n'), ((1879, 1892), 'numpy.shape', 'np.shape', (['out'], {}), '(out)\n', (1887, 1892), True, 'import numpy as np\n'), ((7815, 7855), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)', '(5)'], {'stride': '(1)', 'padding': '(2)'}), '(3, 64, 5, stride=1, padding=2)\n', (7824, 7855), True, 'import torch.nn as nn\n'), ((7869, 7882), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (7876, 7882), True, 'import torch.nn as nn\n'), ((7896, 7937), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(64, 64, 3, stride=1, padding=1)\n', (7905, 7937), True, 'import torch.nn as nn\n'), ((7951, 7964), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (7958, 7964), 
True, 'import torch.nn as nn\n'), ((7978, 8019), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(64, 64, 3, stride=1, padding=1)\n', (7987, 8019), True, 'import torch.nn as nn\n'), ((8033, 8069), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(3)'], {'stride': '(2)', 'padding': '(1)'}), '(3, stride=2, padding=1)\n', (8045, 8069), True, 'import torch.nn as nn\n'), ((8083, 8096), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (8090, 8096), True, 'import torch.nn as nn\n'), ((8110, 8152), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(64, 128, 3, stride=1, padding=1)\n', (8119, 8152), True, 'import torch.nn as nn\n'), ((8166, 8179), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (8173, 8179), True, 'import torch.nn as nn\n'), ((8193, 8236), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(128, 128, 3, stride=1, padding=1)\n', (8202, 8236), True, 'import torch.nn as nn\n'), ((8250, 8263), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (8257, 8263), True, 'import torch.nn as nn\n'), ((8277, 8320), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(128, 128, 3, stride=1, padding=1)\n', (8286, 8320), True, 'import torch.nn as nn\n'), ((8334, 8370), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(3)'], {'stride': '(2)', 'padding': '(1)'}), '(3, stride=2, padding=1)\n', (8346, 8370), True, 'import torch.nn as nn\n'), ((8384, 8397), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (8391, 8397), True, 'import torch.nn as nn\n'), ((8411, 8454), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(128, 256, 3, stride=1, padding=1)\n', (8420, 8454), True, 'import torch.nn as nn\n'), ((8468, 8481), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (8475, 8481), True, 'import torch.nn as nn\n'), 
((8495, 8538), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(256, 256, 3, stride=1, padding=1)\n', (8504, 8538), True, 'import torch.nn as nn\n'), ((8552, 8565), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (8559, 8565), True, 'import torch.nn as nn\n'), ((8579, 8622), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(256, 256, 3, stride=1, padding=1)\n', (8588, 8622), True, 'import torch.nn as nn\n'), ((8636, 8649), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (8643, 8649), True, 'import torch.nn as nn\n'), ((19159, 19174), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19172, 19174), False, 'import torch\n'), ((14761, 14788), 'numpy.sum', 'np.sum', (['hm1[:, :, 0:-1]', '(-1)'], {}), '(hm1[:, :, 0:-1], -1)\n', (14767, 14788), True, 'import numpy as np\n'), ((14811, 14843), 'cv2.resize', 'cv2.resize', (['hm_demo1', '(128, 128)'], {}), '(hm_demo1, (128, 128))\n', (14821, 14843), False, 'import cv2\n'), ((14958, 14991), 'numpy.expand_dims', 'np.expand_dims', (['hm_demo1'], {'axis': '(-1)'}), '(hm_demo1, axis=-1)\n', (14972, 14991), True, 'import numpy as np\n'), ((15017, 15048), 'numpy.repeat', 'np.repeat', (['hm_demo1', '(3)'], {'axis': '(-1)'}), '(hm_demo1, 3, axis=-1)\n', (15026, 15048), True, 'import numpy as np\n'), ((17431, 17458), 'numpy.sum', 'np.sum', (['hm2[:, :, 0:-1]', '(-1)'], {}), '(hm2[:, :, 0:-1], -1)\n', (17437, 17458), True, 'import numpy as np\n'), ((17481, 17513), 'cv2.resize', 'cv2.resize', (['hm_demo2', '(128, 128)'], {}), '(hm_demo2, (128, 128))\n', (17491, 17513), False, 'import cv2\n'), ((17628, 17661), 'numpy.expand_dims', 'np.expand_dims', (['hm_demo2'], {'axis': '(-1)'}), '(hm_demo2, axis=-1)\n', (17642, 17661), True, 'import numpy as np\n'), ((17687, 17718), 'numpy.repeat', 'np.repeat', (['hm_demo2', '(3)'], {'axis': '(-1)'}), '(hm_demo2, 3, axis=-1)\n', (17696, 17718), True, 'import numpy as np\n'), 
((17840, 17872), 'cv2.imshow', 'cv2.imshow', (['"""cv_crop1"""', 'cv_crop1'], {}), "('cv_crop1', cv_crop1)\n", (17850, 17872), False, 'import cv2\n'), ((17888, 17920), 'cv2.imshow', 'cv2.imshow', (['"""hm_demo1"""', 'hm_demo2'], {}), "('hm_demo1', hm_demo2)\n", (17898, 17920), False, 'import cv2\n'), ((17936, 17950), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (17947, 17950), False, 'import cv2\n'), ((19668, 19709), 'torch.tensor', 'torch.tensor', (['features'], {'dtype': 'torch.float'}), '(features, dtype=torch.float)\n', (19680, 19709), False, 'import torch\n'), ((19917, 19951), 'torch.max', 'torch.max', (['predictions[-1].data', '(1)'], {}), '(predictions[-1].data, 1)\n', (19926, 19951), False, 'import torch\n')] |
# Copyright 2021 Toyota Research Institute. All rights reserved.
import numpy as np
from camviz.objects.object import Object
from camviz.utils.geometry import transpose, invert
from camviz.utils.types import is_list, is_float
from camviz.utils.utils import numpyf, add_row0, add_col1, image_grid
def camviz_camera(camera):
"""
Converts packnet-sfm cameras to camviz cameras
Parameters
----------
camera : Camera or list[Camera]
Input packnet-sfm cameras
Returns
-------
camera_cv : camviz.objects.camera.Camera
output camviz cameras
"""
# Create a list of cameras if necessary
if is_list(camera):
return [camviz_camera(cam) for cam in camera]
# Return a list of cameras for each batch camera
return [Camera(cam=cam) for cam in camera]
class Camera(Object):
"""
Create a camera class
Parameters
----------
scale : float
Scale used when drawing the object
wh : tuple
Image dimensions (width, height)
K : np.array
Camera intrinsics [3,3]
pose : np.array
Object pose
"""
def __init__(self, scale=1.0, wh=None, K=None, pose=None):
# Initialize object super-class
super().__init__(scale, pose)
# If intrinsics is provided, use it
if K is not None:
self.K = transpose(numpyf(K))
self.iK = np.linalg.inv(self.K)
# If image dimensions is not provided, use it
if wh is not None:
if not isinstance(wh, (list, tuple)):
wh = wh.shape[:2]
self.w, self.h = wh
uv = numpyf([[self.w - 1, 0 ],
[self.w - 1, self.h - 1],
[ 0 , self.h - 1],
[ 0 , 0 ]])
self.v = add_row0(self.i2c(scale, uv))
@staticmethod
def from_vidar(cam, b, scale=1.0):
return Camera(K=cam.K[b][:3, :3],
pose=cam.Tcw.T[b] if cam.Twc is not None else None,
wh=cam.wh, scale=scale)
def i2c(self, depth=1.0, uv=None):
"""
Project an image to camera coordinates using a depth map
Parameters
----------
depth : float or np.array
Depth values for lifting
uv : np.array
Image grid for lifting
Returns
-------
xyz : np.array
Lifted 3D points in camera frame of reference
"""
# If no grid is provided, uses depth map
if uv is None:
if not is_float(depth):
# Create image grid from depth values
uv = image_grid(depth)
else:
# Impossible to create an image grid
raise ValueError('No available grid for camera')
# Add third unitary coordinate to the image grid
if uv.shape[1] == 2:
uv = add_col1(uv)
# A depth map was provided, create a grid from it
elif uv.shape[1] > 3:
uv = image_grid(uv)
# If there are individual depth values per image grid cell
if not is_float(depth):
if len(depth.shape) == 1:
depth = depth[:, np.newaxis]
elif depth.shape[1] > 1:
if len(depth.shape) == 3:
depth = depth[:, :, 0]
depth = depth.reshape(-1, 1)
return (uv @ self.iK) * depth
def c2i(self, xyz, filter=False, padding=0, return_z=False):
"""
Project 3D points in camera frame of reference to the image plane
Parameters
----------
xyz : np.array
3D points to be projected
filter : bool
Filter points outside boundaries
padding : int or float
Padding for filtering
return_z : bool
Return z values as well or not
Returns
-------
uv : np.array
2D coordinates of projected points
idx : np.array
Valid indexes in case filtering was enabled
depth : np.array
Depth values in case return_z was enabled
"""
uv = (xyz / xyz[:, 2:] @ self.K)[:, :2]
if filter:
idx = (uv[:, 0] > -padding) & (uv[:, 0] < self.w + padding) & \
(uv[:, 1] > -padding) & (uv[:, 1] < self.h + padding) & (xyz[:, 2] > 0)
if return_z:
return uv[idx], xyz[idx, 2:], idx
else:
return uv[idx], idx
else:
if return_z:
return uv, xyz[:, 2:]
else:
return uv
def c2w(self, xyz):
"""Transform 3D points in camera frame of reference to world frame of reference"""
if xyz.shape[1] == 3:
xyz = add_col1(xyz)
return (xyz @ self.Tt)[:, :3]
def w2c(self, xyz):
"""Transform 3D points in world frame of reference to camera frame of reference"""
if xyz.shape[1] == 3:
xyz = add_col1(xyz)
return (xyz @ invert(self.Tt))[:, :3]
def i2w(self, depth=1.0, uv=None):
"""Lift 2D image points to 3D space in world frame of reference"""
return self.c2w(self.i2c(depth, uv))
def w2i(self, xyz, filter=False, padding=0, return_z=False):
"""Project 3D points in world frame of reference to the image plane"""
return self.c2i(self.w2c(xyz), filter=filter,
padding=padding, return_z=return_z)
def draw(self, draw, tex=None, axes=True, color='gra'):
"""
Draw a camera in a 3D screen
Parameters
----------
draw : Draw
Draw class to be used
tex : int
Optional texture to draw on the camera image plane
axes : bool
True if coordinate axes should be drawn as well
color : str
Which color should be used for the camera
"""
draw.image(tex, verts=self.v[:4])
draw.color(color).width(4).connects(self.v[4], self.v[:4]).loop(self.v[:4])
if axes:
draw.axis(0.25 * self.scale)
| [
"camviz.utils.types.is_list",
"camviz.utils.utils.image_grid",
"camviz.utils.types.is_float",
"camviz.utils.geometry.invert",
"camviz.utils.utils.numpyf",
"numpy.linalg.inv",
"camviz.utils.utils.add_col1"
] | [((647, 662), 'camviz.utils.types.is_list', 'is_list', (['camera'], {}), '(camera)\n', (654, 662), False, 'from camviz.utils.types import is_list, is_float\n'), ((1396, 1417), 'numpy.linalg.inv', 'np.linalg.inv', (['self.K'], {}), '(self.K)\n', (1409, 1417), True, 'import numpy as np\n'), ((1632, 1708), 'camviz.utils.utils.numpyf', 'numpyf', (['[[self.w - 1, 0], [self.w - 1, self.h - 1], [0, self.h - 1], [0, 0]]'], {}), '([[self.w - 1, 0], [self.w - 1, self.h - 1], [0, self.h - 1], [0, 0]])\n', (1638, 1708), False, 'from camviz.utils.utils import numpyf, add_row0, add_col1, image_grid\n'), ((2941, 2953), 'camviz.utils.utils.add_col1', 'add_col1', (['uv'], {}), '(uv)\n', (2949, 2953), False, 'from camviz.utils.utils import numpyf, add_row0, add_col1, image_grid\n'), ((3156, 3171), 'camviz.utils.types.is_float', 'is_float', (['depth'], {}), '(depth)\n', (3164, 3171), False, 'from camviz.utils.types import is_list, is_float\n'), ((4831, 4844), 'camviz.utils.utils.add_col1', 'add_col1', (['xyz'], {}), '(xyz)\n', (4839, 4844), False, 'from camviz.utils.utils import numpyf, add_row0, add_col1, image_grid\n'), ((5047, 5060), 'camviz.utils.utils.add_col1', 'add_col1', (['xyz'], {}), '(xyz)\n', (5055, 5060), False, 'from camviz.utils.utils import numpyf, add_row0, add_col1, image_grid\n'), ((1363, 1372), 'camviz.utils.utils.numpyf', 'numpyf', (['K'], {}), '(K)\n', (1369, 1372), False, 'from camviz.utils.utils import numpyf, add_row0, add_col1, image_grid\n'), ((2592, 2607), 'camviz.utils.types.is_float', 'is_float', (['depth'], {}), '(depth)\n', (2600, 2607), False, 'from camviz.utils.types import is_list, is_float\n'), ((2684, 2701), 'camviz.utils.utils.image_grid', 'image_grid', (['depth'], {}), '(depth)\n', (2694, 2701), False, 'from camviz.utils.utils import numpyf, add_row0, add_col1, image_grid\n'), ((3059, 3073), 'camviz.utils.utils.image_grid', 'image_grid', (['uv'], {}), '(uv)\n', (3069, 3073), False, 'from camviz.utils.utils import numpyf, add_row0, add_col1, 
image_grid\n'), ((5083, 5098), 'camviz.utils.geometry.invert', 'invert', (['self.Tt'], {}), '(self.Tt)\n', (5089, 5098), False, 'from camviz.utils.geometry import transpose, invert\n')] |
from pymongo import MongoClient
import numpy as np
from scipy import linalg
import datetime
import json
offset=1
hashtag_number=21
timeinterval_number=10
interval_day=1
duration_in_days=60
end_time=datetime.datetime(2016, 5, 1, 0)
co_occurrence_matrix=np.zeros((hashtag_number,hashtag_number))
client=MongoClient('192.168.3.11',27020)#build a connection to MongoDB
database=client.get_database('Twitter_DATA')
database.authenticate('twitterApplication','gdotwitter')
collection=database.Twitter_Brexit_GNIP
for i in range(duration_in_days):
taglist=[]
result_file='dailydata/GNIP_hashtag_daily_frequency_'+str(1+i)+'.json'
start_time=end_time
end_time=start_time+datetime.timedelta(days=1)
cursor=collection.aggregate(
[
{'$match':{'ISO_created_at':{'$gte':start_time,'$lt':end_time}}},
{'$unwind':'$hashtags'},
{'$group':{'_id':'$hashtags','frequency':{'$sum':1}}},
{'$sort':{'frequency':-1}}
]
)
for record in cursor:
taglist.append(record)
with open(result_file, mode='w') as f:
json.dump(taglist,f)
| [
"pymongo.MongoClient",
"json.dump",
"numpy.zeros",
"datetime.datetime",
"datetime.timedelta"
] | [((199, 231), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(5)', '(1)', '(0)'], {}), '(2016, 5, 1, 0)\n', (216, 231), False, 'import datetime\n'), ((253, 295), 'numpy.zeros', 'np.zeros', (['(hashtag_number, hashtag_number)'], {}), '((hashtag_number, hashtag_number))\n', (261, 295), True, 'import numpy as np\n'), ((303, 337), 'pymongo.MongoClient', 'MongoClient', (['"""192.168.3.11"""', '(27020)'], {}), "('192.168.3.11', 27020)\n", (314, 337), False, 'from pymongo import MongoClient\n'), ((682, 708), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (700, 708), False, 'import datetime\n'), ((1099, 1120), 'json.dump', 'json.dump', (['taglist', 'f'], {}), '(taglist, f)\n', (1108, 1120), False, 'import json\n')] |
"""Nested-cv to evaluate models and learn who'll survive the Titanic."""
from sklearn.pipeline import Pipeline
from pandas import read_csv
# from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import numpy as np
import os
from autoclf.classification import eval_utils as eu
from autoclf.classification import evaluate as eva
from autoclf.classification import param_grids_distros as pgd
import autoclf.getargs as ga
from pkg_resources import resource_string
from io import StringIO
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
# starting program
if __name__ == '__main__':
print("### Probability Calibration Experiment -- CalibratedClassifierCV "
"with cv=cv (no prefit) ###")
print()
d_name = ga.get_name()
if d_name is None:
d_name = "Titanic"
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
# load data
try:
df = read_csv(
'datasets/titanic_train.csv', delimiter=",",
na_values={'Age': '', 'Cabin': '', 'Embarked': ''},
dtype={'Name': 'category', 'Sex': 'category',
'Ticket': 'category', 'Cabin': 'category',
'Embarked': 'category'})
print("Found data in 'autoclf/datasets'")
except FileNotFoundError as fe:
titanic_bytes = resource_string(
"autoclf", os.path.join("datasets", 'titanic_train.csv'))
titanic_file = StringIO(str(titanic_bytes,'utf-8'))
names = ['PassengerId','Survived','Pclass','Name','Sex','Age','SibSp',
'Parch','Ticket','Fare','Cabin','Embarked']
df = read_csv(
titanic_file, delimiter=",",
# header=0, names=names,
na_values={'Age': '', 'Cabin': '', 'Embarked': ''},
dtype={'Name': 'category', 'Sex': 'category',
'Ticket': 'category', 'Cabin': 'category',
'Embarked': 'category'})
except Exception as e:
raise e
# data exploration
print("shape: ", df.shape)
# statistical summary
description = df.describe()
print("description - no encoding:\n", description)
print()
plt.style.use('ggplot')
# Feature-Feature Relationships
# scatter_matrix(df)
print()
# too many missing values in 'Cabin' columns: about 3/4
print("Dropping 'Cabin' column -- too many missing values")
df.drop(['Cabin'], axis=1, inplace=True)
print()
print("Now, shape: ", df.shape)
print("df.head():\n", df.head())
print()
description = df.describe()
print("Once again, description - no encoding:\n", description)
print()
# input("Enter key to continue... \n")
target = 'Survived'
# check dataframe to estimate a learning mode
learnm, ldf = eu.learning_mode(df)
odf = None
if len(ldf.index) < len(df.index):
odf = df
# dataframe have been reduced based on its size
df = ldf
print("Learning mode:", learnm)
print()
# feature engineering
sltt = eu.scoring_and_tt_split(df, target, 0.2, seed)
X_train, X_test, y_train, y_test = sltt['arrays']
scoring = sltt['scoring']
Y_type = sltt['target_type']
labels = sltt['labels']
print("Classes:", labels)
print()
print("X_train shape: ", X_train.shape)
print("X_train -- first row:", X_train.values[0])
print("y_train shape: ", y_train.shape)
print()
print("X_test shape: ", X_test.shape)
print("X_test -- first row:", X_test.values[0])
print("y_test shape: ", y_test.shape)
print()
print(y_train[:3])
# input("Enter key to continue... \n")
print()
print("scoring:", scoring)
print()
auto_feat_eng_data = eu.auto_X_encoding(sltt, seed)
print()
eva.select_evaluation_strategy(
auto_feat_eng_data, scoring, Y_type, labels=labels,
d_name=d_name, random_state=seed, learn=learnm)
input("=== [End Of Program] Enter key to continue... \n")
| [
"autoclf.classification.eval_utils.scoring_and_tt_split",
"numpy.random.seed",
"warnings.filterwarnings",
"pandas.read_csv",
"autoclf.classification.eval_utils.auto_X_encoding",
"autoclf.getargs.get_name",
"matplotlib.pyplot.style.use",
"autoclf.classification.eval_utils.learning_mode",
"autoclf.cla... | [((525, 582), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (548, 582), False, 'import warnings\n'), ((776, 789), 'autoclf.getargs.get_name', 'ga.get_name', ([], {}), '()\n', (787, 789), True, 'import autoclf.getargs as ga\n'), ((901, 921), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (915, 921), True, 'import numpy as np\n'), ((2216, 2239), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (2229, 2239), True, 'import matplotlib.pyplot as plt\n'), ((2835, 2855), 'autoclf.classification.eval_utils.learning_mode', 'eu.learning_mode', (['df'], {}), '(df)\n', (2851, 2855), True, 'from autoclf.classification import eval_utils as eu\n'), ((3090, 3136), 'autoclf.classification.eval_utils.scoring_and_tt_split', 'eu.scoring_and_tt_split', (['df', 'target', '(0.2)', 'seed'], {}), '(df, target, 0.2, seed)\n', (3113, 3136), True, 'from autoclf.classification import eval_utils as eu\n'), ((3780, 3810), 'autoclf.classification.eval_utils.auto_X_encoding', 'eu.auto_X_encoding', (['sltt', 'seed'], {}), '(sltt, seed)\n', (3798, 3810), True, 'from autoclf.classification import eval_utils as eu\n'), ((3829, 3964), 'autoclf.classification.evaluate.select_evaluation_strategy', 'eva.select_evaluation_strategy', (['auto_feat_eng_data', 'scoring', 'Y_type'], {'labels': 'labels', 'd_name': 'd_name', 'random_state': 'seed', 'learn': 'learnm'}), '(auto_feat_eng_data, scoring, Y_type, labels=\n labels, d_name=d_name, random_state=seed, learn=learnm)\n', (3859, 3964), True, 'from autoclf.classification import evaluate as eva\n'), ((962, 1193), 'pandas.read_csv', 'read_csv', (['"""datasets/titanic_train.csv"""'], {'delimiter': '""","""', 'na_values': "{'Age': '', 'Cabin': '', 'Embarked': ''}", 'dtype': "{'Name': 'category', 'Sex': 'category', 'Ticket': 'category', 'Cabin':\n 'category', 'Embarked': 'category'}"}), 
"('datasets/titanic_train.csv', delimiter=',', na_values={'Age': '',\n 'Cabin': '', 'Embarked': ''}, dtype={'Name': 'category', 'Sex':\n 'category', 'Ticket': 'category', 'Cabin': 'category', 'Embarked':\n 'category'})\n", (970, 1193), False, 'from pandas import read_csv\n'), ((1669, 1880), 'pandas.read_csv', 'read_csv', (['titanic_file'], {'delimiter': '""","""', 'na_values': "{'Age': '', 'Cabin': '', 'Embarked': ''}", 'dtype': "{'Name': 'category', 'Sex': 'category', 'Ticket': 'category', 'Cabin':\n 'category', 'Embarked': 'category'}"}), "(titanic_file, delimiter=',', na_values={'Age': '', 'Cabin': '',\n 'Embarked': ''}, dtype={'Name': 'category', 'Sex': 'category', 'Ticket':\n 'category', 'Cabin': 'category', 'Embarked': 'category'})\n", (1677, 1880), False, 'from pandas import read_csv\n'), ((1407, 1452), 'os.path.join', 'os.path.join', (['"""datasets"""', '"""titanic_train.csv"""'], {}), "('datasets', 'titanic_train.csv')\n", (1419, 1452), False, 'import os\n')] |
""" Wrapper functions for TensorFlow layers.
Author: <NAME>
Date: July 2019
"""
import numpy as np
import tensorflow as tf
import tf_util
# from pointnet_util import pointnet_sa_module
def placeholder_inputs(batch_size, num_point,NUM_DIMS=2):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, NUM_DIMS))
pointclouds_pl_same = tf.placeholder(tf.float32, shape=(batch_size, num_point, NUM_DIMS))
pointclouds_pl_not_same = tf.placeholder(tf.float32, shape=(batch_size, num_point, NUM_DIMS))
noise_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, NUM_DIMS))
labels_ns = tf.placeholder(tf.float32, shape=(batch_size, num_point))
return pointclouds_pl, pointclouds_pl_same,pointclouds_pl_not_same,noise_pl,labels_ns
def get_3dmfv_tf(points,n_gaussians=9, sigma = 0.0625,flatten=True, normalize=True,full_fv = True):
"""
Compute the fisher vector (on the gpu using tf) given the gmm model parameters (w,mu,sigma) and a set of points for classification network
Input:
points: B X N x 3 tensor of XYZ points
w: B X n_gaussians tensor of gaussian weights
mu: B X n_gaussians X 63 tensor of gaussian cetnters
sigma: B X n_gaussians X 3 tensor of stddev of diagonal covariance
Output:
fv: B X 7*n_gaussians tensor of the fisher vector
"""
n_batches = points.shape[0].value
n_points = points.shape[1].value
# n_gaussians = mu.shape[0].value
# D = mu.shape[1].value
D = points.shape[-1].value
if D==2:
grid_size = int(np.sqrt(n_gaussians))
else:
grid_size = int(np.ceil(np.power(n_gaussians, 1 / 3)))
l = np.linspace(-1,1,grid_size,False)+(1/grid_size)
if D==2:
x,y = np.meshgrid(l,l)
x = np.stack([x.flatten(),y.flatten()]).T
elif D==3:
x,y,z = np.meshgrid(l,l,l)
x = np.stack([x.flatten(), y.flatten(),z.flatten()]).T
w = tf.ones([n_gaussians])/(n_gaussians)
mu = tf.constant(x,tf.float32)
sigma = sigma*tf.ones([n_gaussians,D])
#Expand dimension for batch compatibility
batch_sig = tf.tile(tf.expand_dims(sigma,0),[n_points, 1, 1]) #n_points X n_gaussians X D
batch_sig = tf.tile(tf.expand_dims(batch_sig, 0), [n_batches, 1, 1,1]) #n_batches X n_points X n_gaussians X D
batch_mu = tf.tile(tf.expand_dims(mu, 0),[n_points, 1, 1]) #n_points X n_gaussians X D
batch_mu = tf.tile(tf.expand_dims(batch_mu, 0), [n_batches, 1, 1, 1]) #n_batches X n_points X n_gaussians X D
batch_w = tf.tile(tf.expand_dims(tf.expand_dims(w, 0), 0), [n_batches, n_points, 1]) #n_batches X n_points X n_guassians X D - should check what happens when weights change
batch_points = tf.tile(tf.expand_dims(points, -2), [1, 1, n_gaussians,1]) #n_batchesXn_pointsXn_gaussians_D # Generating the number of points for each gaussian for separate computation
#Compute derivatives
if full_fv:
w_per_batch_per_d = tf.tile(tf.expand_dims(tf.expand_dims(w, 0), -1), [n_batches, 1, D*3]) #n_batches X n_gaussians X 128*D (D for min and D for max)
else:
w_per_batch_per_d = tf.tile(tf.expand_dims(tf.expand_dims(w, 0), -1), [n_batches, 1, D]) #n_batches X n_gaussians X 128*D (D for min and D for max)
#Define multivariate noraml distributions
mvn = tf.contrib.distributions.MultivariateNormalDiag(loc=batch_mu, scale_diag=batch_sig)
#Compute probability per point
p_per_point = mvn.prob(batch_points)
w_p = tf.multiply(p_per_point,batch_w)
Q = w_p/tf.tile(tf.reduce_sum(w_p, axis=-1,keepdims=True),[1, 1, n_gaussians])
Q_per_d = tf.tile(tf.expand_dims(Q, -1), [1, 1, 1, D])
# Compute derivatives and take max and min
d_pi_all = tf.expand_dims((Q - batch_w)/ (tf.sqrt(batch_w) * n_points), -1)
# d_pi_sum = tf.reduce_sum(d_pi_all , axis=1)
d_pi_max = tf.reduce_max(d_pi_all , axis=1)
d_pi_mean = tf.reduce_mean(d_pi_all , axis=1)
if full_fv:
d_pi = tf.concat([d_pi_mean,d_pi_max],2)
else:
d_pi = d_pi_mean
d_mu_all = Q_per_d * (batch_points - batch_mu) / batch_sig
# d_mu_all_sum = tf.reduce_sum(d_mu_all , axis=1)
d_mu_all_max = tf.reduce_max(d_mu_all , axis=1)
d_mu_all_min = tf.reduce_min(d_mu_all , axis=1)
d_mu_all_mean = tf.reduce_mean(d_mu_all , axis=1)
if full_fv:
d_mu_all_full = tf.concat([d_mu_all_mean, d_mu_all_max, d_mu_all_min], 2)
else:
d_mu_all_full = d_mu_all_mean
d_mu = (1 / (tf.sqrt(w_per_batch_per_d))) * d_mu_all_full
d_sig_all = Q_per_d * ( tf.pow((batch_points - batch_mu) / batch_sig,2) - 1)
# d_sig_all_sum = tf.reduce_sum(d_sig_all , axis=1)
d_sig_all_max = tf.reduce_max(d_sig_all , axis=1)
d_sig_all_min = tf.reduce_min(d_sig_all , axis=1)
d_sig_all_mean = tf.reduce_mean(d_sig_all , axis=1)
if full_fv:
d_sig_all_full = tf.concat([d_sig_all_mean,d_sig_all_max,d_sig_all_min],2)
else:
d_sig_all_full = d_sig_all_mean
d_sigma = (1 / (tf.sqrt(2*w_per_batch_per_d))) * d_sig_all_full
normalize=True
if normalize:
#Power normaliation
alpha = 0.5
# d_pi = tf.sign(d_pi) * tf.pow(tf.abs(d_pi),alpha)
# d_mu = tf.sign(d_mu) * tf.pow(tf.abs(d_mu), alpha)
# d_sigma = tf.sign(d_sigma) * tf.pow(tf.abs(d_sigma), alpha)
epsilon = 1e-12
d_pi = tf.sign(d_pi) * tf.pow(tf.maximum(tf.abs(d_pi),epsilon),alpha)
d_mu = tf.sign(d_mu) * tf.pow(tf.maximum(tf.abs(d_mu),epsilon), alpha)
d_sigma = tf.sign(d_sigma) * tf.pow(tf.maximum(tf.abs(d_sigma),epsilon), alpha)
# L2 normaliation
d_pi = tf.nn.l2_normalize(d_pi, dim=1)
d_mu = tf.nn.l2_normalize(d_mu, dim=1)
d_sigma = tf.nn.l2_normalize(d_sigma, dim=1)
if flatten:
#flatten d_mu and d_sigma
d_pi = tf.contrib.layers.flatten(tf.transpose(d_pi, perm=[0, 2, 1]))
d_mu = tf.contrib.layers.flatten(tf.transpose(d_mu,perm=[0,2,1]))
d_sigma = tf.contrib.layers.flatten(tf.transpose(d_sigma,perm=[0,2,1]))
fv = tf.concat([d_pi, d_mu, d_sigma], axis=1)
else:
fv = tf.concat([d_pi, d_mu, d_sigma], axis=2)
fv = tf.transpose(fv, perm=[0, 2, 1])
fv = tf.transpose(fv ,[0,2,1]) # BX20XV->BXVX20
# print(fv)
# fv = fv / 2
return fv #BX20XK
def SPD(point_cloud,
embedding, C, is_training, bn_decay=None, reuse=None,
bn = True, wd=0.0,
sig = True,Embedding_Size = 512,
NUM_DIMS = 2,mlp=[32,16,16],k=3,conve_type=1):
#get sizes:
E = embedding.shape[2].value #embedding per voxel
V = embedding.shape[1].value #number of voxels
B = embedding.shape[0].value
NP = point_cloud.shape[1].value
output_size = NUM_DIMS #2d:2, 3d:3
def get_emb_and_concat(point_cloud,embedding,argmax,bv):
D=point_cloud.shape[-1].value
bv = tf.gather_nd(bv, tf.stack(
[tf.tile(tf.expand_dims(tf.range(tf.shape(argmax)[0]), 1), [1, tf.shape(argmax)[1]]),
tf.transpose(tf.tile(tf.expand_dims(tf.range(tf.shape(argmax)[1]), 1), [1, tf.shape(argmax)[0]])),
tf.cast(argmax, tf.int32)],
2))
bv = tf.tile(tf.expand_dims(tf.expand_dims(bv,-1),-1),[1,1,1,D]) #BxNx1xD
new_pc = tf.gather_nd(point_cloud, tf.stack(
[tf.tile(tf.expand_dims(tf.range(tf.shape(argmax)[0]), 1), [1, tf.shape(argmax)[1]]),
tf.transpose(tf.tile(tf.expand_dims(tf.range(tf.shape(argmax)[1]), 1), [1, tf.shape(argmax)[0]])),
tf.cast(argmax, tf.int32)],
2))
new_emb = tf.gather_nd(embedding, tf.stack(
[tf.tile(tf.expand_dims(tf.range(tf.shape(argmax)[0]), 1), [1, tf.shape(argmax)[1]]),
tf.zeros(tf.shape(argmax),tf.int32),
tf.cast(argmax, tf.int32)],
2))
new_in = tf.concat([new_pc,new_emb],-1)
new_in = tf.expand_dims(new_in,-1) #BXNXE+DX1
return new_in,bv
def get_pc_grid_binary_mask_from_centers(Centers, point_cloud):
Dim = point_cloud.shape[-1].value
batch_size = point_cloud.shape[0].value
num_points = point_cloud.shape[1].value
V = Centers.shape[0].value
print('get_pc_grid_binary_mask_from_centers')
if Dim == 2:
grid_size = tf.abs(Centers[0][0] - Centers[1][0]) / 2
else:
grid_size = tf.abs(Centers[0][2] - Centers[1][2]) / 2
Centers = tf.expand_dims(tf.expand_dims(Centers, 0), 0) # 1X1XVXD
Centers = tf.tile(Centers, [batch_size, num_points, 1, 1]) # BXNXVXD
point_cloud = tf.tile(tf.expand_dims(point_cloud, -2), [1, 1, V, 1]) # BXNXD->BXNXVXD
pc_x = point_cloud[:, :, :, 0]
pc_y = point_cloud[:, :, :, 1]
C_x = Centers[:, :, :, 0]
C_y = Centers[:, :, :, 1]
A = tf.cast(pc_x > C_x - grid_size, tf.float32)
B = tf.cast(pc_x <= C_x + grid_size, tf.float32)
C = tf.cast(pc_y > C_y - grid_size, tf.float32)
D = tf.cast(pc_y <= C_y + grid_size, tf.float32)
binary_vect = A * B * C * D
if Dim == 3:
pc_z = point_cloud[:, :, :, 2]
C_z = Centers[:, :, :, 2]
E = tf.cast(pc_z > C_z - grid_size, tf.float32)
F = tf.cast(pc_z <= C_z + grid_size, tf.float32)
binary_vect = binary_vect * E * F
# print(binary_vect)
argmax = tf.math.argmax(input=binary_vect, axis=2)
point_cloud = point_cloud - Centers
return binary_vect, point_cloud, argmax
bv,net, argmax = get_pc_grid_binary_mask_from_centers(C, point_cloud)
embedding = tf.expand_dims(embedding,1) #BX1XVXE
net,binary_vect = get_emb_and_concat(net, embedding, argmax,bv)
if conve_type == 1: #fully connected over E+D vec:
with tf.variable_scope('dpdist_local', reuse=reuse) as sc:
net = tf_util.conv2d(net, mlp[0], [1,E+NUM_DIMS],
padding='VALID', stride=[1,1],
bn=bn, is_training=is_training,
scope='mapper_conv1', bn_decay=bn_decay,
reuse=reuse,weight_decay=wd) #no BN in the first layer.
# print(net)
net = tf_util.conv2d(net, mlp[1], [1,1],
padding='VALID', stride=[1,1],
bn=bn, is_training=is_training,
scope='mapper_conv2', bn_decay=bn_decay,
reuse=reuse,weight_decay=wd)
# print(net)
net = tf_util.conv2d(net, mlp[2], [1,1],
padding='VALID', stride=[1,1],
bn=bn, is_training=is_training,
scope='mapper_conv3', bn_decay=bn_decay,
reuse=reuse,weight_decay=wd)
# print(net)
net = tf_util.conv2d(net, output_size, [1,1],
padding='VALID', stride=[1,1],
bn=bn, is_training=is_training,
scope='mapper_conv4', bn_decay=bn_decay,
reuse=reuse,weight_decay=wd,activation_fn=None)
# print(net)
# net = tf.maximum(-1.0,net)
# net = tf.minimum(1.0,net)
elif conve_type ==2: #cnn over embedding+3
if NUM_DIMS==2:
net_E = net[:,:,:E,:]
net_D = net[:,:,E:,:]
net_D = tf.transpose(net_D,[0,1,3,2])
net_D = tf.expand_dims(net_D,2)
net_D = tf.tile(net_D,[1,1,k,k,1])
net_E = tf.reshape(net_E,[4*B,NP,k,k,-1])
net = tf.concat([net_E,net_D],-1)
net = tf.reshape(net,[4*B*NP,k,k,-1])
with tf.variable_scope('dpdist_local_cnn_fc', reuse=reuse) as sc:
net = tf_util.conv2d(net, 64, [3,3],
padding='SAME', stride=[1, 1],
bn=bn, is_training=is_training,
scope='mapper_conv1', bn_decay=bn_decay,
reuse=reuse, weight_decay=wd)
# print(net)
net = tf_util.conv2d(net, 64, [3,3],
padding='VALID', stride=[1, 1],
bn=bn, is_training=is_training,
scope='mapper_conv2', bn_decay=bn_decay,
reuse=reuse, weight_decay=wd)
k_new = net.shape[2].value #flat the vector output
net = tf.reshape(net, [4 * B , NP, 1, -1])
# print(net)
net = tf_util.conv2d(net, mlp[2], [1, 1],
padding='VALID', stride=[1, 1],
bn=bn, is_training=is_training,
scope='mapper_conv3', bn_decay=bn_decay,
reuse=reuse, weight_decay=wd)
# print(net)
net = tf_util.conv2d(net, output_size, [1, 1],
padding='VALID', stride=[1, 1],
bn=bn, is_training=is_training,
scope='mapper_conv4', bn_decay=bn_decay,
reuse=reuse, weight_decay=wd, activation_fn=None)
# print(net)
# net = tf.maximum(-1.0,net)
# net = tf.minimum(1.0,net)
else:
net_E = net[:,:,:E,:]
net_D = net[:,:,E:,:]
net_D = tf.transpose(net_D,[0,1,3,2])
net_D = tf.expand_dims(net_D,2)
net_D = tf.expand_dims(net_D,2)
net_D = tf.tile(net_D,[1,1,k,k,k,1])
net_E = tf.reshape(net_E,[4*B,NP,k,k,k,-1])
net = tf.concat([net_E,net_D],-1)
net = tf.reshape(net,[4*B*NP,k,k,k,-1])
else: #cnn over embedding, then fc over E+3
if NUM_DIMS==2:
net_E = net[:,:,:E,:]
net_D = net[:,:,E:,:]
print(net_D)
net_D = tf.transpose(net_D,[0,1,3,2])
print(net_D)
net_E = tf.reshape(net_E,[B,NP,k,k,-1])
# net = tf.concat([net_E,net_D],-1)
net = tf.reshape(net_E,[B*NP,k,k,-1])
with tf.variable_scope('dpdist_local_cnn_fc', reuse=reuse) as sc:
net = tf_util.conv2d(net, 64, [3,3],
padding='SAME', stride=[1, 1],
bn=bn, is_training=is_training,
scope='mapper_conv1', bn_decay=bn_decay,
reuse=reuse, weight_decay=wd) # no BN in the first layer.
# print(net)
net = tf_util.conv2d(net, 64, [3,3],
padding='VALID', stride=[1, 1],
bn=bn, is_training=is_training,
scope='mapper_conv2', bn_decay=bn_decay,
reuse=reuse, weight_decay=wd)
k_new = net.shape[2].value #flat the vector output
net = tf.reshape(net, [B , NP, 1, -1])
net = tf.concat([net,net_D],-1)
# print(net)
net = tf_util.conv2d(net, mlp[2], [1, 1],
padding='VALID', stride=[1, 1],
bn=bn, is_training=is_training,
scope='mapper_conv3', bn_decay=bn_decay,
reuse=reuse, weight_decay=wd)
# print(net)
net = tf_util.conv2d(net, output_size, [1, 1],
padding='VALID', stride=[1, 1],
bn=bn, is_training=is_training,
scope='mapper_conv4', bn_decay=bn_decay,
reuse=reuse, weight_decay=wd, activation_fn=None)
# print(net)
# net = tf.maximum(-1.0,net)
# net = tf.minimum(1.0,net)
else:
net_E = net[:,:,:E,:]
net_D = net[:,:,E:,:]
net_D = tf.transpose(net_D,[0,1,3,2])
net_E = tf.reshape(net_E,[B,NP,k,k,k,-1])
# net = tf.concat([net_E,net_D],-1)
net = tf.reshape(net_E,[B*NP,k,k,k,-1])
with tf.variable_scope('dpdist_local_cnn_fc', reuse=reuse) as sc:
# net = tf_util.conv3d(net, 32, [1,1,1],
# padding='SAME', stride=[1, 1, 1],
# bn=bn, is_training=is_training,
# scope='mapper_conv0', bn_decay=bn_decay)
# print(net)
net = resnet3d(net, [3,3,3],
padding='SAME', stride=[1, 1, 1],
bn=bn, is_training=is_training,
scope='mapper_conv1', bn_decay=bn_decay,
reuse=reuse, weight_decay=wd)
# print(net)
net = resnet3d(net, [3,3,3],
padding='SAME', stride=[1, 1, 1],
bn=bn, is_training=is_training,
scope='mapper_conv2', bn_decay=bn_decay,
reuse=reuse, weight_decay=wd)
# print(net)
# net = tf_util.conv3d(net, 16, [1,1,1],
# padding='SAME', stride=[1, 1, 1],
# bn=bn, is_training=is_training,
# scope='mapper_conv3', bn_decay=bn_decay)
net = tf.reshape(net,[B, NP,1,-1]) #B,N,1,E*k**3
net = tf.concat([net,net_D],-1) #B,N,1,E*k**3+3
net = tf_util.conv2d(net, mlp[2], [1, 1],
padding='VALID', stride=[1, 1],
bn=bn, is_training=is_training,
scope='mapper_conv4', bn_decay=bn_decay,
reuse=reuse, weight_decay=wd)
print(net)
net = tf_util.conv2d(net, output_size, [1, 1],
padding='VALID', stride=[1, 1],
bn=bn, is_training=is_training,
scope='mapper_conv5', bn_decay=bn_decay,
reuse=reuse, weight_decay=wd, activation_fn=None)
#net = tf.nn.relu6(net+3)/3-1 #(-1,1) output range # +3 to center te relu around 0 (from -3 to 3)
net = tf.nn.relu(net)
net = net * binary_vect
return net
def resnet3d(net, kernel=[3, 3, 3],
padding='SAME', stride=[1, 1, 1],
bn=True, is_training=False,
scope='', bn_decay=0.7,
reuse=False, weight_decay=None):
filters = net.shape[-1].value
net_in = net
net = tf_util.conv3d(net, filters, kernel,
padding=padding, stride=stride,
bn=bn, is_training=is_training,
scope=scope + '_1', bn_decay=bn_decay)
net = tf_util.conv3d(net, filters, kernel,
padding=padding, stride=stride,
bn=bn, is_training=is_training,
scope=scope + '_2', bn_decay=bn_decay)
net = net + net_in
return net
def DPDist(point_cloud,point_cloudB,embedding,
           embeddingB, C, is_training, bn_decay=None, reuse=None,
           bn = True, wd=0.0,
           sig = True,Embedding_Size = 512,
           NUM_DIMS = 2,mlp=[32,16,16],k=3,conv_version=1,output_act='relu'):
    """Build the DPDist head: regress a per-point distance field between two clouds.

    Each point of one cloud is matched to the grid cell that contains it,
    concatenated with that cell's local embedding, and mapped by a small shared
    network to a distance value.  Both directions (A evaluated against B's
    embedding and vice versa) are stacked into one batch so the mapper weights
    are shared, then split again at the end.

    Args:
        point_cloud, point_cloudB: BxNxD point clouds (D == NUM_DIMS).
        embedding, embeddingB: per-cloud grid embeddings; BxVxE when k>0
            (one E-vector per voxel), otherwise a global code that is squeezed
            and tiled per point.
        C: voxel-center coordinates, VxD; used only when k>0.
        is_training, bn_decay, reuse, bn, wd: usual tf_util conv options.
        sig, Embedding_Size: unused in this function; kept for interface
            compatibility with sibling model builders.
        NUM_DIMS: 2 or 3; also the per-point output width.
        mlp: mapper layer widths.  NOTE(review): mutable default argument;
            harmless only because it is never mutated here.
        k: local-patch side length; k<=0 selects the global-embedding path.
        conv_version: 1 = FC over [relative coords, cell embedding];
            2 = CNN over the kxk(xk) embedding patch with coords appended;
            anything else = CNN over the patch, then FC over [code, coords].
        output_act: 'tanh', 'relu', or anything else for a shifted relu6.

    Returns:
        A list [predA, predB]; when k>0 each prediction is masked by the
        cell-membership indicator of its points.
    """
    #get sizes:
    if k>0:
        E = embedding.shape[2].value #embedding per voxel
        V = embedding.shape[1].value #number of voxels
    else:
        # Global-embedding path: drop singleton axes and use the last dim as E.
        print(embedding)
        embedding = tf.squeeze(embedding)
        embeddingB = tf.squeeze(embeddingB)
        print(embedding)
        E = embedding.shape[-1].value
    B = embedding.shape[0].value
    NP = point_cloud.shape[1].value
    output_size = NUM_DIMS #2d:2, 3d:3
    if k>0:
        def get_emb_and_concat(point_cloud,embedding,argmax,bv):
            # For every point, gather the embedding of the voxel selected by
            # `argmax` (index of the containing cell) and concatenate it with
            # the point's cell-relative coordinates.
            D=point_cloud.shape[-1].value
            # Membership indicator of the selected cell, broadcast over D.
            bv = tf.gather_nd(bv, tf.stack(
                [tf.tile(tf.expand_dims(tf.range(tf.shape(argmax)[0]), 1), [1, tf.shape(argmax)[1]]),
                 tf.transpose(tf.tile(tf.expand_dims(tf.range(tf.shape(argmax)[1]), 1), [1, tf.shape(argmax)[0]])),
                 tf.cast(argmax, tf.int32)],
                2))
            bv = tf.tile(tf.expand_dims(tf.expand_dims(bv,-1),-1),[1,1,1,D]) #BxNx1xD
            # Cell-relative coordinates of each point (point_cloud here is
            # already "point minus center", BxNxVxD).
            new_pc = tf.gather_nd(point_cloud, tf.stack(
                [tf.tile(tf.expand_dims(tf.range(tf.shape(argmax)[0]), 1), [1, tf.shape(argmax)[1]]),
                 tf.transpose(tf.tile(tf.expand_dims(tf.range(tf.shape(argmax)[1]), 1), [1, tf.shape(argmax)[0]])),
                 tf.cast(argmax, tf.int32)],
                2))
            # Local embedding of the selected cell (embedding is BX1XVXE).
            new_emb = tf.gather_nd(embedding, tf.stack(
                [tf.tile(tf.expand_dims(tf.range(tf.shape(argmax)[0]), 1), [1, tf.shape(argmax)[1]]),
                 tf.zeros(tf.shape(argmax),tf.int32),
                 tf.cast(argmax, tf.int32)],
                2))
            new_in = tf.concat([new_pc,new_emb],-1)
            new_in = tf.expand_dims(new_in,-1) #BXNXE+DX1
            return new_in,bv
        def get_pc_grid_binary_mask_from_centers(Centers, point_cloud):
            # For every point and every voxel: a 0/1 inside-the-voxel indicator,
            # per-voxel relative coordinates, and the containing-voxel index.
            # NOTE: locals A..F, V, C, D deliberately shadow outer-scope names.
            Dim = point_cloud.shape[-1].value
            batch_size = point_cloud.shape[0].value
            num_points = point_cloud.shape[1].value
            V = Centers.shape[0].value
            print('get_pc_grid_binary_mask_from_centers')
            # Half the cell edge, from the spacing of two adjacent centers.
            if Dim == 2:
                grid_size = tf.abs(Centers[0][0] - Centers[1][0]) / 2
            else:
                grid_size = tf.abs(Centers[0][2] - Centers[1][2]) / 2
            Centers = tf.expand_dims(tf.expand_dims(Centers, 0), 0) # 1X1XVXD
            Centers = tf.tile(Centers, [batch_size, num_points, 1, 1]) # BXNXVXD
            point_cloud = tf.tile(tf.expand_dims(point_cloud, -2), [1, 1, V, 1]) # BXNXD->BXNXVXD
            pc_x = point_cloud[:, :, :, 0]
            pc_y = point_cloud[:, :, :, 1]
            C_x = Centers[:, :, :, 0]
            C_y = Centers[:, :, :, 1]
            # Axis-aligned box test; one 0/1 factor per half-space.
            A = tf.cast(pc_x > C_x - grid_size, tf.float32)
            B = tf.cast(pc_x <= C_x + grid_size, tf.float32)
            C = tf.cast(pc_y > C_y - grid_size, tf.float32)
            D = tf.cast(pc_y <= C_y + grid_size, tf.float32)
            binary_vect = A * B * C * D
            if Dim == 3:
                pc_z = point_cloud[:, :, :, 2]
                C_z = Centers[:, :, :, 2]
                E = tf.cast(pc_z > C_z - grid_size, tf.float32)
                F = tf.cast(pc_z <= C_z + grid_size, tf.float32)
                binary_vect = binary_vect * E * F
            # print(binary_vect)
            # Index of the voxel containing each point (cells don't overlap).
            argmax = tf.math.argmax(input=binary_vect, axis=2)
            point_cloud = point_cloud - Centers
            return binary_vect, point_cloud, argmax
        # Direction 1: cloud B's points evaluated against cloud A's embedding.
        bv,net, argmax = get_pc_grid_binary_mask_from_centers(C, point_cloudB)
        embedding = tf.expand_dims(embedding,1) #BX1XVXE
        net,binary_vect = get_emb_and_concat(net, embedding, argmax,bv)
        # Direction 2: cloud A's points evaluated against cloud B's embedding.
        bvB,netB, argmaxB = get_pc_grid_binary_mask_from_centers(C, point_cloud)
        embeddingB = tf.expand_dims(embeddingB,1) #BX1XVXE
        netB,binary_vectB = get_emb_and_concat(netB, embeddingB, argmaxB,bvB)
    else:
        # Global path: tile a single code per point and append raw coordinates.
        print(embedding)
        embedding = tf.tile(tf.expand_dims(embedding,1),[1,NP,1]) #BX1XE
        print(embedding)
        embeddingB = tf.tile(tf.expand_dims(embeddingB,1),[1,NP,1]) #BX1XE
        print(point_cloudB)
        net = tf.expand_dims(tf.concat([point_cloudB,embedding],-1),2)
        print(net)
        netB = tf.expand_dims(tf.concat([point_cloud,embeddingB],-1),2)
    # Stack both directions along the batch axis so mapper weights are shared.
    net_all = tf.concat([net,netB],0) #2BxNx1xE+D
    net = net_all
    if conv_version == 1: #fully connected over E+D vec:
        with tf.variable_scope('dpdist_local', reuse=reuse) as sc:
            if k>0:
                net = tf_util.conv2d(net, mlp[0], [1, E+NUM_DIMS],
                                     padding='VALID', stride=[1, 1],
                                     bn=bn, is_training=is_training,
                                     scope='mapper_conv1', bn_decay=bn_decay,
                                     reuse=reuse, weight_decay=wd)
            else:
                net = tf_util.conv2d(net, mlp[0], [1,1],
                                     padding='VALID', stride=[1,1],
                                     bn=bn, is_training=is_training,
                                     scope='mapper_conv1', bn_decay=bn_decay,
                                     reuse=reuse,weight_decay=wd) #no BN in the first layer.
            # print(net)
            net = tf_util.conv2d(net, mlp[1], [1,1],
                                 padding='VALID', stride=[1,1],
                                 bn=bn, is_training=is_training,
                                 scope='mapper_conv2', bn_decay=bn_decay,
                                 reuse=reuse,weight_decay=wd)
            # print(net)
            net = tf_util.conv2d(net, mlp[2], [1,1],
                                 padding='VALID', stride=[1,1],
                                 bn=bn, is_training=is_training,
                                 scope='mapper_conv3', bn_decay=bn_decay,
                                 reuse=reuse,weight_decay=wd)
            # print(net)
            net = tf_util.conv2d(net, output_size, [1,1],
                                 padding='VALID', stride=[1,1],
                                 bn=bn, is_training=is_training,
                                 scope='mapper_conv4', bn_decay=bn_decay,
                                 reuse=reuse,weight_decay=wd,activation_fn=None)
            # print(net)
            # net = tf.maximum(-1.0,net)
            # net = tf.minimum(1.0,net)
    elif conv_version ==2: #cnn over embedding+3
        # Split the input back into the kxk(xk) embedding patch (net_E) and
        # the appended relative coordinates (net_D), then run a CNN on the
        # patch with the coordinates tiled onto every patch position.
        # NOTE(review): reshapes use 4*B while the concatenated batch is 2*B —
        # presumably B here is half the graph batch; confirm against callers.
        if NUM_DIMS==2:
            net_E = net[:,:,:E,:]
            net_D = net[:,:,E:,:]
            net_D = tf.transpose(net_D,[0,1,3,2])
            net_D = tf.expand_dims(net_D,2)
            net_D = tf.tile(net_D,[1,1,k,k,1])
            net_E = tf.reshape(net_E,[4*B,NP,k,k,-1])
            net = tf.concat([net_E,net_D],-1)
            net = tf.reshape(net,[4*B*NP,k,k,-1])
            with tf.variable_scope('dpdist_local_cnn_fc', reuse=reuse) as sc:
                net = tf_util.conv2d(net, 64, [3,3],
                                     padding='SAME', stride=[1, 1],
                                     bn=bn, is_training=is_training,
                                     scope='mapper_conv1', bn_decay=bn_decay,
                                     reuse=reuse, weight_decay=wd)
                # print(net)
                net = tf_util.conv2d(net, 64, [3,3],
                                     padding='VALID', stride=[1, 1],
                                     bn=bn, is_training=is_training,
                                     scope='mapper_conv2', bn_decay=bn_decay,
                                     reuse=reuse, weight_decay=wd)
                k_new = net.shape[2].value #flat the vector output
                net = tf.reshape(net, [4 * B , NP, 1, -1])
                # print(net)
                net = tf_util.conv2d(net, mlp[2], [1, 1],
                                     padding='VALID', stride=[1, 1],
                                     bn=bn, is_training=is_training,
                                     scope='mapper_conv3', bn_decay=bn_decay,
                                     reuse=reuse, weight_decay=wd)
                # print(net)
                net = tf_util.conv2d(net, output_size, [1, 1],
                                     padding='VALID', stride=[1, 1],
                                     bn=bn, is_training=is_training,
                                     scope='mapper_conv4', bn_decay=bn_decay,
                                     reuse=reuse, weight_decay=wd, activation_fn=None)
                # print(net)
                # net = tf.maximum(-1.0,net)
                # net = tf.minimum(1.0,net)
        else:
            # 3-D variant: same split/tile, but no conv stack follows here.
            # NOTE(review): this branch only reshapes and never applies the
            # mapper network — looks unfinished; confirm before using.
            net_E = net[:,:,:E,:]
            net_D = net[:,:,E:,:]
            net_D = tf.transpose(net_D,[0,1,3,2])
            net_D = tf.expand_dims(net_D,2)
            net_D = tf.expand_dims(net_D,2)
            net_D = tf.tile(net_D,[1,1,k,k,k,1])
            net_E = tf.reshape(net_E,[4*B,NP,k,k,k,-1])
            net = tf.concat([net_E,net_D],-1)
            net = tf.reshape(net,[4*B*NP,k,k,k,-1])
    else: #cnn over embedding, then fc over E+3
        # CNN encodes the embedding patch alone; relative coordinates are
        # concatenated only before the final FC (1x1 conv) layers.
        if NUM_DIMS==2:
            net_E = net[:,:,:E,:]
            net_D = net[:,:,E:,:]
            net_D = tf.transpose(net_D,[0,1,3,2])
            net_E = tf.reshape(net_E,[4*B,NP,k,k,-1])
            # net = tf.concat([net_E,net_D],-1)
            net = tf.reshape(net_E,[4*B*NP,k,k,-1])
            with tf.variable_scope('dpdist_local_cnn_fc', reuse=reuse) as sc:
                net = tf_util.conv2d(net, 64, [3,3],
                                     padding='SAME', stride=[1, 1],
                                     bn=bn, is_training=is_training,
                                     scope='mapper_conv1', bn_decay=bn_decay,
                                     reuse=reuse, weight_decay=wd) # no BN in the first layer.
                # print(net)
                net = tf_util.conv2d(net, 64, [3,3],
                                     padding='VALID', stride=[1, 1],
                                     bn=bn, is_training=is_training,
                                     scope='mapper_conv2', bn_decay=bn_decay,
                                     reuse=reuse, weight_decay=wd)
                k_new = net.shape[2].value #flat the vector output
                net = tf.reshape(net, [4 * B , NP, 1, -1])
                net = tf.concat([net,net_D],-1)
                # print(net)
                net = tf_util.conv2d(net, mlp[2], [1, 1],
                                     padding='VALID', stride=[1, 1],
                                     bn=bn, is_training=is_training,
                                     scope='mapper_conv3', bn_decay=bn_decay,
                                     reuse=reuse, weight_decay=wd)
                # print(net)
                net = tf_util.conv2d(net, output_size, [1, 1],
                                     padding='VALID', stride=[1, 1],
                                     bn=bn, is_training=is_training,
                                     scope='mapper_conv4', bn_decay=bn_decay,
                                     reuse=reuse, weight_decay=wd, activation_fn=None)
                # print(net)
                # net = tf.maximum(-1.0,net)
                # net = tf.minimum(1.0,net)
        else:
            net_E = net[:,:,:E,:]
            net_D = net[:,:,E:,:]
            net_D = tf.transpose(net_D,[0,1,3,2])
            net_E = tf.reshape(net_E,[2*B,NP,k,k,k,-1])
            # net = tf.concat([net_E,net_D],-1)
            net = tf.reshape(net_E,[2*B*NP,k,k,k,-1])
            with tf.variable_scope('dpdist_local_cnn_fc', reuse=reuse) as sc:
                net = tf_util.conv3d(net, 64, [1,1,1],
                                     padding='SAME', stride=[1, 1, 1],
                                     bn=bn, is_training=is_training,
                                     scope='mapper_conv0', bn_decay=bn_decay,
                                     weight_decay=wd)
                # Two residual 3-D conv blocks over the embedding patch.
                net = resnet3d(net, [3,3,3],
                               padding='SAME', stride=[1, 1, 1],
                               bn=bn, is_training=is_training,
                               scope='mapper_conv1', bn_decay=bn_decay,
                               reuse=reuse, weight_decay=wd)
                net = resnet3d(net, [3,3,3],
                               padding='SAME', stride=[1, 1, 1],
                               bn=bn, is_training=is_training,
                               scope='mapper_conv2', bn_decay=bn_decay,
                               reuse=reuse, weight_decay=wd)
                net = tf_util.conv3d(net, 16, [1,1,1],
                                     padding='SAME', stride=[1, 1, 1],
                                     bn=bn, is_training=is_training,
                                     scope='mapper_conv3', bn_decay=bn_decay,
                                     weight_decay=wd)
                net = tf.reshape(net,[2*B, NP,1,-1]) #B,N,1,E*k**3
                net = tf.identity(net,'embedding_layer')
                net = tf.concat([net,net_D],-1) #B,N,1,E*k**3+3
                # net = tf_util.conv2d(net, mlp[1], [1, 1],
                #                      padding='VALID', stride=[1, 1],
                #                      bn=bn, is_training=is_training,
                #                      scope='mapper_conv4', bn_decay=bn_decay,
                #                      reuse=reuse, weight_decay=wd)
                net = tf_util.conv2d(net, mlp[2], [1, 1],
                                     padding='VALID', stride=[1, 1],
                                     bn=bn, is_training=is_training,
                                     scope='mapper_conv5', bn_decay=bn_decay,
                                     reuse=reuse, weight_decay=wd)
                # print(net)
                net = tf_util.conv2d(net, output_size, [1, 1],
                                     padding='VALID', stride=[1, 1],
                                     bn=bn, is_training=is_training,
                                     scope='mapper_conv6', bn_decay=bn_decay,
                                     reuse=reuse, weight_decay=wd, activation_fn=None)
    # NOTE(review): the second test below is `if`, not `elif` — when
    # output_act == 'tanh' the tanh output also falls into the `else`
    # branch and gets the shifted relu6 applied on top; confirm intended.
    if output_act=='tanh':
        net = tf.nn.tanh(net)
    if output_act=='relu':
        net = tf.nn.relu6(net)/3
    else:
        net = tf.nn.relu6(net+3)/3-1 #(-1,1) output range # +3 to center the relu around 0 (from -3 to 3)
    # net = tf.nn.tanh(net)
    # Undo the direction stacking: net[0] is A->B, net[1] is B->A.
    net = tf.split(net,2,0)
    if k>0:
        # Zero out predictions for points that fell outside every grid cell.
        net[0] = net[0] * binary_vect
        net[1] = net[1] * binary_vectB
    return net
def pointnet_basic_model(point_cloud, is_training, bn_decay=None,
                         reuse=None, bn = True, wd=0.0,sig=True,
                         Embedding_Size=512,
                         POOLING = 'max',NUM_DIMS=2):
    """Encode a BxNxD point cloud into one feature vector per cloud.

    A shared per-point MLP (implemented as 1x1 convolutions) lifts every
    point to Embedding_Size channels; a symmetric max/avg pooling over the
    point axis then makes the encoding order-invariant (PointNet style).

    Returns:
        (pooled_features, end_points) — end_points is currently empty.
    """
    num_point = point_cloud.get_shape()[1].value
    end_points = {}
    # Add a trailing singleton axis so conv2d acts independently per point.
    per_point = tf.expand_dims(point_cloud, -1)
    with tf.variable_scope('pointnet', reuse=reuse) as sc:
        # First layer consumes the D coordinates; no batch norm here.
        feat = tf_util.conv2d(per_point, 128, [1, NUM_DIMS],
                              padding='VALID', stride=[1, 1],
                              bn=False, is_training=is_training,
                              scope='conv1', bn_decay=bn_decay,
                              reuse=reuse, weight_decay=wd)
        # Remaining shared-MLP layers are plain 1x1 convolutions.
        for width, layer_scope in ((128, 'conv2'),
                                   (512, 'conv3'),
                                   (Embedding_Size, 'conv4')):
            feat = tf_util.conv2d(feat, width, [1, 1],
                                  padding='VALID', stride=[1, 1],
                                  bn=bn, is_training=is_training,
                                  scope=layer_scope, bn_decay=bn_decay,
                                  reuse=reuse, weight_decay=wd)
        # Symmetric pooling over all points of the cloud.
        if POOLING == 'max':
            feat = tf_util.max_pool2d(feat, [num_point, 1],
                                      padding='VALID', scope='maxpool')
        else:
            feat = tf_util.avg_pool2d(feat, [num_point, 1],
                                      padding='VALID', scope='avgpool')
    return feat, end_points
def vox_vect(point_cloud, is_training,reuse=False,
             Embedding_Size=512):
    """Binary occupancy embedding: one 0/1 per grid cell, padded to Embedding_Size.

    The per-point cell-membership mask is max-pooled over the points to get a
    per-cell occupancy bit, then right-padded with zeros up to Embedding_Size.

    Returns:
        (occupancy_vector, end_points) with end_points['z'] == end_points['z_sig'].
    """
    dims = point_cloud.shape[-1].value
    end_points = {}
    with tf.variable_scope('vox_vect', reuse=reuse) as sc:
        n_batch = point_cloud.shape[0].value
        n_points = point_cloud.shape[1].value
        # Cells per axis: largest square (2-D) / smallest cube (3-D) packing.
        if dims == 2:
            side = np.floor(np.sqrt(Embedding_Size))
        else:
            side = np.ceil(np.power(Embedding_Size, 1 / 3))
        occupancy = get_pc_grid_binary_mask(Embedding_Size, point_cloud)
        # Any point inside a cell marks that cell occupied.
        occupancy = tf_util.max_pool2d(tf.expand_dims(occupancy, 1),
                                       [1, n_points], 'maxpool')
        pad_width = int(Embedding_Size - side ** dims)
        occupancy = tf.concat(
            [occupancy, tf.zeros([n_batch, 1, 1, pad_width], tf.float32)],
            -1)  # wrap with zeros to emb size
        occupancy = tf.reshape(occupancy, [n_batch, Embedding_Size])
        end_points['z'] = occupancy
        end_points['z_sig'] = occupancy
    return occupancy, end_points
def local_vox(net, is_training,reuse=False,NUM_DIMS=2,k=3,overlap=True):
    """Append a channel axis and delegate local-patch extraction to local_z_3d."""
    with_channel = tf.expand_dims(net, -1)
    return local_z_3d(with_channel, is_training, reuse=reuse,
                      NUM_DIMS=NUM_DIMS, k=k, overlap=overlap)
def local_vox_3d(net, is_training, reuse=False, NUM_DIMS=2, k=3, overlap=True):
    """Gather the kxkxk neighborhood of every interior cell of a cubic grid.

    The flat per-cell vector `net` (Bx Embedding_Size x ...) is folded into a
    vec_size^3 cube; for each cell in the valid interior the k^3 neighboring
    cell values are collected (out-of-range neighbors are zero-filled) and
    flattened, together with the cell-center coordinates.

    Returns:
        (output, C): output is [B, V, k**3 * Z] patch features, C is the
        V x 3 matrix of corresponding cell centers.
    """
    batch_size = net.shape[0].value
    Embedding_Size = net.shape[1].value
    # NOTE(review): np.ceil(np.power(x, 1/3)) overshoots for perfect cubes
    # due to float error (e.g. 27 -> 4); local_z_3d uses np.round instead.
    vec_size = int(np.ceil(np.power(Embedding_Size, 1 / 3)))
    net = net[:, :int(vec_size ** NUM_DIMS)]
    net = tf.reshape(net, [batch_size, vec_size, vec_size, vec_size, -1])
    # print(net)
    X, Y, Z = get_grid_centers(Embedding_Size, NUM_DIMS)
    k_half = int(np.floor(k / 2))
    # input_wrap = tf.zeros([batch_size,vec_size+2*k_half,vec_size+2*k_half]) #wrap with zeros
    # print(input_wrap.shape)
    # input_wrap[:,k_half:-k_half,k_half:-k_half] += net
    output = []
    # output = tf.zeros([batch_size,vec_size,vec_size,k,k])
    if overlap:
        # Stride 1: every interior cell gets its own (overlapping) patch.
        V = range(k_half, vec_size - k_half)  # only valid!
        # V = range(vec_size) #Same size
    else:
        # Stride k: disjoint patches tiling the interior.
        V = range(k_half, vec_size - k_half, k)
    C = []
    for ii in V:
        for jj in V:
            for ll in V:
                z_iijj = []
                # Walk the k^3 offsets around cell (ii, jj, ll).
                for i in range(-k_half, k_half + 1):
                    for j in range(-k_half, k_half + 1):
                        for l in range(-k_half, k_half + 1):
                            if ii + i < 0 or ii + i > vec_size - 1 \
                                    or jj + j < 0 or jj + j > vec_size - 1 \
                                    or ll + l < 0 or ll + l > vec_size - 1:
                                # Neighbor falls outside the grid: zero-fill.
                                z_ij = tf.constant(np.zeros([batch_size, 1]), tf.float32)
                            else:
                                z_ij = net[:, ii + i, jj + j, ll + l]
                            # x,y = X[ii+i,jj+j],Y[ii+i,jj+j]
                            # print(x)
                            z_iijj.append(z_ij)
                c = tf.stack([X[ii, jj, ll], Y[ii, jj, ll], Z[ii, jj, ll]])
                C.append(c)
                z_iijj = tf.stack(z_iijj)
                output.append(z_iijj)
    C = tf.stack(C)
    output = tf.stack(output)
    # Move the batch axis to the front: [B, V, k^3, Z].
    output = tf.transpose(output, [2, 0, 1, 3])
    output = tf.reshape(output, [output.shape[0], output.shape[1], -1])
    return output, C  # [B,V,Z,Z_dim]
def local_z(net, is_training,reuse=False,NUM_DIMS=2,k=3,overlap=True):
    """Dispatch to the 2-D or 3-D local-patch extractor based on NUM_DIMS."""
    extractor = local_z_2d if NUM_DIMS == 2 else local_z_3d
    return extractor(net, is_training, reuse=reuse,
                     NUM_DIMS=NUM_DIMS, k=k, overlap=overlap)
def local_z_2d(net, is_training,reuse=False,NUM_DIMS=2,k=3,overlap=True):
    """Extract every VALID kxk patch of per-cell codes from a square grid.

    The flat per-cell tensor [B, Embedding_Size, Z] is folded into a
    side x side image and tf.image.extract_image_patches collects each
    interior cell's kxk neighborhood; the matching cell-center coordinates
    come from get_grid_centers.

    Returns:
        (patches, centers): patches is [B, (side-2*kh)**2, k**2 * Z];
        centers is the matching V x NUM_DIMS center matrix.
    """
    n_batch = net.shape[0].value
    n_cells = net.shape[1].value
    side = int(np.floor(np.sqrt(n_cells)))
    half_k = int(np.floor(k / 2))
    # Keep only the cells that fill a full side x side square, fold to a grid.
    grid = tf.reshape(net[:, :int(side ** 2), :], [n_batch, side, side, -1])
    patches = tf.image.extract_image_patches(grid, [1, k, k, 1], [1, 1, 1, 1],
                                             rates=[1, 1, 1, 1],
                                             padding='VALID')
    X, Y = get_grid_centers(n_cells)
    # Centers of the interior cells — those with a complete VALID patch.
    centers = tf.stack([X[half_k:-half_k, half_k:-half_k],
                        Y[half_k:-half_k, half_k:-half_k]], -1)
    centers = tf.reshape(tf.cast(centers, tf.float32), [-1, NUM_DIMS])
    print('check local 2dz')
    patches = tf.reshape(patches,
                         [patches.shape[0], (side - 2 * half_k) ** 2, -1])
    return patches, centers  # [B,V,K**2*Z]
def local_z_3d(net, is_training,reuse=False,NUM_DIMS=3,k=3,overlap=True):
    """Gather the kxkxk neighborhood of every cell of a cubic embedding grid.

    Folds the flat per-voxel codes [B, num_vox, E] into a grid_len^3 cube and
    extracts SAME-padded k^3 patches, either with the native op (TF >= 1.4
    path) or with an explicit zero-padded Python loop.

    Returns:
        (output, C): output is [B, V, k**3 * E] patch features; C is the
        matching voxel-center coordinates.
    """
    TF14=True #TF>14 extract_volume_patches works with gradients!
    with tf.variable_scope('local_z_3d', reuse=reuse) as sc:
        batch_size = net.shape[0].value
        num_vox = net.shape[1].value
        # round (not ceil) so perfect cubes resolve exactly despite float error.
        grid_len = int(np.round(np.power(num_vox,1/3)))
        net = net[:,:int(grid_len**NUM_DIMS),:]
        net = tf.reshape(net,[batch_size,grid_len,grid_len,grid_len,-1])
        if TF14:
            # Native patch extraction; SAME padding zero-fills the borders.
            output = tf.extract_volume_patches(net,[1,k,k,k,1],[1,1,1,1,1],
                                               'SAME',
                                               name='EXTRACT_LOCAL_Z')
            X, Y, Z = get_grid_centers(num_vox, NUM_DIMS)
            C = tf.stack([X,Y,Z],-1)
            C = tf.cast(C,tf.float32)
            C = tf.reshape(C,[-1,NUM_DIMS])
            output = tf.reshape(output, [batch_size, output.shape[1].value ** 3, -1])
            # print(net)
        else:
            # Manual fallback: zero-pad the cube, then slice one kxkxk window
            # per cell with Python loops (slow graph build, same result).
            X,Y,Z = get_grid_centers(num_vox,NUM_DIMS)
            kh = int(np.floor(k / 2)) #half k
            paddings = tf.constant([[0, 0],
                                    [kh, kh],
                                    [kh, kh],
                                    [kh, kh],
                                    [0, 0]])
            net = tf.pad(net, paddings, "CONSTANT") # [[0, 0, 0, 0, 0, 0, 0],
            output = []
            C = []
            V = range(grid_len) # Same size
            for ii in V:
                for jj in V:
                    for ll in V:
                        z_iijjll = net[:,ii:ii+2*kh+1,
                                       jj:jj + 2*kh + 1,
                                       ll:ll + 2*kh + 1,:] # kh+(ii-kh:ii+kh+1), BXKXKXKXE
                        output.append(z_iijjll)
                        c = tf.stack([X[ii,jj,ll],
                                      Y[ii,jj,ll],
                                      Z[ii,jj,ll]]) # (3,)
                        C.append(c)
            C = tf.stack(C)
            output = tf.stack(output,1) #BxVxKxKxKxE
            output = tf.reshape(output,[batch_size,output.shape[1].value,-1])
    print('local_z')
    return output,C #[B,V,K**3*E]
def get_loss(pred_set, end_points,labels,loss_type = 'l1_dist'):
    """Build the two DPDist training losses.

    loss_samples: mean L1 error between the A->B distance predictions and
    the supplied labels.  loss_pred: mean of both directions' raw
    predictions, used as a regularizer driven toward zero.  Both tensors
    are also registered in graph collections of the same name.
    """
    ab = pred_set['pred_listAB']
    ba = pred_set['pred_listBA']
    if loss_type == 'l1_dist':
        # Only the first output channel of the A->B head is supervised.
        loss_samples = tf.squeeze(ab[:, :, :, 0])
        print('labels')
        print(loss_samples)
        print(labels)
        loss = tf.identity(tf.reduce_mean(tf.abs(loss_samples - labels)),
                           name='loss_samples')
        # tf.summary.scalar('labels',tf.reduce_mean(labels))
        tf.add_to_collection('loss_samples', loss)
        # Symmetric regularizer: push the mean prediction toward zero.
        loss_pred = (tf.reduce_mean(ab[:, :, :, 0]) +
                     tf.reduce_mean(ba[:, :, :, 0])) / 2
        loss_pred = tf.identity(loss_pred, name='loss_pred')
        tf.add_to_collection('loss_pred', loss_pred)
    # NOTE(review): any other loss_type leaves loss_samples/loss_pred unbound
    # (NameError) — 'l1_dist' appears to be the only supported value.
    return loss_samples, loss_pred
def get_grid_centers(Embedding_Size,NUM_DIMS=2):
    """Return meshgrids of cell-center coordinates for a [-1, 1]^NUM_DIMS grid.

    The number of cells per axis is the largest square (2-D) or the smallest
    cube (3-D) that relates to Embedding_Size, matching how sibling functions
    pack per-cell codes into a flat embedding vector.

    Args:
        Embedding_Size: length of the flat embedding the grid is packed into.
        NUM_DIMS: 2 -> returns (X, Y); anything else -> returns (X, Y, Z).

    Returns:
        np.meshgrid arrays of the cell-center coordinates.
    """
    if NUM_DIMS==2:
        vec_size = int(np.floor(np.sqrt(Embedding_Size)))
    else:
        # np.cbrt is exact for perfect cubes, unlike np.power(x, 1/3) whose
        # float error made ceil overshoot (e.g. 27**(1/3) -> 3.0000...4 -> 4).
        vec_size = int(np.ceil(np.cbrt(Embedding_Size)))
    grid_step = 2 / vec_size
    # Shift the left edge of each cell by half a step to land on its center.
    l = np.arange(-1, 1, grid_step) + grid_step/2
    if NUM_DIMS == 2:
        return np.meshgrid(l, l) #cells centers
    else:
        return np.meshgrid(l, l, l)
if __name__=='__main__':
is_training=0 | [
"tensorflow.reduce_sum",
"tensorflow.nn.tanh",
"tensorflow.identity",
"numpy.floor",
"tensorflow.reshape",
"tensorflow.nn.l2_normalize",
"tf_util.avg_pool2d",
"tensorflow.multiply",
"numpy.arange",
"tensorflow.reduce_max",
"tensorflow.split",
"tensorflow.sqrt",
"tensorflow.extract_volume_pat... | [((281, 348), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(batch_size, num_point, NUM_DIMS)'}), '(tf.float32, shape=(batch_size, num_point, NUM_DIMS))\n', (295, 348), True, 'import tensorflow as tf\n'), ((376, 443), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(batch_size, num_point, NUM_DIMS)'}), '(tf.float32, shape=(batch_size, num_point, NUM_DIMS))\n', (390, 443), True, 'import tensorflow as tf\n'), ((475, 542), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(batch_size, num_point, NUM_DIMS)'}), '(tf.float32, shape=(batch_size, num_point, NUM_DIMS))\n', (489, 542), True, 'import tensorflow as tf\n'), ((559, 626), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(batch_size, num_point, NUM_DIMS)'}), '(tf.float32, shape=(batch_size, num_point, NUM_DIMS))\n', (573, 626), True, 'import tensorflow as tf\n'), ((644, 701), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(batch_size, num_point)'}), '(tf.float32, shape=(batch_size, num_point))\n', (658, 701), True, 'import tensorflow as tf\n'), ((2028, 2054), 'tensorflow.constant', 'tf.constant', (['x', 'tf.float32'], {}), '(x, tf.float32)\n', (2039, 2054), True, 'import tensorflow as tf\n'), ((3370, 3458), 'tensorflow.contrib.distributions.MultivariateNormalDiag', 'tf.contrib.distributions.MultivariateNormalDiag', ([], {'loc': 'batch_mu', 'scale_diag': 'batch_sig'}), '(loc=batch_mu, scale_diag=\n batch_sig)\n', (3417, 3458), True, 'import tensorflow as tf\n'), ((3545, 3578), 'tensorflow.multiply', 'tf.multiply', (['p_per_point', 'batch_w'], {}), '(p_per_point, batch_w)\n', (3556, 3578), True, 'import tensorflow as tf\n'), ((3920, 3951), 'tensorflow.reduce_max', 'tf.reduce_max', (['d_pi_all'], {'axis': '(1)'}), '(d_pi_all, axis=1)\n', (3933, 3951), True, 'import tensorflow as tf\n'), ((3970, 4002), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['d_pi_all'], 
{'axis': '(1)'}), '(d_pi_all, axis=1)\n', (3984, 4002), True, 'import tensorflow as tf\n'), ((4249, 4280), 'tensorflow.reduce_max', 'tf.reduce_max', (['d_mu_all'], {'axis': '(1)'}), '(d_mu_all, axis=1)\n', (4262, 4280), True, 'import tensorflow as tf\n'), ((4302, 4333), 'tensorflow.reduce_min', 'tf.reduce_min', (['d_mu_all'], {'axis': '(1)'}), '(d_mu_all, axis=1)\n', (4315, 4333), True, 'import tensorflow as tf\n'), ((4356, 4388), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['d_mu_all'], {'axis': '(1)'}), '(d_mu_all, axis=1)\n', (4370, 4388), True, 'import tensorflow as tf\n'), ((4769, 4801), 'tensorflow.reduce_max', 'tf.reduce_max', (['d_sig_all'], {'axis': '(1)'}), '(d_sig_all, axis=1)\n', (4782, 4801), True, 'import tensorflow as tf\n'), ((4824, 4856), 'tensorflow.reduce_min', 'tf.reduce_min', (['d_sig_all'], {'axis': '(1)'}), '(d_sig_all, axis=1)\n', (4837, 4856), True, 'import tensorflow as tf\n'), ((4880, 4913), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['d_sig_all'], {'axis': '(1)'}), '(d_sig_all, axis=1)\n', (4894, 4913), True, 'import tensorflow as tf\n'), ((9867, 9895), 'tensorflow.expand_dims', 'tf.expand_dims', (['embedding', '(1)'], {}), '(embedding, 1)\n', (9881, 9895), True, 'import tensorflow as tf\n'), ((19341, 19356), 'tensorflow.nn.relu', 'tf.nn.relu', (['net'], {}), '(net)\n', (19351, 19356), True, 'import tensorflow as tf\n'), ((19682, 19825), 'tf_util.conv3d', 'tf_util.conv3d', (['net', 'filters', 'kernel'], {'padding': 'padding', 'stride': 'stride', 'bn': 'bn', 'is_training': 'is_training', 'scope': "(scope + '_1')", 'bn_decay': 'bn_decay'}), "(net, filters, kernel, padding=padding, stride=stride, bn=bn,\n is_training=is_training, scope=scope + '_1', bn_decay=bn_decay)\n", (19696, 19825), False, 'import tf_util\n'), ((19911, 20054), 'tf_util.conv3d', 'tf_util.conv3d', (['net', 'filters', 'kernel'], {'padding': 'padding', 'stride': 'stride', 'bn': 'bn', 'is_training': 'is_training', 'scope': "(scope + '_2')", 'bn_decay': 'bn_decay'}), 
"(net, filters, kernel, padding=padding, stride=stride, bn=bn,\n is_training=is_training, scope=scope + '_2', bn_decay=bn_decay)\n", (19925, 20054), False, 'import tf_util\n'), ((24838, 24863), 'tensorflow.concat', 'tf.concat', (['[net, netB]', '(0)'], {}), '([net, netB], 0)\n', (24847, 24863), True, 'import tensorflow as tf\n'), ((35209, 35228), 'tensorflow.split', 'tf.split', (['net', '(2)', '(0)'], {}), '(net, 2, 0)\n', (35217, 35228), True, 'import tensorflow as tf\n'), ((35817, 35848), 'tensorflow.expand_dims', 'tf.expand_dims', (['point_cloud', '(-1)'], {}), '(point_cloud, -1)\n', (35831, 35848), True, 'import tensorflow as tf\n'), ((40040, 40063), 'tensorflow.expand_dims', 'tf.expand_dims', (['net', '(-1)'], {}), '(net, -1)\n', (40054, 40063), True, 'import tensorflow as tf\n'), ((40440, 40503), 'tensorflow.reshape', 'tf.reshape', (['net', '[batch_size, vec_size, vec_size, vec_size, -1]'], {}), '(net, [batch_size, vec_size, vec_size, vec_size, -1])\n', (40450, 40503), True, 'import tensorflow as tf\n'), ((42121, 42132), 'tensorflow.stack', 'tf.stack', (['C'], {}), '(C)\n', (42129, 42132), True, 'import tensorflow as tf\n'), ((42149, 42165), 'tensorflow.stack', 'tf.stack', (['output'], {}), '(output)\n', (42157, 42165), True, 'import tensorflow as tf\n'), ((42180, 42214), 'tensorflow.transpose', 'tf.transpose', (['output', '[2, 0, 1, 3]'], {}), '(output, [2, 0, 1, 3])\n', (42192, 42214), True, 'import tensorflow as tf\n'), ((42229, 42287), 'tensorflow.reshape', 'tf.reshape', (['output', '[output.shape[0], output.shape[1], -1]'], {}), '(output, [output.shape[0], output.shape[1], -1])\n', (42239, 42287), True, 'import tensorflow as tf\n'), ((42920, 42973), 'tensorflow.reshape', 'tf.reshape', (['net', '[batch_size, vec_size, vec_size, -1]'], {}), '(net, [batch_size, vec_size, vec_size, -1])\n', (42930, 42973), True, 'import tensorflow as tf\n'), ((43002, 43106), 'tensorflow.image.extract_image_patches', 'tf.image.extract_image_patches', (['net', '[1, k, k, 1]', 
'[1, 1, 1, 1]'], {'rates': '[1, 1, 1, 1]', 'padding': '"""VALID"""'}), "(net, [1, k, k, 1], [1, 1, 1, 1], rates=[1, 1,\n 1, 1], padding='VALID')\n", (43032, 43106), True, 'import tensorflow as tf\n'), ((43218, 43270), 'tensorflow.stack', 'tf.stack', (['[X[kh:-kh, kh:-kh], Y[kh:-kh, kh:-kh]]', '(-1)'], {}), '([X[kh:-kh, kh:-kh], Y[kh:-kh, kh:-kh]], -1)\n', (43226, 43270), True, 'import tensorflow as tf\n'), ((43296, 43318), 'tensorflow.cast', 'tf.cast', (['C', 'tf.float32'], {}), '(C, tf.float32)\n', (43303, 43318), True, 'import tensorflow as tf\n'), ((43327, 43356), 'tensorflow.reshape', 'tf.reshape', (['C', '[-1, NUM_DIMS]'], {}), '(C, [-1, NUM_DIMS])\n', (43337, 43356), True, 'import tensorflow as tf\n'), ((44756, 44823), 'tensorflow.reshape', 'tf.reshape', (['output', '[output.shape[0], (vec_size - 2 * kh) ** 2, -1]'], {}), '(output, [output.shape[0], (vec_size - 2 * kh) ** 2, -1])\n', (44766, 44823), True, 'import tensorflow as tf\n'), ((47942, 47982), 'tensorflow.identity', 'tf.identity', (['loss_pred'], {'name': '"""loss_pred"""'}), "(loss_pred, name='loss_pred')\n", (47953, 47982), True, 'import tensorflow as tf\n'), ((47987, 48031), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""loss_pred"""', 'loss_pred'], {}), "('loss_pred', loss_pred)\n", (48007, 48031), True, 'import tensorflow as tf\n'), ((1711, 1747), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'grid_size', '(False)'], {}), '(-1, 1, grid_size, False)\n', (1722, 1747), True, 'import numpy as np\n'), ((1788, 1805), 'numpy.meshgrid', 'np.meshgrid', (['l', 'l'], {}), '(l, l)\n', (1799, 1805), True, 'import numpy as np\n'), ((1981, 2003), 'tensorflow.ones', 'tf.ones', (['[n_gaussians]'], {}), '([n_gaussians])\n', (1988, 2003), True, 'import tensorflow as tf\n'), ((2073, 2098), 'tensorflow.ones', 'tf.ones', (['[n_gaussians, D]'], {}), '([n_gaussians, D])\n', (2080, 2098), True, 'import tensorflow as tf\n'), ((2172, 2196), 'tensorflow.expand_dims', 'tf.expand_dims', (['sigma', '(0)'], 
{}), '(sigma, 0)\n', (2186, 2196), True, 'import tensorflow as tf\n'), ((2268, 2296), 'tensorflow.expand_dims', 'tf.expand_dims', (['batch_sig', '(0)'], {}), '(batch_sig, 0)\n', (2282, 2296), True, 'import tensorflow as tf\n'), ((2383, 2404), 'tensorflow.expand_dims', 'tf.expand_dims', (['mu', '(0)'], {}), '(mu, 0)\n', (2397, 2404), True, 'import tensorflow as tf\n'), ((2475, 2502), 'tensorflow.expand_dims', 'tf.expand_dims', (['batch_mu', '(0)'], {}), '(batch_mu, 0)\n', (2489, 2502), True, 'import tensorflow as tf\n'), ((2773, 2799), 'tensorflow.expand_dims', 'tf.expand_dims', (['points', '(-2)'], {}), '(points, -2)\n', (2787, 2799), True, 'import tensorflow as tf\n'), ((3685, 3706), 'tensorflow.expand_dims', 'tf.expand_dims', (['Q', '(-1)'], {}), '(Q, -1)\n', (3699, 3706), True, 'import tensorflow as tf\n'), ((4037, 4072), 'tensorflow.concat', 'tf.concat', (['[d_pi_mean, d_pi_max]', '(2)'], {}), '([d_pi_mean, d_pi_max], 2)\n', (4046, 4072), True, 'import tensorflow as tf\n'), ((4434, 4491), 'tensorflow.concat', 'tf.concat', (['[d_mu_all_mean, d_mu_all_max, d_mu_all_min]', '(2)'], {}), '([d_mu_all_mean, d_mu_all_max, d_mu_all_min], 2)\n', (4443, 4491), True, 'import tensorflow as tf\n'), ((4958, 5018), 'tensorflow.concat', 'tf.concat', (['[d_sig_all_mean, d_sig_all_max, d_sig_all_min]', '(2)'], {}), '([d_sig_all_mean, d_sig_all_max, d_sig_all_min], 2)\n', (4967, 5018), True, 'import tensorflow as tf\n'), ((5740, 5771), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['d_pi'], {'dim': '(1)'}), '(d_pi, dim=1)\n', (5758, 5771), True, 'import tensorflow as tf\n'), ((5788, 5819), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['d_mu'], {'dim': '(1)'}), '(d_mu, dim=1)\n', (5806, 5819), True, 'import tensorflow as tf\n'), ((5839, 5873), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['d_sigma'], {'dim': '(1)'}), '(d_sigma, dim=1)\n', (5857, 5873), True, 'import tensorflow as tf\n'), ((6174, 6214), 'tensorflow.concat', 'tf.concat', (['[d_pi, d_mu, 
d_sigma]'], {'axis': '(1)'}), '([d_pi, d_mu, d_sigma], axis=1)\n', (6183, 6214), True, 'import tensorflow as tf\n'), ((6240, 6280), 'tensorflow.concat', 'tf.concat', (['[d_pi, d_mu, d_sigma]'], {'axis': '(2)'}), '([d_pi, d_mu, d_sigma], axis=2)\n', (6249, 6280), True, 'import tensorflow as tf\n'), ((6295, 6327), 'tensorflow.transpose', 'tf.transpose', (['fv'], {'perm': '[0, 2, 1]'}), '(fv, perm=[0, 2, 1])\n', (6307, 6327), True, 'import tensorflow as tf\n'), ((6344, 6371), 'tensorflow.transpose', 'tf.transpose', (['fv', '[0, 2, 1]'], {}), '(fv, [0, 2, 1])\n', (6356, 6371), True, 'import tensorflow as tf\n'), ((8056, 8088), 'tensorflow.concat', 'tf.concat', (['[new_pc, new_emb]', '(-1)'], {}), '([new_pc, new_emb], -1)\n', (8065, 8088), True, 'import tensorflow as tf\n'), ((8105, 8131), 'tensorflow.expand_dims', 'tf.expand_dims', (['new_in', '(-1)'], {}), '(new_in, -1)\n', (8119, 8131), True, 'import tensorflow as tf\n'), ((8739, 8787), 'tensorflow.tile', 'tf.tile', (['Centers', '[batch_size, num_points, 1, 1]'], {}), '(Centers, [batch_size, num_points, 1, 1])\n', (8746, 8787), True, 'import tensorflow as tf\n'), ((9060, 9103), 'tensorflow.cast', 'tf.cast', (['(pc_x > C_x - grid_size)', 'tf.float32'], {}), '(pc_x > C_x - grid_size, tf.float32)\n', (9067, 9103), True, 'import tensorflow as tf\n'), ((9117, 9161), 'tensorflow.cast', 'tf.cast', (['(pc_x <= C_x + grid_size)', 'tf.float32'], {}), '(pc_x <= C_x + grid_size, tf.float32)\n', (9124, 9161), True, 'import tensorflow as tf\n'), ((9175, 9218), 'tensorflow.cast', 'tf.cast', (['(pc_y > C_y - grid_size)', 'tf.float32'], {}), '(pc_y > C_y - grid_size, tf.float32)\n', (9182, 9218), True, 'import tensorflow as tf\n'), ((9232, 9276), 'tensorflow.cast', 'tf.cast', (['(pc_y <= C_y + grid_size)', 'tf.float32'], {}), '(pc_y <= C_y + grid_size, tf.float32)\n', (9239, 9276), True, 'import tensorflow as tf\n'), ((9637, 9678), 'tensorflow.math.argmax', 'tf.math.argmax', ([], {'input': 'binary_vect', 'axis': '(2)'}), 
'(input=binary_vect, axis=2)\n', (9651, 9678), True, 'import tensorflow as tf\n'), ((20682, 20703), 'tensorflow.squeeze', 'tf.squeeze', (['embedding'], {}), '(embedding)\n', (20692, 20703), True, 'import tensorflow as tf\n'), ((20726, 20748), 'tensorflow.squeeze', 'tf.squeeze', (['embeddingB'], {}), '(embeddingB)\n', (20736, 20748), True, 'import tensorflow as tf\n'), ((24081, 24109), 'tensorflow.expand_dims', 'tf.expand_dims', (['embedding', '(1)'], {}), '(embedding, 1)\n', (24095, 24109), True, 'import tensorflow as tf\n'), ((24297, 24326), 'tensorflow.expand_dims', 'tf.expand_dims', (['embeddingB', '(1)'], {}), '(embeddingB, 1)\n', (24311, 24326), True, 'import tensorflow as tf\n'), ((34974, 34989), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['net'], {}), '(net)\n', (34984, 34989), True, 'import tensorflow as tf\n'), ((35938, 35980), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""pointnet"""'], {'reuse': 'reuse'}), "('pointnet', reuse=reuse)\n", (35955, 35980), True, 'import tensorflow as tf\n'), ((36003, 36189), 'tf_util.conv2d', 'tf_util.conv2d', (['input_image', '(128)', '[1, NUM_DIMS]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': '(False)', 'is_training': 'is_training', 'scope': '"""conv1"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(input_image, 128, [1, NUM_DIMS], padding='VALID', stride=[1,\n 1], bn=False, is_training=is_training, scope='conv1', bn_decay=bn_decay,\n reuse=reuse, weight_decay=wd)\n", (36017, 36189), False, 'import tf_util\n'), ((36405, 36573), 'tf_util.conv2d', 'tf_util.conv2d', (['net', '(128)', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""conv2"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, 128, [1, 1], padding='VALID', stride=[1, 1], bn=bn,\n is_training=is_training, scope='conv2', bn_decay=bn_decay, reuse=reuse,\n weight_decay=wd)\n", (36419, 36573), False, 'import tf_util\n'), ((36698, 36866), 
'tf_util.conv2d', 'tf_util.conv2d', (['net', '(512)', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""conv3"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, 512, [1, 1], padding='VALID', stride=[1, 1], bn=bn,\n is_training=is_training, scope='conv3', bn_decay=bn_decay, reuse=reuse,\n weight_decay=wd)\n", (36712, 36866), False, 'import tf_util\n'), ((36991, 37171), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'Embedding_Size', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""conv4"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, Embedding_Size, [1, 1], padding='VALID', stride=[1, 1],\n bn=bn, is_training=is_training, scope='conv4', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (37005, 37171), False, 'import tf_util\n'), ((39143, 39185), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""vox_vect"""'], {'reuse': 'reuse'}), "('vox_vect', reuse=reuse)\n", (39160, 39185), True, 'import tensorflow as tf\n'), ((39797, 39847), 'tensorflow.reshape', 'tf.reshape', (['out_vect', '[batch_size, Embedding_Size]'], {}), '(out_vect, [batch_size, Embedding_Size])\n', (39807, 39847), True, 'import tensorflow as tf\n'), ((40602, 40617), 'numpy.floor', 'np.floor', (['(k / 2)'], {}), '(k / 2)\n', (40610, 40617), True, 'import numpy as np\n'), ((43194, 43209), 'numpy.floor', 'np.floor', (['(k / 2)'], {}), '(k / 2)\n', (43202, 43209), True, 'import numpy as np\n'), ((45023, 45067), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""local_z_3d"""'], {'reuse': 'reuse'}), "('local_z_3d', reuse=reuse)\n", (45040, 45067), True, 'import tensorflow as tf\n'), ((45277, 45340), 'tensorflow.reshape', 'tf.reshape', (['net', '[batch_size, grid_len, grid_len, grid_len, -1]'], {}), '(net, [batch_size, grid_len, grid_len, grid_len, -1])\n', (45287, 45340), True, 'import tensorflow as tf\n'), 
((47444, 47468), 'tensorflow.squeeze', 'tf.squeeze', (['loss_samples'], {}), '(loss_samples)\n', (47454, 47468), True, 'import tensorflow as tf\n'), ((47710, 47752), 'tensorflow.add_to_collection', 'tf.add_to_collection', (['"""loss_samples"""', 'loss'], {}), "('loss_samples', loss)\n", (47730, 47752), True, 'import tensorflow as tf\n'), ((48312, 48339), 'numpy.arange', 'np.arange', (['(-1)', '(1)', 'grid_step'], {}), '(-1, 1, grid_step)\n', (48321, 48339), True, 'import numpy as np\n'), ((48396, 48413), 'numpy.meshgrid', 'np.meshgrid', (['l', 'l'], {}), '(l, l)\n', (48407, 48413), True, 'import numpy as np\n'), ((48456, 48476), 'numpy.meshgrid', 'np.meshgrid', (['l', 'l', 'l'], {}), '(l, l, l)\n', (48467, 48476), True, 'import numpy as np\n'), ((1605, 1625), 'numpy.sqrt', 'np.sqrt', (['n_gaussians'], {}), '(n_gaussians)\n', (1612, 1625), True, 'import numpy as np\n'), ((1889, 1909), 'numpy.meshgrid', 'np.meshgrid', (['l', 'l', 'l'], {}), '(l, l, l)\n', (1900, 1909), True, 'import numpy as np\n'), ((2604, 2624), 'tensorflow.expand_dims', 'tf.expand_dims', (['w', '(0)'], {}), '(w, 0)\n', (2618, 2624), True, 'import tensorflow as tf\n'), ((3599, 3641), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['w_p'], {'axis': '(-1)', 'keepdims': '(True)'}), '(w_p, axis=-1, keepdims=True)\n', (3612, 3641), True, 'import tensorflow as tf\n'), ((4562, 4588), 'tensorflow.sqrt', 'tf.sqrt', (['w_per_batch_per_d'], {}), '(w_per_batch_per_d)\n', (4569, 4588), True, 'import tensorflow as tf\n'), ((4638, 4686), 'tensorflow.pow', 'tf.pow', (['((batch_points - batch_mu) / batch_sig)', '(2)'], {}), '((batch_points - batch_mu) / batch_sig, 2)\n', (4644, 4686), True, 'import tensorflow as tf\n'), ((5089, 5119), 'tensorflow.sqrt', 'tf.sqrt', (['(2 * w_per_batch_per_d)'], {}), '(2 * w_per_batch_per_d)\n', (5096, 5119), True, 'import tensorflow as tf\n'), ((5463, 5476), 'tensorflow.sign', 'tf.sign', (['d_pi'], {}), '(d_pi)\n', (5470, 5476), True, 'import tensorflow as tf\n'), ((5542, 5555), 
'tensorflow.sign', 'tf.sign', (['d_mu'], {}), '(d_mu)\n', (5549, 5555), True, 'import tensorflow as tf\n'), ((5625, 5641), 'tensorflow.sign', 'tf.sign', (['d_sigma'], {}), '(d_sigma)\n', (5632, 5641), True, 'import tensorflow as tf\n'), ((5968, 6002), 'tensorflow.transpose', 'tf.transpose', (['d_pi'], {'perm': '[0, 2, 1]'}), '(d_pi, perm=[0, 2, 1])\n', (5980, 6002), True, 'import tensorflow as tf\n'), ((6046, 6080), 'tensorflow.transpose', 'tf.transpose', (['d_mu'], {'perm': '[0, 2, 1]'}), '(d_mu, perm=[0, 2, 1])\n', (6058, 6080), True, 'import tensorflow as tf\n'), ((6124, 6161), 'tensorflow.transpose', 'tf.transpose', (['d_sigma'], {'perm': '[0, 2, 1]'}), '(d_sigma, perm=[0, 2, 1])\n', (6136, 6161), True, 'import tensorflow as tf\n'), ((8678, 8704), 'tensorflow.expand_dims', 'tf.expand_dims', (['Centers', '(0)'], {}), '(Centers, 0)\n', (8692, 8704), True, 'import tensorflow as tf\n'), ((8830, 8861), 'tensorflow.expand_dims', 'tf.expand_dims', (['point_cloud', '(-2)'], {}), '(point_cloud, -2)\n', (8844, 8861), True, 'import tensorflow as tf\n'), ((9436, 9479), 'tensorflow.cast', 'tf.cast', (['(pc_z > C_z - grid_size)', 'tf.float32'], {}), '(pc_z > C_z - grid_size, tf.float32)\n', (9443, 9479), True, 'import tensorflow as tf\n'), ((9497, 9541), 'tensorflow.cast', 'tf.cast', (['(pc_z <= C_z + grid_size)', 'tf.float32'], {}), '(pc_z <= C_z + grid_size, tf.float32)\n', (9504, 9541), True, 'import tensorflow as tf\n'), ((10045, 10091), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""dpdist_local"""'], {'reuse': 'reuse'}), "('dpdist_local', reuse=reuse)\n", (10062, 10091), True, 'import tensorflow as tf\n'), ((10118, 10309), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'mlp[0]', '[1, E + NUM_DIMS]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv1"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, mlp[0], [1, E + NUM_DIMS], padding='VALID', stride=[1, \n 1], bn=bn, 
is_training=is_training, scope='mapper_conv1', bn_decay=\n bn_decay, reuse=reuse, weight_decay=wd)\n", (10132, 10309), False, 'import tf_util\n'), ((10517, 10696), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'mlp[1]', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv2"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, mlp[1], [1, 1], padding='VALID', stride=[1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv2', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (10531, 10696), False, 'import tf_util\n'), ((10866, 11045), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'mlp[2]', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv3"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, mlp[2], [1, 1], padding='VALID', stride=[1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv3', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (10880, 11045), False, 'import tf_util\n'), ((11215, 11419), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'output_size', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv4"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd', 'activation_fn': 'None'}), "(net, output_size, [1, 1], padding='VALID', stride=[1, 1], bn\n =bn, is_training=is_training, scope='mapper_conv4', bn_decay=bn_decay,\n reuse=reuse, weight_decay=wd, activation_fn=None)\n", (11229, 11419), False, 'import tf_util\n'), ((22125, 22157), 'tensorflow.concat', 'tf.concat', (['[new_pc, new_emb]', '(-1)'], {}), '([new_pc, new_emb], -1)\n', (22134, 22157), True, 'import tensorflow as tf\n'), ((22178, 22204), 'tensorflow.expand_dims', 'tf.expand_dims', (['new_in', '(-1)'], {}), '(new_in, -1)\n', (22192, 22204), True, 'import tensorflow as tf\n'), ((22864, 22912), 
'tensorflow.tile', 'tf.tile', (['Centers', '[batch_size, num_points, 1, 1]'], {}), '(Centers, [batch_size, num_points, 1, 1])\n', (22871, 22912), True, 'import tensorflow as tf\n'), ((23209, 23252), 'tensorflow.cast', 'tf.cast', (['(pc_x > C_x - grid_size)', 'tf.float32'], {}), '(pc_x > C_x - grid_size, tf.float32)\n', (23216, 23252), True, 'import tensorflow as tf\n'), ((23270, 23314), 'tensorflow.cast', 'tf.cast', (['(pc_x <= C_x + grid_size)', 'tf.float32'], {}), '(pc_x <= C_x + grid_size, tf.float32)\n', (23277, 23314), True, 'import tensorflow as tf\n'), ((23332, 23375), 'tensorflow.cast', 'tf.cast', (['(pc_y > C_y - grid_size)', 'tf.float32'], {}), '(pc_y > C_y - grid_size, tf.float32)\n', (23339, 23375), True, 'import tensorflow as tf\n'), ((23393, 23437), 'tensorflow.cast', 'tf.cast', (['(pc_y <= C_y + grid_size)', 'tf.float32'], {}), '(pc_y <= C_y + grid_size, tf.float32)\n', (23400, 23437), True, 'import tensorflow as tf\n'), ((23834, 23875), 'tensorflow.math.argmax', 'tf.math.argmax', ([], {'input': 'binary_vect', 'axis': '(2)'}), '(input=binary_vect, axis=2)\n', (23848, 23875), True, 'import tensorflow as tf\n'), ((24480, 24508), 'tensorflow.expand_dims', 'tf.expand_dims', (['embedding', '(1)'], {}), '(embedding, 1)\n', (24494, 24508), True, 'import tensorflow as tf\n'), ((24581, 24610), 'tensorflow.expand_dims', 'tf.expand_dims', (['embeddingB', '(1)'], {}), '(embeddingB, 1)\n', (24595, 24610), True, 'import tensorflow as tf\n'), ((24686, 24726), 'tensorflow.concat', 'tf.concat', (['[point_cloudB, embedding]', '(-1)'], {}), '([point_cloudB, embedding], -1)\n', (24695, 24726), True, 'import tensorflow as tf\n'), ((24779, 24819), 'tensorflow.concat', 'tf.concat', (['[point_cloud, embeddingB]', '(-1)'], {}), '([point_cloud, embeddingB], -1)\n', (24788, 24819), True, 'import tensorflow as tf\n'), ((24965, 25011), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""dpdist_local"""'], {'reuse': 'reuse'}), "('dpdist_local', reuse=reuse)\n", (24982, 
25011), True, 'import tensorflow as tf\n'), ((25843, 26022), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'mlp[1]', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv2"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, mlp[1], [1, 1], padding='VALID', stride=[1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv2', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (25857, 26022), False, 'import tf_util\n'), ((26192, 26371), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'mlp[2]', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv3"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, mlp[2], [1, 1], padding='VALID', stride=[1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv3', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (26206, 26371), False, 'import tf_util\n'), ((26541, 26745), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'output_size', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv4"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd', 'activation_fn': 'None'}), "(net, output_size, [1, 1], padding='VALID', stride=[1, 1], bn\n =bn, is_training=is_training, scope='mapper_conv4', bn_decay=bn_decay,\n reuse=reuse, weight_decay=wd, activation_fn=None)\n", (26555, 26745), False, 'import tf_util\n'), ((35033, 35049), 'tensorflow.nn.relu6', 'tf.nn.relu6', (['net'], {}), '(net)\n', (35044, 35049), True, 'import tensorflow as tf\n'), ((37463, 37536), 'tf_util.max_pool2d', 'tf_util.max_pool2d', (['net', '[num_point, 1]'], {'padding': '"""VALID"""', 'scope': '"""maxpool"""'}), "(net, [num_point, 1], padding='VALID', scope='maxpool')\n", (37481, 37536), False, 'import tf_util\n'), ((37604, 37677), 'tf_util.avg_pool2d', 'tf_util.avg_pool2d', (['net', 
'[num_point, 1]'], {'padding': '"""VALID"""', 'scope': '"""avgpool"""'}), "(net, [num_point, 1], padding='VALID', scope='avgpool')\n", (37622, 37677), False, 'import tf_util\n'), ((39567, 39594), 'tensorflow.expand_dims', 'tf.expand_dims', (['out_vect', '(1)'], {}), '(out_vect, 1)\n', (39581, 39594), True, 'import tensorflow as tf\n'), ((40349, 40380), 'numpy.power', 'np.power', (['Embedding_Size', '(1 / 3)'], {}), '(Embedding_Size, 1 / 3)\n', (40357, 40380), True, 'import numpy as np\n'), ((42845, 42868), 'numpy.sqrt', 'np.sqrt', (['Embedding_Size'], {}), '(Embedding_Size)\n', (42852, 42868), True, 'import numpy as np\n'), ((45378, 45478), 'tensorflow.extract_volume_patches', 'tf.extract_volume_patches', (['net', '[1, k, k, k, 1]', '[1, 1, 1, 1, 1]', '"""SAME"""'], {'name': '"""EXTRACT_LOCAL_Z"""'}), "(net, [1, k, k, k, 1], [1, 1, 1, 1, 1], 'SAME',\n name='EXTRACT_LOCAL_Z')\n", (45403, 45478), True, 'import tensorflow as tf\n'), ((45617, 45640), 'tensorflow.stack', 'tf.stack', (['[X, Y, Z]', '(-1)'], {}), '([X, Y, Z], -1)\n', (45625, 45640), True, 'import tensorflow as tf\n'), ((45655, 45677), 'tensorflow.cast', 'tf.cast', (['C', 'tf.float32'], {}), '(C, tf.float32)\n', (45662, 45677), True, 'import tensorflow as tf\n'), ((45694, 45723), 'tensorflow.reshape', 'tf.reshape', (['C', '[-1, NUM_DIMS]'], {}), '(C, [-1, NUM_DIMS])\n', (45704, 45723), True, 'import tensorflow as tf\n'), ((45744, 45808), 'tensorflow.reshape', 'tf.reshape', (['output', '[batch_size, output.shape[1].value ** 3, -1]'], {}), '(output, [batch_size, output.shape[1].value ** 3, -1])\n', (45754, 45808), True, 'import tensorflow as tf\n'), ((45973, 46032), 'tensorflow.constant', 'tf.constant', (['[[0, 0], [kh, kh], [kh, kh], [kh, kh], [0, 0]]'], {}), '([[0, 0], [kh, kh], [kh, kh], [kh, kh], [0, 0]])\n', (45984, 46032), True, 'import tensorflow as tf\n'), ((46200, 46233), 'tensorflow.pad', 'tf.pad', (['net', 'paddings', '"""CONSTANT"""'], {}), "(net, paddings, 'CONSTANT')\n", (46206, 46233), True, 
'import tensorflow as tf\n'), ((46918, 46929), 'tensorflow.stack', 'tf.stack', (['C'], {}), '(C)\n', (46926, 46929), True, 'import tensorflow as tf\n'), ((46952, 46971), 'tensorflow.stack', 'tf.stack', (['output', '(1)'], {}), '(output, 1)\n', (46960, 46971), True, 'import tensorflow as tf\n'), ((47006, 47065), 'tensorflow.reshape', 'tf.reshape', (['output', '[batch_size, output.shape[1].value, -1]'], {}), '(output, [batch_size, output.shape[1].value, -1])\n', (47016, 47065), True, 'import tensorflow as tf\n'), ((1671, 1699), 'numpy.power', 'np.power', (['n_gaussians', '(1 / 3)'], {}), '(n_gaussians, 1 / 3)\n', (1679, 1699), True, 'import numpy as np\n'), ((3033, 3053), 'tensorflow.expand_dims', 'tf.expand_dims', (['w', '(0)'], {}), '(w, 0)\n', (3047, 3053), True, 'import tensorflow as tf\n'), ((3203, 3223), 'tensorflow.expand_dims', 'tf.expand_dims', (['w', '(0)'], {}), '(w, 0)\n', (3217, 3223), True, 'import tensorflow as tf\n'), ((3819, 3835), 'tensorflow.sqrt', 'tf.sqrt', (['batch_w'], {}), '(batch_w)\n', (3826, 3835), True, 'import tensorflow as tf\n'), ((7399, 7421), 'tensorflow.expand_dims', 'tf.expand_dims', (['bv', '(-1)'], {}), '(bv, -1)\n', (7413, 7421), True, 'import tensorflow as tf\n'), ((8518, 8555), 'tensorflow.abs', 'tf.abs', (['(Centers[0][0] - Centers[1][0])'], {}), '(Centers[0][0] - Centers[1][0])\n', (8524, 8555), True, 'import tensorflow as tf\n'), ((8600, 8637), 'tensorflow.abs', 'tf.abs', (['(Centers[0][2] - Centers[1][2])'], {}), '(Centers[0][2] - Centers[1][2])\n', (8606, 8637), True, 'import tensorflow as tf\n'), ((11818, 11851), 'tensorflow.transpose', 'tf.transpose', (['net_D', '[0, 1, 3, 2]'], {}), '(net_D, [0, 1, 3, 2])\n', (11830, 11851), True, 'import tensorflow as tf\n'), ((11869, 11893), 'tensorflow.expand_dims', 'tf.expand_dims', (['net_D', '(2)'], {}), '(net_D, 2)\n', (11883, 11893), True, 'import tensorflow as tf\n'), ((11914, 11945), 'tensorflow.tile', 'tf.tile', (['net_D', '[1, 1, k, k, 1]'], {}), '(net_D, [1, 1, k, k, 
1])\n', (11921, 11945), True, 'import tensorflow as tf\n'), ((11962, 12002), 'tensorflow.reshape', 'tf.reshape', (['net_E', '[4 * B, NP, k, k, -1]'], {}), '(net_E, [4 * B, NP, k, k, -1])\n', (11972, 12002), True, 'import tensorflow as tf\n'), ((12015, 12044), 'tensorflow.concat', 'tf.concat', (['[net_E, net_D]', '(-1)'], {}), '([net_E, net_D], -1)\n', (12024, 12044), True, 'import tensorflow as tf\n'), ((12062, 12101), 'tensorflow.reshape', 'tf.reshape', (['net', '[4 * B * NP, k, k, -1]'], {}), '(net, [4 * B * NP, k, k, -1])\n', (12072, 12101), True, 'import tensorflow as tf\n'), ((14018, 14051), 'tensorflow.transpose', 'tf.transpose', (['net_D', '[0, 1, 3, 2]'], {}), '(net_D, [0, 1, 3, 2])\n', (14030, 14051), True, 'import tensorflow as tf\n'), ((14069, 14093), 'tensorflow.expand_dims', 'tf.expand_dims', (['net_D', '(2)'], {}), '(net_D, 2)\n', (14083, 14093), True, 'import tensorflow as tf\n'), ((14114, 14138), 'tensorflow.expand_dims', 'tf.expand_dims', (['net_D', '(2)'], {}), '(net_D, 2)\n', (14128, 14138), True, 'import tensorflow as tf\n'), ((14159, 14193), 'tensorflow.tile', 'tf.tile', (['net_D', '[1, 1, k, k, k, 1]'], {}), '(net_D, [1, 1, k, k, k, 1])\n', (14166, 14193), True, 'import tensorflow as tf\n'), ((14209, 14252), 'tensorflow.reshape', 'tf.reshape', (['net_E', '[4 * B, NP, k, k, k, -1]'], {}), '(net_E, [4 * B, NP, k, k, k, -1])\n', (14219, 14252), True, 'import tensorflow as tf\n'), ((14264, 14293), 'tensorflow.concat', 'tf.concat', (['[net_E, net_D]', '(-1)'], {}), '([net_E, net_D], -1)\n', (14273, 14293), True, 'import tensorflow as tf\n'), ((14311, 14353), 'tensorflow.reshape', 'tf.reshape', (['net', '[4 * B * NP, k, k, k, -1]'], {}), '(net, [4 * B * NP, k, k, k, -1])\n', (14321, 14353), True, 'import tensorflow as tf\n'), ((14538, 14571), 'tensorflow.transpose', 'tf.transpose', (['net_D', '[0, 1, 3, 2]'], {}), '(net_D, [0, 1, 3, 2])\n', (14550, 14571), True, 'import tensorflow as tf\n'), ((14615, 14651), 'tensorflow.reshape', 'tf.reshape', 
(['net_E', '[B, NP, k, k, -1]'], {}), '(net_E, [B, NP, k, k, -1])\n', (14625, 14651), True, 'import tensorflow as tf\n'), ((14715, 14752), 'tensorflow.reshape', 'tf.reshape', (['net_E', '[B * NP, k, k, -1]'], {}), '(net_E, [B * NP, k, k, -1])\n', (14725, 14752), True, 'import tensorflow as tf\n'), ((16747, 16780), 'tensorflow.transpose', 'tf.transpose', (['net_D', '[0, 1, 3, 2]'], {}), '(net_D, [0, 1, 3, 2])\n', (16759, 16780), True, 'import tensorflow as tf\n'), ((16798, 16837), 'tensorflow.reshape', 'tf.reshape', (['net_E', '[B, NP, k, k, k, -1]'], {}), '(net_E, [B, NP, k, k, k, -1])\n', (16808, 16837), True, 'import tensorflow as tf\n'), ((16900, 16940), 'tensorflow.reshape', 'tf.reshape', (['net_E', '[B * NP, k, k, k, -1]'], {}), '(net_E, [B * NP, k, k, k, -1])\n', (16910, 16940), True, 'import tensorflow as tf\n'), ((22799, 22825), 'tensorflow.expand_dims', 'tf.expand_dims', (['Centers', '(0)'], {}), '(Centers, 0)\n', (22813, 22825), True, 'import tensorflow as tf\n'), ((22959, 22990), 'tensorflow.expand_dims', 'tf.expand_dims', (['point_cloud', '(-2)'], {}), '(point_cloud, -2)\n', (22973, 22990), True, 'import tensorflow as tf\n'), ((23617, 23660), 'tensorflow.cast', 'tf.cast', (['(pc_z > C_z - grid_size)', 'tf.float32'], {}), '(pc_z > C_z - grid_size, tf.float32)\n', (23624, 23660), True, 'import tensorflow as tf\n'), ((23682, 23726), 'tensorflow.cast', 'tf.cast', (['(pc_z <= C_z + grid_size)', 'tf.float32'], {}), '(pc_z <= C_z + grid_size, tf.float32)\n', (23689, 23726), True, 'import tensorflow as tf\n'), ((25063, 25254), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'mlp[0]', '[1, E + NUM_DIMS]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv1"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, mlp[0], [1, E + NUM_DIMS], padding='VALID', stride=[1, \n 1], bn=bn, is_training=is_training, scope='mapper_conv1', bn_decay=\n bn_decay, reuse=reuse, weight_decay=wd)\n", 
(25077, 25254), False, 'import tf_util\n'), ((25437, 25616), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'mlp[0]', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv1"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, mlp[0], [1, 1], padding='VALID', stride=[1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv1', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (25451, 25616), False, 'import tf_util\n'), ((27146, 27179), 'tensorflow.transpose', 'tf.transpose', (['net_D', '[0, 1, 3, 2]'], {}), '(net_D, [0, 1, 3, 2])\n', (27158, 27179), True, 'import tensorflow as tf\n'), ((27197, 27221), 'tensorflow.expand_dims', 'tf.expand_dims', (['net_D', '(2)'], {}), '(net_D, 2)\n', (27211, 27221), True, 'import tensorflow as tf\n'), ((27242, 27273), 'tensorflow.tile', 'tf.tile', (['net_D', '[1, 1, k, k, 1]'], {}), '(net_D, [1, 1, k, k, 1])\n', (27249, 27273), True, 'import tensorflow as tf\n'), ((27290, 27330), 'tensorflow.reshape', 'tf.reshape', (['net_E', '[4 * B, NP, k, k, -1]'], {}), '(net_E, [4 * B, NP, k, k, -1])\n', (27300, 27330), True, 'import tensorflow as tf\n'), ((27343, 27372), 'tensorflow.concat', 'tf.concat', (['[net_E, net_D]', '(-1)'], {}), '([net_E, net_D], -1)\n', (27352, 27372), True, 'import tensorflow as tf\n'), ((27390, 27429), 'tensorflow.reshape', 'tf.reshape', (['net', '[4 * B * NP, k, k, -1]'], {}), '(net, [4 * B * NP, k, k, -1])\n', (27400, 27429), True, 'import tensorflow as tf\n'), ((29346, 29379), 'tensorflow.transpose', 'tf.transpose', (['net_D', '[0, 1, 3, 2]'], {}), '(net_D, [0, 1, 3, 2])\n', (29358, 29379), True, 'import tensorflow as tf\n'), ((29397, 29421), 'tensorflow.expand_dims', 'tf.expand_dims', (['net_D', '(2)'], {}), '(net_D, 2)\n', (29411, 29421), True, 'import tensorflow as tf\n'), ((29442, 29466), 'tensorflow.expand_dims', 'tf.expand_dims', (['net_D', '(2)'], {}), '(net_D, 2)\n', (29456, 29466), True, 'import 
tensorflow as tf\n'), ((29487, 29521), 'tensorflow.tile', 'tf.tile', (['net_D', '[1, 1, k, k, k, 1]'], {}), '(net_D, [1, 1, k, k, k, 1])\n', (29494, 29521), True, 'import tensorflow as tf\n'), ((29537, 29580), 'tensorflow.reshape', 'tf.reshape', (['net_E', '[4 * B, NP, k, k, k, -1]'], {}), '(net_E, [4 * B, NP, k, k, k, -1])\n', (29547, 29580), True, 'import tensorflow as tf\n'), ((29592, 29621), 'tensorflow.concat', 'tf.concat', (['[net_E, net_D]', '(-1)'], {}), '([net_E, net_D], -1)\n', (29601, 29621), True, 'import tensorflow as tf\n'), ((29639, 29681), 'tensorflow.reshape', 'tf.reshape', (['net', '[4 * B * NP, k, k, k, -1]'], {}), '(net, [4 * B * NP, k, k, k, -1])\n', (29649, 29681), True, 'import tensorflow as tf\n'), ((29840, 29873), 'tensorflow.transpose', 'tf.transpose', (['net_D', '[0, 1, 3, 2]'], {}), '(net_D, [0, 1, 3, 2])\n', (29852, 29873), True, 'import tensorflow as tf\n'), ((29891, 29931), 'tensorflow.reshape', 'tf.reshape', (['net_E', '[4 * B, NP, k, k, -1]'], {}), '(net_E, [4 * B, NP, k, k, -1])\n', (29901, 29931), True, 'import tensorflow as tf\n'), ((29993, 30034), 'tensorflow.reshape', 'tf.reshape', (['net_E', '[4 * B * NP, k, k, -1]'], {}), '(net_E, [4 * B * NP, k, k, -1])\n', (30003, 30034), True, 'import tensorflow as tf\n'), ((32031, 32064), 'tensorflow.transpose', 'tf.transpose', (['net_D', '[0, 1, 3, 2]'], {}), '(net_D, [0, 1, 3, 2])\n', (32043, 32064), True, 'import tensorflow as tf\n'), ((32082, 32125), 'tensorflow.reshape', 'tf.reshape', (['net_E', '[2 * B, NP, k, k, k, -1]'], {}), '(net_E, [2 * B, NP, k, k, k, -1])\n', (32092, 32125), True, 'import tensorflow as tf\n'), ((32186, 32230), 'tensorflow.reshape', 'tf.reshape', (['net_E', '[2 * B * NP, k, k, k, -1]'], {}), '(net_E, [2 * B * NP, k, k, k, -1])\n', (32196, 32230), True, 'import tensorflow as tf\n'), ((35078, 35098), 'tensorflow.nn.relu6', 'tf.nn.relu6', (['(net + 3)'], {}), '(net + 3)\n', (35089, 35098), True, 'import tensorflow as tf\n'), ((39351, 39374), 'numpy.sqrt', 
'np.sqrt', (['Embedding_Size'], {}), '(Embedding_Size)\n', (39358, 39374), True, 'import numpy as np\n'), ((39423, 39454), 'numpy.power', 'np.power', (['Embedding_Size', '(1 / 3)'], {}), '(Embedding_Size, 1 / 3)\n', (39431, 39454), True, 'import numpy as np\n'), ((41949, 42004), 'tensorflow.stack', 'tf.stack', (['[X[ii, jj, ll], Y[ii, jj, ll], Z[ii, jj, ll]]'], {}), '([X[ii, jj, ll], Y[ii, jj, ll], Z[ii, jj, ll]])\n', (41957, 42004), True, 'import tensorflow as tf\n'), ((42056, 42072), 'tensorflow.stack', 'tf.stack', (['z_iijj'], {}), '(z_iijj)\n', (42064, 42072), True, 'import tensorflow as tf\n'), ((45187, 45211), 'numpy.power', 'np.power', (['num_vox', '(1 / 3)'], {}), '(num_vox, 1 / 3)\n', (45195, 45211), True, 'import numpy as np\n'), ((45924, 45939), 'numpy.floor', 'np.floor', (['(k / 2)'], {}), '(k / 2)\n', (45932, 45939), True, 'import numpy as np\n'), ((47589, 47618), 'tensorflow.abs', 'tf.abs', (['(loss_samples - labels)'], {}), '(loss_samples - labels)\n', (47595, 47618), True, 'import tensorflow as tf\n'), ((47824, 47863), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['pred_listAB[:, :, :, 0]'], {}), '(pred_listAB[:, :, :, 0])\n', (47838, 47863), True, 'import tensorflow as tf\n'), ((47885, 47924), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['pred_listBA[:, :, :, 0]'], {}), '(pred_listBA[:, :, :, 0])\n', (47899, 47924), True, 'import tensorflow as tf\n'), ((48173, 48196), 'numpy.sqrt', 'np.sqrt', (['Embedding_Size'], {}), '(Embedding_Size)\n', (48180, 48196), True, 'import numpy as np\n'), ((48242, 48273), 'numpy.power', 'np.power', (['Embedding_Size', '(1 / 3)'], {}), '(Embedding_Size, 1 / 3)\n', (48250, 48273), True, 'import numpy as np\n'), ((5497, 5509), 'tensorflow.abs', 'tf.abs', (['d_pi'], {}), '(d_pi)\n', (5503, 5509), True, 'import tensorflow as tf\n'), ((5576, 5588), 'tensorflow.abs', 'tf.abs', (['d_mu'], {}), '(d_mu)\n', (5582, 5588), True, 'import tensorflow as tf\n'), ((5662, 5677), 'tensorflow.abs', 'tf.abs', (['d_sigma'], {}), 
'(d_sigma)\n', (5668, 5677), True, 'import tensorflow as tf\n'), ((7317, 7342), 'tensorflow.cast', 'tf.cast', (['argmax', 'tf.int32'], {}), '(argmax, tf.int32)\n', (7324, 7342), True, 'import tensorflow as tf\n'), ((7727, 7752), 'tensorflow.cast', 'tf.cast', (['argmax', 'tf.int32'], {}), '(argmax, tf.int32)\n', (7734, 7752), True, 'import tensorflow as tf\n'), ((7991, 8016), 'tensorflow.cast', 'tf.cast', (['argmax', 'tf.int32'], {}), '(argmax, tf.int32)\n', (7998, 8016), True, 'import tensorflow as tf\n'), ((12114, 12167), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""dpdist_local_cnn_fc"""'], {'reuse': 'reuse'}), "('dpdist_local_cnn_fc', reuse=reuse)\n", (12131, 12167), True, 'import tensorflow as tf\n'), ((12198, 12372), 'tf_util.conv2d', 'tf_util.conv2d', (['net', '(64)', '[3, 3]'], {'padding': '"""SAME"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv1"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, 64, [3, 3], padding='SAME', stride=[1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv1', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (12212, 12372), False, 'import tf_util\n'), ((12568, 12743), 'tf_util.conv2d', 'tf_util.conv2d', (['net', '(64)', '[3, 3]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv2"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, 64, [3, 3], padding='VALID', stride=[1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv2', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (12582, 12743), False, 'import tf_util\n'), ((12977, 13012), 'tensorflow.reshape', 'tf.reshape', (['net', '[4 * B, NP, 1, -1]'], {}), '(net, [4 * B, NP, 1, -1])\n', (12987, 13012), True, 'import tensorflow as tf\n'), ((13067, 13246), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'mlp[2]', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 
'is_training': 'is_training', 'scope': '"""mapper_conv3"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, mlp[2], [1, 1], padding='VALID', stride=[1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv3', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (13081, 13246), False, 'import tf_util\n'), ((13443, 13647), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'output_size', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv4"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd', 'activation_fn': 'None'}), "(net, output_size, [1, 1], padding='VALID', stride=[1, 1], bn\n =bn, is_training=is_training, scope='mapper_conv4', bn_decay=bn_decay,\n reuse=reuse, weight_decay=wd, activation_fn=None)\n", (13457, 13647), False, 'import tf_util\n'), ((14767, 14820), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""dpdist_local_cnn_fc"""'], {'reuse': 'reuse'}), "('dpdist_local_cnn_fc', reuse=reuse)\n", (14784, 14820), True, 'import tensorflow as tf\n'), ((14851, 15025), 'tf_util.conv2d', 'tf_util.conv2d', (['net', '(64)', '[3, 3]'], {'padding': '"""SAME"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv1"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, 64, [3, 3], padding='SAME', stride=[1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv1', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (14865, 15025), False, 'import tf_util\n'), ((15250, 15425), 'tf_util.conv2d', 'tf_util.conv2d', (['net', '(64)', '[3, 3]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv2"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, 64, [3, 3], padding='VALID', stride=[1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv2', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (15264, 
15425), False, 'import tf_util\n'), ((15659, 15690), 'tensorflow.reshape', 'tf.reshape', (['net', '[B, NP, 1, -1]'], {}), '(net, [B, NP, 1, -1])\n', (15669, 15690), True, 'import tensorflow as tf\n'), ((15715, 15742), 'tensorflow.concat', 'tf.concat', (['[net, net_D]', '(-1)'], {}), '([net, net_D], -1)\n', (15724, 15742), True, 'import tensorflow as tf\n'), ((15796, 15975), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'mlp[2]', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv3"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, mlp[2], [1, 1], padding='VALID', stride=[1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv3', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (15810, 15975), False, 'import tf_util\n'), ((16172, 16376), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'output_size', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv4"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd', 'activation_fn': 'None'}), "(net, output_size, [1, 1], padding='VALID', stride=[1, 1], bn\n =bn, is_training=is_training, scope='mapper_conv4', bn_decay=bn_decay,\n reuse=reuse, weight_decay=wd, activation_fn=None)\n", (16186, 16376), False, 'import tf_util\n'), ((16952, 17005), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""dpdist_local_cnn_fc"""'], {'reuse': 'reuse'}), "('dpdist_local_cnn_fc', reuse=reuse)\n", (16969, 17005), True, 'import tensorflow as tf\n'), ((18370, 18401), 'tensorflow.reshape', 'tf.reshape', (['net', '[B, NP, 1, -1]'], {}), '(net, [B, NP, 1, -1])\n', (18380, 18401), True, 'import tensorflow as tf\n'), ((18436, 18463), 'tensorflow.concat', 'tf.concat', (['[net, net_D]', '(-1)'], {}), '([net, net_D], -1)\n', (18445, 18463), True, 'import tensorflow as tf\n'), ((18503, 18682), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'mlp[2]', '[1, 1]'], 
{'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv4"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, mlp[2], [1, 1], padding='VALID', stride=[1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv4', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (18517, 18682), False, 'import tf_util\n'), ((18877, 19081), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'output_size', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv5"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd', 'activation_fn': 'None'}), "(net, output_size, [1, 1], padding='VALID', stride=[1, 1], bn\n =bn, is_training=is_training, scope='mapper_conv5', bn_decay=bn_decay,\n reuse=reuse, weight_decay=wd, activation_fn=None)\n", (18891, 19081), False, 'import tf_util\n'), ((21424, 21446), 'tensorflow.expand_dims', 'tf.expand_dims', (['bv', '(-1)'], {}), '(bv, -1)\n', (21438, 21446), True, 'import tensorflow as tf\n'), ((22627, 22664), 'tensorflow.abs', 'tf.abs', (['(Centers[0][0] - Centers[1][0])'], {}), '(Centers[0][0] - Centers[1][0])\n', (22633, 22664), True, 'import tensorflow as tf\n'), ((22717, 22754), 'tensorflow.abs', 'tf.abs', (['(Centers[0][2] - Centers[1][2])'], {}), '(Centers[0][2] - Centers[1][2])\n', (22723, 22754), True, 'import tensorflow as tf\n'), ((27442, 27495), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""dpdist_local_cnn_fc"""'], {'reuse': 'reuse'}), "('dpdist_local_cnn_fc', reuse=reuse)\n", (27459, 27495), True, 'import tensorflow as tf\n'), ((27526, 27700), 'tf_util.conv2d', 'tf_util.conv2d', (['net', '(64)', '[3, 3]'], {'padding': '"""SAME"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv1"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, 64, [3, 3], padding='SAME', stride=[1, 1], bn=bn,\n is_training=is_training, 
scope='mapper_conv1', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (27540, 27700), False, 'import tf_util\n'), ((27896, 28071), 'tf_util.conv2d', 'tf_util.conv2d', (['net', '(64)', '[3, 3]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv2"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, 64, [3, 3], padding='VALID', stride=[1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv2', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (27910, 28071), False, 'import tf_util\n'), ((28305, 28340), 'tensorflow.reshape', 'tf.reshape', (['net', '[4 * B, NP, 1, -1]'], {}), '(net, [4 * B, NP, 1, -1])\n', (28315, 28340), True, 'import tensorflow as tf\n'), ((28395, 28574), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'mlp[2]', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv3"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, mlp[2], [1, 1], padding='VALID', stride=[1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv3', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (28409, 28574), False, 'import tf_util\n'), ((28771, 28975), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'output_size', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv4"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd', 'activation_fn': 'None'}), "(net, output_size, [1, 1], padding='VALID', stride=[1, 1], bn\n =bn, is_training=is_training, scope='mapper_conv4', bn_decay=bn_decay,\n reuse=reuse, weight_decay=wd, activation_fn=None)\n", (28785, 28975), False, 'import tf_util\n'), ((30047, 30100), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""dpdist_local_cnn_fc"""'], {'reuse': 'reuse'}), "('dpdist_local_cnn_fc', reuse=reuse)\n", (30064, 30100), True, 'import tensorflow as tf\n'), 
((30131, 30305), 'tf_util.conv2d', 'tf_util.conv2d', (['net', '(64)', '[3, 3]'], {'padding': '"""SAME"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv1"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, 64, [3, 3], padding='SAME', stride=[1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv1', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (30145, 30305), False, 'import tf_util\n'), ((30530, 30705), 'tf_util.conv2d', 'tf_util.conv2d', (['net', '(64)', '[3, 3]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv2"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, 64, [3, 3], padding='VALID', stride=[1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv2', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (30544, 30705), False, 'import tf_util\n'), ((30939, 30974), 'tensorflow.reshape', 'tf.reshape', (['net', '[4 * B, NP, 1, -1]'], {}), '(net, [4 * B, NP, 1, -1])\n', (30949, 30974), True, 'import tensorflow as tf\n'), ((30999, 31026), 'tensorflow.concat', 'tf.concat', (['[net, net_D]', '(-1)'], {}), '([net, net_D], -1)\n', (31008, 31026), True, 'import tensorflow as tf\n'), ((31080, 31259), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'mlp[2]', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv3"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, mlp[2], [1, 1], padding='VALID', stride=[1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv3', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (31094, 31259), False, 'import tf_util\n'), ((31456, 31660), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'output_size', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv4"""', 'bn_decay': 'bn_decay', 'reuse': 
'reuse', 'weight_decay': 'wd', 'activation_fn': 'None'}), "(net, output_size, [1, 1], padding='VALID', stride=[1, 1], bn\n =bn, is_training=is_training, scope='mapper_conv4', bn_decay=bn_decay,\n reuse=reuse, weight_decay=wd, activation_fn=None)\n", (31470, 31660), False, 'import tf_util\n'), ((32240, 32293), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""dpdist_local_cnn_fc"""'], {'reuse': 'reuse'}), "('dpdist_local_cnn_fc', reuse=reuse)\n", (32257, 32293), True, 'import tensorflow as tf\n'), ((32324, 32490), 'tf_util.conv3d', 'tf_util.conv3d', (['net', '(64)', '[1, 1, 1]'], {'padding': '"""SAME"""', 'stride': '[1, 1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv0"""', 'bn_decay': 'bn_decay', 'weight_decay': 'wd'}), "(net, 64, [1, 1, 1], padding='SAME', stride=[1, 1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv0', bn_decay=bn_decay,\n weight_decay=wd)\n", (32338, 32490), False, 'import tf_util\n'), ((33326, 33492), 'tf_util.conv3d', 'tf_util.conv3d', (['net', '(16)', '[1, 1, 1]'], {'padding': '"""SAME"""', 'stride': '[1, 1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv3"""', 'bn_decay': 'bn_decay', 'weight_decay': 'wd'}), "(net, 16, [1, 1, 1], padding='SAME', stride=[1, 1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv3', bn_decay=bn_decay,\n weight_decay=wd)\n", (33340, 33492), False, 'import tf_util\n'), ((33658, 33693), 'tensorflow.reshape', 'tf.reshape', (['net', '[2 * B, NP, 1, -1]'], {}), '(net, [2 * B, NP, 1, -1])\n', (33668, 33693), True, 'import tensorflow as tf\n'), ((33726, 33761), 'tensorflow.identity', 'tf.identity', (['net', '"""embedding_layer"""'], {}), "(net, 'embedding_layer')\n", (33737, 33761), True, 'import tensorflow as tf\n'), ((33784, 33811), 'tensorflow.concat', 'tf.concat', (['[net, net_D]', '(-1)'], {}), '([net, net_D], -1)\n', (33793, 33811), True, 'import tensorflow as tf\n'), ((34207, 34386), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'mlp[2]', '[1, 
1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv5"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd'}), "(net, mlp[2], [1, 1], padding='VALID', stride=[1, 1], bn=bn,\n is_training=is_training, scope='mapper_conv5', bn_decay=bn_decay, reuse\n =reuse, weight_decay=wd)\n", (34221, 34386), False, 'import tf_util\n'), ((34583, 34787), 'tf_util.conv2d', 'tf_util.conv2d', (['net', 'output_size', '[1, 1]'], {'padding': '"""VALID"""', 'stride': '[1, 1]', 'bn': 'bn', 'is_training': 'is_training', 'scope': '"""mapper_conv6"""', 'bn_decay': 'bn_decay', 'reuse': 'reuse', 'weight_decay': 'wd', 'activation_fn': 'None'}), "(net, output_size, [1, 1], padding='VALID', stride=[1, 1], bn\n =bn, is_training=is_training, scope='mapper_conv6', bn_decay=bn_decay,\n reuse=reuse, weight_decay=wd, activation_fn=None)\n", (34597, 34787), False, 'import tf_util\n'), ((7949, 7965), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (7957, 7965), True, 'import tensorflow as tf\n'), ((21334, 21359), 'tensorflow.cast', 'tf.cast', (['argmax', 'tf.int32'], {}), '(argmax, tf.int32)\n', (21341, 21359), True, 'import tensorflow as tf\n'), ((21768, 21793), 'tensorflow.cast', 'tf.cast', (['argmax', 'tf.int32'], {}), '(argmax, tf.int32)\n', (21775, 21793), True, 'import tensorflow as tf\n'), ((22052, 22077), 'tensorflow.cast', 'tf.cast', (['argmax', 'tf.int32'], {}), '(argmax, tf.int32)\n', (22059, 22077), True, 'import tensorflow as tf\n'), ((46719, 46774), 'tensorflow.stack', 'tf.stack', (['[X[ii, jj, ll], Y[ii, jj, ll], Z[ii, jj, ll]]'], {}), '([X[ii, jj, ll], Y[ii, jj, ll], Z[ii, jj, ll]])\n', (46727, 46774), True, 'import tensorflow as tf\n'), ((22006, 22022), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (22014, 22022), True, 'import tensorflow as tf\n'), ((7167, 7183), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (7175, 7183), True, 'import tensorflow as tf\n'), 
((7577, 7593), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (7585, 7593), True, 'import tensorflow as tf\n'), ((7903, 7919), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (7911, 7919), True, 'import tensorflow as tf\n'), ((7137, 7153), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (7145, 7153), True, 'import tensorflow as tf\n'), ((7279, 7295), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (7287, 7295), True, 'import tensorflow as tf\n'), ((7547, 7563), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (7555, 7563), True, 'import tensorflow as tf\n'), ((7689, 7705), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (7697, 7705), True, 'import tensorflow as tf\n'), ((7873, 7889), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (7881, 7889), True, 'import tensorflow as tf\n'), ((21176, 21192), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (21184, 21192), True, 'import tensorflow as tf\n'), ((21610, 21626), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (21618, 21626), True, 'import tensorflow as tf\n'), ((21956, 21972), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (21964, 21972), True, 'import tensorflow as tf\n'), ((41624, 41649), 'numpy.zeros', 'np.zeros', (['[batch_size, 1]'], {}), '([batch_size, 1])\n', (41632, 41649), True, 'import numpy as np\n'), ((7249, 7265), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (7257, 7265), True, 'import tensorflow as tf\n'), ((7659, 7675), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (7667, 7675), True, 'import tensorflow as tf\n'), ((21146, 21162), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (21154, 21162), True, 'import tensorflow as tf\n'), ((21292, 21308), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (21300, 21308), True, 'import tensorflow as tf\n'), ((21580, 21596), 
'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (21588, 21596), True, 'import tensorflow as tf\n'), ((21726, 21742), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (21734, 21742), True, 'import tensorflow as tf\n'), ((21926, 21942), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (21934, 21942), True, 'import tensorflow as tf\n'), ((21262, 21278), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (21270, 21278), True, 'import tensorflow as tf\n'), ((21696, 21712), 'tensorflow.shape', 'tf.shape', (['argmax'], {}), '(argmax)\n', (21704, 21712), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from netCDF4 import Dataset
import numpy as np
import numpy.ma as ma
import fiona
import sys
sys.path.append("..")
from data_preprocessing.utils import generate_doy
from data_preprocessing.preprocess import search_kdtree
def extract_shapefile():
    """Read the NWS precipitation point shapefile and cache lat/lon grids.

    Builds two (881, 1121) arrays indexed by (Hrapy, Hrapx) holding the
    latitude and longitude of every grid point that falls inside the HRAP
    grid (np.inf where no point maps), saves them as .npy files, and prints
    the largest Hrapx/Hrapy values encountered.
    """
    src = fiona.open('../../raw_data/nws_precip/nws_precip_allpoint_conversion/nws_precip_allpoint_conversion.shp')
    lat_grid = np.full((881, 1121), np.inf)
    lon_grid = np.full((881, 1121), np.inf)
    hrapx_max = -float('inf')
    hrapy_max = -float('inf')
    for record in src:
        props = record['properties']
        gx, gy = props['Hrapx'], props['Hrapy']
        hrapx_max = max(hrapx_max, gx)
        hrapy_max = max(hrapy_max, gy)
        pt_lon, pt_lat = record['geometry']['coordinates']
        # Only keep points that fall inside the fixed 881 x 1121 HRAP grid.
        if 0 <= gx < 1121 and 0 <= gy < 881:
            lat_grid[gy, gx] = pt_lat
            lon_grid[gy, gx] = pt_lon
    print(hrapx_max, hrapy_max)
    np.save('../../raw_data/nws_precip/nws_precip_allpoint_conversion/lats.npy', lat_grid)
    np.save('../../raw_data/nws_precip/nws_precip_allpoint_conversion/lons.npy', lon_grid)
def compute_closest_grid_point(lats, lons, lat, lon):
    """Locate the grid cell whose (lat, lon) is nearest the target point.

    Distance is Euclidean in degree space (no great-circle correction),
    which is the same metric the original lookup used.

    :param lats: 2-D array of grid latitudes
    :param lons: 2-D array of grid longitudes (same shape as ``lats``)
    :param lat: target latitude (anything accepted by ``float``)
    :param lon: target longitude (anything accepted by ``float``)
    :return: (row, col, distance) of the closest grid point
    """
    dy = lats - float(lat)
    dx = lons - float(lon)
    sq_dist = dy ** 2 + dx ** 2
    row, col = np.unravel_index(sq_dist.argmin(), sq_dist.shape)
    return row, col, np.sqrt(sq_dist[row, col])
def reproject_lat_lon():
    """Map every cell of the 500 m LAI reference grid onto the HRAP grid.

    Loads the cached HRAP lat/lon arrays, enumerates every (lat, lon)
    point of the reference grid in row-major order, and saves, for each
    point, the flat index of its nearest HRAP grid cell as computed by
    ``search_kdtree``.
    """
    hrap_lats = np.load('../../raw_data/nws_precip/nws_precip_allpoint_conversion/lats.npy')
    hrap_lons = np.load('../../raw_data/nws_precip/nws_precip_allpoint_conversion/lons.npy')
    fh_ref = Dataset('../../processed_data/lai/500m/20181028.nc', 'r')
    ref_lats = fh_ref.variables['lat'][:]
    ref_lons = fh_ref.variables['lon'][:]
    xv, yv = np.meshgrid(ref_lons, ref_lats)
    # Stack (lat, lon) pairs for the full reference grid, row-major order.
    points = np.dstack([yv.ravel(), xv.ravel()])[0]
    print('Finish building points')
    nearest = search_kdtree(hrap_lats, hrap_lons, points)
    np.save('../../raw_data/nws_precip/nws_precip_allpoint_conversion/projected_indices_lai_500m.npy', nearest)
def reproject_nws_precip(doy):
    """Reproject one daily NWS precipitation file onto the 500 m LAI grid.

    Copies the reference grid's lat/lon coordinate variables into a new
    netCDF file, then fills a 'precip' variable by looking up, for each
    reference-grid cell, the precomputed nearest HRAP cell and copying its
    observed value (masked cells become the -9999.9 fill value).

    :param doy: day-of-year string used in the input/output file names
    """
    print(doy)
    fh_ref = Dataset('../../processed_data/lai/500m/20181028.nc', 'r')
    fh_in = Dataset('../../raw_data/nws_precip/{}/nws_precip_1day_{}_conus.nc'.format(doy, doy), 'r')
    fh_out = Dataset('../../processed_data/nws_precip/500m/{}.nc'.format(doy), 'w')
    ref_lats, ref_lons = fh_ref.variables['lat'][:], fh_ref.variables['lon'][:]
    n_lat, n_lon = len(ref_lats), len(ref_lons)
    # Mirror the reference file's dimensions and its lat/lon coordinate
    # variables (including their attributes) into the output file.
    for name, dim in fh_ref.dimensions.items():
        fh_out.createDimension(name, len(dim))
    for v_name, varin in fh_ref.variables.items():
        if v_name in ['lat', 'lon']:
            outVar = fh_out.createVariable(v_name, varin.datatype, (v_name,))
            outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()})
            outVar[:] = varin[:]
    # 'observation' is a masked array; masked cells are skipped below.
    observed_values = fh_in.variables['observation'][:]
    projected_values = np.full((n_lat, n_lon), -9999.9)
    projected_indices = \
        np.load('../../raw_data/nws_precip/nws_precip_allpoint_conversion/projected_indices_lai_500m.npy')
    projected_i = 0
    for i in range(n_lat):
        for j in range(n_lon):
            # Decode the flat HRAP index into (row, col); the row is flipped
            # with '881 - idx // 1121'.
            # NOTE(review): for idx // 1121 == 0 this yields row 881, one past
            # the last valid row of an 881-row grid -- confirm whether this
            # should be 880 - idx // 1121.
            proj_i, proj_j = 881 - projected_indices[projected_i] // 1121, projected_indices[projected_i] % 1121
            if not observed_values.mask[proj_i, proj_j]:
                projected_values[i, j] = observed_values[proj_i, proj_j]
            projected_i += 1
    outVar = fh_out.createVariable('precip', 'f4', ('lat', 'lon'))
    # Re-mask the fill value so downstream readers see missing data as masked.
    outVar[:] = ma.masked_equal(projected_values, -9999.9)
    fh_in.close()
    fh_ref.close()
    fh_out.close()
if __name__ == '__main__':
    # One-off preprocessing steps; uncomment to regenerate the cached grids.
    # extract_shapefile()
    # reproject_lat_lon()
    # Reproject one daily precipitation file for each day in the range.
    for doy in generate_doy('20171227', '20171231', ''):
        reproject_nws_precip(doy)
| [
"sys.path.append",
"numpy.full",
"numpy.load",
"numpy.save",
"netCDF4.Dataset",
"numpy.meshgrid",
"fiona.open",
"data_preprocessing.preprocess.search_kdtree",
"numpy.multiply",
"data_preprocessing.utils.generate_doy",
"numpy.ma.masked_equal"
] | [((294, 315), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (309, 315), False, 'import sys\n'), ((466, 581), 'fiona.open', 'fiona.open', (['"""../../raw_data/nws_precip/nws_precip_allpoint_conversion/nws_precip_allpoint_conversion.shp"""'], {}), "(\n '../../raw_data/nws_precip/nws_precip_allpoint_conversion/nws_precip_allpoint_conversion.shp'\n )\n", (476, 581), False, 'import fiona\n'), ((584, 612), 'numpy.full', 'np.full', (['(881, 1121)', 'np.inf'], {}), '((881, 1121), np.inf)\n', (591, 612), True, 'import numpy as np\n'), ((624, 652), 'numpy.full', 'np.full', (['(881, 1121)', 'np.inf'], {}), '((881, 1121), np.inf)\n', (631, 652), True, 'import numpy as np\n'), ((1124, 1210), 'numpy.save', 'np.save', (['"""../../raw_data/nws_precip/nws_precip_allpoint_conversion/lats.npy"""', 'lats'], {}), "('../../raw_data/nws_precip/nws_precip_allpoint_conversion/lats.npy',\n lats)\n", (1131, 1210), True, 'import numpy as np\n'), ((1211, 1297), 'numpy.save', 'np.save', (['"""../../raw_data/nws_precip/nws_precip_allpoint_conversion/lons.npy"""', 'lons'], {}), "('../../raw_data/nws_precip/nws_precip_allpoint_conversion/lons.npy',\n lons)\n", (1218, 1297), True, 'import numpy as np\n'), ((1599, 1675), 'numpy.load', 'np.load', (['"""../../raw_data/nws_precip/nws_precip_allpoint_conversion/lats.npy"""'], {}), "('../../raw_data/nws_precip/nws_precip_allpoint_conversion/lats.npy')\n", (1606, 1675), True, 'import numpy as np\n'), ((1687, 1763), 'numpy.load', 'np.load', (['"""../../raw_data/nws_precip/nws_precip_allpoint_conversion/lons.npy"""'], {}), "('../../raw_data/nws_precip/nws_precip_allpoint_conversion/lons.npy')\n", (1694, 1763), True, 'import numpy as np\n'), ((1778, 1835), 'netCDF4.Dataset', 'Dataset', (['"""../../processed_data/lai/500m/20181028.nc"""', '"""r"""'], {}), "('../../processed_data/lai/500m/20181028.nc', 'r')\n", (1785, 1835), False, 'from netCDF4 import Dataset\n'), ((1930, 1961), 'numpy.meshgrid', 'np.meshgrid', (['ref_lons', 
'ref_lats'], {}), '(ref_lons, ref_lats)\n', (1941, 1961), True, 'import numpy as np\n'), ((2064, 2097), 'data_preprocessing.preprocess.search_kdtree', 'search_kdtree', (['lats', 'lons', 'points'], {}), '(lats, lons, points)\n', (2077, 2097), False, 'from data_preprocessing.preprocess import search_kdtree\n'), ((2102, 2219), 'numpy.save', 'np.save', (['"""../../raw_data/nws_precip/nws_precip_allpoint_conversion/projected_indices_lai_500m.npy"""', 'results'], {}), "(\n '../../raw_data/nws_precip/nws_precip_allpoint_conversion/projected_indices_lai_500m.npy'\n , results)\n", (2109, 2219), True, 'import numpy as np\n'), ((2271, 2328), 'netCDF4.Dataset', 'Dataset', (['"""../../processed_data/lai/500m/20181028.nc"""', '"""r"""'], {}), "('../../processed_data/lai/500m/20181028.nc', 'r')\n", (2278, 2328), False, 'from netCDF4 import Dataset\n'), ((3098, 3130), 'numpy.full', 'np.full', (['(n_lat, n_lon)', '(-9999.9)'], {}), '((n_lat, n_lon), -9999.9)\n', (3105, 3130), True, 'import numpy as np\n'), ((3165, 3273), 'numpy.load', 'np.load', (['"""../../raw_data/nws_precip/nws_precip_allpoint_conversion/projected_indices_lai_500m.npy"""'], {}), "(\n '../../raw_data/nws_precip/nws_precip_allpoint_conversion/projected_indices_lai_500m.npy'\n )\n", (3172, 3273), True, 'import numpy as np\n'), ((3698, 3740), 'numpy.ma.masked_equal', 'ma.masked_equal', (['projected_values', '(-9999.9)'], {}), '(projected_values, -9999.9)\n', (3713, 3740), True, 'import numpy.ma as ma\n'), ((3894, 3934), 'data_preprocessing.utils.generate_doy', 'generate_doy', (['"""20171227"""', '"""20171231"""', '""""""'], {}), "('20171227', '20171231', '')\n", (3906, 3934), False, 'from data_preprocessing.utils import generate_doy\n'), ((1420, 1447), 'numpy.multiply', 'np.multiply', (['d_lats', 'd_lats'], {}), '(d_lats, d_lats)\n', (1431, 1447), True, 'import numpy as np\n'), ((1450, 1477), 'numpy.multiply', 'np.multiply', (['d_lons', 'd_lons'], {}), '(d_lons, d_lons)\n', (1461, 1477), True, 'import numpy as 
np\n')] |
"""
Compare Plot
============
_thumb: .5, .5
"""
import arviz as az
import numpy as np
import pymc3 as pm
az.style.use('arviz-darkgrid')
# Data of the Eight Schools Model: observed treatment effects (y) and their
# standard errors (sigma) for J = 8 schools.
J = 8
y = np.array([28., 8., -3., 7., -1., 1., 18., 12.])
sigma = np.array([15., 10., 16., 11., 9., 11., 10., 18.])
# Centered parameterization: theta is sampled directly with mean mu and
# scale tau.
with pm.Model('Centered Eight Schools') as centered_eight:
    mu = pm.Normal('mu', mu=0, sd=5)
    tau = pm.HalfCauchy('tau', beta=5)
    theta = pm.Normal('theta', mu=mu, sd=tau, shape=J)
    obs = pm.Normal('obs', mu=theta, sd=sigma, observed=y)
    centered_eight_trace = pm.sample()
# Non-centered parameterization: theta = mu + tau * theta_tilde, with
# theta_tilde a standard normal.
with pm.Model('Non-Centered Eight Schools') as non_centered:
    mu = pm.Normal('mu', mu=0, sd=5)
    tau = pm.HalfCauchy('tau', beta=5)
    theta_tilde = pm.Normal('theta_t', mu=0, sd=1, shape=J)
    theta = pm.Deterministic('theta', mu + tau * theta_tilde)
    obs = pm.Normal('obs', mu=theta, sd=sigma, observed=y)
    non_centered_eight_trace = pm.sample()
# Compare the two fitted models and draw the comparison plot.
model_compare = az.compare({
    centered_eight: centered_eight_trace,
    non_centered: non_centered_eight_trace
})
az.compareplot(model_compare, figsize=(12, 4))
| [
"pymc3.sample",
"arviz.compareplot",
"pymc3.Model",
"pymc3.Deterministic",
"pymc3.Normal",
"arviz.style.use",
"pymc3.HalfCauchy",
"numpy.array",
"arviz.compare"
] | [((108, 138), 'arviz.style.use', 'az.style.use', (['"""arviz-darkgrid"""'], {}), "('arviz-darkgrid')\n", (120, 138), True, 'import arviz as az\n'), ((184, 239), 'numpy.array', 'np.array', (['[28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0]'], {}), '([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0])\n', (192, 239), True, 'import numpy as np\n'), ((243, 300), 'numpy.array', 'np.array', (['[15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0]'], {}), '([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0])\n', (251, 300), True, 'import numpy as np\n'), ((965, 1059), 'arviz.compare', 'az.compare', (['{centered_eight: centered_eight_trace, non_centered: non_centered_eight_trace}'], {}), '({centered_eight: centered_eight_trace, non_centered:\n non_centered_eight_trace})\n', (975, 1059), True, 'import arviz as az\n'), ((1067, 1113), 'arviz.compareplot', 'az.compareplot', (['model_compare'], {'figsize': '(12, 4)'}), '(model_compare, figsize=(12, 4))\n', (1081, 1113), True, 'import arviz as az\n'), ((301, 335), 'pymc3.Model', 'pm.Model', (['"""Centered Eight Schools"""'], {}), "('Centered Eight Schools')\n", (309, 335), True, 'import pymc3 as pm\n'), ((364, 391), 'pymc3.Normal', 'pm.Normal', (['"""mu"""'], {'mu': '(0)', 'sd': '(5)'}), "('mu', mu=0, sd=5)\n", (373, 391), True, 'import pymc3 as pm\n'), ((402, 430), 'pymc3.HalfCauchy', 'pm.HalfCauchy', (['"""tau"""'], {'beta': '(5)'}), "('tau', beta=5)\n", (415, 430), True, 'import pymc3 as pm\n'), ((443, 485), 'pymc3.Normal', 'pm.Normal', (['"""theta"""'], {'mu': 'mu', 'sd': 'tau', 'shape': 'J'}), "('theta', mu=mu, sd=tau, shape=J)\n", (452, 485), True, 'import pymc3 as pm\n'), ((496, 544), 'pymc3.Normal', 'pm.Normal', (['"""obs"""'], {'mu': 'theta', 'sd': 'sigma', 'observed': 'y'}), "('obs', mu=theta, sd=sigma, observed=y)\n", (505, 544), True, 'import pymc3 as pm\n'), ((572, 583), 'pymc3.sample', 'pm.sample', ([], {}), '()\n', (581, 583), True, 'import pymc3 as pm\n'), ((591, 629), 'pymc3.Model', 'pm.Model', (['"""Non-Centered Eight 
Schools"""'], {}), "('Non-Centered Eight Schools')\n", (599, 629), True, 'import pymc3 as pm\n'), ((656, 683), 'pymc3.Normal', 'pm.Normal', (['"""mu"""'], {'mu': '(0)', 'sd': '(5)'}), "('mu', mu=0, sd=5)\n", (665, 683), True, 'import pymc3 as pm\n'), ((694, 722), 'pymc3.HalfCauchy', 'pm.HalfCauchy', (['"""tau"""'], {'beta': '(5)'}), "('tau', beta=5)\n", (707, 722), True, 'import pymc3 as pm\n'), ((741, 782), 'pymc3.Normal', 'pm.Normal', (['"""theta_t"""'], {'mu': '(0)', 'sd': '(1)', 'shape': 'J'}), "('theta_t', mu=0, sd=1, shape=J)\n", (750, 782), True, 'import pymc3 as pm\n'), ((795, 844), 'pymc3.Deterministic', 'pm.Deterministic', (['"""theta"""', '(mu + tau * theta_tilde)'], {}), "('theta', mu + tau * theta_tilde)\n", (811, 844), True, 'import pymc3 as pm\n'), ((855, 903), 'pymc3.Normal', 'pm.Normal', (['"""obs"""'], {'mu': 'theta', 'sd': 'sigma', 'observed': 'y'}), "('obs', mu=theta, sd=sigma, observed=y)\n", (864, 903), True, 'import pymc3 as pm\n'), ((935, 946), 'pymc3.sample', 'pm.sample', ([], {}), '()\n', (944, 946), True, 'import pymc3 as pm\n')] |
import pyviennacl as p
from . import _viennacl
from numpy import (ndarray, array,
result_type as np_result_type)
import logging
# Attach a default stream handler to the 'pyviennacl' package logger so that
# library log records are visible even when the application configures no
# logging of its own.
default_log_handler = logging.StreamHandler()
default_log_handler.setFormatter(logging.Formatter(
    "%(levelname)s %(asctime)s %(name)s %(lineno)d %(funcName)s\n %(message)s"
))
logging.getLogger('pyviennacl').addHandler(default_log_handler)
def fix_operand(opand, node=None):
    """Coerce *opand* into a form usable as a PyViennaCL expression operand.

    * Python lists and NumPy ``ndarray``s are converted to ``Vector`` /
      ``Matrix`` via :func:`from_ndarray`.
    * Host scalar values are wrapped in ``HostScalar``; when *node* is
      given, the scalar is first cast to the node's result dtype.
    * A flushed ``Node`` is replaced by its ``result``; a node marked
      ``no_fix`` is returned unchanged.

    :param opand: the operand to normalise (list, ndarray, scalar or Node)
    :param node: optional expression node whose result type is used when
        wrapping a host scalar
    :returns: an object suitable for use in an expression tree
    """
    if isinstance(opand, list):
        opand = from_ndarray(array(opand))
    if isinstance(opand, ndarray):
        return from_ndarray(opand)
    if (np_result_type(opand).name in p.HostScalarTypes
        and not isinstance(opand, p.MagicMethods)):
        if node is None:
            return p.HostScalar(opand)
        else:
            # Cast the scalar to the node's result dtype before wrapping.
            return p.HostScalar(np_result_type(node).type(opand))
    if isinstance(opand, p.Node):
        if opand.flushed:
            return opand.result
        if opand.no_fix:
            return opand
        # TODO: REMOVE NEXT TEST
        # Eagerly flush unary operations (except transpose) to their result,
        # unless this operand or the enclosing node is an Assign.
        if opand.operation_node_type_family == _viennacl.operation_node_type_family.OPERATION_UNARY_TYPE_FAMILY and not (type(opand) == p.Assign or type(node) == p.Assign) and not opand.operation_node_type == _viennacl.operation_node_type.OPERATION_UNARY_TRANS_TYPE:
            opand.no_fix = True
            return opand.result
    return opand
def from_ndarray(obj):
    """
    Convert a NumPy ``ndarray`` into a PyViennaCL object of appropriate
    dimensionality.

    :param obj: array-like with 1 or 2 dimensions
    :returns: a ``Vector`` if ``obj`` has 1 dimension, a ``Matrix`` if 2
    :raises: AttributeError
       If ``obj`` has less than 1 or more than 2 dimensions.
    """
    if obj.ndim == 1:
        new = p.Vector(obj)
    elif obj.ndim == 2:
        new = p.Matrix(obj)
    else:
        # Bug fix: the original referenced the undefined name
        # ``self.operands[0].ndim`` here, which raised NameError at runtime
        # instead of the documented AttributeError with the dimension count.
        raise AttributeError("Cannot cope with %d dimensions!" % obj.ndim)
    return new
# Public API of this helper module.
__all__ = ['fix_operand', 'from_ndarray']
| [
"pyviennacl.Matrix",
"numpy.result_type",
"logging.StreamHandler",
"logging.Formatter",
"pyviennacl.HostScalar",
"numpy.array",
"pyviennacl.Vector",
"logging.getLogger"
] | [((170, 193), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (191, 193), False, 'import logging\n'), ((227, 334), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s %(asctime)s %(name)s %(lineno)d %(funcName)s\n %(message)s"""'], {}), '(\n """%(levelname)s %(asctime)s %(name)s %(lineno)d %(funcName)s\n %(message)s"""\n )\n', (244, 334), False, 'import logging\n'), ((329, 360), 'logging.getLogger', 'logging.getLogger', (['"""pyviennacl"""'], {}), "('pyviennacl')\n", (346, 360), False, 'import logging\n'), ((1776, 1789), 'pyviennacl.Vector', 'p.Vector', (['obj'], {}), '(obj)\n', (1784, 1789), True, 'import pyviennacl as p\n'), ((525, 537), 'numpy.array', 'array', (['opand'], {}), '(opand)\n', (530, 537), False, 'from numpy import ndarray, array, result_type as np_result_type\n'), ((761, 780), 'pyviennacl.HostScalar', 'p.HostScalar', (['opand'], {}), '(opand)\n', (773, 780), True, 'import pyviennacl as p\n'), ((1828, 1841), 'pyviennacl.Matrix', 'p.Matrix', (['obj'], {}), '(obj)\n', (1836, 1841), True, 'import pyviennacl as p\n'), ((617, 638), 'numpy.result_type', 'np_result_type', (['opand'], {}), '(opand)\n', (631, 638), True, 'from numpy import ndarray, array, result_type as np_result_type\n'), ((827, 847), 'numpy.result_type', 'np_result_type', (['node'], {}), '(node)\n', (841, 847), True, 'from numpy import ndarray, array, result_type as np_result_type\n')] |
from .abstract_detection_method import DetectionMethod
from ..GeneralClassesFunctions.simulation_functions import set_kwargs_attrs
import numpy as np
import scipy.special
class TieredDetect(DetectionMethod):
    """
    Two-tiered leak detection method.

    A fast preliminary survey screens whole sites using a site-level
    probability-of-detection curve (parameters ``mu``/``lam``); sites
    flagged by that screen then receive a slower component-level follow-up
    survey with its own curve (``mu2``/``lam2``). Both curves are
    log-normal in leak flux and independent of any specific plume model.
    """
    def __init__(self, time, gas_field, **kwargs):
        """
        Inputs:
        gas_field    a gas_field object (Defined in feast_classes)
        time         a time object (Defined in feast_classes)
        kwargs       optional input dictionary that will override default parameters
        """
        DetectionMethod.__init__(self, time, gas_field)
        # -------------- Hardware variables --------------
        self.lifetime = 10 * 365 # days
        # -------------- Process Variables --------------
        self.survey_interval = 365*0.5 # days
        self.sites_per_day = 4 # defines first tier speed...can be a dict for multiple site types
        self.labor = 100 # dollars/hour
        # -------------- Detection Variables -------------
        # mu/lam: site-level POD curve parameters; mu2/lam2: component-level.
        self.mu = 0.474
        self.lam = 3.88
        self.mu2 = 0.00185
        self.lam2 = 2.23
        # Operating hours in hhmm clock units (0800-1700).
        self.ophrs = {'begin': 800, "end": 1700}
        self.insurvey = False
        self.surveyed_index = 0
        self.prelim_survey_time = 0
        self.secondary_survey_time = 0
        # Components inspected per hour during the secondary survey.
        self.secondary_comps_hr = 150
        # Allow any of the defaults above to be overridden via kwargs.
        set_kwargs_attrs(self, kwargs)
        # -------------- Set calculated parameters --------------
        # Dividing hhmm differences by 100 converts them to hours.
        self.work_time = (self.ophrs['end'] - self.ophrs['begin']) / 100 # hours/day on average
        self.survey_time = gas_field.n_sites / self.sites_per_day * self.work_time # hours
        # time_factor accounts for the finite simulation size. The effective capital cost is
        # reduced in the simulation based on the ratio of the sites in the
        # simulation to the number of sites that could be surveyed if there were enough wells to use the tech every day.
        self.time_factor = self.survey_time / (self.survey_interval * self.work_time)
        self.secondary_survey_cost = np.zeros(time.n_timesteps)
        # leaks_per_timestep is calculated based on the survey speed and number of leaks at the beginning of each survey
        self.leaks_per_timestep = 0
        # Precompute log-space POD parameters used in the erf expressions.
        self.loglam = np.log(self.lam)
        self.logmu = np.log(self.mu)
        self.loglam2 = np.log(self.lam2)
        self.logmu2 = np.log(self.mu2)
        self.site_survey_index = 0
        self.comp_survey_index = 0
        self.repair_delay = 0 # days
        # -------------- Financial Properties --------------
        self.capital_0 = 0 * self.time_factor # dollars (defaults to zero)
        self.maintenance_0 = self.capital_0 * 0.1 # dollars/year
        self.capital = np.zeros(time.n_timesteps)
        self.replacement_cap(time)
        # maintenance costs are estimated as 10% of capital per year
        self.maintenance = [self.maintenance_0 * time.delta_t / 365, ] * time.n_timesteps # $
        # survey_cost is the cost to survey all wells in the natural gas field
        self.survey_cost = self.labor * self.survey_time
        # find_cost is the cost of searching for leaks; charge the full
        # survey cost at each time step that starts a survey.
        for ind in range(0, time.n_timesteps):
            curr_time = ind * time.delta_t
            if curr_time % self.survey_interval < time.delta_t:
                self.find_cost[ind] = self.survey_cost
    @staticmethod
    def time_in_ophrs(ct, et, op_begin, op_end):
        """Return the hours of [ct, et] that fall inside operating hours.

        All arguments are clock times in hhmm units; the /100 converts the
        hhmm overlap back to hours.
        """
        return max(min(et, op_end) - max(ct, op_begin), 0) / 100
    def sites_surveyed(self, time):
        """
        Computes the number of sites surveyed in a time step, allowing for time steps that are any fraction of a day
        or longer than a day and enforcing the operating hours defined for the detection technology
        :param time:
        :return nsites: the number of sites expected to be surveyed this time step
        """
        ct = 0
        nsites = 0
        # Count whole days first; each contributes a full day of surveying.
        while ct + 1 <= time.delta_t:
            ct += 1
            nsites += self.sites_per_day
        # Convert the fractional remainder of the step to hhmm clock units.
        et = (time.delta_t - ct) * 2400
        ct = np.mod(time.current_time, 1) * 2400
        et += ct
        surv_hrs = self.time_in_ophrs(ct, et, self.ophrs['begin'], self.ophrs['end'])
        # If the fractional window wraps past midnight, add the overlap of
        # the next day's portion with operating hours as well.
        if et > 2400:
            ct = 0
            et -= 2400
            surv_hrs += self.time_in_ophrs(ct, et, self.ophrs['begin'], self.ophrs['end'])
        nsites += surv_hrs / self.work_time * self.sites_per_day
        # Round stochastically: the fractional site is surveyed with
        # probability equal to the fraction.
        return int(nsites) + np.random.binomial(1, np.mod(nsites, 1))
    def detection(self, time, gas_field):
        """
        The detection method applies a probability of detection curve
        Inputs:
            time        an object of type Time (defined in feast_classes)
            gas_field   an object of type GasField (defined in feast_classes)
        """
        # Apply the baseline detection behavior inherited from DetectionMethod.
        self.null_detection(time, gas_field)
        # Start a new survey at each survey_interval boundary.
        if time.current_time % self.survey_interval < time.delta_t:
            self.insurvey = True
        if self.insurvey:
            end_survey = False
            n_sites_surveyed = self.sites_surveyed(time)
            if n_sites_surveyed > 0:
                # Clamp to the sites remaining in this survey pass.
                if n_sites_surveyed + self.site_survey_index > gas_field.n_sites:
                    n_sites_surveyed = gas_field.n_sites - self.site_survey_index
                    end_survey = True
                # Tier 1: total leak flux per site drives a site-level
                # log-normal probability-of-detection curve.
                site_flux = np.zeros(n_sites_surveyed)
                for site_ind in range(self.site_survey_index, self.site_survey_index + n_sites_surveyed):
                    site_flux[site_ind - self.site_survey_index] = \
                        np.sum(self.leaks.flux[self.leaks.site_index == site_ind])
                scores = np.random.uniform(0, 1, n_sites_surveyed)
                cond = site_flux > 0
                probs = np.zeros(n_sites_surveyed)
                probs[cond] = 0.5 + 0.5 * scipy.special.erf((np.log(site_flux[cond]) - self.logmu) /
                                                            (self.loglam * np.sqrt(2)))
                sites_flagged = np.where(scores < probs)[0] + self.site_survey_index
                self.prelim_survey_time += n_sites_surveyed / self.sites_per_day * self.work_time
                if end_survey:
                    self.insurvey = False
                    self.site_survey_index = 0
                # NOTE(review): when end_survey is True the index is reset
                # above and then incremented here, so the next survey pass
                # starts at a nonzero index -- confirm this is intentional.
                self.site_survey_index += n_sites_surveyed
                # Tier 2: component-level follow-up on every flagged site.
                secondary_survey_time = 0
                for site_ind in sites_flagged:
                    site_name = self.find_site_name(gas_field, site_ind)
                    n_comps = gas_field.sites[site_name]['parameters'].max_comp_ind
                    secondary_survey_time += n_comps / self.secondary_comps_hr
                    cond = np.where((self.leaks.site_index == site_ind) &
                                    (self.leaks.flux > 0))[0]
                    scores = np.random.uniform(0, 1, len(cond))
                    probs = 0.5 + 0.5 * scipy.special.erf((np.log(self.leaks.flux[cond]) - self.logmu2) /
                                                        (self.loglam2 * np.sqrt(2)))
                    detect = cond[scores < probs]
                    # Charge a sampled repair cost per reparable detected leak.
                    self.repair_cost[time.time_index] += \
                        np.sum(np.random.choice(gas_field.repair_cost_dist.repair_costs,
                                                np.sum(self.leaks.reparable[detect])))
                    # Reparable detected leaks end after the repair delay.
                    self.leaks.endtime[detect[self.leaks.reparable[detect]]] = time.current_time + self.repair_delay
                self.secondary_survey_cost[time.time_index] = self.labor * secondary_survey_time
| [
"numpy.random.uniform",
"numpy.sum",
"numpy.log",
"numpy.zeros",
"numpy.mod",
"numpy.where",
"numpy.sqrt"
] | [((2274, 2300), 'numpy.zeros', 'np.zeros', (['time.n_timesteps'], {}), '(time.n_timesteps)\n', (2282, 2300), True, 'import numpy as np\n'), ((2480, 2496), 'numpy.log', 'np.log', (['self.lam'], {}), '(self.lam)\n', (2486, 2496), True, 'import numpy as np\n'), ((2518, 2533), 'numpy.log', 'np.log', (['self.mu'], {}), '(self.mu)\n', (2524, 2533), True, 'import numpy as np\n'), ((2557, 2574), 'numpy.log', 'np.log', (['self.lam2'], {}), '(self.lam2)\n', (2563, 2574), True, 'import numpy as np\n'), ((2597, 2613), 'numpy.log', 'np.log', (['self.mu2'], {}), '(self.mu2)\n', (2603, 2613), True, 'import numpy as np\n'), ((2949, 2975), 'numpy.zeros', 'np.zeros', (['time.n_timesteps'], {}), '(time.n_timesteps)\n', (2957, 2975), True, 'import numpy as np\n'), ((4279, 4307), 'numpy.mod', 'np.mod', (['time.current_time', '(1)'], {}), '(time.current_time, 1)\n', (4285, 4307), True, 'import numpy as np\n'), ((4689, 4706), 'numpy.mod', 'np.mod', (['nsites', '(1)'], {}), '(nsites, 1)\n', (4695, 4706), True, 'import numpy as np\n'), ((5540, 5566), 'numpy.zeros', 'np.zeros', (['n_sites_surveyed'], {}), '(n_sites_surveyed)\n', (5548, 5566), True, 'import numpy as np\n'), ((5850, 5891), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'n_sites_surveyed'], {}), '(0, 1, n_sites_surveyed)\n', (5867, 5891), True, 'import numpy as np\n'), ((5953, 5979), 'numpy.zeros', 'np.zeros', (['n_sites_surveyed'], {}), '(n_sites_surveyed)\n', (5961, 5979), True, 'import numpy as np\n'), ((5766, 5824), 'numpy.sum', 'np.sum', (['self.leaks.flux[self.leaks.site_index == site_ind]'], {}), '(self.leaks.flux[self.leaks.site_index == site_ind])\n', (5772, 5824), True, 'import numpy as np\n'), ((6201, 6225), 'numpy.where', 'np.where', (['(scores < probs)'], {}), '(scores < probs)\n', (6209, 6225), True, 'import numpy as np\n'), ((6883, 6952), 'numpy.where', 'np.where', (['((self.leaks.site_index == site_ind) & (self.leaks.flux > 0))'], {}), '((self.leaks.site_index == site_ind) & (self.leaks.flux > 
0))\n', (6891, 6952), True, 'import numpy as np\n'), ((7495, 7531), 'numpy.sum', 'np.sum', (['self.leaks.reparable[detect]'], {}), '(self.leaks.reparable[detect])\n', (7501, 7531), True, 'import numpy as np\n'), ((6041, 6064), 'numpy.log', 'np.log', (['site_flux[cond]'], {}), '(site_flux[cond])\n', (6047, 6064), True, 'import numpy as np\n'), ((6156, 6166), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6163, 6166), True, 'import numpy as np\n'), ((7115, 7144), 'numpy.log', 'np.log', (['self.leaks.flux[cond]'], {}), '(self.leaks.flux[cond])\n', (7121, 7144), True, 'import numpy as np\n'), ((7236, 7246), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7243, 7246), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020, <NAME> <<EMAIL>>
# Bioindustrial-Park: BioSTEAM's Premier Biorefinery Models and Results
# Copyright (C) 2020, <NAME> <<EMAIL>>,
# <NAME> <<EMAIL>>, and <NAME> (this biorefinery)
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
Created on Wed Jul 22 19:48:14 2020
Modified from the biorefineries constructed in [1] and [2] for the production of
lactic acid from lignocellulosic feedstocks
[1] Cortes-Peña et al., BioSTEAM: A Fast and Flexible Platform for the Design,
Simulation, and Techno-Economic Analysis of Biorefineries under Uncertainty.
ACS Sustainable Chem. Eng. 2020, 8 (8), 3302–3310.
https://doi.org/10.1021/acssuschemeng.9b07040
[2] Li et al., Tailored Pretreatment Processes for the Sustainable Design of
Lignocellulosic Biorefineries across the Feedstock Landscape. Submitted,
2020.
@author: yalinli_cabbi
"""
# %%
# =============================================================================
# Setup
# =============================================================================
import numpy as np
import pandas as pd
from biosteam.utils import TicToc
from lactic.system import R301
from lactic.analyses import models
# %%
# =============================================================================
# Evaluate across feedstock price and carbohydrate content
# =============================================================================
# Initiate a timer
# Script: evaluate the lactic-acid biorefinery model across feedstock
# carbohydrate contents (0.25-0.70 dw%) at baseline parameter values, then
# export TEA (MPSP) and LCA (GWP, FEC) results to an Excel workbook.
timer = TicToc('timer')
timer.tic()
model = models.model_carbs_price
# Enable the titer limit on fermenter R301 (limit value lives in lactic.system)
R301.set_titer_limit = True
set_carbs = models.set_carbs
prices = models.prices
'''Evaluate'''
np.random.seed(3221)
# This is not a Monte Carlo simulation, this evaluation uses the baseline parameters
# to see the impacts of feedstock carbohydrate content
# The parameter is a fake one to enable the evaluation
N_simulation = 1
samples_1d = model.sample(N=N_simulation, rule='L')
samples = samples_1d[:, np.newaxis]
model.load_samples(samples)
carb_contents = np.arange(0.25, 0.701, 0.01)
# NOTE(review): 'Carbohydate' typo below is a runtime label string; left as-is.
data = model.evaluate_across_coordinate(
    'Carbohydate content', set_carbs, carb_contents, notify=True)
results = pd.DataFrame({
    ('Parameter', 'Carbohydrate content [dw%]'): carb_contents})
for i in data.keys():
    results[i] = data[i][0]
'''Organize data for easy plotting'''
# Repeat the carbohydrate-content axis once per price scenario so the TEA
# table has one (content, price, MPSP) row per combination.
TEA_x = [i for i in carb_contents]
TEA_x *= len(prices)
TEA_y = sum(([i]*len(carb_contents) for i in prices), [])
# NOTE(review): only the first sub-list of each pair is ever filled below;
# the second [] appears unused -- confirm before removing.
MPSPs = [[], []]
GWPs = [[], []]
FECs = [[], []]
# Route each result column to the matching metric list by column name.
for i in range(results.columns.shape[0]):
    if 'MPSP' in results.columns[i][1]:
        MPSPs[0] += results[results.columns[i]].to_list()
    if 'GWP' in results.columns[i][1]:
        GWPs[0] += results[results.columns[i]].to_list()
    if 'FEC' in results.columns[i][1]:
        FECs[0] += results[results.columns[i]].to_list()
TEA_plot_data = pd.DataFrame({
    'Carbohydrate content [dw%]': TEA_x,
    'Price [$/dry-ton]': TEA_y,
    'MPSP [$/kg]': MPSPs[0]
})
LCA_plot_data = pd.DataFrame({
    'Carbohydrate content [dw%]': carb_contents,
    'GWP [kg CO2-eq/kg]': GWPs[0],
    'FEC [MJ/kg]': FECs[0]
})
'''Output to Excel'''
with pd.ExcelWriter('3_carbs-price.xlsx') as writer:
    TEA_plot_data.to_excel(writer, sheet_name='TEA plotting')
    LCA_plot_data.to_excel(writer, sheet_name='LCA plotting')
    results.to_excel(writer, sheet_name='Raw data')
run_number = samples.shape[0]*len(carb_contents)
time = timer.elapsed_time / 60
print(f'\nSimulation time for {run_number} runs is: {time:.1f} min')
| [
"pandas.DataFrame",
"numpy.random.seed",
"biosteam.utils.TicToc",
"numpy.arange",
"pandas.ExcelWriter"
] | [((1684, 1699), 'biosteam.utils.TicToc', 'TicToc', (['"""timer"""'], {}), "('timer')\n", (1690, 1699), False, 'from biosteam.utils import TicToc\n'), ((1842, 1862), 'numpy.random.seed', 'np.random.seed', (['(3221)'], {}), '(3221)\n', (1856, 1862), True, 'import numpy as np\n'), ((2208, 2236), 'numpy.arange', 'np.arange', (['(0.25)', '(0.701)', '(0.01)'], {}), '(0.25, 0.701, 0.01)\n', (2217, 2236), True, 'import numpy as np\n'), ((2355, 2429), 'pandas.DataFrame', 'pd.DataFrame', (["{('Parameter', 'Carbohydrate content [dw%]'): carb_contents}"], {}), "({('Parameter', 'Carbohydrate content [dw%]'): carb_contents})\n", (2367, 2429), True, 'import pandas as pd\n'), ((3041, 3149), 'pandas.DataFrame', 'pd.DataFrame', (["{'Carbohydrate content [dw%]': TEA_x, 'Price [$/dry-ton]': TEA_y,\n 'MPSP [$/kg]': MPSPs[0]}"], {}), "({'Carbohydrate content [dw%]': TEA_x, 'Price [$/dry-ton]':\n TEA_y, 'MPSP [$/kg]': MPSPs[0]})\n", (3053, 3149), True, 'import pandas as pd\n'), ((3181, 3299), 'pandas.DataFrame', 'pd.DataFrame', (["{'Carbohydrate content [dw%]': carb_contents, 'GWP [kg CO2-eq/kg]': GWPs[0],\n 'FEC [MJ/kg]': FECs[0]}"], {}), "({'Carbohydrate content [dw%]': carb_contents,\n 'GWP [kg CO2-eq/kg]': GWPs[0], 'FEC [MJ/kg]': FECs[0]})\n", (3193, 3299), True, 'import pandas as pd\n'), ((3346, 3382), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['"""3_carbs-price.xlsx"""'], {}), "('3_carbs-price.xlsx')\n", (3360, 3382), True, 'import pandas as pd\n')] |
import os
import time
import torch
import numpy as np
import inspect
from contextlib import contextmanager
import subprocess
def int_tuple(s):
    """Parse a comma-separated string of integers into a tuple, e.g. "1,2" -> (1, 2)."""
    parts = s.split(',')
    return tuple(map(int, parts))
def find_nan(variable, var_name):
    """Terminate the process if `variable` (a torch tensor/Variable) contains any NaN."""
    values = variable.data.cpu().numpy()
    has_nan = np.isnan(values).any()
    if has_nan:
        exit('%s has nan' % var_name)
def bool_flag(s):
    """Parse a command-line bool flag: '1' -> True, '0' -> False, anything else raises ValueError."""
    mapping = {'1': True, '0': False}
    if s in mapping:
        return mapping[s]
    msg = 'Invalid value "%s" for bool flag (should be 0 or 1)'
    raise ValueError(msg % s)
def lineno():
    """Return the caller's current line number as a string (for logging/debugging)."""
    caller = inspect.currentframe().f_back
    return str(caller.f_lineno)
def get_total_norm(parameters, norm_type=2):
    """Return the total gradient norm over `parameters`.

    For norm_type == inf this is the largest absolute gradient entry of any
    parameter; otherwise it is (sum_p ||grad_p||^norm_type)^(1/norm_type).
    Parameters whose ``.grad`` is None are skipped.

    Fixes over the previous version:
    - the 1/norm_type root was taken inside the loop on every iteration,
      which distorted the result whenever more than one parameter had a
      gradient; it is now applied once, after accumulation.
    - the bare ``except:`` is narrowed to AttributeError (raised when
      ``p.grad`` is None and ``.data`` is accessed).
    """
    if norm_type == float('inf'):
        return max(p.grad.data.abs().max() for p in parameters)
    accum = 0
    for p in parameters:
        try:
            param_norm = p.grad.data.norm(norm_type)
        except AttributeError:
            # p.grad is None: the parameter has no gradient yet; ignore it.
            continue
        accum += param_norm ** norm_type
    return accum ** (1. / norm_type)
@contextmanager
def timeit(msg, should_time=True):
    """Context manager printing how long the wrapped block took, in ms.

    CUDA is synchronized before and after timing so queued GPU work is
    included. With ``should_time=False`` the block simply runs, untimed.
    """
    if not should_time:
        yield
        return
    torch.cuda.synchronize()
    begin = time.time()
    yield
    torch.cuda.synchronize()
    end = time.time()
    elapsed_ms = (end - begin) * 1000.0
    print('%s: %.2f ms' % (msg, elapsed_ms))
def get_gpu_memory():
    """Return the "Used GPU Memory" value reported by nvidia-smi for GPU 1.

    Shells out to ``nvidia-smi`` and scrapes its text output, so it only
    works on a machine with the NVIDIA driver installed. The returned int
    is the first number on the matching line (presumably MiB -- confirm
    against the driver's output format).
    """
    # Make sure all queued CUDA work has landed before sampling memory.
    torch.cuda.synchronize()
    # The '|' and grep are interpreted by the shell because shell=True below.
    opts = [
        'nvidia-smi', '-q', '--gpu=' + str(1), '|', 'grep', '"Used GPU Memory"'
    ]
    cmd = str.join(' ', opts)
    ps = subprocess.Popen(
        cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output = ps.communicate()[0].decode('utf-8')
    # First matching line looks like "Used GPU Memory : <n> MiB"; split on
    # ':' and take the leading number.
    output = output.split("\n")[0].split(":")
    consumed_mem = int(output[1].strip().split(" ")[0])
    return consumed_mem
def get_dset_path(dset_name, dset_type):
    """Build the path ``<repo root>/datasets/<dset_name>/<dset_type>``.

    The repo root is taken to be two directory levels above this file.
    """
    here = os.path.dirname(__file__)
    root_parts = here.split("/")[:-2]
    root = "/".join(root_parts)
    return os.path.join(root, 'datasets', dset_name, dset_type)
def relative_to_abs(rel_traj, start_pos):
    """
    Inputs:
    - rel_traj: pytorch tensor of shape (seq_len, batch, 2)
    - start_pos: pytorch tensor of shape (batch, 2)
    Outputs:
    - abs_traj: pytorch tensor of shape (seq_len, batch, 2)
    """
    # Work in (batch, seq_len, 2) so the cumulative sum runs along time.
    batch_first = rel_traj.permute(1, 0, 2)
    cumulative = torch.cumsum(batch_first, dim=1)
    origin = start_pos.unsqueeze(1)
    absolute = cumulative + origin
    # Restore the original (seq_len, batch, 2) layout.
    return absolute.permute(1, 0, 2)
| [
"torch.cuda.synchronize",
"subprocess.Popen",
"os.path.dirname",
"numpy.isnan",
"time.time",
"torch.cumsum",
"inspect.currentframe",
"torch.unsqueeze",
"os.path.join"
] | [((1404, 1428), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1426, 1428), False, 'import torch\n'), ((1567, 1655), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.STDOUT'}), '(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess\n .STDOUT)\n', (1583, 1655), False, 'import subprocess\n'), ((1889, 1914), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1904, 1914), False, 'import os\n'), ((1984, 2036), 'os.path.join', 'os.path.join', (['_dir', '"""datasets"""', 'dset_name', 'dset_type'], {}), "(_dir, 'datasets', dset_name, dset_type)\n", (1996, 2036), False, 'import os\n'), ((2394, 2423), 'torch.cumsum', 'torch.cumsum', (['rel_traj'], {'dim': '(1)'}), '(rel_traj, dim=1)\n', (2406, 2423), False, 'import torch\n'), ((2440, 2473), 'torch.unsqueeze', 'torch.unsqueeze', (['start_pos'], {'dim': '(1)'}), '(start_pos, dim=1)\n', (2455, 2473), False, 'import torch\n'), ((1153, 1177), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1175, 1177), False, 'import torch\n'), ((1191, 1202), 'time.time', 'time.time', ([], {}), '()\n', (1200, 1202), False, 'import time\n'), ((1241, 1265), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1263, 1265), False, 'import torch\n'), ((1279, 1290), 'time.time', 'time.time', ([], {}), '()\n', (1288, 1290), False, 'import time\n'), ((280, 300), 'numpy.isnan', 'np.isnan', (['variable_n'], {}), '(variable_n)\n', (288, 300), True, 'import numpy as np\n'), ((568, 590), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (588, 590), False, 'import inspect\n')] |
#Module used to approximate the minimum-distance function to a given point cloud
#using a neural network trained with tensorflow
import time
import os
import numpy as np
import sys
import random
import math
import ColorFilters
import pickle
import StandardBody
import scipy as sp
from scipy.spatial import cKDTree
from scipy.stats import norm
import tensorflow as tf
# Network architecture: depth and width of the hidden fully-connected stack.
num_middle_layers = 6
middle_layer_width = 200
# Gradient-descent step size used by the optimizer in approximate().
train_step_size = 1e-3
#ACTIV = tf.nn.elu
ACTIV = tf.nn.leaky_relu
#With this chance, use a collection of points
#which have distance zero as the training batch
zero_hammer_prob = 0.05
training_iters = 100000
#training_iters = 100
# Points per training batch; VIEW_AFTER controls how often loss is printed.
BATCH_SIZE = 20000
VIEW_AFTER = 100
def fcLayer(inputs, num_outputs, reuse, scope):
    """Fully-connected layer using the module-wide activation ACTIV."""
    layer = tf.contrib.layers.fully_connected(
        inputs,
        num_outputs,
        activation_fn=ACTIV,
        weights_regularizer=None,
        reuse=reuse,
        scope=scope)
    return layer
def fcLinLayer(inputs, num_outputs, reuse, scope):
    """Fully-connected layer with no activation (linear output)."""
    layer = tf.contrib.layers.fully_connected(
        inputs,
        num_outputs,
        activation_fn=None,
        reuse=reuse,
        scope=scope)
    return layer
def approxNetwork(x, reuse, namePrefix, output_dimension=1):
    """Stack of num_middle_layers FC layers followed by a linear head.

    Variable scopes are named "<namePrefix>FC<i>" and
    "<namePrefix>CompressLinear" so the graph can be reloaded by name.
    """
    hidden = x
    for layer_idx in range(num_middle_layers):
        scope_name = namePrefix + "FC" + str(layer_idx)
        with tf.variable_scope(scope_name) as s:
            with tf.name_scope("FC" + str(layer_idx)):
                hidden = fcLayer(hidden, middle_layer_width, reuse, s)
    with tf.variable_scope(namePrefix + "CompressLinear") as s:
        lin_out = fcLinLayer(hidden, output_dimension, reuse, s)
    return lin_out
def randomRows(A, num_rows):
    """Sample `num_rows` rows of the 2-D array A uniformly, with replacement."""
    row_ids = np.random.choice(A.shape[0], num_rows, replace=True)
    return A[row_ids, :]
def approximate(namePrePrefix, pointSubset, pointList):
    """Train a network approximating the squared (scaled) min-distance to pointSubset.

    Builds a TF1 graph whose input is a 3-D point (in the same units as
    `pointList`, internally scaled by 0.001) and whose target is the squared
    scaled distance to the nearest point of `pointSubset` (computed exactly
    with a k-d tree). The trained model is saved under
    "./<namePrePrefix>Metric/".
    """
    kdTree = cKDTree(pointSubset)
    #We'll pick points by picking a random (spherical normal) offset
    #from randomly-chosen points in the given point list
    variance = 100000.0
    mean_vec = np.array([0.0, 0.0, 0.0], dtype=np.float32)
    covar_mat = np.array([[variance, 0, 0], [0, variance, 0], [0, 0, variance]], dtype=np.float32)
    #The name prefix for all variable scopes
    namePrefix = namePrePrefix + "Metric"
    in_points = tf.placeholder(tf.float32, [None, 3], name=(namePrefix + "In"))
    small_points = in_points * 0.001
    # Augment the scaled coordinates with their squares and pairwise
    # cross-terms (a degree-2 polynomial feature map).
    crossterm_one = small_points[:, 0] * small_points[:, 1]
    crossterm_two = small_points[:, 1] * small_points[:, 2]
    crossterm_three = small_points[:, 0] * small_points[:, 2]
    crossterms = tf.stack([crossterm_one, crossterm_two, crossterm_three], axis=1)
    poly_aug_in_points = tf.concat([small_points, tf.square(small_points), crossterms], axis=1)
    approx_norm_network = approxNetwork(poly_aug_in_points, False, namePrefix=namePrefix)
    approx_norm_out = tf.identity(approx_norm_network, name=(namePrefix + "Out"))
    target_norms = tf.placeholder(tf.float32, [None, 1])
    with tf.name_scope('loss'):
        # L1 loss against the squared scaled target distance: the network
        # learns squared distance, not distance itself.
        loss = tf.losses.absolute_difference(approx_norm_out, tf.square(target_norms * .001))
    with tf.name_scope('adam_optimizer'):
        # NOTE(review): scope says "adam" but the optimizer is plain SGD.
        train_step = tf.train.GradientDescentOptimizer(train_step_size).minimize(loss)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    check = tf.add_check_numerics_ops()
    start = time.time()
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        batchNum = 0
        # NOTE(review): `start` is re-assigned here; the outer assignment
        # above is effectively dead.
        start = time.time()
        num_exact = int(float(BATCH_SIZE) * zero_hammer_prob)
        num_fuzzed = BATCH_SIZE - num_exact
        for i in range(training_iters):
            #Pick a random collection of points on the target manifold
            exactPoints = randomRows(pointSubset, num_exact)
            #Pick a random collection of points from the input point list
            fuzzedPoints = randomRows(pointList, num_fuzzed)
            #Compute normally-distributed offsets for them
            offsets = np.random.multivariate_normal(mean_vec, covar_mat, size=num_fuzzed)
            fuzzedPoints = fuzzedPoints + offsets
            allPoints = np.vstack((exactPoints, fuzzedPoints))
            #Great, now for each fuzzed point, compute the actual distances to the original point cloud
            actualDistances, _ = kdTree.query(allPoints)
            actualDistances = np.reshape(actualDistances, (BATCH_SIZE, 1))
            #Okay, now run a training step
            batchNum += 1
            sess.run([train_step, check], feed_dict={in_points : allPoints, target_norms : actualDistances})
            if (i % VIEW_AFTER == 0):
                train_loss = loss.eval(feed_dict={in_points : allPoints, target_norms : actualDistances})
                print("Batches per second: ", batchNum / (time.time() - start))
                # Report RMS-style loss back in millimeter-scale units.
                train_loss = math.sqrt(train_loss) * 1000.0
                print("Step %d, training loss %g mm" % (i, train_loss))
        saver.save(sess, "./" + namePrefix + "/" + namePrefix)
#Dictionary from names to color filters
colorFilterDict = {"GreenLeg" : ColorFilters.maskGreenLeg,
                   "YellowArm" : ColorFilters.maskYellowArm,
                   "RedArm" : ColorFilters.maskRedArm,
                   "RedHand" : ColorFilters.maskRedHand,
                   "YellowHand" : ColorFilters.maskYellowHand,
                   "WhiteLeg" : ColorFilters.maskWhiteLegInTemplate,
                   "Torso" : ColorFilters.maskTorso}
# The body-part name is taken from the command line and must be one of the
# keys above (a KeyError otherwise).
partName = sys.argv[1]
colorFilter = colorFilterDict[partName]
#Load the colored body template RGB Point cloud
coloredTemplateFile = "ColoredTemplate.pickle"
# NOTE(review): the file handle from open() is never closed here.
coloredBody = pickle.load(open(coloredTemplateFile, "rb"))
coloredBody.indices = np.zeros((np.asarray(coloredBody.points).shape[0], 2))
#From the colored body, apply the color filter and a statistical filter
coloredBody.applyColorFilter(colorFilter, negated=True)
coloredBody.applyLargestComponentFilter()
pointSubset = np.asarray(coloredBody.getPoints())
# Train the distance-approximation network for this part (see approximate()).
approximate(partName, pointSubset, StandardBody.pointArray)
| [
"tensorflow.add_check_numerics_ops",
"tensorflow.identity",
"tensorflow.ConfigProto",
"scipy.spatial.cKDTree",
"tensorflow.GPUOptions",
"tensorflow.variable_scope",
"tensorflow.stack",
"tensorflow.placeholder",
"numpy.reshape",
"numpy.random.choice",
"tensorflow.name_scope",
"tensorflow.train.... | [((749, 880), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['inputs', 'num_outputs'], {'activation_fn': 'ACTIV', 'weights_regularizer': 'None', 'reuse': 'reuse', 'scope': 'scope'}), '(inputs, num_outputs, activation_fn=ACTIV,\n weights_regularizer=None, reuse=reuse, scope=scope)\n', (782, 880), True, 'import tensorflow as tf\n'), ((977, 1081), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['inputs', 'num_outputs'], {'activation_fn': 'None', 'reuse': 'reuse', 'scope': 'scope'}), '(inputs, num_outputs, activation_fn=None,\n reuse=reuse, scope=scope)\n', (1010, 1081), True, 'import tensorflow as tf\n'), ((1706, 1726), 'scipy.spatial.cKDTree', 'cKDTree', (['pointSubset'], {}), '(pointSubset)\n', (1713, 1726), False, 'from scipy.spatial import cKDTree\n'), ((1892, 1935), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {'dtype': 'np.float32'}), '([0.0, 0.0, 0.0], dtype=np.float32)\n', (1900, 1935), True, 'import numpy as np\n'), ((1952, 2039), 'numpy.array', 'np.array', (['[[variance, 0, 0], [0, variance, 0], [0, 0, variance]]'], {'dtype': 'np.float32'}), '([[variance, 0, 0], [0, variance, 0], [0, 0, variance]], dtype=np.\n float32)\n', (1960, 2039), True, 'import numpy as np\n'), ((2141, 2202), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 3]'], {'name': "(namePrefix + 'In')"}), "(tf.float32, [None, 3], name=namePrefix + 'In')\n", (2155, 2202), True, 'import tensorflow as tf\n'), ((2447, 2512), 'tensorflow.stack', 'tf.stack', (['[crossterm_one, crossterm_two, crossterm_three]'], {'axis': '(1)'}), '([crossterm_one, crossterm_two, crossterm_three], axis=1)\n', (2455, 2512), True, 'import tensorflow as tf\n'), ((2723, 2780), 'tensorflow.identity', 'tf.identity', (['approx_norm_network'], {'name': "(namePrefix + 'Out')"}), "(approx_norm_network, name=namePrefix + 'Out')\n", (2734, 2780), True, 'import tensorflow as tf\n'), ((2803, 2840), 
'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]'], {}), '(tf.float32, [None, 1])\n', (2817, 2840), True, 'import tensorflow as tf\n'), ((3117, 3167), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.2)'}), '(per_process_gpu_memory_fraction=0.2)\n', (3130, 3167), True, 'import tensorflow as tf\n'), ((3180, 3207), 'tensorflow.add_check_numerics_ops', 'tf.add_check_numerics_ops', ([], {}), '()\n', (3205, 3207), True, 'import tensorflow as tf\n'), ((3221, 3232), 'time.time', 'time.time', ([], {}), '()\n', (3230, 3232), False, 'import time\n'), ((1390, 1438), 'tensorflow.variable_scope', 'tf.variable_scope', (["(namePrefix + 'CompressLinear')"], {}), "(namePrefix + 'CompressLinear')\n", (1407, 1438), True, 'import tensorflow as tf\n'), ((2851, 2872), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (2864, 2872), True, 'import tensorflow as tf\n'), ((2978, 3009), 'tensorflow.name_scope', 'tf.name_scope', (['"""adam_optimizer"""'], {}), "('adam_optimizer')\n", (2991, 3009), True, 'import tensorflow as tf\n'), ((3379, 3395), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3393, 3395), True, 'import tensorflow as tf\n'), ((3434, 3445), 'time.time', 'time.time', ([], {}), '()\n', (3443, 3445), False, 'import time\n'), ((1579, 1631), 'numpy.random.choice', 'np.random.choice', (['A.shape[0]', 'num_rows'], {'replace': '(True)'}), '(A.shape[0], num_rows, replace=True)\n', (1595, 1631), True, 'import numpy as np\n'), ((2565, 2588), 'tensorflow.square', 'tf.square', (['small_points'], {}), '(small_points)\n', (2574, 2588), True, 'import tensorflow as tf\n'), ((2936, 2967), 'tensorflow.square', 'tf.square', (['(target_norms * 0.001)'], {}), '(target_norms * 0.001)\n', (2945, 2967), True, 'import tensorflow as tf\n'), ((3327, 3360), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3358, 3360), True, 'import tensorflow as tf\n'), 
((3941, 4008), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean_vec', 'covar_mat'], {'size': 'num_fuzzed'}), '(mean_vec, covar_mat, size=num_fuzzed)\n', (3970, 4008), True, 'import numpy as np\n'), ((4084, 4122), 'numpy.vstack', 'np.vstack', (['(exactPoints, fuzzedPoints)'], {}), '((exactPoints, fuzzedPoints))\n', (4093, 4122), True, 'import numpy as np\n'), ((4326, 4370), 'numpy.reshape', 'np.reshape', (['actualDistances', '(BATCH_SIZE, 1)'], {}), '(actualDistances, (BATCH_SIZE, 1))\n', (4336, 4370), True, 'import numpy as np\n'), ((3032, 3082), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['train_step_size'], {}), '(train_step_size)\n', (3065, 3082), True, 'import tensorflow as tf\n'), ((3260, 3299), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (3274, 3299), True, 'import tensorflow as tf\n'), ((5681, 5711), 'numpy.asarray', 'np.asarray', (['coloredBody.points'], {}), '(coloredBody.points)\n', (5691, 5711), True, 'import numpy as np\n'), ((4805, 4826), 'math.sqrt', 'math.sqrt', (['train_loss'], {}), '(train_loss)\n', (4814, 4826), False, 'import math\n'), ((4754, 4765), 'time.time', 'time.time', ([], {}), '()\n', (4763, 4765), False, 'import time\n')] |
import json
import logging
import os
from collections import Counter, defaultdict
from datetime import date, datetime, timedelta
from itertools import zip_longest
from operator import itemgetter
from statistics import mean
import numpy as np
import timeago
from beem.account import Account
from beem.comment import Comment
from bson import json_util
from dateutil.parser import parse
from flask import Flask, abort, jsonify, render_template
from flask_cors import CORS
from flask_restful import Api, Resource
from pymongo import MongoClient
from webargs import fields, validate
from webargs.flaskparser import abort, parser, use_args, use_kwargs
# Voting-power budgets (NOTE(review): not referenced in this part of the
# file; presumably percentages of total voting power -- confirm against
# the voting logic defined later).
VP_TOTAL = 18.0
VP_COMMENTS = 5.0
# Per-category weighting table (currently uniform; also used later in the file).
CATEGORY_WEIGHTING = {
    "ideas": 10.0,
    "development": 10.0,
    "bug-hunting": 10.0,
    "translations": 10.0,
    "graphics": 10.0,
    "analysis": 10.0,
    "social": 10.0,
    "documentation": 10.0,
    "tutorials": 10.0,
    "video-tutorials": 10.0,
    "copywriting": 10.0,
    "blog": 10.0,
    "anti-abuse": 10.0,
    "task-request": 10.0,
    "iamutopian": 10.0,
}
# Presumably the points a moderator earns per review, by category -- confirm.
MODERATION_REWARD = {
    "ideas": 11.0,
    "development": 15.0,
    "graphics": 13.0,
    "bug-hunting": 12.0,
    "analysis": 13.0,
    "social": 10.0,
    "video-tutorials": 13.0,
    "tutorials": 13.0,
    "copywriting": 10.0,
    "documentation": 10.0,
    "blog": 11.0,
    "translations": 13.0,
    "iamutopian": 11.0,
    "anti-abuse": 11.0,
    "task-request": 7.5,
}
# Score needed for a vote
MIN_SCORE = 10
# Logging
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
LOGGER = logging.getLogger("utopian-io")
LOGGER.setLevel(logging.INFO)
FH = logging.FileHandler(f"{DIR_PATH}/test.log")
FH.setLevel(logging.DEBUG)
FORMATTER = logging.Formatter(
    "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
FH.setFormatter(FORMATTER)
LOGGER.addHandler(FH)
# Mongo and Flask
CLIENT = MongoClient()
DB = CLIENT.utempian
app = Flask(__name__)
CORS(app)
api = Api(app)
@app.template_filter("timeago")
def time_ago(date):
    """Jinja template filter rendering a datetime as a relative "time ago" string."""
    relative = timeago.format(date)
    return relative
@app.errorhandler(404)
def page_not_found(e):
    """Serve the custom 404 template together with the matching status code."""
    body = render_template("404.html")
    return (body, 404)
@app.route("/api/moderators")
def moderaors():
    """Return the account name of every moderator as a JSON array.

    (Function name keeps its historical misspelling; it is the registered
    route handler.)
    """
    accounts = [entry["account"] for entry in DB.moderators.find()]
    return jsonify(accounts)
@app.route("/json/<json_file>")
def rewards(json_file):
    """Return all moderator's points for the given week.

    Serves the static file ``<static_folder>/<json_file>.json`` as JSON,
    responding 404 when the file is missing or malformed. (The route
    converter does not match '/', so `json_file` cannot traverse
    directories.)

    Fix: the previous bare ``except:`` swallowed *every* exception --
    including SystemExit/KeyboardInterrupt and errors raised by jsonify
    itself. It is narrowed to the two failures that actually mean "not
    found": OSError (file missing/unreadable) and ValueError (invalid
    JSON; json.JSONDecodeError subclasses ValueError). jsonify now runs
    outside the try, so serialization bugs surface as 500s instead of
    being masked as 404s.
    """
    filename = os.path.join(app.static_folder, "{}.json".format(json_file))
    try:
        with open(filename) as fp:
            data = json.load(fp)
    except (OSError, ValueError):
        abort(404)
    return jsonify(data)
@app.route("/")
def index():
    """Render the homepage with every contribution still awaiting review."""
    cursor = DB.contributions.find({"status": "unreviewed"})
    pending = list(cursor)
    return render_template("index.html", contributions=pending)
def without_score(contribution):
    """Return a shallow copy of `contribution` with the "score" key removed."""
    trimmed = dict(contribution)
    trimmed.pop("score", None)
    return trimmed
class ContributionResource(Resource):
    """Endpoint for contributions in the spreadsheet.

    GET /api/posts -- every query parameter below is matched verbatim
    against documents in the `contributions` collection, except `skip`
    and `limit`, which paginate the Mongo cursor.
    """
    query_parameters = {
        "category": fields.Str(),
        "status": fields.Str(),
        "author": fields.Str(),
        "moderator": fields.Str(),
        "staff_picked": fields.Bool(),
        "review_status": fields.Str(),
        "url": fields.Str(),
        "voted_on": fields.Bool(),
        "repository": fields.Str(),
        "beneficiaries_set": fields.Bool(),
        "is_vipo": fields.Bool(),
        "valid_age": fields.Bool(),
        "skip": fields.Int(),
        "limit": fields.Int(),
    }
    @use_args(query_parameters)
    def get(self, query_parameters):
        """Uses the given query parameters to search for contributions in the
        database.
        """
        # Everything except skip/limit becomes the Mongo filter document.
        parameters = {key: value for key, value in query_parameters.items()
                      if key != "skip" and key != "limit"}
        contributions = DB.contributions.find(parameters)
        if "skip" in query_parameters.keys():
            contributions = contributions.skip(query_parameters["skip"])
        if "limit" in query_parameters.keys():
            contributions = contributions.limit(query_parameters["limit"])
        # convert() normalises each document; the bson round-trip makes
        # ObjectId/datetime values JSON-serialisable.
        contributions = [json.loads(json_util.dumps(convert(c)))
                         for c in contributions]
        return jsonify(contributions)
class BannedUsersResource(Resource):
    """Endpoint for banned users in the spreadsheet.

    GET /api/bannedUsers -- filter the `users` collection by optional
    `name` and `banned` query parameters.
    """
    query_parameters = {
        "name": fields.Str(),
        "banned": fields.Bool()
    }
    @use_args(query_parameters)
    def get(self, query_parameters):
        """Return users matching the query, serialised to JSON via bson."""
        banned_users = [json.loads(json_util.dumps(user))
                        for user in DB.users.find(query_parameters)]
        return jsonify(banned_users)
def string_to_date(date_input):
    """Convert a string to a datetime.

    "today" -> midnight of the current day, "now" -> the current moment;
    anything else is handed to dateutil's parser. Unparseable input aborts
    the request with HTTP 422.
    """
    if date_input == "today":
        today = date.today()
        return datetime(today.year, today.month, today.day)
    if date_input == "now":
        return datetime.now()
    try:
        return parse(date_input)
    except Exception as error:
        abort(422, errors=str(error))
def average(score):
    """Mean of the given list of scores; 0 when the list is empty or invalid."""
    try:
        result = mean(score)
    except Exception:
        result = 0
    return result
def percentage(reviewed, voted):
    """Percentage of reviewed contributions that were voted; 100 when nothing reviewed."""
    if reviewed == 0:
        return 100.0
    return 100.0 * voted / reviewed
def moderator_statistics(contributions):
    """Returns a dictionary containing statistics about all moderators.

    Skips unreviewed contributions and those marked "BANNED". For each
    moderator the result lists the reviewed categories (as Counter pairs),
    the mean score, and the mean score ignoring zero scores.
    """
    moderators = {}
    for contribution in contributions:
        if contribution["status"] == "unreviewed":
            continue
        moderator = contribution["moderator"]
        # If contribution was submitted by banned user skip it
        if moderator == "BANNED":
            continue
        # Set default in case moderator doesn't exist
        moderators.setdefault(
            moderator, {
                "moderator": moderator,
                "category": [],
                "average_score": [],
                "average_without_0": []
            }
        )
        # Append scores and categories
        moderators[moderator]["average_score"].append(contribution["score"])
        moderators[moderator]["category"].append(contribution["category"])
        if contribution["score"] > 0:
            moderators[moderator]["average_without_0"].append(
                contribution["score"])
    moderator_list = []
    for moderator, value in moderators.items():
        # Set new keys and append value to list
        # (the raw lists collected above are collapsed to aggregates here)
        value["category"] = Counter(value["category"]).most_common()
        value["average_score"] = average(value["average_score"])
        value["average_without_0"] = average(value["average_without_0"])
        moderator_list.append(value)
    return {"moderators": moderator_list}
def category_statistics(contributions):
    """Returns a dictionary containing statistics about all categories.

    Every reviewed contribution is tallied twice: once under its own
    category and once under the synthetic "all" category. Per category the
    result tracks vote/no-vote counts, score averages, payout totals, and
    Utopian vote statistics.
    """
    categories = {}
    # Pre-create the aggregate "all" bucket (it also carries a
    # "task-requests" counter that the per-category buckets lack).
    categories.setdefault(
        "all", {
            "category": "all",
            "average_score": [],
            "average_without_0": [],
            "voted": 0,
            "not_voted": 0,
            "unvoted": 0,
            "rewardable": 0,
            "task-requests": 0,
            "moderators": [],
            "rewarded_contributors": [],
            "total_payout": 0,
            "utopian_total": [],
            "authors_vote_weights": defaultdict(list),
            "authors_scores": defaultdict(list)
        }
    )
    for contribution in contributions:
        # Don't count unreviewed contributions
        if contribution["status"] == "unreviewed":
            continue
        category = contribution["category"]
        moderator = contribution["moderator"]
        author = contribution["author"]
        score = contribution["score"]
        total_payout = contribution["total_payout"]
        utopian_vote = contribution["utopian_vote"]
        # Set default in case category doesn't exist
        categories.setdefault(
            category, {
                "category": category,
                "average_score": [],
                "average_without_0": [],
                "voted": 0,
                "not_voted": 0,
                "unvoted": 0,
                "rewardable": 0,
                "moderators": [],
                "rewarded_contributors": [],
                "total_payout": 0,
                "utopian_total": [],
                "authors_vote_weights": defaultdict(list),
                "authors_scores": defaultdict(list)
            }
        )
        # Check if contribution was voted on or unvoted
        # (loop rebinds `category` to tally the real category, then "all")
        for category in [category, "all"]:
            if contribution["status"] == "unvoted":
                categories[category]["unvoted"] += 1
                categories[category]["not_voted"] += 1
            elif score > MIN_SCORE:
                if utopian_vote > 0:
                    categories[category]["voted"] += 1
                else:
                    categories[category]["not_voted"] += 1
                categories[category]["rewardable"] += 1
            else:
                categories[category]["not_voted"] += 1
            # Add moderator, score and total payout in SBD
            categories[category]["moderators"].append(moderator)
            categories[category]["average_score"].append(score)
            categories[category]["total_payout"] += total_payout
            categories[category]["utopian_total"].append(utopian_vote)
            categories[category]["authors_vote_weights"][author].append(utopian_vote)
            categories[category]["authors_scores"][author].append(score)
            if score > 0:
                categories[category]["average_without_0"].append(score)
            if score > MIN_SCORE:
                categories[category]["rewarded_contributors"].append(author)
    category_list = []
    for category, value in categories.items():
        # Set new keys and append value to list
        value["reviewed"] = value["voted"] + value["not_voted"]
        value["average_score"] = average(value["average_score"])
        value["average_without_0"] = average(value["average_without_0"])
        value["moderators"] = Counter(value["moderators"]).most_common()
        value["rewarded_contributors"] = Counter(
            value["rewarded_contributors"]).most_common()
        try:
            value["average_payout"] = value["total_payout"] / value["reviewed"]
        except ZeroDivisionError:
            value["average_payout"] = 0
        value["pct_voted"] = percentage(value["reviewed"], value["voted"])
        # Add Utopian.io's vote statistics
        # (zero votes are dropped so the average reflects actual votes)
        value["utopian_total"] = [vote for vote in value["utopian_total"]
                                  if vote != 0]
        value["average_utopian_vote"] = average(value["utopian_total"])
        value["utopian_total"] = sum(value["utopian_total"])
        category_list.append(value)
    return {"categories": category_list}
def project_statistics(contributions):
    """Returns a dictionary containing statistics about all projects.

    Groups reviewed contributions by repository and aggregates vote
    counts, scores, payouts, and Utopian vote statistics per project.
    """
    projects = {}
    for contribution in contributions:
        # Don't count unreviewed contributions
        if contribution["status"] == "unreviewed":
            continue
        project = contribution["repository"]
        utopian_vote = contribution["utopian_vote"]
        # Set default in case category doesn't exist
        projects.setdefault(
            project, {
                "project": project,
                "average_score": [],
                "average_without_0": [],
                "voted": 0,
                "not_voted": 0,
                "unvoted": 0,
                "task-requests": 0,
                "moderators": [],
                "average_payout": [],
                "total_payout": 0,
                "utopian_total": []
            }
        )
        # Check if contribution was voted on or unvoted
        if contribution["status"] == "unvoted":
            projects[project]["unvoted"] += 1
            projects[project]["not_voted"] += 1
        elif contribution["voted_on"]:
            projects[project]["voted"] += 1
        else:
            projects[project]["not_voted"] += 1
        # If contribution was a task request count this
        if "task" in contribution["category"]:
            projects[project]["task-requests"] += 1
        # Add moderator and score
        projects[project]["moderators"].append(contribution["moderator"])
        projects[project]["average_score"].append(contribution["score"])
        projects[project]["total_payout"] += contribution["total_payout"]
        projects[project]["utopian_total"].append(utopian_vote)
        if contribution["score"] > 0:
            projects[project]["average_without_0"].append(
                contribution["score"])
    project_list = []
    for project, value in projects.items():
        # Set new keys and append value to list
        value["reviewed"] = value["voted"] + value["not_voted"]
        value["average_score"] = average(value["average_score"])
        value["average_without_0"] = average(value["average_without_0"])
        # Safe division: any project in `projects` tallied at least one
        # reviewed contribution, so reviewed >= 1 here.
        value["average_payout"] = value["total_payout"] / value["reviewed"]
        value["moderators"] = Counter(value["moderators"]).most_common()
        value["pct_voted"] = percentage(value["reviewed"], value["voted"])
        # Add Utopian.io's vote statistics
        value["utopian_total"] = [vote for vote in value["utopian_total"]
                                  if vote != 0]
        value["average_utopian_vote"] = average(value["utopian_total"])
        value["utopian_total"] = sum(value["utopian_total"])
        project_list.append(value)
    return {"projects": project_list}
def staff_pick_statistics(contributions):
    """Collect every contribution flagged as staff picked."""
    picks = [entry for entry in contributions if entry["staff_picked"]]
    return {"staff_picks": picks}
def task_request_statistics(contributions):
    """Collect every contribution whose category contains "task"."""
    tasks = [entry for entry in contributions if "task" in entry["category"]]
    return {"task_requests": tasks}
class WeeklyResource(Resource):
    """Endpoint for weekly contribution data (requested)."""
    def get(self, date):
        """Return moderator/category/project/staff-pick/task-request stats
        for the 7-day window ending at `date` (a string, see
        string_to_date()).
        """
        LOGGER.info(f"Retrieving for {date}")
        # Get date for retrieving posts
        date = string_to_date(date)
        week_ago = date - timedelta(days=7)
        # Retrieve contributions made in week before the given date
        contributions = DB.contributions
        pipeline = [
            {"$match": {"review_date": {"$gte": week_ago, "$lt": date}}}]
        contributions = [json.loads(json_util.dumps(c))
                         for c in contributions.aggregate(pipeline)]
        moderators = moderator_statistics(contributions)
        categories = category_statistics(contributions)
        projects = project_statistics(contributions)
        staff_picks = staff_pick_statistics(contributions)
        task_requests = task_request_statistics(contributions)
        return jsonify(
            [moderators, categories, projects, staff_picks, task_requests])
def convert(contribution):
    """Normalise a contribution dict in place (and return the same dict).

    - drops Mongo's "_id"
    - clamps "score" into [0, 100]; falsy scores (None/0) become 0
    - staff picks are forced to a score of 100
    - adds "voting_weight" via exponential_vote (defined elsewhere in
      this file)
    - stringifies "created"/"review_date" for JSON serialisation
    """
    if "_id" in contribution.keys():
        del contribution["_id"]
    if not contribution["score"]:
        contribution["score"] = 0
    elif contribution["score"] < 0:
        contribution["score"] = 0
    elif contribution["score"] > 100:
        contribution["score"] = 100
    # Staff picks always get the maximum score.
    if contribution["staff_picked"]:
        contribution["score"] = 100
    contribution["voting_weight"] = exponential_vote(contribution)
    if "created" in contribution.keys():
        contribution["created"] = str(contribution["created"])
    if "review_date" in contribution.keys():
        contribution["review_date"] = str(contribution["review_date"])
    return contribution
def batch_comments(contributions):
    """Return the review comments that are ready for an upvote.

    A comment qualifies when it has a comment URL, its review is still
    pending, and its review date lies at least two days in the past
    (shifted by the current voting-power recharge countdown).  The
    result is ordered by review date, oldest first.
    """
    _, recharge_time, _ = account_information()
    parsed = parse(recharge_time)
    recharge = timedelta(hours=parsed.hour, minutes=parsed.minute,
                         seconds=parsed.second)
    cutoff = datetime.now() - timedelta(days=2) + recharge
    eligible = [comment for comment in contributions
                if comment["review_date"] <= cutoff and
                comment["comment_url"] and
                comment["review_status"] == "pending"]
    return sorted(eligible, key=lambda comment: comment["review_date"])
def batch_contributions(contributions):
    """Return the contributions that are eligible for the next vote.

    Eligible means the contribution's status is still pending and it is
    old enough to be voted on (``valid_age`` is truthy).
    """
    eligible = []
    for contribution in contributions:
        if contribution["status"] == "pending" and contribution["valid_age"]:
            eligible.append(contribution)
    return eligible
class BatchResource(Resource):
    """Endpoint for the posts to be voted in a batch."""
    def get(self, batch_type):
        """Return the next voting batch of the given ``batch_type``.

        ``batch_type`` is either "comments" or "contributions"; anything
        else yields an empty JSON object.  The batch is computed from
        all documents that are still pending review or voting.
        """
        all_contributions = [c for c in DB.contributions.find({
            "$or": [
                {"status": "pending"},
                {"review_status": "pending"}
            ]
        })]
        # Comments are voted first; their power usage constrains how
        # much is left for contributions.
        comments = batch_comments(all_contributions)
        _, comment_usage = init_comments(comments)
        contributions = [convert(c)
                         for c in batch_contributions(all_contributions)]
        category_share = init_contributions(contributions, comment_usage)
        next_batch = get_batch(contributions, category_share,
                               100.0 - comment_usage)
        if batch_type == "comments":
            batch = comments
        elif batch_type == "contributions":
            batch = next_batch
        else:
            return jsonify({})
        # Serialise through json_util so BSON types survive jsonify.
        eligible = [json.loads(json_util.dumps(convert(c))) for c in batch]
        return jsonify(eligible)
# Register the REST endpoints with Flask-RESTful.
api.add_resource(WeeklyResource, "/api/statistics/<string:date>")
api.add_resource(BannedUsersResource, "/api/bannedUsers")
api.add_resource(ContributionResource, "/api/posts")
api.add_resource(BatchResource, "/api/batch/<string:batch_type>")
def intro_section(first_day, last_day):
    """Build the headline section of the Utopian weekly post.

    The covered week runs from ``first_day`` to ``last_day``; the body
    is a placeholder to be filled in manually.
    """
    LOGGER.info("Generating post introduction section...")
    headline = (
        f"# Weekly Top of Utopian.io: {first_day:%B} {first_day.day} - "
        f"{last_day:%B} {last_day.day}"
    )
    placeholder = "<br><br>[Introduction (summary of the week)]"
    return headline + placeholder
def footer_section():
    """Build the footer section for the Utopian weekly post.

    Contains the onboarding link, the introduction video, the Discord
    banner and the witness-vote call to action.
    """
    LOGGER.info("Generating post footer section...")
    # The HTML attribute values use double quotes, so the Python string
    # literals are single-quoted; the original double-quoted literals
    # were a syntax error (unescaped quotes inside the string).
    section = (
        ""
        "<br><br>## First Time Contributing in [Utopian.io](https://join.utopian.io/)?"
        '<br><br><a href="https://join.utopian.io/guidelines">Learn how to contribute on our website</a>'
        '<br><br><center><iframe width="560" height="315" src="https://www.youtube.com/embed/8S1AtrzYY1Q" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe></center>'
        '<br><br><center><a href="https://discord.gg/h52nFrV"><img src="https://cdn.discordapp.com/attachments/396653220702978049/452918421235957763/footer_558.png" /></a></center>'
        '<br><br><center><h4><a href="https://steemconnect.com/sign/account-witness-vote?witness=utopian-io&approve=1">Vote for the Utopian Witness</a></h4></center>'
    )
    return section
def staff_pick_section(staff_picks):
    """Creates the staff pick section for the Utopian weekly post.

    ``staff_picks`` is the dict produced by ``staff_pick_statistics``;
    each pick is rendered as a headline plus placeholder paragraphs to
    be filled in manually, followed by its payout and vote counts.
    """
    LOGGER.info("Generating staff pick statistics section...")
    section = "## Staff Picks"
    for staff_pick in staff_picks["staff_picks"]:
        url = staff_pick["url"]
        # Fetch the post from the chain to get its title.
        post = Comment(url)
        title = post.json()["title"]
        # If title can't be retrieved set it to the post's URL
        if not title:
            title = url
        author = staff_pick['author']
        category = staff_pick['category']
        # Add staff pick to the string
        section += (
            f"<br><br>### <a href='{url}'>{title}</a> by @{author} "
            f"[{category}]<br><br>[Image (contributor profile image / image from "
            "the post)]<br><br>[Paragraph: Background info on project etc.]"
            "<br><br>[Paragraph: CM review, including etc.]<br><br>"
            f"Total payout: {staff_pick['total_payout']:.2f} STU<br>"
            f"Number of votes: {staff_pick['total_votes']}"
        )
    return section
def post_statistics_section(categories, contributions):
    """Creates the post statistics part for the Utopian weekly post.

    Uses the aggregated ``categories`` statistics for the overall
    numbers and a per-category table, and scans ``contributions`` for
    the highest-payout and most-discussed posts of the week.
    """
    LOGGER.info("Generating post statistics section...")
    section = (
        "## Utopian.io Post Statistics<br><br>"
        "The staff picked contributions are only a small (but exceptional) "
        "example of the mass of contributions reviewed and rewarded by "
        "Utopian.io.<br><br>"
    )
    # Get some statistics needed
    # NOTE(review): the loop overwrites the values each iteration and
    # breaks once the "all" pseudo-category has been assigned, so the
    # variables below end up holding the totals across all categories.
    for category in categories["categories"]:
        reviewed = category["reviewed"]
        voted = category["voted"]
        utopian_total = category["utopian_total"]
        average_vote = category["average_utopian_vote"]
        if category["category"] == "all":
            break
    # Get contributions with highest payout and engagement
    highest_payout = sorted(
        contributions, key=lambda x: x["total_payout"], reverse=True)[0]
    most_engagement = sorted(
        contributions, key=lambda x: x["total_votes"], reverse=True)[0]
    title = Comment(most_engagement["url"]).title
    # Create the section with the above statistics
    section += (
        f"* Overall, the last week saw a total of {reviewed} posts, with "
        f"{voted} of them rewarded through an upvote by @utopian-io.<br>"
        "* In total, Utopian.io distributed an approximate of "
        f"{utopian_total:.2f} STU to contributors.<br>"
        "* The highest payout seen on any Utopian.io contribution this week "
        f"was {highest_payout['total_payout']} STU, with a total of "
        f"{highest_payout['total_votes']} votes received from the community."
        "<br>* The contribution that attracted the most engagement was "
        f"<a href='{most_engagement['url']}'>{title}</a>, with no "
        f"less than {most_engagement['total_comments']} comments in its "
        "comment threads.<br>"
        f"* The average vote given by Utopian.io was worth {average_vote:.2f} "
        "STU.<br><br>## Category Statistics<br><br>"
        "|Category|Reviewed|Rewardable|Rewarded|Total rewards|Top contributor|<br>"
        "|:-|:-|:-|:-|-:|:-|"
    )
    # Create the table with category statistics
    for category in categories["categories"]:
        # Skip if category is 'all' or is task
        if category["category"] == "all" or "task" in category["category"]:
            continue
        # Don't include category is no contributions were rewarded
        rewarded = category["voted"]
        rewardable = category["rewardable"]
        if rewardable == 0:
            continue
        # Get all the data needed
        reviewed = category["reviewed"]
        rewards = f"{category['utopian_total']:.2f}"
        scores_per_author = category['authors_scores']
        weights_per_author = category['authors_vote_weights']
        # Top contributor: highest sum of squared scores, ties broken by
        # total vote weight received.
        author = f"@{sorted(scores_per_author, key=lambda x: (sum([score**2 for score in scores_per_author[x]]), sum(weights_per_author[x])), reverse=True)[0]}"
        category = category["category"]
        # Add the row
        section += (
            f"<br>|{category}|{reviewed}|{rewardable}|{rewarded}|{rewards} STU|{author}|")
    return section
@app.route("/weekly", defaults={"date": "today"})
@app.route("/weekly/<date>")
def weekly(date):
    """Returns weekly statistics in a format that can be posted on Steemit."""
    today = string_to_date(date)
    week_ago = today - timedelta(days=7)
    contributions = DB.contributions
    pipeline = [
        {"$match": {"review_date": {"$gte": week_ago, "$lte": today}}}]
    # Round-trip through json_util so BSON types become plain JSON.
    contributions = [json.loads(json_util.dumps(c))
                     for c in contributions.aggregate(pipeline)]
    # Get the data needed for all statistics
    categories = category_statistics(contributions)
    staff_picks = staff_pick_statistics(contributions)
    # Get each section of the post
    try:
        post_intro_section = intro_section(week_ago, today)
        staff_section = staff_pick_section(staff_picks)
        post_section = post_statistics_section(categories, contributions)
        post_footer_section = footer_section()
    except Exception as error:
        # Any failure (e.g. no contributions this week) renders a
        # fallback message instead of the post body.
        LOGGER.error(error)
        body = ("No statistics to show for this week ("
                f"{week_ago:%B} {week_ago.day} - {today:%B} {today.day}).")
    else:
        body = "<br><br>".join([post_intro_section, staff_section,
                                post_section, post_footer_section])
    LOGGER.info(body)
    return render_template("weekly.html", body=body)
def update_vp(current_vp, updated, recharge_time):
    """Extrapolate the account's voting power since it was last fetched.

    Steem regenerates full voting power over five days, so the share
    regenerated since ``updated`` is added to ``current_vp`` (capped at
    100) and the recharge countdown is shortened by the elapsed time.

    Returns a ``(voting_power, recharge_time)`` tuple where the power is
    a float (two decimals) and the countdown an ``H:MM:SS`` string.
    """
    elapsed = (datetime.now() - updated).total_seconds()
    # 10000 basis points regenerate over 5 * 86400 seconds; /100 turns
    # basis points into a percentage.
    regenerated = elapsed * 10000 / 86400 / 5 / 100
    # Update recharge_time
    try:
        parsed = parse(recharge_time)
        remaining = timedelta(hours=parsed.hour, minutes=parsed.minute,
                              seconds=parsed.second) - timedelta(seconds=elapsed)
        recharge_time = "0:00:00" if remaining < timedelta(seconds=1) else remaining
    except ValueError:
        # Unparseable countdown: leave the original value untouched.
        pass
    current_vp += regenerated
    current_vp = 100 if current_vp > 100 else f"{current_vp:.2f}"
    return float(current_vp), str(recharge_time).split(".")[0]
def account_information():
    """Fetch the bot account's voting state from the database.

    Returns a ``(current_vp, recharge_time, recharge_class)`` tuple with
    the voting power extrapolated to the current moment via update_vp.
    """
    record = DB.accounts.find_one({"account": "utopian-io"})
    current_vp, recharge_time = update_vp(
        record["current_vp"], record["updated"], record["recharge_time"])
    return current_vp, recharge_time, record["recharge_class"]
# Maximum vote percentage per category, applied to a 100-score contribution.
MAX_VOTE = {
    "ideas": 35.0,
    "development": 70.0,
    "bug-hunting": 28.0,
    "translations": 50.0,
    "graphics": 55.0,
    "analysis": 60.0,
    "social": 45.0,
    "documentation": 45.0,
    "tutorials": 45.0,
    "video-tutorials": 45.0,
    "copywriting": 45.0,
    "blog": 45.0,
    "anti-abuse": 65.0,
    "iamutopian": 55.0,
}
# Maximum vote percentage for categories missing from MAX_VOTE
# (task requests).
MAX_TASK_REQUEST = 6.0
# Exponent used by exponential_vote() to favour high scores.
EXP_POWER = 2.1
def exponential_vote(contribution):
    """Calculate the vote weight (in percent) for a contribution.

    The score is mapped through an exponential curve so that high
    scores are rewarded disproportionately, then scaled by the
    category's maximum vote.  Contributions with the utopian.pay
    beneficiary set receive a bonus, and VIPO authors get a 20% boost.
    """
    score = contribution["score"]
    category = contribution["category"]
    # Optional flags default to False when absent.
    is_vipo = contribution.get("is_vipo", False)
    beneficiaries_set = contribution.get("beneficiaries_set", False)
    try:
        max_vote = MAX_VOTE[category]
    except KeyError:
        # Categories missing from MAX_VOTE (task requests) use the
        # task-request cap.  (Was a bare `except:`, which would also
        # have swallowed unrelated errors.)
        max_vote = MAX_TASK_REQUEST
    power = EXP_POWER
    weight = pow(
        score / 100.0,
        power - (score / 100.0 * (power - 1.0))) * max_vote

    if beneficiaries_set:
        # utopian.pay beneficiary: 10% relative bonus plus 6 flat points.
        weight = weight + 0.1 * weight + 6.0
    if is_vipo:
        weight *= 1.2
    return weight
# 100% expressed in Steem's internal basis points.
STEEM_100_PERCENT = 10000
# Seconds for voting mana to regenerate from 0% to 100% (five days).
STEEM_VOTING_MANA_REGENERATION_SECONDS = 432000
def estimate_vote_time(contributions, recharge_time):
    """Estimate when each scored contribution will be upvoted.

    The first scored contribution is voted as soon as the voting power
    has recharged (``recharge_time`` is an ``H:M:S`` string); every
    following vote is delayed by the time needed to regenerate the
    power that the previous vote consumes.  Adds a ``vote_time`` key to
    each scored contribution and returns the list.
    """
    vote_time = None
    for contribution in contributions:
        if "score" not in contribution.keys():
            continue
        if vote_time is None:
            # First *scored* contribution gets the base vote time.
            # (The original keyed this on index 0, which raised a
            # NameError whenever the first item had no score.)
            hours, minutes, seconds = [int(x) for x in
                                       recharge_time.split(":")]
            vote_time = datetime.now() + timedelta(
                hours=hours, minutes=minutes, seconds=seconds)
            contribution["vote_time"] = vote_time
            continue
        # A vote at weight w drains w * 2% of the mana; wait until that
        # share has regenerated before the next vote.
        missing_vp = 2 * exponential_vote(contribution) / 100.0
        recharge_seconds = (missing_vp * 100 *
                            STEEM_VOTING_MANA_REGENERATION_SECONDS /
                            STEEM_100_PERCENT)
        vote_time = vote_time + timedelta(seconds=recharge_seconds)
        contribution["vote_time"] = vote_time
    return contributions
# Points added to a contribution's effective score for every day it has
# been waiting in the queue.
AGE_WEIGHTING = 3.0
def sort_batch_contributions(contributions):
    """Order contributions by score, boosted by how long they have waited.

    The effective score is ``score + AGE_WEIGHTING * age_in_days`` so
    older contributions gradually move to the front of the queue.
    Stores the effective score under ``age_weighted_score``.
    """
    for contribution in contributions:
        age = datetime.now() - parse(contribution["created"])
        days_old = age.days + age.seconds / 3600 / 24.0
        contribution["age_weighted_score"] = (
            contribution["score"] + AGE_WEIGHTING * days_old)
    return sorted(contributions, key=lambda c: c["age_weighted_score"],
                  reverse=True)
def get_batch(contributions, category_share, voting_power):
    """Select the contributions to upvote in the next voting round.

    Walks the queue in age-weighted score order and greedily takes
    every contribution whose vote still fits inside its category's
    share of the voting power.  Once a category's share is exhausted it
    is skipped for the rest of the round.  Returns the batch sorted by
    score, highest first.
    """
    exhausted = []
    batch = []
    for contribution in sort_batch_contributions(contributions):
        category = contribution["category"]
        if "task" in category:
            # All task-request categories draw from one shared budget.
            category = "task-request"
        if category in exhausted:
            continue
        # A vote at weight w consumes w * 2% of the remaining power.
        usage = contribution["voting_weight"] / 100.0 * 0.02 * voting_power
        if category_share[category] - usage < 0:
            exhausted.append(category)
            continue
        category_share[category] -= usage
        voting_power -= usage
        batch.append(contribution)
    return sorted(batch, key=lambda c: c["score"], reverse=True)
def distribute_remainder(remainder, category_usage, new_share, need_more_vp):
    """Distribute the remaining voting power over the categories that need it.

    ``remainder`` is voting power freed up by categories needing less
    than their allotted share.  It is handed out iteratively: whenever
    the smallest outstanding deficit can be granted to every needy
    category without exhausting the remainder, that amount is granted
    across the board; otherwise the leftover is split evenly, capping
    each category at its actual usage.

    Args:
        remainder: Unused voting power to redistribute.
        category_usage: Mapping of category to the power it needs.
        new_share: Mapping of category to its granted share; mutated in
            place and returned.
        need_more_vp: Categories whose share is below their usage;
            mutated in place as categories are satisfied.

    Returns:
        The updated ``new_share`` mapping.
    """
    # Also stop when no category needs more power: the original loop
    # kept going on a positive remainder and crashed calling min() on
    # an empty mapping.
    while remainder > 0 and need_more_vp:
        # Deficit of every category that still needs voting power.
        needed_per_category = {category: category_usage[category] - share
                               for category, share in new_share.items()
                               if category in need_more_vp}
        # Category which needs the least to reach its usage.
        least_under = min(needed_per_category.items(), key=itemgetter(1))
        least_under_category = least_under[0]
        least_needed = least_under[1]
        # If this amount can be added to all categories without using
        # the entire remainder then do so; the closest category is then
        # fully satisfied.
        if len(needed_per_category) * least_needed < remainder:
            for category in need_more_vp:
                new_share[category] += least_needed
                remainder -= least_needed
            need_more_vp.remove(least_under_category)
        # Otherwise distribute the remainder evenly over the categories
        # that need it, never exceeding a category's actual usage.
        else:
            remaining_categories = len(need_more_vp)
            for category in need_more_vp:
                percentage_share = 1.0 / remaining_categories
                to_be_added = percentage_share * remainder
                new_vp = new_share[category] + to_be_added
                # Category's new share is still less than what it needs
                if new_vp < category_usage[category]:
                    new_share[category] += to_be_added
                    remainder -= to_be_added
                # Category's new share exceeds its need: cap it and keep
                # the excess for the other categories.
                else:
                    not_needed = new_vp - category_usage[category]
                    remainder -= to_be_added - not_needed
                    new_share[category] = category_usage[category]
                    remaining_categories -= 1
    return new_share
def calculate_new_share(category_share, category_usage):
    """Recompute each category's voting-power share from its actual need.

    Categories that need less than their allotted share keep only what
    they use; the surplus is pooled and redistributed over categories
    that need more via ``distribute_remainder``.
    """
    new_share = {}
    remainder = 0
    need_more_vp = []
    for category, share in category_share.items():
        if category not in category_usage:
            # No pending contributions in this category: free its share.
            remainder += share
            continue
        usage = category_usage[category]
        if share > usage:
            # Surplus share goes into the redistribution pool.
            remainder += share - usage
            new_share[category] = usage
        else:
            # Category needs at least its share; mark it for a top-up.
            new_share[category] = share
            need_more_vp.append(category)
    return distribute_remainder(remainder, category_usage, new_share,
                                need_more_vp)
def contribution_voting_power(contributions, voting_power):
    """Return the voting power needed to upvote all pending contributions.

    Simulates voting in batch order: each vote at weight w drains
    ``w * 2%`` of the power still remaining, so later votes are
    slightly cheaper in absolute terms.
    """
    starting_vp = voting_power
    scaler = 1.0
    for contribution in sort_batch_contributions(contributions):
        # (Removed an unused `category` local from the original.)
        voting_weight = contribution["voting_weight"]
        usage = scaler * voting_weight / 100.0 * 0.02 * voting_power
        voting_power -= usage
    return starting_vp - voting_power
def get_category_usage(contributions, voting_power):
    """Map each category to the voting power needed for its contributions.

    Task-request categories are pooled under the single key
    ``"task-request"``.  Each vote's cost is computed against the full
    ``voting_power`` (no drain is simulated here).
    """
    usage_per_category = {}
    for contribution in sort_batch_contributions(contributions):
        category = contribution["category"]
        if "task" in category:
            category = "task-request"
        vp_usage = (contribution["voting_weight"] / 100.0 * 0.02 *
                    voting_power)
        usage_per_category[category] = (
            usage_per_category.get(category, 0) + vp_usage)
    return usage_per_category
def get_category_share(voting_power):
    """Split ``voting_power`` across categories by their weighting.

    Each category receives a slice proportional to its entry in
    ``CATEGORY_WEIGHTING``.
    """
    total_weight = sum(CATEGORY_WEIGHTING.values())
    return {category: weight / total_weight * voting_power
            for category, weight in CATEGORY_WEIGHTING.items()}
def init_contributions(contributions, comment_usage):
    """Work out each category's power share for contribution voting.

    Comments are upvoted first, so only ``100 - comment_usage`` percent
    of the power is available.  If the projected total usage would
    exceed the budget (``VP_TOTAL``) the contribution usage is clipped
    and the shares rebalanced so every category fits.
    """
    voting_power = 100.0 - comment_usage
    contribution_usage = contribution_voting_power(contributions, voting_power)
    if contribution_usage + comment_usage > VP_TOTAL:
        contribution_usage = VP_TOTAL - comment_usage
    category_share = get_category_share(contribution_usage)
    category_usage = get_category_usage(contributions, voting_power)
    if contribution_usage + comment_usage == VP_TOTAL:
        # Budget is maxed out: rebalance shares to match actual usage.
        return calculate_new_share(category_share, category_usage)
    return category_usage
def comment_voting_power(comments, comment_weights, scaling=1.0):
    """Return the voting power (percent) needed to upvote all comments.

    Comments are processed newest-first; a vote at weight w drains
    ``scaling * w * 2%`` of the voting power that is still left.
    Categories without an explicit weight fall back to the
    ``"task-request"`` weight.
    """
    remaining = 100.0
    newest_first = sorted(comments, key=lambda c: c["review_date"],
                          reverse=True)
    for comment in newest_first:
        category = comment["category"]
        if category in comment_weights:
            weight = comment_weights[category]
        else:
            weight = comment_weights["task-request"]
        remaining -= scaling * weight / 100.0 * 0.02 * remaining
    return 100.0 - remaining
def update_weights(comment_weights, comment_usage):
    """Rescale the comment vote weights so usage matches the target.

    Voting power drains multiplicatively, so the correction factor is
    the ratio of the desired and actual remaining power in log space.
    ``comment_weights`` is updated in place and returned.
    """
    desired_remaining = 1.0 - VP_COMMENTS / 100.0
    actual_remaining = 1.0 - comment_usage / 100.0
    correction = np.log(desired_remaining) / np.log(actual_remaining)
    for category in comment_weights.keys():
        comment_weights[category] *= correction
    return comment_weights
def get_comment_weights():
    """Map each category to the vote weight worth its moderation reward.

    The weight is the percentage of a full @utopian-io vote whose STU
    value equals the category's reward points in ``MODERATION_REWARD``.
    """
    account = Account("utopian-io")
    # NOTE: the vote value is queried per category, matching the
    # original behaviour.
    return {
        category: 100.0 * points / account.get_voting_value_SBD()
        for category, points in MODERATION_REWARD.items()
    }
def init_comments(comments):
    """Prepare the weights and projected power usage for comment voting.

    If the projected usage exceeds the comment budget (``VP_COMMENTS``)
    the weights are scaled down and the usage recomputed.

    Returns a ``(comment_weights, comment_usage)`` tuple.
    """
    weights = get_comment_weights()
    usage = comment_voting_power(comments, weights)
    if usage > VP_COMMENTS:
        weights = update_weights(weights, usage)
        usage = comment_voting_power(comments, weights)
    return weights, usage
@app.route("/queue")
def queue():
    """Returns all pending contributions and sets attribute `next_batch` if the
    contribution will be included in the next voting round.
    """
    all_contributions = [c for c in DB.contributions.find({
        "$or": [
            {"status": "pending"},
            {"review_status": "pending"}
        ]
    })]
    current_vp, recharge_time, recharge_class = account_information()
    if not recharge_time:
        recharge_time = "0:0:0"
    # Comments are voted first; their usage constrains contributions.
    comments = batch_comments(all_contributions)
    _, comment_usage = init_comments(comments)
    contributions = [convert(c)
                     for c in batch_contributions(all_contributions)]
    category_share = init_contributions(contributions, comment_usage)
    batch = get_batch(contributions, category_share, 100.0 - comment_usage)
    # Everything in the batch is voted once the power has recharged.
    for contribution in batch:
        contribution["next_batch"] = True
        hours, minutes, seconds = [int(x) for x in recharge_time.split(":")]
        contribution["vote_time"] = datetime.now() + timedelta(
            hours=hours, minutes=minutes, seconds=seconds)
    # Remaining eligible contributions are shown without a vote time.
    remaining_contributions = []
    for contribution in all_contributions:
        if (contribution in batch or contribution["status"] != "pending" or
                not contribution["valid_age"]):
            continue
        contribution["next_batch"] = False
        contribution["vote_time"] = "TBD"
        remaining_contributions.append(contribution)
    contributions = batch + sorted(remaining_contributions,
                                   key=lambda x: x["score"], reverse=True)
    return render_template(
        "queue.html", contributions=contributions, current_vp=current_vp,
        recharge_time=recharge_time, recharge_class=recharge_class)
@app.route("/comments")
def moderator_comments():
    """Returns all pending review comments and sets attribute `next_batch` if
    the comment will be included in the next voting round.
    """
    all_contributions = [c for c in DB.contributions.find({
        "$or": [
            {"status": "pending"},
            {"review_status": "pending"}
        ]
    })]
    current_vp, recharge_time, recharge_class = account_information()
    if not recharge_time:
        recharge_time = "0:0:0"
    batch = batch_comments(all_contributions)
    pending_comments = []
    for comment in all_contributions:
        if comment["review_status"] != "pending":
            continue
        if comment in batch:
            # Comments in the batch are voted once power has recharged.
            comment["next_batch"] = True
            hours, minutes, seconds = [int(x) for x in
                                       recharge_time.split(":")]
            comment["vote_time"] = datetime.now() + timedelta(
                hours=hours, minutes=minutes, seconds=seconds)
        else:
            comment["next_batch"] = False
            comment["vote_time"] = "TBD"
        pending_comments.append(comment)
    # Oldest first, with next-batch comments shown before the rest.
    comments = sorted(pending_comments, key=lambda x: x["review_date"])
    comments = sorted(comments, key=lambda x: x["next_batch"], reverse=True)
    return render_template(
        "comments.html", contributions=comments, current_vp=current_vp,
        recharge_time=recharge_time, recharge_class=recharge_class)
@app.context_processor
def inject_last_updated():
    """Expose the last-update timestamp and category list to all templates."""
    category_names = sorted(["analysis", "tutorials", "graphics", "copywriting",
                             "development", "blog", "ideas", "social", "all",
                             "bug-hunting", "video-tutorials", "translations",
                             "anti-abuse", "iamutopian"])
    account = DB.accounts.find_one({"account": "utopian-io"})
    last_updated = account["updated"].strftime("%H:%M %Z")
    return dict(last_updated=last_updated, categories=category_names)
def main():
    """Run the Flask development server, reachable on all interfaces."""
    app.run(host="0.0.0.0")
if __name__ == '__main__':
    main()
| [
"pymongo.MongoClient",
"flask_restful.Api",
"timeago.format",
"flask_cors.CORS",
"webargs.fields.Int",
"logging.Formatter",
"collections.defaultdict",
"flask.jsonify",
"beem.comment.Comment",
"bson.json_util.dumps",
"logging.FileHandler",
"webargs.flaskparser.abort",
"datetime.timedelta",
... | [((1557, 1588), 'logging.getLogger', 'logging.getLogger', (['"""utopian-io"""'], {}), "('utopian-io')\n", (1574, 1588), False, 'import logging\n'), ((1624, 1667), 'logging.FileHandler', 'logging.FileHandler', (['f"""{DIR_PATH}/test.log"""'], {}), "(f'{DIR_PATH}/test.log')\n", (1643, 1667), False, 'import logging\n'), ((1707, 1780), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (1724, 1780), False, 'import logging\n'), ((1863, 1876), 'pymongo.MongoClient', 'MongoClient', ([], {}), '()\n', (1874, 1876), False, 'from pymongo import MongoClient\n'), ((1904, 1919), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1909, 1919), False, 'from flask import Flask, abort, jsonify, render_template\n'), ((1920, 1929), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (1924, 1929), False, 'from flask_cors import CORS\n'), ((1936, 1944), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (1939, 1944), False, 'from flask_restful import Api, Resource\n'), ((1520, 1546), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1536, 1546), False, 'import os\n'), ((2010, 2030), 'timeago.format', 'timeago.format', (['date'], {}), '(date)\n', (2024, 2030), False, 'import timeago\n'), ((2261, 2280), 'flask.jsonify', 'jsonify', (['moderators'], {}), '(moderators)\n', (2268, 2280), False, 'from flask import Flask, abort, jsonify, render_template\n'), ((2876, 2931), 'flask.render_template', 'render_template', (['"""index.html"""'], {'contributions': 'unreviewed'}), "('index.html', contributions=unreviewed)\n", (2891, 2931), False, 'from flask import Flask, abort, jsonify, render_template\n'), ((3709, 3735), 'webargs.flaskparser.use_args', 'use_args', (['query_parameters'], {}), '(query_parameters)\n', (3717, 3735), False, 'from webargs.flaskparser import abort, parser, use_args, use_kwargs\n'), ((4663, 4689), 
'webargs.flaskparser.use_args', 'use_args', (['query_parameters'], {}), '(query_parameters)\n', (4671, 4689), False, 'from webargs.flaskparser import abort, parser, use_args, use_kwargs\n'), ((16642, 16662), 'dateutil.parser.parse', 'parse', (['recharge_time'], {}), '(recharge_time)\n', (16647, 16662), False, 'from dateutil.parser import parse\n'), ((16683, 16783), 'datetime.timedelta', 'timedelta', ([], {'hours': 'recharge_time.hour', 'minutes': 'recharge_time.minute', 'seconds': 'recharge_time.second'}), '(hours=recharge_time.hour, minutes=recharge_time.minute, seconds=\n recharge_time.second)\n', (16692, 16783), False, 'from datetime import date, datetime, timedelta\n'), ((25741, 25782), 'flask.render_template', 'render_template', (['"""weekly.html"""'], {'body': 'body'}), "('weekly.html', body=body)\n", (25756, 25782), False, 'from flask import Flask, abort, jsonify, render_template\n'), ((37225, 37246), 'beem.account.Account', 'Account', (['"""utopian-io"""'], {}), "('utopian-io')\n", (37232, 37246), False, 'from beem.account import Account\n'), ((39478, 39624), 'flask.render_template', 'render_template', (['"""queue.html"""'], {'contributions': 'contributions', 'current_vp': 'current_vp', 'recharge_time': 'recharge_time', 'recharge_class': 'recharge_class'}), "('queue.html', contributions=contributions, current_vp=\n current_vp, recharge_time=recharge_time, recharge_class=recharge_class)\n", (39493, 39624), False, 'from flask import Flask, abort, jsonify, render_template\n'), ((40935, 41079), 'flask.render_template', 'render_template', (['"""comments.html"""'], {'contributions': 'comments', 'current_vp': 'current_vp', 'recharge_time': 'recharge_time', 'recharge_class': 'recharge_class'}), "('comments.html', contributions=comments, current_vp=\n current_vp, recharge_time=recharge_time, recharge_class=recharge_class)\n", (40950, 41079), False, 'from flask import Flask, abort, jsonify, render_template\n'), ((2090, 2117), 'flask.render_template', 
'render_template', (['"""404.html"""'], {}), "('404.html')\n", (2105, 2117), False, 'from flask import Flask, abort, jsonify, render_template\n'), ((2567, 2580), 'flask.jsonify', 'jsonify', (['data'], {}), '(data)\n', (2574, 2580), False, 'from flask import Flask, abort, jsonify, render_template\n'), ((3231, 3243), 'webargs.fields.Str', 'fields.Str', ([], {}), '()\n', (3241, 3243), False, 'from webargs import fields, validate\n'), ((3263, 3275), 'webargs.fields.Str', 'fields.Str', ([], {}), '()\n', (3273, 3275), False, 'from webargs import fields, validate\n'), ((3295, 3307), 'webargs.fields.Str', 'fields.Str', ([], {}), '()\n', (3305, 3307), False, 'from webargs import fields, validate\n'), ((3330, 3342), 'webargs.fields.Str', 'fields.Str', ([], {}), '()\n', (3340, 3342), False, 'from webargs import fields, validate\n'), ((3368, 3381), 'webargs.fields.Bool', 'fields.Bool', ([], {}), '()\n', (3379, 3381), False, 'from webargs import fields, validate\n'), ((3408, 3420), 'webargs.fields.Str', 'fields.Str', ([], {}), '()\n', (3418, 3420), False, 'from webargs import fields, validate\n'), ((3437, 3449), 'webargs.fields.Str', 'fields.Str', ([], {}), '()\n', (3447, 3449), False, 'from webargs import fields, validate\n'), ((3471, 3484), 'webargs.fields.Bool', 'fields.Bool', ([], {}), '()\n', (3482, 3484), False, 'from webargs import fields, validate\n'), ((3508, 3520), 'webargs.fields.Str', 'fields.Str', ([], {}), '()\n', (3518, 3520), False, 'from webargs import fields, validate\n'), ((3551, 3564), 'webargs.fields.Bool', 'fields.Bool', ([], {}), '()\n', (3562, 3564), False, 'from webargs import fields, validate\n'), ((3585, 3598), 'webargs.fields.Bool', 'fields.Bool', ([], {}), '()\n', (3596, 3598), False, 'from webargs import fields, validate\n'), ((3621, 3634), 'webargs.fields.Bool', 'fields.Bool', ([], {}), '()\n', (3632, 3634), False, 'from webargs import fields, validate\n'), ((3652, 3664), 'webargs.fields.Int', 'fields.Int', ([], {}), '()\n', (3662, 3664), False, 
'from webargs import fields, validate\n'), ((3683, 3695), 'webargs.fields.Int', 'fields.Int', ([], {}), '()\n', (3693, 3695), False, 'from webargs import fields, validate\n'), ((4446, 4468), 'flask.jsonify', 'jsonify', (['contributions'], {}), '(contributions)\n', (4453, 4468), False, 'from flask import Flask, abort, jsonify, render_template\n'), ((4605, 4617), 'webargs.fields.Str', 'fields.Str', ([], {}), '()\n', (4615, 4617), False, 'from webargs import fields, validate\n'), ((4637, 4650), 'webargs.fields.Bool', 'fields.Bool', ([], {}), '()\n', (4648, 4650), False, 'from webargs import fields, validate\n'), ((4869, 4890), 'flask.jsonify', 'jsonify', (['banned_users'], {}), '(banned_users)\n', (4876, 4890), False, 'from flask import Flask, abort, jsonify, render_template\n'), ((5021, 5033), 'datetime.date.today', 'date.today', ([], {}), '()\n', (5031, 5033), False, 'from datetime import date, datetime, timedelta\n'), ((5049, 5108), 'datetime.datetime', 'datetime', (['today_date.year', 'today_date.month', 'today_date.day'], {}), '(today_date.year, today_date.month, today_date.day)\n', (5057, 5108), False, 'from datetime import date, datetime, timedelta\n'), ((5193, 5210), 'dateutil.parser.parse', 'parse', (['date_input'], {}), '(date_input)\n', (5198, 5210), False, 'from dateutil.parser import parse\n'), ((5391, 5402), 'statistics.mean', 'mean', (['score'], {}), '(score)\n', (5395, 5402), False, 'from statistics import mean\n'), ((15634, 15705), 'flask.jsonify', 'jsonify', (['[moderators, categories, projects, staff_picks, task_requests]'], {}), '([moderators, categories, projects, staff_picks, task_requests])\n', (15641, 15705), False, 'from flask import Flask, abort, jsonify, render_template\n'), ((18222, 18239), 'flask.jsonify', 'jsonify', (['eligible'], {}), '(eligible)\n', (18229, 18239), False, 'from flask import Flask, abort, jsonify, render_template\n'), ((20483, 20495), 'beem.comment.Comment', 'Comment', (['url'], {}), '(url)\n', (20490, 20495), False, 
'from beem.comment import Comment\n'), ((22287, 22318), 'beem.comment.Comment', 'Comment', (["most_engagement['url']"], {}), "(most_engagement['url'])\n", (22294, 22318), False, 'from beem.comment import Comment\n'), ((24672, 24689), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (24681, 24689), False, 'from datetime import date, datetime, timedelta\n'), ((26009, 26029), 'dateutil.parser.parse', 'parse', (['recharge_time'], {}), '(recharge_time)\n', (26014, 26029), False, 'from dateutil.parser import parse\n'), ((26054, 26154), 'datetime.timedelta', 'timedelta', ([], {'hours': 'recharge_time.hour', 'minutes': 'recharge_time.minute', 'seconds': 'recharge_time.second'}), '(hours=recharge_time.hour, minutes=recharge_time.minute, seconds=\n recharge_time.second)\n', (26063, 26154), False, 'from datetime import date, datetime, timedelta\n'), ((36829, 36850), 'numpy.log', 'np.log', (['desired_usage'], {}), '(desired_usage)\n', (36835, 36850), True, 'import numpy as np\n'), ((36853, 36873), 'numpy.log', 'np.log', (['actual_usage'], {}), '(actual_usage)\n', (36859, 36873), True, 'import numpy as np\n'), ((2538, 2551), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (2547, 2551), False, 'import json\n'), ((2601, 2611), 'webargs.flaskparser.abort', 'abort', (['(404)'], {}), '(404)\n', (2606, 2611), False, 'from webargs.flaskparser import abort, parser, use_args, use_kwargs\n'), ((5154, 5168), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5166, 5168), False, 'from datetime import date, datetime, timedelta\n'), ((7664, 7681), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7675, 7681), False, 'from collections import Counter, defaultdict\n'), ((7713, 7730), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7724, 7730), False, 'from collections import Counter, defaultdict\n'), ((14981, 14998), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (14990, 14998), False, 'from 
datetime import date, datetime, timedelta\n'), ((24848, 24866), 'bson.json_util.dumps', 'json_util.dumps', (['c'], {}), '(c)\n', (24863, 24866), False, 'from bson import json_util\n'), ((26227, 26253), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'seconds'}), '(seconds=seconds)\n', (26236, 26253), False, 'from datetime import date, datetime, timedelta\n'), ((26281, 26301), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (26290, 26301), False, 'from datetime import date, datetime, timedelta\n'), ((29161, 29196), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'recharge_seconds'}), '(seconds=recharge_seconds)\n', (29170, 29196), False, 'from datetime import date, datetime, timedelta\n'), ((29512, 29526), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (29524, 29526), False, 'from datetime import date, datetime, timedelta\n'), ((29529, 29559), 'dateutil.parser.parse', 'parse', (["contribution['created']"], {}), "(contribution['created'])\n", (29534, 29559), False, 'from dateutil.parser import parse\n'), ((38882, 38896), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (38894, 38896), False, 'from datetime import date, datetime, timedelta\n'), ((38899, 38955), 'datetime.timedelta', 'timedelta', ([], {'hours': 'hours', 'minutes': 'minutes', 'seconds': 'seconds'}), '(hours=hours, minutes=minutes, seconds=seconds)\n', (38908, 38955), False, 'from datetime import date, datetime, timedelta\n'), ((4762, 4783), 'bson.json_util.dumps', 'json_util.dumps', (['user'], {}), '(user)\n', (4777, 4783), False, 'from bson import json_util\n'), ((6813, 6839), 'collections.Counter', 'Counter', (["value['category']"], {}), "(value['category'])\n", (6820, 6839), False, 'from collections import Counter, defaultdict\n'), ((8716, 8733), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (8727, 8733), False, 'from collections import Counter, defaultdict\n'), ((8769, 8786), 'collections.defaultdict', 'defaultdict', 
(['list'], {}), '(list)\n', (8780, 8786), False, 'from collections import Counter, defaultdict\n'), ((10455, 10483), 'collections.Counter', 'Counter', (["value['moderators']"], {}), "(value['moderators'])\n", (10462, 10483), False, 'from collections import Counter, defaultdict\n'), ((10539, 10578), 'collections.Counter', 'Counter', (["value['rewarded_contributors']"], {}), "(value['rewarded_contributors'])\n", (10546, 10578), False, 'from collections import Counter, defaultdict\n'), ((13505, 13533), 'collections.Counter', 'Counter', (["value['moderators']"], {}), "(value['moderators'])\n", (13512, 13533), False, 'from collections import Counter, defaultdict\n'), ((15240, 15258), 'bson.json_util.dumps', 'json_util.dumps', (['c'], {}), '(c)\n', (15255, 15258), False, 'from bson import json_util\n'), ((18119, 18130), 'flask.jsonify', 'jsonify', (['{}'], {}), '({})\n', (18126, 18130), False, 'from flask import Flask, abort, jsonify, render_template\n'), ((25851, 25865), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (25863, 25865), False, 'from datetime import date, datetime, timedelta\n'), ((28740, 28754), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (28752, 28754), False, 'from datetime import date, datetime, timedelta\n'), ((28757, 28813), 'datetime.timedelta', 'timedelta', ([], {'hours': 'hours', 'minutes': 'minutes', 'seconds': 'seconds'}), '(hours=hours, minutes=minutes, seconds=seconds)\n', (28766, 28813), False, 'from datetime import date, datetime, timedelta\n'), ((31252, 31265), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (31262, 31265), False, 'from operator import itemgetter\n'), ((40543, 40557), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (40555, 40557), False, 'from datetime import date, datetime, timedelta\n'), ((40560, 40616), 'datetime.timedelta', 'timedelta', ([], {'hours': 'hours', 'minutes': 'minutes', 'seconds': 'seconds'}), '(hours=hours, minutes=minutes, seconds=seconds)\n', (40569, 
40616), False, 'from datetime import date, datetime, timedelta\n'), ((16882, 16896), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16894, 16896), False, 'from datetime import date, datetime, timedelta\n'), ((16899, 16916), 'datetime.timedelta', 'timedelta', ([], {'days': '(2)'}), '(days=2)\n', (16908, 16916), False, 'from datetime import date, datetime, timedelta\n')] |
import numpy as np
import torch
import torch.nn as nn
from rl_sandbox.model_architectures.utils import construct_conv2d_layers, construct_conv2dtranspose_layers
class Flatten(nn.Module):
    """Collapse every dimension after the batch axis into a single one."""

    def forward(self, x):
        batch_size = x.size(0)
        return x.view(batch_size, -1)
class Split(nn.Module):
    """Slice the last dimension into consecutive chunks.

    Chunk i has width ``feature_dims[i]``; chunks are taken left-to-right
    starting from index 0 of the last dimension.
    """

    def __init__(self, feature_dims):
        super().__init__()
        self.feature_dims = feature_dims

    def forward(self, x):
        chunks = []
        offset = 0
        for width in self.feature_dims:
            chunks.append(x[..., offset:offset + width])
            offset += width
        return chunks
class Swish(nn.Module):
    """Swish/SiLU activation: f(x) = x * sigmoid(x)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        return x * torch.sigmoid(x)
class Fuse(nn.Module):
    """Concatenate a sequence of tensors along their last dimension."""

    def forward(self, features):
        fused = torch.cat(features, dim=-1)
        return fused
class ModernBlock(nn.Module):
"""
Reference: https://arxiv.org/abs/2106.01151
"""
def __init__(self,
input_dim,
w1_size,
w2_size,
norm=None):
super().__init__()
self.input_dim = input_dim
self.w1_size = w1_size
self.w2_size = w2_size
self.fc1 = nn.Linear(input_dim, w1_size)
self.fc2 = nn.Linear(w1_size, w2_size)
if norm == "spectral":
self.fc1 = nn.utils.spectral_norm(self.fc1)
self.fc2 = nn.utils.spectral_norm(self.fc2)
def forward(self, x):
return x + self.fc2(torch.relu(self.fc1(x)))
class Conv2DEncoder(nn.Module):
    """Conv2D feature extractor: conv stack -> flatten -> linear -> activation.

    Parameters
    ----------
    num_channels : number of input image channels.
    height, width : spatial size of the input images.
    output_size : dimensionality of the encoded feature vector.
    layers : conv-layer spec consumed by ``construct_conv2d_layers``;
        ``layers[-1][1]`` is the channel count of the last conv layer.
    activation : optional module applied to the final linear output;
        defaults to a no-op (``nn.Identity()``).  The original signature
        constructed ``nn.Identity()`` in the default argument, creating a
        single module instance at definition time shared by every encoder;
        a ``None`` sentinel avoids that mutable-default pitfall while
        keeping the same behavior for callers.
    """

    def __init__(self,
                 num_channels,
                 height,
                 width,
                 output_size,
                 layers,
                 activation=None):
        super().__init__()
        self._num_channels = num_channels
        self.output_size = output_size
        self._conv_layers, self._layers_dim = construct_conv2d_layers(layers=layers, in_dim=(height, width))
        print("Output Height: {}\tOutput Width: {}".format(*self._layers_dim[-1]))
        # Spatial locations remaining after the conv stack.
        conv_output_dim = int(np.product(self._layers_dim[-1]))
        self._flatten = Flatten()
        self._linear_layer = nn.Linear(conv_output_dim * layers[-1][1], output_size)
        self._activation = nn.Identity() if activation is None else activation

    def forward(self, x):
        for layer in self._conv_layers:
            x = layer(x)
        x = self._flatten(x)
        x = self._linear_layer(x)
        x = self._activation(x)
        return x

    @property
    def layers_dim(self):
        # Per-layer (height, width) output sizes from construct_conv2d_layers.
        return self._layers_dim
class Conv2DDecoder(nn.Module):
    """Inverse of Conv2DEncoder: linear -> reshape -> transposed-conv stack.

    `layers_dim` is the encoder's per-layer (height, width) list; it is
    reversed here so decoding upsamples back through the same spatial sizes.
    """
    def __init__(self,
                 input_size,
                 layers,
                 layers_dim):
        super().__init__()
        self._input_size = input_size
        # Channel count expected at the start of the transposed-conv stack.
        self._in_channel = layers[0][1]
        # Reversed so index 0 is the smallest (deepest) feature map.
        self._layers_dim = layers_dim[::-1]
        # Smallest spatial size: the linear output is reshaped to this.
        self._in_dim = layers_dim[-1]
        self._linear_layer = nn.Linear(input_size, self._in_channel * int(np.product(self._in_dim)))
        self._relu = nn.ReLU()
        self._conv_transpose_layers = construct_conv2dtranspose_layers(layers)

    def forward(self, x):
        x = self._linear_layer(x)
        x = self._relu(x)
        # Un-flatten to (batch, channels, H, W) before the transposed convs.
        x = x.reshape(x.shape[0], self._in_channel, *self._in_dim)
        layer_idx = 1
        for layer in self._conv_transpose_layers:
            if isinstance(layer, nn.ConvTranspose2d):
                # output_size pins each upsample to the exact size recorded
                # by the encoder (ConvTranspose2d output shape is ambiguous).
                x = layer(x, output_size=self._layers_dim[layer_idx])
                layer_idx += 1
            else:
                x = layer(x)
        return x
| [
"torch.nn.ReLU",
"rl_sandbox.model_architectures.utils.construct_conv2dtranspose_layers",
"torch.nn.utils.spectral_norm",
"torch.cat",
"torch.sigmoid",
"rl_sandbox.model_architectures.utils.construct_conv2d_layers",
"numpy.product",
"torch.nn.Linear",
"torch.nn.Identity"
] | [((889, 916), 'torch.cat', 'torch.cat', (['features'], {'dim': '(-1)'}), '(features, dim=-1)\n', (898, 916), False, 'import torch\n'), ((1288, 1317), 'torch.nn.Linear', 'nn.Linear', (['input_dim', 'w1_size'], {}), '(input_dim, w1_size)\n', (1297, 1317), True, 'import torch.nn as nn\n'), ((1337, 1364), 'torch.nn.Linear', 'nn.Linear', (['w1_size', 'w2_size'], {}), '(w1_size, w2_size)\n', (1346, 1364), True, 'import torch.nn as nn\n'), ((1818, 1831), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (1829, 1831), True, 'import torch.nn as nn\n'), ((1989, 2051), 'rl_sandbox.model_architectures.utils.construct_conv2d_layers', 'construct_conv2d_layers', ([], {'layers': 'layers', 'in_dim': '(height, width)'}), '(layers=layers, in_dim=(height, width))\n', (2012, 2051), False, 'from rl_sandbox.model_architectures.utils import construct_conv2d_layers, construct_conv2dtranspose_layers\n'), ((2264, 2319), 'torch.nn.Linear', 'nn.Linear', (['(conv_output_dim * layers[-1][1])', 'output_size'], {}), '(conv_output_dim * layers[-1][1], output_size)\n', (2273, 2319), True, 'import torch.nn as nn\n'), ((3095, 3104), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3102, 3104), True, 'import torch.nn as nn\n'), ((3143, 3183), 'rl_sandbox.model_architectures.utils.construct_conv2dtranspose_layers', 'construct_conv2dtranspose_layers', (['layers'], {}), '(layers)\n', (3175, 3183), False, 'from rl_sandbox.model_architectures.utils import construct_conv2d_layers, construct_conv2dtranspose_layers\n'), ((782, 798), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (795, 798), False, 'import torch\n'), ((1420, 1452), 'torch.nn.utils.spectral_norm', 'nn.utils.spectral_norm', (['self.fc1'], {}), '(self.fc1)\n', (1442, 1452), True, 'import torch.nn as nn\n'), ((1476, 1508), 'torch.nn.utils.spectral_norm', 'nn.utils.spectral_norm', (['self.fc2'], {}), '(self.fc2)\n', (1498, 1508), True, 'import torch.nn as nn\n'), ((2166, 2198), 'numpy.product', 'np.product', (['self._layers_dim[-1]'], 
{}), '(self._layers_dim[-1])\n', (2176, 2198), True, 'import numpy as np\n'), ((3047, 3071), 'numpy.product', 'np.product', (['self._in_dim'], {}), '(self._in_dim)\n', (3057, 3071), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""Script to run hyperalignment for Budapest movie. You should remember to
have enough space in /tmp (so for example by mounting /tmp in the singularity
container to a location with enough storage), as well as setting
OMP_NUM_THREADS to 1 in your environment variables, to avoid using too many
resources."""
import matplotlib
# Non-interactive backend so the script can run headless (no display).
matplotlib.use('Agg')
from mvpa2 import debug
# Enable PyMVPA debug output for searchlight hyperalignment progress.
debug.active += ['SHPAL', 'SLC']
import argparse
import numpy as np
from mvpa2.datasets import niml, vstack
from mvpa2.mappers.zscore import zscore
from mvpa2.base.hdf5 import h5save
from mvpa2.misc.surfing.queryengine import SurfaceQueryEngine
from mvpa2.algorithms.searchlight_hyperalignment import \
SearchlightHyperalignment
from mvpa2.support.nibabel import surf
import os
import os.path as op
from glob import glob
from dateutil.parser import parse
from joblib.parallel import Parallel, delayed
def get_trs_toskip():
    """Return per-run seconds to skip at the start of each of the 5 runs.

    The first run skips nothing; every later run skips the 10 s fixation
    plus its overlap with the previous run, derived from the split timings.
    """
    # Get additional times to skip based on the overlap of each run
    # This comes from the bash script used for splitting
    timings = """
    00:45:22.899 00:55:00.299
    00:54:50 01:02:47.133
    01:02:27 01:11:01.599
    01:10:50 01:20:47.699
    01:20:37 01:33:39.533
    """.split()
    # list() is required under Python 3, where map() returns a lazy
    # iterator that cannot be indexed (timings[i] below would raise).
    timings = list(map(parse, timings))
    fixation_s = 10
    skip = []
    for i in range(1, 9, 2):
        skip.append(fixation_s + (timings[i] - timings[i+1]).seconds)
    skip = [0] + skip
    return skip
def load_datasets(fns, skip):
    """Load a list of datasets, skipping initial TRs as specified in skip,
    and return a stacked dataset"""
    if len(fns) != len(skip):
        raise ValueError("Please provide the same number of fns and skip")
    dss = []
    for chunk, (fn, n_skip) in enumerate(zip(fns, skip)):
        ds = niml.read(fn)
        ds.sa['chunks'] = [chunk]
        # drop first 10 seconds + overlap with the previous run
        ds = ds[n_skip:]
        zscore(ds)
        ds.fa['node_indices'] = np.arange(ds.nfeatures)
        dss.append(ds)
    return vstack(dss)
def hyperalign(dss, qe, **hpalkwargs):
    """Run searchlight hyperalignment on the datasets; return the mappers."""
    slhyper = SearchlightHyperalignment(queryengine=qe, **hpalkwargs)
    return slhyper(dss)
def load_mask(hemi, fsdir):
    """Load the medial-wall mask dataset for the given freesurfer hemi."""
    fn = op.join(fsdir, 'fsaverage6/SUMA',
                 '{}.maskmedial.niml.dset'.format(hemi))
    return niml.read(fn)
def get_qe(hemi, fsdir, radius=20.0):
    """Build a SurfaceQueryEngine on the hemisphere's white surface."""
    surface = surf.read(
        op.join(fsdir, 'fsaverage6/SUMA', '{}.white.gii'.format(hemi)))
    return SurfaceQueryEngine(surface, radius=radius)
def run(list_fns, subjects, hemi, fsdir, outdir, nproc=1):
    """Load, mask, and hyperalign all subjects' data for one hemisphere,
    then save one mapper per subject under outdir/<subject>/."""
    hemi2fshemi = {'L': 'lh', 'R': 'rh'}
    fshemi = hemi2fshemi[hemi]
    skip = get_trs_toskip()
    print("Got the following datasets: {}".format(list_fns))
    print("Loading datasets")
    # Load and stack each subject's runs in parallel.
    dss = Parallel(n_jobs=nproc)(delayed(load_datasets)(fns, skip) for fns in list_fns)
    mask = load_mask(fshemi, fsdir)
    # mask_idx = np.where(mask.samples[0])[0][:100]  # debug
    mask_idx = np.where(mask.samples[0])[0]
    # mask datasets
    print("Masking datasets")
    dss = [ds[:, mask_idx] for ds in dss]
    qe = get_qe(fshemi, fsdir)
    kwargs = dict(
        ref_ds=0, nproc=nproc, featsel=1.0,
        mask_node_ids=np.unique(dss[0].fa.node_indices),
        compute_recon=False,
    )
    print("Running Hyperalignment")
    mappers = hyperalign(dss, qe, **kwargs)
    out_fn = '{0}_task-movie_space-fsaverage6_hemi-{1}_target-hpal_mapper.h5'
    for subj, mapper in zip(subjects, mappers):
        outdir_ = op.join(outdir, subj)
        if not op.exists(outdir_):
            os.makedirs(outdir_)
        out_fn_ = op.join(outdir_,
                          out_fn.format(subj, hemi))
        print("Saving {}".format(out_fn_))
        h5save(out_fn_, mapper)
def main():
    """Collect per-subject input files and launch hyperalignment."""
    p = parse_args()
    # Subjects are the sub-* directories inside the input dir.
    subjects = sorted(map(op.basename, glob(op.join(p.input_dir, 'sub-*'))))
    # One sorted list of functional dataset files per subject, restricted
    # to the requested hemisphere via the {} placeholder.
    list_fns = [
        sorted(glob(op.join(
            p.input_dir, subj, '*{}.func*.niml.dset').format(p.hemi)))
        for subj in subjects
    ]
    run(list_fns, subjects, p.hemi, p.fsdir, p.output_dir, p.nproc)
def parse_args():
    """Define and parse this script's command-line arguments."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        '--input-dir', '-i', type=str, required=True,
        help='input directory containing subdirectories '
             'with data to train hyperalignment on for '
             'each participant')
    parser.add_argument(
        '--hemi', '-m', type=str, required=True, choices=['L', 'R'],
        help='which hemisphere to run hpal on')
    parser.add_argument(
        '--fsdir', '-f', type=str, required=True,
        help='freesurfer directory')
    parser.add_argument(
        '--nproc', '-n', type=int, required=True,
        help='number of procs to use for hpal')
    parser.add_argument(
        '--output-dir', '-o', type=str, required=True,
        help='output directory. mappers will be stored '
             'within one folder for each subject')
    return parser.parse_args()
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"mvpa2.datasets.vstack",
"argparse.ArgumentParser",
"os.makedirs",
"mvpa2.base.hdf5.h5save",
"mvpa2.misc.surfing.queryengine.SurfaceQueryEngine",
"joblib.parallel.delayed",
"mvpa2.datasets.niml.read",
"mvpa2.mappers.zscore.zscore",
"os.path.exists",
"joblib.parallel.Parallel",
"matplotlib.use",
... | [((349, 370), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (363, 370), False, 'import matplotlib\n'), ((1969, 1980), 'mvpa2.datasets.vstack', 'vstack', (['dss'], {}), '(dss)\n', (1975, 1980), False, 'from mvpa2.datasets import niml, vstack\n'), ((2048, 2103), 'mvpa2.algorithms.searchlight_hyperalignment.SearchlightHyperalignment', 'SearchlightHyperalignment', ([], {'queryengine': 'qe'}), '(queryengine=qe, **hpalkwargs)\n', (2073, 2103), False, 'from mvpa2.algorithms.searchlight_hyperalignment import SearchlightHyperalignment\n'), ((2464, 2500), 'mvpa2.misc.surfing.queryengine.SurfaceQueryEngine', 'SurfaceQueryEngine', (['s'], {'radius': 'radius'}), '(s, radius=radius)\n', (2482, 2500), False, 'from mvpa2.misc.surfing.queryengine import SurfaceQueryEngine\n'), ((4118, 4162), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (4141, 4162), False, 'import argparse\n'), ((1752, 1765), 'mvpa2.datasets.niml.read', 'niml.read', (['fn'], {}), '(fn)\n', (1761, 1765), False, 'from mvpa2.datasets import niml, vstack\n'), ((1869, 1879), 'mvpa2.mappers.zscore.zscore', 'zscore', (['ds'], {}), '(ds)\n', (1875, 1879), False, 'from mvpa2.mappers.zscore import zscore\n'), ((1912, 1935), 'numpy.arange', 'np.arange', (['ds.nfeatures'], {}), '(ds.nfeatures)\n', (1921, 1935), True, 'import numpy as np\n'), ((2251, 2293), 'os.path.join', 'op.join', (['fsdir', '"""fsaverage6/SUMA"""', 'mask_fn'], {}), "(fsdir, 'fsaverage6/SUMA', mask_fn)\n", (2258, 2293), True, 'import os.path as op\n'), ((2411, 2453), 'os.path.join', 'op.join', (['fsdir', '"""fsaverage6/SUMA"""', 'surf_fn'], {}), "(fsdir, 'fsaverage6/SUMA', surf_fn)\n", (2418, 2453), True, 'import os.path as op\n'), ((2777, 2799), 'joblib.parallel.Parallel', 'Parallel', ([], {'n_jobs': 'nproc'}), '(n_jobs=nproc)\n', (2785, 2799), False, 'from joblib.parallel import Parallel, delayed\n'), ((2967, 2992), 'numpy.where', 'np.where', 
(['mask.samples[0]'], {}), '(mask.samples[0])\n', (2975, 2992), True, 'import numpy as np\n'), ((3500, 3521), 'os.path.join', 'op.join', (['outdir', 'subj'], {}), '(outdir, subj)\n', (3507, 3521), True, 'import os.path as op\n'), ((3729, 3752), 'mvpa2.base.hdf5.h5save', 'h5save', (['out_fn_', 'mapper'], {}), '(out_fn_, mapper)\n', (3735, 3752), False, 'from mvpa2.base.hdf5 import h5save\n'), ((3205, 3238), 'numpy.unique', 'np.unique', (['dss[0].fa.node_indices'], {}), '(dss[0].fa.node_indices)\n', (3214, 3238), True, 'import numpy as np\n'), ((3537, 3555), 'os.path.exists', 'op.exists', (['outdir_'], {}), '(outdir_)\n', (3546, 3555), True, 'import os.path as op\n'), ((3569, 3589), 'os.makedirs', 'os.makedirs', (['outdir_'], {}), '(outdir_)\n', (3580, 3589), False, 'import os\n'), ((2800, 2822), 'joblib.parallel.delayed', 'delayed', (['load_datasets'], {}), '(load_datasets)\n', (2807, 2822), False, 'from joblib.parallel import Parallel, delayed\n'), ((3832, 3861), 'os.path.join', 'op.join', (['p.input_dir', '"""sub-*"""'], {}), "(p.input_dir, 'sub-*')\n", (3839, 3861), True, 'import os.path as op\n'), ((3902, 3951), 'os.path.join', 'op.join', (['p.input_dir', 'subj', '"""*{}.func*.niml.dset"""'], {}), "(p.input_dir, subj, '*{}.func*.niml.dset')\n", (3909, 3951), True, 'import os.path as op\n')] |
# Creating tic tac toe game
# create a board
# assign cross and zero for players
# play functon
# get user rows and column data
# place function
# check function -> row function, col function, diag function
import numpy as np
# 3x3 board; '_' marks an empty cell.
board = np.array([['_', '_', '_'], ['_', '_', '_'], ['_', '_', '_']])
# print(board)
# Player symbols: p1 plays crosses, p2 plays noughts.
p1 = 'x'
p2 = 'o'
def place(sym):
    """Prompt the player until a valid empty cell is chosen, then mark it
    with `sym` (rows/columns are entered 1-based)."""
    while True:
        print(board)
        row_in = int(input('enter 1 2 3 for row'))
        col_in = int(input('enter 1 2 3 for column'))
        if 1 <= row_in <= 3 and 1 <= col_in <= 3 and board[row_in-1][col_in-1] == '_':
            board[row_in-1][col_in-1] = sym
            return
        print('invalid choice, enter again')
def row(sym):
    """Return True if any row of the board is filled with `sym`."""
    for r in range(3):
        if all(board[r][c] == sym for c in range(3)):
            return True
def col(sym):
    """Return True if any column of the board is filled with `sym`."""
    for c in range(3):
        if all(board[r][c] == sym for r in range(3)):
            return True
def diag(sym):
    """Return True if either diagonal of the board is filled with `sym`.

    Fixes the original implementation, which reset its counters inside the
    outer loop (so they could never reach 3) and — because the centre cell
    satisfies i == j before the i + j == 2 branch — never counted the
    centre for the anti-diagonal: diagonal wins were never detected.
    """
    main_count = 0
    anti_count = 0
    for i in range(3):
        if board[i][i] == sym:
            main_count += 1
        if board[i][2 - i] == sym:
            anti_count += 1
    if main_count == 3 or anti_count == 3:
        return True
def check(sym):
    """Return True if `sym` completed a row, a column, or a diagonal."""
    won = row(sym) or col(sym) or diag(sym)
    if won:
        return True
def play():
    """Alternate turns between x and o for up to 9 moves, announce the
    winner, or announce a draw if nobody completed a line."""
    for turn in range(9):
        if turn % 2 == 0:
            print("x's turn")
            place(p1)
            if check(p1):
                print('x won')
                break
        else:
            print("o's turn")
            place(p2)
            if check(p2):
                print('o won')
                break
    # A draw only happens when NEITHER player has a winning line. The
    # original condition `not(check(p1) and not(check(p2)))` wrongly
    # printed 'draw' whenever o won.
    if not check(p1) and not check(p2):
        print('draw')


# Start the interactive game only when run as a script, so importing this
# module no longer blocks on user input.
if __name__ == '__main__':
    play()
| [
"numpy.array"
] | [((238, 299), 'numpy.array', 'np.array', (["[['_', '_', '_'], ['_', '_', '_'], ['_', '_', '_']]"], {}), "([['_', '_', '_'], ['_', '_', '_'], ['_', '_', '_']])\n", (246, 299), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import matplotlib.pyplot as plt
import glob
import pickle
# Calibrate the camera from chessboard images and save the distortion
# parameters, then show each image next to its undistorted version.
images = glob.glob('./camera_cal/calibration*.jpg')

# Arrays to store object and image points from all the images
objpoints = []  # 3d points in real world space
imgpoints = []  # 2d points in image plane

# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*9, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)  # x, y coordinates

for fname in images:
    # read in each image
    img = cv2.imread(fname)
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners (9x6 inner corners)
    ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
    # If found, record the correspondence (truth test instead of `== True`)
    if ret:
        imgpoints.append(corners)
        objpoints.append(objp)
        # Draw and display the corners
        # cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
        # plt.imshow(img)
        # plt.show()

ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, (1280, 720), None, None)

distortion_param = {"mtx": mtx, "dist": dist}
# Context manager closes the pickle file deterministically (the original
# passed an anonymous open() and leaked the handle).
with open("distortion_param.p", "wb") as param_file:
    pickle.dump(distortion_param, param_file)

for fname in images:
    # read in each image
    img = cv2.imread(fname)
    # undistort image
    undistorted = cv2.undistort(img, mtx, dist, None, mtx)
    # show distorted and undistorted images side by side
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
    f.tight_layout()
    ax1.imshow(img)
    ax1.set_title('Original Image', fontsize=50)
    ax2.imshow(undistorted)
    ax2.set_title('Undistorted Image', fontsize=50)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
    plt.savefig("./output_images/distortion_correction.png")
plt.show() | [
"cv2.findChessboardCorners",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots_adjust",
"cv2.cvtColor",
"numpy.zeros",
"cv2.imread",
"cv2.calibrateCamera",
"glob.glob",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"cv2.undistort"
] | [((98, 140), 'glob.glob', 'glob.glob', (['"""./camera_cal/calibration*.jpg"""'], {}), "('./camera_cal/calibration*.jpg')\n", (107, 140), False, 'import glob\n'), ((370, 402), 'numpy.zeros', 'np.zeros', (['(6 * 9, 3)', 'np.float32'], {}), '((6 * 9, 3), np.float32)\n', (378, 402), True, 'import numpy as np\n'), ((1018, 1084), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objpoints', 'imgpoints', '(1280, 720)', 'None', 'None'], {}), '(objpoints, imgpoints, (1280, 720), None, None)\n', (1037, 1084), False, 'import cv2\n'), ((525, 542), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (535, 542), False, 'import cv2\n'), ((582, 619), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (594, 619), False, 'import cv2\n'), ((674, 719), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', '(9, 6)', 'None'], {}), '(gray, (9, 6), None)\n', (699, 719), False, 'import cv2\n'), ((1253, 1270), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (1263, 1270), False, 'import cv2\n'), ((1312, 1352), 'cv2.undistort', 'cv2.undistort', (['img', 'mtx', 'dist', 'None', 'mtx'], {}), '(img, mtx, dist, None, mtx)\n', (1325, 1352), False, 'import cv2\n'), ((1431, 1466), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(24, 9)'}), '(1, 2, figsize=(24, 9))\n', (1443, 1466), True, 'import matplotlib.pyplot as plt\n'), ((1641, 1700), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.0)', 'right': '(1)', 'top': '(0.9)', 'bottom': '(0.0)'}), '(left=0.0, right=1, top=0.9, bottom=0.0)\n', (1660, 1700), True, 'import matplotlib.pyplot as plt\n'), ((1703, 1759), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./output_images/distortion_correction.png"""'], {}), "('./output_images/distortion_correction.png')\n", (1714, 1759), True, 'import matplotlib.pyplot as plt\n'), ((1764, 1774), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1772, 1774), True, 
'import matplotlib.pyplot as plt\n')] |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import pandas as pd
from scipy.linalg import svd, lstsq
from skbio.util._decorator import experimental
from ._ordination_results import OrdinationResults
from ._utils import corr, svd_rank, scale
@experimental(as_of="0.4.0")
def rda(y, x, scale_Y=False, scaling=1):
r"""Compute redundancy analysis, a type of canonical analysis.
It is related to PCA and multiple regression because the explained
variables `y` are fitted to the explanatory variables `x` and PCA
is then performed on the fitted values. A similar process is
performed on the residuals.
RDA should be chosen if the studied gradient is small, and CCA
when it's large, so that the contingency table is sparse.
Parameters
----------
y : pd.DataFrame
:math:`n \times p` response matrix, where :math:`n` is the number
of samples and :math:`p` is the number of features. Its columns
need be dimensionally homogeneous (or you can set `scale_Y=True`).
This matrix is also referred to as the community matrix that
commonly stores information about species abundances
x : pd.DataFrame
:math:`n \times m, n \geq m` matrix of explanatory
variables, where :math:`n` is the number of samples and
:math:`m` is the number of metadata variables. Its columns
need not be standardized, but doing so turns regression
coefficients into standard regression coefficients.
scale_Y : bool, optional
Controls whether the response matrix columns are scaled to
have unit standard deviation. Defaults to `False`.
scaling : int
Scaling type 1 produces a distance biplot. It focuses on
the ordination of rows (samples) because their transformed
distances approximate their original euclidean
distances. Especially interesting when most explanatory
variables are binary.
Scaling type 2 produces a correlation biplot. It focuses
on the relationships among explained variables (`y`). It
is interpreted like scaling type 1, but taking into
account that distances between objects don't approximate
their euclidean distances.
See more details about distance and correlation biplots in
[1]_, \S 9.1.4.
Returns
-------
OrdinationResults
Object that stores the computed eigenvalues, the
proportion explained by each of them (per unit),
transformed coordinates for feature and samples, biplot
scores, sample constraints, etc.
See Also
--------
ca
cca
OrdinationResults
Notes
-----
The algorithm is based on [1]_, \S 11.1, and is expected to
give the same results as ``rda(y, x)`` in R's package vegan.
The eigenvalues reported in vegan are re-normalized to
:math:`\sqrt{\frac{s}{n-1}}` `n` is the number of samples,
and `s` is the original eigenvalues. Here we will only return
the original eigenvalues, as recommended in [1]_.
References
----------
.. [1] <NAME>. and <NAME>. 1998. Numerical
Ecology. Elsevier, Amsterdam.
"""
Y = y.as_matrix()
X = x.as_matrix()
n, p = y.shape
n_, m = x.shape
if n != n_:
raise ValueError(
"Both data matrices must have the same number of rows.")
if n < m:
# Mmm actually vegan is able to do this case, too
raise ValueError(
"Explanatory variables cannot have less rows than columns.")
sample_ids = y.index
feature_ids = y.columns
# Centre response variables (they must be dimensionally
# homogeneous)
Y = scale(Y, with_std=scale_Y)
# Centre explanatory variables
X = scale(X, with_std=False)
# Distribution of variables should be examined and transformed
# if necessary (see paragraph 4 in p. 580 L&L 1998)
# Compute Y_hat (fitted values by multivariate linear
# regression, that is, linear least squares). Formula 11.6 in
# L&L 1998 involves solving the normal equations, but that fails
# when cond(X) ~ eps**(-0.5). A more expensive but much more
# stable solution (fails when cond(X) ~ eps**-1) is computed
# using the QR decomposition of X = QR:
# (11.6) Y_hat = X [X' X]^{-1} X' Y
# = QR [R'Q' QR]^{-1} R'Q' Y
# = QR [R' R]^{-1} R'Q' Y
# = QR R^{-1} R'^{-1} R' Q' Y
# = Q Q' Y
# and B (matrix of regression coefficients)
# (11.4) B = [X' X]^{-1} X' Y
# = R^{-1} R'^{-1} R' Q' Y
# = R^{-1} Q'
# Q, R = np.linalg.qr(X)
# Y_hat = Q.dot(Q.T).dot(Y)
# B = scipy.linalg.solve_triangular(R, Q.T.dot(Y))
# This works provided X has full rank. When not, you can still
# fix it using R's pseudoinverse or partitioning R. To avoid any
# issues, like the numerical instability when trying to
# reproduce an example in L&L where X was rank-deficient, we'll
# just use `np.linalg.lstsq`, which uses the SVD decomposition
# under the hood and so it's also more expensive.
B, _, rank_X, _ = lstsq(X, Y)
Y_hat = X.dot(B)
# Now let's perform PCA on the fitted values from the multiple
# regression
u, s, vt = svd(Y_hat, full_matrices=False)
# vt are the right eigenvectors, which is what we need to
# perform PCA. That is, we're changing points in Y_hat from the
# canonical basis to the orthonormal basis given by the right
# eigenvectors of Y_hat (or equivalently, the eigenvectors of
# the covariance matrix Y_hat.T.dot(Y_hat))
# See 3) in p. 583 in L&L 1998
rank = svd_rank(Y_hat.shape, s)
# Theoretically, there're at most min(p, m, n - 1) non-zero eigenvalues
U = vt[:rank].T # U as in Fig. 11.2
# Ordination in the space of response variables. Its columns are
# sample scores. (Eq. 11.12)
F = Y.dot(U)
# Ordination in the space of explanatory variables. Its columns
# are fitted sample scores. (Eq. 11.13)
Z = Y_hat.dot(U)
# Canonical coefficients (formula 11.14)
# C = B.dot(U) # Not used
Y_res = Y - Y_hat
# PCA on the residuals
u_res, s_res, vt_res = svd(Y_res, full_matrices=False)
# See 9) in p. 587 in L&L 1998
rank_res = svd_rank(Y_res.shape, s_res)
# Theoretically, there're at most min(p, n - 1) non-zero eigenvalues as
U_res = vt_res[:rank_res].T
F_res = Y_res.dot(U_res) # Ordination in the space of residuals
eigenvalues = np.r_[s[:rank], s_res[:rank_res]]
# Compute scores
if scaling not in {1, 2}:
raise NotImplementedError("Only scalings 1, 2 available for RDA.")
# According to the vegan-FAQ.pdf, the scaling factor for scores
# is (notice that L&L 1998 says in p. 586 that such scaling
# doesn't affect the interpretation of a biplot):
eigvals = pd.Series(
eigenvalues, index=['RDA%d' % (i+1) for i in range(len(eigenvalues))])
const = np.sum(eigenvalues**2)**0.25
if scaling == 1:
scaling_factor = const
elif scaling == 2:
scaling_factor = eigenvalues / const
feature_scores = np.hstack((U, U_res)) * scaling_factor
sample_scores = np.hstack((F, F_res)) / scaling_factor
feature_scores = pd.DataFrame(
feature_scores, index=feature_ids,
columns=['RDA%d' % (i+1) for i in range(feature_scores.shape[1])])
sample_scores = pd.DataFrame(
sample_scores, index=sample_ids,
columns=['RDA%d' % (i+1) for i in range(sample_scores.shape[1])])
# TODO not yet used/displayed
sample_constraints = np.hstack((Z, F_res)) / scaling_factor
sample_constraints = pd.DataFrame(
sample_constraints, index=sample_ids,
columns=['RDA%d' % (i+1) for i in range(sample_constraints.shape[1])])
# Vegan seems to compute them as corr(X[:, :rank_X],
# u) but I don't think that's a good idea. In fact, if
# you take the example shown in Figure 11.3 in L&L 1998 you
# can see that there's an arrow for each of the 4
# environmental variables (depth, coral, sand, other) even if
# other = not(coral or sand)
biplot_scores = corr(X, u)
biplot_scores = pd.DataFrame(
biplot_scores, index=x.columns,
columns=['RDA%d' % (i+1) for i in range(biplot_scores.shape[1])])
# The "Correlations of environmental variables with sample
# scores" from table 11.4 are quite similar to vegan's biplot
# scores, but they're computed like this:
# corr(X, F))
p_explained = pd.Series(
eigenvalues / eigenvalues.sum(),
index=['RDA%d' % (i+1) for i in range(len(eigenvalues))])
return OrdinationResults('RDA', 'Redundancy Analysis',
eigvals=eigvals,
proportion_explained=p_explained,
features=feature_scores,
samples=sample_scores,
biplot_scores=biplot_scores,
sample_constraints=sample_constraints)
| [
"numpy.sum",
"skbio.util._decorator.experimental",
"numpy.hstack",
"scipy.linalg.svd",
"scipy.linalg.lstsq"
] | [((573, 600), 'skbio.util._decorator.experimental', 'experimental', ([], {'as_of': '"""0.4.0"""'}), "(as_of='0.4.0')\n", (585, 600), False, 'from skbio.util._decorator import experimental\n'), ((5469, 5480), 'scipy.linalg.lstsq', 'lstsq', (['X', 'Y'], {}), '(X, Y)\n', (5474, 5480), False, 'from scipy.linalg import svd, lstsq\n'), ((5601, 5632), 'scipy.linalg.svd', 'svd', (['Y_hat'], {'full_matrices': '(False)'}), '(Y_hat, full_matrices=False)\n', (5604, 5632), False, 'from scipy.linalg import svd, lstsq\n'), ((6539, 6570), 'scipy.linalg.svd', 'svd', (['Y_res'], {'full_matrices': '(False)'}), '(Y_res, full_matrices=False)\n', (6542, 6570), False, 'from scipy.linalg import svd, lstsq\n'), ((7310, 7334), 'numpy.sum', 'np.sum', (['(eigenvalues ** 2)'], {}), '(eigenvalues ** 2)\n', (7316, 7334), True, 'import numpy as np\n'), ((7480, 7501), 'numpy.hstack', 'np.hstack', (['(U, U_res)'], {}), '((U, U_res))\n', (7489, 7501), True, 'import numpy as np\n'), ((7539, 7560), 'numpy.hstack', 'np.hstack', (['(F, F_res)'], {}), '((F, F_res))\n', (7548, 7560), True, 'import numpy as np\n'), ((7940, 7961), 'numpy.hstack', 'np.hstack', (['(Z, F_res)'], {}), '((Z, F_res))\n', (7949, 7961), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
import astropy.constants as const
import pandas as pd
import os
package_directory = os.path.dirname(os.path.abspath(__file__)) + '/'
def mag2flux(mag):
	"""Convert an AB magnitude to a flux density in erg / s / cm^2 / Hz."""
	flux_value = 10 ** (-(mag + 48.6) / 2.5)
	return flux_value * u.erg / u.s / u.cm ** 2 / u.Hz
class ETC():
	"""Exposure-time calculator.

	Combines a Detector (built from a named camera + bandpass), a named
	Telescope, a Sky model for the given moon phase, and a point Source of
	the given magnitude, and computes SNR as a function of exposure time
	(or the exposure time needed to reach a target SNR).

	Bandpass and Detector are classes defined elsewhere in this module.
	"""
	def __init__(self,bandpass,detector,telescope,source_mag=np.nan,moon_phase='dark',seeing=2):
		self.detector = self._set_detector(detector,bandpass)
		self.telescope = self._set_telescope(telescope)
		self.source_mag = source_mag
		self.seeing = seeing  # presumably arcsec FWHM; used to size the aperture
		self.sky = Sky(moon_phase, self.detector, self.telescope)
		self.source = Source(source_mag, self.detector, self.telescope)
		self.exp_time = np.nan
		# calculated
		self.pixels = self.aperture_size()  # note: already a pixel COUNT (pix**2)
		self.noise = np.nan
		self.signal = np.nan
		self.snr = np.nan

	def _get_detector_params(self,name):
		"""Return the camera_params.csv row matching `name` (case-insensitive).

		Raises ValueError if the detector name is unknown.
		"""
		table = pd.read_csv(package_directory + 'camera_params.csv')
		ind = np.where(name.upper() == table['name'].values)[0]
		if len(ind) == 0:
			m = 'Detector {} does not exist'.format(name)
			raise ValueError(m)
		params = table.iloc[ind]
		return params

	def _get_bandpass(self,bandpass):
		# Bandpass is defined elsewhere in this module.
		b = Bandpass(bandpass)
		return b

	def _set_detector(self,name,bandpass):
		"""Build a Detector from the named camera parameters and bandpass."""
		p = self._get_detector_params(name)
		b = self._get_bandpass(bandpass)
		d = Detector(name = p['name'][0], dark_current = p['dark_current'][0] / u.s,
				 read_noise = p['read_noise'][0], gain = p['gain'][0],
				 pixel_size = p['pixel_size'][0] * u.um, bandpass = b)
		return d

	def _get_telescope_params(self,name):
		"""Return the telescope_params.csv row matching `name`.

		Raises ValueError if the telescope name is unknown.
		NOTE(review): the bare except also swallows unrelated errors.
		"""
		table = pd.read_csv(package_directory + 'telescope_params.csv')
		try:
			ind = np.where(name == table['name'].values)[0][0]
		except:
			raise ValueError('Telescope {} does not exist'.format(name))
		params = table.iloc[ind]
		return params

	def _set_telescope(self,name):
		"""Build a Telescope from the named telescope parameters."""
		p = self._get_telescope_params(name)
		t = Telescope(name = p['name'], diameter = p['diameter'],
					  throughput = p['throughput'], f_num = p['f_num'])
		return t

	def calc_sky_photons(self):
		# Populate self.sky.photons.
		self.sky.sky_signal()

	def calc_source_photons(self):
		# Populate self.source.photons.
		self.source.source_signal()

	def aperture_size(self):
		"""Return the pixel count (pix**2) of a square aperture of side
		`seeing`, where pix is the seeing diameter in pixels.

		Assumes telescope.platescale / pixel-size-in-mm gives arcsec per
		pixel — TODO confirm units.
		"""
		ps = self.telescope.platescale / self.detector.pix_size.to(u.mm).value
		pix = np.ceil(self.seeing / ps)
		pix2 = pix**2
		return pix2

	def calculate_noise(self):
		"""Combine dark current, sky, source shot noise, and read noise
		in quadrature into self.noise."""
		self.sky.sky_signal()
		self.source.source_signal()
		# NOTE(review): self.pixels already holds pix**2 (see aperture_size),
		# so pixels**2 here is pix**4 — confirm this is intended.
		dc = self.detector.dark_current * self.exp_time * self.pixels**2
		sky = np.sqrt(self.sky.photons * self.telescope.throughput
					  * self.exp_time * self.detector.qe)
		source = np.sqrt(self.source.photons * self.telescope.throughput
						 * self.exp_time * self.detector.qe)
		read = self.detector.read_noise * self.pixels**2
		#print(dc)
		#print(sky)
		#print(source)
		#print(read)
		noise = np.sqrt(dc**2+sky**2+source**2+read**2)
		self.noise = noise

	def calculate_signal(self):
		"""Detected source counts for the current exposure time."""
		self.source.source_signal()
		source = (self.source.photons * self.telescope.throughput
				  * self.exp_time * self.detector.qe)
		self.signal = source

	def calculate_SNR(self):
		# SNR = signal / total noise for the current exposure time.
		self.calculate_noise()
		self.calculate_signal()
		self.snr = self.signal / self.noise

	def time_for_snr(self,snr,mag=None,plot=False):
		"""Return the exposure time needed to reach the requested SNR.

		If `mag` is given the source is replaced first. Scans exposure
		times from 0.1 s to 1e5 s in 1 s steps (vectorised) and returns
		the time whose SNR is closest to the target.
		"""
		if mag is not None:
			self.source = Source(mag, self.detector, self.telescope)
		time = np.arange(.1,1e5,1) * u.s
		# should be updated with a loop
		self.exp_time = time
		self.calculate_SNR()
		ind = np.argmin(abs(self.snr - snr))
		if plot:
			self.snr_plot(snr)
		#print('Time needed to reach $SNR= {}$'.format(snr)+' is ' + str(time[ind]))
		return time[ind]

	def snr_plot(self,snr):
		"""Plot SNR vs exposure time with the target SNR as a dashed line."""
		plt.figure()
		plt.plot(self.exp_time,self.snr)
		plt.axhline(snr,ls='--',color='r')
		plt.ylabel('SNR')
		plt.xlabel('Exposure time [s]')

	def snr_for_time(self,time,mag=None):
		"""Return the SNR reached in `time`; optionally replace the source
		with one of magnitude `mag` first."""
		if mag is not None:
			self.source = Source(mag, self.detector, self.telescope)
		self.exp_time = time
		self.calculate_SNR()
		return self.snr
class Telescope():
	"""Telescope configuration: aperture geometry, throughput, plate scale."""

	def __init__(self, name, diameter, throughput, f_num):
		self.name = name
		self.f_num = f_num
		self.diameter = diameter * u.m
		self.throughput = throughput
		radius = self.diameter / 2
		self.area = np.pi * radius ** 2
		self.platescale = self.calculate_plate_scale()

	def calculate_plate_scale(self):
		"""Plate scale derived from the focal ratio.

		NOTE(review): 206265 arcsec/radian divided by f_num; the usual
		plate-scale formula also involves the focal length — confirm units.
		"""
		return 206265 / self.f_num
class Sky():
def __init__(self,moon_phase,detector,telescope):
self.detector = detector
self.telescope = telescope
self.moon_phase = moon_phase
self.sky_brightness = self._get_sky()
self.photons = np.nan
def _get_band_brightness(self):
"""
Table values taken from:
https://www.mso.anu.edu.au/~pfrancis/reference/reference/node4.html
"""
tab = pd.read_csv(package_directory + 'sky_brightness.csv')
t = tab.iloc[self.detector.bandpass._tab_ind]
return t
def _get_sky(self):
tab = self._get_band_brightness()
sky = tab[self.moon_phase]
return sky
def sky_signal(self):
sky = mag2flux(self.sky_brightness)
# multiply by plate scale
flux = sky * self.telescope.platescale**2
#convert to SI units
flux = flux.to(u.J/u.s/u.Hz/u.m**2)
# convert to Lambda [J/s/m/m^2/pix]
flux_lam = flux * const.c / (self.detector.bandpass.wavelength.to(u.m)**2)
#flux_lam = flux_lam.to(u.J/ u.s/ u.m /u.m**2)
# multiply by bandwidth [J/s/m^2/pix]
flux_lam = flux_lam * self.detector.bandpass.bandwidth.to(u.m)
# multiply by size of telescope [J/s/pix]
flux_lam = flux_lam * self.telescope.area
# divide by average energy of a photon
sky_photons = flux_lam / ((const.h * const.c)/self.detector.bandpass.wavelength.to(u.m))
self.photons = sky_photons
class Source():
def __init__(self,mag,detector,telescope):
# defined
self.mag = mag
self.detector = detector
self.telescope = telescope
# calculated
self.photons = np.nan
def source_signal(self):
source = mag2flux(self.mag)
#convert to SI units
flux = source.to(u.J/u.s/u.Hz/u.m**2)
# convert to Lambda [J/s/m/m^2]
flux_lam = flux * const.c / (self.detector.bandpass.wavelength.to(u.m)**2)
# multiply by bandwidth [J/s/m^2]
flux_lam = flux_lam * self.detector.bandpass.bandwidth.to(u.m)
# multiply by size of pixel [J/s]
flux_lam = flux_lam * self.telescope.area
photons = flux_lam / ((const.h * const.c)/self.detector.bandpass.wavelength.to(u.m))
self.photons = photons
class Detector():
def __init__(self,name,dark_current,read_noise,gain,pixel_size,bandpass):
self.name = name
self.bandpass = bandpass
self.dark_current = dark_current
self.read_noise = read_noise
self.qe = self._get_qe()
self.gain = gain
self.pix_size = pixel_size
#self.check_inputs()
def check_inputs(self):
if (type(self.dark_current) is not float) | (type(self.dark_current) is not int):
m = 'dark_current must be a float or integer'
raise ValueError(m)
if (type(self.read_noise) is not float) | (type(self.read_noise) is not int):
m = 'read_noise must be a float or integer'
raise ValueError(m)
if type(self.pix_size) is not type(1*u.s):
m = 'pixel_size must have an astropy unit'
raise ValueError(m)
if type(self.bandpass) is not type(Bandpass):
m = 'bandpass must be the Bandpass class'
raise ValueError(m)
def integrated_dark_current(self,time):
dc = self.dark_current * time
return dc
def _get_qe(self):
tab = self.bandpass._table.iloc[self.bandpass._tab_ind]
qe = tab[self.name + '_qe']
return qe
class Bandpass():
def __init__(self,name):
self.name = name
self._table = self._load_table()
self._tab_ind = self._get_index()
self.bandwidth = self._get_bandwidth()
self.wavelength = self._get_wavelength()
def _load_table(self):
tab = pd.read_csv(package_directory + 'bandpass_params.csv')
return tab
def _get_index(self):
try:
ind = np.where(self.name == self._table['name'].values)[0][0]
except:
raise ValueError('No such filter!')
return ind
def _get_bandwidth(self):
bw = self._table.iloc[self._tab_ind]['bandwidth'] * u.nm
return bw
def _get_wavelength(self):
wav = self._table.iloc[self._tab_ind]['wavelength'] * u.nm
return wav
| [
"matplotlib.pyplot.axhline",
"os.path.abspath",
"numpy.ceil",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.sqrt"
] | [((180, 205), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (195, 205), False, 'import os\n'), ((907, 959), 'pandas.read_csv', 'pd.read_csv', (["(package_directory + 'camera_params.csv')"], {}), "(package_directory + 'camera_params.csv')\n", (918, 959), True, 'import pandas as pd\n'), ((1602, 1657), 'pandas.read_csv', 'pd.read_csv', (["(package_directory + 'telescope_params.csv')"], {}), "(package_directory + 'telescope_params.csv')\n", (1613, 1657), True, 'import pandas as pd\n'), ((2264, 2289), 'numpy.ceil', 'np.ceil', (['(self.seeing / ps)'], {}), '(self.seeing / ps)\n', (2271, 2289), True, 'import numpy as np\n'), ((2478, 2571), 'numpy.sqrt', 'np.sqrt', (['(self.sky.photons * self.telescope.throughput * self.exp_time * self.\n detector.qe)'], {}), '(self.sky.photons * self.telescope.throughput * self.exp_time * self\n .detector.qe)\n', (2485, 2571), True, 'import numpy as np\n'), ((2586, 2681), 'numpy.sqrt', 'np.sqrt', (['(self.source.photons * self.telescope.throughput * self.exp_time * self.\n detector.qe)'], {}), '(self.source.photons * self.telescope.throughput * self.exp_time *\n self.detector.qe)\n', (2593, 2681), True, 'import numpy as np\n'), ((2810, 2863), 'numpy.sqrt', 'np.sqrt', (['(dc ** 2 + sky ** 2 + source ** 2 + read ** 2)'], {}), '(dc ** 2 + sky ** 2 + source ** 2 + read ** 2)\n', (2817, 2863), True, 'import numpy as np\n'), ((3621, 3633), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3631, 3633), True, 'import matplotlib.pyplot as plt\n'), ((3636, 3669), 'matplotlib.pyplot.plot', 'plt.plot', (['self.exp_time', 'self.snr'], {}), '(self.exp_time, self.snr)\n', (3644, 3669), True, 'import matplotlib.pyplot as plt\n'), ((3671, 3707), 'matplotlib.pyplot.axhline', 'plt.axhline', (['snr'], {'ls': '"""--"""', 'color': '"""r"""'}), "(snr, ls='--', color='r')\n", (3682, 3707), True, 'import matplotlib.pyplot as plt\n'), ((3708, 3725), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""SNR"""'], {}), 
"('SNR')\n", (3718, 3725), True, 'import matplotlib.pyplot as plt\n'), ((3728, 3759), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Exposure time [s]"""'], {}), "('Exposure time [s]')\n", (3738, 3759), True, 'import matplotlib.pyplot as plt\n'), ((4661, 4714), 'pandas.read_csv', 'pd.read_csv', (["(package_directory + 'sky_brightness.csv')"], {}), "(package_directory + 'sky_brightness.csv')\n", (4672, 4714), True, 'import pandas as pd\n'), ((7645, 7699), 'pandas.read_csv', 'pd.read_csv', (["(package_directory + 'bandpass_params.csv')"], {}), "(package_directory + 'bandpass_params.csv')\n", (7656, 7699), True, 'import pandas as pd\n'), ((3315, 3342), 'numpy.arange', 'np.arange', (['(0.1)', '(100000.0)', '(1)'], {}), '(0.1, 100000.0, 1)\n', (3324, 3342), True, 'import numpy as np\n'), ((1674, 1712), 'numpy.where', 'np.where', (["(name == table['name'].values)"], {}), "(name == table['name'].values)\n", (1682, 1712), True, 'import numpy as np\n'), ((7753, 7802), 'numpy.where', 'np.where', (["(self.name == self._table['name'].values)"], {}), "(self.name == self._table['name'].values)\n", (7761, 7802), True, 'import numpy as np\n')] |
# Copyright 2021 NVIDIA Corporation. All rights reserved.
#
# Please refer to the NVIDIA end user license agreement (EULA) associated
# with this source code for terms and conditions that govern your use of
# this software. Any use, reproduction, disclosure, or distribution of
# this software and related documentation outside the terms of the EULA
# is strictly prohibited.
import ctypes
import numpy as np
import os
from cuda import cuda, cudart, nvrtc
from common.helper_cuda import checkCudaErrors
class KernelHelper:
def __init__(self, code, devID):
prog = checkCudaErrors(nvrtc.nvrtcCreateProgram(str.encode(code), b'sourceCode.cu', 0, [], []))
CUDA_HOME = os.getenv('CUDA_HOME')
if CUDA_HOME == None:
raise RuntimeError('Environment variable CUDA_HOME is not defined')
include_dirs = os.path.join(CUDA_HOME, 'include')
# Initialize CUDA
checkCudaErrors(cudart.cudaFree(0))
major = checkCudaErrors(cudart.cudaDeviceGetAttribute(cudart.cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor, devID))
minor = checkCudaErrors(cudart.cudaDeviceGetAttribute(cudart.cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor, devID))
_, nvrtc_minor = checkCudaErrors(nvrtc.nvrtcVersion())
use_cubin = (nvrtc_minor >= 1)
prefix = 'sm' if use_cubin else 'compute'
arch_arg = bytes(f'--gpu-architecture={prefix}_{major}{minor}', 'ascii')
try:
opts = [b'--fmad=true', arch_arg, '--include-path={}'.format(include_dirs).encode('UTF-8'),
b'--std=c++11', b'-default-device']
checkCudaErrors(nvrtc.nvrtcCompileProgram(prog, len(opts), opts))
except RuntimeError as err:
logSize = checkCudaErrors(nvrtc.nvrtcGetProgramLogSize(prog))
log = b' ' * logSize
checkCudaErrors(nvrtc.nvrtcGetProgramLog(prog, log))
print(log.decode())
print(err)
exit(-1)
if use_cubin:
dataSize = checkCudaErrors(nvrtc.nvrtcGetCUBINSize(prog))
data = b' ' * dataSize
checkCudaErrors(nvrtc.nvrtcGetCUBIN(prog, data))
else:
dataSize = checkCudaErrors(nvrtc.nvrtcGetPTXSize(prog))
data = b' ' * dataSize
checkCudaErrors(nvrtc.nvrtcGetPTX(prog, data))
self.module = checkCudaErrors(cuda.cuModuleLoadData(np.char.array(data)))
def getFunction(self, name):
return checkCudaErrors(cuda.cuModuleGetFunction(self.module, name))
| [
"numpy.char.array",
"cuda.nvrtc.nvrtcGetProgramLogSize",
"cuda.nvrtc.nvrtcGetCUBIN",
"cuda.nvrtc.nvrtcGetCUBINSize",
"cuda.cuda.cuModuleGetFunction",
"cuda.cudart.cudaFree",
"cuda.cudart.cudaDeviceGetAttribute",
"cuda.nvrtc.nvrtcGetPTX",
"cuda.nvrtc.nvrtcGetProgramLog",
"cuda.nvrtc.nvrtcGetPTXSize... | [((686, 708), 'os.getenv', 'os.getenv', (['"""CUDA_HOME"""'], {}), "('CUDA_HOME')\n", (695, 708), False, 'import os\n'), ((842, 876), 'os.path.join', 'os.path.join', (['CUDA_HOME', '"""include"""'], {}), "(CUDA_HOME, 'include')\n", (854, 876), False, 'import os\n'), ((928, 946), 'cuda.cudart.cudaFree', 'cudart.cudaFree', (['(0)'], {}), '(0)\n', (943, 946), False, 'from cuda import cuda, cudart, nvrtc\n'), ((981, 1079), 'cuda.cudart.cudaDeviceGetAttribute', 'cudart.cudaDeviceGetAttribute', (['cudart.cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor', 'devID'], {}), '(cudart.cudaDeviceAttr.\n cudaDevAttrComputeCapabilityMajor, devID)\n', (1010, 1079), False, 'from cuda import cuda, cudart, nvrtc\n'), ((1108, 1206), 'cuda.cudart.cudaDeviceGetAttribute', 'cudart.cudaDeviceGetAttribute', (['cudart.cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor', 'devID'], {}), '(cudart.cudaDeviceAttr.\n cudaDevAttrComputeCapabilityMinor, devID)\n', (1137, 1206), False, 'from cuda import cuda, cudart, nvrtc\n'), ((1244, 1264), 'cuda.nvrtc.nvrtcVersion', 'nvrtc.nvrtcVersion', ([], {}), '()\n', (1262, 1264), False, 'from cuda import cuda, cudart, nvrtc\n'), ((2485, 2528), 'cuda.cuda.cuModuleGetFunction', 'cuda.cuModuleGetFunction', (['self.module', 'name'], {}), '(self.module, name)\n', (2509, 2528), False, 'from cuda import cuda, cudart, nvrtc\n'), ((2034, 2063), 'cuda.nvrtc.nvrtcGetCUBINSize', 'nvrtc.nvrtcGetCUBINSize', (['prog'], {}), '(prog)\n', (2057, 2063), False, 'from cuda import cuda, cudart, nvrtc\n'), ((2128, 2159), 'cuda.nvrtc.nvrtcGetCUBIN', 'nvrtc.nvrtcGetCUBIN', (['prog', 'data'], {}), '(prog, data)\n', (2147, 2159), False, 'from cuda import cuda, cudart, nvrtc\n'), ((2214, 2241), 'cuda.nvrtc.nvrtcGetPTXSize', 'nvrtc.nvrtcGetPTXSize', (['prog'], {}), '(prog)\n', (2235, 2241), False, 'from cuda import cuda, cudart, nvrtc\n'), ((2306, 2335), 'cuda.nvrtc.nvrtcGetPTX', 'nvrtc.nvrtcGetPTX', (['prog', 'data'], {}), '(prog, data)\n', (2323, 2335), 
False, 'from cuda import cuda, cudart, nvrtc\n'), ((2398, 2417), 'numpy.char.array', 'np.char.array', (['data'], {}), '(data)\n', (2411, 2417), True, 'import numpy as np\n'), ((1762, 1796), 'cuda.nvrtc.nvrtcGetProgramLogSize', 'nvrtc.nvrtcGetProgramLogSize', (['prog'], {}), '(prog)\n', (1790, 1796), False, 'from cuda import cuda, cudart, nvrtc\n'), ((1859, 1894), 'cuda.nvrtc.nvrtcGetProgramLog', 'nvrtc.nvrtcGetProgramLog', (['prog', 'log'], {}), '(prog, log)\n', (1883, 1894), False, 'from cuda import cuda, cudart, nvrtc\n')] |
from matplotlib import pyplot as plt
from matplotlib import animation
import random
import numpy as np
import yaml
# Deliberately terrible code for teaching purposes
config = yaml.load(open("boids/config.yaml"))
boid_number = config["boid_number"]
x_position_limits = config["x_position_limits"]
y_position_limits = config["y_position_limits"]
x_velocity_limits = config["x_velocity_limits"]
y_velocity_limits = config["y_velocity_limits"]
avoid_distance = config["avoid_distance"]
match_speed_distance = config["match_speed_distance"]
middle_scaling = config["middle_scaling"]
match_scaling = config["match_scaling"]
class Flock(object):
def __init__(self, boid_number, x_position_limits, y_position_limits, x_velocity_limits, y_velocity_limits):
self.boid_number = boid_number
self.x_position_limits = x_position_limits
self.y_position_limits = y_position_limits
self.x_velocity_limits = x_velocity_limits
self.y_velocity_limits = y_velocity_limits
def initialise(self, limits, boids):
values = [random.uniform(limits[0], limits[1]) for x in boids]
return values
def new_flock(self):
boids = range(self.boid_number)
x_positions = self.initialise(self.x_position_limits, boids)
y_positions = self.initialise(self.y_position_limits, boids)
x_velocities = self.initialise(self.x_velocity_limits, boids)
y_velocities = self.initialise(self.y_velocity_limits, boids)
boid_positions = (x_positions, y_positions)
boid_velocities = (x_velocities, y_velocities)
return boid_positions, boid_velocities
myflock = Flock(boid_number, x_position_limits, y_position_limits, x_velocity_limits, y_velocity_limits)
boid_positions, boid_velocities = myflock.new_flock()
figure = plt.figure()
axes = plt.axes(xlim=(-500, 1500), ylim=(-500,1500))
scatter = axes.scatter(boid_positions[0], boid_positions[1])
class Flight(object):
def __init__(self, boid_number, boid_positions, boid_velocities, avoid_distance, match_speed_distance, middle_scaling, match_scaling):
self.boid_number = boid_number
self.boid_positions = boid_positions
self.boid_velocities = boid_velocities
self.avoid_distance = avoid_distance
self.match_speed_distance = match_speed_distance
self.middle_scaling = middle_scaling
self.match_scaling = match_scaling
def proximity(self, i, j, boid_positions, boid_velocities, distance):
return (boid_positions[0][j]-boid_positions[0][i])**2 + (boid_positions[1][j]-boid_positions[1][i])**2 < distance
def fly_towards_middle(self, i, j, boid_positions, boid_velocities):
boid_velocities[0][i] = boid_velocities[0][i]+(boid_positions[0][j]-boid_positions[0][i])*self.middle_scaling/self.boid_number
boid_velocities[1][i] = boid_velocities[1][i]+(boid_positions[1][j]-boid_positions[1][i])*self.middle_scaling/self.boid_number
return boid_positions, boid_velocities
def avoid_boids(self, i, j, boid_positions, boid_velocities):
if self.proximity(i,j,boid_positions,boid_velocities,self.avoid_distance):
boid_velocities[0][i] = boid_velocities[0][i]+(boid_positions[0][i]-boid_positions[0][j])
boid_velocities[1][i] = boid_velocities[1][i]+(boid_positions[1][i]-boid_positions[1][j])
return boid_positions, boid_velocities
def match_speed(self, i, j, boid_positions, boid_velocities):
if self.proximity(i,j,boid_positions,boid_velocities,self.match_speed_distance):
boid_velocities[0][i] = boid_velocities[0][i]+(boid_velocities[0][j]-boid_velocities[0][i])*self.match_scaling/self.boid_number
boid_velocities[1][i] = boid_velocities[1][i]+(boid_velocities[1][j]-boid_velocities[1][i])*self.match_scaling/self.boid_number
return boid_positions, boid_velocities
def move(self, boid_positions, boid_velocities, i):
boid_positions[0][i] = boid_positions[0][i]+boid_velocities[0][i]
boid_positions[1][i] = boid_positions[1][i]+boid_velocities[1][i]
return boid_positions
def update_boids(self):
boids = range(self.boid_number)
for i in boids:
for j in boids:
self.fly_towards_middle(i,j,self.boid_positions, self.boid_velocities)
self.avoid_boids(i,j,self.boid_positions, self.boid_velocities)
self.match_speed(i,j,self.boid_positions, self.boid_velocities)
for i in boids:
boid_positions = self.move(self.boid_positions, self.boid_velocities, i)
return boid_positions, boid_velocities
myflight = Flight(boid_number, boid_positions, boid_velocities, avoid_distance, match_speed_distance, middle_scaling, match_scaling)
def animate(frame):
boid_positions, boid_velocities = myflight.update_boids()
x_pos = np.array(boid_positions[0])
y_pos = np.array(boid_positions[1])
data = np.hstack((x_pos[:,np.newaxis], y_pos[:, np.newaxis]))
scatter.set_offsets(data)
anim = animation.FuncAnimation(figure, animate, frames=50, interval=50)
if __name__ =="__main__":
plt.show() | [
"matplotlib.pyplot.show",
"random.uniform",
"matplotlib.pyplot.axes",
"numpy.hstack",
"matplotlib.animation.FuncAnimation",
"matplotlib.pyplot.figure",
"numpy.array"
] | [((1814, 1826), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1824, 1826), True, 'from matplotlib import pyplot as plt\n'), ((1834, 1880), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'xlim': '(-500, 1500)', 'ylim': '(-500, 1500)'}), '(xlim=(-500, 1500), ylim=(-500, 1500))\n', (1842, 1880), True, 'from matplotlib import pyplot as plt\n'), ((5152, 5216), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['figure', 'animate'], {'frames': '(50)', 'interval': '(50)'}), '(figure, animate, frames=50, interval=50)\n', (5175, 5216), False, 'from matplotlib import animation\n'), ((4976, 5003), 'numpy.array', 'np.array', (['boid_positions[0]'], {}), '(boid_positions[0])\n', (4984, 5003), True, 'import numpy as np\n'), ((5016, 5043), 'numpy.array', 'np.array', (['boid_positions[1]'], {}), '(boid_positions[1])\n', (5024, 5043), True, 'import numpy as np\n'), ((5055, 5110), 'numpy.hstack', 'np.hstack', (['(x_pos[:, np.newaxis], y_pos[:, np.newaxis])'], {}), '((x_pos[:, np.newaxis], y_pos[:, np.newaxis]))\n', (5064, 5110), True, 'import numpy as np\n'), ((5248, 5258), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5256, 5258), True, 'from matplotlib import pyplot as plt\n'), ((1067, 1103), 'random.uniform', 'random.uniform', (['limits[0]', 'limits[1]'], {}), '(limits[0], limits[1])\n', (1081, 1103), False, 'import random\n')] |
import os
import glob
import unittest
import numpy as np
import onnx
from onnx import helper
from onnx import onnx_pb as onnx_proto
from onnxconverter_common.optimizer import optimize_onnx, optimize_onnx_model
working_path = os.path.abspath(os.path.dirname(__file__))
tmp_path = os.path.join(working_path, 'temp')
class OptimizerTestCase(unittest.TestCase):
@staticmethod
def get_temp_file(name):
if not os.path.exists(tmp_path):
os.mkdir(tmp_path)
return os.path.join(tmp_path, name)
def tearDown(self):
for fl in glob.glob(os.path.join(tmp_path, '*.onnx')):
os.remove(fl)
def test_optimizer(self):
val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)
nodes = []
nodes[0:] = \
[helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
name='const0',
data_type=onnx_proto.TensorProto.FLOAT,
dims=val.shape,
vals=val.flatten().astype(float)))]
nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'])]
nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'])]
nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'])]
nodes[4:] = [helper.make_node('Transpose', ['max0'], ['tranpose0'], perm=[0, 2, 3, 1])]
nodes[5:] = [helper.make_node('Transpose', ['tranpose0'], ['tranpose1'], perm=(0, 3, 1, 2))]
nodes[6:] = [helper.make_node('Relu', ['tranpose1'], ['output0'], perm=(0, 3, 1, 2))]
input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
graph = helper.make_graph(nodes, 'test0', [input0], [output0])
model = helper.make_model(graph)
self.assertIsNotNone(model)
onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
self.assertEqual(len(new_nodes), 3)
graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
model = helper.make_model(graph)
onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
self.assertIsNotNone(model)
def test_move_transpose(self):
val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)
nodes = []
nodes[0:] = \
[helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
name='const0',
data_type=onnx_proto.TensorProto.FLOAT,
dims=val.shape,
vals=val.flatten().astype(float)))]
nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'])]
nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'])]
nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'])]
nodes[4:] = [helper.make_node('Transpose', ['max0'], ['tranpose0'], perm=[0, 2, 3, 1])]
nodes[5:] = [helper.make_node('LeakyRelu', ['tranpose0'], ['tranpose1'])]
nodes[6:] = [helper.make_node('Relu', ['tranpose1'], ['output0'], perm=(0, 3, 1, 2))]
input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
graph = helper.make_graph(nodes, 'test0', [input0], [output0])
model = helper.make_model(graph)
self.assertIsNotNone(model)
onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
self.assertEqual(len(new_nodes), 5)
graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
model = helper.make_model(graph)
onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
self.assertIsNotNone(model)
def test_merge(self):
val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)
nodes = []
nodes[0:] = \
[helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
name='const0',
data_type=onnx_proto.TensorProto.FLOAT,
dims=val.shape,
vals=val.flatten().astype(float)))]
nodes[1:] = [helper.make_node('Max', ['input1'], ['max0'])]
nodes[2:] = [helper.make_node('Transpose', ['max0'], ['tranpose0'], perm=[0, 2, 3, 1])]
nodes[3:] = [helper.make_node('Transpose', ['tranpose0'], ['add_input1'], perm=(0, 3, 1, 2))]
nodes[4:] = [helper.make_node('Add', ['max0', 'add_input1'], ['output0'])]
input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
graph = helper.make_graph(nodes, 'test0', [input0], [output0])
model = helper.make_model(graph)
self.assertIsNotNone(model)
onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
model = helper.make_model(graph)
onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
self.assertEqual(len(new_nodes), 3)
self.assertIsNotNone(model)
def test_fan_out(self):
val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)
nodes = []
nodes[0:] = \
[helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
name='const0',
data_type=onnx_proto.TensorProto.FLOAT,
dims=val.shape,
vals=val.flatten().astype(float)),
name="0")]
nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'], name="1")]
nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'], name="2")]
nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'], name="3")]
nodes[4:] = [helper.make_node('Transpose', ['max0'], ['tranpose0'], perm=[0, 2, 3, 1], name="4")]
nodes[5:] = [helper.make_node('LeakyRelu', ['tranpose0'], ['leak0'], name="5")]
nodes[6:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak1'], name="6")]
nodes[7:] = [helper.make_node('Transpose', ['leak1'], ['add_input1'], perm=(0, 3, 1, 2), name="7")]
nodes[8:] = [helper.make_node('Add', ['leak0', 'add_input1'], ['output0'], name="8")]
input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
graph = helper.make_graph(nodes, 'test0', [input0], [output0])
model = helper.make_model(graph)
self.assertIsNotNone(model)
onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
model = helper.make_model(graph)
onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
self.assertEqual(len(new_nodes), 6)
self.assertIsNotNone(model)
def test_fan_in(self):
val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)
nodes = []
nodes[0:] = \
[helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
name='const0',
data_type=onnx_proto.TensorProto.FLOAT,
dims=val.shape,
vals=val.flatten().astype(float)),
name="0")]
nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'], name="1")]
nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'], name="2")]
nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'], name="3")]
nodes[4:] = [helper.make_node('LeakyRelu', ['max0'], ['leak0'], name="4")]
nodes[5:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak1'], name="5")]
nodes[6:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak2'], name="6")]
nodes[7:] = [helper.make_node('Transpose', ['leak1'], ['tranpose0'], perm=[0, 2, 3, 1], name="7")]
nodes[8:] = [helper.make_node('Transpose', ['leak2'], ['tranpose1'], perm=[0, 2, 3, 1], name="8")]
nodes[9:] = [helper.make_node('Add', ['tranpose0', 'tranpose1'], ['add0'], name="9")]
nodes[10:] = [helper.make_node('Transpose', ['add0'], ['tranpose2'], perm=[0, 3, 1, 2], name="10")]
nodes[11:] = [helper.make_node('Conv', ['tranpose2'], ['output0'], name="11")]
input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
graph = helper.make_graph(nodes, 'test0', [input0], [output0])
model = helper.make_model(graph)
self.assertIsNotNone(model)
onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
model = helper.make_model(graph)
onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
self.assertEqual(len(new_nodes), 6)
self.assertIsNotNone(model)
def test_NextToOutputSolution(self):
val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)
nodes = []
nodes[0:] = \
[helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
name='const0',
data_type=onnx_proto.TensorProto.FLOAT,
dims=val.shape,
vals=val.flatten().astype(float)),
name="0")]
nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'], name="1")]
nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'], name="2")]
nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'], name="3")]
nodes[4:] = [helper.make_node('Identity', ['max0'], ['output0'], name="4")]
input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
graph = helper.make_graph(nodes, 'test_NextToOutputSolution', [input0], [output0])
model = helper.make_model(graph)
self.assertIsNotNone(model)
new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
graph = helper.make_graph(new_nodes, 'test_NextToOutputSolution', [input0], [output0])
model = helper.make_model(graph)
self.assertEqual(len(new_nodes), 2)
self.assertIsNotNone(model)
@unittest.skipIf(onnx.defs.onnx_opset_version() < 9, "Optimizer for the model graph only happens on opset >= 9")
def test_opt_on_model(self):
val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)
nodes = []
nodes[0:] = \
[helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
name='const0',
data_type=onnx_proto.TensorProto.FLOAT,
dims=val.shape,
vals=val.flatten().astype(float)),
name="0")]
nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'], name="1")]
nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'], name="2")]
nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'], name="3")]
nodes[4:] = [helper.make_node('LeakyRelu', ['max0'], ['leak0'], name="4")]
nodes[5:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak1'], name="5")]
nodes[6:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak2'], name="6")]
nodes[7:] = [helper.make_node('Transpose', ['leak1'], ['tranpose0'], perm=[0, 2, 3, 1], name="7")]
nodes[8:] = [helper.make_node('Transpose', ['leak2'], ['tranpose1'], perm=[0, 2, 3, 1], name="8")]
nodes[9:] = [helper.make_node('Add', ['tranpose0', 'tranpose1'], ['add0'], name="9")]
nodes[10:] = [helper.make_node('Transpose', ['add0'], ['tranpose2'], perm=[0, 3, 1, 2], name="10")]
nodes[11:] = [helper.make_node('Conv', ['tranpose2'], ['output0'], name="11")]
input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
graph = helper.make_graph(nodes, 'test0', [input0], [output0])
model = helper.make_model(graph)
self.assertIsNotNone(model)
optd_model = optimize_onnx_model(model)
self.assertEqual(len(optd_model.graph.node), 5)
def test_merge_common(self):
val = np.asarray([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)
nodes = []
nodes[0:] = \
[helper.make_node('Constant', [], ['const1'], value=helper.make_tensor(
name='const0',
data_type=onnx_proto.TensorProto.FLOAT,
dims=val.shape,
vals=val.flatten().astype(float)),
name="0")]
nodes[1:] = [helper.make_node('Identity', ['const1'], ['identity1'], name="1")]
nodes[2:] = [helper.make_node('Identity', ['identity1'], ['identity2'], name="2")]
nodes[3:] = [helper.make_node('Max', ['input1', 'identity2'], ['max0'], name="3")]
nodes[4:] = [helper.make_node('LeakyRelu', ['max0'], ['leak0'], name="4")]
nodes[5:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak1'], name="5")]
nodes[6:] = [helper.make_node('LeakyRelu', ['leak0'], ['leak2'], name="6")]
nodes[7:] = [helper.make_node('Cast', ['leak1'], ['cast0'], to=6, name="7")]
nodes[8:] = [helper.make_node('Cast', ['cast0'], ['cast1'], to=1, name="8")]
nodes[9:] = [helper.make_node('Cast', ['leak2'], ['cast2'], to=6, name="9")]
nodes[10:] = [helper.make_node('Cast', ['cast2'], ['cast3'], to=7, name="10")]
nodes[11:] = [helper.make_node('Cast', ['cast3'], ['cast4'], to=1, name="11")]
nodes[12:] = [helper.make_node('Add', ['cast1', 'cast4'], ['add0'], name="12")]
nodes[13:] = [helper.make_node('Transpose', ['add0'], ['tranpose2'], perm=[0, 3, 1, 2], name="13")]
nodes[14:] = [helper.make_node('Conv', ['tranpose2'], ['output0'], name="14")]
input0 = helper.make_tensor_value_info('input1', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
output0 = helper.make_tensor_value_info('output0', onnx_proto.TensorProto.FLOAT, [1, 1, 2, 3])
graph = helper.make_graph(nodes, 'test0', [input0], [output0])
model = helper.make_model(graph)
self.assertIsNotNone(model)
onnx.save_model(model, self.get_temp_file('temp_before.onnx'))
new_nodes = optimize_onnx(nodes, inputs=[input0], outputs=[output0])
new_nodes = [n_ for n_ in new_nodes if not isinstance(n_, tuple)]
graph = helper.make_graph(new_nodes, 'test0', [input0], [output0])
model = helper.make_model(graph)
onnx.save_model(model, self.get_temp_file('temp_after.onnx'))
self.assertEqual(len(new_nodes), 11)
self.assertIsNotNone(model)
def test_onnx_models(self):
model_names = ['mobile_segnet_no_opt.onnx', 'srgan_no_opt.onnx', 'test_model_0_no_opt.onnx',
'test_model_1_no_opt.onnx']
num_transpose_list = [2, 3, 11, 5]
dir_path = os.path.dirname(os.path.realpath(__file__))
for idx_, model_name_ in enumerate(model_names):
model_dir = dir_path + '/data/' + model_name_
origin_model = onnx.load_model(model_dir)
opt_model = optimize_onnx_model(origin_model)
self.assertIsNotNone(opt_model)
num_transpose = sum([1 if n_.op_type == 'Transpose' else 0 for n_ in opt_model.graph.node])
self.assertEqual(num_transpose, num_transpose_list[idx_])
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"os.mkdir",
"os.remove",
"onnx.helper.make_node",
"onnxconverter_common.optimizer.optimize_onnx_model",
"onnx.helper.make_model",
"numpy.asarray",
"os.path.dirname",
"onnx.helper.make_tensor_value_info",
"os.path.exists",
"onnx.defs.onnx_opset_version",
"os.path.realpath",
"... | [((281, 315), 'os.path.join', 'os.path.join', (['working_path', '"""temp"""'], {}), "(working_path, 'temp')\n", (293, 315), False, 'import os\n'), ((243, 268), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (258, 268), False, 'import os\n'), ((17013, 17028), 'unittest.main', 'unittest.main', ([], {}), '()\n', (17026, 17028), False, 'import unittest\n'), ((496, 524), 'os.path.join', 'os.path.join', (['tmp_path', 'name'], {}), '(tmp_path, name)\n', (508, 524), False, 'import os\n'), ((684, 746), 'numpy.asarray', 'np.asarray', (['[[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]]', 'np.float32'], {}), '([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)\n', (694, 746), True, 'import numpy as np\n'), ((1593, 1680), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""input1"""', 'onnx_proto.TensorProto.FLOAT', '[1, 1, 2, 3]'], {}), "('input1', onnx_proto.TensorProto.FLOAT, [1, 1,\n 2, 3])\n", (1622, 1680), False, 'from onnx import helper\n'), ((1695, 1784), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""output0"""', 'onnx_proto.TensorProto.FLOAT', '[1, 1, 2, 3]'], {}), "('output0', onnx_proto.TensorProto.FLOAT, [1, \n 1, 2, 3])\n", (1724, 1784), False, 'from onnx import helper\n'), ((1797, 1851), 'onnx.helper.make_graph', 'helper.make_graph', (['nodes', '"""test0"""', '[input0]', '[output0]'], {}), "(nodes, 'test0', [input0], [output0])\n", (1814, 1851), False, 'from onnx import helper\n'), ((1868, 1892), 'onnx.helper.make_model', 'helper.make_model', (['graph'], {}), '(graph)\n', (1885, 1892), False, 'from onnx import helper\n'), ((2020, 2076), 'onnxconverter_common.optimizer.optimize_onnx', 'optimize_onnx', (['nodes'], {'inputs': '[input0]', 'outputs': '[output0]'}), '(nodes, inputs=[input0], outputs=[output0])\n', (2033, 2076), False, 'from onnxconverter_common.optimizer import optimize_onnx, optimize_onnx_model\n'), ((2211, 2269), 'onnx.helper.make_graph', 'helper.make_graph', 
(['new_nodes', '"""test0"""', '[input0]', '[output0]'], {}), "(new_nodes, 'test0', [input0], [output0])\n", (2228, 2269), False, 'from onnx import helper\n'), ((2286, 2310), 'onnx.helper.make_model', 'helper.make_model', (['graph'], {}), '(graph)\n', (2303, 2310), False, 'from onnx import helper\n'), ((2467, 2529), 'numpy.asarray', 'np.asarray', (['[[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]]', 'np.float32'], {}), '([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)\n', (2477, 2529), True, 'import numpy as np\n'), ((3357, 3444), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""input1"""', 'onnx_proto.TensorProto.FLOAT', '[1, 1, 2, 3]'], {}), "('input1', onnx_proto.TensorProto.FLOAT, [1, 1,\n 2, 3])\n", (3386, 3444), False, 'from onnx import helper\n'), ((3459, 3548), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""output0"""', 'onnx_proto.TensorProto.FLOAT', '[1, 1, 2, 3]'], {}), "('output0', onnx_proto.TensorProto.FLOAT, [1, \n 1, 2, 3])\n", (3488, 3548), False, 'from onnx import helper\n'), ((3561, 3615), 'onnx.helper.make_graph', 'helper.make_graph', (['nodes', '"""test0"""', '[input0]', '[output0]'], {}), "(nodes, 'test0', [input0], [output0])\n", (3578, 3615), False, 'from onnx import helper\n'), ((3632, 3656), 'onnx.helper.make_model', 'helper.make_model', (['graph'], {}), '(graph)\n', (3649, 3656), False, 'from onnx import helper\n'), ((3785, 3841), 'onnxconverter_common.optimizer.optimize_onnx', 'optimize_onnx', (['nodes'], {'inputs': '[input0]', 'outputs': '[output0]'}), '(nodes, inputs=[input0], outputs=[output0])\n', (3798, 3841), False, 'from onnxconverter_common.optimizer import optimize_onnx, optimize_onnx_model\n'), ((3976, 4034), 'onnx.helper.make_graph', 'helper.make_graph', (['new_nodes', '"""test0"""', '[input0]', '[output0]'], {}), "(new_nodes, 'test0', [input0], [output0])\n", (3993, 4034), False, 'from onnx import helper\n'), ((4051, 4075), 'onnx.helper.make_model', 'helper.make_model', 
(['graph'], {}), '(graph)\n', (4068, 4075), False, 'from onnx import helper\n'), ((4223, 4285), 'numpy.asarray', 'np.asarray', (['[[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]]', 'np.float32'], {}), '([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)\n', (4233, 4285), True, 'import numpy as np\n'), ((4950, 5037), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""input1"""', 'onnx_proto.TensorProto.FLOAT', '[1, 1, 2, 3]'], {}), "('input1', onnx_proto.TensorProto.FLOAT, [1, 1,\n 2, 3])\n", (4979, 5037), False, 'from onnx import helper\n'), ((5052, 5141), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""output0"""', 'onnx_proto.TensorProto.FLOAT', '[1, 1, 2, 3]'], {}), "('output0', onnx_proto.TensorProto.FLOAT, [1, \n 1, 2, 3])\n", (5081, 5141), False, 'from onnx import helper\n'), ((5154, 5208), 'onnx.helper.make_graph', 'helper.make_graph', (['nodes', '"""test0"""', '[input0]', '[output0]'], {}), "(nodes, 'test0', [input0], [output0])\n", (5171, 5208), False, 'from onnx import helper\n'), ((5225, 5249), 'onnx.helper.make_model', 'helper.make_model', (['graph'], {}), '(graph)\n', (5242, 5249), False, 'from onnx import helper\n'), ((5378, 5434), 'onnxconverter_common.optimizer.optimize_onnx', 'optimize_onnx', (['nodes'], {'inputs': '[input0]', 'outputs': '[output0]'}), '(nodes, inputs=[input0], outputs=[output0])\n', (5391, 5434), False, 'from onnxconverter_common.optimizer import optimize_onnx, optimize_onnx_model\n'), ((5525, 5583), 'onnx.helper.make_graph', 'helper.make_graph', (['new_nodes', '"""test0"""', '[input0]', '[output0]'], {}), "(new_nodes, 'test0', [input0], [output0])\n", (5542, 5583), False, 'from onnx import helper\n'), ((5600, 5624), 'onnx.helper.make_model', 'helper.make_model', (['graph'], {}), '(graph)\n', (5617, 5624), False, 'from onnx import helper\n'), ((5818, 5880), 'numpy.asarray', 'np.asarray', (['[[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]]', 'np.float32'], {}), '([[[[1.0, 2.0, 3.0], [1.1, 2.1, 
3.1]]]], np.float32)\n', (5828, 5880), True, 'import numpy as np\n'), ((6986, 7073), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""input1"""', 'onnx_proto.TensorProto.FLOAT', '[1, 1, 2, 3]'], {}), "('input1', onnx_proto.TensorProto.FLOAT, [1, 1,\n 2, 3])\n", (7015, 7073), False, 'from onnx import helper\n'), ((7088, 7177), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""output0"""', 'onnx_proto.TensorProto.FLOAT', '[1, 1, 2, 3]'], {}), "('output0', onnx_proto.TensorProto.FLOAT, [1, \n 1, 2, 3])\n", (7117, 7177), False, 'from onnx import helper\n'), ((7190, 7244), 'onnx.helper.make_graph', 'helper.make_graph', (['nodes', '"""test0"""', '[input0]', '[output0]'], {}), "(nodes, 'test0', [input0], [output0])\n", (7207, 7244), False, 'from onnx import helper\n'), ((7261, 7285), 'onnx.helper.make_model', 'helper.make_model', (['graph'], {}), '(graph)\n', (7278, 7285), False, 'from onnx import helper\n'), ((7414, 7470), 'onnxconverter_common.optimizer.optimize_onnx', 'optimize_onnx', (['nodes'], {'inputs': '[input0]', 'outputs': '[output0]'}), '(nodes, inputs=[input0], outputs=[output0])\n', (7427, 7470), False, 'from onnxconverter_common.optimizer import optimize_onnx, optimize_onnx_model\n'), ((7561, 7619), 'onnx.helper.make_graph', 'helper.make_graph', (['new_nodes', '"""test0"""', '[input0]', '[output0]'], {}), "(new_nodes, 'test0', [input0], [output0])\n", (7578, 7619), False, 'from onnx import helper\n'), ((7636, 7660), 'onnx.helper.make_model', 'helper.make_model', (['graph'], {}), '(graph)\n', (7653, 7660), False, 'from onnx import helper\n'), ((7853, 7915), 'numpy.asarray', 'np.asarray', (['[[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]]', 'np.float32'], {}), '([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)\n', (7863, 7915), True, 'import numpy as np\n'), ((9295, 9382), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""input1"""', 'onnx_proto.TensorProto.FLOAT', '[1, 1, 2, 3]'], 
{}), "('input1', onnx_proto.TensorProto.FLOAT, [1, 1,\n 2, 3])\n", (9324, 9382), False, 'from onnx import helper\n'), ((9397, 9486), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""output0"""', 'onnx_proto.TensorProto.FLOAT', '[1, 1, 2, 3]'], {}), "('output0', onnx_proto.TensorProto.FLOAT, [1, \n 1, 2, 3])\n", (9426, 9486), False, 'from onnx import helper\n'), ((9499, 9553), 'onnx.helper.make_graph', 'helper.make_graph', (['nodes', '"""test0"""', '[input0]', '[output0]'], {}), "(nodes, 'test0', [input0], [output0])\n", (9516, 9553), False, 'from onnx import helper\n'), ((9570, 9594), 'onnx.helper.make_model', 'helper.make_model', (['graph'], {}), '(graph)\n', (9587, 9594), False, 'from onnx import helper\n'), ((9723, 9779), 'onnxconverter_common.optimizer.optimize_onnx', 'optimize_onnx', (['nodes'], {'inputs': '[input0]', 'outputs': '[output0]'}), '(nodes, inputs=[input0], outputs=[output0])\n', (9736, 9779), False, 'from onnxconverter_common.optimizer import optimize_onnx, optimize_onnx_model\n'), ((9870, 9928), 'onnx.helper.make_graph', 'helper.make_graph', (['new_nodes', '"""test0"""', '[input0]', '[output0]'], {}), "(new_nodes, 'test0', [input0], [output0])\n", (9887, 9928), False, 'from onnx import helper\n'), ((9945, 9969), 'onnx.helper.make_model', 'helper.make_model', (['graph'], {}), '(graph)\n', (9962, 9969), False, 'from onnx import helper\n'), ((10176, 10238), 'numpy.asarray', 'np.asarray', (['[[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]]', 'np.float32'], {}), '([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)\n', (10186, 10238), True, 'import numpy as np\n'), ((10948, 11035), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""input1"""', 'onnx_proto.TensorProto.FLOAT', '[1, 1, 2, 3]'], {}), "('input1', onnx_proto.TensorProto.FLOAT, [1, 1,\n 2, 3])\n", (10977, 11035), False, 'from onnx import helper\n'), ((11050, 11139), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', 
(['"""output0"""', 'onnx_proto.TensorProto.FLOAT', '[1, 1, 2, 3]'], {}), "('output0', onnx_proto.TensorProto.FLOAT, [1, \n 1, 2, 3])\n", (11079, 11139), False, 'from onnx import helper\n'), ((11152, 11226), 'onnx.helper.make_graph', 'helper.make_graph', (['nodes', '"""test_NextToOutputSolution"""', '[input0]', '[output0]'], {}), "(nodes, 'test_NextToOutputSolution', [input0], [output0])\n", (11169, 11226), False, 'from onnx import helper\n'), ((11243, 11267), 'onnx.helper.make_model', 'helper.make_model', (['graph'], {}), '(graph)\n', (11260, 11267), False, 'from onnx import helper\n'), ((11325, 11381), 'onnxconverter_common.optimizer.optimize_onnx', 'optimize_onnx', (['nodes'], {'inputs': '[input0]', 'outputs': '[output0]'}), '(nodes, inputs=[input0], outputs=[output0])\n', (11338, 11381), False, 'from onnxconverter_common.optimizer import optimize_onnx, optimize_onnx_model\n'), ((11472, 11550), 'onnx.helper.make_graph', 'helper.make_graph', (['new_nodes', '"""test_NextToOutputSolution"""', '[input0]', '[output0]'], {}), "(new_nodes, 'test_NextToOutputSolution', [input0], [output0])\n", (11489, 11550), False, 'from onnx import helper\n'), ((11567, 11591), 'onnx.helper.make_model', 'helper.make_model', (['graph'], {}), '(graph)\n', (11584, 11591), False, 'from onnx import helper\n'), ((11837, 11899), 'numpy.asarray', 'np.asarray', (['[[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]]', 'np.float32'], {}), '([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)\n', (11847, 11899), True, 'import numpy as np\n'), ((13278, 13365), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""input1"""', 'onnx_proto.TensorProto.FLOAT', '[1, 1, 2, 3]'], {}), "('input1', onnx_proto.TensorProto.FLOAT, [1, 1,\n 2, 3])\n", (13307, 13365), False, 'from onnx import helper\n'), ((13380, 13469), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""output0"""', 'onnx_proto.TensorProto.FLOAT', '[1, 1, 2, 3]'], {}), "('output0', 
onnx_proto.TensorProto.FLOAT, [1, \n 1, 2, 3])\n", (13409, 13469), False, 'from onnx import helper\n'), ((13482, 13536), 'onnx.helper.make_graph', 'helper.make_graph', (['nodes', '"""test0"""', '[input0]', '[output0]'], {}), "(nodes, 'test0', [input0], [output0])\n", (13499, 13536), False, 'from onnx import helper\n'), ((13553, 13577), 'onnx.helper.make_model', 'helper.make_model', (['graph'], {}), '(graph)\n', (13570, 13577), False, 'from onnx import helper\n'), ((13636, 13662), 'onnxconverter_common.optimizer.optimize_onnx_model', 'optimize_onnx_model', (['model'], {}), '(model)\n', (13655, 13662), False, 'from onnxconverter_common.optimizer import optimize_onnx, optimize_onnx_model\n'), ((13767, 13829), 'numpy.asarray', 'np.asarray', (['[[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]]', 'np.float32'], {}), '([[[[1.0, 2.0, 3.0], [1.1, 2.1, 3.1]]]], np.float32)\n', (13777, 13829), True, 'import numpy as np\n'), ((15418, 15505), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""input1"""', 'onnx_proto.TensorProto.FLOAT', '[1, 1, 2, 3]'], {}), "('input1', onnx_proto.TensorProto.FLOAT, [1, 1,\n 2, 3])\n", (15447, 15505), False, 'from onnx import helper\n'), ((15520, 15609), 'onnx.helper.make_tensor_value_info', 'helper.make_tensor_value_info', (['"""output0"""', 'onnx_proto.TensorProto.FLOAT', '[1, 1, 2, 3]'], {}), "('output0', onnx_proto.TensorProto.FLOAT, [1, \n 1, 2, 3])\n", (15549, 15609), False, 'from onnx import helper\n'), ((15622, 15676), 'onnx.helper.make_graph', 'helper.make_graph', (['nodes', '"""test0"""', '[input0]', '[output0]'], {}), "(nodes, 'test0', [input0], [output0])\n", (15639, 15676), False, 'from onnx import helper\n'), ((15693, 15717), 'onnx.helper.make_model', 'helper.make_model', (['graph'], {}), '(graph)\n', (15710, 15717), False, 'from onnx import helper\n'), ((15846, 15902), 'onnxconverter_common.optimizer.optimize_onnx', 'optimize_onnx', (['nodes'], {'inputs': '[input0]', 'outputs': '[output0]'}), '(nodes, 
inputs=[input0], outputs=[output0])\n', (15859, 15902), False, 'from onnxconverter_common.optimizer import optimize_onnx, optimize_onnx_model\n'), ((15993, 16051), 'onnx.helper.make_graph', 'helper.make_graph', (['new_nodes', '"""test0"""', '[input0]', '[output0]'], {}), "(new_nodes, 'test0', [input0], [output0])\n", (16010, 16051), False, 'from onnx import helper\n'), ((16068, 16092), 'onnx.helper.make_model', 'helper.make_model', (['graph'], {}), '(graph)\n', (16085, 16092), False, 'from onnx import helper\n'), ((424, 448), 'os.path.exists', 'os.path.exists', (['tmp_path'], {}), '(tmp_path)\n', (438, 448), False, 'import os\n'), ((462, 480), 'os.mkdir', 'os.mkdir', (['tmp_path'], {}), '(tmp_path)\n', (470, 480), False, 'import os\n'), ((578, 610), 'os.path.join', 'os.path.join', (['tmp_path', '"""*.onnx"""'], {}), "(tmp_path, '*.onnx')\n", (590, 610), False, 'import os\n'), ((625, 638), 'os.remove', 'os.remove', (['fl'], {}), '(fl)\n', (634, 638), False, 'import os\n'), ((1065, 1120), 'onnx.helper.make_node', 'helper.make_node', (['"""Identity"""', "['const1']", "['identity1']"], {}), "('Identity', ['const1'], ['identity1'])\n", (1081, 1120), False, 'from onnx import helper\n'), ((1143, 1201), 'onnx.helper.make_node', 'helper.make_node', (['"""Identity"""', "['identity1']", "['identity2']"], {}), "('Identity', ['identity1'], ['identity2'])\n", (1159, 1201), False, 'from onnx import helper\n'), ((1224, 1282), 'onnx.helper.make_node', 'helper.make_node', (['"""Max"""', "['input1', 'identity2']", "['max0']"], {}), "('Max', ['input1', 'identity2'], ['max0'])\n", (1240, 1282), False, 'from onnx import helper\n'), ((1305, 1378), 'onnx.helper.make_node', 'helper.make_node', (['"""Transpose"""', "['max0']", "['tranpose0']"], {'perm': '[0, 2, 3, 1]'}), "('Transpose', ['max0'], ['tranpose0'], perm=[0, 2, 3, 1])\n", (1321, 1378), False, 'from onnx import helper\n'), ((1401, 1479), 'onnx.helper.make_node', 'helper.make_node', (['"""Transpose"""', "['tranpose0']", 
"['tranpose1']"], {'perm': '(0, 3, 1, 2)'}), "('Transpose', ['tranpose0'], ['tranpose1'], perm=(0, 3, 1, 2))\n", (1417, 1479), False, 'from onnx import helper\n'), ((1502, 1573), 'onnx.helper.make_node', 'helper.make_node', (['"""Relu"""', "['tranpose1']", "['output0']"], {'perm': '(0, 3, 1, 2)'}), "('Relu', ['tranpose1'], ['output0'], perm=(0, 3, 1, 2))\n", (1518, 1573), False, 'from onnx import helper\n'), ((2848, 2903), 'onnx.helper.make_node', 'helper.make_node', (['"""Identity"""', "['const1']", "['identity1']"], {}), "('Identity', ['const1'], ['identity1'])\n", (2864, 2903), False, 'from onnx import helper\n'), ((2926, 2984), 'onnx.helper.make_node', 'helper.make_node', (['"""Identity"""', "['identity1']", "['identity2']"], {}), "('Identity', ['identity1'], ['identity2'])\n", (2942, 2984), False, 'from onnx import helper\n'), ((3007, 3065), 'onnx.helper.make_node', 'helper.make_node', (['"""Max"""', "['input1', 'identity2']", "['max0']"], {}), "('Max', ['input1', 'identity2'], ['max0'])\n", (3023, 3065), False, 'from onnx import helper\n'), ((3088, 3161), 'onnx.helper.make_node', 'helper.make_node', (['"""Transpose"""', "['max0']", "['tranpose0']"], {'perm': '[0, 2, 3, 1]'}), "('Transpose', ['max0'], ['tranpose0'], perm=[0, 2, 3, 1])\n", (3104, 3161), False, 'from onnx import helper\n'), ((3184, 3243), 'onnx.helper.make_node', 'helper.make_node', (['"""LeakyRelu"""', "['tranpose0']", "['tranpose1']"], {}), "('LeakyRelu', ['tranpose0'], ['tranpose1'])\n", (3200, 3243), False, 'from onnx import helper\n'), ((3266, 3337), 'onnx.helper.make_node', 'helper.make_node', (['"""Relu"""', "['tranpose1']", "['output0']"], {'perm': '(0, 3, 1, 2)'}), "('Relu', ['tranpose1'], ['output0'], perm=(0, 3, 1, 2))\n", (3282, 3337), False, 'from onnx import helper\n'), ((4604, 4649), 'onnx.helper.make_node', 'helper.make_node', (['"""Max"""', "['input1']", "['max0']"], {}), "('Max', ['input1'], ['max0'])\n", (4620, 4649), False, 'from onnx import helper\n'), ((4672, 4745), 
'onnx.helper.make_node', 'helper.make_node', (['"""Transpose"""', "['max0']", "['tranpose0']"], {'perm': '[0, 2, 3, 1]'}), "('Transpose', ['max0'], ['tranpose0'], perm=[0, 2, 3, 1])\n", (4688, 4745), False, 'from onnx import helper\n'), ((4768, 4847), 'onnx.helper.make_node', 'helper.make_node', (['"""Transpose"""', "['tranpose0']", "['add_input1']"], {'perm': '(0, 3, 1, 2)'}), "('Transpose', ['tranpose0'], ['add_input1'], perm=(0, 3, 1, 2))\n", (4784, 4847), False, 'from onnx import helper\n'), ((4870, 4930), 'onnx.helper.make_node', 'helper.make_node', (['"""Add"""', "['max0', 'add_input1']", "['output0']"], {}), "('Add', ['max0', 'add_input1'], ['output0'])\n", (4886, 4930), False, 'from onnx import helper\n'), ((6239, 6304), 'onnx.helper.make_node', 'helper.make_node', (['"""Identity"""', "['const1']", "['identity1']"], {'name': '"""1"""'}), "('Identity', ['const1'], ['identity1'], name='1')\n", (6255, 6304), False, 'from onnx import helper\n'), ((6327, 6395), 'onnx.helper.make_node', 'helper.make_node', (['"""Identity"""', "['identity1']", "['identity2']"], {'name': '"""2"""'}), "('Identity', ['identity1'], ['identity2'], name='2')\n", (6343, 6395), False, 'from onnx import helper\n'), ((6418, 6486), 'onnx.helper.make_node', 'helper.make_node', (['"""Max"""', "['input1', 'identity2']", "['max0']"], {'name': '"""3"""'}), "('Max', ['input1', 'identity2'], ['max0'], name='3')\n", (6434, 6486), False, 'from onnx import helper\n'), ((6509, 6596), 'onnx.helper.make_node', 'helper.make_node', (['"""Transpose"""', "['max0']", "['tranpose0']"], {'perm': '[0, 2, 3, 1]', 'name': '"""4"""'}), "('Transpose', ['max0'], ['tranpose0'], perm=[0, 2, 3, 1],\n name='4')\n", (6525, 6596), False, 'from onnx import helper\n'), ((6615, 6680), 'onnx.helper.make_node', 'helper.make_node', (['"""LeakyRelu"""', "['tranpose0']", "['leak0']"], {'name': '"""5"""'}), "('LeakyRelu', ['tranpose0'], ['leak0'], name='5')\n", (6631, 6680), False, 'from onnx import helper\n'), ((6703, 6764), 
'onnx.helper.make_node', 'helper.make_node', (['"""LeakyRelu"""', "['leak0']", "['leak1']"], {'name': '"""6"""'}), "('LeakyRelu', ['leak0'], ['leak1'], name='6')\n", (6719, 6764), False, 'from onnx import helper\n'), ((6787, 6876), 'onnx.helper.make_node', 'helper.make_node', (['"""Transpose"""', "['leak1']", "['add_input1']"], {'perm': '(0, 3, 1, 2)', 'name': '"""7"""'}), "('Transpose', ['leak1'], ['add_input1'], perm=(0, 3, 1, 2),\n name='7')\n", (6803, 6876), False, 'from onnx import helper\n'), ((6895, 6966), 'onnx.helper.make_node', 'helper.make_node', (['"""Add"""', "['leak0', 'add_input1']", "['output0']"], {'name': '"""8"""'}), "('Add', ['leak0', 'add_input1'], ['output0'], name='8')\n", (6911, 6966), False, 'from onnx import helper\n'), ((8274, 8339), 'onnx.helper.make_node', 'helper.make_node', (['"""Identity"""', "['const1']", "['identity1']"], {'name': '"""1"""'}), "('Identity', ['const1'], ['identity1'], name='1')\n", (8290, 8339), False, 'from onnx import helper\n'), ((8362, 8430), 'onnx.helper.make_node', 'helper.make_node', (['"""Identity"""', "['identity1']", "['identity2']"], {'name': '"""2"""'}), "('Identity', ['identity1'], ['identity2'], name='2')\n", (8378, 8430), False, 'from onnx import helper\n'), ((8453, 8521), 'onnx.helper.make_node', 'helper.make_node', (['"""Max"""', "['input1', 'identity2']", "['max0']"], {'name': '"""3"""'}), "('Max', ['input1', 'identity2'], ['max0'], name='3')\n", (8469, 8521), False, 'from onnx import helper\n'), ((8544, 8604), 'onnx.helper.make_node', 'helper.make_node', (['"""LeakyRelu"""', "['max0']", "['leak0']"], {'name': '"""4"""'}), "('LeakyRelu', ['max0'], ['leak0'], name='4')\n", (8560, 8604), False, 'from onnx import helper\n'), ((8627, 8688), 'onnx.helper.make_node', 'helper.make_node', (['"""LeakyRelu"""', "['leak0']", "['leak1']"], {'name': '"""5"""'}), "('LeakyRelu', ['leak0'], ['leak1'], name='5')\n", (8643, 8688), False, 'from onnx import helper\n'), ((8711, 8772), 'onnx.helper.make_node', 
'helper.make_node', (['"""LeakyRelu"""', "['leak0']", "['leak2']"], {'name': '"""6"""'}), "('LeakyRelu', ['leak0'], ['leak2'], name='6')\n", (8727, 8772), False, 'from onnx import helper\n'), ((8795, 8883), 'onnx.helper.make_node', 'helper.make_node', (['"""Transpose"""', "['leak1']", "['tranpose0']"], {'perm': '[0, 2, 3, 1]', 'name': '"""7"""'}), "('Transpose', ['leak1'], ['tranpose0'], perm=[0, 2, 3, 1],\n name='7')\n", (8811, 8883), False, 'from onnx import helper\n'), ((8902, 8990), 'onnx.helper.make_node', 'helper.make_node', (['"""Transpose"""', "['leak2']", "['tranpose1']"], {'perm': '[0, 2, 3, 1]', 'name': '"""8"""'}), "('Transpose', ['leak2'], ['tranpose1'], perm=[0, 2, 3, 1],\n name='8')\n", (8918, 8990), False, 'from onnx import helper\n'), ((9009, 9080), 'onnx.helper.make_node', 'helper.make_node', (['"""Add"""', "['tranpose0', 'tranpose1']", "['add0']"], {'name': '"""9"""'}), "('Add', ['tranpose0', 'tranpose1'], ['add0'], name='9')\n", (9025, 9080), False, 'from onnx import helper\n'), ((9104, 9192), 'onnx.helper.make_node', 'helper.make_node', (['"""Transpose"""', "['add0']", "['tranpose2']"], {'perm': '[0, 3, 1, 2]', 'name': '"""10"""'}), "('Transpose', ['add0'], ['tranpose2'], perm=[0, 3, 1, 2],\n name='10')\n", (9120, 9192), False, 'from onnx import helper\n'), ((9212, 9275), 'onnx.helper.make_node', 'helper.make_node', (['"""Conv"""', "['tranpose2']", "['output0']"], {'name': '"""11"""'}), "('Conv', ['tranpose2'], ['output0'], name='11')\n", (9228, 9275), False, 'from onnx import helper\n'), ((10597, 10662), 'onnx.helper.make_node', 'helper.make_node', (['"""Identity"""', "['const1']", "['identity1']"], {'name': '"""1"""'}), "('Identity', ['const1'], ['identity1'], name='1')\n", (10613, 10662), False, 'from onnx import helper\n'), ((10685, 10753), 'onnx.helper.make_node', 'helper.make_node', (['"""Identity"""', "['identity1']", "['identity2']"], {'name': '"""2"""'}), "('Identity', ['identity1'], ['identity2'], name='2')\n", (10701, 10753), False, 
'from onnx import helper\n'), ((10776, 10844), 'onnx.helper.make_node', 'helper.make_node', (['"""Max"""', "['input1', 'identity2']", "['max0']"], {'name': '"""3"""'}), "('Max', ['input1', 'identity2'], ['max0'], name='3')\n", (10792, 10844), False, 'from onnx import helper\n'), ((10867, 10928), 'onnx.helper.make_node', 'helper.make_node', (['"""Identity"""', "['max0']", "['output0']"], {'name': '"""4"""'}), "('Identity', ['max0'], ['output0'], name='4')\n", (10883, 10928), False, 'from onnx import helper\n'), ((12257, 12322), 'onnx.helper.make_node', 'helper.make_node', (['"""Identity"""', "['const1']", "['identity1']"], {'name': '"""1"""'}), "('Identity', ['const1'], ['identity1'], name='1')\n", (12273, 12322), False, 'from onnx import helper\n'), ((12345, 12413), 'onnx.helper.make_node', 'helper.make_node', (['"""Identity"""', "['identity1']", "['identity2']"], {'name': '"""2"""'}), "('Identity', ['identity1'], ['identity2'], name='2')\n", (12361, 12413), False, 'from onnx import helper\n'), ((12436, 12504), 'onnx.helper.make_node', 'helper.make_node', (['"""Max"""', "['input1', 'identity2']", "['max0']"], {'name': '"""3"""'}), "('Max', ['input1', 'identity2'], ['max0'], name='3')\n", (12452, 12504), False, 'from onnx import helper\n'), ((12527, 12587), 'onnx.helper.make_node', 'helper.make_node', (['"""LeakyRelu"""', "['max0']", "['leak0']"], {'name': '"""4"""'}), "('LeakyRelu', ['max0'], ['leak0'], name='4')\n", (12543, 12587), False, 'from onnx import helper\n'), ((12610, 12671), 'onnx.helper.make_node', 'helper.make_node', (['"""LeakyRelu"""', "['leak0']", "['leak1']"], {'name': '"""5"""'}), "('LeakyRelu', ['leak0'], ['leak1'], name='5')\n", (12626, 12671), False, 'from onnx import helper\n'), ((12694, 12755), 'onnx.helper.make_node', 'helper.make_node', (['"""LeakyRelu"""', "['leak0']", "['leak2']"], {'name': '"""6"""'}), "('LeakyRelu', ['leak0'], ['leak2'], name='6')\n", (12710, 12755), False, 'from onnx import helper\n'), ((12778, 12866), 
'onnx.helper.make_node', 'helper.make_node', (['"""Transpose"""', "['leak1']", "['tranpose0']"], {'perm': '[0, 2, 3, 1]', 'name': '"""7"""'}), "('Transpose', ['leak1'], ['tranpose0'], perm=[0, 2, 3, 1],\n name='7')\n", (12794, 12866), False, 'from onnx import helper\n'), ((12885, 12973), 'onnx.helper.make_node', 'helper.make_node', (['"""Transpose"""', "['leak2']", "['tranpose1']"], {'perm': '[0, 2, 3, 1]', 'name': '"""8"""'}), "('Transpose', ['leak2'], ['tranpose1'], perm=[0, 2, 3, 1],\n name='8')\n", (12901, 12973), False, 'from onnx import helper\n'), ((12992, 13063), 'onnx.helper.make_node', 'helper.make_node', (['"""Add"""', "['tranpose0', 'tranpose1']", "['add0']"], {'name': '"""9"""'}), "('Add', ['tranpose0', 'tranpose1'], ['add0'], name='9')\n", (13008, 13063), False, 'from onnx import helper\n'), ((13087, 13175), 'onnx.helper.make_node', 'helper.make_node', (['"""Transpose"""', "['add0']", "['tranpose2']"], {'perm': '[0, 3, 1, 2]', 'name': '"""10"""'}), "('Transpose', ['add0'], ['tranpose2'], perm=[0, 3, 1, 2],\n name='10')\n", (13103, 13175), False, 'from onnx import helper\n'), ((13195, 13258), 'onnx.helper.make_node', 'helper.make_node', (['"""Conv"""', "['tranpose2']", "['output0']"], {'name': '"""11"""'}), "('Conv', ['tranpose2'], ['output0'], name='11')\n", (13211, 13258), False, 'from onnx import helper\n'), ((11694, 11724), 'onnx.defs.onnx_opset_version', 'onnx.defs.onnx_opset_version', ([], {}), '()\n', (11722, 11724), False, 'import onnx\n'), ((14188, 14253), 'onnx.helper.make_node', 'helper.make_node', (['"""Identity"""', "['const1']", "['identity1']"], {'name': '"""1"""'}), "('Identity', ['const1'], ['identity1'], name='1')\n", (14204, 14253), False, 'from onnx import helper\n'), ((14276, 14344), 'onnx.helper.make_node', 'helper.make_node', (['"""Identity"""', "['identity1']", "['identity2']"], {'name': '"""2"""'}), "('Identity', ['identity1'], ['identity2'], name='2')\n", (14292, 14344), False, 'from onnx import helper\n'), ((14367, 14435), 
'onnx.helper.make_node', 'helper.make_node', (['"""Max"""', "['input1', 'identity2']", "['max0']"], {'name': '"""3"""'}), "('Max', ['input1', 'identity2'], ['max0'], name='3')\n", (14383, 14435), False, 'from onnx import helper\n'), ((14458, 14518), 'onnx.helper.make_node', 'helper.make_node', (['"""LeakyRelu"""', "['max0']", "['leak0']"], {'name': '"""4"""'}), "('LeakyRelu', ['max0'], ['leak0'], name='4')\n", (14474, 14518), False, 'from onnx import helper\n'), ((14541, 14602), 'onnx.helper.make_node', 'helper.make_node', (['"""LeakyRelu"""', "['leak0']", "['leak1']"], {'name': '"""5"""'}), "('LeakyRelu', ['leak0'], ['leak1'], name='5')\n", (14557, 14602), False, 'from onnx import helper\n'), ((14625, 14686), 'onnx.helper.make_node', 'helper.make_node', (['"""LeakyRelu"""', "['leak0']", "['leak2']"], {'name': '"""6"""'}), "('LeakyRelu', ['leak0'], ['leak2'], name='6')\n", (14641, 14686), False, 'from onnx import helper\n'), ((14709, 14771), 'onnx.helper.make_node', 'helper.make_node', (['"""Cast"""', "['leak1']", "['cast0']"], {'to': '(6)', 'name': '"""7"""'}), "('Cast', ['leak1'], ['cast0'], to=6, name='7')\n", (14725, 14771), False, 'from onnx import helper\n'), ((14794, 14856), 'onnx.helper.make_node', 'helper.make_node', (['"""Cast"""', "['cast0']", "['cast1']"], {'to': '(1)', 'name': '"""8"""'}), "('Cast', ['cast0'], ['cast1'], to=1, name='8')\n", (14810, 14856), False, 'from onnx import helper\n'), ((14879, 14941), 'onnx.helper.make_node', 'helper.make_node', (['"""Cast"""', "['leak2']", "['cast2']"], {'to': '(6)', 'name': '"""9"""'}), "('Cast', ['leak2'], ['cast2'], to=6, name='9')\n", (14895, 14941), False, 'from onnx import helper\n'), ((14965, 15028), 'onnx.helper.make_node', 'helper.make_node', (['"""Cast"""', "['cast2']", "['cast3']"], {'to': '(7)', 'name': '"""10"""'}), "('Cast', ['cast2'], ['cast3'], to=7, name='10')\n", (14981, 15028), False, 'from onnx import helper\n'), ((15052, 15115), 'onnx.helper.make_node', 'helper.make_node', (['"""Cast"""', 
"['cast3']", "['cast4']"], {'to': '(1)', 'name': '"""11"""'}), "('Cast', ['cast3'], ['cast4'], to=1, name='11')\n", (15068, 15115), False, 'from onnx import helper\n'), ((15139, 15203), 'onnx.helper.make_node', 'helper.make_node', (['"""Add"""', "['cast1', 'cast4']", "['add0']"], {'name': '"""12"""'}), "('Add', ['cast1', 'cast4'], ['add0'], name='12')\n", (15155, 15203), False, 'from onnx import helper\n'), ((15227, 15315), 'onnx.helper.make_node', 'helper.make_node', (['"""Transpose"""', "['add0']", "['tranpose2']"], {'perm': '[0, 3, 1, 2]', 'name': '"""13"""'}), "('Transpose', ['add0'], ['tranpose2'], perm=[0, 3, 1, 2],\n name='13')\n", (15243, 15315), False, 'from onnx import helper\n'), ((15335, 15398), 'onnx.helper.make_node', 'helper.make_node', (['"""Conv"""', "['tranpose2']", "['output0']"], {'name': '"""14"""'}), "('Conv', ['tranpose2'], ['output0'], name='14')\n", (15351, 15398), False, 'from onnx import helper\n'), ((16507, 16533), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (16523, 16533), False, 'import os\n'), ((16677, 16703), 'onnx.load_model', 'onnx.load_model', (['model_dir'], {}), '(model_dir)\n', (16692, 16703), False, 'import onnx\n'), ((16728, 16761), 'onnxconverter_common.optimizer.optimize_onnx_model', 'optimize_onnx_model', (['origin_model'], {}), '(origin_model)\n', (16747, 16761), False, 'from onnxconverter_common.optimizer import optimize_onnx, optimize_onnx_model\n')] |
## LSDMap_Subplots.py
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
## These functions are tools to deal with creating nice subplots from multiple
## rasters
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
## FJC
## 22/12/2016
##=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
from __future__ import absolute_import, division, print_function, unicode_literals
from glob import glob
import os.path
import numpy as np
import matplotlib.pyplot as pp
import string
import matplotlib.image as mpimg
import matplotlib.cm as cmx
from matplotlib import rcParams
from . import LSDMap_GDALIO as LSDMap_IO
from . import LSDMap_BasicPlotting as LSDMap_BP
from . import labels as lsdlabels
#==============================================================================
# Convert cm to inch for figure sizing
#------------------------------------------------------------------------------
def cm2inch(value):
    """Convert a length in centimetres to inches (1 in = 2.54 cm),
    used for sizing matplotlib figures."""
    centimetres_per_inch = 2.54
    return value / centimetres_per_inch
#==============================================================================
# Function to create nice field sites figure from all the hillshades in a folder
# N_HSFiles = number of field sites
# Also reads in a series of map files to show locations of each site. At the moment
# you have to manually indicate on these maps where the field site is - would
# be nice to automate this when I have some time to mess around with it.
# FJC 22/12/16
#------------------------------------------------------------------------------
def field_sites(DataDirectory, N_HSFiles, NRows, NCols, n_target_ticks):
    """
    Create a multi-panel figure of field-site hillshades and location maps.

    The first N_HSFiles panels show hillshade rasters (*_HS.bil) found in
    DataDirectory; the remaining panels show location-map images
    (*_map.eps). Each panel is lettered (a, b, c, ...). The figure is
    saved as field_sites.pdf in DataDirectory.

    Parameters:
        DataDirectory (str): directory containing the raster and map files.
        N_HSFiles (int): number of hillshade panels (field sites).
        NRows (int): number of subplot rows.
        NCols (int): number of subplot columns.
        n_target_ticks (int): intended number of axis ticks. Currently
            unused - the tick-setting code below is commented out.

    Author: FJC, 22/12/16
    """
    # Set up fonts
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = 6
    # Read in the files for each site
    HSFiles = sorted(glob(DataDirectory+'*_HS.bil'), key=str)
    MapFiles = sorted(glob(DataDirectory+'*_map.eps'), key=str)
    print(MapFiles)
    n_files = len(HSFiles)+len(MapFiles)
    print("Number of files = ", n_files)
    # Now make the subplots
    fig, ax = pp.subplots(NRows,NCols, figsize=(cm2inch(12),cm2inch(15)), frameon=False)
    ax = ax.ravel()
    #get a list to label the subfigures
    alphabet = list(string.ascii_lowercase)
    file_counter = 0
    for i in range (n_files):
        if i < N_HSFiles:
            # first deal with the hillshade files. If there are more hillshade files simply
            # change the value of N_HSFiles
            print("The hillshade file name is: ", HSFiles[i])
            hillshade_raster = LSDMap_IO.ReadRasterArrayBlocks(HSFiles[i])
            # -9999 is the raster nodata value; mask it so it is not drawn
            hillshade_raster = np.ma.masked_where(hillshade_raster == -9999, hillshade_raster)
            # now get the extent
            extent_raster = LSDMap_IO.GetRasterExtent(HSFiles[i])
            # get DEM info
            CellSize,XMin,XMax,YMin,YMax = LSDMap_IO.GetUTMMaxMin(HSFiles[i])
            print(YMin, YMax)
            #plot the rasters
            ax[i].imshow(hillshade_raster, extent = extent_raster, cmap=cmx.gray)
            # subplot letter label in the top-left corner (axes coordinates)
            ax[i].text(0.05,0.97, alphabet[i], horizontalalignment='left', verticalalignment='top', bbox=dict(facecolor='white', edgecolor='k', pad=3), fontsize = 8, transform=ax[i].transAxes)
            #change ticks
            #            xlocs = ax[i].xaxis.get_ticklocs()
            #            ylocs = ax[i].yaxis.get_ticklocs()
            #            new_xlocs,new_ylocs,new_x_labels,new_y_labels = LSDMap_BP.GetTicksForUTM(HSFiles[i],xlocs.max(),xlocs.min(),ylocs.max(),ylocs.min(),n_target_ticks)
            #
            #            # change the location of the ticks depending on subplot placement
            #            if i < 2:
            #                ax[i].xaxis.tick_top()
            #            if i % 2 != 0:
            #                ax[i].yaxis.tick_right()
            ax[i].set_xticklabels([])
            ax[i].set_yticklabels([])
            #ax[i].tick_params(axis='x', pad=7)
            #ax[i].tick_params(axis='y', pad=7)
        if i >= N_HSFiles:
            print("The map file name is: ", MapFiles[file_counter])
            # add in the location maps for the sites (e.g. USA, UK)
            img = mpimg.imread(MapFiles[file_counter])
            ax[i].imshow(img)
            ax[i].text(0.02,1.00, alphabet[i], horizontalalignment='left', verticalalignment='top', fontsize = 8, transform=ax[i].transAxes)
            file_counter=file_counter+1
        #remove border
        ax[i].axis('off')
    # Save figure
    # Add a big subplot to get a common x and y label for the subplots
    fig.add_subplot(111, frameon=False)
    # hide tick and tick label of the big axes
    pp.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
    #pp.xlabel('Easting (m)', fontsize=8, labelpad=-122)
    #pp.ylabel('Northing (m)', fontsize=8, labelpad=10, position=(0.0,0.67))
    pp.tight_layout(pad=0.1, w_pad = 0.1, h_pad = 0.2)
    OutputFigureName = "field_sites"
    OutputFigureFormat = 'pdf'
    pp.savefig(DataDirectory+OutputFigureName + '.' + OutputFigureFormat, format=OutputFigureFormat, dpi=300)
    #pp.show()
#==============================================================================
# Function to create comparison plots for floodplain mapping between the published
# flood maps and the geometric method
# FJC 22/12/16
#------------------------------------------------------------------------------
def multiple_flood_maps(DataDirectory):
    """
    Make nice subplots of floodplain rasters for different field sites.

    Looks for floodplain rasters (*_FP*.bil) in DataDirectory and, for
    each one, a matching hillshade (<site>_HS.bil) whose name is derived
    from the floodplain raster's file name. The floodplain (masked where
    values <= 0) is draped over the hillshade in a 2x3 grid, and the
    figure is saved as Comparison_published_maps.pdf in DataDirectory.

    Parameters:
        DataDirectory (str): directory containing the rasters.

    Author: FJC, 22/12/16
    """
    import seaborn as sns
    # Set up fonts
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = 12
    # Read in the files for each site
    FPFiles = sorted(glob(DataDirectory+'*_FP*.bil'), key=str)
    n_files = len(FPFiles)
    print("Number of files = ", n_files)
    # Now make the subplots
    fig, ax = pp.subplots(2,3, figsize=(cm2inch(15),cm2inch(11)))
    ax = ax.ravel()
    #use seaborn to get a nice color palette
    cmap_oranges = sns.light_palette("#ff8f66", input="hex", as_cmap=True, reverse=True)
    cmap_ice = sns.light_palette("#00ffff", input="hex", as_cmap=True, reverse=True)
    alphabet = list(string.ascii_lowercase)
    for i in range (n_files):
        print("The floodplain file name is: ", FPFiles[i])
        # get the name of the field site
        # NOTE(review): splitting on '/' assumes POSIX-style paths and that
        # the file name follows a <prefix>_<site>_<id>... pattern - confirm.
        fname = FPFiles[i].split('/')
        print(fname)
        split_fname = fname[-1].split('_')
        print(split_fname)
        HSFile = DataDirectory+split_fname[1]+'_'+split_fname[2]+"_HS.bil"
        print(HSFile)
        hillshade_raster = LSDMap_IO.ReadRasterArrayBlocks(HSFile)
        FP_raster = LSDMap_IO.ReadRasterArrayBlocks(FPFiles[i])
        # mask non-floodplain cells (<= 0) so only the floodplain is drawn
        FP_raster = np.ma.masked_where(FP_raster <= 0, FP_raster)
        # now get the extent
        extent_raster = LSDMap_IO.GetRasterExtent(HSFile)
        # get DEM info
        CellSize,XMin,XMax,YMin,YMax = LSDMap_IO.GetUTMMaxMin(HSFile)
        print(YMin, YMax)
        #plot the rasters
        ax[i].imshow(hillshade_raster, extent = extent_raster, cmap=cmx.gray)
        # first row uses the orange palette, second row the ice palette
        if i < 3:
            ax[i].imshow(FP_raster, extent = extent_raster, cmap=cmap_oranges, alpha=0.8)
        else:
            ax[i].imshow(FP_raster, extent = extent_raster, cmap=cmap_ice, alpha=0.6)
        ax[i].text(0.03,0.97, alphabet[i], bbox=dict(facecolor='white', edgecolor='k', pad=5), horizontalalignment='left', verticalalignment='top', transform=ax[i].transAxes)
        #scalebars.add_scalebar(ax[i], matchx=False, sizex=500.0, loc=3, borderpad =1, lw=3, matchy=False, hidex=False, hidey=False)
        ax[i].set_xticklabels([])
        ax[i].set_yticklabels([])
        ax[i].set_xticks([])
        ax[i].set_yticks([])
        ax[i].tick_params(axis='x', pad=3)
        ax[i].tick_params(axis='y', pad=3)
    # Save figure
    pp.tight_layout(pad=0.5, h_pad=0, w_pad=0.1)
    pp.subplots_adjust(wspace=0.05,hspace=0)
    OutputFigureName = "Comparison_published_maps"
    OutputFigureFormat = 'pdf'
    pp.savefig(DataDirectory+OutputFigureName + '.' + OutputFigureFormat, format=OutputFigureFormat, transparent=True, dpi=300)
#==============================================================================
# Make subplots showing the difference between the mapped and predicted
# floodplain initiation points. Uses Fiona (yaaay) to read in the shapefiles
# FJC 05/01/17
#------------------------------------------------------------------------------
def flood_maps_with_shapefile(DataDirectory):
    """
    Plot mapped vs. predicted floodplain initiation points for each site.

    Reads hillshade rasters (*_HS*.bil), floodplain shapefiles
    (*_FIPs_FP*.shp) and mapped-point shapefiles (*_FIPs_MP*.shp) from
    DataDirectory and draws one subplot per site: floodplain polygons
    (blue) and mapped floodplain initiation points (red) draped over the
    hillshade. Saves the figure as Comparison_with_mapped_FIPs.pdf in
    DataDirectory.

    Parameters:
        DataDirectory (str): directory containing the rasters/shapefiles.

    Author: FJC, 05/01/17
    """
    from fiona import collection
    from descartes import PolygonPatch
    # Set up fonts
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['arial']
    rcParams['font.size'] = 8
    # Read in the files for each site
    HSFiles = sorted(glob(DataDirectory+'*_HS*.bil'), key=str)
    FPFiles = sorted(glob(DataDirectory+'*_FIPs_FP*.shp'), key=str)
    PointFiles = sorted(glob(DataDirectory+'*_FIPs_MP*.shp'), key=str)
    n_files = len(FPFiles)
    print("Number of files = ", n_files)
    # Now make the subplots
    fig, ax = pp.subplots(1,2, figsize=(cm2inch(12),cm2inch(7)))
    ax = ax.ravel()
    #get a list with the figure letterings
    figure_letter = ["a", "b"]
    titles = ["Mid Bailey Run, OH", "Coweeta, NC"]
    for i in range (n_files):
        print("The hillshade file name is: ", HSFiles[i])
        print("The floodplain file name is: ", FPFiles[i])
        print("The shapefile name is: ", PointFiles[i])
        # get the name of the field site
        split_fname = FPFiles[i].split('_FP')
        print(split_fname[0])
        hillshade_raster = LSDMap_IO.ReadRasterArrayBlocks(HSFiles[i])
        # now get the extent
        extent_raster = LSDMap_IO.GetRasterExtent(HSFiles[i])
        # get DEM info
        CellSize,XMin,XMax,YMin,YMax = LSDMap_IO.GetUTMMaxMin(HSFiles[i])
        print(YMin, YMax)
        # plot the raster
        ax[i].imshow(hillshade_raster, extent = extent_raster, cmap=cmx.gray)
        # plot the floodplain shapefile using fiona and descartes
        # (renamed from `input`, which shadowed the builtin of the same name)
        with collection(FPFiles[i], 'r') as fp_shapefile:
            for f in fp_shapefile:
                ax[i].add_patch(PolygonPatch(f['geometry'], fc='blue', ec='blue', lw=0.1, alpha=0.8))
        #plot the mapped points
        with collection(PointFiles[i],'r') as point_shapefile:
            for point in point_shapefile:
                x = point['geometry']['coordinates'][0]
                y = point['geometry']['coordinates'][1]
                ax[i].scatter(x,y, c="red", s=15, zorder=100)
        #ax[i].text(0.03,0.97, figure_letter[i], bbox=dict(facecolor='white', edgecolor='k', pad=3), horizontalalignment='left', verticalalignment='top', fontsize = 8, transform=ax[i].transAxes)
        #change ticks
        xlocs = ax[i].xaxis.get_ticklocs()
        ylocs = ax[i].yaxis.get_ticklocs()
        n_target_tics = 10
        new_xlocs,new_ylocs,new_x_labels,new_y_labels = LSDMap_BP.GetTicksForUTM(HSFiles[i],xlocs.max(),xlocs.min(),ylocs.max(),ylocs.min(),n_target_tics)
        ax[i].set_xticklabels(new_x_labels, rotation=30)
        ax[i].set_yticklabels(new_y_labels)
        #set axis limits
        ax[i].set_xlim(XMin,XMax)
        ax[i].set_ylim(YMin,YMax)
        # shared axis labels: x label only under the first panel, y ticks
        # on the right for the second panel
        if i == 0:
            ax[i].set_xlabel('Easting (m)', position=(1,0))
            ax[i].set_ylabel('Northing (m)')
        if i > 0:
            ax[i].yaxis.tick_right()
        ax[i].tick_params(axis='x', pad=2)
        ax[i].tick_params(axis='y', pad=2)
        ax[i].set_title(titles[i], fontsize=9)
    # Save figure
    pp.tight_layout(pad=0.5)
    OutputFigureName = "Comparison_with_mapped_FIPs"
    OutputFigureFormat = 'pdf'
    pp.savefig(DataDirectory+OutputFigureName + '.' + OutputFigureFormat, format=OutputFigureFormat, dpi=300)
def findmaxval_multirasters(FileList):
    """
    Loops through a list or array of rasters (np arrays)
    and finds the maximum single value in the set of arrays.

    Parameters:
        FileList (list): paths of the raster files to scan.

    Returns:
        The largest non-NaN value found across all rasters, or -inf if
        FileList is empty.
    """
    # Start from -inf so rasters containing only negative values (e.g. a
    # DEM of difference) are handled correctly; the previous version
    # started at 0 and could never report a negative maximum.
    overall_max_val = -np.inf
    for raster_file in FileList:
        raster_as_array = LSDMap_IO.ReadRasterArrayBlocks(raster_file)
        # nanmax ignores NaN nodata cells
        this_max_val = np.nanmax(raster_as_array)
        if this_max_val > overall_max_val:
            overall_max_val = this_max_val
    print(overall_max_val)
    return overall_max_val
def findminval_multirasters(FileList):
    """
    Loops through a list or array of rasters (np arrays)
    and finds the minimum single value in the set of arrays.

    Parameters:
        FileList (list): paths of the raster files to scan.

    Returns:
        The smallest non-NaN value found across all rasters, or +inf if
        FileList is empty.
    """
    # Bug fix: the original compared with `>` and started from 0, so it
    # never tracked a true minimum. Start from +inf and keep the smaller
    # value on each iteration.
    overall_min_val = np.inf
    for raster_file in FileList:
        raster_as_array = LSDMap_IO.ReadRasterArrayBlocks(raster_file)
        # nanmin ignores NaN nodata cells
        this_min_val = np.nanmin(raster_as_array)
        if this_min_val < overall_min_val:
            overall_min_val = this_min_val
    print(overall_min_val)
    return overall_min_val
def MultiDrapeFloodMaps(DataDir, ElevationRaster, DrapeRasterWild, cmap,
                        drape_min_threshold=None, drape_max=None, cbar_label=None):
    """Creates a figure with multiple drape maps over a hillshade.

    Plots flood extents from water depth rasters
    draped over the catchment elevation raster
    in a series of subplots.

    Takes a wildcard for the drapes.
    Expects a fixed elevation raster, but this could be
    modified in future.

    Parameters:
        DataDir (str): Path to the directory containing the data files
        ElevationRaster (str): Name of the elevation raster used to create the hillshade
        DrapeRasterWild (str): Wildcard string used to find all the drape files in the
            directory.
        cmap: Can be the string name of a colourmap, or a Colourmap object
        drape_min_threshold (float, optional): Minimum value for the drape raster, i.e. values
            below this threshold will be masked and not plotted.
        drape_max (float, optional): Maximum value for the drape raster, i.e. values
            above this value will be masked and not plotted. If not supplied,
            it is set to the maximum value found across all drape rasters so
            every subplot shares the same colour scale.
        cbar_label (str, optional): Label for the colourbar on the figure. This
            is the colourbar for the drape colourmap.

    Notes:
        Consider, if plotting multiple datasets, how you
        are going to deal with min and max values in the colour range.
        imshow will automatically set vmin and vmax and stretch the colour bar
        over this - which can be visually misleading. Ideally, you
        want to have the same colour map used for *all* subplots, and
        this is not default behaviour.
    """
    f, ax_arr = pp.subplots(2, 2, figsize=(10, 5), sharex=True, sharey=True)
    ax_arr = ax_arr.ravel()
    FPFiles = sorted(glob(DataDir+DrapeRasterWild), key=str)
    n_files = len(FPFiles)
    print("Number of files = ", n_files)
    elev_raster_file = DataDir + ElevationRaster
    hillshade = LSDMap_BP.Hillshade(elev_raster_file)
    # now get the extent (shared by all subplots)
    extent_raster = LSDMap_IO.GetRasterExtent(elev_raster_file)
    x_min = extent_raster[0]
    x_max = extent_raster[1]
    y_min = extent_raster[2]
    y_max = extent_raster[3]
    # now get the tick marks
    n_target_tics = 5
    xlocs,ylocs,new_x_labels,new_y_labels = LSDMap_BP.GetTicksForUTM(elev_raster_file,x_max,x_min,y_max,y_min,n_target_tics)
    print("xmax: " + str(x_max))
    print("xmin: " + str(x_min))
    print("ymax: " + str(y_max))
    print("ymin: " + str(y_min))
    # Find the maximum water depth in all rasters so the colour scale can be
    # normalised across all subplots. Only scan when the caller did not
    # supply drape_max (previously a caller-supplied value was always
    # overwritten, unlike MultiDrapeErodeDiffMaps which guards on None).
    if drape_max is None:
        try:
            print("Calculating max drape raster value by scanning rasters...")
            drape_max = findmaxval_multirasters(FPFiles)
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed
        except Exception:
            print("Something went wrong trying to obtain the max value in \
your drape raster file list.")
        finally:
            print("The drape(s) max value is set to: ", drape_max)
    for i in range(n_files):
        print("The floodplain file name is: ", FPFiles[i])
        FP_raster = LSDMap_IO.ReadRasterArrayBlocks(FPFiles[i])
        filename = os.path.basename(FPFiles[i])
        title = lsdlabels.make_line_label(filename)
        print(title)
        # mask out water depths below the plotting threshold
        low_values_index = FP_raster < drape_min_threshold
        FP_raster[low_values_index] = np.nan
        im = ax_arr[i].imshow(hillshade, "gray", extent=extent_raster, interpolation="nearest")
        # vmax is the shared maximum water depth, so all subplots use the
        # same colour scale
        im = ax_arr[i].imshow(FP_raster, cmap, extent=extent_raster,
                       alpha=1.0, interpolation="nearest",
                       vmin=drape_min_threshold,
                       vmax=drape_max)
        ax_arr[i].set_title(title)
        pp.setp( ax_arr[i].xaxis.get_majorticklabels(), rotation=70 )
    # single shared colourbar to the right of the subplot grid
    f.subplots_adjust(right=0.85)
    cax = f.add_axes([0.9, 0.1, 0.03, 0.8])
    cbar = f.colorbar(im, cax=cax)
    cbar.set_label(cbar_label)
    f.text(0.5, 0.04, 'Easting (m)', ha='center', fontsize=17)
    f.text(0.04, 0.5, 'Northing (m)', va='center', rotation='vertical', fontsize=17)
def MultiDrapeErodeDiffMaps(DataDir, ElevationRaster, DrapeRasterWild, cmap,
                            drape_min_threshold=None, cbar_label=None,
                            drape_max_threshold=None,
                            middle_mask_range=None):
    """Plots multiple drape maps of erosion/deposition (a DEM of difference)
    over a hillshade raster of the basin.

    Takes a wildcard for the drapes.
    Expects a single elevation raster for the background hillshade,
    but this could be modified in future.

    Parameters:
        DataDir (str): Path to the directory containing the data files
        ElevationRaster (str): Name of the elevation raster used to create the hillshade
        DrapeRasterWild (str): Wildcard string used to find all the drape files in the
            directory.
        cmap: Can be the string name of a colourmap, or a Colourmap object
        drape_min_threshold (float, optional): Minimum value for the drape raster, i.e. values
            below this threshold will be masked and not plotted.
        drape_max_threshold (float, optional): Maximum value for the drape raster, i.e. values
            above this value will be masked and not plotted.
        cbar_label (str, optional): Label for the colourbar on the figure. This
            is the colourbar for the drape colourmap.
        middle_mask_range (tuple, optional): A tuple or list of two values, used
            to mask an inner range of values in the drape raster.
            e.g. if you pass `(-0.1, 0.1)` then all the values
            in the range -0.1 to 0.1 will be masked and not plotted
            on the final map. Use for masking very small values
            either side of zero.

    Notes:
        Consider, if plotting multiple datasets, how you
        are going to deal with min and max values in the colour range.
        imshow will automatically set vmin and vmax and stretch the colour bar
        over this - which can be visually misleading. Ideally, you
        want to have the same colour map used for *all* subplots, and
        this is not default behaviour.

    Note: If `drape_max_threshold` is not set, the function searches for the maximum value
        in the range of rasters found by expanding the `DrapeRasterWild` argument
        and searching for the maximum value out of all rasters found.

    Raises:
        Exception: If the maximum value in the drape maps could not be found.

    Author: DAV & FJC
    """
    #import lsdmatplotlibextensions as mplext
    f, ax_arr = pp.subplots(2, 2, figsize=(10, 5), sharex=True, sharey=True)
    ax_arr = ax_arr.ravel()
    FPFiles = sorted(glob(DataDir+DrapeRasterWild), key=str)
    n_files = len(FPFiles)
    print("Number of files = ", n_files)
    elev_raster_file = DataDir + ElevationRaster
    hillshade = LSDMap_BP.Hillshade(elev_raster_file)
    #hillshade_array = LSDP.ReadRasterArrayBlocks(elev_raster_file)
    # now get the extent
    extent_raster = LSDMap_IO.GetRasterExtent(elev_raster_file)
    x_min = extent_raster[0]
    x_max = extent_raster[1]
    y_min = extent_raster[2]
    y_max = extent_raster[3]
    # now get the tick marks
    n_target_tics = 5
    xlocs, ylocs, new_x_labels, new_y_labels = LSDMap_BP.GetTicksForUTM(
        elev_raster_file,
        x_max,
        x_min,
        y_max,
        y_min,
        n_target_tics)
    print("xmax: " + str(x_max))
    print("xmin: " + str(x_min))
    print("ymax: " + str(y_max))
    print("ymin: " + str(y_min))
    """
    Find the maximum water depth in all rasters.
    You need this to normalize the colourscale accross
    all plots when teh imshow is done later.
    """
    # Only scan the rasters when the caller did not supply an explicit
    # max threshold; the scan sets a colour scale shared by all subplots.
    if drape_max_threshold is None:
        try:
            print("Calculating max drape raster value by scanning rasters...")
            max_water_depth = findmaxval_multirasters(FPFiles)
            drape_max_threshold = max_water_depth
        except ValueError:
            print("Something went wrong trying to obtain the max value in \
            your drape raster file list.")
        finally:
            print("The drape(s) max value is set to: ", drape_max_threshold)
    # Likewise only scan for the minimum when it was not supplied.
    if drape_min_threshold is None:
        try:
            print("Calculating min drape raster value by scanning rasters...")
            min_water_depth = findminval_multirasters(FPFiles)
            drape_min_threshold = min_water_depth
        except ValueError:
            print("Something went wrong trying to obtain the min value in \
            your drape raster file list.")
        finally:
            print("The drape(s) min value is set to: ", drape_min_threshold)
    for i in range(n_files):
        print("The floodplain file name is: ", FPFiles[i])
        FP_raster = LSDMap_IO.ReadRasterArrayBlocks(FPFiles[i])
        #FP_raster = np.ma.masked_where(FP_raster <= 0, FP_raster)
        filename = os.path.basename(FPFiles[i])
        title = lsdlabels.make_line_label(filename)
        print(title)
        # Mask the extreme high values
        hi_values_index = FP_raster > drape_max_threshold
        FP_raster[hi_values_index] = np.nan
        # Mask the extreme low values
        lo_values_index = FP_raster < drape_min_threshold
        FP_raster[lo_values_index] = np.nan
        # Mask the middle values that are really close to zero (i.e. if you
        # have negative and positive values in the raster, such as in a DEM
        # of difference with both erosion and deposition.)
        if middle_mask_range is not None:
            masked_mid_values_index = (np.logical_and(FP_raster > middle_mask_range[0],
                                       FP_raster < middle_mask_range[1]))
            FP_raster[masked_mid_values_index] = np.nan
        im = ax_arr[i].imshow(hillshade, "gray", extent=extent_raster, interpolation="nearest")
        """
        Now we can set vmax to be the maximum water depth we calcualted earlier, making our separate
        subplots all have the same colourscale
        """
        im = ax_arr[i].imshow(FP_raster, cmap, extent=extent_raster,
                       alpha=1.0, interpolation="nearest",
                       vmin=drape_min_threshold,
                       vmax=drape_max_threshold)
        ax_arr[i].set_title(title)
        pp.setp( ax_arr[i].xaxis.get_majorticklabels(), rotation=70 )
    # single shared colourbar to the right of the subplot grid
    f.subplots_adjust(right=0.85)
    cax = f.add_axes([0.9, 0.1, 0.03, 0.8])
    cbar = f.colorbar(im, cax=cax)
    cbar.set_label(cbar_label)
    #cbar.set_ticks(np.linspace(0, 8, 8))
    #cbar = mplext.colours.colorbar_index(f, cax, 8, cmap,
    #                                        drape_min_threshold, drape_max)
    #tick_locator = ticker.MaxNLocator(nbins=8)
    #cbar.locator = tick_locator
    #cbar.update_ticks()
    f.text(0.5, 0.04, 'Easting (m)', ha='center', fontsize=17)
    f.text(0.04, 0.5, 'Northing (m)', va='center', rotation='vertical', fontsize=17)
| [
"matplotlib.image.imread",
"descartes.PolygonPatch",
"numpy.ma.masked_where",
"numpy.logical_and",
"numpy.nanmax",
"seaborn.light_palette",
"fiona.collection",
"matplotlib.pyplot.subplots",
"numpy.nanmin",
"glob.glob",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.tight_layout",
"matpl... | [((4647, 4734), 'matplotlib.pyplot.tick_params', 'pp.tick_params', ([], {'labelcolor': '"""none"""', 'top': '"""off"""', 'bottom': '"""off"""', 'left': '"""off"""', 'right': '"""off"""'}), "(labelcolor='none', top='off', bottom='off', left='off',\n right='off')\n", (4661, 4734), True, 'import matplotlib.pyplot as pp\n'), ((4870, 4916), 'matplotlib.pyplot.tight_layout', 'pp.tight_layout', ([], {'pad': '(0.1)', 'w_pad': '(0.1)', 'h_pad': '(0.2)'}), '(pad=0.1, w_pad=0.1, h_pad=0.2)\n', (4885, 4916), True, 'import matplotlib.pyplot as pp\n'), ((4993, 5104), 'matplotlib.pyplot.savefig', 'pp.savefig', (["(DataDirectory + OutputFigureName + '.' + OutputFigureFormat)"], {'format': 'OutputFigureFormat', 'dpi': '(300)'}), "(DataDirectory + OutputFigureName + '.' + OutputFigureFormat,\n format=OutputFigureFormat, dpi=300)\n", (5003, 5104), True, 'import matplotlib.pyplot as pp\n'), ((6055, 6124), 'seaborn.light_palette', 'sns.light_palette', (['"""#ff8f66"""'], {'input': '"""hex"""', 'as_cmap': '(True)', 'reverse': '(True)'}), "('#ff8f66', input='hex', as_cmap=True, reverse=True)\n", (6072, 6124), True, 'import seaborn as sns\n'), ((6140, 6209), 'seaborn.light_palette', 'sns.light_palette', (['"""#00ffff"""'], {'input': '"""hex"""', 'as_cmap': '(True)', 'reverse': '(True)'}), "('#00ffff', input='hex', as_cmap=True, reverse=True)\n", (6157, 6209), True, 'import seaborn as sns\n'), ((7879, 7923), 'matplotlib.pyplot.tight_layout', 'pp.tight_layout', ([], {'pad': '(0.5)', 'h_pad': '(0)', 'w_pad': '(0.1)'}), '(pad=0.5, h_pad=0, w_pad=0.1)\n', (7894, 7923), True, 'import matplotlib.pyplot as pp\n'), ((7928, 7969), 'matplotlib.pyplot.subplots_adjust', 'pp.subplots_adjust', ([], {'wspace': '(0.05)', 'hspace': '(0)'}), '(wspace=0.05, hspace=0)\n', (7946, 7969), True, 'import matplotlib.pyplot as pp\n'), ((8055, 8184), 'matplotlib.pyplot.savefig', 'pp.savefig', (["(DataDirectory + OutputFigureName + '.' 
+ OutputFigureFormat)"], {'format': 'OutputFigureFormat', 'transparent': '(True)', 'dpi': '(300)'}), "(DataDirectory + OutputFigureName + '.' + OutputFigureFormat,\n format=OutputFigureFormat, transparent=True, dpi=300)\n", (8065, 8184), True, 'import matplotlib.pyplot as pp\n'), ((11598, 11622), 'matplotlib.pyplot.tight_layout', 'pp.tight_layout', ([], {'pad': '(0.5)'}), '(pad=0.5)\n', (11613, 11622), True, 'import matplotlib.pyplot as pp\n'), ((11711, 11822), 'matplotlib.pyplot.savefig', 'pp.savefig', (["(DataDirectory + OutputFigureName + '.' + OutputFigureFormat)"], {'format': 'OutputFigureFormat', 'dpi': '(300)'}), "(DataDirectory + OutputFigureName + '.' + OutputFigureFormat,\n format=OutputFigureFormat, dpi=300)\n", (11721, 11822), True, 'import matplotlib.pyplot as pp\n'), ((14728, 14788), 'matplotlib.pyplot.subplots', 'pp.subplots', (['(2)', '(2)'], {'figsize': '(10, 5)', 'sharex': '(True)', 'sharey': '(True)'}), '(2, 2, figsize=(10, 5), sharex=True, sharey=True)\n', (14739, 14788), True, 'import matplotlib.pyplot as pp\n'), ((20327, 20387), 'matplotlib.pyplot.subplots', 'pp.subplots', (['(2)', '(2)'], {'figsize': '(10, 5)', 'sharex': '(True)', 'sharey': '(True)'}), '(2, 2, figsize=(10, 5), sharex=True, sharey=True)\n', (20338, 20387), True, 'import matplotlib.pyplot as pp\n'), ((1872, 1904), 'glob.glob', 'glob', (["(DataDirectory + '*_HS.bil')"], {}), "(DataDirectory + '*_HS.bil')\n", (1876, 1904), False, 'from glob import glob\n'), ((1935, 1968), 'glob.glob', 'glob', (["(DataDirectory + '*_map.eps')"], {}), "(DataDirectory + '*_map.eps')\n", (1939, 1968), False, 'from glob import glob\n'), ((5764, 5797), 'glob.glob', 'glob', (["(DataDirectory + '*_FP*.bil')"], {}), "(DataDirectory + '*_FP*.bil')\n", (5768, 5797), False, 'from glob import glob\n'), ((6765, 6810), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(FP_raster <= 0)', 'FP_raster'], {}), '(FP_raster <= 0, FP_raster)\n', (6783, 6810), True, 'import numpy as np\n'), ((8830, 8863), 'glob.glob', 
'glob', (["(DataDirectory + '*_HS*.bil')"], {}), "(DataDirectory + '*_HS*.bil')\n", (8834, 8863), False, 'from glob import glob\n'), ((8893, 8931), 'glob.glob', 'glob', (["(DataDirectory + '*_FIPs_FP*.shp')"], {}), "(DataDirectory + '*_FIPs_FP*.shp')\n", (8897, 8931), False, 'from glob import glob\n'), ((8964, 9002), 'glob.glob', 'glob', (["(DataDirectory + '*_FIPs_MP*.shp')"], {}), "(DataDirectory + '*_FIPs_MP*.shp')\n", (8968, 9002), False, 'from glob import glob\n'), ((12148, 12174), 'numpy.nanmax', 'np.nanmax', (['raster_as_array'], {}), '(raster_as_array)\n', (12157, 12174), True, 'import numpy as np\n'), ((12655, 12681), 'numpy.nanmin', 'np.nanmin', (['raster_as_array'], {}), '(raster_as_array)\n', (12664, 12681), True, 'import numpy as np\n'), ((14839, 14870), 'glob.glob', 'glob', (['(DataDir + DrapeRasterWild)'], {}), '(DataDir + DrapeRasterWild)\n', (14843, 14870), False, 'from glob import glob\n'), ((20438, 20469), 'glob.glob', 'glob', (['(DataDir + DrapeRasterWild)'], {}), '(DataDir + DrapeRasterWild)\n', (20442, 20469), False, 'from glob import glob\n'), ((2679, 2742), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(hillshade_raster == -9999)', 'hillshade_raster'], {}), '(hillshade_raster == -9999, hillshade_raster)\n', (2697, 2742), True, 'import numpy as np\n'), ((4159, 4195), 'matplotlib.image.imread', 'mpimg.imread', (['MapFiles[file_counter]'], {}), '(MapFiles[file_counter])\n', (4171, 4195), True, 'import matplotlib.image as mpimg\n'), ((10116, 10143), 'fiona.collection', 'collection', (['FPFiles[i]', '"""r"""'], {}), "(FPFiles[i], 'r')\n", (10126, 10143), False, 'from fiona import collection\n'), ((10330, 10360), 'fiona.collection', 'collection', (['PointFiles[i]', '"""r"""'], {}), "(PointFiles[i], 'r')\n", (10340, 10360), False, 'from fiona import collection\n'), ((23672, 23758), 'numpy.logical_and', 'np.logical_and', (['(FP_raster > middle_mask_range[0])', '(FP_raster < middle_mask_range[1])'], {}), '(FP_raster > middle_mask_range[0], 
FP_raster <\n middle_mask_range[1])\n', (23686, 23758), True, 'import numpy as np\n'), ((10214, 10282), 'descartes.PolygonPatch', 'PolygonPatch', (["f['geometry']"], {'fc': '"""blue"""', 'ec': '"""blue"""', 'lw': '(0.1)', 'alpha': '(0.8)'}), "(f['geometry'], fc='blue', ec='blue', lw=0.1, alpha=0.8)\n", (10226, 10282), False, 'from descartes import PolygonPatch\n')] |
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__email__ = "<EMAIL>"
__affiliation__ = "Texas A&M University"
import pandas as pd
import xlsxwriter
import numpy as np
from DataFusion import DataFusion
import time
import datetime
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import average_precision_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn import svm
from sklearn import tree
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import plot_precision_recall_curve
import msgpack as mp
import sys
from sklearn.metrics import precision_recall_fscore_support
import random
import copy
class CoTrainingClassifier(object):
	"""
	Co-training wrapper for semi-supervised binary classification
	(Blum & Mitchell, 1998).

	Two classifiers are trained on two independent feature views (X1, X2).
	On each iteration, both classifiers label the unlabeled pool samples
	they are most confident about; those samples are added to the labeled
	set and both classifiers are refit.

	Parameters:
		clf: classifier for the first feature view; must implement
			fit/predict (and predict_proba for confidence-based labeling).
		clf2: optional classifier for the second view. If None, a shallow
			copy of clf is used.
		p (int): number of positive samples each classifier labels per
			iteration. -1 means derive from the class balance (with n).
		n (int): number of negative samples per iteration (-1 = derive).
		k (int): maximum number of co-training iterations.
		u (int): size of the unlabeled candidate pool U' per iteration.
	"""
	def __init__(self, clf, clf2=None, p=-1, n=-1, k=30, u = 75):
		self.clf1_ = clf
		#we will just use a copy of clf (the same kind of classifier) if clf2 is not specified
		if clf2 is None:
			self.clf2_ = copy.copy(clf)
		else:
			self.clf2_ = clf2
		#if they only specify one of n or p, throw an exception
		if (p == -1 and n != -1) or (p != -1 and n == -1):
			raise ValueError('Current implementation supports either both p and n being specified, or neither')
		self.p_ = p
		self.n_ = n
		self.k_ = k
		self.u_ = u
		# NOTE(review): this reseeds the *global* random module from system
		# entropy, which affects any other user of `random` in the process.
		random.seed()

	def fit(self, X1, X2, y):
		"""
		Fit both view classifiers with co-training.

		Parameters:
			X1: feature view 1 (must support fancy indexing by a list of
				row indices, e.g. a numpy array).
			X2: feature view 2 (same number of rows as X1).
			y: labels; 1 = positive, 0 = negative, -1 = unlabeled.
		"""
		#we need y to be a numpy array so we can do more complex slicing
		y = np.asarray(y)
		#set the n and p parameters if we need to, matching the class balance
		if self.p_ == -1 and self.n_ == -1:
			num_pos = sum(1 for y_i in y if y_i == 1)
			num_neg = sum(1 for y_i in y if y_i == 0)
			n_p_ratio = num_neg / float(num_pos)
			if n_p_ratio > 1:
				self.p_ = 1
				self.n_ = round(self.p_*n_p_ratio)
			else:
				self.n_ = 1
				self.p_ = round(self.n_/n_p_ratio)
		assert(self.p_ > 0 and self.n_ > 0 and self.k_ > 0 and self.u_ > 0)
		#the set of unlabeled samples
		U = [i for i, y_i in enumerate(y) if y_i == -1]
		#we randomize here, and then just take from the back so we don't have to sample every time
		random.shuffle(U)
		#this is U' in the paper
		U_ = U[-min(len(U), self.u_):]
		#the samples that are initially labeled
		L = [i for i, y_i in enumerate(y) if y_i != -1]
		#remove the samples in U_ from U
		U = U[:-len(U_)]
		it = 0 #number of cotraining iterations we've done so far
		#loop until we have assigned labels to everything in U or we hit our iteration break condition
		while it != self.k_ and U:
			it += 1
			self.clf1_.fit(X1[L], y[L])
			self.clf2_.fit(X2[L], y[L])
			y1_prob = self.clf1_.predict_proba(X1[U_])
			y2_prob = self.clf2_.predict_proba(X2[U_])
			n, p = [], []
			#each classifier nominates its most confident negatives (n) and positives (p)
			for i in (y1_prob[:,0].argsort())[-self.n_:]:
				if y1_prob[i,0] > 0.5:
					n.append(i)
			for i in (y1_prob[:,1].argsort())[-self.p_:]:
				if y1_prob[i,1] > 0.5:
					p.append(i)
			for i in (y2_prob[:,0].argsort())[-self.n_:]:
				if y2_prob[i,0] > 0.5:
					n.append(i)
			for i in (y2_prob[:,1].argsort())[-self.p_:]:
				if y2_prob[i,1] > 0.5:
					p.append(i)
			#label the samples and remove the newly added samples from U_
			y[[U_[x] for x in p]] = 1
			y[[U_[x] for x in n]] = 0
			L.extend([U_[x] for x in p])
			L.extend([U_[x] for x in n])
			U_ = [elem for elem in U_ if not (elem in p or elem in n)]
			#replenish U_ from U with as many samples as we just consumed
			add_counter = 0 #number we have added from U to U_
			num_to_add = len(p) + len(n)
			while add_counter != num_to_add and U:
				add_counter += 1
				U_.append(U.pop())
			#TODO: Handle the case where the classifiers fail to agree on any of the samples (i.e. both n and p are empty)
		#let's fit our final model on the full labeled set
		self.clf1_.fit(X1[L], y[L])
		self.clf2_.fit(X2[L], y[L])

	#TODO: Move this outside of the class into a util file.
	def supports_proba(self, clf, x):
		"""Checks if a given classifier supports the 'predict_proba' method, given a single vector x"""
		try:
			clf.predict_proba([x])
			return True
		# narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
		# are not swallowed
		except Exception:
			return False

	def predict(self, X1, X2):
		"""
		Predict class labels using both view classifiers.

		Where the two classifiers agree, their shared label is used.
		Disagreements are resolved by summed predict_proba confidence, or
		by a random guess if probabilities are not supported.
		"""
		y1 = self.clf1_.predict(X1)
		y2 = self.clf2_.predict(X2)
		proba_supported = self.supports_proba(self.clf1_, X1[0]) and self.supports_proba(self.clf2_, X2[0])
		#fill y_pred with -1 so we can identify the samples in which the classifiers failed to agree
		y_pred = np.asarray([-1] * X1.shape[0])
		for i, (y1_i, y2_i) in enumerate(zip(y1, y2)):
			if y1_i == y2_i:
				y_pred[i] = y1_i
			elif proba_supported:
				y1_probs = self.clf1_.predict_proba([X1[i]])[0]
				y2_probs = self.clf2_.predict_proba([X2[i]])[0]
				sum_y_probs = [prob1 + prob2 for (prob1, prob2) in zip(y1_probs, y2_probs)]
				max_sum_prob = max(sum_y_probs)
				y_pred[i] = sum_y_probs.index(max_sum_prob)
			else:
				#the classifiers disagree and don't support probability, so we guess
				y_pred[i] = random.randint(0, 1)
		#check that we did everything right
		assert not (-1 in y_pred)
		return y_pred

	def predict_proba(self, X1, X2):
		"""Predict the probability of the samples belonging to each class."""
		# Bug fix: `np.float` was removed in NumPy 1.24; use the builtin
		# float (same dtype, float64).
		y_proba = np.full((X1.shape[0], 2), -1, float)
		y1_proba = self.clf1_.predict_proba(X1)
		y2_proba = self.clf2_.predict_proba(X2)
		#average the two views' probability estimates
		for i, (y1_i_dist, y2_i_dist) in enumerate(zip(y1_proba, y2_proba)):
			y_proba[i][0] = (y1_i_dist[0] + y2_i_dist[0]) / 2
			y_proba[i][1] = (y1_i_dist[1] + y2_i_dist[1]) / 2
		_epsilon = 0.0001
		assert all(abs(sum(y_dist) - 1) <= _epsilon for y_dist in y_proba)
		return y_proba
def get_intrusion_window(adversary_path):
    """Return the (start, end) unix timestamps of the attack window.

    The window spans from the 'Time' of the first merged record to the
    'Time' of the last one in the fused adversary data.
    """
    fused = DataFusion()
    fused.load_json(adversary_path)
    fused.extract_cyber_data()
    fused.extract_physical_data()
    merged = fused.merge()
    window_start = int(time.mktime(merged.iloc[0]['Time'].timetuple()))
    window_end = int(time.mktime(merged.iloc[-1]['Time'].timetuple()))
    return window_start, window_end
def _resolve_paths(usecase, n_outstations, poll_rate, location):
    """Build the (csv_path, adversary_json_path) pair for one scenario.

    UC1 csv files carry no use-case prefix while UC2-UC4 files are prefixed
    with 'uc2_'/'uc3_'/'uc4_'; this reproduces the naming scheme previously
    spelled out by a 16-branch if/elif ladder (which also raised NameError
    for any combination it did not list).
    """
    prefix = '' if usecase == 'UC1' else usecase.lower() + '_'
    csv_path = 'csvs/{uc}/{pre}{loc}_merged_phy_cyb_{os}os_{poll}poll_encoded.csv'.format(
        uc=usecase, pre=prefix, loc=location, os=n_outstations, poll=poll_rate)
    adv_path = 'Adversary/{uc}_PyDNP3_CORE_Adversary_{os}_OS_{poll}_dnp3.json'.format(
        uc=usecase, os=n_outstations, poll=poll_rate)
    return csv_path, adv_path


def _split_views(X, y):
    """Prepare the co-training splits from a feature matrix and labels.

    Masks the first half of `y` as unlabeled (-1) in place (matching the
    original behaviour), carves out the last quarter as the test set and
    splits the remaining training features into two equal-width views.

    Returns (X1, X2, y_train, X_test, y_test, n_samples, n_features).
    """
    n_samples, n_features = X.shape
    y[:n_samples // 2] = -1
    X_test = X[-n_samples // 4:]
    y_test = y[-n_samples // 4:]
    y_train = y[:-n_samples // 4]
    X_train = X[:-n_samples // 4]
    return (X_train[:, :n_features // 2], X_train[:, n_features // 2:],
            y_train, X_test, y_test, n_samples, n_features)


def _fit_and_score(estimator, X1, X2, y, X_test, y_test, n_samples, n_features):
    """Co-train `estimator` on the two views and return the weighted
    (precision, recall, f1, support) scores on the held-out test set."""
    co_clf = CoTrainingClassifier(estimator, u=n_samples // 10)
    co_clf.fit(X1, X2, y)
    y_pred = co_clf.predict(X_test[:, :n_features // 2],
                            X_test[:, n_features // 2:])
    return precision_recall_fscore_support(y_test, y_pred, average='weighted')


def semi_supervised_learning(_usecase, _os, _poll_rate, location, pca, pure_cyber=False, pure_phy=False):
    """Run semi-supervised co-training on one merged cyber/physical dataset.

    Args:
        _usecase: use-case id, e.g. 'UC1'.
        _os: number of DNP3 outstations (5 or 10).
        _poll_rate: DNP3 polling interval in seconds (30 or 60).
        location: capture point for the raw data ('master', 'DS', 'router').
        pca: truthy to reduce the features to 10 PCA components first.
        pure_cyber: keep only cyber (network) features.
        pure_phy: keep only physical (measurement) features.

    Returns:
        A DataFrame with one column of weighted (precision, recall, f1,
        support) per model: svc, dt, rf, gnb, bnb, mlp.
    """
    common_path = '../data/'
    path, adv_path = _resolve_paths(_usecase, _os, _poll_rate, location)
    start_time, end_time = get_intrusion_window(common_path + adv_path)
    data = pd.read_csv(common_path + path)
    # Drop the unnamed index column produced by DataFrame.to_csv.
    data = data.drop(data.columns[[0]], axis=1)
    data['DNP3 Objects'].replace('None', np.nan, inplace=True)
    # Fill missing protocol/measurement fields with neutral values.
    replace_map = {'DNP3 Objects': 0, 'value1': 0.0, 'value2': 0.0,
                   'value3': 0.0, 'value4': 0.0, 'value5': 0.0}
    data = data.fillna(value=replace_map)
    data['Time'] = pd.to_datetime(data['Time'])
    # Label every record falling strictly inside the intrusion window as attack.
    data['Label'] = 0
    for i, val in data.iterrows():
        unix_time = int(time.mktime(val['Time'].timetuple()))
        if start_time < unix_time < end_time:
            # .loc avoids pandas' chained-assignment pitfall of
            # data['Label'][i] = 1, which may silently write to a copy.
            data.loc[i, 'Label'] = 1
    # Build the feature table (all features by default).
    feature_table = data.drop(columns=['Time', 'snort_alert', 'snort_alert_type', 'Label'])
    if pure_cyber:
        feature_table = data.drop(columns=['Time', 'snort_alert', 'snort_alert_type', 'Label',
                                           'LL_dnp3_src', 'LL_dnp3_dst', 'LL_dnp3_len', 'LL_dnp3_ctl',
                                           'TL_dnp3_tr_ctl', 'AL_dnp3_al_func', 'AL_dnp3_al_ctl',
                                           'DNP3 Object Count', 'DNP3 Objects', 'AL_Payload'])
        # Drop physical measurement columns (value1..value5).
        feature_table = feature_table[feature_table.columns[~feature_table.columns.str.contains('value')]]
        print(feature_table.columns)
    if pure_phy:
        feature_table = data.drop(columns=['Time', 'snort_alert', 'snort_alert_type', 'frame_len',
                                           'frame_protocols', 'eth_src', 'eth_dst', 'ip_src', 'ip_dst',
                                           'ip_len', 'ip_flags', 'tcp_srcport', 'tcp_dstport', 'tcp_len',
                                           'tcp_flags', 'tcp_retransmission', 'tcp_rtt', 'flow_count',
                                           'flow_final_count', 'packets', 'Label'])
    label_array = data[['Label']].to_numpy().flatten()
    X = feature_table.to_numpy()
    if pca:
        # Reduce to 10 principal components before co-training.
        from sklearn.decomposition import PCA
        reducer = PCA(n_components=10)
        reducer.fit(feature_table.values)
        X = reducer.transform(feature_table.values)
        # BUGFIX: the original kept N_FEATURES = feature_table.shape[1] here,
        # so the view split put all 10 PCA components into X1 and left X2
        # empty; _split_views now derives the widths from X itself.
    X1, X2, y, X_test, y_test, n_samples, n_features = _split_views(X, label_array)
    # Same models, same order as the original copy-pasted stanzas (order
    # matters if any estimator consumes shared random state).
    models = [
        ('svc', svm.SVC(probability=True)),
        ('dt', tree.DecisionTreeClassifier()),
        ('rf', RandomForestClassifier(n_estimators=10)),
        ('gnb', GaussianNB()),
        ('bnb', BernoulliNB()),
        ('mlp', MLPClassifier(solver='lbfgs', alpha=1e-5,
                              hidden_layer_sizes=(5, 2), random_state=1)),
    ]
    # One column of weighted (precision, recall, f1, support) per model.
    prob_table = pd.DataFrame()
    for name, estimator in models:
        prob_table[name] = _fit_and_score(estimator, X1, X2, y,
                                          X_test, y_test, n_samples, n_features)
    print(prob_table)
    return prob_table
#return prob_table,score_table
#### Arguments ##############
'''
argument 1: use case Example: UC1_5OS_60poll , i.e. use case 1, with 5 DNP3 outstation polled with a polling interval of 60 sec
argument 2: boolean to enable feature reduction using PCA
argument 3: pc: If pure Cyber features considered
argument 4: pp: If pure physical features considered
argument 5: Select the location for collecting the raw data. Select either : "master", "DS", "router"
'''
# ---- Script entry point: parse CLI arguments and run one experiment ----
# Usage: script.py <case> <enable_PCA> <pure_cyber> <pure_phy> <location>
case = sys.argv[1]  # e.g. 'UC1_5OS_60poll'
enable_PCA = sys.argv[2]  # NOTE(review): argv values are strings, so 'False' is truthy -- confirm intended
pc = sys.argv[3]  # pure-cyber flag; same truthy-string caveat as enable_PCA
pp = sys.argv[4]  # pure-physical flag; same truthy-string caveat
location = sys.argv[5]  # capture point: 'master', 'DS' or 'router'
_usecase = case.split('_')[0]
print(_usecase)
outstations = case.split('_')[1]  # e.g. '5OS'
_os = outstations.replace('OS','')
poll_interval = case.split('_')[2]  # e.g. '60poll'
_pi = poll_interval.replace('poll','')
data_as_df = semi_supervised_learning(_usecase,int(_os),int(_pi),location, pca=enable_PCA,pure_cyber= pc, pure_phy=pp)
data_as_list = data_as_df.values.tolist()
#score_as_list = score_as_df.values.tolist()
# Persist the per-model score table with msgpack for later fusion analysis.
mp.pack(data_as_list, open('cotrain_pscores_'+sys.argv[1]+'.mp','wb'))
#mp.pack(score_as_list, open('prob_'+sys.argv[1]+'.mp','wb'))
| [
"pandas.DataFrame",
"numpy.full",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.naive_bayes.GaussianNB",
"random.randint",
"pandas.read_csv",
"random.shuffle",
"numpy.asarray",
"copy.copy",
"sklearn.tree.DecisionTreeClassifier",
"DataFusion.DataFusion",
"pandas.to_datetime",
"random.se... | [((5773, 5785), 'DataFusion.DataFusion', 'DataFusion', ([], {}), '()\n', (5783, 5785), False, 'from DataFusion import DataFusion\n'), ((9354, 9371), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (9365, 9371), True, 'import pandas as pd\n'), ((9759, 9787), 'pandas.to_datetime', 'pd.to_datetime', (["data['Time']"], {}), "(data['Time'])\n", (9773, 9787), True, 'import pandas as pd\n'), ((11736, 11750), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11748, 11750), True, 'import pandas as pd\n'), ((13335, 13402), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_test', 'y_pred'], {'average': '"""weighted"""'}), "(y_test, y_pred, average='weighted')\n", (13366, 13402), False, 'from sklearn.metrics import precision_recall_fscore_support\n'), ((13757, 13824), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_test', 'y_pred'], {'average': '"""weighted"""'}), "(y_test, y_pred, average='weighted')\n", (13788, 13824), False, 'from sklearn.metrics import precision_recall_fscore_support\n'), ((14185, 14252), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_test', 'y_pred'], {'average': '"""weighted"""'}), "(y_test, y_pred, average='weighted')\n", (14216, 14252), False, 'from sklearn.metrics import precision_recall_fscore_support\n'), ((14585, 14652), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_test', 'y_pred'], {'average': '"""weighted"""'}), "(y_test, y_pred, average='weighted')\n", (14616, 14652), False, 'from sklearn.metrics import precision_recall_fscore_support\n'), ((14990, 15057), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_test', 'y_pred'], {'average': '"""weighted"""'}), "(y_test, y_pred, average='weighted')\n", (15021, 15057), False, 'from sklearn.metrics import precision_recall_fscore_support\n'), ((15466, 15533), 
'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_test', 'y_pred'], {'average': '"""weighted"""'}), "(y_test, y_pred, average='weighted')\n", (15497, 15533), False, 'from sklearn.metrics import precision_recall_fscore_support\n'), ((1634, 1647), 'random.seed', 'random.seed', ([], {}), '()\n', (1645, 1647), False, 'import random\n'), ((1751, 1764), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1761, 1764), True, 'import numpy as np\n'), ((2371, 2388), 'random.shuffle', 'random.shuffle', (['U'], {}), '(U)\n', (2385, 2388), False, 'import random\n'), ((4567, 4597), 'numpy.asarray', 'np.asarray', (['([-1] * X1.shape[0])'], {}), '([-1] * X1.shape[0])\n', (4577, 4597), True, 'import numpy as np\n'), ((5307, 5346), 'numpy.full', 'np.full', (['(X1.shape[0], 2)', '(-1)', 'np.float'], {}), '((X1.shape[0], 2), -1, np.float)\n', (5314, 5346), True, 'import numpy as np\n'), ((11959, 11979), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(10)'}), '(n_components=10)\n', (11962, 11979), False, 'from sklearn.decomposition import PCA\n'), ((12097, 12184), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10']"}), "(columns=['f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9',\n 'f10'])\n", (12109, 12184), True, 'import pandas as pd\n'), ((13171, 13196), 'sklearn.svm.SVC', 'svm.SVC', ([], {'probability': '(True)'}), '(probability=True)\n', (13178, 13196), False, 'from sklearn import svm\n'), ((13578, 13607), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {}), '()\n', (13605, 13607), False, 'from sklearn import tree\n'), ((13996, 14035), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (14018, 14035), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((14420, 14432), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (14430, 14432), 
False, 'from sklearn.naive_bayes import GaussianNB\n'), ((14824, 14837), 'sklearn.naive_bayes.BernoulliNB', 'BernoulliNB', ([], {}), '()\n', (14835, 14837), False, 'from sklearn.naive_bayes import BernoulliNB\n'), ((15233, 15322), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'solver': '"""lbfgs"""', 'alpha': '(1e-05)', 'hidden_layer_sizes': '(5, 2)', 'random_state': '(1)'}), "(solver='lbfgs', alpha=1e-05, hidden_layer_sizes=(5, 2),\n random_state=1)\n", (15246, 15322), False, 'from sklearn.neural_network import MLPClassifier\n'), ((1313, 1327), 'copy.copy', 'copy.copy', (['clf'], {}), '(clf)\n', (1322, 1327), False, 'import copy\n'), ((5081, 5101), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (5095, 5101), False, 'import random\n')] |
import time
import numpy as np
from numpy import ndarray
import edunet as net
# OpenCV is an optional dependency of this example; fail early with install
# instructions instead of a bare ImportError deep in the script.
try:
    import cv2
except ImportError as exc:
    # BUGFIX: the message previously told users to `pip install opercv-python`
    # (typo) -- the real package name is `opencv-python`. Chaining `from exc`
    # preserves the original import failure for debugging.
    raise ImportError(
        'To run this example script OpenCV library must be installed. '
        'Run shell command `pip install opencv-python` to install OpenCV '
        'python library.') from exc
def create_emoji_dataset_batch(image_1: ndarray, image_2: ndarray, n: int, ratio: float = 0.5, random_state=None):
    """Build a shuffled batch of `n` images sampled from two source images.

    Args:
        image_1: first source image; paired with one-hot label [1, 0].
        image_2: second source image; paired with one-hot label [0, 1].
        n: total number of samples in the batch.
        ratio: fraction of the batch drawn from `image_1` (rounded).
        random_state: numpy RandomState-like object used to shuffle; defaults
            to the global `np.random` module. (The old code crashed with
            AttributeError when this was left as None, despite the default.)

    Returns:
        (data_batch, labels_batch): data_batch has shape (n, *image.shape),
        labels_batch has shape (n, 2, 1), shuffled with the same permutation.
    """
    assert image_1.dtype == image_2.dtype, 'Data type mismatch.'
    if random_state is None:
        # Honour the documented default instead of crashing on None.
        random_state = np.random
    first_half = int(round(n * ratio))
    second_half = n - first_half
    data_batch_1 = np.repeat(np.expand_dims(image_1, 0), first_half, 0)
    data_batch_2 = np.repeat(np.expand_dims(image_2, 0), second_half, 0)
    data_batch = np.concatenate([data_batch_1, data_batch_2], 0)
    dtype = image_1.dtype
    labels_batch_1 = np.zeros((first_half, 2, 1), dtype)
    labels_batch_1[:, 0] = 1
    labels_batch_2 = np.zeros((second_half, 2, 1), dtype)
    labels_batch_2[:, 1] = 1
    labels_batch = np.concatenate([labels_batch_1, labels_batch_2], 0)
    # Shuffle images and labels with the same permutation so pairs stay aligned.
    indices = random_state.permutation(n)
    return data_batch[indices], labels_batch[indices]
image_path_a = r'images/god_damned_frown.bmp'
image_path_b = r'images/god_damned_smile.bmp'
# Load and scale pixel values into [0, 1].
# NOTE(review): if cv2.imread returns None (missing file), np.float32(None)
# raises before the assert below ever runs -- the check arrives too late.
image_a = np.float32(cv2.imread(image_path_a)) / 255.
image_b = np.float32(cv2.imread(image_path_b)) / 255.
assert all(img is not None for img in [image_a, image_b]), 'Images failed to load.'
batch_size = 2
data_type = np.float32
image_a = image_a.astype(data_type)
image_b = image_b.astype(data_type)
# Training hyper-parameters shared by both API demos below.
SEED = 6969696
LEARNING_RATE = 0.1
N_EPOCH = 200
def low_level_api():
    """Train a small CNN on the two-emoji batch using edunet's low-level API.

    Every layer's forward pass, backward pass and parameter update is driven
    by hand, in explicit topological order.
    """
    rng = np.random.RandomState(SEED)
    # Placeholders for the image batch and its binary (0/1) labels.
    input_data = net.Input([batch_size, *image_a.shape], data_type)
    input_labels = net.Input([batch_size, 1, 1], data_type)
    # Five 'valid' conv(4 filters, 3x3, stride 1) + ReLU stages...
    conv_1 = net.Convolution2D(input_data, 4, 3, strides=1, mode='valid', weights_initializer=net.initializers.HeNormal, random_state=rng)
    relu_1 = net.Relu(conv_1)
    conv_2 = net.Convolution2D(relu_1, 4, 3, strides=1, mode='valid', weights_initializer=net.initializers.HeNormal, random_state=rng)
    relu_2 = net.Relu(conv_2)
    conv_3 = net.Convolution2D(relu_2, 4, 3, strides=1, mode='valid', weights_initializer=net.initializers.HeNormal, random_state=rng)
    relu_3 = net.Relu(conv_3)
    conv_4 = net.Convolution2D(relu_3, 4, 3, strides=1, mode='valid', weights_initializer=net.initializers.HeNormal, random_state=rng)
    relu_4 = net.Relu(conv_4)
    conv_5 = net.Convolution2D(relu_4, 4, 3, strides=1, mode='valid', weights_initializer=net.initializers.HeNormal, random_state=rng)
    relu_5 = net.Relu(conv_5)
    # ...then flatten -> dense(8) -> ReLU -> dense(1) -> sigmoid.
    flatten = net.Flatten(relu_5)
    dense_1 = net.Dense(flatten, 8, random_state=rng)
    relu_6 = net.Relu(dense_1)
    dense_2 = net.Dense(relu_6, 1, random_state=rng)
    sigmoid_1 = net.Sigmoid(dense_2)
    # Squared distance to the label, summed over the batch axis.
    loss = net.SquaredDistance(sigmoid_1, input_labels)
    reduce_sum = net.ReduceSum(loss, 0)
    for epoch in range(N_EPOCH):
        data_batch, labels_batch = create_emoji_dataset_batch(image_a, image_b, batch_size, 0.5, rng)
        # Keep only the first one-hot component as the scalar 0/1 target.
        labels_batch = labels_batch[:, 0, :].reshape([batch_size, 1, 1])
        input_data.feed(data_batch)
        input_labels.feed(labels_batch)
        # Forward pass, layer by layer in topological order.
        conv_1.run()
        relu_1.run()
        conv_2.run()
        relu_2.run()
        conv_3.run()
        relu_3.run()
        conv_4.run()
        relu_4.run()
        conv_5.run()
        relu_5.run()
        flatten.run()
        dense_1.run()
        relu_6.run()
        dense_2.run()
        sigmoid_1.run()
        loss.run()
        reduce_sum.run()
        # Backward pass: each layer consumes the gradient its consumer
        # stored for this layer's output, in exact reverse order.
        reduce_sum.compute_gradients(None)
        loss.compute_gradients(reduce_sum.grads_dict[loss.output])
        sigmoid_1.compute_gradients(gradients=loss.grads_dict[sigmoid_1.output])
        dense_2.compute_gradients(gradients=sigmoid_1.grads_dict[dense_2.output])
        relu_6.compute_gradients(gradients=dense_2.grads_dict[relu_6.output])
        dense_1.compute_gradients(gradients=relu_6.grads_dict[dense_1.output])
        flatten.compute_gradients(gradients=dense_1.grads_dict[flatten.output])
        relu_5.compute_gradients(gradients=flatten.grads_dict[relu_5.output])
        conv_5.compute_gradients(gradients=relu_5.grads_dict[conv_5.output])
        relu_4.compute_gradients(gradients=conv_5.grads_dict[relu_4.output])
        conv_4.compute_gradients(gradients=relu_4.grads_dict[conv_4.output])
        relu_3.compute_gradients(gradients=conv_4.grads_dict[relu_3.output])
        conv_3.compute_gradients(gradients=relu_3.grads_dict[conv_3.output])
        relu_2.compute_gradients(gradients=conv_3.grads_dict[relu_2.output])
        conv_2.compute_gradients(gradients=relu_2.grads_dict[conv_2.output])
        relu_1.compute_gradients(gradients=conv_2.grads_dict[relu_1.output])
        conv_1.compute_gradients(gradients=relu_1.grads_dict[conv_1.output])
        input_labels.compute_gradients()
        input_data.compute_gradients()
        # Layers holding trainable variables to update this epoch.
        layers = [
            conv_1,
            relu_1,
            conv_2,
            relu_2,
            conv_3,
            relu_3,
            conv_4,
            relu_4,
            conv_5,
            relu_5,
            flatten,
            dense_1,
            relu_6,
            dense_2,
            loss,
        ]
        print('epoch: %d -- loss: %s -- labels: %s -- outs: %s' % (
            epoch,
            str(loss.output.values.mean()),
            str(input_labels.output.values.flatten()),
            str(sigmoid_1.output.values.flatten())
        ))
        # Vanilla gradient-descent update of every trainable variable.
        for layer in layers:
            for var in layer.var_list:
                if var not in layer.grads_dict:
                    continue
                grads_var = layer.grads_dict[var]
                # NOTE(review): averages the per-sample updated values over
                # axis 0 -- presumably edunet keeps per-sample gradients;
                # confirm against the edunet variable/gradient contract.
                var.set_values(np.mean(var.values - LEARNING_RATE * grads_var.values, axis=0))
def high_level_api():
    """Train the same emoji classifier through edunet's graph/flow API,
    letting the framework schedule the forward and backward passes."""
    rand = np.random.RandomState(SEED)
    model = net.Graph()
    in_images = model.add(net.Input([batch_size, *image_a.shape], data_type))
    in_labels = model.add(net.Input([batch_size, 1, 1], data_type))
    # Five conv(4 filters, 3x3, 'valid') + ReLU stages, built in a loop.
    layer = in_images
    for _ in range(5):
        layer = model.add(net.Convolution2D(
            layer, 4, 3, strides=1, mode='valid',
            weights_initializer=net.initializers.HeNormal, random_state=rand))
        layer = model.add(net.Relu(layer))
    # Dense head: flatten -> dense(8) -> ReLU -> dense(1) -> sigmoid.
    flat = model.add(net.Flatten(layer))
    hidden = model.add(net.Dense(flat, 8, random_state=rand))
    hidden = model.add(net.Relu(hidden))
    logits = model.add(net.Dense(hidden, 1, random_state=rand))
    out = model.add(net.Sigmoid(logits))
    loss_node = model.add(net.SquaredDistance(out, in_labels))
    total_loss = model.add(net.ReduceSum(loss_node, 0))
    train_step = model.add(net.GradientDescentOptimizer(LEARNING_RATE).minimize(total_loss))
    flow = net.Flow(model)
    for epoch in range(N_EPOCH):
        batch_x, batch_y = create_emoji_dataset_batch(image_a, image_b, batch_size, 0.5, rand)
        # Keep only the first one-hot component as the scalar 0/1 target.
        batch_y = batch_y[:, 0, :].reshape([batch_size, 1, 1])
        _, loss_val, out_val, labels_val = flow.run(
            [train_step, loss_node, out, in_labels],
            feed_dict={
                in_images: batch_x,
                in_labels: batch_y
            })
        print('epoch: %d -- loss: %s -- labels: %s -- outs: %s' % (
            epoch,
            str(loss_val.mean()),
            str(labels_val.flatten()),
            str(out_val.flatten())
        ))
def timeit(callback: callable) -> float:
    """Run `callback` once and return its duration in seconds.

    Uses time.perf_counter(), the documented clock for measuring short
    durations: it is monotonic (immune to system clock adjustments) and has
    higher resolution than time.time().
    """
    start = time.perf_counter()
    callback()
    return time.perf_counter() - start
# Benchmark both training demos; they build the same network, differing
# only in how much bookkeeping the caller has to do.
print('\nRunning low-level api...')
print('Done. Completed in', timeit(low_level_api), 'seconds.')
print('\nRunning high-level api...')
print('Done. Completed in', timeit(high_level_api), 'seconds.')
| [
"edunet.SquaredDistance",
"edunet.Dense",
"edunet.Input",
"edunet.Sigmoid",
"edunet.GradientDescentOptimizer",
"numpy.zeros",
"edunet.Relu",
"numpy.random.RandomState",
"edunet.ReduceSum",
"time.time",
"numpy.expand_dims",
"cv2.imread",
"edunet.Flatten",
"edunet.Convolution2D",
"numpy.me... | [((736, 783), 'numpy.concatenate', 'np.concatenate', (['[data_batch_1, data_batch_2]', '(0)'], {}), '([data_batch_1, data_batch_2], 0)\n', (750, 783), True, 'import numpy as np\n'), ((833, 868), 'numpy.zeros', 'np.zeros', (['(first_half, 2, 1)', 'dtype'], {}), '((first_half, 2, 1), dtype)\n', (841, 868), True, 'import numpy as np\n'), ((919, 955), 'numpy.zeros', 'np.zeros', (['(second_half, 2, 1)', 'dtype'], {}), '((second_half, 2, 1), dtype)\n', (927, 955), True, 'import numpy as np\n'), ((1004, 1055), 'numpy.concatenate', 'np.concatenate', (['[labels_batch_1, labels_batch_2]', '(0)'], {}), '([labels_batch_1, labels_batch_2], 0)\n', (1018, 1055), True, 'import numpy as np\n'), ((1697, 1724), 'numpy.random.RandomState', 'np.random.RandomState', (['SEED'], {}), '(SEED)\n', (1718, 1724), True, 'import numpy as np\n'), ((1743, 1793), 'edunet.Input', 'net.Input', (['[batch_size, *image_a.shape]', 'data_type'], {}), '([batch_size, *image_a.shape], data_type)\n', (1752, 1793), True, 'import edunet as net\n'), ((1813, 1853), 'edunet.Input', 'net.Input', (['[batch_size, 1, 1]', 'data_type'], {}), '([batch_size, 1, 1], data_type)\n', (1822, 1853), True, 'import edunet as net\n'), ((1868, 1997), 'edunet.Convolution2D', 'net.Convolution2D', (['input_data', '(4)', '(3)'], {'strides': '(1)', 'mode': '"""valid"""', 'weights_initializer': 'net.initializers.HeNormal', 'random_state': 'rng'}), "(input_data, 4, 3, strides=1, mode='valid',\n weights_initializer=net.initializers.HeNormal, random_state=rng)\n", (1885, 1997), True, 'import edunet as net\n'), ((2007, 2023), 'edunet.Relu', 'net.Relu', (['conv_1'], {}), '(conv_1)\n', (2015, 2023), True, 'import edunet as net\n'), ((2037, 2162), 'edunet.Convolution2D', 'net.Convolution2D', (['relu_1', '(4)', '(3)'], {'strides': '(1)', 'mode': '"""valid"""', 'weights_initializer': 'net.initializers.HeNormal', 'random_state': 'rng'}), "(relu_1, 4, 3, strides=1, mode='valid',\n weights_initializer=net.initializers.HeNormal, 
random_state=rng)\n", (2054, 2162), True, 'import edunet as net\n'), ((2172, 2188), 'edunet.Relu', 'net.Relu', (['conv_2'], {}), '(conv_2)\n', (2180, 2188), True, 'import edunet as net\n'), ((2202, 2327), 'edunet.Convolution2D', 'net.Convolution2D', (['relu_2', '(4)', '(3)'], {'strides': '(1)', 'mode': '"""valid"""', 'weights_initializer': 'net.initializers.HeNormal', 'random_state': 'rng'}), "(relu_2, 4, 3, strides=1, mode='valid',\n weights_initializer=net.initializers.HeNormal, random_state=rng)\n", (2219, 2327), True, 'import edunet as net\n'), ((2337, 2353), 'edunet.Relu', 'net.Relu', (['conv_3'], {}), '(conv_3)\n', (2345, 2353), True, 'import edunet as net\n'), ((2367, 2492), 'edunet.Convolution2D', 'net.Convolution2D', (['relu_3', '(4)', '(3)'], {'strides': '(1)', 'mode': '"""valid"""', 'weights_initializer': 'net.initializers.HeNormal', 'random_state': 'rng'}), "(relu_3, 4, 3, strides=1, mode='valid',\n weights_initializer=net.initializers.HeNormal, random_state=rng)\n", (2384, 2492), True, 'import edunet as net\n'), ((2502, 2518), 'edunet.Relu', 'net.Relu', (['conv_4'], {}), '(conv_4)\n', (2510, 2518), True, 'import edunet as net\n'), ((2532, 2657), 'edunet.Convolution2D', 'net.Convolution2D', (['relu_4', '(4)', '(3)'], {'strides': '(1)', 'mode': '"""valid"""', 'weights_initializer': 'net.initializers.HeNormal', 'random_state': 'rng'}), "(relu_4, 4, 3, strides=1, mode='valid',\n weights_initializer=net.initializers.HeNormal, random_state=rng)\n", (2549, 2657), True, 'import edunet as net\n'), ((2667, 2683), 'edunet.Relu', 'net.Relu', (['conv_5'], {}), '(conv_5)\n', (2675, 2683), True, 'import edunet as net\n'), ((2698, 2717), 'edunet.Flatten', 'net.Flatten', (['relu_5'], {}), '(relu_5)\n', (2709, 2717), True, 'import edunet as net\n'), ((2732, 2771), 'edunet.Dense', 'net.Dense', (['flatten', '(8)'], {'random_state': 'rng'}), '(flatten, 8, random_state=rng)\n', (2741, 2771), True, 'import edunet as net\n'), ((2785, 2802), 'edunet.Relu', 'net.Relu', 
(['dense_1'], {}), '(dense_1)\n', (2793, 2802), True, 'import edunet as net\n'), ((2817, 2855), 'edunet.Dense', 'net.Dense', (['relu_6', '(1)'], {'random_state': 'rng'}), '(relu_6, 1, random_state=rng)\n', (2826, 2855), True, 'import edunet as net\n'), ((2872, 2892), 'edunet.Sigmoid', 'net.Sigmoid', (['dense_2'], {}), '(dense_2)\n', (2883, 2892), True, 'import edunet as net\n'), ((2904, 2948), 'edunet.SquaredDistance', 'net.SquaredDistance', (['sigmoid_1', 'input_labels'], {}), '(sigmoid_1, input_labels)\n', (2923, 2948), True, 'import edunet as net\n'), ((2966, 2988), 'edunet.ReduceSum', 'net.ReduceSum', (['loss', '(0)'], {}), '(loss, 0)\n', (2979, 2988), True, 'import edunet as net\n'), ((5915, 5942), 'numpy.random.RandomState', 'np.random.RandomState', (['SEED'], {}), '(SEED)\n', (5936, 5942), True, 'import numpy as np\n'), ((5956, 5967), 'edunet.Graph', 'net.Graph', ([], {}), '()\n', (5965, 5967), True, 'import edunet as net\n'), ((7546, 7561), 'edunet.Flow', 'net.Flow', (['graph'], {}), '(graph)\n', (7554, 7561), True, 'import edunet as net\n'), ((8301, 8312), 'time.time', 'time.time', ([], {}), '()\n', (8310, 8312), False, 'import time\n'), ((603, 629), 'numpy.expand_dims', 'np.expand_dims', (['image_1', '(0)'], {}), '(image_1, 0)\n', (617, 629), True, 'import numpy as np\n'), ((675, 701), 'numpy.expand_dims', 'np.expand_dims', (['image_2', '(0)'], {}), '(image_2, 0)\n', (689, 701), True, 'import numpy as np\n'), ((1330, 1354), 'cv2.imread', 'cv2.imread', (['image_path_a'], {}), '(image_path_a)\n', (1340, 1354), False, 'import cv2\n'), ((1384, 1408), 'cv2.imread', 'cv2.imread', (['image_path_b'], {}), '(image_path_b)\n', (1394, 1408), False, 'import cv2\n'), ((5996, 6046), 'edunet.Input', 'net.Input', (['[batch_size, *image_a.shape]', 'data_type'], {}), '([batch_size, *image_a.shape], data_type)\n', (6005, 6046), True, 'import edunet as net\n'), ((6077, 6117), 'edunet.Input', 'net.Input', (['[batch_size, 1, 1]', 'data_type'], {}), '([batch_size, 1, 1], 
data_type)\n', (6086, 6117), True, 'import edunet as net\n'), ((6142, 6271), 'edunet.Convolution2D', 'net.Convolution2D', (['input_data', '(4)', '(3)'], {'strides': '(1)', 'mode': '"""valid"""', 'weights_initializer': 'net.initializers.HeNormal', 'random_state': 'rng'}), "(input_data, 4, 3, strides=1, mode='valid',\n weights_initializer=net.initializers.HeNormal, random_state=rng)\n", (6159, 6271), True, 'import edunet as net\n'), ((6292, 6308), 'edunet.Relu', 'net.Relu', (['conv_1'], {}), '(conv_1)\n', (6300, 6308), True, 'import edunet as net\n'), ((6333, 6458), 'edunet.Convolution2D', 'net.Convolution2D', (['relu_1', '(4)', '(3)'], {'strides': '(1)', 'mode': '"""valid"""', 'weights_initializer': 'net.initializers.HeNormal', 'random_state': 'rng'}), "(relu_1, 4, 3, strides=1, mode='valid',\n weights_initializer=net.initializers.HeNormal, random_state=rng)\n", (6350, 6458), True, 'import edunet as net\n'), ((6479, 6495), 'edunet.Relu', 'net.Relu', (['conv_2'], {}), '(conv_2)\n', (6487, 6495), True, 'import edunet as net\n'), ((6520, 6645), 'edunet.Convolution2D', 'net.Convolution2D', (['relu_2', '(4)', '(3)'], {'strides': '(1)', 'mode': '"""valid"""', 'weights_initializer': 'net.initializers.HeNormal', 'random_state': 'rng'}), "(relu_2, 4, 3, strides=1, mode='valid',\n weights_initializer=net.initializers.HeNormal, random_state=rng)\n", (6537, 6645), True, 'import edunet as net\n'), ((6666, 6682), 'edunet.Relu', 'net.Relu', (['conv_3'], {}), '(conv_3)\n', (6674, 6682), True, 'import edunet as net\n'), ((6707, 6832), 'edunet.Convolution2D', 'net.Convolution2D', (['relu_3', '(4)', '(3)'], {'strides': '(1)', 'mode': '"""valid"""', 'weights_initializer': 'net.initializers.HeNormal', 'random_state': 'rng'}), "(relu_3, 4, 3, strides=1, mode='valid',\n weights_initializer=net.initializers.HeNormal, random_state=rng)\n", (6724, 6832), True, 'import edunet as net\n'), ((6853, 6869), 'edunet.Relu', 'net.Relu', (['conv_4'], {}), '(conv_4)\n', (6861, 6869), True, 'import 
edunet as net\n'), ((6894, 7019), 'edunet.Convolution2D', 'net.Convolution2D', (['relu_4', '(4)', '(3)'], {'strides': '(1)', 'mode': '"""valid"""', 'weights_initializer': 'net.initializers.HeNormal', 'random_state': 'rng'}), "(relu_4, 4, 3, strides=1, mode='valid',\n weights_initializer=net.initializers.HeNormal, random_state=rng)\n", (6911, 7019), True, 'import edunet as net\n'), ((7040, 7056), 'edunet.Relu', 'net.Relu', (['conv_5'], {}), '(conv_5)\n', (7048, 7056), True, 'import edunet as net\n'), ((7082, 7101), 'edunet.Flatten', 'net.Flatten', (['relu_5'], {}), '(relu_5)\n', (7093, 7101), True, 'import edunet as net\n'), ((7127, 7166), 'edunet.Dense', 'net.Dense', (['flatten', '(8)'], {'random_state': 'rng'}), '(flatten, 8, random_state=rng)\n', (7136, 7166), True, 'import edunet as net\n'), ((7191, 7208), 'edunet.Relu', 'net.Relu', (['dense_1'], {}), '(dense_1)\n', (7199, 7208), True, 'import edunet as net\n'), ((7234, 7272), 'edunet.Dense', 'net.Dense', (['relu_6', '(1)'], {'random_state': 'rng'}), '(relu_6, 1, random_state=rng)\n', (7243, 7272), True, 'import edunet as net\n'), ((7300, 7320), 'edunet.Sigmoid', 'net.Sigmoid', (['dense_2'], {}), '(dense_2)\n', (7311, 7320), True, 'import edunet as net\n'), ((7343, 7387), 'edunet.SquaredDistance', 'net.SquaredDistance', (['sigmoid_1', 'input_labels'], {}), '(sigmoid_1, input_labels)\n', (7362, 7387), True, 'import edunet as net\n'), ((7416, 7438), 'edunet.ReduceSum', 'net.ReduceSum', (['loss', '(0)'], {}), '(loss, 0)\n', (7429, 7438), True, 'import edunet as net\n'), ((8345, 8356), 'time.time', 'time.time', ([], {}), '()\n', (8354, 8356), False, 'import time\n'), ((7468, 7511), 'edunet.GradientDescentOptimizer', 'net.GradientDescentOptimizer', (['LEARNING_RATE'], {}), '(LEARNING_RATE)\n', (7496, 7511), True, 'import edunet as net\n'), ((5817, 5879), 'numpy.mean', 'np.mean', (['(var.values - LEARNING_RATE * grads_var.values)'], {'axis': '(0)'}), '(var.values - LEARNING_RATE * grads_var.values, axis=0)\n', (5824, 
5879), True, 'import numpy as np\n')] |
import torch as th
from torch.utils.data import Dataset
import pandas as pd
import os
import numpy as np
import ffmpeg
import math
def convert_to_float(frac_str):
    """Parse a numeric string into a float.

    Accepts plain numbers ('25', '1.5'), fractions ('30000/1001') and mixed
    numbers ('1 1/2', '-1 1/2' — the sign of the leading integer applies to
    the fractional part as well).

    Returns:
        The parsed float, or None when the string is not parseable or the
        denominator is zero (ffmpeg reports '0/0' as avg_frame_rate for
        streams without frames; previously this raised ZeroDivisionError).
    """
    try:
        return float(frac_str)
    except ValueError:
        pass
    try:
        num, denom = frac_str.split('/')
        denom = float(denom)
        if denom == 0:
            return None  # e.g. '0/0' from ffprobe
    except ValueError:
        return None
    try:
        leading, num = num.split(' ')
    except ValueError:
        # plain fraction, no leading integer
        return float(num) / denom
    # mixed number: propagate the sign of the leading integer to the fraction
    sign_mult = -1 if float(leading) < 0 else 1
    return float(leading) + sign_mult * (float(num) / denom)
class VideoLoader(Dataset):
    """Pytorch video loader.

    Each row of the input CSV (columns `video_path`, `feature_path`) yields a
    dict with the decoded video as a float32 tensor of shape
    (num_frames, 3, height, width), plus the input/output paths.
    """

    def __init__(
            self,
            csv,
            framerate=1,
            size=112,
            centercrop=False,
            overwrite=False,
            model_version="ViT-B/32",
    ):
        """
        Args:
            csv: path or file-like object of the CSV listing videos.
            framerate: target decode rate in frames per second.
            size: int (short side is scaled to this) or an (h, w) tuple.
            centercrop: if True, crop a centered size x size square.
            overwrite: if True, process even when the feature file exists.
            model_version: CLIP variant; "RN50x4" rewrites the feature path.
        """
        self.csv = pd.read_csv(csv)
        self.centercrop = centercrop
        self.size = size
        self.framerate = framerate
        self.overwrite = overwrite
        self.model_version = model_version

    def __len__(self):
        # one sample per CSV row
        return len(self.csv)

    def _get_video_info(self, video_path):
        """Probe `video_path` with ffprobe and return stream metadata.

        `frames_length` and `duration` fall back to -1 when the container
        does not expose them.
        """
        probe = ffmpeg.probe(video_path)
        video_stream = next((stream for stream in probe['streams']
                             if stream['codec_type'] == 'video'), None)
        width = int(video_stream['width'])
        height = int(video_stream['height'])
        fps = math.floor(convert_to_float(video_stream['avg_frame_rate']))
        try:
            frames_length = int(video_stream['nb_frames'])
            duration = float(video_stream['duration'])
        except Exception:
            frames_length, duration = -1, -1
        info = {"duration": duration, "frames_length": frames_length,
                "fps": fps, "height": height, "width": width}
        return info

    def _get_output_dim(self, h, w):
        """Return the (height, width) to rescale to; when `self.size` is an
        int the short side becomes `self.size`, preserving aspect ratio."""
        if isinstance(self.size, tuple) and len(self.size) == 2:
            return self.size
        elif h >= w:
            return int(h * self.size / w), self.size
        else:
            return self.size, int(w * self.size / h)

    def __getitem__(self, idx):
        """Decode one video.

        Returns an empty dict on ffmpeg decode failure, and a zero tensor
        when the file is missing, already processed, or ffprobe fails.
        """
        video_path = self.csv['video_path'].values[idx]
        output_file = self.csv['feature_path'].values[idx]
        if self.model_version == "RN50x4":
            output_file = output_file.replace(
                "clip-vit_features", "clip-rn50x4_features")
        load_flag = os.path.isfile(video_path)
        if not self.overwrite:
            # skip videos whose features were already extracted
            load_flag = load_flag and not os.path.isfile(output_file)
        if load_flag:
            try:
                info = self._get_video_info(video_path)
                h, w = info["height"], info["width"]
            except Exception:
                print('ffprobe failed at: {}'.format(video_path))
                return {'video': th.zeros(1), 'input': video_path,
                        'output': output_file, 'info': {}}
            height, width = self._get_output_dim(h, w)
            try:
                duration = info["duration"]
                fps = self.framerate
                if duration > 0 and duration < 1 / fps + 0.1:
                    # very short clip: raise fps so at least ~2 frames are sampled
                    fps = 2 / max(int(duration), 1)
                    print(duration, fps)
            except Exception:
                fps = self.framerate
            cmd = (
                ffmpeg
                .input(video_path)
                .filter('fps', fps=fps)
                .filter('scale', width, height)
            )
            if self.centercrop:
                x = int((width - self.size) / 2.0)
                y = int((height - self.size) / 2.0)
                cmd = cmd.crop(x, y, self.size, self.size)
            try:
                out, _ = (
                    cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24')
                    .run(capture_stdout=True, quiet=True)
                )
            except Exception:
                # BUGFIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; decode failures return {}
                return {}
            if self.centercrop and isinstance(self.size, int):
                height, width = self.size, self.size
            video = np.frombuffer(out, np.uint8).reshape(
                [-1, height, width, 3])
            video = th.from_numpy(video.astype('float32'))
            video = video.permute(0, 3, 1, 2)  # (frames, channels, h, w)
        else:
            video = th.zeros(1)
        return {'video': video, 'input': video_path, 'output': output_file}
| [
"pandas.read_csv",
"numpy.frombuffer",
"os.path.isfile",
"ffmpeg.probe",
"ffmpeg.input",
"torch.zeros"
] | [((958, 974), 'pandas.read_csv', 'pd.read_csv', (['csv'], {}), '(csv)\n', (969, 974), True, 'import pandas as pd\n'), ((1263, 1287), 'ffmpeg.probe', 'ffmpeg.probe', (['video_path'], {}), '(video_path)\n', (1275, 1287), False, 'import ffmpeg\n'), ((2532, 2558), 'os.path.isfile', 'os.path.isfile', (['video_path'], {}), '(video_path)\n', (2546, 2558), False, 'import os\n'), ((4431, 4442), 'torch.zeros', 'th.zeros', (['(1)'], {}), '(1)\n', (4439, 4442), True, 'import torch as th\n'), ((2632, 2659), 'os.path.isfile', 'os.path.isfile', (['output_file'], {}), '(output_file)\n', (2646, 2659), False, 'import os\n'), ((4214, 4242), 'numpy.frombuffer', 'np.frombuffer', (['out', 'np.uint8'], {}), '(out, np.uint8)\n', (4227, 4242), True, 'import numpy as np\n'), ((2999, 3010), 'torch.zeros', 'th.zeros', (['(1)'], {}), '(1)\n', (3007, 3010), True, 'import torch as th\n'), ((3497, 3521), 'ffmpeg.input', 'ffmpeg.input', (['video_path'], {}), '(video_path)\n', (3509, 3521), False, 'import ffmpeg\n')] |
from sklearn.neighbors import NearestNeighbors
from scipy.ndimage import uniform_filter
import matplotlib.pyplot as plt
import numpy as np
import sys, math
class TimeseriesOversampler:
    """Oversample multivariate timeseries by synthesizing new series.

    Pipeline: (1) sample synthetic series lengths from the empirical length
    distribution (bucketed into fixed-size windows), (2) generate points by
    sampling around temporally jittered snapshots of a reference series,
    (3) smooth the result with a moving average.

    NOTE(review): results depend on the order np.random is consumed in; do
    not reorder the random calls if reproducibility with a fixed seed matters.
    """
    def generate_new_lengths(self, timeseries, ts_num=1, window_size=6, X=10, plot=True):
        """Sample `ts_num` synthetic lengths from the empirical distribution
        of series lengths bucketed into windows of `window_size` snapshots.

        Each new length is a randomly chosen reference length jittered by
        Uniform(-X, X) and clamped to its window bounds. Returns a list of
        (reference_ts_index_within_window, new_length) pairs.
        """
        window_ts_lengths = [len(ts) for ts in timeseries]
        windows = [[] for _ in range(int(max(window_ts_lengths)/window_size) + 1)]
        for ts_len in window_ts_lengths:
            window_pos = int(ts_len/window_size)
            windows[window_pos].append(ts_len)
        # compute the percentage of total timeseries in each window
        # (every series falls in exactly one bucket, so `prob` sums to 1)
        tot_ts = len(timeseries)
        prob = [len(window)/tot_ts for window in windows]
        # generate random lengths based on percentages computed above; new_lengths contains pairs in the form: (reference_ts_index_within_window, new_length)
        new_lengths = []
        for rand_window in np.random.choice(list(range(len(prob))), ts_num, p=prob):
            # choose a random reference ts within the chosen time window
            ts_in_window = windows[rand_window]
            reference_ts_pos = np.random.randint(len(ts_in_window))
            new_len = ts_in_window[reference_ts_pos] + np.random.uniform(-X, X)
            # length cannot be lower or higher than this window bounds
            if new_len < rand_window*window_size:
                new_len = rand_window*window_size
            elif new_len >= (rand_window + 1)*window_size:
                new_len = (rand_window + 1)*window_size - 1
            new_lengths.append((reference_ts_pos, int(new_len)))
        if plot:
            # plot timeseries lengths distribution for each window
            plt.figure(figsize=(10, 4))
            plt.subplot(1, 2, 1)
            plt.barh(list(range(len(prob))), prob, color='#30475e')
            plt.title('Timeseries lengths distribution')
            # plot actual timeseries lengths
            plt.subplot(1, 2, 2)
            plt.bar(list(range(len(window_ts_lengths))), window_ts_lengths, color='#222831')
            # plot randomly generated lengths
            plt.bar(list(range(len(window_ts_lengths), len(window_ts_lengths)+len(new_lengths))), [p[1] for p in new_lengths], color='#f2a365')
            plt.title('Time series lengths + synthetic lengths')
            plt.tight_layout()
            plt.show()
        return new_lengths
    def get_point_between_two_points(self, p1, p2, d=0.5):
        """Linear interpolation between p1 and p2 at fraction d (0..1)."""
        return [p1[axis] + (p2[axis] - p1[axis]) * d for axis, val in enumerate(p1)]
    def random_point_in_d_ball(self, point, radius=-1):
        """Sample a random point around `point` (Muller's method).

        With radius > -1 the offset is per-axis scaled by point[i]*radius;
        otherwise a point inside the unit ball is sampled. The offset is
        added component-wise to `point`.
        """
        # Muller algorithm
        d = len(point)
        u = np.random.normal(0, 1, d) # an array of d normally distributed random variables
        norm = np.sum(u**2)**(0.5)
        if radius > -1:
            # NOTE(review): a Python list divided by a numpy scalar — numpy
            # implicitly converts the list to an ndarray, so this works, but
            # it relies on numpy broadcasting semantics.
            x = [ax*(point[i]*radius) for i, ax in enumerate(u)]/norm # r*u/norm
        else:
            r = np.random.uniform()**(1.0/d) # radius*np.random.uniform()**(1.0/d)
            x = r * u / norm
        return [x[i]+v for i, v in enumerate(point)]
    def get_centroid(self, points):
        """Component-wise mean of a non-empty list of equal-length points."""
        centroid = [[] for _ in points[0]]
        l = len(points)
        for point in points:
            for axis, val in enumerate(point):
                centroid[axis].append(val)
        return [sum(values)/l for values in centroid]
    def oversample_timeseries(self, timeseries, window_size=60, ts_num=1, X=8, normal_sd=3.33, sliding_window=3, d=-1, plot_axis=-1):
        """Generate `ts_num` synthetic timeseries from `timeseries`.

        For each synthetic series a reference series of similar length is
        chosen; each new point is sampled in a small ball around a reference
        snapshot picked with Normal(current_snapshot, normal_sd) jitter, and
        the accumulated path is smoothed with a `sliding_window` moving
        average. Set plot_axis >= 0 to plot that axis per length-window.

        NOTE(review): `timeseries` is sorted in place by length, and the
        parameter `d` is currently unused.
        """
        new_lengths = sorted(self.generate_new_lengths(timeseries, ts_num, window_size, X, False))
        # sort time series based on timeseries lengths
        timeseries.sort(key=len)
        synthetic_timeseries = []
        for w in range(int(len(timeseries[-1]) / window_size) + 1):
            window_ts = [ts for ts in timeseries if w * window_size <= len(ts) < (w + 1) * window_size] # original timeseries in this window
            window_ts_lengths = [len(ts) for ts in window_ts] # original timeseries lenghts
            window_new_lengths = []
            window_ts_references = []
            for ts_len in new_lengths: # for each new synthetic time series get thos in this window
                if w * window_size <= ts_len[1] < (w + 1) * window_size:
                    window_ts_references.append(ts_len[0])
                    window_new_lengths.append(ts_len[1])
            # skip windows where there are no reference timeseries and any new timeseries to create (second check should be always false if there are no reference ts)
            if len(window_ts) > 0 and len(window_new_lengths) > 0:
                # in the first snapshot for each reference ts get a random neighbour and compute a third point between these two
                first_snapshot_points = [ts[0] for ts in window_ts]
                knn = NearestNeighbors(n_neighbors=len(first_snapshot_points), p=2)
                knn.fit(first_snapshot_points)
                # ----- COMPUTE NEW TIMESERIES STARTING POINTS -----
                # array with starting points for each new timeseries to create
                starting_points = [None for _ in window_new_lengths]
                # for each reference timeseries assign its first point to its paired synthetic timeseries
                for i, _ in enumerate(window_ts_lengths):
                    for j, pos in enumerate(window_ts_references):
                        if pos == i:
                            starting_points[j] = first_snapshot_points[i]
                # ----- GENERATE POINTS FOR NEW TIMESERIES -----
                # values for new ts based on their reference ts
                generated_points = [[window_ts[window_ts_references[i]][0]] for i, _ in enumerate(window_new_lengths)]
                # value of new ts starting from the chosen starter
                new_ts = [[starting_points[i]] for i, _ in enumerate(window_new_lengths)]
                for snapshot in range(1, len(window_ts[-1])):
                    # all values from all the timeseries which have a value in this position
                    points = [ts[snapshot] for ts in window_ts if len(ts) > snapshot]
                    for ts_pos, ts_length in enumerate(window_new_lengths):
                        if snapshot < ts_length:
                            # reference ts for this new ts
                            reference_ts = window_ts[window_ts_references[ts_pos]]
                            # pick a reference value from reference ts with normal distribution around snapshot (both from past or from future)
                            pos = int(np.random.normal(snapshot, normal_sd, 1)[0])
                            # reflect/clamp out-of-range indices back into the series
                            if pos < 0:
                                pos *= -1
                            elif pos >= len(reference_ts):
                                pos -= pos - (len(reference_ts) - 1)
                            reference_ts_value = reference_ts[pos]
                            # sample a point around the randomly chosen one
                            dball_point = self.random_point_in_d_ball(reference_ts_value, 0.01)
                            # add the difference between this new point and the last generated to the actual new ts
                            new_point = [round(new_ts[ts_pos][-1][ax]+(dball_point[ax]-generated_points[ts_pos][-1][ax]), 4) for ax, _ in enumerate(dball_point)]
                            new_ts[ts_pos].append(new_point)
                            generated_points[ts_pos].append(dball_point)
                # ----- MOVING AVERAGE -----
                moving_averages = []
                for j in range(len(new_ts)):
                    moving_averages.append([self.get_centroid(new_ts[j][i-sliding_window:i]) for i in range(sliding_window, len(new_ts[j]))])
                synthetic_timeseries.extend(moving_averages)
                # ----- PLOT -----
                if plot_axis > -1:
                    print(f'WINDOW: [{w*window_size}, {(w+1)*window_size}] - {len(window_ts)} timeseries in this window and {len(window_new_lengths)} to be generated.')
                    higher_bound = max(max([max([s[plot_axis] for s in sublist]) for sublist in window_ts]), \
                                       max([max([s[plot_axis] for s in sublist]) for sublist in moving_averages]))
                    lower_bound = min(min([min([s[plot_axis] for s in sublist]) for sublist in window_ts]), \
                                      min([min([s[plot_axis] for s in sublist]) for sublist in moving_averages]))
                    right_bound = max(max(map(len, window_ts)), max(map(len, moving_averages)))
                    plt.figure(figsize=(16, 2))
                    plt.title('Original timeseries')
                    plt.ylim(top=higher_bound*1.02, bottom=lower_bound*0.98)
                    plt.xlim(left=0, right=right_bound)
                    ax = plt.gca()
                    ax.set_facecolor('#2E2E2E')
                    dark_colors = ['#488f31', '#78ab63', '#dac767', '#e18745', '#de425b']
                    for i, ts in enumerate(window_ts):
                        data = [v[plot_axis:plot_axis+1] for v in ts[:]]
                        plt.plot(data, color=dark_colors[i%len(dark_colors)])
                    plt.show()
                    plt.figure(figsize=(16, 2))
                    plt.title('Synthetic timeseries')
                    plt.ylim(top=higher_bound*1.02, bottom=lower_bound*0.98)
                    plt.xlim(left=0, right=right_bound)
                    ax = plt.gca()
                    ax.set_facecolor('#2E2E2E')
                    light_colors = ['#ffa600', '#ff6361', '#bc5090', '#58508d', '#003f5c']
                    for i, ts in enumerate(moving_averages):
                        data = [v[plot_axis:plot_axis+1] for v in ts[:]]
                        plt.plot(data, color=light_colors[i%len(light_colors)])
                    plt.show()
        return synthetic_timeseries
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"numpy.random.uniform",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.figure",
"numpy.random.normal",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout"
] | [((2667, 2692), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'd'], {}), '(0, 1, d)\n', (2683, 2692), True, 'import numpy as np\n'), ((1708, 1735), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 4)'}), '(figsize=(10, 4))\n', (1718, 1735), True, 'import matplotlib.pyplot as plt\n'), ((1748, 1768), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (1759, 1768), True, 'import matplotlib.pyplot as plt\n'), ((1849, 1893), 'matplotlib.pyplot.title', 'plt.title', (['"""Timeseries lengths distribution"""'], {}), "('Timeseries lengths distribution')\n", (1858, 1893), True, 'import matplotlib.pyplot as plt\n'), ((1951, 1971), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (1962, 1971), True, 'import matplotlib.pyplot as plt\n'), ((2267, 2319), 'matplotlib.pyplot.title', 'plt.title', (['"""Time series lengths + synthetic lengths"""'], {}), "('Time series lengths + synthetic lengths')\n", (2276, 2319), True, 'import matplotlib.pyplot as plt\n'), ((2333, 2351), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2349, 2351), True, 'import matplotlib.pyplot as plt\n'), ((2364, 2374), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2372, 2374), True, 'import matplotlib.pyplot as plt\n'), ((2763, 2777), 'numpy.sum', 'np.sum', (['(u ** 2)'], {}), '(u ** 2)\n', (2769, 2777), True, 'import numpy as np\n'), ((1230, 1254), 'numpy.random.uniform', 'np.random.uniform', (['(-X)', 'X'], {}), '(-X, X)\n', (1247, 1254), True, 'import numpy as np\n'), ((2918, 2937), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2935, 2937), True, 'import numpy as np\n'), ((8643, 8670), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 2)'}), '(figsize=(16, 2))\n', (8653, 8670), True, 'import matplotlib.pyplot as plt\n'), ((8691, 8723), 'matplotlib.pyplot.title', 'plt.title', (['"""Original timeseries"""'], {}), "('Original 
timeseries')\n", (8700, 8723), True, 'import matplotlib.pyplot as plt\n'), ((8744, 8804), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'top': '(higher_bound * 1.02)', 'bottom': '(lower_bound * 0.98)'}), '(top=higher_bound * 1.02, bottom=lower_bound * 0.98)\n', (8752, 8804), True, 'import matplotlib.pyplot as plt\n'), ((8821, 8856), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'left': '(0)', 'right': 'right_bound'}), '(left=0, right=right_bound)\n', (8829, 8856), True, 'import matplotlib.pyplot as plt\n'), ((8882, 8891), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8889, 8891), True, 'import matplotlib.pyplot as plt\n'), ((9257, 9267), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9265, 9267), True, 'import matplotlib.pyplot as plt\n'), ((9289, 9316), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 2)'}), '(figsize=(16, 2))\n', (9299, 9316), True, 'import matplotlib.pyplot as plt\n'), ((9337, 9370), 'matplotlib.pyplot.title', 'plt.title', (['"""Synthetic timeseries"""'], {}), "('Synthetic timeseries')\n", (9346, 9370), True, 'import matplotlib.pyplot as plt\n'), ((9391, 9451), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'top': '(higher_bound * 1.02)', 'bottom': '(lower_bound * 0.98)'}), '(top=higher_bound * 1.02, bottom=lower_bound * 0.98)\n', (9399, 9451), True, 'import matplotlib.pyplot as plt\n'), ((9468, 9503), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {'left': '(0)', 'right': 'right_bound'}), '(left=0, right=right_bound)\n', (9476, 9503), True, 'import matplotlib.pyplot as plt\n'), ((9529, 9538), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9536, 9538), True, 'import matplotlib.pyplot as plt\n'), ((9913, 9923), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9921, 9923), True, 'import matplotlib.pyplot as plt\n'), ((6595, 6635), 'numpy.random.normal', 'np.random.normal', (['snapshot', 'normal_sd', '(1)'], {}), '(snapshot, normal_sd, 1)\n', (6611, 6635), True, 'import numpy as np\n')] |
import os
import sys
import math
import torch
import random
import time
import numpy as np
from modeling.gpt2_modeling import GPT2LMHeadModel, GPT2Config, GPT2Model
from modeling.xlnet_modeling import XLNetLMHeadModel, XLNetConfig
from tokenizer.tokenization_id import TokenizerId
from text_utils import TextDataset
from model_utils import restoreModel
from torch.utils.data import DataLoader, RandomSampler, Dataset
from transformers import WarmupLinearSchedule, AdamW
from tqdm import tqdm, trange
# https://datascience.stackexchange.com/questions/38540/are-there-any-good-out-of-the-box-language-models-for-python
# https://github.com/huggingface/transformers/issues/473
def perplexScore(sentence, tokenizers, models, device, use_spm=False):
    """Return the perplexity exp(LM loss) of `sentence` under `models`.

    When `use_spm` is True the sentence is converted directly through the
    tokenizer's sentencepiece model; otherwise it is tokenized first.
    """
    if use_spm:
        token_ids = tokenizers.convert_tokens_to_ids(sentence, is_spm=use_spm)
    else:
        token_ids = tokenizers.convert_tokens_to_ids(tokenizers.tokenize(sentence))
    tensor_input = torch.tensor([token_ids])
    with torch.no_grad():
        outputs = models(tensor_input.to(device), labels=tensor_input.to(device))
    return math.exp(outputs[0])
# Reference sentences used to report perplexity during training
# (doTraining evaluates only index 1, the Indonesian sentence).
perp_test_sent=['<NAME> dari Thailand',
                'kemenangan untuk mahasiswa atas berhasilnya penolakan RUU kuhp kpk',
                'there is a book in the desk']
################################################################################################################
################ TRAINING ######################
################################################################################################################
def set_seed(seed, n_gpu=1):
    """Seed the python, numpy and torch RNGs for reproducibility.

    All CUDA devices are seeded as well when n_gpu > 0.
    """
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def loadAndCacheExamples(train_data_file, block_size, tokenizer, evaluate=False, use_spm=False):
    """Build a TextDataset over `train_data_file`.

    NOTE: the `evaluate` flag is accepted for API symmetry but not used.
    """
    return TextDataset(tokenizer, file_path=train_data_file, block_size=block_size, use_spm=use_spm)
def doTraining(model, dataset, tokenizer, optimizer, scheduler, tr_loss,
               logging_loss, gradient_accumulation_steps, mlm_probability, device,
               local_rank, train_batch_size, num_epoch, max_grad_norm,
               logging_steps, start_iters=0, mlm=False, save_dir='./pretrained/',
               train_model_name='gpt2', fp16=True):
    """Run the causal-LM training loop and checkpoint the model every epoch.

    Uses gradient accumulation (`gradient_accumulation_steps`) with gradient
    clipping, and optional NVIDIA apex fp16 training. After each epoch a
    progress line (including the perplexity of a fixed test sentence) is
    printed and appended to 'saved_trainingprogress.txt', and a checkpoint is
    written to `save_dir` as 'epoch_{n}-{train_model_name}_id.ckpt'.

    NOTE(review): `mlm` and `mlm_probability` are accepted but never used —
    labels are simply the inputs (causal LM objective). The per-batch tensor
    cast via torch.cuda.LongTensor and the hard-coded 'cuda' passed to
    perplexScore both assume a CUDA device — confirm before running on CPU.
    """
    if fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        # Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'], default O1
        print("Trained using apex fp16..")
        model, optimizer = amp.initialize(model, optimizer, opt_level='O2')
    train_sampler = RandomSampler(dataset)
    train_dataloader = DataLoader(dataset, sampler=train_sampler, batch_size=train_batch_size)
    for cur_epoch in range(start_iters, num_epoch):
        start = time.time()
        epoch_iterator = tqdm(train_dataloader, desc="Iteration-{}".format(cur_epoch), disable=local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            # The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated)
            # To train the model, you should first set it back in training mode with ``model.train()``
            inputs, labels = (batch.type(torch.cuda.LongTensor), batch.type(torch.cuda.LongTensor))
            inputs = inputs.to(device)
            labels = labels.to(device)
            model.train()
            outputs = model(inputs, labels=labels)
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
            if gradient_accumulation_steps > 1:
                # scale the loss so accumulated gradients average correctly
                loss = loss / gradient_accumulation_steps
            if fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            if (step + 1) % gradient_accumulation_steps == 0:
                if fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
                # Update parameters and take a step using the computed gradient
                optimizer.step()
                # Update learning rate schedule
                scheduler.step()
                # Clear out the gradients (by default they accumulate)
                model.zero_grad()
        end = time.time()
        op = "Epoch: {}, completed in: {}, loss: {}, perplexity: {}\n".format(cur_epoch, (end - start), (tr_loss - logging_loss)/logging_steps,
                                                                             perplexScore(perp_test_sent[1], tokenizer, model, 'cuda', use_spm=True))
        print(op)
        with open("saved_trainingprogress.txt", 'a') as fw:
            fw.write(op)
        logging_loss = tr_loss
        # Save checkpoint
        _path = os.path.join(save_dir, 'epoch_{}-{}_id.ckpt'.format(cur_epoch, train_model_name))
        torch.save(model.state_dict(), _path)
def main(corpus_dir, corpus_name, model_dir, trained_model_savedir, create_tokenizer=False, train_model_name='gpt2',
         train_spm=True, save_tokenized=False, dotraining=False, model_name=None, resume=False, vocab_name='vocab',
         resume_iters=0, spm_vocab_size=2000, spm_max_sentence_length=4098, spm_model_name='spm_id', block_size=512,
         spm_model_type='unigram', is_finetune=False, from_pretrained=False, train_batch_size=1, num_epoch=1000, fp16=False):
    """Orchestrate tokenizer creation/loading, dataset caching and (optionally)
    GPT-2 training on `corpus_dir + corpus_name`.

    Stages, each gated by a flag:
      - create_tokenizer: build the vocab (and sentencepiece model if
        train_spm) from the corpus; otherwise load a pretrained tokenizer.
      - save_tokenized: persist the tokenizer vocab to `model_dir`.
      - dotraining: build a GPT-2 LM (optionally restoring a checkpoint when
        `resume`) and run doTraining with AdamW + warmup-linear schedule.
    """
    ###################################################################################
    # set torch device
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device("cpu")
    n_gpu = torch.cuda.device_count()
    set_seed(seed=1332, n_gpu=n_gpu)
    # NOTE(review): the two self-assignments below are no-ops kept from the
    # original code; the remaining constants are hard-coded hyperparameters.
    num_epoch = num_epoch
    max_grad_norm = 1.0
    gradient_accumulation_steps = 50
    warmup_steps = 500
    tr_loss, logging_loss = 0.0, 0.0
    logging_steps = 50
    max_steps = -1
    mlm_probability = 0.15
    local_rank = -1
    train_batch_size = train_batch_size
    block_size = block_size
    ## loading tokenizer
    tokenizer = TokenizerId(spm_vocab_size=spm_vocab_size)
    ## prepare dataset
    _dataset = corpus_dir + corpus_name
    if create_tokenizer:
        data_list=['<unk>','<sep>', '<cls>']
        with open(_dataset, encoding="utf-8") as fp:
            line = fp.readline()
            while line:
                line = fp.readline()
                data_list.append(line)
        tokenizer.createVocab(data_list, spm_text_file=_dataset, data_dir=model_dir, train_spm=train_spm,
                              spm_max_sentence_length=spm_max_sentence_length, spm_model_name=spm_model_name,
                              spm_model_type=spm_model_type)
    else:
        tokenizer.from_pretrained(model_dir, use_spm=train_spm, spm_model_name=spm_model_name, spm_max_sentence_length=spm_max_sentence_length,
                                  std_vocab_name=vocab_name)
    print("tokenizer.vocab_size: {}".format(tokenizer.vocab_size))
    ## saving tokenized object for consistent use
    if save_tokenized:
        tokenizer.save_pretrained(model_dir, vocab_name=vocab_name)
    ## create cache of training dataset
    train_dataset = loadAndCacheExamples(_dataset, block_size, tokenizer, evaluate=False, use_spm=train_spm)
    if dotraining:
        dataset = train_dataset
        print("Loading train_dataset done...")
        if max_steps > 0:
            t_total = max_steps
        else:
            t_total = len(dataset) // gradient_accumulation_steps * num_epoch
        print("t_total: {}".format(t_total))
        ## Prepare model and training
        # models[0] (GPT-2) is the architecture actually instantiated below
        models = [
            (GPT2LMHeadModel, GPT2Config),
            #(XLNetModel, XLNetConfig)
            (XLNetLMHeadModel, XLNetConfig)
        ]
        config = models[0][1](vocab_size_or_config_json_file=tokenizer.vocab_size)
        model = models[0][0](config)
        ## resume iters:
        if resume:
            model = restoreModel(model, resume_iters=resume_iters, model_name=model_name,
                                 model_save_dir=model_dir+trained_model_savedir,
                                 is_finetune=is_finetune, from_pretrained=from_pretrained)
        model.to(device)
        num_params = 0
        for p in model.parameters():
            num_params += p.numel()
        print(model)
        print("The number of model_parameters: {}".format(num_params))
        # decoupled weight decay for all parameters except biases and LayerNorm weights
        weight_decay = 0.1
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
            {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=0.00025, eps=1e-8)
        scheduler = WarmupLinearSchedule(optimizer, warmup_steps=warmup_steps, t_total=t_total)
        doTraining(model, train_dataset, tokenizer, optimizer, scheduler, tr_loss, logging_loss,
                   gradient_accumulation_steps, mlm_probability, device, local_rank, train_batch_size,
                   num_epoch=num_epoch, start_iters=resume_iters, max_grad_norm=max_grad_norm, fp16=fp16,
                   logging_steps=logging_steps, save_dir=model_dir+trained_model_savedir, train_model_name=train_model_name)
if __name__ == '__main__':
    # NOTE: the triple-quoted blocks below are inert bare-string expressions
    # kept as usage examples; only the un-quoted main(...) call runs.
    ## Training new data
    ## Step-1
    """
    main(corpus_dir='./samples/wiki_datasets/id/', corpus_name='combined_AE.txt', train_model_name='gpt2_id_wikicombinedAE',
         model_dir='./samples/wiki_datasets/trained_model/', spm_vocab_size=20000, vocab_name='vocab_wiki00mod_id',
         trained_model_savedir="gpt2/", spm_max_sentence_length=80000, spm_model_name='spm_wikicombindeAE_id',
         dotraining=True, resume=False, train_spm=True, save_tokenized=True, create_tokenizer=True, block_size=768,
         spm_model_type='unigram')
    ## Step-2 (optional, only if there is an error, and you unwilling to train the vocab again)
    main(corpus_dir='../temporary_before_move_to_git/id-pytorch-transformers/samples/wiki_datasets/id/',
         corpus_name='combined_AE.txt', train_model_name='gpt2_id_wikicombinedAE',
         model_dir='../temporary_before_move_to_git/id-pytorch-transformers/samples/wiki_datasets/trained_model/',
         spm_vocab_size=20000, vocab_name='vocab_wikicombindeAE_id',
         trained_model_savedir="gpt2/", spm_max_sentence_length=75000, spm_model_name='spm_wikicombindeAE_id',
         dotraining=True, resume=False, train_spm=True, save_tokenized=False, create_tokenizer=False, block_size=768,
         spm_model_type='unigram')
    """
    """
    ## Resume training
    ## MAKE SURE vocab_name and spm_model_name IS THE SAME as by previous model was used. !!
    main(corpus_dir='./samples/wiki_datasets/id/', corpus_name='combined_AE.txt', train_model_name='gpt2_id_combinedAE',
         model_dir='./samples/wiki_datasets/trained_model/', spm_vocab_size=50000, vocab_name='vocab_combinedAE_id',
         trained_model_savedir="gpt2/", spm_max_sentence_length=80000, spm_model_name='spm_combinedAE_unigram_id',
         dotraining=True, resume=True, train_spm=True, save_tokenized=False, create_tokenizer=False, block_size=512,
         spm_model_type='unigram', model_name='epoch_1-gpt2_id_combinedAE_id')
    """
    # Active run: resumes GPT-2 training from the epoch-15 checkpoint on wiki_00_mod.txt
    main(corpus_dir='../temporary_before_move_to_git/id-pytorch-transformers/samples/wiki_datasets/id/', corpus_name='wiki_00_mod.txt', train_model_name='gpt2_id_wiki00modLM',
         model_dir='../temporary_before_move_to_git/id-pytorch-transformers/samples/wiki_datasets/trained_model/', spm_vocab_size=20000, vocab_name='vocab_wikicombindeAE_id',
         trained_model_savedir="gpt2/", spm_max_sentence_length=70000, spm_model_name='spm_wikicombindeAE_id', is_finetune=False, from_pretrained=False,
         dotraining=True, resume=True, resume_iters=16, train_spm=True, save_tokenized=False, create_tokenizer=False, block_size=768,
         spm_model_type='unigram', model_name='epoch_15-gpt2_id_wikicombinedAE_id', train_batch_size=1, fp16=False)
    """
    ## Only process tokenizer
    ## set save_tokenized=True, create_tokenizer=True for retraining the tokenizer
    main(corpus_dir='../temporary_before_move_to_git/id-pytorch-transformers/samples/newspapers/', corpus_name='idnews_combinedAll.txt',
         model_dir='../temporary_before_move_to_git/id-pytorch-transformers/samples/wiki_datasets/trained_model/', spm_vocab_size=50000, vocab_name='vocab_combinedAll_id',
         trained_model_savedir="gpt2/", spm_max_sentence_length=80000, spm_model_name='spm_combinedAll_unigram_id',
         dotraining=False, resume=False, train_spm=True, save_tokenized=False, create_tokenizer=False, block_size=1024)
    """
| [
"numpy.random.seed",
"tokenizer.tokenization_id.TokenizerId",
"torch.utils.data.RandomSampler",
"model_utils.restoreModel",
"torch.cuda.device_count",
"torch.device",
"torch.no_grad",
"torch.utils.data.DataLoader",
"text_utils.TextDataset",
"apex.amp.master_params",
"apex.amp.scale_loss",
"ran... | [((1137, 1154), 'math.exp', 'math.exp', (['loss[0]'], {}), '(loss[0])\n', (1145, 1154), False, 'import math\n'), ((1703, 1720), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1714, 1720), False, 'import random\n'), ((1725, 1745), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1739, 1745), True, 'import numpy as np\n'), ((1750, 1773), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1767, 1773), False, 'import torch\n'), ((1945, 2038), 'text_utils.TextDataset', 'TextDataset', (['tokenizer'], {'file_path': 'train_data_file', 'block_size': 'block_size', 'use_spm': 'use_spm'}), '(tokenizer, file_path=train_data_file, block_size=block_size,\n use_spm=use_spm)\n', (1956, 2038), False, 'from text_utils import TextDataset\n'), ((2855, 2877), 'torch.utils.data.RandomSampler', 'RandomSampler', (['dataset'], {}), '(dataset)\n', (2868, 2877), False, 'from torch.utils.data import DataLoader, RandomSampler, Dataset\n'), ((2901, 2972), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'sampler': 'train_sampler', 'batch_size': 'train_batch_size'}), '(dataset, sampler=train_sampler, batch_size=train_batch_size)\n', (2911, 2972), False, 'from torch.utils.data import DataLoader, RandomSampler, Dataset\n'), ((6089, 6114), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6112, 6114), False, 'import torch\n'), ((6214, 6239), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6237, 6239), False, 'import torch\n'), ((6628, 6670), 'tokenizer.tokenization_id.TokenizerId', 'TokenizerId', ([], {'spm_vocab_size': 'spm_vocab_size'}), '(spm_vocab_size=spm_vocab_size)\n', (6639, 6670), False, 'from tokenizer.tokenization_id import TokenizerId\n'), ((1031, 1046), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1044, 1046), False, 'import torch\n'), ((1800, 1832), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (1826, 1832), False, 
'import torch\n'), ((2785, 2833), 'apex.amp.initialize', 'amp.initialize', (['model', 'optimizer'], {'opt_level': '"""O2"""'}), "(model, optimizer, opt_level='O2')\n", (2799, 2833), False, 'from apex import amp\n'), ((3042, 3053), 'time.time', 'time.time', ([], {}), '()\n', (3051, 3053), False, 'import time\n'), ((4880, 4891), 'time.time', 'time.time', ([], {}), '()\n', (4889, 4891), False, 'import time\n'), ((6133, 6153), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (6145, 6153), False, 'import torch\n'), ((6182, 6201), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6194, 6201), False, 'import torch\n'), ((9422, 9480), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': '(0.00025)', 'eps': '(1e-08)'}), '(optimizer_grouped_parameters, lr=0.00025, eps=1e-08)\n', (9427, 9480), False, 'from transformers import WarmupLinearSchedule, AdamW\n'), ((9500, 9575), 'transformers.WarmupLinearSchedule', 'WarmupLinearSchedule', (['optimizer'], {'warmup_steps': 'warmup_steps', 't_total': 't_total'}), '(optimizer, warmup_steps=warmup_steps, t_total=t_total)\n', (9520, 9575), False, 'from transformers import WarmupLinearSchedule, AdamW\n'), ((8550, 8736), 'model_utils.restoreModel', 'restoreModel', (['model'], {'resume_iters': 'resume_iters', 'model_name': 'model_name', 'model_save_dir': '(model_dir + trained_model_savedir)', 'is_finetune': 'is_finetune', 'from_pretrained': 'from_pretrained'}), '(model, resume_iters=resume_iters, model_name=model_name,\n model_save_dir=model_dir + trained_model_savedir, is_finetune=\n is_finetune, from_pretrained=from_pretrained)\n', (8562, 8736), False, 'from model_utils import restoreModel\n'), ((4015, 4046), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'optimizer'], {}), '(loss, optimizer)\n', (4029, 4046), False, 'from apex import amp\n'), ((4374, 4402), 'apex.amp.master_params', 'amp.master_params', (['optimizer'], {}), '(optimizer)\n', (4391, 4402), False, 'from apex import 
amp\n')] |
import cv2
import math
import torch
import numpy as np
from operator import mul
def normalize_screen_coordinates(X, w, h):
    """Map pixel coordinates of a w x h image so that [0, w] becomes [-1, 1]
    on the x axis, preserving the aspect ratio on the y axis.

    Works on both numpy arrays and torch tensors with last dimension 2.
    """
    assert X.shape[-1] == 2
    if isinstance(X, np.ndarray):
        return X / w * 2 - [1, h / w]
    shift = torch.tensor([1, h / w]).to(X.device)
    return X / w * 2 - shift
def image_coordinates(X, w, h):
    """Inverse of normalize_screen_coordinates: map normalized coords back to pixels.

    Accepts a numpy array or torch tensor of shape (..., 2).
    """
    assert X.shape[-1] == 2
    shift = [1, h / w]
    if not isinstance(X, np.ndarray):
        shift = torch.tensor(shift).to(X.device)
    return (X + shift) * w / 2
def euler2rotation(euler):
    """
    Convert a (3, 1) rotation vector into a 3x3 rotation matrix.
    Reference: https://learnopencv.com/rotation-matrix-to-euler-angles/
    :param euler: numpy array with shape of (3, 1)
    :return: numpy array with shape of (3, 3)
    """
    assert euler.shape == (3, 1)
    # cv2.Rodrigues returns (rotation matrix, jacobian); only the matrix is needed
    rot, _jacobian = cv2.Rodrigues(euler)
    return rot
def rotation2euler(rot):
    """
    Convert a 3x3 rotation matrix into a (3, 1) rotation vector.
    Reference: https://learnopencv.com/rotation-matrix-to-euler-angles/
    :param rot: numpy array with shape of (3, 3)
    :return: numpy array with shape of (3, 1)
    """
    assert rot.shape == (3, 3)
    # cv2.Rodrigues returns (rotation vector, jacobian); only the vector is needed
    vec, _jacobian = cv2.Rodrigues(rot)
    return vec
def rotation2quaternion(rot):
    """
    Convert a 3x3 rotation matrix into a quaternion (x, y, z, w).
    Reference: https://gist.github.com/shubh-agrawal/76754b9bfb0f4143819dbd146d15d4c8
    Reference: http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/
    :param rot: numpy matrix with shape of 3 x 3
    :return: numpy array with shape of (4, )
    """
    assert rot.shape == (3, 3)
    q = np.zeros(4)
    tr = np.trace(rot)
    if tr > 0.:
        # standard branch: trace is positive, w is the largest component
        root = np.sqrt(tr + 1)
        q[3] = 0.5 * root
        inv = 0.5 / root
        q[0] = inv * (rot[2, 1] - rot[1, 2])
        q[1] = inv * (rot[0, 2] - rot[2, 0])
        q[2] = inv * (rot[1, 0] - rot[0, 1])
    else:
        # pick the largest diagonal element for numerical stability
        i = int(np.argmax(rot.diagonal()))
        j, k = (i + 1) % 3, (i + 2) % 3
        root = np.sqrt(rot[i, i] - rot[j, j] - rot[k, k] + 1.)
        q[i] = 0.5 * root
        inv = 0.5 / root
        q[3] = inv * (rot[k, j] - rot[j, k])
        q[j] = inv * (rot[j, i] + rot[i, j])
        q[k] = inv * (rot[k, i] + rot[i, k])
    return q
def euler2quaternion(euler):
    """
    Convert a (3, 1) rotation vector into a quaternion (x, y, z, w),
    going through the intermediate rotation-matrix representation.
    :param euler: numpy array with shape of (3, 1)
    :return: numpy array with shape of (4, )
    """
    return rotation2quaternion(euler2rotation(euler))
def quaternion2rotation(quat):
    """
    Convert a quaternion (x, y, z, w) into a 3x3 rotation matrix.
    Reference: https://marc-b-reynolds.github.io/quaternions/2017/08/08/QuatRotMatrix.html
    :param quat: numpy array with shape of (4, )
    :return: numpy array with shape of (3, 3)
    """
    assert quat.shape == (4, )
    x, y, z, w = quat
    # doubled products reused by several matrix entries
    xx, yy, zz = 2 * x * x, 2 * y * y, 2 * z * z
    xy, xz, yz = 2 * x * y, 2 * x * z, 2 * y * z
    wx, wy, wz = 2 * w * x, 2 * w * y, 2 * w * z
    return np.array([
        [1. - (yy + zz), xy - wz, xz + wy],
        [xy + wz, 1. - (xx + zz), yz - wx],
        [xz - wy, yz + wx, 1. - (xx + yy)],
    ], dtype=np.float64)
def quaternion2euler(quat):
    """
    Convert a quaternion (x, y, z, w) into a (3, 1) rotation vector,
    going through the intermediate rotation-matrix representation.
    :param quat: numpy array with shape of (4, )
    :return: numpy array with shape of (3, 1)
    """
    return rotation2euler(quaternion2rotation(quat))
def catesian2homogenous(arr_cart):
    """
    Append a homogeneous coordinate of 1 along the last axis.
    Works on numpy arrays and torch tensors alike.
    :param arr_cart: array of shape (..., d)
    :return: array of shape (..., d + 1)
    """
    ones_shape = arr_cart.shape[:-1] + (1,)
    if isinstance(arr_cart, np.ndarray):
        return np.concatenate((arr_cart, np.ones(ones_shape)), axis=-1)
    return torch.cat((arr_cart, torch.ones(ones_shape).to(arr_cart.device)), dim=-1)
def homogenous2catesian(arr_hom):
    """
    Divide by the last (homogeneous) coordinate and drop it.
    NOTE: the input array is normalized in place before slicing,
    matching the original behavior.
    :param arr_hom: array of shape (..., d + 1)
    :return: view of shape (..., d)
    """
    n = arr_hom.shape[-1] - 1
    if isinstance(arr_hom, np.ndarray):
        arr_hom[..., :-1] /= np.repeat(arr_hom[..., -1:], n, axis=-1)
    else:
        tile = [1 for _ in arr_hom.shape[:-1]] + [n]
        arr_hom[..., :-1] = arr_hom[..., :-1] / arr_hom[..., -1:].repeat(*tile)
    return arr_hom[..., :-1]
def dotproduct(v1, v2):
    """Return the inner product of two vectors (pairwise product, summed)."""
    return sum(a * b for a, b in zip(v1, v2))
def length(v):
    """Return the Euclidean norm of vector v."""
    return math.sqrt(sum(x * x for x in v))
def angle(v1, v2):
    """Return the angle in radians between vectors v1 and v2."""
    dot = sum(a * b for a, b in zip(v1, v2))
    norm1 = math.sqrt(sum(a * a for a in v1))
    norm2 = math.sqrt(sum(b * b for b in v2))
    return math.acos(dot / (norm1 * norm2))
class CameraInfoPacket(object):
    """Pinhole camera model bundling intrinsics, extrinsics and frame conversions.

    Maintains rotations/translations between three frames: world (w), camera (c)
    and a pitch-compensated "normalized" frame (n), plus helpers to project,
    undistort and convert points between those frames. All matrices are stored
    as float64. Methods accept numpy arrays or torch tensors where noted.
    """

    def __init__(self, P=None, K=None, R=None, t=None, dist_coeff=None,
                 res_w=None, res_h=None, azimuth=None, undistort=True, lite=False):
        """
        P = K[R|t]
        One must either supply P or K, R, t.
        :param P: camera matrix, (3, 4)
        :param K: intrinsic matrix, (3, 3)
        :param R: rotation matrix, (3, 3)
        :param t: translation vector, (3, 1)
        :param dist_coeff: distortion coefficient, (5,)
        :param res_w: pixel width of frame
        :param res_h: pixel height of frame
        :param azimuth: azimuth for visualization
        :param undistort: flag to undo the distortion
        :param lite: lite version of CameraInfoPacket (stores only P and K)
        """
        if P is None:
            assert K.shape == (3, 3)
            assert R.shape == (3, 3)
            assert t.shape == (3, 1)
            # compose the projection matrix P = K [R | t]
            P = K.astype(np.float64) @ np.hstack([R.astype(np.float64), t.astype(np.float64)])
        self.P = P.astype(np.float64) # projection matrix
        self.K = K.astype(np.float64) # intrinsic matrix
        if lite:
            return
        self.dist_coeff = dist_coeff.astype(np.float64) if dist_coeff is not None else None # radial distortion and tangential distortion
        self.res_w = res_w
        self.res_h = res_h
        self.azimuth = azimuth
        self.undistort = undistort
        self.Rw2c = R.astype(np.float64) # rotation matrix from world to cam
        self.Tw2c = t.astype(np.float64) # translation vector, the position of the origin of the world coordinate
        # system expressed in coordinates of the camera-centered coordinate system
        # derived inverse transform: camera -> world
        self.Rc2w = self.Rw2c.T
        self.Tc2w = -self.Rw2c.T @ self.Tw2c
        self.cam_ray_world = self.get_cam_ray_world()
        self.cam_pitch_rad = self.get_cam_pitch_rad()
        self.cam_pitch_deg = self.get_cam_pitch_angle()
        self.cam_orig_world = self.get_cam_coord_world()
        # normalized frame: camera frame rotated to cancel the pitch
        # (see get_norm_coord_config); the remaining transforms chain c<->n<->w
        self.Rc2n, self.Tc2n = self.get_norm_coord_config()
        self.Rn2c = self.Rc2n.T
        self.Tn2c = -self.Rc2n.T @ self.Tc2n
        self.Rw2n = self.Rc2n @ self.Rw2c
        self.Tw2n = self.Rc2n @ self.Tw2c + self.Tc2n
        self.Rn2w = self.Rc2w @ self.Rn2c
        self.Tn2w = -self.Rn2w @ self.Tc2n - self.Rc2w @ self.Tw2c
        self.cam_ray_norm = self.get_cam_ray_normalized()
        # principal point, optionally corrected for lens distortion
        principal_point_x = self.K[0, 2]
        principal_point_y = self.K[1, 2]
        if self.undistort:
            self.pp_cam = self.undistort_point(
                np.asarray([principal_point_x, principal_point_y], dtype=np.float64).reshape((1, 1, 2))
            ).reshape(-1, 2)
        else:
            self.pp_cam = np.array([principal_point_x, principal_point_y], dtype=np.float64).reshape((-1, 2))

    def get_cam_coord_world(self):
        """
        return world coordinate of camera origin
        # https://en.wikipedia.org/wiki/Camera_resectioning
        :return: numpy array with shape of (3, 1)
        """
        return -self.Rw2c.T @ self.Tw2c

    def get_cam_ray_world(self):
        """
        return the ray of camera in world coordinate system
        # define the vector that starts from camera center to principal(focal) point as representation of the camera.
        # suppose that the focal point is normalized,
        # we convert the vector to world space to represent the ray of the camera.
        :return: numpy array with shape of (3, 1)
        """
        focal_pt_cam = np.asarray([0, 0, 1], np.float64)
        P_w = self.Rc2w @ focal_pt_cam
        return P_w[0:3].reshape((3, 1))

    def get_cam_ray_normalized(self):
        """
        return the ray of camera in normalized coord system
        :return: numpy array with shape of (3, 1)
        """
        focal_pt_cam = np.asarray([0, 0, 1], np.float64)
        P_n = self.Rc2n @ focal_pt_cam
        return P_n[0:3].reshape((3, 1))

    def get_cam_pitch_rad(self):
        """
        return camera pitch in radians
        # here we assume the camera is looking towards to the ground
        :return: float, angle between camera ray and the horizontal plane
        """
        ray_upright = np.zeros((3, 1)).astype(np.float64)
        ray_upright[2] = 1
        return angle(self.cam_ray_world, ray_upright) - np.pi / 2

    def get_cam_pitch_angle(self):
        """
        return camera pitch in degrees
        :return: float
        """
        return self.get_cam_pitch_rad() * 180.0 / np.pi

    def get_norm_coord_config(self):
        """
        rotate the camera about the x-axis to eliminate the pitch.
        in normalized world coordinate, we set the translation as the height of camera,
        which is the position of the origin of the normalized coordinate system
        expressed in coordinates of the camera-centered coordinate system.
        :return: (Rc2n, Tc2n) as float64 arrays of shape (3, 3) and (3, 1)
        """
        # rotation about the x axis by the (negative) pitch angle
        Rc2n = np.eye(3, dtype=np.float64)
        Rc2n[1, 1] = math.cos(self.cam_pitch_rad)
        Rc2n[1, 2] = math.sin(self.cam_pitch_rad)
        Rc2n[2, 1] = -math.sin(self.cam_pitch_rad)
        Rc2n[2, 2] = math.cos(self.cam_pitch_rad)
        Rc2n = Rc2n.astype(np.float64)
        Tc2n = np.zeros((3, 1)).astype(np.float64)
        err_str = 'camera height should be larger than 0 if the world coordinate system is set up on the ground'
        # assert self.cam_orig_world[2] > 0, err_str
        Tc2n[1] = -self.cam_orig_world[2]
        return Rc2n, Tc2n

    def camera2world(self, pt):
        """
        pt is the 3d coord in camera_coordinate system
        :param pt: numpy array or torch tensor of shape (..., 3)
        :return: same kind of object, coords in the world frame
        """
        if isinstance(pt, np.ndarray):
            return pt @ self.Rc2w.T + self.Tc2w.T
        else:
            return pt @ torch.from_numpy(self.Rc2w.T).float().to(pt.device) + torch.from_numpy(self.Tc2w.T).float().to(pt.device)

    def world2camera(self, pt):
        """
        pt is the 3d coord in world coordinate system
        :param pt: numpy array or torch tensor of shape (..., 3)
        :return: same kind of object, coords in the camera frame
        """
        if isinstance(pt, np.ndarray):
            return pt @ self.Rw2c.T + self.Tw2c.T
        else:
            return pt @ torch.from_numpy(self.Rw2c.T).float().to(pt.device) + torch.from_numpy(self.Tw2c.T).float().to(pt.device)

    def camera2normalized(self, pt):
        """
        pt is the 3d coord in camera_coordinate system
        :param pt: numpy array or torch tensor of shape (..., 3)
        :return: same kind of object, coords in the normalized frame
        """
        if isinstance(pt, np.ndarray):
            return pt @ self.Rc2n.T + self.Tc2n.T
        else:
            return pt @ torch.from_numpy(self.Rc2n.T).float().to(pt.device) + torch.from_numpy(self.Tc2n.T).float().to(pt.device)

    def normalized2camera(self, pt):
        """
        pt is the 3d coord in normalized system
        :param pt: numpy array or torch tensor of shape (..., 3)
        :return: same kind of object, coords in the camera frame
        """
        if isinstance(pt, np.ndarray):
            return pt @ self.Rn2c.T + self.Tn2c.T
        else:
            return pt @ torch.from_numpy(self.Rn2c.T).float().to(pt.device) + torch.from_numpy(self.Tn2c.T).float().to(pt.device)

    def world2normalized(self, pt):
        """
        pt is the 3d coord in world coordinate system
        :param pt: numpy array or torch tensor of shape (..., 3)
        :return: same kind of object, coords in the normalized frame
        """
        if isinstance(pt, np.ndarray):
            return pt @ self.Rw2n.T + self.Tw2n.T
        else:
            return pt @ torch.from_numpy(self.Rw2n.T).float().to(pt.device) + torch.from_numpy(self.Tw2n.T).float().to(pt.device)

    def normalized2world(self, pt):
        """
        pt is the 3d coord in normalized coordinate system
        :param pt: numpy array or torch tensor of shape (..., 3)
        :return: same kind of object, coords in the world frame
        """
        if isinstance(pt, np.ndarray):
            return pt @ self.Rn2w.T + self.Tn2w.T
        else:
            return pt @ torch.from_numpy(self.Rn2w.T).float().to(pt.device) + torch.from_numpy(self.Tn2w.T).float().to(pt.device)

    def undistort_point(self, points2d):
        """
        Remove lens distortion from pixel coordinates using cv2.undistortPoints.
        :param points2d: numpy array of shape (batch, num_kpt, 2)
        :return: numpy array of the same shape
        """
        batch_size, num_kpt, feat_dim = points2d.shape
        # cv2.undistortPoints expects (N, 1, 2); flatten batch and keypoints
        points2d = np.reshape(points2d, (-1, 1, feat_dim))
        points2d = cv2.undistortPoints(points2d, self.K, self.dist_coeff, P=self.K)
        return np.reshape(points2d, (batch_size, num_kpt, feat_dim))

    def encode_uv_with_intrinsic(self, uv):
        """
        Convert pixel coordinates to intrinsic-normalized camera coordinates
        ((u - cx) / fx, (v - cy) / fy), undistorting first if configured.
        :param uv: numpy array of shape (batch, num_kpt, 2)
        :return: numpy array of the same shape
        """
        batch_size, num_kpt, feat_dim = uv.shape
        fx = self.K[0, 0]
        fy = self.K[1, 1]
        pt = np.zeros([batch_size, num_kpt, feat_dim], dtype=np.float64)
        if self.undistort:
            uv = self.undistort_point(uv)
        pt[..., 0] = (uv[..., 0] - self.pp_cam[..., 0]) / fx
        pt[..., 1] = (uv[..., 1] - self.pp_cam[..., 1]) / fy
        return pt

    def decouple_uv_with_intrinsic(self, uv):
        """
        Inverse of encode_uv_with_intrinsic: scale back by focal lengths and
        re-add the principal point.
        :param uv: numpy array of shape (batch, num_kpt, 2)
        :return: numpy array of the same shape
        """
        batch_size, num_kpt, feat_dim = uv.shape
        fx = self.K[0, 0]
        fy = self.K[1, 1]
        pt = np.zeros([batch_size, num_kpt, feat_dim], dtype=np.float64)
        pt[..., 0] = uv[..., 0] * fx + self.pp_cam[..., 0]
        pt[..., 1] = uv[..., 1] * fy + self.pp_cam[..., 1]
        return pt

    def get_cam_ray_given_uv(self, uv):
        """
        Back-project pixel coordinates to rays expressed in the normalized frame.
        :param uv: numpy array of shape (batch, num_kpt, 2)
        :return: numpy array of shape (batch, num_kpt, 3)
        """
        batch_size, num_kpt, feat_dim = uv.shape
        pt_cam = np.ones([batch_size, num_kpt, feat_dim + 1], dtype=np.float64)
        pt_cam[..., :2] = self.encode_uv_with_intrinsic(uv)
        return pt_cam @ self.Rc2n.T

    def get_uv_given_cam_ray(self, pt):
        """
        Inverse of get_cam_ray_given_uv: project normalized-frame rays back to pixels.
        :param pt: numpy array of shape (batch, num_kpt, 3)
        :return: numpy array of shape (batch, num_kpt, 2)
        """
        pt_cam = pt @ self.Rn2c.T
        uv_with_intrinsic = pt_cam[..., :2]
        return self.decouple_uv_with_intrinsic(uv_with_intrinsic)

    def project(self, X):
        """
        Project 3D homogenous points X (4 * n) and normalize coordinates.
        Return projected 2D points (2 x n coordinates)
        :param X: numpy array or torch tensor of shape (..., 4)
        :return: same kind of object, shape (..., 2)
        """
        if isinstance(X, np.ndarray):
            x = X @ self.P.T
            x[..., 0] = x[..., 0] / x[..., 2]
            x[..., 1] = x[..., 1] / x[..., 2]
            return x[..., :2]
        else:
            x = X @ torch.from_numpy(self.P.T).float().to(X.device)
            org_dim = x.shape
            new_dim = [item for item in org_dim[:-1]] + [org_dim[-1]-1]
            ret = torch.zeros(*new_dim).to(x.device)
            ret[..., 0] = x[..., 0] / x[..., 2]
            ret[..., 1] = x[..., 1] / x[..., 2]
            return ret
| [
"torch.ones",
"numpy.trace",
"cv2.undistortPoints",
"torch.from_numpy",
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"math.sin",
"cv2.Rodrigues",
"numpy.array",
"math.cos",
"numpy.reshape",
"torch.zeros",
"numpy.eye",
"numpy.sqrt",
"torch.tensor",
"numpy.repeat"
] | [((1766, 1777), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (1774, 1777), True, 'import numpy as np\n'), ((1790, 1803), 'numpy.trace', 'np.trace', (['rot'], {}), '(rot)\n', (1798, 1803), True, 'import numpy as np\n'), ((3080, 3096), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (3088, 3096), True, 'import numpy as np\n'), ((972, 992), 'cv2.Rodrigues', 'cv2.Rodrigues', (['euler'], {}), '(euler)\n', (985, 992), False, 'import cv2\n'), ((1308, 1326), 'cv2.Rodrigues', 'cv2.Rodrigues', (['rot'], {}), '(rot)\n', (1321, 1326), False, 'import cv2\n'), ((1835, 1853), 'numpy.sqrt', 'np.sqrt', (['(trace + 1)'], {}), '(trace + 1)\n', (1842, 1853), True, 'import numpy as np\n'), ((2172, 2220), 'numpy.sqrt', 'np.sqrt', (['(rot[i, i] - rot[j, j] - rot[k, k] + 1.0)'], {}), '(rot[i, i] - rot[j, j] - rot[k, k] + 1.0)\n', (2179, 2220), True, 'import numpy as np\n'), ((4467, 4527), 'numpy.repeat', 'np.repeat', (['arr_hom[..., -1:]', '(arr_hom.shape[-1] - 1)'], {'axis': '(-1)'}), '(arr_hom[..., -1:], arr_hom.shape[-1] - 1, axis=-1)\n', (4476, 4527), True, 'import numpy as np\n'), ((8641, 8674), 'numpy.asarray', 'np.asarray', (['[0, 0, 1]', 'np.float64'], {}), '([0, 0, 1], np.float64)\n', (8651, 8674), True, 'import numpy as np\n'), ((8917, 8950), 'numpy.asarray', 'np.asarray', (['[0, 0, 1]', 'np.float64'], {}), '([0, 0, 1], np.float64)\n', (8927, 8950), True, 'import numpy as np\n'), ((9938, 9965), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'np.float64'}), '(3, dtype=np.float64)\n', (9944, 9965), True, 'import numpy as np\n'), ((9987, 10015), 'math.cos', 'math.cos', (['self.cam_pitch_rad'], {}), '(self.cam_pitch_rad)\n', (9995, 10015), False, 'import math\n'), ((10037, 10065), 'math.sin', 'math.sin', (['self.cam_pitch_rad'], {}), '(self.cam_pitch_rad)\n', (10045, 10065), False, 'import math\n'), ((10138, 10166), 'math.cos', 'math.cos', (['self.cam_pitch_rad'], {}), '(self.cam_pitch_rad)\n', (10146, 10166), False, 'import math\n'), ((12903, 12942), 
'numpy.reshape', 'np.reshape', (['points2d', '(-1, 1, feat_dim)'], {}), '(points2d, (-1, 1, feat_dim))\n', (12913, 12942), True, 'import numpy as np\n'), ((12962, 13026), 'cv2.undistortPoints', 'cv2.undistortPoints', (['points2d', 'self.K', 'self.dist_coeff'], {'P': 'self.K'}), '(points2d, self.K, self.dist_coeff, P=self.K)\n', (12981, 13026), False, 'import cv2\n'), ((13042, 13095), 'numpy.reshape', 'np.reshape', (['points2d', '(batch_size, num_kpt, feat_dim)'], {}), '(points2d, (batch_size, num_kpt, feat_dim))\n', (13052, 13095), True, 'import numpy as np\n'), ((13347, 13406), 'numpy.zeros', 'np.zeros', (['[batch_size, num_kpt, feat_dim]'], {'dtype': 'np.float64'}), '([batch_size, num_kpt, feat_dim], dtype=np.float64)\n', (13355, 13406), True, 'import numpy as np\n'), ((13872, 13931), 'numpy.zeros', 'np.zeros', (['[batch_size, num_kpt, feat_dim]'], {'dtype': 'np.float64'}), '([batch_size, num_kpt, feat_dim], dtype=np.float64)\n', (13880, 13931), True, 'import numpy as np\n'), ((14268, 14330), 'numpy.ones', 'np.ones', (['[batch_size, num_kpt, feat_dim + 1]'], {'dtype': 'np.float64'}), '([batch_size, num_kpt, feat_dim + 1], dtype=np.float64)\n', (14275, 14330), True, 'import numpy as np\n'), ((10088, 10116), 'math.sin', 'math.sin', (['self.cam_pitch_rad'], {}), '(self.cam_pitch_rad)\n', (10096, 10116), False, 'import math\n'), ((4094, 4129), 'numpy.ones', 'np.ones', (['(arr_cart.shape[:-1] + (1,))'], {}), '(arr_cart.shape[:-1] + (1,))\n', (4101, 4129), True, 'import numpy as np\n'), ((9234, 9250), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (9242, 9250), True, 'import numpy as np\n'), ((10222, 10238), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (10230, 10238), True, 'import numpy as np\n'), ((357, 381), 'torch.tensor', 'torch.tensor', (['[1, h / w]'], {}), '([1, h / w])\n', (369, 381), False, 'import torch\n'), ((7918, 7984), 'numpy.array', 'np.array', (['[principal_point_x, principal_point_y]'], {'dtype': 'np.float64'}), 
'([principal_point_x, principal_point_y], dtype=np.float64)\n', (7926, 7984), True, 'import numpy as np\n'), ((15313, 15334), 'torch.zeros', 'torch.zeros', (['*new_dim'], {}), '(*new_dim)\n', (15324, 15334), False, 'import torch\n'), ((4190, 4228), 'torch.ones', 'torch.ones', (['(arr_cart.shape[:-1] + (1,))'], {}), '(arr_cart.shape[:-1] + (1,))\n', (4200, 4228), False, 'import torch\n'), ((606, 630), 'torch.tensor', 'torch.tensor', (['[1, h / w]'], {}), '([1, h / w])\n', (618, 630), False, 'import torch\n'), ((7761, 7829), 'numpy.asarray', 'np.asarray', (['[principal_point_x, principal_point_y]'], {'dtype': 'np.float64'}), '([principal_point_x, principal_point_y], dtype=np.float64)\n', (7771, 7829), True, 'import numpy as np\n'), ((10822, 10851), 'torch.from_numpy', 'torch.from_numpy', (['self.Tc2w.T'], {}), '(self.Tc2w.T)\n', (10838, 10851), False, 'import torch\n'), ((11130, 11159), 'torch.from_numpy', 'torch.from_numpy', (['self.Tw2c.T'], {}), '(self.Tw2c.T)\n', (11146, 11159), False, 'import torch\n'), ((11516, 11545), 'torch.from_numpy', 'torch.from_numpy', (['self.Tc2n.T'], {}), '(self.Tc2n.T)\n', (11532, 11545), False, 'import torch\n'), ((11895, 11924), 'torch.from_numpy', 'torch.from_numpy', (['self.Tn2c.T'], {}), '(self.Tn2c.T)\n', (11911, 11924), False, 'import torch\n'), ((12279, 12308), 'torch.from_numpy', 'torch.from_numpy', (['self.Tw2n.T'], {}), '(self.Tw2n.T)\n', (12295, 12308), False, 'import torch\n'), ((12668, 12697), 'torch.from_numpy', 'torch.from_numpy', (['self.Tn2w.T'], {}), '(self.Tn2w.T)\n', (12684, 12697), False, 'import torch\n'), ((15145, 15171), 'torch.from_numpy', 'torch.from_numpy', (['self.P.T'], {}), '(self.P.T)\n', (15161, 15171), False, 'import torch\n'), ((10768, 10797), 'torch.from_numpy', 'torch.from_numpy', (['self.Rc2w.T'], {}), '(self.Rc2w.T)\n', (10784, 10797), False, 'import torch\n'), ((11076, 11105), 'torch.from_numpy', 'torch.from_numpy', (['self.Rw2c.T'], {}), '(self.Rw2c.T)\n', (11092, 11105), False, 'import 
torch\n'), ((11462, 11491), 'torch.from_numpy', 'torch.from_numpy', (['self.Rc2n.T'], {}), '(self.Rc2n.T)\n', (11478, 11491), False, 'import torch\n'), ((11841, 11870), 'torch.from_numpy', 'torch.from_numpy', (['self.Rn2c.T'], {}), '(self.Rn2c.T)\n', (11857, 11870), False, 'import torch\n'), ((12225, 12254), 'torch.from_numpy', 'torch.from_numpy', (['self.Rw2n.T'], {}), '(self.Rw2n.T)\n', (12241, 12254), False, 'import torch\n'), ((12614, 12643), 'torch.from_numpy', 'torch.from_numpy', (['self.Rn2w.T'], {}), '(self.Rn2w.T)\n', (12630, 12643), False, 'import torch\n')] |
import argparse
import os
import numpy as np
from pybind_nisar.workflows import rdr2geo
from pybind_nisar.workflows.rdr2geo_runconfig import Rdr2geoRunConfig
import iscetest
def test_rdr2geo_run():
    '''
    run rdr2geo
    '''
    # load the YAML template and point it at writable output locations,
    # since the test data directory is read only
    yaml_path = os.path.join(iscetest.data, 'insar_test.yaml')
    with open(yaml_path) as fh:
        yaml_text = fh.read()
    yaml_text = yaml_text.replace('@ISCETEST@', iscetest.data)
    yaml_text = yaml_text.replace('@TEST_OUTPUT@', 'rifg.h5')
    yaml_text = yaml_text.replace('@TEST_PRODUCT_TYPES@', 'RIFG')
    # feed the YAML text (not a file path) through the CLI namespace
    args = argparse.Namespace(run_config_path=yaml_text, log_file=False)
    runconfig = Rdr2geoRunConfig(args)
    runconfig.geocode_common_arg_load()
    rdr2geo.run(runconfig.cfg)
def check_error(f_test, f_ref, dtype, tol, test_type):
    '''
    calculate error for file in vrt
    '''
    # read both rasters as flat arrays
    test_arr = np.fromfile(f_test, dtype=dtype)
    ref_arr = np.fromfile(f_ref, dtype=dtype)
    # average absolute error, ignoring gross outliers (>= 5)
    abs_diff = np.abs(test_arr - ref_arr)
    error = np.mean(abs_diff[abs_diff < 5])
    fname = os.path.basename(f_test)
    assert error < tol, f'NISAR Python {test_type} rdr2geo fail at {fname}: {error} >= {tol}'
def test_rdr2geo_validate():
    '''
    validate rdr2geo outputs
    '''
    # (filename, dtype, tolerance) per vrt constituent file;
    # tolerances taken from the C++ tests
    layers = [
        ('x.rdr', np.float64, 1.0e-5),
        ('y.rdr', np.float64, 1.0e-5),
        ('z.rdr', np.float64, 0.15),
        ('inc.rdr', np.float32, 1.0e-4),
        ('hdg.rdr', np.float32, 1.0e-4),
        ('localInc.rdr', np.float32, 0.02),
        ('localPsi.rdr', np.float32, 0.02),
    ]
    # check errors with scratch set to cwd
    scratch_path = '.'
    for fname, dtype, tol in layers:
        output_file = os.path.join(scratch_path, 'rdr2geo', 'freqA', fname)
        ref_file = os.path.join(iscetest.data, 'topo_winnipeg', fname)
        check_error(output_file, ref_file, dtype, tol, 'CPU')
| [
"argparse.Namespace",
"numpy.abs",
"os.path.basename",
"numpy.fromfile",
"numpy.mean",
"pybind_nisar.workflows.rdr2geo_runconfig.Rdr2geoRunConfig",
"pybind_nisar.workflows.rdr2geo.run",
"os.path.join"
] | [((267, 313), 'os.path.join', 'os.path.join', (['iscetest.data', '"""insar_test.yaml"""'], {}), "(iscetest.data, 'insar_test.yaml')\n", (279, 313), False, 'import os\n'), ((707, 768), 'argparse.Namespace', 'argparse.Namespace', ([], {'run_config_path': 'test_yaml', 'log_file': '(False)'}), '(run_config_path=test_yaml, log_file=False)\n', (725, 768), False, 'import argparse\n'), ((814, 836), 'pybind_nisar.workflows.rdr2geo_runconfig.Rdr2geoRunConfig', 'Rdr2geoRunConfig', (['args'], {}), '(args)\n', (830, 836), False, 'from pybind_nisar.workflows.rdr2geo_runconfig import Rdr2geoRunConfig\n'), ((882, 908), 'pybind_nisar.workflows.rdr2geo.run', 'rdr2geo.run', (['runconfig.cfg'], {}), '(runconfig.cfg)\n', (893, 908), False, 'from pybind_nisar.workflows import rdr2geo\n'), ((1049, 1081), 'numpy.fromfile', 'np.fromfile', (['f_test'], {'dtype': 'dtype'}), '(f_test, dtype=dtype)\n', (1060, 1081), True, 'import numpy as np\n'), ((1092, 1123), 'numpy.fromfile', 'np.fromfile', (['f_ref'], {'dtype': 'dtype'}), '(f_ref, dtype=dtype)\n', (1103, 1123), True, 'import numpy as np\n'), ((1166, 1184), 'numpy.abs', 'np.abs', (['(test - ref)'], {}), '(test - ref)\n', (1172, 1184), True, 'import numpy as np\n'), ((1223, 1236), 'numpy.mean', 'np.mean', (['diff'], {}), '(diff)\n', (1230, 1236), True, 'import numpy as np\n'), ((1268, 1292), 'os.path.basename', 'os.path.basename', (['f_test'], {}), '(f_test)\n', (1284, 1292), False, 'import os\n'), ((2005, 2058), 'os.path.join', 'os.path.join', (['scratch_path', '"""rdr2geo"""', '"""freqA"""', 'fname'], {}), "(scratch_path, 'rdr2geo', 'freqA', fname)\n", (2017, 2058), False, 'import os\n'), ((2078, 2129), 'os.path.join', 'os.path.join', (['iscetest.data', '"""topo_winnipeg"""', 'fname'], {}), "(iscetest.data, 'topo_winnipeg', fname)\n", (2090, 2129), False, 'import os\n')] |
import numpy as np
from domain.rules import game
import unittest
class TestStringMethods( unittest.TestCase ):
    """Tests for the connect-four rule engine (open positions and board evaluation)."""

    def setUp(self):
        # Fixture shared by every test: a fresh, empty 6x7 board and a rule
        # engine configured with player=1 and ai=2. Previously this setup was
        # duplicated verbatim in both test methods.
        self.board = np.zeros( (6, 7) )
        self.player = 1
        self.ai = 2
        self.gm = game.Game_Rules()
        self.gm.player = 1
        self.gm.ai = 2

    def test_function_test_open_positions(self):
        # open_positions lists all legal moves for the AI. On an empty board
        # every one of the 7 columns should be available.
        self.assertEqual( len( self.gm.open_positions( self.board ) ), 7 )
        # Fill one column completely via move(player, x, y, board); only six
        # columns should then remain open.
        for i in range( 6 ):
            self.gm.move( self.player, 0, i, self.board )
        print( self.board )
        self.assertEqual( len( self.gm.open_positions( self.board ) ), 6 )

    def test_function_test_evaluate_position(self):
        # evaluate_position scores a board state. A single AI piece in the
        # middle column (a strong position) is worth 200 points.
        self.gm.move( self.ai, 3, 5, self.board )
        print( self.board )
        self.assertEqual( self.gm.evaluate_position( self.board, self.ai ), 200 )
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"numpy.zeros",
"domain.rules.game.Game_Rules"
] | [((1867, 1882), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1880, 1882), False, 'import unittest\n'), ((184, 200), 'numpy.zeros', 'np.zeros', (['(6, 7)'], {}), '((6, 7))\n', (192, 200), True, 'import numpy as np\n'), ((265, 282), 'domain.rules.game.Game_Rules', 'game.Game_Rules', ([], {}), '()\n', (280, 282), False, 'from domain.rules import game\n'), ((1205, 1221), 'numpy.zeros', 'np.zeros', (['(6, 7)'], {}), '((6, 7))\n', (1213, 1221), True, 'import numpy as np\n'), ((1286, 1303), 'domain.rules.game.Game_Rules', 'game.Game_Rules', ([], {}), '()\n', (1301, 1303), False, 'from domain.rules import game\n'), ((1375, 1391), 'numpy.zeros', 'np.zeros', (['(6, 7)'], {}), '((6, 7))\n', (1383, 1391), True, 'import numpy as np\n')] |
import numpy as np
# 0.43846153846153846 modifier for resistance and defense
# level 90 Diluc, level 100 enemy 10% resistance
# base attack 311
def damage(atk, em, crit_rate, crit_dmg):
    """Expected rotation damage (per the original model's constants).

    Per the file's header comments: level 90 Diluc (base attack 311) vs a
    level 100 enemy with 10% resistance; 0.43846153846153846 is the combined
    resistance/defense modifier and 2.031 the damage-bonus multiplier
    (CW 4 stacks, A4, pyro goblet).
    """
    res_def = 0.43846153846153846
    dmg_bonus = 2.031
    # vaporized hits: Q + DoT + 3*N1 + N4 + E1 + E2 + E3 motion values
    # (3.264 + 0.96 + 3*1.5332 + 2.2903 + 1.5104 + 1.5616 + 2.0608 = 16.2467)
    vape = 16.2467 * atk * dmg_bonus * 1.5 * (1 + (2.78 * em / (em + 1400)) + 0.15) * res_def
    # non-vaporized hits: 2*DoT + N1 + 3*N2 + N3 motion values
    # (2*0.96 + 1.5332 + 3*1.4979 + 1.689 = 9.6359)
    plain = 9.6359 * atk * dmg_bonus * res_def
    # crit rate is capped at 100%
    return (vape + plain) * (1 + min(crit_rate, 1) * crit_dmg)
def generate_rolls(rolls):
    """Enumerate ways to distribute `rolls` substat rolls over 4 stats.

    The first three stats each receive at most rolls-1 (the range bound),
    and the fourth absorbs the remainder — so only the fourth stat can hold
    every roll, matching the original enumeration exactly.
    """
    distributions = []
    for first in range(rolls):
        for second in range(rolls - first):
            for third in range(rolls - first - second):
                fourth = rolls - first - second - third
                distributions.append(np.array([first, second, third, fourth]))
    return distributions
def calc(weapon_atk, weapon_secondary, artifact_main_stats, base_rolls):
    '''
    weapon_atk: weapon base attack
    weapon_secondary: 4 length np array [atk%, em, crit rate, crit damage]
    artifact_main_stats: 4 length np array of artifact main-stat totals
    base_rolls: 4 length np array of guaranteed substat rolls per stat
    '''
    base_attack = 311 + weapon_atk  # 311 = character base attack
    # per-roll substat values: [atk%, em, crit rate, crit damage]
    sub_values = np.array([0.053, 23, 0.039, 0.078])
    base_subs = sub_values * base_rolls
    # character baseline (100% atk, 24.2% crit rate, 50% crit dmg) plus gear
    fixed = np.array([1, 0, 0.242, 0.5]) + weapon_secondary + artifact_main_stats
    results = []
    for roll in generate_rolls(25):
        new_stats = fixed + sub_values * roll + base_subs
        new_stats[0] = new_stats[0] * base_attack + 311  # atk% -> flat, add feather
        results.append((damage(*new_stats), roll + base_rolls, new_stats))
    return results
# Earlier experiments kept for reference (commented out):
# wgs r1 atk sands
# res = calc(607, np.array([0.696, 0, 0, 0]), np.array([0.466, 0, 0.312, 0]), np.array([4, 5, 4, 5]))
# top = sorted(res, key=lambda x: x[0], reverse=True)[0:100]
# print('WGS R1 atk sands')
# print('\n'.join(str(i) for i in top))
# wgs r1 em sands
# res = calc(607, np.array([0.696, 0, 0, 0]), np.array([0, 197, 0.312, 0]), np.array([5, 4, 4, 5]))
# top = sorted(res, key=lambda x: x[0], reverse=True)[0:100]
# print('WGS R1 em sands')
# rint('\n'.join(str(i) for i in top))
# # wgs r1 atk sands 25% uptime
# Active configuration: WGS R1 atk sands, 25% passive uptime.
# Keep only the 100 highest-damage substat distributions.
res = calc(607, np.array([0.796, 0, 0, 0]), np.array([0.466, 0, 0.312, 0]), np.array([4, 5, 4, 5]))
top = sorted(res, key=lambda x: x[0], reverse=True)[0:100]
# print('WGS R1 atk sands 25% uptime')
# print('\n'.join(str(i) for i in top))
# # wgs r1 em sands 25% uptime
# res = calc(607, np.array([0.796, 0, 0, 0]), np.array([0, 197, 0.312, 0]), np.array([5, 4, 4, 5]))
# top = sorted(res, key=lambda x: x[0], reverse=True)[0:100]
# print('WGS R1 em sands 25% uptime')
# print('\n'.join(str(i) for i in top))
# # skyward pride no passive atk sands
# res = calc(674, np.array([0, 0, 0, 0]), np.array([0.466, 0, 0.312, 0]), np.array([4, 5, 4, 5]))
# top = sorted(res, key=lambda x: x[0], reverse=True)[0:15]
# print('skyward pride no passive')
# print('\n'.join(str(i) for i in top))
# Format each result as a CSV row: damage, the 4 roll counts, the 4 final stats.
temp = []
for dmg, rolls, stats in top:
    rolls = ','.join(f'{i}' for i in rolls)
    stats = ','.join(f'{i:.2f}' for i in stats)
    temp.append(f'{dmg:.2f},{rolls},{stats}')
# Write the top results to results.csv with a header row.
with open('results.csv', 'w') as f:
    f.write('damage,atk%,em,crit%,critdmg,atk,em,crit%,critdmg\n')
    f.writelines(f'{i}\n' for i in temp)
| [
"numpy.array"
] | [((1356, 1391), 'numpy.array', 'np.array', (['[0.053, 23, 0.039, 0.078]'], {}), '([0.053, 23, 0.039, 0.078])\n', (1364, 1391), True, 'import numpy as np\n'), ((2414, 2440), 'numpy.array', 'np.array', (['[0.796, 0, 0, 0]'], {}), '([0.796, 0, 0, 0])\n', (2422, 2440), True, 'import numpy as np\n'), ((2442, 2472), 'numpy.array', 'np.array', (['[0.466, 0, 0.312, 0]'], {}), '([0.466, 0, 0.312, 0])\n', (2450, 2472), True, 'import numpy as np\n'), ((2474, 2496), 'numpy.array', 'np.array', (['[4, 5, 4, 5]'], {}), '([4, 5, 4, 5])\n', (2482, 2496), True, 'import numpy as np\n'), ((1481, 1509), 'numpy.array', 'np.array', (['[1, 0, 0.242, 0.5]'], {}), '([1, 0, 0.242, 0.5])\n', (1489, 1509), True, 'import numpy as np\n'), ((1048, 1086), 'numpy.array', 'np.array', (['[x, y, z, rolls - x - y - z]'], {}), '([x, y, z, rolls - x - y - z])\n', (1056, 1086), True, 'import numpy as np\n')] |
import os.path
from data.base_dataset import BaseDataset, get_params, get_transform
from data.image_folder import make_dataset
from PIL import Image
import pickle
from pathlib import Path
from torchvision import transforms
def create_mask_from_white_background(A):
    """Build a single-channel PIL mask separating foreground from near-white background.

    Pixels whose RGBA channels are all >= [245, 245, 245, 255] are treated as
    background and set to 0; every other pixel becomes 255. Returns the first
    channel as a PIL image (all channels are equal after thresholding).
    """
    import numpy as np
    import cv2
    rgba = np.array(A.convert('RGBA'))
    black = [0, 0, 0, 255]
    white = [255, 255, 255, 255]
    white_offset = [245, 245, 245, 255]
    # background test: every channel at or above the near-white threshold
    background = np.all(rgba >= white_offset, axis=-1)
    rgba[background] = black
    rgba[~background] = white
    return Image.fromarray(rgba[..., 0])
class AlignedDataset(BaseDataset):
"""A dataset class for paired image dataset.
It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
During test time, you need to prepare a directory '/path/to/data/test'.
"""
    def __init__(self, opt):
        """Initialize this dataset class.

        Collects the sorted list of paired-image paths from the phase
        subdirectory and resolves input/output channel counts from the
        translation direction.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.dir_AB = os.path.join(opt.dataroot, opt.phase)  # get the image directory
        self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size))  # get image paths
        assert(self.opt.load_size >= self.opt.crop_size)  # crop_size should be smaller than the size of loaded image
        # swap input/output channel counts when translating B -> A
        self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
        self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) - - an image in the input domain
B (tensor) - - its corresponding image in the target domain
A_paths (str) - - image paths
B_paths (str) - - image paths (same as A_paths)
"""
# read a image given a random integer index
AB_path = self.AB_paths[index]
AB = Image.open(AB_path).convert('RGB')
with open(str(Path(AB_path).parent / f'{Path(AB_path).stem}.pkl'), 'rb') as f:
metadata = pickle.load(f, encoding='latin1')
silh_im = Image.fromarray(metadata['rendered_silh'])
correspondence_map_im = Image.fromarray(metadata['correspondence_map'])
normals_map_im = Image.fromarray(metadata['normals_map'])
# split AB image into A and B
w, h = AB.size
w2 = int(w / 3)
A = AB.crop((0, 0, w2, h))
true_mask = create_mask_from_white_background(A)
# B = AB.crop((2*w2, 0, w, h))
B = AB.crop((w2, 0, w-w2, h))
# B = AB.crop((2*w2, 0, w, h))
if self.opt.constant_data:
B = Image.open(r'bareteeth.000001.26_C/coma_2/mesh.png').convert('RGB')#TODO remove
# C = AB.crop((2*w2, 0, w, h))
# apply the same transform to both A and B
transform_params = get_params(self.opt, A.size)
A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
ext_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1),minus1To1=False)
meta_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
# C_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
# from torchvision import transforms
# transforms.ToPILImage()(A.cpu()).save('a.png')
A = A_transform(A)
B = B_transform(B)
# C = C_transform(C)
osize = [self.opt.load_size, self.opt.load_size]
# trn = transforms.Compose([transforms.Resize(osize, Image.BICUBIC) ,
# transforms.ToTensor()
# ])
silh_im = ext_transform(silh_im)
true_mask = ext_transform(true_mask)
correspondence_map_im = meta_transform(correspondence_map_im)
normals_map_im = meta_transform(normals_map_im)
# return {'A': A, 'B': B,'C': C, 'A_paths': AB_path, 'B_paths': AB_path, 'C_paths': AB_path}
return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path, 'true_flame_params': metadata['true_flame_params'],
'silh':silh_im,'true_mask':true_mask,'normals_map_im':normals_map_im,'correspondence_map_im':correspondence_map_im}
def __len__(self):
"""Return the total number of images in the dataset."""
return len(self.AB_paths)
| [
"data.base_dataset.get_params",
"data.base_dataset.BaseDataset.__init__",
"PIL.Image.open",
"pathlib.Path",
"pickle.load",
"numpy.array",
"data.image_folder.make_dataset",
"PIL.Image.fromarray",
"data.base_dataset.get_transform",
"numpy.all"
] | [((389, 401), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (397, 401), True, 'import numpy as np\n'), ((631, 667), 'numpy.all', 'np.all', (['(rgb >= white_offset)'], {'axis': '(-1)'}), '(rgb >= white_offset, axis=-1)\n', (637, 667), True, 'import numpy as np\n'), ((873, 894), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (888, 894), False, 'from PIL import Image\n'), ((1434, 1465), 'data.base_dataset.BaseDataset.__init__', 'BaseDataset.__init__', (['self', 'opt'], {}), '(self, opt)\n', (1454, 1465), False, 'from data.base_dataset import BaseDataset, get_params, get_transform\n'), ((2755, 2797), 'PIL.Image.fromarray', 'Image.fromarray', (["metadata['rendered_silh']"], {}), "(metadata['rendered_silh'])\n", (2770, 2797), False, 'from PIL import Image\n'), ((2830, 2877), 'PIL.Image.fromarray', 'Image.fromarray', (["metadata['correspondence_map']"], {}), "(metadata['correspondence_map'])\n", (2845, 2877), False, 'from PIL import Image\n'), ((2903, 2943), 'PIL.Image.fromarray', 'Image.fromarray', (["metadata['normals_map']"], {}), "(metadata['normals_map'])\n", (2918, 2943), False, 'from PIL import Image\n'), ((3486, 3514), 'data.base_dataset.get_params', 'get_params', (['self.opt', 'A.size'], {}), '(self.opt, A.size)\n', (3496, 3514), False, 'from data.base_dataset import BaseDataset, get_params, get_transform\n'), ((3537, 3608), 'data.base_dataset.get_transform', 'get_transform', (['self.opt', 'transform_params'], {'grayscale': '(self.input_nc == 1)'}), '(self.opt, transform_params, grayscale=self.input_nc == 1)\n', (3550, 3608), False, 'from data.base_dataset import BaseDataset, get_params, get_transform\n'), ((3633, 3705), 'data.base_dataset.get_transform', 'get_transform', (['self.opt', 'transform_params'], {'grayscale': '(self.output_nc == 1)'}), '(self.opt, transform_params, grayscale=self.output_nc == 1)\n', (3646, 3705), False, 'from data.base_dataset import BaseDataset, get_params, get_transform\n'), ((3732, 3825), 
'data.base_dataset.get_transform', 'get_transform', (['self.opt', 'transform_params'], {'grayscale': '(self.output_nc == 1)', 'minus1To1': '(False)'}), '(self.opt, transform_params, grayscale=self.output_nc == 1,\n minus1To1=False)\n', (3745, 3825), False, 'from data.base_dataset import BaseDataset, get_params, get_transform\n'), ((3848, 3920), 'data.base_dataset.get_transform', 'get_transform', (['self.opt', 'transform_params'], {'grayscale': '(self.output_nc == 1)'}), '(self.opt, transform_params, grayscale=self.output_nc == 1)\n', (3861, 3920), False, 'from data.base_dataset import BaseDataset, get_params, get_transform\n'), ((1584, 1631), 'data.image_folder.make_dataset', 'make_dataset', (['self.dir_AB', 'opt.max_dataset_size'], {}), '(self.dir_AB, opt.max_dataset_size)\n', (1596, 1631), False, 'from data.image_folder import make_dataset\n'), ((2703, 2736), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (2714, 2736), False, 'import pickle\n'), ((2558, 2577), 'PIL.Image.open', 'Image.open', (['AB_path'], {}), '(AB_path)\n', (2568, 2577), False, 'from PIL import Image\n'), ((3288, 3339), 'PIL.Image.open', 'Image.open', (['"""bareteeth.000001.26_C/coma_2/mesh.png"""'], {}), "('bareteeth.000001.26_C/coma_2/mesh.png')\n", (3298, 3339), False, 'from PIL import Image\n'), ((2615, 2628), 'pathlib.Path', 'Path', (['AB_path'], {}), '(AB_path)\n', (2619, 2628), False, 'from pathlib import Path\n'), ((2641, 2654), 'pathlib.Path', 'Path', (['AB_path'], {}), '(AB_path)\n', (2645, 2654), False, 'from pathlib import Path\n')] |
import logging
LOGGER = logging.getLogger("PYWPS")
from os.path import exists
def RL(T, a, b, s):
    """Return level for a T-step return period under a fitted GEV distribution.

    :param T: return period in timesteps (must be > 1)
    :param a: location parameter of the fitted GEV
    :param b: scale parameter of the fitted GEV
    :param s: shape parameter as reported by scipy's genextreme
        (its sign is flipped internally to the classic GEV convention)
    """
    from math import log

    period = float(T)
    gumbel_var = -1 / log(1 - 1 / period)
    shape = -s  # convert scipy's sign convention
    if shape == 0:
        # Gumbel limit of the GEV quantile formula
        return a + b * log(gumbel_var)
    return a + b * (gumbel_var ** shape - 1) / shape
def resample(X, n=None):
    """Randomly subsample a sequence (without replacement).

    Note: despite the original "bootstrap" wording, `random.sample` draws
    WITHOUT replacement, so this returns a random subset, not a classic
    bootstrap resample. Behavior is kept as-is for existing callers.

    :param X: sequence of data to resample
    :param n: int, optional -- length of the resampled sequence; when omitted,
        a random length between 1 and len(X) is drawn
    :return: list of n elements drawn from X without replacement
    """
    from random import sample, uniform
    # BUGFIX: use identity comparison with None (``== None`` is unidiomatic
    # and breaks for objects overriding __eq__).
    if n is None:
        n = int(uniform(1, len(X)))
    return sample(X, n)
def rl_bootstrap(data, T=100, nsim=1000):
    """Estimate the spread of the T-step return level by repeated resampling.

    Each iteration draws a random subset of ``data``, refits a GEV, and
    evaluates the return level at period ``T``.

    :param data: list of input data
    :param T: timestep period
    :param nsim: number of resampling iterations
    :return: list of ``nsim`` return-level estimates
    """
    from scipy.stats import genextreme as gev

    levels = []
    for _ in range(nsim):
        shape, loc, scale = gev.fit(resample(data))
        levels.append(RL(T, loc, scale, shape))
    return levels
def eventdistribution(data, per=[5, 95], nsim=1000,
                      rp=[10., 20., 50., 100., 200., 500., 1000.],
                      rp_scale_factor=1, white_noise=False):
    """
    Return a matrix with rows (return period, lower percentile,
    return level, upper percentile).

    :param data: values of timeseries
    :param per: lower and upper percentile defining the uncertainty band
    :param nsim: number of bootstrap iterations per return period
    :param rp: list of return periods (in timesteps)
    :param rp_scale_factor: multiplicative scale applied to each entry of rp
    :param white_noise: add white noise (uniform value in [0, std/10]) to each
        sample; useful for singular (near-constant) timeseries
    """
    from scipy.stats import genextreme as gev
    from numpy import percentile, vstack, std
    from random import uniform

    if white_noise:
        # BUGFIX: `std` and `uniform` were used without being imported,
        # raising NameError whenever white_noise=True.
        s = std(data) / 10
        data = [n + uniform(0, s) for n in data]
    s, a, b = gev.fit(data)
    rl = []
    per_low = []
    per_high = []
    # BUGFIX: `rp * rp_scale_factor` repeated the *list* instead of scaling
    # each period (and broke the vstack below for factors != 1).
    periods = [T * rp_scale_factor for T in rp]
    for T in periods:
        rl.append(RL(T, a, b, s))
        RL_bt = rl_bootstrap(data, T=T, nsim=nsim)
        # BUGFIX: the `per` argument was documented but ignored (5/95 were
        # hard-coded); defaults keep the previous behavior.
        per_low.append(percentile(RL_bt, per[0]))
        per_high.append(percentile(RL_bt, per[1]))
    rl_c = vstack((periods, per_low, rl, per_high))
    return rl_c
| [
"random.uniform",
"random.sample",
"scipy.stats.genextreme.fit",
"logging.getLogger",
"numpy.percentile",
"math.log",
"numpy.vstack"
] | [((24, 50), 'logging.getLogger', 'logging.getLogger', (['"""PYWPS"""'], {}), "('PYWPS')\n", (41, 50), False, 'import logging\n'), ((735, 747), 'random.sample', 'sample', (['X', 'n'], {}), '(X, n)\n', (741, 747), False, 'from random import sample, uniform\n'), ((2058, 2071), 'scipy.stats.genextreme.fit', 'gev.fit', (['data'], {}), '(data)\n', (2065, 2071), True, 'from scipy.stats import genextreme as gev\n'), ((2413, 2448), 'numpy.vstack', 'vstack', (['(rp, per_low, rl, per_high)'], {}), '((rp, per_low, rl, per_high))\n', (2419, 2448), False, 'from numpy import percentile, vstack\n'), ((274, 288), 'math.log', 'log', (['(1 - 1 / T)'], {}), '(1 - 1 / T)\n', (277, 288), False, 'from math import log\n'), ((1102, 1117), 'scipy.stats.genextreme.fit', 'gev.fit', (['subset'], {}), '(subset)\n', (1109, 1117), True, 'from scipy.stats import genextreme as gev\n'), ((2332, 2352), 'numpy.percentile', 'percentile', (['RL_bt', '(5)'], {}), '(RL_bt, 5)\n', (2342, 2352), False, 'from numpy import percentile, vstack\n'), ((2378, 2399), 'numpy.percentile', 'percentile', (['RL_bt', '(95)'], {}), '(RL_bt, 95)\n', (2388, 2399), False, 'from numpy import percentile, vstack\n'), ((383, 390), 'math.log', 'log', (['yT'], {}), '(yT)\n', (386, 390), False, 'from math import log\n'), ((1985, 1998), 'random.uniform', 'uniform', (['(0)', 's'], {}), '(0, s)\n', (1992, 1998), False, 'from random import sample, uniform\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun May 23 16:46:50 2021
@author: Abhilash
"""
from tensorflow.keras.applications import ResNet50
import numpy as np
import TFModelQuantizer
import time
import h5py
# Destination for the pretrained network in TensorFlow SavedModel format.
model_dir = 'tmp_savedmodels/resnet50_saved_model'
# Fetch (downloading weights if needed) a stock ImageNet ResNet50 and save it.
model = ResNet50(include_top=True, weights='imagenet')
model.save(model_dir)
BATCH_SIZE = 32
# NOTE(review): this batch is never used below -- presumably intended for
# warming up / benchmarking the quantized model; confirm before removing.
dummy_input_batch = np.zeros((BATCH_SIZE, 224, 224, 3))
# Time the FP16 quantization pass; output goes to '<model_dir>_FP16'.
start_time=time.time()
model_quantizer_fp16=TFModelQuantizer.TFModelQuantizer(model_dir,"FP16")
model_q_16=model_quantizer_fp16.quantize(model_dir+'_FP16')
diff_fp16=time.time()-start_time
print(diff_fp16)
| [
"TFModelQuantizer.TFModelQuantizer",
"time.time",
"numpy.zeros",
"tensorflow.keras.applications.ResNet50"
] | [((266, 312), 'tensorflow.keras.applications.ResNet50', 'ResNet50', ([], {'include_top': '(True)', 'weights': '"""imagenet"""'}), "(include_top=True, weights='imagenet')\n", (274, 312), False, 'from tensorflow.keras.applications import ResNet50\n'), ((371, 406), 'numpy.zeros', 'np.zeros', (['(BATCH_SIZE, 224, 224, 3)'], {}), '((BATCH_SIZE, 224, 224, 3))\n', (379, 406), True, 'import numpy as np\n'), ((419, 430), 'time.time', 'time.time', ([], {}), '()\n', (428, 430), False, 'import time\n'), ((452, 504), 'TFModelQuantizer.TFModelQuantizer', 'TFModelQuantizer.TFModelQuantizer', (['model_dir', '"""FP16"""'], {}), "(model_dir, 'FP16')\n", (485, 504), False, 'import TFModelQuantizer\n'), ((574, 585), 'time.time', 'time.time', ([], {}), '()\n', (583, 585), False, 'import time\n')] |
import torch
import copy
import random
import scipy.sparse as sp
import numpy as np
def aug_random_mask(input_feature, drop_percent=0.2):
    """Return a deep copy of the features with a random subset of node rows zeroed.

    :param input_feature: feature tensor indexed as [0][node] -- i.e. a
        leading batch axis of one, then nodes (shape [1, num_nodes, feat_dim])
    :param drop_percent: fraction of nodes whose feature rows are masked
    """
    num_nodes = input_feature.shape[1]
    num_masked = int(num_nodes * drop_percent)
    candidates = [idx for idx in range(num_nodes)]
    chosen = random.sample(candidates, num_masked)
    masked = copy.deepcopy(input_feature)
    blank_row = torch.zeros_like(masked[0][0])
    for node in chosen:
        masked[0][node] = blank_row
    return masked
def aug_random_edge(input_adj, drop_percent = 0.2):
    """Perturb a sparse adjacency matrix: drop some existing edges at random
    and add the same number of new random edges.

    Note: ``drop_percent`` is halved, so ``num_drop = nnz * drop_percent / 2``
    edges are removed and the same count added.

    :param input_adj: scipy sparse adjacency matrix (assumed 0/1 entries --
        the "- 1" trick below relies on that; TODO confirm for weighted graphs)
    :param drop_percent: total perturbation budget (half dropped, half added)
    :return: new scipy CSR adjacency matrix of the same shape
    """
    percent = drop_percent / 2
    row_idx, col_idx = input_adj.nonzero()
    num_drop = int(len(row_idx)*percent)
    # Map a dense edge index -> (row, col) so edges can be deleted by index.
    edge_index = [i for i in range(len(row_idx))]
    edges = dict(zip(edge_index, zip(row_idx, col_idx)))
    drop_idx = random.sample(edge_index, k = num_drop)
    # Delete the sampled edges in place (map() is used only for its side effect).
    list(map(edges.__delitem__, filter(edges.__contains__, drop_idx)))
    # Rebuild coordinate lists from the surviving edges.
    new_edges = list(zip(*list(edges.values())))
    new_row_idx = new_edges[0]
    new_col_idx = new_edges[1]
    data = np.ones(len(new_row_idx)).tolist()
    new_adj = sp.csr_matrix((data, (new_row_idx, new_col_idx)), shape = input_adj.shape)
    # Cells equal to 0 in the dense matrix become -1 after subtracting 1, so
    # nonzero() here enumerates the NON-edge cells.
    row_idx, col_idx = (new_adj.todense() - 1).nonzero()
    no_edges_cells = list(zip(row_idx, col_idx))
    add_idx = random.sample(no_edges_cells, num_drop)
    new_row_idx_1, new_col_idx_1 = list(zip(*add_idx))
    # Concatenate surviving edges with the newly added ones (tuple + tuple).
    row_idx = new_row_idx + new_row_idx_1
    col_idx = new_col_idx + new_col_idx_1
    data = np.ones(len(row_idx)).tolist()
    new_adj = sp.csr_matrix((data, (row_idx, col_idx)), shape = input_adj.shape)
    return new_adj
def aug_drop_node(input_fea, input_adj, drop_percent=0.2):
    """Randomly delete a fraction of nodes from both features and adjacency.

    :param input_fea: feature tensor of shape [1, num_nodes, feat_dim]
    :param input_adj: scipy sparse adjacency matrix
    :param drop_percent: fraction of nodes to remove
    :return: (feature tensor over kept nodes with batch axis restored,
              scipy CSR adjacency over kept nodes)
    """
    dense_adj = torch.tensor(input_adj.todense().tolist())
    features = input_fea.squeeze(0)
    n_nodes = features.shape[0]
    n_drop = int(n_nodes * drop_percent)
    candidates = [idx for idx in range(n_nodes)]
    dropped = sorted(random.sample(candidates, n_drop))
    kept_features = delete_row_col(features, dropped, only_row=True)
    kept_adj = delete_row_col(dense_adj, dropped)
    return kept_features.unsqueeze(0), sp.csr_matrix(np.matrix(kept_adj))
def aug_subgraph(input_fea, input_adj, drop_percent=0.2):
    """Keep a randomly grown connected subgraph and drop the remaining nodes.

    Starting from a random center node, repeatedly adds one random unvisited
    neighbor of the already-selected nodes until (1 - drop_percent) of the
    nodes are selected or no new neighbor exists; all other nodes are removed.

    :param input_fea: feature tensor of shape [1, num_nodes, feat_dim]
    :param input_adj: scipy sparse adjacency matrix
    :param drop_percent: fraction of nodes to drop (target; may drop more if
        growth stops early)
    :return: (feature tensor over kept nodes, scipy CSR adjacency over kept nodes)
    """
    input_adj = torch.tensor(input_adj.todense().tolist())
    input_fea = input_fea.squeeze(0)
    node_num = input_fea.shape[0]
    all_node_list = [i for i in range(node_num)]
    # Number of nodes the subgraph should reach.
    s_node_num = int(node_num * (1 - drop_percent))
    center_node_id = random.randint(0, node_num - 1)
    sub_node_id_list = [center_node_id]
    all_neighbor_list = []
    for i in range(s_node_num - 1):
        # Accumulate neighbors of the i-th selected node (nonzero adjacency entries).
        all_neighbor_list += torch.nonzero(input_adj[sub_node_id_list[i]], as_tuple=False).squeeze(1).tolist()
        all_neighbor_list = list(set(all_neighbor_list))
        new_neighbor_list = [n for n in all_neighbor_list if not n in sub_node_id_list]
        if len(new_neighbor_list) != 0:
            # Pick one random frontier node to grow the subgraph.
            new_node = random.sample(new_neighbor_list, 1)[0]
            sub_node_id_list.append(new_node)
        else:
            # Component exhausted: stop early with a smaller subgraph.
            break
    drop_node_list = sorted([i for i in all_node_list if not i in sub_node_id_list])
    aug_input_fea = delete_row_col(input_fea, drop_node_list, only_row=True)
    aug_input_adj = delete_row_col(input_adj, drop_node_list)
    aug_input_fea = aug_input_fea.unsqueeze(0)
    aug_input_adj = sp.csr_matrix(np.matrix(aug_input_adj))
    return aug_input_fea, aug_input_adj
def aug_feature_dropout(input_feat, drop_percent=0.2):
    """Zero out a random subset of feature columns (whole dimensions).

    :param input_feat: feature tensor of shape [1, num_nodes, feat_dim];
        the batch axis is squeezed away in the result
    :param drop_percent: fraction of feature dimensions to zero
    :return: [num_nodes, feat_dim] copy with the chosen columns set to 0
    """
    features = copy.deepcopy(input_feat.squeeze(0))
    feat_dim = features.shape[1]
    n_dropped = int(feat_dim * drop_percent)
    cols = random.sample(list(range(feat_dim)), n_dropped)
    features[:, cols] = 0
    return features
def aug_feature_dropout_cell(input_feat, drop_percent=0.2):
    """Zero out individual (node, feature) cells chosen uniformly at random.

    :param input_feat: feature tensor of shape [1, num_nodes, feat_dim];
        the batch axis is squeezed away in the result
    :param drop_percent: fraction of all cells to set to 0.0
    :return: [num_nodes, feat_dim] copy with the chosen cells zeroed
    """
    features = copy.deepcopy(input_feat.squeeze(0))
    n_nodes = features.shape[0]
    n_dims = features.shape[1]
    n_cells = int(n_nodes * n_dims * drop_percent)
    # Enumerate cells row-major, matching the original construction order.
    cells = [(row, col) for row in range(n_nodes) for col in range(n_dims)]
    for row, col in random.sample(cells, n_cells):
        features[(row, col)] = 0.0
    return features
def gdc(A: sp.csr_matrix, alpha: float, eps: float):
    """Graph diffusion: personalized-PageRank kernel, sparsified and normalized.

    :param A: sparse adjacency matrix (N x N)
    :param alpha: teleport probability of the PPR diffusion
    :param eps: entries of the diffusion matrix below this value are dropped
    :return: column-normalized, sparsified diffusion matrix
    """
    n = A.shape[0]
    adj_loops = sp.eye(n) + A  # add self-loops
    deg = adj_loops.sum(0).A1
    inv_sqrt_deg = sp.diags(1 / np.sqrt(deg))
    # Symmetrically normalized transition matrix D^{-1/2} (A + I) D^{-1/2}.
    trans_sym = inv_sqrt_deg @ adj_loops @ inv_sqrt_deg
    # Closed-form PPR: alpha * (I - (1 - alpha) T)^{-1}.
    ppr = alpha * sp.linalg.inv(sp.eye(n) - (1 - alpha) * trans_sym)
    sparsified = ppr.multiply(ppr >= eps)
    col_sums = sparsified.sum(0).A1
    return sparsified / col_sums
def delete_row_col(input_matrix, drop_list, only_row=False):
    """Remove the rows (and, unless only_row, the matching columns) in drop_list.

    :param input_matrix: 2-D indexable matrix (e.g. a torch tensor)
    :param drop_list: indices to remove
    :param only_row: when True, keep all columns and drop rows only
    :return: the trimmed matrix
    """
    keep = [idx for idx in range(input_matrix.shape[0]) if idx not in drop_list]
    trimmed = input_matrix[keep, :]
    if only_row:
        return trimmed
    return trimmed[:, keep]
"numpy.matrix",
"copy.deepcopy",
"scipy.sparse.diags",
"random.randint",
"torch.zeros_like",
"random.sample",
"torch.nonzero",
"scipy.sparse.csr_matrix",
"scipy.sparse.eye",
"numpy.sqrt"
] | [((281, 314), 'random.sample', 'random.sample', (['node_idx', 'mask_num'], {}), '(node_idx, mask_num)\n', (294, 314), False, 'import random\n'), ((333, 361), 'copy.deepcopy', 'copy.deepcopy', (['input_feature'], {}), '(input_feature)\n', (346, 361), False, 'import copy\n'), ((374, 409), 'torch.zeros_like', 'torch.zeros_like', (['aug_feature[0][0]'], {}), '(aug_feature[0][0])\n', (390, 409), False, 'import torch\n'), ((786, 823), 'random.sample', 'random.sample', (['edge_index'], {'k': 'num_drop'}), '(edge_index, k=num_drop)\n', (799, 823), False, 'import random\n'), ((1083, 1155), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['(data, (new_row_idx, new_col_idx))'], {'shape': 'input_adj.shape'}), '((data, (new_row_idx, new_col_idx)), shape=input_adj.shape)\n', (1096, 1155), True, 'import scipy.sparse as sp\n'), ((1280, 1319), 'random.sample', 'random.sample', (['no_edges_cells', 'num_drop'], {}), '(no_edges_cells, num_drop)\n', (1293, 1319), False, 'import random\n'), ((1533, 1597), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['(data, (row_idx, col_idx))'], {'shape': 'input_adj.shape'}), '((data, (row_idx, col_idx)), shape=input_adj.shape)\n', (1546, 1597), True, 'import scipy.sparse as sp\n'), ((2584, 2615), 'random.randint', 'random.randint', (['(0)', '(node_num - 1)'], {}), '(0, node_num - 1)\n', (2598, 2615), False, 'import random\n'), ((4406, 4444), 'random.sample', 'random.sample', (['position', 'drop_feat_num'], {}), '(position, drop_feat_num)\n', (4419, 4444), False, 'import random\n'), ((4778, 4806), 'scipy.sparse.diags', 'sp.diags', (['D_loop_vec_invsqrt'], {}), '(D_loop_vec_invsqrt)\n', (4786, 4806), True, 'import scipy.sparse as sp\n'), ((1942, 1980), 'random.sample', 'random.sample', (['all_node_list', 'drop_num'], {}), '(all_node_list, drop_num)\n', (1955, 1980), False, 'import random\n'), ((2204, 2228), 'numpy.matrix', 'np.matrix', (['aug_input_adj'], {}), '(aug_input_adj)\n', (2213, 2228), True, 'import numpy as np\n'), ((3484, 3508), 
'numpy.matrix', 'np.matrix', (['aug_input_adj'], {}), '(aug_input_adj)\n', (3493, 3508), True, 'import numpy as np\n'), ((4660, 4669), 'scipy.sparse.eye', 'sp.eye', (['N'], {}), '(N)\n', (4666, 4669), True, 'import scipy.sparse as sp\n'), ((4737, 4756), 'numpy.sqrt', 'np.sqrt', (['D_loop_vec'], {}), '(D_loop_vec)\n', (4744, 4756), True, 'import numpy as np\n'), ((3057, 3092), 'random.sample', 'random.sample', (['new_neighbor_list', '(1)'], {}), '(new_neighbor_list, 1)\n', (3070, 3092), False, 'import random\n'), ((4890, 4899), 'scipy.sparse.eye', 'sp.eye', (['N'], {}), '(N)\n', (4896, 4899), True, 'import scipy.sparse as sp\n'), ((2758, 2819), 'torch.nonzero', 'torch.nonzero', (['input_adj[sub_node_id_list[i]]'], {'as_tuple': '(False)'}), '(input_adj[sub_node_id_list[i]], as_tuple=False)\n', (2771, 2819), False, 'import torch\n')] |
#!/usr/bin/python
# created by: <NAME> (<EMAIL>)
# created on: 31 July 2016
import numpy as np
import matplotlib.pyplot as plt
def smooth_spectra(y, box_pts):
    """Moving-average smoothing via convolution with a box kernel.

    :param y: 1-D signal (array_like)
    :param box_pts: width of the box kernel in samples
    :return: smoothed signal, same length as y ('same' mode convolution)
    """
    kernel = np.full(box_pts, 1.0 / box_pts)
    return np.convolve(y, kernel, mode='same')
def smooth(filename, outfile="out-smooth.dat"):
    """
    For smoothing out spectra in order to find first-order derivative
    and peaks easier.

    Reads a two-column text file (wavenumber, counts), writes the smoothed
    spectrum to `outfile`, and shows an interactive comparison plot (blocks
    on an [enter] prompt).

    :param filename: input file with whitespace-separated columns x, y
    :param outfile: destination file for "wavenum smoothed" pairs
    """
    window = 5  # window for moving-average convolution
    # Import data from filename
    data = np.loadtxt(filename)
    wavenum = data[:, 0]  # x
    counts = data[:, 1]  # y
    smoothcts = smooth_spectra(counts, window)
    # ========== Save results =============#
    # BUGFIX: the file handle was never closed, so output could be lost if
    # the interactive plotting below raised; a context manager fixes that.
    with open(outfile, 'w') as f:
        for line in zip(wavenum, smoothcts):
            f.write(" ".join(str(x) for x in line) + "\n")
    # ========== Plotting results =============#
    plt.figure()
    plt.plot(wavenum, counts, label='Measured', linewidth=2.0)
    plt.plot(wavenum, smoothcts, 'k--', label='Smoothed', linewidth=2.0)
    plt.legend(loc='upper right')
    plt.draw()
    plt.pause(0.001)
    input("Please press [enter] to continue")
| [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.ones",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"numpy.convolve",
"matplotlib.pyplot.pause"
] | [((212, 244), 'numpy.convolve', 'np.convolve', (['y', 'box'], {'mode': '"""same"""'}), "(y, box, mode='same')\n", (223, 244), True, 'import numpy as np\n'), ((583, 603), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {}), '(filename)\n', (593, 603), True, 'import numpy as np\n'), ((981, 993), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (991, 993), True, 'import matplotlib.pyplot as plt\n'), ((997, 1055), 'matplotlib.pyplot.plot', 'plt.plot', (['wavenum', 'counts'], {'label': '"""Measured"""', 'linewidth': '(2.0)'}), "(wavenum, counts, label='Measured', linewidth=2.0)\n", (1005, 1055), True, 'import matplotlib.pyplot as plt\n'), ((1057, 1125), 'matplotlib.pyplot.plot', 'plt.plot', (['wavenum', 'smoothcts', '"""k--"""'], {'label': '"""Smoothed"""', 'linewidth': '(2.0)'}), "(wavenum, smoothcts, 'k--', label='Smoothed', linewidth=2.0)\n", (1065, 1125), True, 'import matplotlib.pyplot as plt\n'), ((1125, 1154), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (1135, 1154), True, 'import matplotlib.pyplot as plt\n'), ((1162, 1172), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1170, 1172), True, 'import matplotlib.pyplot as plt\n'), ((1176, 1192), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (1185, 1192), True, 'import matplotlib.pyplot as plt\n'), ((172, 188), 'numpy.ones', 'np.ones', (['box_pts'], {}), '(box_pts)\n', (179, 188), True, 'import numpy as np\n')] |
import numpy as np
#GLOBAL VARIABLES
# Radius of subconductors :
diameter_strand = 2
number_of_strands = 12
number_of_layers = 2
# NOTE(review): placeholder value -- left at 0, which also zeroes
# resultant_radius below; fill in the real radius before use.
radius_subconductor = 0
############################################################################################
### OUTPUT 1:
distance_subconductors = 0
SGMD = 0
# NOTE(review): duplicate assignment -- distance_subconductors was already
# set to 0 above; one of the two lines is probably a leftover.
distance_subconductors = 0
m = number_of_strands**2
# Geometric mean radius of a solid round conductor: r' = 0.7788 * r.
resultant_radius = 0.7788 * radius_subconductor
number_subconductors = 0
symmetric_input = 0
MGMD = 0
inductance = 0
#Distance between the phase conductors:
distancex = 0
distancey = 0
distancez = 0
distances = 0
############################################################################################
# Vacuum permittivity in F/m.
electric_permittivity = 8.85 * (10**(-12))
# NOTE(review): with MGMD and SGMD both left at 0, MGMD/SGMD raises
# ZeroDivisionError at import time -- the placeholders above must be
# populated before this line can run.
capacitance = (2*3.14159*electric_permittivity)/(np.log(MGMD/SGMD))
print ( capacitance ) | [
"numpy.log"
] | [((756, 775), 'numpy.log', 'np.log', (['(MGMD / SGMD)'], {}), '(MGMD / SGMD)\n', (762, 775), True, 'import numpy as np\n')] |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import numpy as np
import sys
import nevergrad as ng
import nevergrad.common.typing as tp
from nevergrad.common import testing
from . import base
from .optimizerlib import registry
# decorators to be used when testing on Windows is unecessary
# or cumbersome
skip_win_perf = pytest.mark.skipif(
sys.platform == "win32", reason="Slow, and no need to test performance on all platforms"
)
def suggestable(name: str) -> bool:
    """Return True unless the optimizer name matches a family known to
    handle suggestions poorly."""
    bad_markers = ["TBPSA", "BO", "EMNA", "EDA", "BO", "Stupid", "Pymoo"]
    return all(marker not in name for marker in bad_markers)
def suggestion_testing(
    name: str,
    instrumentation: ng.p.Array,
    suggestion: np.ndarray,
    budget: int,
    objective_function: tp.Callable[..., tp.Any],
    optimum: tp.Optional[np.ndarray] = None,
    threshold: tp.Optional[float] = None,
):
    """Run optimizer ``name`` with ``suggestion`` injected, then check that the
    recommendation matches ``optimum`` (or scores below ``threshold``).

    :param name: key into the optimizer registry
    :param instrumentation: parametrization handed to the optimizer
    :param suggestion: point injected via ``optim.suggest``
    :param budget: evaluation budget for ``minimize``
    :param objective_function: function being minimized
    :param optimum: expected recommendation; defaults to ``suggestion``
    :param threshold: when given, only require the recommendation's objective
        value to fall below this instead of matching ``optimum`` exactly
    """
    optimizer_cls = registry[name]
    optim = optimizer_cls(instrumentation, budget)
    if optimum is None:
        optimum = suggestion
    optim.suggest(suggestion)
    optim.minimize(objective_function)
    if threshold is not None:
        # BUGFIX: the assertion messages were plain strings containing
        # ``{...}`` placeholders -- the missing f-prefix meant they were
        # never interpolated and reported nothing useful on failure.
        assert (
            objective_function(optim.recommend().value) < threshold
        ), f"{name} proposes {optim.recommend().value} instead of {optimum} (threshold={threshold})"
        return
    assert np.all(
        optim.recommend().value == optimum
    ), f"{name} proposes {optim.recommend().value} instead of {optimum}"
@skip_win_perf  # type: ignore
@pytest.mark.parametrize("name", [r for r in registry if suggestable(r)])  # type: ignore
def test_suggest_optimizers(name: str) -> None:
    """Checks that each optimizer is able to converge when optimum is given"""
    param = ng.p.Array(shape=(100,)).set_bounds(0.0, 1.0)
    param.set_integer_casting()
    # The suggestion doubles as the optimum.
    optimum = np.asarray(17 * [0] + 17 * [1] + 66 * [0])

    def target(x) -> int:
        return 0 if np.all(np.asarray(x, dtype=int) == optimum) else 1

    suggestion_testing(name, param, optimum, 7, target)
def good_at_suggest(name: str) -> bool:
    """Return True unless the optimizer name belongs to a family known to
    struggle with nearby suggestions."""
    bad_markers = (
        "Noisy",
        "Optimistic",
        "Multi",
        "Anisotropic",
        "BSO",
        "Sparse",
        "Recombining",
        "PortfolioDiscreteOne",
    )
    return all(marker not in name for marker in bad_markers)
@skip_win_perf  # type: ignore
@pytest.mark.parametrize("name", [r for r in registry if "iscre" in r and good_at_suggest(r)])  # type: ignore
def test_harder_suggest_optimizers(name: str) -> None:
    """Checks that discrete optimizers are good when a suggestion is nearby."""
    param = ng.p.Array(shape=(100,)).set_bounds(0.0, 1.0)
    param.set_integer_casting()
    optimum = np.asarray(17 * [0] + 17 * [1] + 66 * [0])

    def target(x):
        # Squared distance to the optimum, capped at 3.
        return min(3, np.sum((np.asarray(x, dtype=int) - optimum) ** 2))

    # One bit away from the optimum.
    near_optimum = np.asarray(17 * [0] + 16 * [1] + 67 * [0])
    suggestion_testing(name, param, near_optimum, 1500, target, optimum)
@skip_win_perf  # type: ignore
def test_harder_continuous_suggest_optimizers() -> None:
    """Checks that some optimizers can converge when provided with a good suggestion."""
    param = ng.p.Array(shape=(100,)).set_bounds(0.0, 1.0)
    optimum = np.asarray(17 * [0] + 17 * [1] + 66 * [0])

    def target(x):
        # Squared distance to the optimum, capped at 2.0.
        return min(2.0, np.sum((x - optimum) ** 2))

    # One coordinate away from the optimum.
    near_optimum = np.asarray(17 * [0] + 16 * [1] + 67 * [0])
    suggestion_testing("NGOpt", param, near_optimum, 1500, target, optimum, threshold=0.9)
@testing.suppress_nevergrad_warnings()
@pytest.mark.parametrize("name", registry)  # type: ignore
def test_optimizers_suggest(name: str) -> None:  # pylint: disable=redefined-outer-name
    """Every optimizer should accept a suggestion, and most should echo it back."""
    opt = registry[name](parametrization=4, budget=2)
    opt.suggest(np.array([12.0] * 4))
    candidate = opt.ask()
    try:
        opt.tell(candidate, 12)
    except base.errors.TellNotAskedNotSupportedError:
        return
    # The optimizer should recommend its suggestion, except for a few
    # optimization methods:
    if name not in ["SPSA", "TBPSA", "StupidRandom"]:
        np.testing.assert_array_almost_equal(opt.provide_recommendation().value, [12.0] * 4)
| [
"numpy.sum",
"numpy.asarray",
"pytest.mark.skipif",
"numpy.array",
"nevergrad.p.Array",
"pytest.mark.parametrize",
"nevergrad.common.testing.suppress_nevergrad_warnings"
] | [((471, 584), 'pytest.mark.skipif', 'pytest.mark.skipif', (["(sys.platform == 'win32')"], {'reason': '"""Slow, and no need to test performance on all platforms"""'}), "(sys.platform == 'win32', reason=\n 'Slow, and no need to test performance on all platforms')\n", (489, 584), False, 'import pytest\n'), ((3640, 3677), 'nevergrad.common.testing.suppress_nevergrad_warnings', 'testing.suppress_nevergrad_warnings', ([], {}), '()\n', (3675, 3677), False, 'from nevergrad.common import testing\n'), ((3679, 3720), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name"""', 'registry'], {}), "('name', registry)\n", (3702, 3720), False, 'import pytest\n'), ((1986, 2028), 'numpy.asarray', 'np.asarray', (['([0] * 17 + [1] * 17 + [0] * 66)'], {}), '([0] * 17 + [1] * 17 + [0] * 66)\n', (1996, 2028), True, 'import numpy as np\n'), ((2873, 2915), 'numpy.asarray', 'np.asarray', (['([0] * 17 + [1] * 17 + [0] * 66)'], {}), '([0] * 17 + [1] * 17 + [0] * 66)\n', (2883, 2915), True, 'import numpy as np\n'), ((3014, 3056), 'numpy.asarray', 'np.asarray', (['([0] * 17 + [1] * 16 + [0] * 67)'], {}), '([0] * 17 + [1] * 16 + [0] * 67)\n', (3024, 3056), True, 'import numpy as np\n'), ((3383, 3425), 'numpy.asarray', 'np.asarray', (['([0] * 17 + [1] * 17 + [0] * 66)'], {}), '([0] * 17 + [1] * 17 + [0] * 66)\n', (3393, 3425), True, 'import numpy as np\n'), ((3503, 3545), 'numpy.asarray', 'np.asarray', (['([0] * 17 + [1] * 16 + [0] * 67)'], {}), '([0] * 17 + [1] * 16 + [0] * 67)\n', (3513, 3545), True, 'import numpy as np\n'), ((3907, 3927), 'numpy.array', 'np.array', (['([12.0] * 4)'], {}), '([12.0] * 4)\n', (3915, 3927), True, 'import numpy as np\n'), ((1889, 1913), 'nevergrad.p.Array', 'ng.p.Array', ([], {'shape': '(100,)'}), '(shape=(100,))\n', (1899, 1913), True, 'import nevergrad as ng\n'), ((2779, 2803), 'nevergrad.p.Array', 'ng.p.Array', ([], {'shape': '(100,)'}), '(shape=(100,))\n', (2789, 2803), True, 'import nevergrad as ng\n'), ((3323, 3347), 'nevergrad.p.Array', 
'ng.p.Array', ([], {'shape': '(100,)'}), '(shape=(100,))\n', (3333, 3347), True, 'import nevergrad as ng\n'), ((3458, 3484), 'numpy.sum', 'np.sum', (['((x - optimum) ** 2)'], {}), '((x - optimum) ** 2)\n', (3464, 3484), True, 'import numpy as np\n'), ((2098, 2122), 'numpy.asarray', 'np.asarray', (['x'], {'dtype': 'int'}), '(x, dtype=int)\n', (2108, 2122), True, 'import numpy as np\n'), ((2954, 2978), 'numpy.asarray', 'np.asarray', (['x'], {'dtype': 'int'}), '(x, dtype=int)\n', (2964, 2978), True, 'import numpy as np\n')] |
# -------------------------------------------------------------------------------------
# AutoLoc: Weakly-supervised Temporal Action Localization in Untrimmed Videos. ECCV'18.
# Authors: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
# -------------------------------------------------------------------------------------
import numpy as np
import os.path as osp
from easydict import EasyDict as edict
__C = edict()
cfg = __C
############################################################
# MISC
# Debug option for layers
__C.DEBUG = False
# Currently automatically set 20 for 'TH14' and 100 for 'AN'
__C.NUM_CLASSES = 20
__C.NUM_FEAT = 2048
__C.BASE_LR = 1e-3
__C.MAX_ITER = 25
# Optional infix for prototxts and snapshot path
# - exp/<dataset>/<exp>/proto/{solver,train}[_<infix>_].prototxt
# - exp/<dataset>/<exp>/snapshot/autoloc[_<infix>_]
__C.INFIX = ''
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..'))
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
# Experiments directory
__C.EXP_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'exp'))
# Snapshot directory
__C.SNAPSHOT_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'snapshot'))
# Default GPU device id
__C.GPU_ID = 0
# Whether or not save model when training, remember to open it
# up when model needs to be tested.
__C.SAVE_MODEL = False
############################################################
# Feature post-processing options
__C.FEAT = edict()
# 'mul' or 'mask'
__C.FEAT.MODE = 'mul'
# 'sigmoid' or 'softmax' or 'relu'
__C.FEAT.ACTIVATION = 'sigmoid'
__C.FEAT.NORM = False
__C.FEAT.THRESH = None
############################################################
# Visualization options
__C.VIZ = edict()
# This arg would be overriden by `FEAT` configs.
__C.VIZ.FOLDER_NAME = 'viz'
# Iteration interval to plot optimization plot for training.
__C.VIZ.PLOT_ITVL = 5
# Whether or not fix bar's width and scratching figure length when plotting.
__C.VIZ.FIX_WIDTH = True
# Sub-dataset for visualization groundtruth through `tools/viz_gt.py`
__C.VIZ.STAGE = 'val'
__C.VIZ.PLOT_PR_CURVE = False
__C.VIZ.PLOT_PR_NCOLS = 5
############################################################
# Data spec
__C.DSPEC = edict()
__C.DSPEC.TH14_GT = 'data/TH14/' + \
'{stage}_gt_dump.py2.pc'
__C.DSPEC.AN_GT = 'data/AN/' + \
'{stage}_gt_dump.indexified.pc'
__C.DSPEC.TH14_META = 'data/TH14/' + \
'{stage}_meta_det_only.pc'
__C.DSPEC.AN_META = 'data/AN/' + \
'{stage}_meta_v2_includedv3.indexified.pc'
__C.DSPEC.WINSTEP = 15
############################################################
# Layer spec
__C.LSPEC = edict()
# Use 'sgl' or 'mlt' position optimization.
__C.LSPEC.OIC_POS_OPT = 'sgl'
# Use 'org' or 'ref' score for anchor selection.
__C.LSPEC.OIC_ANC_OPT = 'org'
############################################################
# Training options
__C.TRAIN = edict()
# Data file for training
# - data/<dataset>/_<data_file>_
__C.TRAIN.DATA_FILE = 'default'
# Sub-dataset for training
__C.TRAIN.STAGE = 'val'
# Optional folder name for visualization
# - exp/<dataset>/<exp>/_<viz_folder>_
__C.TRAIN.COL_LABEL = 1
__C.TRAIN.COL_FEAT = 2
__C.TRAIN.COL_HEATMAP = 3
__C.TRAIN.COL_ATT = 4
# Legacy per from `py-faster-rcnn` for CNN kernel scaling back to
# original size. Since now we are doing temporal convolution, we set
# this parameter equal to 1.
__C.TRAIN.FEAT_STRIDE = 1
# Anchor scales ranged in 2**(<lo>, <hi>)
__C.TRAIN.ANCHOR_SCALES = (2 ** np.arange(4, 6)).tolist()
__C.TRAIN.OUTER_INFLATE_RATIO = .25
__C.TRAIN.OUTER_MIN = 1
__C.TRAIN.FEAT_SCALE = 1
__C.TRAIN.CLIP_INNER_MARGIN = 1
__C.TRAIN.CLIP_OUTER_MARGIN = 2
__C.TRAIN.FG_THRESH = .5
# Upper tolerance of OIC loss for optimization
__C.TRAIN.OIC_LOSS_THRESH = .3
# Only used when not choose <_adaptive_nms> mode
__C.TRAIN.NMS_BBOX_THRESH = .2
# Use top-k class when selecting classes according to avg heatmap scores.
# When <OIC_cls_topk> equals to -1, use ground truth labels.
__C.TRAIN.OIC_CLS_TOPK = -1
############################################################
# Testing options
__C.TEST = edict()
# Data file for training
# - data/<dataset>/_<data_file>_
__C.TEST.DATA_FILE = 'default'
# Sub-dataset for testing
__C.TEST.STAGE = 'test'
# Optional folder name for visualization
# - exp/<dataset>/<exp>/_<viz_folder>_
__C.TEST.COL_LABEL = 1
__C.TEST.COL_FEAT = 2
__C.TEST.COL_HEATMAP = 3
__C.TEST.COL_ATT = 4
# Legacy per from `py-faster-rcnn` for CNN kernel scaling back to
# original size. Since now we are doing temporal convolution, we set
# this parameter equal to 1.
__C.TEST.FEAT_STRIDE = 1
# Anchor scales ranged in 2**(<lo>, <hi>)
__C.TEST.ANCHOR_SCALES = (2 ** np.arange(4, 6)).tolist()
__C.TEST.OUTER_INFLATE_RATIO = .25
__C.TEST.OUTER_MIN = 1
__C.TEST.FEAT_SCALE = 1
__C.TEST.CLIP_INNER_MARGIN = 1
__C.TEST.CLIP_OUTER_MARGIN = 2
__C.TEST.FG_THRESH = .5
# Upper tolerance of OIC loss for optimization
__C.TEST.OIC_LOSS_THRESH = .3
# Only used when not choose <_adaptive_nms> mode
__C.TEST.NMS_BBOX_THRESH = .2
# Use top-k class when selecting classes according to avg heatmap scores.
# When <OIC_cls_topk> equals to -1, use ground truth labels.
__C.TEST.OIC_CLS_TOPK = 2
############################################################
# Evaluation options
__C.EVAL = edict()
# To enumerate <_tiou_thresh_> - <_nms_shift_> for evaluation.
# Note it will results in len(<_tiou_thresh>) models to save.
__C.EVAL.TRAIN_ADAPTIVE_NMS = False
__C.EVAL.TEST_ADAPTIVE_NMS = False
__C.EVAL.TRAIN_ADAPTIVE_NMS_SHIFT = 0.
__C.EVAL.TEST_ADAPTIVE_NMS_SHIFT = 0.
__C.EVAL.TIOU_THRESH = np.linspace(0.1, 0.7, 7).tolist()
# Precision with predictions of top[x]% OIC_score.
__C.EVAL.PREC_AT_TOPX = .05
# Whether or not include ap results in final table
__C.EVAL.TBL_INCLUDE_CLS = False
############################################################
# Configuration helpers from `py-faster-rcnn`
def _merge_a_into_b(a, b):
    '''
    Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.

    Arguments:
        a (edict): source configuration; every key must already exist in b.
        b (edict): destination configuration, modified in place.

    Raises:
        KeyError: if a contains a key that is not present in b.
        ValueError: if value types disagree (and the value cannot be coerced
            to the existing numpy dtype), or if a recursive merge fails.
    '''
    if type(a) is not edict:
        return
    # .items() works on both Python 2 and 3; .iteritems() was removed in 3.
    for k, v in a.items():
        # a must specify keys that are in b
        if k not in b:
            raise KeyError('{} is not a valid config key'.format(k))
        # Types must match, too; numpy arrays are coerced to the existing dtype.
        old_type = type(b[k])
        if old_type is not type(v) and b[k] is not None:
            if isinstance(b[k], np.ndarray):
                v = np.array(v, dtype=b[k].dtype)
            else:
                raise ValueError(('Type mismatch ({} vs. {}) '
                                  'for config key: {}').format(type(b[k]),
                                                              type(v), k))
        # Recursively merge dicts
        if type(v) is edict:
            try:
                _merge_a_into_b(a[k], b[k])
            # Narrowed from a bare except; re-raise with the offending key.
            except Exception:
                raise ValueError('Error under config key: {} with {} vs. {}'.
                                 format(k, a[k], b[k]))
        else:
            b[k] = v
def cfg_from_file(filename):
    '''Load a config file and merge it into the default options.

    Arguments:
        filename (str): Path to a YAML configuration file whose keys mirror
            the default config structure rooted at `__C`.
    '''
    import yaml
    with open(filename, 'r') as f:
        # safe_load only constructs plain Python objects; bare yaml.load
        # without a Loader is deprecated and can execute arbitrary code
        # from untrusted config files.
        yaml_cfg = edict(yaml.safe_load(f))
    _merge_a_into_b(yaml_cfg, __C)
| [
"yaml.load",
"os.path.dirname",
"numpy.arange",
"numpy.array",
"numpy.linspace",
"easydict.EasyDict",
"os.path.join"
] | [((405, 412), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (410, 412), True, 'from easydict import EasyDict as edict\n'), ((1478, 1485), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (1483, 1485), True, 'from easydict import EasyDict as edict\n'), ((1737, 1744), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (1742, 1744), True, 'from easydict import EasyDict as edict\n'), ((2248, 2255), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (2253, 2255), True, 'from easydict import EasyDict as edict\n'), ((2658, 2665), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (2663, 2665), True, 'from easydict import EasyDict as edict\n'), ((2916, 2923), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (2921, 2923), True, 'from easydict import EasyDict as edict\n'), ((4131, 4138), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (4136, 4138), True, 'from easydict import EasyDict as edict\n'), ((5331, 5338), 'easydict.EasyDict', 'edict', ([], {}), '()\n', (5336, 5338), True, 'from easydict import EasyDict as edict\n'), ((1005, 1035), 'os.path.join', 'osp.join', (['__C.ROOT_DIR', '"""data"""'], {}), "(__C.ROOT_DIR, 'data')\n", (1013, 1035), True, 'import os.path as osp\n'), ((1087, 1116), 'os.path.join', 'osp.join', (['__C.ROOT_DIR', '"""exp"""'], {}), "(__C.ROOT_DIR, 'exp')\n", (1095, 1116), True, 'import os.path as osp\n'), ((1170, 1204), 'os.path.join', 'osp.join', (['__C.ROOT_DIR', '"""snapshot"""'], {}), "(__C.ROOT_DIR, 'snapshot')\n", (1178, 1204), True, 'import os.path as osp\n'), ((931, 952), 'os.path.dirname', 'osp.dirname', (['__file__'], {}), '(__file__)\n', (942, 952), True, 'import os.path as osp\n'), ((5638, 5662), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.7)', '(7)'], {}), '(0.1, 0.7, 7)\n', (5649, 5662), True, 'import numpy as np\n'), ((3512, 3527), 'numpy.arange', 'np.arange', (['(4)', '(6)'], {}), '(4, 6)\n', (3521, 3527), True, 'import numpy as np\n'), ((4719, 4734), 'numpy.arange', 'np.arange', (['(4)', '(6)'], {}), '(4, 6)\n', (4728, 4734), True, 
'import numpy as np\n'), ((7266, 7278), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (7275, 7278), False, 'import yaml\n'), ((6513, 6542), 'numpy.array', 'np.array', (['v'], {'dtype': 'b[k].dtype'}), '(v, dtype=b[k].dtype)\n', (6521, 6542), True, 'import numpy as np\n')] |
#!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2019/11/8 13:31
# @Author: <EMAIL>
from jtyoui.ml import sigmoid, get_cost, TRAIN_DATA, TEST_LABEL
from random import normalvariate
import numpy as np
__description__ = """
FM(因子分解机)算法
"""
def initialize_v(n: int, k: int):
    """Initialize the interaction (cross-term) weight matrix of the FM model.

    Every entry is drawn independently from a normal distribution with mean
    0 and standard deviation 0.2 — the same distribution as the original
    element-wise ``normalvariate(0, 0.2)`` double loop, but produced with a
    single vectorized call.

    :param n: number of features
    :param k: degree (latent dimension) of the FM model
    :return: an (n, k) np.matrix of interaction weights
    """
    # One vectorized draw replaces the O(n*k) Python-level nested loop.
    return np.mat(np.random.normal(loc=0.0, scale=0.2, size=(n, k)))
def get_prediction(data, w0, w, v):
    """Compute sigmoid predictions for every row of ``data``.

    :param data: feature matrix (one sample per row)
    :param w0: bias (constant) term
    :param w: first-order weight column vector
    :param v: interaction (cross-term) weight matrix
    :return: list of per-sample predictions in (0, 1)
    """
    predictions = []
    n_samples = np.shape(data)[0]
    for row_idx in range(n_samples):
        row = data[row_idx]
        # FM interaction term: ((x*v)^2 - (x^2)*(v^2)) summed and halved.
        linear = row * v
        squared = np.multiply(row, row) * np.multiply(v, v)
        cross_term = np.sum(np.multiply(linear, linear) - squared) / 2.
        score = w0 + row * w + cross_term
        predictions.append(sigmoid(score[0, 0]))
    return predictions
def stop_grad_ascent(data: np.mat, label: np.mat, k: int, max_iter: int, alpha: float) -> (float, np.mat, np.mat):
    """Train the FM model with stochastic gradient descent.

    :param data: feature matrix (one sample per row)
    :param label: label vector; the loss form suggests labels in {-1, 1} —
        TODO confirm against TRAIN_DATA/TEST_LABEL in jtyoui.ml
    :param k: latent dimension of the interaction matrix v
    :param max_iter: maximum number of iterations (passes over the data)
    :param alpha: learning rate
    :return: trained weights (w0, w, v) — bias, first-order, interaction
    """
    m, n = np.shape(data)
    # First-order weights start from a standard normal draw; bias starts at 0.
    w = np.random.randn(n).reshape((n, 1))
    w0 = 0
    v = initialize_v(n, k)
    for it in range(max_iter):
        for x in range(m):
            # FM interaction term: ((x*v)^2 - (x^2)*(v^2)) summed, halved.
            inter_1 = data[x] * v
            inter_2 = np.multiply(data[x], data[x]) * np.multiply(v, v)
            inter = np.sum(np.multiply(inter_1, inter_1) - inter_2) / 2.
            p = w0 + data[x] * w + inter
            # Logistic-loss gradient factor; sigmoid(...) - 1 lies in (-1, 0).
            loss = sigmoid(label[x] * p[0, 0]) - 1
            w0 -= alpha * loss * label[x]
            for i in range(n):
                # Skip zero features — their gradient contribution is zero.
                if data[x, i] != 0:
                    w[i, 0] -= alpha * loss * label[x] * data[x, i]
                    for j in range(k):
                        v[i, j] -= alpha * loss * label[x] * (
                                data[x, i] * inter_1[0, j] - v[i, j] * data[x, i] * data[x, i])
        if it % 100 == 0:
            # Periodically report the training cost on the full dataset.
            pre = get_prediction(np.mat(data), w0, w, v)
            print(get_cost(np.mat(pre), label))
    return w0, w, v
if __name__ == '__main__':
    # Train an FM model on the bundled demo data, then predict one sample.
    weight = stop_grad_ascent(TRAIN_DATA, TEST_LABEL, 3, 1000, 0.1)
    print(get_prediction(np.mat([[1, 1 / 24, 10 / 60, 32 / 60]]), *weight))
| [
"numpy.multiply",
"jtyoui.ml.sigmoid",
"random.normalvariate",
"numpy.random.randn",
"numpy.zeros",
"numpy.shape",
"numpy.mat"
] | [((1293, 1307), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (1301, 1307), True, 'import numpy as np\n'), ((378, 400), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, k)'}), '(shape=(n, k))\n', (386, 400), True, 'import numpy as np\n'), ((668, 682), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (676, 682), True, 'import numpy as np\n'), ((943, 959), 'jtyoui.ml.sigmoid', 'sigmoid', (['p[0, 0]'], {}), '(p[0, 0])\n', (950, 959), False, 'from jtyoui.ml import sigmoid, get_cost, TRAIN_DATA, TEST_LABEL\n'), ((474, 495), 'random.normalvariate', 'normalvariate', (['(0)', '(0.2)'], {}), '(0, 0.2)\n', (487, 495), False, 'from random import normalvariate\n'), ((773, 802), 'numpy.multiply', 'np.multiply', (['data[x]', 'data[x]'], {}), '(data[x], data[x])\n', (784, 802), True, 'import numpy as np\n'), ((805, 822), 'numpy.multiply', 'np.multiply', (['v', 'v'], {}), '(v, v)\n', (816, 822), True, 'import numpy as np\n'), ((1316, 1334), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (1331, 1334), True, 'import numpy as np\n'), ((2354, 2393), 'numpy.mat', 'np.mat', (['[[1, 1 / 24, 10 / 60, 32 / 60]]'], {}), '([[1, 1 / 24, 10 / 60, 32 / 60]])\n', (2360, 2393), True, 'import numpy as np\n'), ((1503, 1532), 'numpy.multiply', 'np.multiply', (['data[x]', 'data[x]'], {}), '(data[x], data[x])\n', (1514, 1532), True, 'import numpy as np\n'), ((1535, 1552), 'numpy.multiply', 'np.multiply', (['v', 'v'], {}), '(v, v)\n', (1546, 1552), True, 'import numpy as np\n'), ((1686, 1713), 'jtyoui.ml.sigmoid', 'sigmoid', (['(label[x] * p[0, 0])'], {}), '(label[x] * p[0, 0])\n', (1693, 1713), False, 'from jtyoui.ml import sigmoid, get_cost, TRAIN_DATA, TEST_LABEL\n'), ((2140, 2152), 'numpy.mat', 'np.mat', (['data'], {}), '(data)\n', (2146, 2152), True, 'import numpy as np\n'), ((846, 875), 'numpy.multiply', 'np.multiply', (['inter_1', 'inter_1'], {}), '(inter_1, inter_1)\n', (857, 875), True, 'import numpy as np\n'), ((2191, 2202), 'numpy.mat', 'np.mat', (['pre'], 
{}), '(pre)\n', (2197, 2202), True, 'import numpy as np\n'), ((1580, 1609), 'numpy.multiply', 'np.multiply', (['inter_1', 'inter_1'], {}), '(inter_1, inter_1)\n', (1591, 1609), True, 'import numpy as np\n')] |
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
from numpy.random import permutation
from legate import pandas as lp
from tests.utils import equals
for index in [
pd.RangeIndex(1, 21, 2),
pd.RangeIndex(21, 1, -2),
pd.Index(permutation(10)),
]:
print(f"Index: {index}")
df = pd.DataFrame(
{
"a": range(10),
"b": range(1, 11),
"c": [None if i % 4 == 1 else str(i) * 3 for i in range(10)],
"d": [str(i % 3) for i in range(10)],
},
index=index,
)
df["c"] = df["c"].astype(pd.StringDtype())
df["d"] = df["d"].astype("category")
ldf = lp.DataFrame(df)
assert equals(ldf.iloc[:, 0], df.iloc[:, 0])
assert equals(ldf.iloc[:, [1]], df.iloc[:, [1]])
assert equals(ldf.iloc[:, [1, 3]], df.iloc[:, [1, 3]])
assert equals(
ldf.iloc[:, [True, False, True, False]],
df.iloc[:, [True, False, True, False]],
)
assert equals(ldf.iloc[:, 0:], df.iloc[:, 0:])
assert equals(ldf.iloc[:, :3], df.iloc[:, :3])
assert equals(ldf.iloc[3, 0:2].to_pandas().T.squeeze(), df.iloc[3, 0:2])
assert equals(ldf.iloc[3:-4, 1:3], df.iloc[3:-4, 1:3])
assert equals(ldf.iloc[-6:-4, 1:3], df.iloc[-6:-4, 1:3])
assert equals(ldf.iloc[:-4, 1:3], df.iloc[:-4, 1:3])
assert equals(ldf.iloc[3:, 1:3], df.iloc[3:, 1:3])
# This should be a no-op
ldf.iloc[0:0, [0, 1]] = -100
df.iloc[0:0, [0, 1]] = -100
assert equals(ldf, df)
ldf.iloc[5, [0, 1]] = 100
df.iloc[5, [0, 1]] = 100
assert equals(ldf, df)
ldf.iloc[3, [0, 1]] = ldf.iloc[3, [0, 1]] + 100
df.iloc[3, [0, 1]] = df.iloc[3, [0, 1]] + 100
assert equals(ldf, df)
df.iloc[:, [0, 1]] = df.iloc[:, [0, 1]] - 100
ldf.iloc[:, [0, 1]] = ldf.iloc[:, [0, 1]] - 100
assert equals(ldf, df)
sl = slice(-5, 9)
df.iloc[sl, [0, 1]] = df.iloc[sl, [0, 1]] + 100
ldf.iloc[sl, [0, 1]] = ldf.iloc[sl, [0, 1]] + 100
assert equals(ldf, df)
sl = slice(5, 8)
df.iloc[sl, 2] = df.iloc[sl, 2].str.pad(width=9, side="both", fillchar="-")
ldf.iloc[sl, 2] = ldf.iloc[sl, 2].str.pad(
width=9, side="both", fillchar="-"
)
assert equals(ldf, df)
sl = slice(1, 3)
df.iloc[sl, 2] = "fill"
ldf.iloc[sl, 2] = "fill"
assert equals(ldf, df)
pd_mask = (df["a"] % 3 == 0).values
lp_mask = ldf["a"] % 3 == 0
assert equals(ldf.iloc[lp_mask, [0, 1]], df.iloc[pd_mask, [0, 1]])
df.iloc[pd_mask, [0, 1]] = df.iloc[pd_mask, 0:2] + 100
ldf.iloc[lp_mask, [0, 1]] = ldf.iloc[lp_mask, 0:2] + 100
assert equals(ldf, df)
| [
"pandas.RangeIndex",
"legate.pandas.DataFrame",
"pandas.StringDtype",
"numpy.random.permutation",
"tests.utils.equals"
] | [((726, 749), 'pandas.RangeIndex', 'pd.RangeIndex', (['(1)', '(21)', '(2)'], {}), '(1, 21, 2)\n', (739, 749), True, 'import pandas as pd\n'), ((755, 779), 'pandas.RangeIndex', 'pd.RangeIndex', (['(21)', '(1)', '(-2)'], {}), '(21, 1, -2)\n', (768, 779), True, 'import pandas as pd\n'), ((1196, 1212), 'legate.pandas.DataFrame', 'lp.DataFrame', (['df'], {}), '(df)\n', (1208, 1212), True, 'from legate import pandas as lp\n'), ((1225, 1262), 'tests.utils.equals', 'equals', (['ldf.iloc[:, 0]', 'df.iloc[:, 0]'], {}), '(ldf.iloc[:, 0], df.iloc[:, 0])\n', (1231, 1262), False, 'from tests.utils import equals\n'), ((1274, 1315), 'tests.utils.equals', 'equals', (['ldf.iloc[:, [1]]', 'df.iloc[:, [1]]'], {}), '(ldf.iloc[:, [1]], df.iloc[:, [1]])\n', (1280, 1315), False, 'from tests.utils import equals\n'), ((1327, 1374), 'tests.utils.equals', 'equals', (['ldf.iloc[:, [1, 3]]', 'df.iloc[:, [1, 3]]'], {}), '(ldf.iloc[:, [1, 3]], df.iloc[:, [1, 3]])\n', (1333, 1374), False, 'from tests.utils import equals\n'), ((1386, 1478), 'tests.utils.equals', 'equals', (['ldf.iloc[:, [True, False, True, False]]', 'df.iloc[:, [True, False, True, False]]'], {}), '(ldf.iloc[:, [True, False, True, False]], df.iloc[:, [True, False, \n True, False]])\n', (1392, 1478), False, 'from tests.utils import equals\n'), ((1508, 1547), 'tests.utils.equals', 'equals', (['ldf.iloc[:, 0:]', 'df.iloc[:, 0:]'], {}), '(ldf.iloc[:, 0:], df.iloc[:, 0:])\n', (1514, 1547), False, 'from tests.utils import equals\n'), ((1559, 1598), 'tests.utils.equals', 'equals', (['ldf.iloc[:, :3]', 'df.iloc[:, :3]'], {}), '(ldf.iloc[:, :3], df.iloc[:, :3])\n', (1565, 1598), False, 'from tests.utils import equals\n'), ((1689, 1736), 'tests.utils.equals', 'equals', (['ldf.iloc[3:-4, 1:3]', 'df.iloc[3:-4, 1:3]'], {}), '(ldf.iloc[3:-4, 1:3], df.iloc[3:-4, 1:3])\n', (1695, 1736), False, 'from tests.utils import equals\n'), ((1748, 1797), 'tests.utils.equals', 'equals', (['ldf.iloc[-6:-4, 1:3]', 'df.iloc[-6:-4, 1:3]'], {}), 
'(ldf.iloc[-6:-4, 1:3], df.iloc[-6:-4, 1:3])\n', (1754, 1797), False, 'from tests.utils import equals\n'), ((1809, 1854), 'tests.utils.equals', 'equals', (['ldf.iloc[:-4, 1:3]', 'df.iloc[:-4, 1:3]'], {}), '(ldf.iloc[:-4, 1:3], df.iloc[:-4, 1:3])\n', (1815, 1854), False, 'from tests.utils import equals\n'), ((1866, 1909), 'tests.utils.equals', 'equals', (['ldf.iloc[3:, 1:3]', 'df.iloc[3:, 1:3]'], {}), '(ldf.iloc[3:, 1:3], df.iloc[3:, 1:3])\n', (1872, 1909), False, 'from tests.utils import equals\n'), ((2017, 2032), 'tests.utils.equals', 'equals', (['ldf', 'df'], {}), '(ldf, df)\n', (2023, 2032), False, 'from tests.utils import equals\n'), ((2105, 2120), 'tests.utils.equals', 'equals', (['ldf', 'df'], {}), '(ldf, df)\n', (2111, 2120), False, 'from tests.utils import equals\n'), ((2236, 2251), 'tests.utils.equals', 'equals', (['ldf', 'df'], {}), '(ldf, df)\n', (2242, 2251), False, 'from tests.utils import equals\n'), ((2367, 2382), 'tests.utils.equals', 'equals', (['ldf', 'df'], {}), '(ldf, df)\n', (2373, 2382), False, 'from tests.utils import equals\n'), ((2524, 2539), 'tests.utils.equals', 'equals', (['ldf', 'df'], {}), '(ldf, df)\n', (2530, 2539), False, 'from tests.utils import equals\n'), ((2750, 2765), 'tests.utils.equals', 'equals', (['ldf', 'df'], {}), '(ldf, df)\n', (2756, 2765), False, 'from tests.utils import equals\n'), ((2857, 2872), 'tests.utils.equals', 'equals', (['ldf', 'df'], {}), '(ldf, df)\n', (2863, 2872), False, 'from tests.utils import equals\n'), ((2957, 3016), 'tests.utils.equals', 'equals', (['ldf.iloc[lp_mask, [0, 1]]', 'df.iloc[pd_mask, [0, 1]]'], {}), '(ldf.iloc[lp_mask, [0, 1]], df.iloc[pd_mask, [0, 1]])\n', (2963, 3016), False, 'from tests.utils import equals\n'), ((3150, 3165), 'tests.utils.equals', 'equals', (['ldf', 'df'], {}), '(ldf, df)\n', (3156, 3165), False, 'from tests.utils import equals\n'), ((794, 809), 'numpy.random.permutation', 'permutation', (['(10)'], {}), '(10)\n', (805, 809), False, 'from numpy.random import 
permutation\n'), ((1127, 1143), 'pandas.StringDtype', 'pd.StringDtype', ([], {}), '()\n', (1141, 1143), True, 'import pandas as pd\n')] |
import glob
import os
import pandas as pd
import numpy as np
import shutil
import librosa
from tqdm import tqdm
def extract_feature(file_name, **kwargs):
    """
    Extract feature from audio file `file_name`
        Features supported:
            - MFCC (mfcc)
            - Chroma (chroma)
            - MEL Spectrogram Frequency (mel)
            - Contrast (contrast)
            - Tonnetz (tonnetz)
        e.g:
        `features = extract_feature(path, mel=True, mfcc=True)`

    Each requested feature is averaged over time (axis=0) and all selected
    features are concatenated into one 1-D numpy vector (order: mfcc,
    chroma, mel, contrast, tonnetz).
    """
    # Flags default to None (falsy) when the keyword is not supplied.
    mfcc = kwargs.get("mfcc")
    chroma = kwargs.get("chroma")
    mel = kwargs.get("mel")
    contrast = kwargs.get("contrast")
    tonnetz = kwargs.get("tonnetz")
    X, sample_rate = librosa.core.load(file_name)
    # The magnitude STFT is only needed by the chroma and contrast features.
    if chroma or contrast:
        stft = np.abs(librosa.stft(X))
    result = np.array([])
    if mfcc:
        mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
        result = np.hstack((result, mfccs))
    if chroma:
        chroma = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T,axis=0)
        result = np.hstack((result, chroma))
    if mel:
        mel = np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T,axis=0)
        result = np.hstack((result, mel))
    if contrast:
        contrast = np.mean(librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T,axis=0)
        result = np.hstack((result, contrast))
    if tonnetz:
        # Tonnetz is computed on the harmonic component of the signal.
        tonnetz = np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X), sr=sample_rate).T,axis=0)
        result = np.hstack((result, tonnetz))
    return result
dirname = "data"
if not os.path.isdir(dirname):
os.mkdir(dirname)
csv_files = glob.glob("*.csv")
for j, csv_file in enumerate(csv_files):
print("[+] Preprocessing", csv_file)
df = pd.read_csv(csv_file)
# only take filename and gender columns
new_df = df[["filename", "gender"]]
print("Previously:", len(new_df), "rows")
# take only male & female genders (i.e droping NaNs & 'other' gender)
new_df = new_df[np.logical_or(new_df['gender'] == 'female', new_df['gender'] == 'male')]
print("Now:", len(new_df), "rows")
new_csv_file = os.path.join(dirname, csv_file)
# save new preprocessed CSV
new_df.to_csv(new_csv_file, index=False)
# get the folder name
folder_name, _ = csv_file.split(".")
audio_files = glob.glob(f"{folder_name}/{folder_name}/*")
all_audio_filenames = set(new_df["filename"])
for i, audio_file in tqdm(list(enumerate(audio_files)), f"Extracting features of {folder_name}"):
splited = os.path.split(audio_file)
# audio_filename = os.path.join(os.path.split(splited[0])[-1], splited[-1])
audio_filename = f"{os.path.split(splited[0])[-1]}/{splited[-1]}"
# print("audio_filename:", audio_filename)
if audio_filename in all_audio_filenames:
# print("Copyying", audio_filename, "...")
src_path = f"{folder_name}/{audio_filename}"
target_path = f"{dirname}/{audio_filename}"
#create that folder if it doesn't exist
if not os.path.isdir(os.path.dirname(target_path)):
os.mkdir(os.path.dirname(target_path))
features = extract_feature(src_path, mel=True)
target_filename = target_path.split(".")[0]
np.save(target_filename, features)
# shutil.copyfile(src_path, target_path) | [
"librosa.feature.chroma_stft",
"os.mkdir",
"numpy.save",
"librosa.feature.spectral_contrast",
"librosa.feature.mfcc",
"os.path.isdir",
"pandas.read_csv",
"os.path.dirname",
"librosa.feature.melspectrogram",
"librosa.effects.harmonic",
"numpy.hstack",
"librosa.core.load",
"numpy.array",
"nu... | [((1661, 1679), 'glob.glob', 'glob.glob', (['"""*.csv"""'], {}), "('*.csv')\n", (1670, 1679), False, 'import glob\n'), ((680, 708), 'librosa.core.load', 'librosa.core.load', (['file_name'], {}), '(file_name)\n', (697, 708), False, 'import librosa\n'), ((788, 800), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (796, 800), True, 'import numpy as np\n'), ((1601, 1623), 'os.path.isdir', 'os.path.isdir', (['dirname'], {}), '(dirname)\n', (1614, 1623), False, 'import os\n'), ((1629, 1646), 'os.mkdir', 'os.mkdir', (['dirname'], {}), '(dirname)\n', (1637, 1646), False, 'import os\n'), ((1772, 1793), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (1783, 1793), True, 'import pandas as pd\n'), ((2149, 2180), 'os.path.join', 'os.path.join', (['dirname', 'csv_file'], {}), '(dirname, csv_file)\n', (2161, 2180), False, 'import os\n'), ((2344, 2387), 'glob.glob', 'glob.glob', (['f"""{folder_name}/{folder_name}/*"""'], {}), "(f'{folder_name}/{folder_name}/*')\n", (2353, 2387), False, 'import glob\n'), ((919, 945), 'numpy.hstack', 'np.hstack', (['(result, mfccs)'], {}), '((result, mfccs))\n', (928, 945), True, 'import numpy as np\n'), ((1065, 1092), 'numpy.hstack', 'np.hstack', (['(result, chroma)'], {}), '((result, chroma))\n', (1074, 1092), True, 'import numpy as np\n'), ((1204, 1228), 'numpy.hstack', 'np.hstack', (['(result, mel)'], {}), '((result, mel))\n', (1213, 1228), True, 'import numpy as np\n'), ((1358, 1387), 'numpy.hstack', 'np.hstack', (['(result, contrast)'], {}), '((result, contrast))\n', (1367, 1387), True, 'import numpy as np\n'), ((1528, 1556), 'numpy.hstack', 'np.hstack', (['(result, tonnetz)'], {}), '((result, tonnetz))\n', (1537, 1556), True, 'import numpy as np\n'), ((2018, 2089), 'numpy.logical_or', 'np.logical_or', (["(new_df['gender'] == 'female')", "(new_df['gender'] == 'male')"], {}), "(new_df['gender'] == 'female', new_df['gender'] == 'male')\n", (2031, 2089), True, 'import numpy as np\n'), ((2558, 2583), 
'os.path.split', 'os.path.split', (['audio_file'], {}), '(audio_file)\n', (2571, 2583), False, 'import os\n'), ((758, 773), 'librosa.stft', 'librosa.stft', (['X'], {}), '(X)\n', (770, 773), False, 'import librosa\n'), ((3309, 3343), 'numpy.save', 'np.save', (['target_filename', 'features'], {}), '(target_filename, features)\n', (3316, 3343), True, 'import numpy as np\n'), ((838, 890), 'librosa.feature.mfcc', 'librosa.feature.mfcc', ([], {'y': 'X', 'sr': 'sample_rate', 'n_mfcc': '(40)'}), '(y=X, sr=sample_rate, n_mfcc=40)\n', (858, 890), False, 'import librosa\n'), ((986, 1037), 'librosa.feature.chroma_stft', 'librosa.feature.chroma_stft', ([], {'S': 'stft', 'sr': 'sample_rate'}), '(S=stft, sr=sample_rate)\n', (1013, 1037), False, 'import librosa\n'), ((1127, 1176), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', (['X'], {'sr': 'sample_rate'}), '(X, sr=sample_rate)\n', (1157, 1176), False, 'import librosa\n'), ((1273, 1330), 'librosa.feature.spectral_contrast', 'librosa.feature.spectral_contrast', ([], {'S': 'stft', 'sr': 'sample_rate'}), '(S=stft, sr=sample_rate)\n', (1306, 1330), False, 'import librosa\n'), ((2696, 2721), 'os.path.split', 'os.path.split', (['splited[0]'], {}), '(splited[0])\n', (2709, 2721), False, 'import os\n'), ((3096, 3124), 'os.path.dirname', 'os.path.dirname', (['target_path'], {}), '(target_path)\n', (3111, 3124), False, 'import os\n'), ((3152, 3180), 'os.path.dirname', 'os.path.dirname', (['target_path'], {}), '(target_path)\n', (3167, 3180), False, 'import os\n'), ((1456, 1483), 'librosa.effects.harmonic', 'librosa.effects.harmonic', (['X'], {}), '(X)\n', (1480, 1483), False, 'import librosa\n')] |
"""
Module for various types of particle emission in WarpX.
"""
import collections
# import collections
import logging
import warnings
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numba
import numpy as np
from pywarpx import callbacks, picmi
import skimage.measure
from mewarpx.mespecies import Species
from mewarpx.mwxrun import mwxrun
from mewarpx.utils_store import appendablearray, parallel_util
import mewarpx.utils_store.mwxconstants as constants
import mewarpx.utils_store.util as mwxutil
# Get module-level logger
logger = logging.getLogger(__name__)
class Injector(object):
"""Base class for injection.
All injectors must include an emitter object, and should also include a
'name' field for diagnostics.
"""
emitter = None
# This is overridden if a diagnostic is installed to record injected
# current.
injector_diag = None
# fields is used by the diags.FluxInjectorDiag to know what to write to
# the CSV file. It can be overridden by child classes, but is not currently
# adjustable by the user.
# IF CHANGING THIS, CHANGE IN self.record_injectedparticles() AS WELL.
fields = ['t', 'step', 'species_id', 'V_e', 'n', 'q', 'E_total']
# @staticmethod
# def setup_warp():
# """Stuff that needs to be set before injectors are used."""
# # Update warp derived quantities if needed.
# warp.derivqty()
# # Record E_total
# if 'E_total' not in warp.Species._addedpids:
# warp.Species.addpid('E_total')
@staticmethod
def compute_npart(npart_total, unique_particles):
"""Compute number of particles to insert at a given timestep.
This function translates between total particle number and this
processor's particle numbers. If particles are designated "unique",
none are discarded by WarpX so we have logic here to give the processor
the right number of particles, with additional logic to load-balance
the remainder. If unique_particles is False, WarpX essentially does the
particle discarding, so each processor should inject the whole number
of particles to start.
Arguments:
npart_total (int): Integer number of total particles to insert this
timestep.
unique_particles (bool): If True, WarpX keeps all particles sent to
it. If False, it only keeps a processor's fraction of total
particles.
Returns:
npart (int): Integer number of total particles for this processor
to insert this timestep.
"""
if not unique_particles:
return npart_total
npart = npart_total // mwxrun.n_procs
# Early-numbered processors add one additional particle if needed.
# Particles get re-distributed between processors after injection, so
# this shouldn't load-imbalance anything.
if mwxrun.me < (npart_total % mwxrun.n_procs):
npart += 1
return npart
def getvoltage_e(self):
"""Return the electrical voltage of the injector. Defaults to returning
0, unless an emitter is associated with this injector (it should be) in
which case return the emitter's electrical voltage.
Child classes can override this if needed.
"""
if self.emitter is not None:
return self.emitter.getvoltage_e()
return 0.
def init_injectedparticles(self, fieldlist):
"""Set up the injected particles array. Call before
append_injectedparticles.
Arguments:
fieldlist (list): List of string titles for the fields. Order is
important; it must match the order for future particle appends
that are made.
"""
self._injectedparticles_fields = fieldlist
self._injectedparticles_data = appendablearray.AppendableArray(
typecode='d', unitshape=[len(fieldlist)])
def record_injectedparticles(self, species, w, E_total=None,
n=None):
"""Handles transforming raw particle information to the information
used to record particles as a function of time. Also handles parallel
sum and appending to the data array the current amount of injection.
Note:
Assumes the fixed form of fields given in Injector(). Doesn't
check since this is called many times.
Since a parallelsum is performed, call this with only the species
argument if no particles are being added by this processor.
Arguments:
species (:class:`mewarpx.mespecies.Species`): Species of particle
w (np.ndarray or float): Array of length npart with particle weights
E_total (np.ndarray or float): Array of length npart with E_total
values.
n (int): Number of macroparticles, _only_ needed if overriding the
length of E_total. This is useful mostly in the case that
E_total is already summed over particles, in which case a
single number can be passed for it rather than an array.
"""
if n is not None and np.size(w) != 1:
raise RuntimeError("Cannot pass array for w and specify n")
if n is None and np.size(w) == 1:
raise RuntimeError("Cannot pass single value for w and not specify n")
data = np.zeros(7)
# time for current step
data[0] = mwxrun.get_it() * mwxrun.get_dt()
# current step
data[1] = mwxrun.get_it()
# species ID
data[2] = species.species_number
# voltage of emitter
data[3] = self.getvoltage_e()
# number of macroparticles
data[4] = n if np.size(w) == 1 else np.size(w)
# total charge emitted
data[5] = species.sq * np.sum(w)
if E_total is not None:
data[6] = np.sum(E_total)
self.append_injectedparticles(data)
    def append_injectedparticles(self, data):
        """Append one or more lines of injected particles data.

        Arguments:
            data (np.ndarray): Array of shape (m) or (n, m) where m is the
                number of fields and n is the number of rows of data to append.
        """
        # Thin delegation to the AppendableArray created in setup; no shape
        # validation is done here since this is called every injection step.
        self._injectedparticles_data.append(data)
def get_injectedparticles(self, clear=False):
"""Retrieve a copy of injectedparticles data.
Arguments:
clear (bool): If True, clear the particle data rows entered (field
names are still initialized as before). Default False.
Returns:
injectedparticles_dict (collections.OrderedDict): Keys are the
originally passed field strings for lost particles. Values are
an (n)-shape numpy array for each field.
"""
lpdata = self._injectedparticles_data.data()
# Sum all except t/step/species_id/V_e from all processors
lpdata[:,4:] = parallel_util.parallelsum(np.array(lpdata[:,4:]))
lpdict = collections.OrderedDict(
[(fieldname, np.array(lpdata[:, ii], copy=True))
for ii, fieldname in enumerate(self._injectedparticles_fields)])
if clear:
self._injectedparticles_data.cleardata()
return lpdict
class FixedNumberInjector(Injector):
    """Inject n particles every t timesteps."""

    def __init__(self, emitter, species, npart,
                 injectfreq=None, injectoffset=1,
                 weight=0., rseed=None,
                 name=None, unique_particles=True):
        """Sets up user-specified injection with fixed timestep and weights.

        Arguments:
            emitter (:class:`mewarpx.emission.Emitter`): Emitter object that
                will specify positions and velocities of particles to inject.
            species (picmi.Species): Premade species to inject particles of.
            npart (int): Number of particles to inject total
            injectfreq (int): Number of steps to wait for next injection.
                Default infinity.
            injectoffset (int): First timestep to inject. Default 1 (the
                first possible timestep in WarpX).
            weight (float): Macroparticle weight to be introduced.
            rseed (int): If specified, all injection should be repeatable using
                this rseed. At present each set of injected particles will have
                the same initial position and velocities as the previous set.
            name (str): Injector name for diagnostics. Constructed from
                speciesname if not given.
            unique_particles (bool): Whether WarpX will keep all particles
                given it from every processor (True) or keep only a fraction of
                particles based on processor count (False).
        """
        # Save class parameters
        self.emitter = emitter
        self.species = species
        self.npart_total = npart
        self.injectfreq = injectfreq
        # np.inf makes the injection condition below never fire after step 0
        if self.injectfreq is None:
            self.injectfreq = np.inf
        self.injectoffset = injectoffset
        self.weight = weight
        self.rseed = rseed
        self.name = name
        if self.name is None:
            self.name = "fixed_injector_" + self.species.name
        self.unique_particles = unique_particles
        # Bug fix: the original concatenated f-strings rendered as e.g.
        # "every 10timesteps." -- a space was missing between the pieces.
        logger.info(
            f"Fixed injection of {self.npart_total} particles, "
            f"weight {self.weight}, every {self.injectfreq} "
            f"timesteps."
        )
        callbacks.installparticleinjection(self.inject_particles)
        # add E_total PID to this species
        self.species.add_pid("E_total")

    def inject_particles(self):
        """Perform the actual injection!"""
        effective_it = mwxrun.get_it() - self.injectoffset
        # Inject only on steps injectoffset, injectoffset + injectfreq, ...
        if effective_it >= 0 and effective_it % self.injectfreq == 0:
            # Adjust npart for processor number if needed
            npart = self.compute_npart(
                npart_total=self.npart_total,
                unique_particles=self.unique_particles
            )
            # TODO randomdt and velhalfstep are False simply because they're
            # not supported at present
            particles_dict = self.emitter.get_newparticles(
                npart=npart, w=self.weight,
                q=self.species.sq, m=self.species.sm,
                rseed=self.rseed,
                randomdt=False, velhalfstep=False
            )
            logger.info(f"Inject {len(particles_dict['x'])} particles")
            # Note some parts of WarpX call the variables ux and some parts vx,
            # and they're referred to as momenta. But I don't see anywhere
            # they're actually used as momenta including the particle mass -
            # the actual update is in Source/Particles/Pusher/UpdatePosition.H
            mwxrun.sim_ext.add_particles(
                self.species.name,
                x=particles_dict['x'],
                y=particles_dict['y'],
                z=particles_dict['z'],
                ux=particles_dict['vx'],
                uy=particles_dict['vy'],
                uz=particles_dict['vz'],
                w=particles_dict['w'],
                E_total=particles_dict['E_total'],
                unique_particles=self.unique_particles
            )
            if self.injector_diag is not None:
                self.record_injectedparticles(
                    species=self.species,
                    w=particles_dict['w'],
                    E_total=particles_dict['E_total'],
                )
class ThermionicInjector(Injector):
    """Performs standard every-timestep injection from a thermionic cathode."""

    def __init__(self, emitter, species, npart_per_cellstep, T=None,
                 WF=None, A=constants.A0*1e4, use_Schottky=True,
                 allow_poisson=False, wfac=1.0,
                 name=None, profile_decorator=None,
                 unique_particles=True):
        """Sets up user-specified injection for warpX.

        Arguments:
            emitter (:class:`mewarpx.emission.Emitter`): Emitter object that
                will specify positions and velocities of particles to inject.
            species (mewarpx.mespecies.Species): A premade species. Note only
                electrons will actually give physically meaningful weight
                calculations.
            npart_per_cellstep (int): Number of macroparticles to inject per
                cell on the cathode surface per timestep
            T (float): Cathode temperature (K). Uses emitter T if not specified.
            WF (float): Cathode work function (eV). Uses WF of the conductor
                associated with the emitter if not specified.
            A (float): Coefficient of emission in Amp/m^2/K^2. Default is
                the theoretical max, approximately 1.2e6.
            use_Schottky (bool): Flag specifying whether or not to augment the
                emission current via field-dependent particle weights.
                Defaults to True.
            allow_poisson (bool): If True and < npart_per_cellstep electrons
                would be injected per cell, inject whole electrons with a
                Poisson distribution. If False, inject fractions of electrons.
                Default False.
            wfac (float): Constant factor applied to variable particle
                weights, which changes the actual injection weight from the
                physically calculated quantity. Currently used only for
                testing, or for e.g. artificially lowering weight of trace
                particles.
            name (str or None): Injector name for diagnostics. Constructed from
                speciesname if not given.
            profile_decorator (decorator): A decorator used to profile the
                injection methods and related functions.
            unique_particles (bool): Whether WarpX will keep all particles
                given it from every processor (True) or keep only a fraction of
                particles based on processor count (False). Default True.
        """
        # sanity check species
        if species.particle_type != 'electron':
            raise AttributeError(
                "Thermionic emission is only applicable with electrons as the "
                f"injection species, but species type {species.particle_type} "
                "was given."
            )
        # Save class parameters
        self.emitter = emitter
        self.species = species
        self.T = T
        self.WF = WF
        self.A = A
        self.use_Schottky = use_Schottky
        self.wfac = wfac
        # Get values from the emitter and its conductor if not specified
        if self.T is None:
            self.T = self.emitter.T
        if self.WF is None:
            self.WF = self.emitter.conductor.WF
        if profile_decorator is not None:
            self.inject_particles = profile_decorator(self.inject_particles)
            self.record_injectedparticles = (
                profile_decorator(self.record_injectedparticles)
            )
        self.name = name
        if self.name is None:
            self.name = "thermionic_injector_" + self.species.name
        self.unique_particles = unique_particles
        area = self.emitter.area
        dt = mwxrun.get_dt()
        if (area is None) or (area <= 0.0) or (dt <= 0.0):
            raise ValueError(f"area {area} or dt {dt}"
                             f" is invalid for injection.")
        # Determine weight and injection numbers from the Richardson-Dushman
        # current density J_RD over the emitter area and one timestep.
        electrons_per_step = (mwxutil.J_RD(self.T, self.WF, self.A)
                              * area * dt / picmi.constants.q_e)
        logger.info(
            f"Setting up thermionic paticle injection. Area {area:.3g} m^2, "
            f"dt {dt:.3e} s, J {mwxutil.J_RD(self.T, self.WF, self.A):.3g} "
            "A/m^2."
        )
        logger.info(
            "Emission current corresponds to injection of "
            f"{electrons_per_step:.2e} electrons per timestep"
        )
        max_injections = int(round(npart_per_cellstep *
                                   self.emitter.cell_count))
        # If it was requested to inject more particles than we have electrons,
        # we instead inject electrons with a poisson distribution if allowed.
        if electrons_per_step < max_injections and allow_poisson:
            self.ptcl_per_step = electrons_per_step
            self.weight = self.wfac
            self.poisson = True
            logger.info(
                "Using stochastic injection of electrons with "
                "Poisson sampling"
            )
        else:
            self.ptcl_per_step = max_injections
            self.weight = self.wfac * electrons_per_step / self.ptcl_per_step
            self.poisson = False
            logger.info(
                f"Using deterministic injection of {self.ptcl_per_step} "
                f"particles per step, each with weight {self.weight}"
            )
        # create new species that will be used to properly distribute new
        # particles and retrieve the electric field at their injection sites in
        # order to calculate Schottky enhancement
        if self.use_Schottky:
            self.injection_species = Species(
                particle_type='electron', name=self.species.name+'_injection'
            )
        else:
            # without Schottky weighting, particles go straight into the
            # real species container
            self.injection_species = self.species
        callbacks.installparticleinjection(self.inject_particles)
        # add E_total PID to this species
        self.species.add_pid("E_total")
        self.injection_species.add_pid("E_total")
        if self.use_Schottky:
            # add PIDs to hold the normal vector
            # TODO work out a better way to handle these PIDs since this is not
            # a great use of memory
            self.species.add_pid("norm_x")
            self.species.add_pid("norm_y")
            self.species.add_pid("norm_z")
            self.injection_species.add_pid("norm_x")
            self.injection_species.add_pid("norm_y")
            self.injection_species.add_pid("norm_z")

    def inject_particles(self):
        """Perform the actual injection!"""
        # Poisson mode draws a whole-electron count each step; deterministic
        # mode injects a fixed count with fractional weights.
        if self.poisson:
            num_injections = np.random.poisson(self.ptcl_per_step)
        else:
            num_injections = self.ptcl_per_step
        # Adjust npart for processor number if needed
        npart = self.compute_npart(
            npart_total=num_injections,
            unique_particles=self.unique_particles
        )
        # TODO randomdt and velhalfstep are False simply because they're
        # not supported at present
        particles_dict = self.emitter.get_newparticles(
            npart=npart, w=self.weight, q=self.species.sq, m=self.species.sm,
            randomdt=False, velhalfstep=False
        )
        extra_pids = {}
        extra_pids['E_total'] = particles_dict['E_total']
        extra_pids['w'] = particles_dict['w']
        if self.use_Schottky:
            # Determine the local surface normal for each particle
            normal_vectors = self.emitter.get_normals(
                particles_dict['x'], particles_dict['y'], particles_dict['z']
            )
            extra_pids['norm_x'] = normal_vectors[:, 0]
            extra_pids['norm_y'] = normal_vectors[:, 1]
            extra_pids['norm_z'] = normal_vectors[:, 2]
        # Note some parts of WarpX call the variables ux and some parts vx,
        # and they're referred to as momenta. But I don't see anywhere
        # they're actually used as momenta including the particle mass -
        # the actual update is in Source/Particles/Pusher/UpdatePosition.H
        mwxrun.sim_ext.add_particles(
            self.injection_species.name,
            x=particles_dict['x'],
            y=particles_dict['y'],
            z=particles_dict['z'],
            ux=particles_dict['vx'],
            uy=particles_dict['vy'],
            uz=particles_dict['vz'],
            unique_particles=self.unique_particles,
            **extra_pids
        )
        if self.use_Schottky:
            # Up-weight the particles by the local Schottky factor, calculated
            # as exp[sqrt(e / 4*pi*eps0) / (kT) * sqrt(max(-E, 0))]
            # NOTE(review): this prefactor uses self.emitter.T while the
            # emission current above uses self.T; they differ if a T override
            # was passed to the injector -- confirm which is intended.
            pre_fac = (
                np.sqrt(constants.e / (4.0 * np.pi * constants.epsilon_0))
                / (constants.kb_eV * self.emitter.T)
            )
            mwxrun.calc_Schottky_weight(
                self.injection_species.name, pre_fac
            )
            # get the total injected weight and energy after re-weighting,
            # summing over all tiles/grids returned for this processor
            total_weight = 0.
            total_energy = 0.
            npart = 0
            weight_arrays = mwxrun.sim_ext.get_particle_arrays(
                self.injection_species.name, 'w', 0
            )
            ux_arrays = mwxrun.sim_ext.get_particle_arrays(
                self.injection_species.name, 'ux', 0
            )
            uy_arrays = mwxrun.sim_ext.get_particle_arrays(
                self.injection_species.name, 'uy', 0
            )
            uz_arrays = mwxrun.sim_ext.get_particle_arrays(
                self.injection_species.name, 'uz', 0
            )
            for ii, w in enumerate(weight_arrays):
                npart += len(w)
                total_weight += np.sum(w)
                total_energy += np.sum(self.emitter._get_E_total(
                    ux_arrays[ii], uy_arrays[ii], uz_arrays[ii],
                    constants.e, constants.m_e, w
                ))
            # Move particles from temporary container to "real" container
            mwxrun.move_particles_between_species(
                self.injection_species.name, self.species.name
            )
        else:
            total_weight = np.sum(particles_dict['w'])
            total_energy = np.sum(particles_dict['E_total'])
        if self.injector_diag is not None:
            self.record_injectedparticles(
                species=self.species,
                w=total_weight,
                E_total=total_energy,
                n=npart
            )
class PlasmaInjector(Injector):
    """Inject particles at simulation start, or at regular timesteps, to
    seed a plasma. Can use any emitter object. The defining feature is that the
    2nd species positions and weights are copied from the first species, so the
    spatial distribution is always identical to start. Velocities are
    independent, however.
    """

    def __init__(self, emitter, species1, species2, npart, T_2=None,
                 plasma_density=None, ionization_frac=None,
                 P_neutral=None, T_neutral=None,
                 injectfreq=None, injectoffset=1,
                 rseed=None, name=None, unique_particles=True
                 ):
        """Initialize injection of a plasma with two species and given emitter.

        Arguments:
            emitter (:class:`mewarpx.emission.BaseEmitter`): BaseEmitter object
                that will specify positions and velocities of particles to
                inject.
            species1 (:class:`mewarpx.mespecies.Species`): First species, eg
                electron
            species2 (:class:`mewarpx.mespecies.Species`): Second species, eg ion
            npart (int): Number of macroparticles to inject total among all
                processors and species.
            T_2 (float): If specified, species2 will be injected at this
                temperature.
            plasma_density (float): Ion number density to inject. If using
                volumetric emitter, in m^(-3), if using surface emitter, in
                m^(-2)
            ionization_frac (float): Instead of plasma_density, use a specific
                ionization fraction of the neutral gas. Volumetric emitter
                only.
            P_neutral (float): If using ionization_frac only, the neutral gas
                density (*Torr*).
            T_neutral (float): If using ionization_frac only, the neutral gas
                temperature (K).
            injectfreq (int): Number of steps to wait for next injection.
                Default infinity.
            injectoffset (int): First timestep to inject. Default 1 (the first
                possible timestep in WarpX).
            rseed (int): If specified, all injection should be repeatable using
                this rseed. At present each set of injected particles will have
                the same initial position and velocities as the previous set.
            name (str or None): Injector name for diagnostics. Constructed from
                species names if not given.
            unique_particles (bool): Whether WarpX will keep all particles
                given it from every processor (True) or keep only a fraction of
                particles based on processor count (False). Default True.
        """
        # Save class parameters
        self.emitter = emitter
        # npart is split evenly between the two species
        self.npart_per_species = npart // 2
        self.species1 = species1
        self.species2 = species2
        self.T_2 = T_2
        if injectfreq is None:
            injectfreq = np.inf
        self.injectfreq = injectfreq
        self.injectoffset = injectoffset
        self.rseed = rseed
        self._calc_plasma_density(
            plasma_density=plasma_density,
            ionization_frac=ionization_frac,
            P_neutral=P_neutral,
            T_neutral=T_neutral,
        )
        self.name = name
        if self.name is None:
            self.name = (
                f"plasma_injector_{self.species1.name}_{self.species2.name}"
            )
        self.unique_particles = unique_particles
        logger.info(
            f"Plasma injection {self.name}: "
            f"{self.npart_per_species} particles each of {self.species1.name} "
            f"and {self.species2.name}, every {self.injectfreq} timesteps,"
        )
        # Surface emission: plasma_density is an area density (m^-2)
        if isinstance(self.emitter, Emitter):
            self.weight = (
                self.emitter.area * self.plasma_density / self.npart_per_species
            )
            warnings.warn(
                "Using a surface emitter with the PlasmaInjector has not been "
                "tested for accuracy."
            )
            logger.info(
                f"  full weight {self.weight:.4g}, surface density "
                f"{self.plasma_density:.4g} m^-2, area "
                f"{self.emitter.area:.4g} m^2."
            )
        # Volume emission: plasma_density is a volume density (m^-3)
        else:
            self.weight = (
                self.emitter.volume * self.plasma_density
                / self.npart_per_species
            )
            logger.info(
                f"  full weight {self.weight:.4g}, volume density "
                f"{self.plasma_density:.4g} m^-3, volume "
                f"{self.emitter.volume:.4g} m^3."
            )
            debye_length = mwxutil.plasma_Debye_length(
                self.emitter.T, self.plasma_density)
            logger.info(
                f"  Corresponding plasma Debye length is {debye_length:.3e} m."
            )
        callbacks.installparticleinjection(self.inject_particles)
        # add E_total PID to the species involved
        self.species1.add_pid("E_total")
        self.species2.add_pid("E_total")

    def _calc_plasma_density(self, plasma_density, ionization_frac, P_neutral,
                             T_neutral):
        """Helper function to separate out part of initialization.

        Sets self.plasma_density either directly or from an ionization
        fraction of the neutral gas; raises on inconsistent or invalid input.
        """
        self.plasma_density = plasma_density
        if ionization_frac is not None:
            if self.plasma_density is not None:
                raise ValueError(
                    "Specify ionization_frac or plasma_density, not both.")
            if (
                (P_neutral is None) or (P_neutral <= 0) or
                (T_neutral is None) or (T_neutral <= 0)
            ):
                raise ValueError("Must specify positive neutral pressure and "
                                 "temperature to use ionization_frac.")
            # ionization_frac is volumetric, so a surface (area) emitter
            # cannot be combined with it
            if isinstance(self.emitter, Emitter):
                raise RuntimeError("Cannot use ionization_frac with a surface"
                                   " (area-based) Emitter.")
            n_neutral = mwxutil.ideal_gas_density(P_neutral, T_neutral)
            self.plasma_density = n_neutral * ionization_frac
        if (self.plasma_density is None) or (self.plasma_density <= 0):
            raise ValueError("Invalid plasma_density {}".format(
                self.plasma_density))

    def inject_particles(self):
        """Inject particles, same position & weight for each."""
        effective_it = mwxrun.get_it() - self.injectoffset
        if effective_it >= 0 and effective_it % self.injectfreq == 0:
            # Adjust npart for processor number if needed
            npart = self.compute_npart(
                npart_total=self.npart_per_species,
                unique_particles=self.unique_particles
            )
            # TODO randomdt and velhalfstep are False simply because they're
            # not supported at present
            particles1_dict = self.emitter.get_newparticles(
                npart=npart, w=self.weight, q=self.species1.sq,
                m=self.species1.sm, rseed=self.rseed,
                randomdt=False, velhalfstep=False
            )
            # if requested get particles for species2 at the specified
            # temperature; the emitter T is swapped in temporarily and
            # restored right after sampling
            if self.T_2 is not None:
                T_temp, self.emitter.T = self.emitter.T, self.T_2
            # TODO randomdt and velhalfstep are False simply because they're
            # not supported at present
            particles2_dict = self.emitter.get_newparticles(
                npart=npart, w=self.weight, q=self.species2.sq,
                m=self.species2.sm, rseed=self.rseed,
                randomdt=False, velhalfstep=False
            )
            if self.T_2 is not None:
                self.emitter.T = T_temp
            # Copy positions and weights from species1 so both species share
            # the same spatial distribution; velocities stay independent.
            for key in ['x', 'y', 'z', 'w']:
                particles2_dict[key] = particles1_dict[key]
            logger.info(
                f"Inject {len(particles1_dict['x'])} particles each of "
                f"{self.species1.name} and {self.species2.name}."
            )
            mwxrun.sim_ext.add_particles(
                self.species1.name,
                x=particles1_dict['x'],
                y=particles1_dict['y'],
                z=particles1_dict['z'],
                ux=particles1_dict['vx'],
                uy=particles1_dict['vy'],
                uz=particles1_dict['vz'],
                w=particles1_dict['w'],
                E_total=particles1_dict['E_total'],
                unique_particles=self.unique_particles
            )
            mwxrun.sim_ext.add_particles(
                self.species2.name,
                x=particles2_dict['x'],
                y=particles2_dict['y'],
                z=particles2_dict['z'],
                ux=particles2_dict['vx'],
                uy=particles2_dict['vy'],
                uz=particles2_dict['vz'],
                w=particles2_dict['w'],
                E_total=particles2_dict['E_total'],
                unique_particles=self.unique_particles
            )
            if self.injector_diag is not None:
                self.record_injectedparticles(
                    species=self.species1,
                    w=particles1_dict['w'],
                    E_total=particles1_dict['E_total'],
                )
                self.record_injectedparticles(
                    species=self.species2,
                    w=particles2_dict['w'],
                    E_total=particles2_dict['E_total'],
                )
class BaseEmitter(object):
    """Parent class of both Emitter (which handles injection from a surface or
    other area) and VolumeEmitter (which handles injection throughout a
    volume).

    All BaseEmitter objects are expected to contain:

    - ``get_newparticles()`` returns coordinates, velocities, and KE in a
      dict - implemented here
    - ``_get_xv_coords()`` implements the subclass-specific particle
      injection logic
    - ``getvoltage()`` calculates the potential energy for particle
      energies.
    - ``getvoltage_e()`` calculates the potential energy for particle
      energies including the work function.
    - ``geoms`` is a property containing a list of simulation geometries
      supported by the Emitter, as strings
    """
    # Stores a list of functions that are used to adjust variable particle
    # weights.
    _wfnlist = None
    # Needs to be overridden to specify acceptable geometries
    geoms = []

    def __init__(self):
        """Check geometry and any other universal initialization.
        """
        self.solver_geom = self.check_geom()
        # # Use to get E and phi as needed.
        # self.particle_helper = ParticleValHelper()

    def check_geom(self):
        """Return the current solver geometry, or throw an error if it is
        unsupported by the Emitter.
        """
        geom = mwxrun.geom_str
        if geom not in self.geoms:
            raise ValueError(
                f"{geom} geometry not supported by this Emitter")
        return geom

    def getvoltage(self):
        """This should return the potential energy at the injection site for
        fully accurate energetics.
        """
        raise NotImplementedError

    def getvoltage_e(self):
        """This should return the potential energy, including work function,
        at the injection site for fully accurate energetics.
        """
        raise NotImplementedError

    @staticmethod
    def _gen_particle_dict(x, y, z, vx, vy, vz, w, **kwargs):
        """Change standard arrays into format expected by an injector.

        The transfer to an injector uses a dict so that optional
        arguments can be passed, or additional arguments added.

        Arguments:
            x (np.ndarray): n-shape position array
            y (np.ndarray): n-shape position array
            z (np.ndarray): n-shape position array
            vx (np.ndarray): n-shape velocity array
            vy (np.ndarray): n-shape velocity array
            vz (np.ndarray): n-shape velocity array
            w (float or np.ndarray): Particle weight, either constant or
                per-particle.
            kwargs (np.ndarray): These are simply copied into the dictionary
        """
        # np.ones_like(x) * w broadcasts a scalar weight to per-particle form
        particle_dict = {
            'x': x, 'y': y, 'z': z,
            'vx': vx, 'vy': vy, 'vz': vz,
            'w': np.ones_like(x) * w
        }
        particle_dict.update(kwargs)
        return particle_dict

    def _get_E_total(self, vx, vy, vz, q, m, w):
        """Calculate initial particle energies.

        Note:
            The conductor voltage V of the conductor the particle is ejected
            from must also be set for this object.

        Arguments:
            vx (np.ndarray): n-length array of velocity x-components
            vy (np.ndarray): n-length array of velocity y-components
            vz (np.ndarray): n-length array of velocity z-components
            q (float): Charge of the particles, usually species.sq.
            m (float): Mass of the particles, usually species.sm.
            w (np.ndarray): Variable particle weight, n-shape
        """
        V = self.getvoltage()
        # kinetic energy per particle plus potential energy qV, weighted
        E_total = w*(0.5*m*(vx**2 + vy**2 + vz**2) + q*V)
        return E_total

    def get_newparticles(self, npart, w, q, m, rseed=None,
                         randomdt=True, velhalfstep=True):
        """Return dict with coordinates, velocities, and KE

        Note:
            This function SHOULD (but doesn't in WarpX yet) handle the random
            timestep advancement and the negative half-step velocity push. They
            can be turned off if desired. No leapfrogging is done in the
            initial random advancement, which could be a (hopefully very minor)
            source of error.

        Arguments:
            npart (int): Total number of particles to inject
            w (float): Weight of the particles
            q (float): Charge of the particles, usually species.sq.
            m (float): Mass of the particles, usually species.sm. Equals
                electron mass if not otherwise specified.
            rseed (int): Random seed, if specified, can be used to provide
                reproducible results. Typically used for test / not production
                runs.
            randomdt (bool): If True, move each particle ahead a random delta t
                in [0, dt), advancing both position and velocity together.
                Default True.
            velhalfstep (bool): If True, push the velocities a negative
                half-step using the E-field. Aligns position and velocities
                correctly for the leapfrog algorithm.

        Returns:
            particle_dict (dict): Contains lists, each with length equal to the
            number of particles:

                - ``x``, ``y``, and ``z`` contain initial positions
                - ``vx``, ``vy``, and ``vz`` contain initial velocities
                - ``E_total`` contains initial energy of each particle, kinetic
                  & potential.
                - ``w`` contains particle weights.
        """
        # Save the global RNG state, seed deterministically, and derive
        # sub-seeds for the position/velocity draw and the time advance; the
        # state is restored at the end so callers' RNG streams are untouched.
        if rseed is not None:
            nprstate = np.random.get_state()
            np.random.seed(rseed)
            rseedxv = np.random.randint(1000000000)
            rseedt = np.random.randint(1000000000)
        else:
            rseedxv = None
            rseedt = None
        x, y, z, vx, vy, vz = self._get_xv_coords(
            npart=npart, m=m, rseed=rseedxv
        )
        particle_dict = self._gen_particle_dict(
            x=x, y=y, z=z, vx=vx, vy=vy, vz=vz, w=w
        )
        # Apply variable-weight adjustment functions in registration order;
        # each sees the dict with the previous function's weights applied.
        if self._wfnlist is not None:
            for wfn in self._wfnlist:
                particle_dict['w'] = wfn(particle_dict)
        particle_dict['E_total'] = self._get_E_total(
            vx=particle_dict['vx'],
            vy=particle_dict['vy'],
            vz=particle_dict['vz'],
            q=q, m=m, w=particle_dict['w']
        )
        # After E_total has been computed, we advance particles as needed.
        if randomdt:
            self.particle_helper.advance_random_deltat(
                particle_dict['x'], particle_dict['y'], particle_dict['z'],
                particle_dict['vx'], particle_dict['vy'], particle_dict['vz'],
                q=q, m=m, rseed=rseedt
            )
        if velhalfstep:
            self.particle_helper.push_v_minus_halfstep(
                particle_dict['x'], particle_dict['y'], particle_dict['z'],
                particle_dict['vx'], particle_dict['vy'], particle_dict['vz'],
                q=q, m=m
            )
        if rseed is not None:
            np.random.set_state(nprstate)
        return particle_dict

    def _update_params(self):
        """Update local parameters if needed based on WarpX settings.

        By default does nothing, but subclasses can implement it to update
        parameters before new particle coordinates are generated.
        """
        pass

    def _get_xv_coords(self, npart, m, rseed):
        """Per-subclass implementation of generating new particle data.

        See :func:`mewarpx.emission.BaseEmitter.get_newparticles` for details on
        arguments.

        Returns:
            x, y, z, vx, vy, vz (np.array): Each must be a 1D numpy array.
        """
        raise NotImplementedError(
            "BaseEmitter subclasses must implement _get_xv_coords")

    def add_wfn(self, wfn):
        """Add a variable weight function to the emitter.

        Arguments:
            wfn (function): This must take in a particle dictionary with
                positions, velocities, and existing weights, and return a new
                array of particle weights.
        """
        # Lazily create the list so the class-level default stays None
        if self._wfnlist is None:
            self._wfnlist = []
        self._wfnlist.append(wfn)
class Emitter(BaseEmitter):
    """Base class for emitters that inject from a surface.

    Every Emitter subclass is expected to provide:

    - ``area``: the emitting area in m^2
    - ``cell_count``: the number of mesh cells spanned by the Emitter
    - ``geoms``: list of simulation geometry strings the Emitter supports
    - ``_get_xv_coords()``: the subclass-specific particle injection logic
    - ``get_normals()``: outward surface normals for particle coordinates
    """
    area = None
    cell_count = None
    geoms = []

    def __init__(self, T, conductor=None, emission_type='thermionic'):
        """Shared initialization for all surface emitters.

        Arguments:
            T (float): Emitter temperature in Kelvin, which sets the emitted
                particle velocity distribution. If None, the temperature of
                the attached conductor is used when one is given.
            conductor (assemblies.Assembly): Conductor the emitter is attached
                to; supplies voltage and work function for energy accounting.
                If None, V_e is treated as 0 and a warning is printed since
                there's no current use case for that.
            emission_type (str): Velocity sampling distribution, as defined in
                :func:`mewarpx.utils_store.util.get_velocities`. Defaults to
                'thermionic'.
        """
        super(Emitter, self).__init__()
        # Fall back to the conductor's temperature when T is not given.
        self.T = T
        if self.T is None and conductor is not None:
            self.T = conductor.T
        if self.T is None:
            raise ValueError(
                "No value for T given to the Emitter. An Emitter T must be "
                "specified directly, or on a conductor passed to the Emitter."
            )
        self.conductor = conductor
        if self.conductor is None:
            warnings.warn("No conductor set for emitter. Power will not be "
                          "correct.")
        elif self.conductor.WF <= 0.:
            raise ValueError("Conductor WF must be set for emitters.")
        self.emission_type = emission_type

    def getvoltage(self):
        """Return the attached conductor's voltage, or 0 with no conductor."""
        return 0. if self.conductor is None else self.conductor.getvoltage()

    def getvoltage_e(self):
        """Electrical voltage includes WF, eg the Fermi level voltage."""
        if self.conductor is None:
            return 0.
        return self.conductor.getvoltage() + self.conductor.WF

    def get_normals(self, x, y, z):
        """Calculate local surface normal at specified coordinates.

        Arguments:
            x (np.ndarray): x-coordinates of emitted particles (in meters).
            y (np.ndarray): y-coordinates of emitted particles (in meters).
            z (np.ndarray): z-coordinates of emitted particles (in meters).

        Returns:
            normals (np.ndarray): nx3 array containing the outward surface
            normal vector at each particle location.
        """
        raise NotImplementedError("Normal calculations must be implemented by "
                                  "Emitter sub-classes.")
class ZPlaneEmitter(Emitter):
    """This is the standard injection for a planar cathode."""
    geoms = ['Z', 'XZ', 'XYZ']

    def __init__(self, conductor, T=None, xmin=None, xmax=None,
                 ymin=None, ymax=None, transverse_fac=1.0, **kwargs):
        """Initialize an emitter for a planar cathode.

        Arguments:
            conductor (:class:`mewarpx.assemblies.Assembly`): Conductor object,
                used to obtain work function and z coordinate/direction.
            T (float): Temperature in Kelvin for the emitter; determines
                velocities. If not specified the temperature of the conductor
                will be used.
            xmin (float): Minimum position of the rectangular emitter along x.
                Default mwxrun.xmin.
            xmax (float): Maximum position of the rectangular emitter along x.
                Default mwxrun.xmax.
            ymin (float): Minimum position of the rectangular emitter along y.
                Default mwxrun.ymin.
            ymax (float): Maximum position of the rectangular emitter along y.
                Default mwxrun.ymax.
            transverse_fac (float): Scale the transverse energy distribution by
                this factor. Default 1. See
                :func:`mewarpx.utils_store.util.get_velocities` for details.
            kwargs (dict): Any other keyword arguments supported by the parent
                Emitter constructor (such as "emission_type").
        """
        # Default initialization
        super(ZPlaneEmitter, self).__init__(T=T, conductor=conductor, **kwargs)
        self.z = conductor.z
        self.zsign = conductor.zsign
        self.transverse_fac = transverse_fac
        # Determine bounds
        # Will be 4 element array [xmin, xmax, ymin, ymax]
        self.bounds = []
        for coord, default in [(xmin, mwxrun.xmin),
                               (xmax, mwxrun.xmax),
                               (ymin, mwxrun.ymin),
                               (ymax, mwxrun.ymax)]:
            self.bounds.append(coord if coord is not None else default)
        # Compute area; dimensions absent from the solver geometry are
        # treated as spanning 1 m so injected charge stays well-defined.
        x_range = self.bounds[1] - self.bounds[0]
        y_range = self.bounds[3] - self.bounds[2]
        if self.solver_geom == 'Z':
            logger.info("x/y span is 1m for purposes of charge injection")
            x_range = 1.
            y_range = 1.
        if self.solver_geom == 'XZ':
            logger.info("y span is 1m for purposes of charge injection")
            y_range = 1.
        self.area = x_range * y_range
        # Compute cell count for the geometry's resolved dimensions
        if self.solver_geom == 'Z':
            self.cell_count = 1
        elif self.solver_geom == 'XZ':
            self.cell_count = self.area / mwxrun.dx
        else:
            self.cell_count = self.area / (mwxrun.dx * mwxrun.dy)

    def _get_xv_coords(self, npart, m, rseed):
        """Get particle coordinates given particle number.

        See :func:`mewarpx.emission.BaseEmitter.get_newparticles` for details.
        """
        # Save/seed/restore the global RNG state; the order of the two
        # np.random.randint draws below fixes reproducibility, so it must
        # not be changed.
        if rseed is not None:
            nprstate = np.random.get_state()
            np.random.seed(rseed)
            rseedv = np.random.randint(1000000000)
            rseedx = np.random.randint(1000000000)
        else:
            rseedv = None
            rseedx = None
        vx, vy, vz = mwxutil.get_velocities(
            npart, self.T, m=m, transverse_fac=self.transverse_fac,
            emission_type=self.emission_type, rseed=rseedv)
        x, y, z = mwxutil.get_positions(
            npart, xmin=self.bounds[0], xmax=self.bounds[1],
            ymin=self.bounds[2], ymax=self.bounds[3], z=self.z,
            rseed=rseedx)
        # Flip z velocities for anode emission. This appears to be faster than
        # an if statement for 10000 or fewer particles.
        vz = -self.zsign * vz
        if rseed is not None:
            np.random.set_state(nprstate)
        return x, y, z, vx, vy, vz

    def get_normals(self, x, y, z):
        """Calculate local surface normal at specified coordinates.

        Arguments:
            x (np.ndarray): x-coordinates of emitted particles (in meters).
            y (np.ndarray): y-coordinates of emitted particles (in meters).
            z (np.ndarray): z-coordinates of emitted particles (in meters).

        Returns:
            normals (np.ndarray): nx3 array containing the outward surface
            normal vector at each particle location.
        """
        # A plane normal to z: every normal is (0, 0, -zsign).
        normals = np.zeros((len(x), 3))
        normals[:, 2] = -self.zsign
        return normals
class XPlaneEmitter(Emitter):
    """Injection for a planar cathode emitting from the simulation side."""

    geoms = ['XZ', 'XYZ']

    def __init__(self, conductor, T=None, x=None, ymin=None, ymax=None,
                 zmin=None, zmax=None, transverse_fac=1.0, xdir=1, **kwargs):
        """Initialize an emitter for a planar cathode.

        Arguments:
            conductor (:class:`mewarpx.assemblies.Assembly`): Conductor object,
                used to obtain work function and z coordinate/direction.
            T (float): Temperature in Kelvin for the emitter; determines
                velocities. If not specified the temperature of the conductor
                will be used.
            x (float): Position of the emitter along the x axis. Default
                conductor.x if it exists otherwise conductor.xmin/max depending
                on xdir. If none of those attributes exist an error will be
                raised if x is not specified.
            ymin (float): Minimum position of the rectangular emitter along y.
                Default conductor.ymin if it exists otherwise mwxrun.ymin.
            ymax (float): Maximum position of the rectangular emitter along y.
                Default conductor.ymax if it exists otherwise mwxrun.ymax.
            zmin (float): Minimum position of the rectangular emitter along z.
                Default conductor.zmin if it exists otherwise mwxrun.zmin.
            zmax (float): Maximum position of the rectangular emitter along z.
                Default conductor.zmax if it exists otherwise mwxrun.zmax.
            transverse_fac (float): Scale the transverse energy distribution by
                this factor. Default 1. See
                :func:`mewarpx.utils_store.util.get_velocities` for details.
            xdir (int): 1 to emit in +x, -1 to emit in -x.
            kwargs (dict): Any other keyword arguments supported by the parent
                Emitter constructor (such as "emission_type").
        """
        # Default initialization
        super(XPlaneEmitter, self).__init__(T=T, conductor=conductor, **kwargs)
        self.transverse_fac = transverse_fac
        self.xdir = int(round(xdir))
        if self.xdir not in [-1, 1]:
            raise ValueError("xdir must be +1 or -1 for x-plane emitters.")
        self.x = x
        if self.x is None:
            # Fall back to conductor.x, then to the conductor face in the
            # emission direction (xmax for +x emission, xmin for -x).
            try:
                self.x = conductor.x
            except AttributeError:
                try:
                    if self.xdir == 1:
                        attr = 'xmax'
                    else:
                        attr = 'xmin'
                    self.x = getattr(conductor, attr)
                except AttributeError:
                    raise AttributeError(
                        'x must be specified for x-plane emitter if the '
                        f'attached conductor does not specify x or {attr}'
                    )
        # Determine bounds
        # Will be 4 element array [ymin, ymax, zmin, zmax]
        # Explicit name/value pairs replace the previous fragile
        # locals()[boundstr] lookup, which silently depends on the local
        # variable names matching the strings.
        self.bounds = []
        for boundstr, bound in [('ymin', ymin), ('ymax', ymax),
                                ('zmin', zmin), ('zmax', zmax)]:
            if bound is None:
                bound = (
                    getattr(mwxrun, boundstr)
                    if not hasattr(conductor, boundstr)
                    else getattr(conductor, boundstr)
                )
            self.bounds.append(bound)
        # Compute area
        y_range = self.bounds[1] - self.bounds[0]
        z_range = self.bounds[3] - self.bounds[2]
        if self.solver_geom == 'XZ':
            # Use the module logger for consistency with ZPlaneEmitter
            # (this previously used a bare print()).
            logger.info("y span is 1m for purposes of charge injection")
            y_range = 1.
        self.area = y_range * z_range
        # Compute cell count
        if self.solver_geom == 'XZ':
            self.cell_count = self.area / mwxrun.dz
        else:
            self.cell_count = self.area / (mwxrun.dz * mwxrun.dy)

    def _get_xv_coords(self, npart, m, rseed):
        """Get particle coordinates given particle number.

        See :func:`mewarpx.emission.BaseEmitter.get_newparticles` for details.
        """
        if rseed is not None:
            # Save global RNG state so seeded draws are reproducible without
            # disturbing the simulation's random stream.
            nprstate = np.random.get_state()
            np.random.seed(rseed)
            rseedv = np.random.randint(1000000000)
            rseedx = np.random.randint(1000000000)
        else:
            rseedv = None
            rseedx = None
        # in sampling the positions and the velocities the x and z coordinates
        # are swapped so that the same functions as for the ZPlaneEmitter can
        # be used
        vz, vy, vx = mwxutil.get_velocities(
            npart, self.T, m=m, transverse_fac=self.transverse_fac,
            emission_type=self.emission_type, rseed=rseedv)
        z, y, x = mwxutil.get_positions(
            npart, xmin=self.bounds[2], xmax=self.bounds[3],
            ymin=self.bounds[0], ymax=self.bounds[1], z=self.x,
            rseed=rseedx)
        # Flip x velocities if needed. This appears to be faster than
        # an if statement for 10000 or fewer particles.
        vx = self.xdir * vx
        if rseed is not None:
            np.random.set_state(nprstate)
        return x, y, z, vx, vy, vz

    def get_normals(self, x, y, z):
        """Calculate local surface normal at specified coordinates.

        Arguments:
            x (np.ndarray): x-coordinates of emitted particles (in meters).
            y (np.ndarray): y-coordinates of emitted particles (in meters).
            z (np.ndarray): z-coordinates of emitted particles (in meters).

        Returns:
            normals (np.ndarray): nx3 array containing the outward surface
                normal vector at each particle location.
        """
        # The emitting plane is normal to x; the outward direction is the
        # configured emission direction xdir.
        normals = np.zeros((len(x), 3))
        normals[:, 0] = self.xdir
        return normals
class ZDiscEmitter(Emitter):
    """This injects over an x-y disc rather than a rectangle."""

    geoms = ['RZ']

    def __init__(self, conductor, T=None, inner_emission_radius=None,
                 outer_emission_radius=None, transverse_fac=1.0, **kwargs):
        """Initialize an emitter for a disc (circular) cathode.

        Arguments:
            conductor (:class:`mewarpx.assemblies.Assembly`): Conductor object,
                used to obtain work function and z coordinate/direction.
            T (float): Temperature in Kelvin for the emitter; determines
                velocities. If not specified the temperature of the conductor
                will be used.
            inner_emission_radius (float): Inner radius of the disc (in meters)
                for particles to be emitted from. Default mwxrun.rmin.
            outer_emission_radius (float): Outer radius of the disc (in meters)
                for particles to be emitted from. Default mwxrun.rmax.
            transverse_fac (float): Scale the transverse energy distribution by
                this factor. Default 1. See
                :func:`mewarpx.utils_store.util.get_velocities` for details.
            kwargs (dict): Any other keyword arguments supported by the parent
                Emitter constructor (such as "emission_type").

        Notes:
            The center of the disc is always x = y = 0 at present.
        """
        # Default initialization
        super(ZDiscEmitter, self).__init__(T=T, conductor=conductor, **kwargs)
        self.z = conductor.z
        self.zsign = conductor.zsign
        self.transverse_fac = transverse_fac
        # Default to the full simulation radial extent when no radii given.
        self.inner_emission_radius = (
            mwxrun.rmin if inner_emission_radius is None
            else inner_emission_radius
        )
        self.outer_emission_radius = (
            mwxrun.rmax if outer_emission_radius is None
            else outer_emission_radius
        )
        r_out = self.outer_emission_radius
        r_in = self.inner_emission_radius
        # Area of the emitting annulus between the two radii.
        self.area = np.pi * (r_out**2 - r_in**2)
        # Number of radial cells covered by the emitting annulus.
        self.cell_count = (r_out - r_in) / mwxrun.dr

    def _get_xv_coords(self, npart, m, rseed):
        """Get particle coordinates given particle number.

        See :func:`mewarpx.emission.BaseEmitter.get_newparticles` for details.
        """
        seeded = rseed is not None
        vel_seed = pos_seed = None
        if seeded:
            # Save the global RNG state so seeded emission is reproducible
            # without disturbing the rest of the simulation's random stream.
            saved_state = np.random.get_state()
            np.random.seed(rseed)
            vel_seed = np.random.randint(1000000000)
            pos_seed = np.random.randint(1000000000)
        vx, vy, vz = mwxutil.get_velocities(
            npart, self.T, m=m, transverse_fac=self.transverse_fac,
            emission_type=self.emission_type, rseed=vel_seed)
        x, y, z = mwxutil.get_positions_RZ(
            npart, rmin=self.inner_emission_radius,
            rmax=self.outer_emission_radius, z=self.z,
            rseed=pos_seed)
        # Flip z velocities for anode emission. Array multiplication is
        # faster than a per-particle branch for 10000 or fewer particles.
        vz *= -self.zsign
        if seeded:
            np.random.set_state(saved_state)
        return x, y, z, vx, vy, vz

    def get_normals(self, x, y, z):
        """Calculate local surface normal at specified coordinates.

        Arguments:
            x (np.ndarray): x-coordinates of emitted particles (in meters).
            y (np.ndarray): y-coordinates of emitted particles (in meters).
            z (np.ndarray): z-coordinates of emitted particles (in meters).

        Returns:
            normals (np.ndarray): nx3 array containing the outward surface
                normal vector at each particle location.
        """
        n_parts = len(x)
        # Every normal points along z with sign opposite to zsign.
        result = np.zeros((n_parts, 3))
        result[:, 2] = -self.zsign
        return result
class ZCylinderEmitter(Emitter):
    """This injects over the side faces of a cylinder oriented along z."""

    geoms = ['RZ', 'XYZ']

    def __init__(self, conductor, T=None, zmin=None, zmax=None, rdir=1,
                 transverse_fac=1.0, **kwargs):
        """Initialize a 3D cylindrical emitter oriented along the z-axis.

        Arguments:
            conductor (:class:`mewarpx.assemblies.Assembly`): Conductor object,
                used to obtain work function, coordinates and possibly
                temperature.
            T (float): Temperature in Kelvin for the emitter; determines
                velocities. If not specified the temperature of the conductor
                will be used.
            zmin (float): Lower z-coordinate for emitting surface. Default
                conductor.zmin if it exists otherwise mwxrun.zmin.
            zmax (float): Upper z-coordinate for emitting surface. Default
                conductor.zmax if it exists otherwise mwxrun.zmax.
            rdir (float): 1 for emitting outward of the cylinder (will use
                r_outer attribute of the conductor for the emitting surface),
                -1 for emitting inward towards r = 0 (will use r_inner
                attribute of the conductor for the emitting surface).
                Default 1.
            transverse_fac (float): Scale the transverse energy distribution by
                this factor. Default 1. See
                :func:`mewarpx.utils_store.util.get_velocities` for details.
            kwargs (dict): Any other keyword arguments supported by the parent
                Emitter constructor (such as "emission_type").

        Notes:
            The center of the cylinder is always x = y = 0 at present.
        """
        # Default initialization
        super(ZCylinderEmitter, self).__init__(T=T, conductor=conductor,
                                               **kwargs)
        # z-extent defaults: conductor limits first, then simulation limits.
        if zmin is None:
            zmin = getattr(conductor, 'zmin', mwxrun.zmin)
        self.zmin = zmin
        if zmax is None:
            zmax = getattr(conductor, 'zmax', mwxrun.zmax)
        self.zmax = zmax
        self.rdir = int(round(rdir))
        if self.rdir not in [-1, 1]:
            raise ValueError("rdir must be +1 or -1 for z-cylinder emitters.")
        # Nudge the emission radius slightly off the conductor surface so
        # particles are born just outside it.
        if self.rdir == 1:
            self.r = conductor.r_outer + 1e-10
        else:
            self.r = conductor.r_inner - 1e-10
        self.transverse_fac = transverse_fac
        # sanity check
        if self.r <= 0:
            raise AttributeError("Cannot emit from a cylinder with 0 radius.")
        # Lateral (side) surface area of the cylinder.
        self.area = 2.0 * np.pi * self.r * (self.zmax - self.zmin)
        # Compute cell count
        if mwxrun.geom_str == 'RZ':
            self.cell_count = (self.zmax - self.zmin) / mwxrun.dz
        elif mwxrun.geom_str == 'XYZ':
            self.cell_count = (
                self.area
                / min(mwxrun.dx * mwxrun.dy, mwxrun.dx * mwxrun.dz,
                      mwxrun.dy * mwxrun.dz)
            )

    def _get_xv_coords(self, npart, m, rseed):
        """Get particle coordinates given particle number.

        See :func:`mewarpx.emission.BaseEmitter.get_newparticles` for details.
        """
        seeded = rseed is not None
        vel_seed = None
        if seeded:
            saved_state = np.random.get_state()
            np.random.seed(rseed)
            # Derived seed for the velocity sampler; the position draws
            # below consume the freshly seeded global stream directly.
            vel_seed = np.random.randint(1000000000)
        azimuth = np.random.uniform(0.0, 2.0*np.pi, npart)
        cos_az = np.cos(azimuth)
        sin_az = np.sin(azimuth)
        x = self.r * cos_az
        y = self.r * sin_az
        z = np.random.uniform(self.zmin, self.zmax, npart)
        vz, v_trans, v_long = mwxutil.get_velocities(
            npart, self.T, m=m, transverse_fac=self.transverse_fac,
            emission_type=self.emission_type, rseed=vel_seed)
        # Point the longitudinal velocity inward or outward as configured.
        # Array multiplication is faster than a per-particle branch for
        # 10000 or fewer particles.
        v_long = self.rdir * v_long
        # Rotate (longitudinal, transverse) into the (x, y) frame at each
        # sampled azimuth.
        vx = cos_az * v_long - sin_az * v_trans
        vy = sin_az * v_long + cos_az * v_trans
        if seeded:
            np.random.set_state(saved_state)
        return x, y, z, vx, vy, vz

    def get_normals(self, x, y, z):
        """Calculate local surface normal at specified coordinates.

        Arguments:
            x (np.ndarray): x-coordinates of emitted particles (in meters).
            y (np.ndarray): y-coordinates of emitted particles (in meters).
            z (np.ndarray): z-coordinates of emitted particles (in meters).

        Returns:
            normals (np.ndarray): nx3 array containing the outward surface
                normal vector at each particle location.
        """
        # The normal is r-hat: (x/r, y/r, 0) at each particle position.
        radii = np.sqrt(x**2 + y**2)
        result = np.zeros((len(x), 3))
        result[:, 0] = x / radii
        result[:, 1] = y / radii
        return result
class ArbitraryEmitter2D(Emitter):
    """ ArbitraryEmitter2D class takes in a conductor, calculates an approximate
    surface that encloses the conductor and then sets up the appropriate
    emitting surfaces, given a number of particles to emit.
    """
    geoms = ['XZ']

    def __init__(self, conductor, T=None, res_fac=5., transverse_fac=1.0,
                 **kwargs):
        """Construct the emitter based on conductor object and temperature.

        Arguments:
            conductor (mewarpx.assemblies object): Conductor to emit from.
            T (float): Temperature in Kelvin. If not specified the temperature
                of the conductor will be used.
            res_fac (float): Level of resolution beyond the grid resolution to
                use for calculating shape contours.
            transverse_fac (float): Scale the transverse energy distribution by
                this factor. Default 1. See
                :func:`mewarpx.utils_store.util.get_velocities` for details.
            kwargs (dict): Any other keyword arguments supported by the parent
                Emitter constructor (such as "emission_type").
        """
        # Default initialization
        super(ArbitraryEmitter2D, self).__init__(
            T=T, conductor=conductor, **kwargs
        )
        # Save input parameters
        self.res_fac = res_fac
        self.transverse_fac = transverse_fac
        # Generate grid enclosed in bounding box, refined by res_fac relative
        # to the simulation grid. dy is a dummy 1 m span for the 2D XZ
        # geometry.
        self.dx = mwxrun.dx/res_fac
        self.dy = 1.
        self.dz = mwxrun.dz/res_fac
        # Geometric-mean cell size of the refined grid; used below as the
        # length scale for the isinside() "aura" and the normal-check offset.
        self.dA = np.sqrt(self.dx*self.dz)
        # A small delta is added to the maxima here; this ensures the last point
        # is included. Without it, floating point errors determine whether or
        # not the last point is included.
        self.xvec = np.arange(
            mwxrun.xmin, mwxrun.xmax + self.dx/1000., self.dx)
        self.yvec = [0.]
        self.zvec = np.arange(
            mwxrun.zmin, mwxrun.zmax + self.dz/1000., self.dz)
        [X, Y, Z] = np.squeeze(np.meshgrid(self.xvec, self.yvec, self.zvec,
                                           indexing='xy'))
        oshape = X.shape
        X = X.flatten()
        Y = Y.flatten()
        Z = Z.flatten()
        # Mask of refined grid points lying inside the conductor, restored
        # to the original 2D grid shape for contour extraction.
        inside = np.reshape(
            self.conductor.isinside(X, Y, Z, aura=self.dA/5.),
            oshape)
        # level of 0.17 was chosen to keep the original ratio of 0.5:3 from warp
        # compared to 0.17:1 now in warpx
        # increasing the level causes particles to be injected inside cylinder
        self.contours = np.squeeze(skimage.measure.find_contours(
            inside, 0.17))
        # Map contour coordinates from fractional grid indices to physical
        # x (column 0) and z (column 1) positions.
        self.contours[:, 0] = np.interp(self.contours[:, 0],
                                        np.arange(self.xvec.size),
                                        self.xvec)
        self.contours[:, 1] = np.interp(self.contours[:, 1],
                                        np.arange(self.zvec.size),
                                        self.zvec)
        # Midpoint and (x, z) delta of each contour segment.
        self.centers = np.array(
            [(self.contours[1:, 0] + self.contours[:-1, 0])/2.,
             (self.contours[1:, 1] + self.contours[:-1, 1])/2.]).T
        self.dvec = np.array(
            [self.contours[1:, 0] - self.contours[:-1, 0],
             self.contours[1:, 1] - self.contours[:-1, 1]]).T
        # Calculate the distance of each segment & sum to calculate the area
        self.distances = np.sqrt(self.dvec[:, 0]**2 + self.dvec[:, 1]**2)
        self.area = sum(self.distances)
        self.cell_count = self.area / min(mwxrun.dx, mwxrun.dz)
        # Cumulative distribution over segments, weighted by segment length;
        # used in _get_xv_coords() to pick emission faces.
        self.CDF = np.cumsum(self.distances)/self.area
        # Calculate Normal Vector by taking cross product with y-hat
        ndvec = self.dvec/np.tile(self.distances, (2, 1)).T
        marching_normal = np.zeros(self.dvec.shape)
        marching_normal[:, 0] = -ndvec[:, 1]
        marching_normal[:, 1] = ndvec[:, 0]
        # Check to make sure normal plus center is outside of conductor
        partdist = self.dA * float(self.res_fac) / 2.
        pos = self.centers + marching_normal * partdist
        px = pos[:, 0]
        py = np.zeros_like(px)
        pz = pos[:, 1]
        # Use the conductor's own normal calculation at the offset points;
        # keep only the (x, z) components for this 2D geometry.
        nhat = self.conductor.calculatenormal(px, py, pz)
        self.normal = nhat[[0, 2], :].T

    def _get_xv_coords(self, npart, m, rseed):
        """Get particle coordinates given particle number.

        See :func:`mewarpx.emitter.get_newparticles` for details.
        """
        if rseed is not None:
            nprstate = np.random.get_state()
            np.random.seed(rseed)
            # rseedv is passed to get velocities. The basic rseed here is used
            # for positions, below.
            rseedv = np.random.randint(1000000000)
        else:
            rseedv = None
        # Draw Random Numbers to determine which face to emit from
        self.contour_idx = np.searchsorted(self.CDF, np.random.rand(npart))
        vels = np.column_stack(mwxutil.get_velocities(
            num_samples=npart, T=self.T, m=m,
            rseed=rseedv,
            transverse_fac=self.transverse_fac,
            emission_type=self.emission_type
        ))
        # Rotate velocities based on angle of normal
        newvels = self.convert_vel_zhat_nhat(
            vels, self.normal[self.contour_idx])
        vx = np.asarray(newvels[:, 0], order="C")
        vy = np.asarray(newvels[:, 1], order="C")
        vz = np.asarray(newvels[:, 2], order="C")
        # Now get positions: uniform along each chosen contour segment.
        pos1 = self.contours[self.contour_idx, :]
        positions = (pos1 +
                     (np.tile(np.random.rand(npart), (2, 1)).T
                      * self.dvec[self.contour_idx, :]))
        x = np.asarray(positions[:, 0], order="C")
        # NOTE(review): y is a 0-d scalar array here rather than an
        # npart-length array like x and z — presumably downstream
        # add_particles broadcasts it; confirm against the caller.
        y = np.asarray(0., order="C")
        z = np.asarray(positions[:, 1], order="C")
        if rseed is not None:
            np.random.set_state(nprstate)
        return x, y, z, vx, vy, vz

    @staticmethod
    # Synthetic tests showed 18 ms to 660us change from using np.dot +
    # numba compilation. Without these changes, this function was taking 2-4% of
    # some run times so the improvement is warranted.
    @numba.jit(nopython=True)
    def convert_vel_zhat_nhat(vels, nhat):
        """Create a rotation matrix for Zhat to Nhat"""
        Zhat = np.array([0., 1.])
        newvels = np.zeros(vels.shape)
        for ii in range(vels.shape[0]):
            # Angle between zhat and this normal via the law of cosines on
            # the chord |Zhat - nhat|.
            Cvec = Zhat - nhat[ii, :]
            Cvec2 = np.dot(Cvec, Cvec)
            theta = np.arccos(1. - Cvec2/2.)
            # Check to see if normal is pointing toward -xhat
            # Resolves angle ambiguity in law of cosines
            if nhat[ii, 0] < 0.:
                theta = -theta
            # Rotate in XZ plane, keeping Y the same
            R = np.array([[np.cos(theta), 0., np.sin(theta)],
                          [0., 1., 0.],
                          [-np.sin(theta), 0., np.cos(theta)]])
            newvels[ii, :] = np.dot(R, vels[ii, :])
        return newvels

    def get_normals(self, x, y, z):
        """Calculate local surface normal at specified coordinates.

        Arguments:
            x (np.ndarray): x-coordinates of emitted particles (in meters).
            y (np.ndarray): y-coordinates of emitted particles (in meters).
            z (np.ndarray): z-coordinates of emitted particles (in meters).

        Returns:
            normals (np.ndarray): nx3 array containing the outward surface
                normal vector at each particle location.
        """
        # Since we've already pre-computed all the normals and already picked
        # the right ones during the call to _get_xv_coords(), we can ignore the
        # coordinate arguments here entirely and use the recently saved
        # "contour_idx" values for indexing the pre-tabulated normals. To
        # prevent this from being abused, we'll first check that the length of
        # the coordinate lists matches that of the contour_idx list.
        if len(x) != len(self.contour_idx):
            raise ValueError('Length of particle coordinate list does not match'
                             + ' the most recent number of emitted particles!')
        normals = np.zeros((len(x), 3))
        normals[:, 0] = self.normal[self.contour_idx, 0]
        normals[:, 2] = self.normal[self.contour_idx, 1]
        return normals

    def plot_contours(self):
        """Plot the assembly object and its computed emission contours.

        The assembly interior is drawn as a filled colormap image and the
        contour segments are drawn on top of it. The figure is saved to
        "<conductor name>_contour_plot.png".
        """
        # calculate which tiles are inside of assembly object
        self.xvec = np.arange(
            mwxrun.xmin, mwxrun.xmax + self.dx/1000., self.dx)
        self.yvec = [0.]
        self.zvec = np.arange(
            mwxrun.zmin, mwxrun.zmax + self.dz/1000., self.dz)
        [X, Y, Z] = np.squeeze(np.meshgrid(self.xvec, self.yvec, self.zvec,
                                           indexing='xy'))
        oshape = X.shape
        X = X.flatten()
        Y = Y.flatten()
        Z = Z.flatten()
        inside = np.reshape(
            self.conductor.isinside(X, Y, Z, aura=self.dA/5.),
            oshape)
        # Same contour level (0.17) as used in __init__ for emission faces.
        contours = np.array(skimage.measure.find_contours(inside, 0.17))
        # plot assembly object first
        assembly_cmap = colors.LinearSegmentedColormap.from_list('my_cmap',['white','#66c2a5'],256)
        fig, ax = plt.subplots()
        ax.imshow(inside, cmap=assembly_cmap, origin="lower")
        # plot contours
        for contour in contours:
            ax.plot(contour[:, 1], contour[:, 0], linewidth=2, color="#fc8d62")
        # set title and labels
        ax.set_title(f"{self.conductor.name} contour plot")
        # Tick positions are in (refined) grid-index units; the labels below
        # rescale them back to physical meters via x_step/y_step.
        x_range = [self.res_fac * mwxrun.zmin / mwxrun.dz, self.res_fac * mwxrun.zmax / mwxrun.dz]
        y_range = [self.res_fac * mwxrun.xmin / mwxrun.dx, self.res_fac * mwxrun.xmax / mwxrun.dx]
        x_step = mwxrun.dz / (self.res_fac)
        y_step = mwxrun.dx / (self.res_fac)
        minor_xticks = np.linspace(x_range[0], x_range[1], mwxrun.nz)
        minor_yticks = np.linspace(y_range[0], y_range[1], mwxrun.nx)
        major_xticks = np.linspace(x_range[0], x_range[1], 5)
        major_yticks = np.linspace(y_range[0], y_range[1], 5)
        ax.set_xlabel("Z (m)")
        ax.set_ylabel("X (m)")
        ax.set_xticks(major_xticks)
        ax.set_xticks(minor_xticks, minor=True)
        ax.set_xticklabels(np.round(major_xticks * x_step, 8), rotation=45)
        ax.set_yticks(major_yticks)
        ax.set_yticks(minor_yticks, minor=True)
        ax.set_yticklabels(np.round(major_yticks * y_step, 8))
        ax.grid(visible=True, which="minor")
        ax.set_aspect(mwxrun.dx/mwxrun.dz, adjustable='box')
        fig.tight_layout()
        fig.savefig(f"{self.conductor.name}_contour_plot.png")
class VolumeEmitter(BaseEmitter):
    """Parent class for volumetric particle injection coordinates.

    - ``volume`` gives the spatial volume in m^3
    - ``_get_x_coords()`` implements the subclass-specific particle
      injection logic
    """

    volume = 0
    geoms = ['Z', 'XZ', 'RZ', 'XYZ']

    def __init__(self, T, xmin=None, xmax=None, ymin=None, ymax=None,
                 zmin=None, zmax=None, rmin=None, rmax=None):
        """Initialize emitter boundaries. A rectangular or cylindrical emitter
        volume is supported. If x & y boundaries are specified the r boundaries
        will be ignored and vice versa. If both x & y and r boundaries are
        specified an AttributeError will be raised. If no boundaries are given
        the simulation geometry and boundaries will be used.

        Arguments:
            T (float): Emitter temperature in Kelvin. Determines particle
                velocity distribution.
            x/y/z/rmin (float): Lower boundary of the volume.
            x/y/z/rmax (float): Upper boundary of the volume.
        """
        super(VolumeEmitter, self).__init__()
        self.T = T
        # Rectangular prism by default, except in RZ geometry.
        self.rectangular = mwxrun.geom_str != 'RZ'
        # Explicit boundaries override the geometry-based default.
        has_r_limits = rmin is not None or rmax is not None
        has_xy_limits = any(
            lim is not None for lim in (xmin, xmax, ymin, ymax))
        if has_r_limits and has_xy_limits:
            raise AttributeError(
                "Both rectangular and cylindrical boundaries specified for a "
                "VolumeEmitter"
            )
        if has_r_limits:
            self.rectangular = False
        if has_xy_limits:
            self.rectangular = True
        # bounds[i] holds (min, max) for axis i: (x, y, z) or (r, theta, z).
        self.bounds = np.zeros((3, 2))
        if self.rectangular:
            limit_pairs = zip(
                [xmin, xmax, ymin, ymax, zmin, zmax],
                [mwxrun.xmin, mwxrun.xmax, mwxrun.ymin,
                 mwxrun.ymax, mwxrun.zmin, mwxrun.zmax]
            )
            for idx, (user_lim, default_lim) in enumerate(limit_pairs):
                self.bounds[idx // 2, idx % 2] = (
                    default_lim if user_lim is None else user_lim)
            self.volume = np.prod(self.bounds[:, 1] - self.bounds[:, 0])
        # handle cylindrical case
        else:
            limit_pairs = zip(
                [rmin, rmax, 0, 2.0*np.pi, zmin, zmax],
                [mwxrun.rmin, mwxrun.rmax, 0, 2.0*np.pi,
                 mwxrun.zmin, mwxrun.zmax]
            )
            for idx, (user_lim, default_lim) in enumerate(limit_pairs):
                self.bounds[idx // 2, idx % 2] = (
                    default_lim if user_lim is None else user_lim)
            self.volume = (
                np.pi * (self.bounds[0, 1]**2 - self.bounds[0, 0]**2)
                * (self.bounds[2, 1] - self.bounds[2, 0])
            )
        # Note the negation here will catch nans, checking <= 0 won't.
        if not (self.volume > 0):
            raise RuntimeError("Invalid warpX geometry limits.")

    def getvoltage(self):
        """Ideally this is probably the local potential, but default to 0."""
        return 0.

    def getvoltage_e(self):
        """Ideally this is probably the local potential, but default to 0."""
        return self.getvoltage()

    def _get_xv_coords(self, npart, m, rseed):
        """Get velocities and call specialized function for position."""
        seeded = rseed is not None
        vel_seed = None
        if seeded:
            saved_state = np.random.get_state()
            np.random.seed(rseed)
            # Derived seed for the velocity sampler; the position draws in
            # _get_x_coords() consume the freshly seeded global stream.
            vel_seed = np.random.randint(1000000000)
        x_coords = self._get_x_coords(npart)
        v_coords = mwxutil.get_velocities(
            npart, self.T, m=m, emission_type='random', rseed=vel_seed)
        if seeded:
            np.random.set_state(saved_state)
        return (
            x_coords[:, 0], x_coords[:, 1], x_coords[:, 2],
            v_coords[0], v_coords[1], v_coords[2]
        )
class UniformDistributionVolumeEmitter(VolumeEmitter):
    """Inject particles uniformly throughout a given volume at a specified
    temperature.
    """

    def _get_x_coords(self, npart):
        """Get coordinates uniformly distributed in space.

        rseed, if used, is handled by the parent function.
        """
        if self.rectangular:
            # One uniform draw per axis, in x, y, z order.
            coords = [
                np.random.uniform(lo, hi, npart)
                for lo, hi in self.bounds
            ]
        # handle cylindrical case
        else:
            # Uniform-in-area radial sampling: draw r**2 uniformly between
            # the squared bounds, then take the square root.
            radius = np.sqrt(np.random.uniform(
                self.bounds[0, 1]**2, self.bounds[0, 0]**2, npart
            ))
            angle = np.random.uniform(self.bounds[1, 0], self.bounds[1, 1],
                                      npart)
            coords = [
                radius * np.cos(angle),
                radius * np.sin(angle),
                np.random.uniform(self.bounds[2, 0], self.bounds[2, 1],
                                  npart),
            ]
        return np.array(coords).T
class ZSinDistributionVolumeEmitter(VolumeEmitter):
    """Vary density in z as a half-period sin wave."""

    def _get_x_coords(self, npart):
        """Get coordinates with sin distribution.

        rseed, if used, is handled by the parent function.
        """
        # Transverse coordinates are sampled uniformly; only z carries the
        # sin-shaped density profile.
        if self.rectangular:
            x_vals = np.random.uniform(self.bounds[0, 0], self.bounds[0, 1],
                                       npart)
            y_vals = np.random.uniform(self.bounds[1, 0], self.bounds[1, 1],
                                       npart)
        # handle cylindrical case
        else:
            radius = np.sqrt(np.random.uniform(
                self.bounds[0, 1]**2, self.bounds[0, 0]**2, npart
            ))
            angle = np.random.uniform(self.bounds[1, 0], self.bounds[1, 1],
                                      npart)
            x_vals = radius * np.cos(angle)
            y_vals = radius * np.sin(angle)
        # Inverse-CDF sampling of the half-period sin profile:
        # z = arccos(1 - 2U) / pi, rescaled onto [zmin, zmax].
        draw = np.random.random(npart)
        z_vals = (
            np.arccos(1 - 2.0*draw) / np.pi
            * (self.bounds[2, 1] - self.bounds[2, 0])
            + self.bounds[2, 0]
        )
        return np.array([x_vals, y_vals, z_vals]).T
| [
"numpy.sum",
"numpy.random.seed",
"mewarpx.utils_store.util.plasma_Debye_length",
"pywarpx.callbacks.installparticleinjection",
"mewarpx.mwxrun.mwxrun.get_dt",
"numpy.random.set_state",
"mewarpx.utils_store.util.J_RD",
"numpy.sin",
"numpy.arange",
"numpy.random.randint",
"numpy.tile",
"numpy.r... | [((563, 590), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (580, 590), False, 'import logging\n'), ((69090, 69114), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (69099, 69114), False, 'import numba\n'), ((5506, 5517), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (5514, 5517), True, 'import numpy as np\n'), ((5644, 5659), 'mewarpx.mwxrun.mwxrun.get_it', 'mwxrun.get_it', ([], {}), '()\n', (5657, 5659), False, 'from mewarpx.mwxrun import mwxrun\n'), ((9641, 9698), 'pywarpx.callbacks.installparticleinjection', 'callbacks.installparticleinjection', (['self.inject_particles'], {}), '(self.inject_particles)\n', (9675, 9698), False, 'from pywarpx import callbacks, picmi\n'), ((15457, 15472), 'mewarpx.mwxrun.mwxrun.get_dt', 'mwxrun.get_dt', ([], {}), '()\n', (15470, 15472), False, 'from mewarpx.mwxrun import mwxrun\n'), ((17592, 17649), 'pywarpx.callbacks.installparticleinjection', 'callbacks.installparticleinjection', (['self.inject_particles'], {}), '(self.inject_particles)\n', (17626, 17649), False, 'from pywarpx import callbacks, picmi\n'), ((19834, 20103), 'mewarpx.mwxrun.mwxrun.sim_ext.add_particles', 'mwxrun.sim_ext.add_particles', (['self.injection_species.name'], {'x': "particles_dict['x']", 'y': "particles_dict['y']", 'z': "particles_dict['z']", 'ux': "particles_dict['vx']", 'uy': "particles_dict['vy']", 'uz': "particles_dict['vz']", 'unique_particles': 'self.unique_particles'}), "(self.injection_species.name, x=particles_dict[\n 'x'], y=particles_dict['y'], z=particles_dict['z'], ux=particles_dict[\n 'vx'], uy=particles_dict['vy'], uz=particles_dict['vz'],\n unique_particles=self.unique_particles, **extra_pids)\n", (19862, 20103), False, 'from mewarpx.mwxrun import mwxrun\n'), ((27196, 27253), 'pywarpx.callbacks.installparticleinjection', 'callbacks.installparticleinjection', (['self.inject_particles'], {}), '(self.inject_particles)\n', (27230, 27253), False, 'from pywarpx import 
callbacks, picmi\n'), ((46755, 46886), 'mewarpx.utils_store.util.get_velocities', 'mwxutil.get_velocities', (['npart', 'self.T'], {'m': 'm', 'transverse_fac': 'self.transverse_fac', 'emission_type': 'self.emission_type', 'rseed': 'rseedv'}), '(npart, self.T, m=m, transverse_fac=self.\n transverse_fac, emission_type=self.emission_type, rseed=rseedv)\n', (46777, 46886), True, 'import mewarpx.utils_store.util as mwxutil\n'), ((46925, 47066), 'mewarpx.utils_store.util.get_positions', 'mwxutil.get_positions', (['npart'], {'xmin': 'self.bounds[0]', 'xmax': 'self.bounds[1]', 'ymin': 'self.bounds[2]', 'ymax': 'self.bounds[3]', 'z': 'self.z', 'rseed': 'rseedx'}), '(npart, xmin=self.bounds[0], xmax=self.bounds[1], ymin\n =self.bounds[2], ymax=self.bounds[3], z=self.z, rseed=rseedx)\n', (46946, 47066), True, 'import mewarpx.utils_store.util as mwxutil\n'), ((52521, 52652), 'mewarpx.utils_store.util.get_velocities', 'mwxutil.get_velocities', (['npart', 'self.T'], {'m': 'm', 'transverse_fac': 'self.transverse_fac', 'emission_type': 'self.emission_type', 'rseed': 'rseedv'}), '(npart, self.T, m=m, transverse_fac=self.\n transverse_fac, emission_type=self.emission_type, rseed=rseedv)\n', (52543, 52652), True, 'import mewarpx.utils_store.util as mwxutil\n'), ((52691, 52832), 'mewarpx.utils_store.util.get_positions', 'mwxutil.get_positions', (['npart'], {'xmin': 'self.bounds[2]', 'xmax': 'self.bounds[3]', 'ymin': 'self.bounds[0]', 'ymax': 'self.bounds[1]', 'z': 'self.x', 'rseed': 'rseedx'}), '(npart, xmin=self.bounds[2], xmax=self.bounds[3], ymin\n =self.bounds[0], ymax=self.bounds[1], z=self.x, rseed=rseedx)\n', (52712, 52832), True, 'import mewarpx.utils_store.util as mwxutil\n'), ((56484, 56615), 'mewarpx.utils_store.util.get_velocities', 'mwxutil.get_velocities', (['npart', 'self.T'], {'m': 'm', 'transverse_fac': 'self.transverse_fac', 'emission_type': 'self.emission_type', 'rseed': 'rseedv'}), '(npart, self.T, m=m, transverse_fac=self.\n transverse_fac, 
emission_type=self.emission_type, rseed=rseedv)\n', (56506, 56615), True, 'import mewarpx.utils_store.util as mwxutil\n'), ((56654, 56780), 'mewarpx.utils_store.util.get_positions_RZ', 'mwxutil.get_positions_RZ', (['npart'], {'rmin': 'self.inner_emission_radius', 'rmax': 'self.outer_emission_radius', 'z': 'self.z', 'rseed': 'rseedx'}), '(npart, rmin=self.inner_emission_radius, rmax=self.\n outer_emission_radius, z=self.z, rseed=rseedx)\n', (56678, 56780), True, 'import mewarpx.utils_store.util as mwxutil\n'), ((61424, 61466), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(2.0 * np.pi)', 'npart'], {}), '(0.0, 2.0 * np.pi, npart)\n', (61441, 61466), True, 'import numpy as np\n'), ((61485, 61498), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (61491, 61498), True, 'import numpy as np\n'), ((61519, 61532), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (61525, 61532), True, 'import numpy as np\n'), ((61608, 61654), 'numpy.random.uniform', 'np.random.uniform', (['self.zmin', 'self.zmax', 'npart'], {}), '(self.zmin, self.zmax, npart)\n', (61625, 61654), True, 'import numpy as np\n'), ((61686, 61817), 'mewarpx.utils_store.util.get_velocities', 'mwxutil.get_velocities', (['npart', 'self.T'], {'m': 'm', 'transverse_fac': 'self.transverse_fac', 'emission_type': 'self.emission_type', 'rseed': 'rseedv'}), '(npart, self.T, m=m, transverse_fac=self.\n transverse_fac, emission_type=self.emission_type, rseed=rseedv)\n', (61708, 61817), True, 'import mewarpx.utils_store.util as mwxutil\n'), ((62829, 62853), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (62836, 62853), True, 'import numpy as np\n'), ((64508, 64534), 'numpy.sqrt', 'np.sqrt', (['(self.dx * self.dz)'], {}), '(self.dx * self.dz)\n', (64515, 64534), True, 'import numpy as np\n'), ((64755, 64818), 'numpy.arange', 'np.arange', (['mwxrun.xmin', '(mwxrun.xmax + self.dx / 1000.0)', 'self.dx'], {}), '(mwxrun.xmin, mwxrun.xmax + self.dx / 1000.0, self.dx)\n', (64764, 
64818), True, 'import numpy as np\n'), ((64874, 64937), 'numpy.arange', 'np.arange', (['mwxrun.zmin', '(mwxrun.zmax + self.dz / 1000.0)', 'self.dz'], {}), '(mwxrun.zmin, mwxrun.zmax + self.dz / 1000.0, self.dz)\n', (64883, 64937), True, 'import numpy as np\n'), ((66368, 66420), 'numpy.sqrt', 'np.sqrt', (['(self.dvec[:, 0] ** 2 + self.dvec[:, 1] ** 2)'], {}), '(self.dvec[:, 0] ** 2 + self.dvec[:, 1] ** 2)\n', (66375, 66420), True, 'import numpy as np\n'), ((66732, 66757), 'numpy.zeros', 'np.zeros', (['self.dvec.shape'], {}), '(self.dvec.shape)\n', (66740, 66757), True, 'import numpy as np\n'), ((67067, 67084), 'numpy.zeros_like', 'np.zeros_like', (['px'], {}), '(px)\n', (67080, 67084), True, 'import numpy as np\n'), ((68246, 68282), 'numpy.asarray', 'np.asarray', (['newvels[:, 0]'], {'order': '"""C"""'}), "(newvels[:, 0], order='C')\n", (68256, 68282), True, 'import numpy as np\n'), ((68296, 68332), 'numpy.asarray', 'np.asarray', (['newvels[:, 1]'], {'order': '"""C"""'}), "(newvels[:, 1], order='C')\n", (68306, 68332), True, 'import numpy as np\n'), ((68346, 68382), 'numpy.asarray', 'np.asarray', (['newvels[:, 2]'], {'order': '"""C"""'}), "(newvels[:, 2], order='C')\n", (68356, 68382), True, 'import numpy as np\n'), ((68623, 68661), 'numpy.asarray', 'np.asarray', (['positions[:, 0]'], {'order': '"""C"""'}), "(positions[:, 0], order='C')\n", (68633, 68661), True, 'import numpy as np\n'), ((68674, 68700), 'numpy.asarray', 'np.asarray', (['(0.0)'], {'order': '"""C"""'}), "(0.0, order='C')\n", (68684, 68700), True, 'import numpy as np\n'), ((68712, 68750), 'numpy.asarray', 'np.asarray', (['positions[:, 1]'], {'order': '"""C"""'}), "(positions[:, 1], order='C')\n", (68722, 68750), True, 'import numpy as np\n'), ((69229, 69249), 'numpy.array', 'np.array', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (69237, 69249), True, 'import numpy as np\n'), ((69267, 69287), 'numpy.zeros', 'np.zeros', (['vels.shape'], {}), '(vels.shape)\n', (69275, 69287), True, 'import numpy as np\n'), 
((71608, 71671), 'numpy.arange', 'np.arange', (['mwxrun.xmin', '(mwxrun.xmax + self.dx / 1000.0)', 'self.dx'], {}), '(mwxrun.xmin, mwxrun.xmax + self.dx / 1000.0, self.dx)\n', (71617, 71671), True, 'import numpy as np\n'), ((71727, 71790), 'numpy.arange', 'np.arange', (['mwxrun.zmin', '(mwxrun.zmax + self.dz / 1000.0)', 'self.dz'], {}), '(mwxrun.zmin, mwxrun.zmax + self.dz / 1000.0, self.dz)\n', (71736, 71790), True, 'import numpy as np\n'), ((72283, 72361), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'colors.LinearSegmentedColormap.from_list', (['"""my_cmap"""', "['white', '#66c2a5']", '(256)'], {}), "('my_cmap', ['white', '#66c2a5'], 256)\n", (72323, 72361), True, 'import matplotlib.colors as colors\n'), ((72377, 72391), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (72389, 72391), True, 'import matplotlib.pyplot as plt\n'), ((72997, 73043), 'numpy.linspace', 'np.linspace', (['x_range[0]', 'x_range[1]', 'mwxrun.nz'], {}), '(x_range[0], x_range[1], mwxrun.nz)\n', (73008, 73043), True, 'import numpy as np\n'), ((73067, 73113), 'numpy.linspace', 'np.linspace', (['y_range[0]', 'y_range[1]', 'mwxrun.nx'], {}), '(y_range[0], y_range[1], mwxrun.nx)\n', (73078, 73113), True, 'import numpy as np\n'), ((73138, 73176), 'numpy.linspace', 'np.linspace', (['x_range[0]', 'x_range[1]', '(5)'], {}), '(x_range[0], x_range[1], 5)\n', (73149, 73176), True, 'import numpy as np\n'), ((73200, 73238), 'numpy.linspace', 'np.linspace', (['y_range[0]', 'y_range[1]', '(5)'], {}), '(y_range[0], y_range[1], 5)\n', (73211, 73238), True, 'import numpy as np\n'), ((75695, 75711), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {}), '((3, 2))\n', (75703, 75711), True, 'import numpy as np\n'), ((77692, 77777), 'mewarpx.utils_store.util.get_velocities', 'mwxutil.get_velocities', (['npart', 'self.T'], {'m': 'm', 'emission_type': '"""random"""', 'rseed': 'rseedv'}), "(npart, self.T, m=m, emission_type='random', rseed=rseedv\n )\n", (77714, 77777), True, 'import 
mewarpx.utils_store.util as mwxutil\n'), ((80004, 80027), 'numpy.random.random', 'np.random.random', (['npart'], {}), '(npart)\n', (80020, 80027), True, 'import numpy as np\n'), ((5569, 5584), 'mewarpx.mwxrun.mwxrun.get_it', 'mwxrun.get_it', ([], {}), '()\n', (5582, 5584), False, 'from mewarpx.mwxrun import mwxrun\n'), ((5587, 5602), 'mewarpx.mwxrun.mwxrun.get_dt', 'mwxrun.get_dt', ([], {}), '()\n', (5600, 5602), False, 'from mewarpx.mwxrun import mwxrun\n'), ((5868, 5878), 'numpy.size', 'np.size', (['w'], {}), '(w)\n', (5875, 5878), True, 'import numpy as np\n'), ((5941, 5950), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (5947, 5950), True, 'import numpy as np\n'), ((6006, 6021), 'numpy.sum', 'np.sum', (['E_total'], {}), '(E_total)\n', (6012, 6021), True, 'import numpy as np\n'), ((7101, 7124), 'numpy.array', 'np.array', (['lpdata[:, 4:]'], {}), '(lpdata[:, 4:])\n', (7109, 7124), True, 'import numpy as np\n'), ((9882, 9897), 'mewarpx.mwxrun.mwxrun.get_it', 'mwxrun.get_it', ([], {}), '()\n', (9895, 9897), False, 'from mewarpx.mwxrun import mwxrun\n'), ((10972, 11275), 'mewarpx.mwxrun.mwxrun.sim_ext.add_particles', 'mwxrun.sim_ext.add_particles', (['self.species.name'], {'x': "particles_dict['x']", 'y': "particles_dict['y']", 'z': "particles_dict['z']", 'ux': "particles_dict['vx']", 'uy': "particles_dict['vy']", 'uz': "particles_dict['vz']", 'w': "particles_dict['w']", 'E_total': "particles_dict['E_total']", 'unique_particles': 'self.unique_particles'}), "(self.species.name, x=particles_dict['x'], y=\n particles_dict['y'], z=particles_dict['z'], ux=particles_dict['vx'], uy\n =particles_dict['vy'], uz=particles_dict['vz'], w=particles_dict['w'],\n E_total=particles_dict['E_total'], unique_particles=self.unique_particles)\n", (11000, 11275), False, 'from mewarpx.mwxrun import mwxrun\n'), ((17418, 17490), 'mewarpx.mespecies.Species', 'Species', ([], {'particle_type': '"""electron"""', 'name': "(self.species.name + '_injection')"}), "(particle_type='electron', 
name=self.species.name + '_injection')\n", (17425, 17490), False, 'from mewarpx.mespecies import Species\n'), ((18398, 18435), 'numpy.random.poisson', 'np.random.poisson', (['self.ptcl_per_step'], {}), '(self.ptcl_per_step)\n', (18415, 18435), True, 'import numpy as np\n'), ((20564, 20629), 'mewarpx.mwxrun.mwxrun.calc_Schottky_weight', 'mwxrun.calc_Schottky_weight', (['self.injection_species.name', 'pre_fac'], {}), '(self.injection_species.name, pre_fac)\n', (20591, 20629), False, 'from mewarpx.mwxrun import mwxrun\n'), ((20826, 20897), 'mewarpx.mwxrun.mwxrun.sim_ext.get_particle_arrays', 'mwxrun.sim_ext.get_particle_arrays', (['self.injection_species.name', '"""w"""', '(0)'], {}), "(self.injection_species.name, 'w', 0)\n", (20860, 20897), False, 'from mewarpx.mwxrun import mwxrun\n'), ((20952, 21024), 'mewarpx.mwxrun.mwxrun.sim_ext.get_particle_arrays', 'mwxrun.sim_ext.get_particle_arrays', (['self.injection_species.name', '"""ux"""', '(0)'], {}), "(self.injection_species.name, 'ux', 0)\n", (20986, 21024), False, 'from mewarpx.mwxrun import mwxrun\n'), ((21079, 21151), 'mewarpx.mwxrun.mwxrun.sim_ext.get_particle_arrays', 'mwxrun.sim_ext.get_particle_arrays', (['self.injection_species.name', '"""uy"""', '(0)'], {}), "(self.injection_species.name, 'uy', 0)\n", (21113, 21151), False, 'from mewarpx.mwxrun import mwxrun\n'), ((21206, 21278), 'mewarpx.mwxrun.mwxrun.sim_ext.get_particle_arrays', 'mwxrun.sim_ext.get_particle_arrays', (['self.injection_species.name', '"""uz"""', '(0)'], {}), "(self.injection_species.name, 'uz', 0)\n", (21240, 21278), False, 'from mewarpx.mwxrun import mwxrun\n'), ((21721, 21811), 'mewarpx.mwxrun.mwxrun.move_particles_between_species', 'mwxrun.move_particles_between_species', (['self.injection_species.name', 'self.species.name'], {}), '(self.injection_species.name, self.\n species.name)\n', (21758, 21811), False, 'from mewarpx.mwxrun import mwxrun\n'), ((21878, 21905), 'numpy.sum', 'np.sum', (["particles_dict['w']"], {}), 
"(particles_dict['w'])\n", (21884, 21905), True, 'import numpy as np\n'), ((21933, 21966), 'numpy.sum', 'np.sum', (["particles_dict['E_total']"], {}), "(particles_dict['E_total'])\n", (21939, 21966), True, 'import numpy as np\n'), ((26201, 26309), 'warnings.warn', 'warnings.warn', (['"""Using a surface emitter with the PlasmaInjector has not been tested for accuracy."""'], {}), "(\n 'Using a surface emitter with the PlasmaInjector has not been tested for accuracy.'\n )\n", (26214, 26309), False, 'import warnings\n'), ((26986, 27050), 'mewarpx.utils_store.util.plasma_Debye_length', 'mwxutil.plasma_Debye_length', (['self.emitter.T', 'self.plasma_density'], {}), '(self.emitter.T, self.plasma_density)\n', (27013, 27050), True, 'import mewarpx.utils_store.util as mwxutil\n'), ((28333, 28380), 'mewarpx.utils_store.util.ideal_gas_density', 'mwxutil.ideal_gas_density', (['P_neutral', 'T_neutral'], {}), '(P_neutral, T_neutral)\n', (28358, 28380), True, 'import mewarpx.utils_store.util as mwxutil\n'), ((28740, 28755), 'mewarpx.mwxrun.mwxrun.get_it', 'mwxrun.get_it', ([], {}), '()\n', (28753, 28755), False, 'from mewarpx.mwxrun import mwxrun\n'), ((30360, 30677), 'mewarpx.mwxrun.mwxrun.sim_ext.add_particles', 'mwxrun.sim_ext.add_particles', (['self.species1.name'], {'x': "particles1_dict['x']", 'y': "particles1_dict['y']", 'z': "particles1_dict['z']", 'ux': "particles1_dict['vx']", 'uy': "particles1_dict['vy']", 'uz': "particles1_dict['vz']", 'w': "particles1_dict['w']", 'E_total': "particles1_dict['E_total']", 'unique_particles': 'self.unique_particles'}), "(self.species1.name, x=particles1_dict['x'], y=\n particles1_dict['y'], z=particles1_dict['z'], ux=particles1_dict['vx'],\n uy=particles1_dict['vy'], uz=particles1_dict['vz'], w=particles1_dict[\n 'w'], E_total=particles1_dict['E_total'], unique_particles=self.\n unique_particles)\n", (30388, 30677), False, 'from mewarpx.mwxrun import mwxrun\n'), ((30845, 31162), 'mewarpx.mwxrun.mwxrun.sim_ext.add_particles', 
'mwxrun.sim_ext.add_particles', (['self.species2.name'], {'x': "particles2_dict['x']", 'y': "particles2_dict['y']", 'z': "particles2_dict['z']", 'ux': "particles2_dict['vx']", 'uy': "particles2_dict['vy']", 'uz': "particles2_dict['vz']", 'w': "particles2_dict['w']", 'E_total': "particles2_dict['E_total']", 'unique_particles': 'self.unique_particles'}), "(self.species2.name, x=particles2_dict['x'], y=\n particles2_dict['y'], z=particles2_dict['z'], ux=particles2_dict['vx'],\n uy=particles2_dict['vy'], uz=particles2_dict['vz'], w=particles2_dict[\n 'w'], E_total=particles2_dict['E_total'], unique_particles=self.\n unique_particles)\n", (30873, 31162), False, 'from mewarpx.mwxrun import mwxrun\n'), ((37485, 37506), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (37504, 37506), True, 'import numpy as np\n'), ((37519, 37540), 'numpy.random.seed', 'np.random.seed', (['rseed'], {}), '(rseed)\n', (37533, 37540), True, 'import numpy as np\n'), ((37563, 37592), 'numpy.random.randint', 'np.random.randint', (['(1000000000)'], {}), '(1000000000)\n', (37580, 37592), True, 'import numpy as np\n'), ((37614, 37643), 'numpy.random.randint', 'np.random.randint', (['(1000000000)'], {}), '(1000000000)\n', (37631, 37643), True, 'import numpy as np\n'), ((38956, 38985), 'numpy.random.set_state', 'np.random.set_state', (['nprstate'], {}), '(nprstate)\n', (38975, 38985), True, 'import numpy as np\n'), ((42270, 42343), 'warnings.warn', 'warnings.warn', (['"""No conductor set for emitter. Power will not be correct."""'], {}), "('No conductor set for emitter. 
Power will not be correct.')\n", (42283, 42343), False, 'import warnings\n'), ((46509, 46530), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (46528, 46530), True, 'import numpy as np\n'), ((46543, 46564), 'numpy.random.seed', 'np.random.seed', (['rseed'], {}), '(rseed)\n', (46557, 46564), True, 'import numpy as np\n'), ((46586, 46615), 'numpy.random.randint', 'np.random.randint', (['(1000000000)'], {}), '(1000000000)\n', (46603, 46615), True, 'import numpy as np\n'), ((46637, 46666), 'numpy.random.randint', 'np.random.randint', (['(1000000000)'], {}), '(1000000000)\n', (46654, 46666), True, 'import numpy as np\n'), ((47308, 47337), 'numpy.random.set_state', 'np.random.set_state', (['nprstate'], {}), '(nprstate)\n', (47327, 47337), True, 'import numpy as np\n'), ((52100, 52121), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (52119, 52121), True, 'import numpy as np\n'), ((52134, 52155), 'numpy.random.seed', 'np.random.seed', (['rseed'], {}), '(rseed)\n', (52148, 52155), True, 'import numpy as np\n'), ((52177, 52206), 'numpy.random.randint', 'np.random.randint', (['(1000000000)'], {}), '(1000000000)\n', (52194, 52206), True, 'import numpy as np\n'), ((52228, 52257), 'numpy.random.randint', 'np.random.randint', (['(1000000000)'], {}), '(1000000000)\n', (52245, 52257), True, 'import numpy as np\n'), ((53063, 53092), 'numpy.random.set_state', 'np.random.set_state', (['nprstate'], {}), '(nprstate)\n', (53082, 53092), True, 'import numpy as np\n'), ((56238, 56259), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (56257, 56259), True, 'import numpy as np\n'), ((56272, 56293), 'numpy.random.seed', 'np.random.seed', (['rseed'], {}), '(rseed)\n', (56286, 56293), True, 'import numpy as np\n'), ((56315, 56344), 'numpy.random.randint', 'np.random.randint', (['(1000000000)'], {}), '(1000000000)\n', (56332, 56344), True, 'import numpy as np\n'), ((56366, 56395), 'numpy.random.randint', 'np.random.randint', 
(['(1000000000)'], {}), '(1000000000)\n', (56383, 56395), True, 'import numpy as np\n'), ((57022, 57051), 'numpy.random.set_state', 'np.random.set_state', (['nprstate'], {}), '(nprstate)\n', (57041, 57051), True, 'import numpy as np\n'), ((61145, 61166), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (61164, 61166), True, 'import numpy as np\n'), ((61179, 61200), 'numpy.random.seed', 'np.random.seed', (['rseed'], {}), '(rseed)\n', (61193, 61200), True, 'import numpy as np\n'), ((61337, 61366), 'numpy.random.randint', 'np.random.randint', (['(1000000000)'], {}), '(1000000000)\n', (61354, 61366), True, 'import numpy as np\n'), ((62166, 62195), 'numpy.random.set_state', 'np.random.set_state', (['nprstate'], {}), '(nprstate)\n', (62185, 62195), True, 'import numpy as np\n'), ((64980, 65039), 'numpy.meshgrid', 'np.meshgrid', (['self.xvec', 'self.yvec', 'self.zvec'], {'indexing': '"""xy"""'}), "(self.xvec, self.yvec, self.zvec, indexing='xy')\n", (64991, 65039), True, 'import numpy as np\n'), ((65692, 65717), 'numpy.arange', 'np.arange', (['self.xvec.size'], {}), '(self.xvec.size)\n', (65701, 65717), True, 'import numpy as np\n'), ((65871, 65896), 'numpy.arange', 'np.arange', (['self.zvec.size'], {}), '(self.zvec.size)\n', (65880, 65896), True, 'import numpy as np\n'), ((65973, 66096), 'numpy.array', 'np.array', (['[(self.contours[1:, 0] + self.contours[:-1, 0]) / 2.0, (self.contours[1:, 1\n ] + self.contours[:-1, 1]) / 2.0]'], {}), '([(self.contours[1:, 0] + self.contours[:-1, 0]) / 2.0, (self.\n contours[1:, 1] + self.contours[:-1, 1]) / 2.0])\n', (65981, 66096), True, 'import numpy as np\n'), ((66134, 66241), 'numpy.array', 'np.array', (['[self.contours[1:, 0] - self.contours[:-1, 0], self.contours[1:, 1] - self.\n contours[:-1, 1]]'], {}), '([self.contours[1:, 0] - self.contours[:-1, 0], self.contours[1:, 1\n ] - self.contours[:-1, 1]])\n', (66142, 66241), True, 'import numpy as np\n'), ((66540, 66565), 'numpy.cumsum', 'np.cumsum', 
(['self.distances'], {}), '(self.distances)\n', (66549, 66565), True, 'import numpy as np\n'), ((67446, 67467), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (67465, 67467), True, 'import numpy as np\n'), ((67480, 67501), 'numpy.random.seed', 'np.random.seed', (['rseed'], {}), '(rseed)\n', (67494, 67501), True, 'import numpy as np\n'), ((67638, 67667), 'numpy.random.randint', 'np.random.randint', (['(1000000000)'], {}), '(1000000000)\n', (67655, 67667), True, 'import numpy as np\n'), ((67829, 67850), 'numpy.random.rand', 'np.random.rand', (['npart'], {}), '(npart)\n', (67843, 67850), True, 'import numpy as np\n'), ((67884, 68028), 'mewarpx.utils_store.util.get_velocities', 'mwxutil.get_velocities', ([], {'num_samples': 'npart', 'T': 'self.T', 'm': 'm', 'rseed': 'rseedv', 'transverse_fac': 'self.transverse_fac', 'emission_type': 'self.emission_type'}), '(num_samples=npart, T=self.T, m=m, rseed=rseedv,\n transverse_fac=self.transverse_fac, emission_type=self.emission_type)\n', (67906, 68028), True, 'import mewarpx.utils_store.util as mwxutil\n'), ((68794, 68823), 'numpy.random.set_state', 'np.random.set_state', (['nprstate'], {}), '(nprstate)\n', (68813, 68823), True, 'import numpy as np\n'), ((69387, 69405), 'numpy.dot', 'np.dot', (['Cvec', 'Cvec'], {}), '(Cvec, Cvec)\n', (69393, 69405), True, 'import numpy as np\n'), ((69427, 69455), 'numpy.arccos', 'np.arccos', (['(1.0 - Cvec2 / 2.0)'], {}), '(1.0 - Cvec2 / 2.0)\n', (69436, 69455), True, 'import numpy as np\n'), ((69886, 69908), 'numpy.dot', 'np.dot', (['R', 'vels[ii, :]'], {}), '(R, vels[ii, :])\n', (69892, 69908), True, 'import numpy as np\n'), ((71833, 71892), 'numpy.meshgrid', 'np.meshgrid', (['self.xvec', 'self.yvec', 'self.zvec'], {'indexing': '"""xy"""'}), "(self.xvec, self.yvec, self.zvec, indexing='xy')\n", (71844, 71892), True, 'import numpy as np\n'), ((73414, 73448), 'numpy.round', 'np.round', (['(major_xticks * x_step)', '(8)'], {}), '(major_xticks * x_step, 8)\n', (73422, 73448), 
True, 'import numpy as np\n'), ((73574, 73608), 'numpy.round', 'np.round', (['(major_yticks * y_step)', '(8)'], {}), '(major_yticks * y_step, 8)\n', (73582, 73608), True, 'import numpy as np\n'), ((76134, 76180), 'numpy.prod', 'np.prod', (['(self.bounds[:, 1] - self.bounds[:, 0])'], {}), '(self.bounds[:, 1] - self.bounds[:, 0])\n', (76141, 76180), True, 'import numpy as np\n'), ((77365, 77386), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (77384, 77386), True, 'import numpy as np\n'), ((77399, 77420), 'numpy.random.seed', 'np.random.seed', (['rseed'], {}), '(rseed)\n', (77413, 77420), True, 'import numpy as np\n'), ((77557, 77586), 'numpy.random.randint', 'np.random.randint', (['(1000000000)'], {}), '(1000000000)\n', (77574, 77586), True, 'import numpy as np\n'), ((77829, 77858), 'numpy.random.set_state', 'np.random.set_state', (['nprstate'], {}), '(nprstate)\n', (77848, 77858), True, 'import numpy as np\n'), ((78733, 78795), 'numpy.random.uniform', 'np.random.uniform', (['self.bounds[1, 0]', 'self.bounds[1, 1]', 'npart'], {}), '(self.bounds[1, 0], self.bounds[1, 1], npart)\n', (78750, 78795), True, 'import numpy as np\n'), ((79055, 79072), 'numpy.array', 'np.array', (['xyz_pos'], {}), '(xyz_pos)\n', (79063, 79072), True, 'import numpy as np\n'), ((79392, 79454), 'numpy.random.uniform', 'np.random.uniform', (['self.bounds[0, 0]', 'self.bounds[0, 1]', 'npart'], {}), '(self.bounds[0, 0], self.bounds[0, 1], npart)\n', (79409, 79454), True, 'import numpy as np\n'), ((79511, 79573), 'numpy.random.uniform', 'np.random.uniform', (['self.bounds[1, 0]', 'self.bounds[1, 1]', 'npart'], {}), '(self.bounds[1, 0], self.bounds[1, 1], npart)\n', (79528, 79573), True, 'import numpy as np\n'), ((79804, 79866), 'numpy.random.uniform', 'np.random.uniform', (['self.bounds[1, 0]', 'self.bounds[1, 1]', 'npart'], {}), '(self.bounds[1, 0], self.bounds[1, 1], npart)\n', (79821, 79866), True, 'import numpy as np\n'), ((80209, 80237), 'numpy.array', 'np.array', (['[xpos, 
ypos, zpos]'], {}), '([xpos, ypos, zpos])\n', (80217, 80237), True, 'import numpy as np\n'), ((5275, 5285), 'numpy.size', 'np.size', (['w'], {}), '(w)\n', (5282, 5285), True, 'import numpy as np\n'), ((5390, 5400), 'numpy.size', 'np.size', (['w'], {}), '(w)\n', (5397, 5400), True, 'import numpy as np\n'), ((5847, 5857), 'numpy.size', 'np.size', (['w'], {}), '(w)\n', (5854, 5857), True, 'import numpy as np\n'), ((20426, 20484), 'numpy.sqrt', 'np.sqrt', (['(constants.e / (4.0 * np.pi * constants.epsilon_0))'], {}), '(constants.e / (4.0 * np.pi * constants.epsilon_0))\n', (20433, 20484), True, 'import numpy as np\n'), ((21424, 21433), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (21430, 21433), True, 'import numpy as np\n'), ((34674, 34689), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (34686, 34689), True, 'import numpy as np\n'), ((66672, 66703), 'numpy.tile', 'np.tile', (['self.distances', '(2, 1)'], {}), '(self.distances, (2, 1))\n', (66679, 66703), True, 'import numpy as np\n'), ((78392, 78456), 'numpy.random.uniform', 'np.random.uniform', (['self.bounds[ii, 0]', 'self.bounds[ii, 1]', 'npart'], {}), '(self.bounds[ii, 0], self.bounds[ii, 1], npart)\n', (78409, 78456), True, 'import numpy as np\n'), ((78613, 78685), 'numpy.random.uniform', 'np.random.uniform', (['(self.bounds[0, 1] ** 2)', '(self.bounds[0, 0] ** 2)', 'npart'], {}), '(self.bounds[0, 1] ** 2, self.bounds[0, 0] ** 2, npart)\n', (78630, 78685), True, 'import numpy as np\n'), ((78928, 78990), 'numpy.random.uniform', 'np.random.uniform', (['self.bounds[2, 0]', 'self.bounds[2, 1]', 'npart'], {}), '(self.bounds[2, 0], self.bounds[2, 1], npart)\n', (78945, 78990), True, 'import numpy as np\n'), ((79684, 79756), 'numpy.random.uniform', 'np.random.uniform', (['(self.bounds[0, 1] ** 2)', '(self.bounds[0, 0] ** 2)', 'npart'], {}), '(self.bounds[0, 1] ** 2, self.bounds[0, 0] ** 2, npart)\n', (79701, 79756), True, 'import numpy as np\n'), ((79928, 79941), 'numpy.cos', 'np.cos', (['theta'], {}), 
'(theta)\n', (79934, 79941), True, 'import numpy as np\n'), ((79965, 79978), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (79971, 79978), True, 'import numpy as np\n'), ((7193, 7227), 'numpy.array', 'np.array', (['lpdata[:, ii]'], {'copy': '(True)'}), '(lpdata[:, ii], copy=True)\n', (7201, 7227), True, 'import numpy as np\n'), ((15727, 15764), 'mewarpx.utils_store.util.J_RD', 'mwxutil.J_RD', (['self.T', 'self.WF', 'self.A'], {}), '(self.T, self.WF, self.A)\n', (15739, 15764), True, 'import mewarpx.utils_store.util as mwxutil\n'), ((15961, 15998), 'mewarpx.utils_store.util.J_RD', 'mwxutil.J_RD', (['self.T', 'self.WF', 'self.A'], {}), '(self.T, self.WF, self.A)\n', (15973, 15998), True, 'import mewarpx.utils_store.util as mwxutil\n'), ((78878, 78891), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (78884, 78891), True, 'import numpy as np\n'), ((78897, 78910), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (78903, 78910), True, 'import numpy as np\n'), ((80057, 80091), 'numpy.arccos', 'np.arccos', (['(1 - 2.0 * z_random_draw)'], {}), '(1 - 2.0 * z_random_draw)\n', (80066, 80091), True, 'import numpy as np\n'), ((68520, 68541), 'numpy.random.rand', 'np.random.rand', (['npart'], {}), '(npart)\n', (68534, 68541), True, 'import numpy as np\n'), ((69717, 69730), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (69723, 69730), True, 'import numpy as np\n'), ((69736, 69749), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (69742, 69749), True, 'import numpy as np\n'), ((69839, 69852), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (69845, 69852), True, 'import numpy as np\n'), ((69820, 69833), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (69826, 69833), True, 'import numpy as np\n')] |
import torch
import numpy as np
from .logger import logger
from tqdm import tqdm
from .util import toVariable
log = logger()
from .util import toTensor
import imageio
import cv2
def writeTensor(save_path, tensor, nRow=16, row_first=False):
    '''
    Tile a batch of images into a single grid image and write it with imageio.

    :param save_path: output image file path
    :param tensor: nImage x C x height x width (C = 3 or 1); values are
        expected to already be in the 0-255 range (cast to uint8 on save)
    :param nRow: images per group; only the first (nSample // nRow) * nRow
        images are written (the trailing remainder is dropped, as before)
    :param row_first: if False, each group of nRow images is stacked along
        the height axis and groups are concatenated along the width axis;
        if True, the two axes are swapped
    '''
    tensor = toTensor(tensor)
    nSample = tensor.size()[0]
    # plain integer division replaces np.int16(nSample / nRow): identical
    # truncation for non-negative counts, without int16 overflow at >32767
    nCol = nSample // nRow
    groups = []  # renamed from `all`, which shadowed the builtin
    k = 0
    for iCol in range(nCol):
        strip = []
        for iRow in range(nRow):
            now = tensor[k, :, :, :].permute(1, 2, 0)  # CHW -> HWC
            strip.append(now)
            k += 1
        groups.append(torch.cat(strip, dim=1 if row_first else 0))
    grid = torch.cat(groups, dim=0 if row_first else 1)
    grid = grid.cpu().numpy().astype(np.uint8)
    print('saving tensor to %s' % save_path)
    imageio.imwrite(save_path, grid)
def untransformTensor(vggImageTensor):
    '''
    Undo ImageNet/VGG normalization and rescale to the 0-255 range.

    :param vggImageTensor: nImage x 3 x height x width tensor that was
        normalized with the ImageNet per-channel mean/std
    :return: numpy array of the same shape with values in [0, 255]
    '''
    # detach + clone so the caller's tensor is never modified: the original
    # code used .cpu(), which returns the *same* object for a CPU tensor,
    # so the in-place *=/+= and the clamp below corrupted the input
    vggImageTensor = vggImageTensor.detach().cpu().clone()
    mean = torch.Tensor((0.485, 0.456, 0.406))
    stdv = torch.Tensor((0.229, 0.224, 0.225))
    vggImageTensor *= stdv.view(1, 3, 1, 1).expand_as(vggImageTensor)
    vggImageTensor += mean.view(1, 3, 1, 1).expand_as(vggImageTensor)
    out = vggImageTensor.numpy()
    out[out > 1.] = 1.   # clamp to the valid [0, 1] range before scaling
    out[out < 0.] = 0.
    out = out * 255
    return out
def untransformVariable(vggImageVariable):
    """Undo ImageNet normalization on a CUDA variable, in place.

    Scales by the per-channel std, shifts by the mean, then clamps the
    underlying data to [0, 1]. The input is mutated and also returned.
    """
    stdv = toVariable(torch.Tensor((0.229, 0.224, 0.225))).cuda()
    mean = toVariable(torch.Tensor((0.485, 0.456, 0.406))).cuda()
    vggImageVariable *= stdv.view(1, 3, 1, 1).expand_as(vggImageVariable)
    vggImageVariable += mean.view(1, 3, 1, 1).expand_as(vggImageVariable)
    vggImageVariable[vggImageVariable.data > 1.] = 1.
    vggImageVariable[vggImageVariable.data < 0.] = 0.
    return vggImageVariable
| [
"torch.cat",
"numpy.int16",
"imageio.imwrite",
"torch.Tensor"
] | [((452, 476), 'numpy.int16', 'np.int16', (['(nSample / nRow)'], {}), '(nSample / nRow)\n', (460, 476), True, 'import numpy as np\n'), ((1027, 1058), 'imageio.imwrite', 'imageio.imwrite', (['save_path', 'all'], {}), '(save_path, all)\n', (1042, 1058), False, 'import imageio\n'), ((1333, 1368), 'torch.Tensor', 'torch.Tensor', (['(0.485, 0.456, 0.406)'], {}), '((0.485, 0.456, 0.406))\n', (1345, 1368), False, 'import torch\n'), ((1380, 1415), 'torch.Tensor', 'torch.Tensor', (['(0.229, 0.224, 0.225)'], {}), '((0.229, 0.224, 0.225))\n', (1392, 1415), False, 'import torch\n'), ((1816, 1851), 'torch.Tensor', 'torch.Tensor', (['(0.485, 0.456, 0.406)'], {}), '((0.485, 0.456, 0.406))\n', (1828, 1851), False, 'import torch\n'), ((1863, 1898), 'torch.Tensor', 'torch.Tensor', (['(0.229, 0.224, 0.225)'], {}), '((0.229, 0.224, 0.225))\n', (1875, 1898), False, 'import torch\n'), ((865, 886), 'torch.cat', 'torch.cat', (['all'], {'dim': '(1)'}), '(all, dim=1)\n', (874, 886), False, 'import torch\n'), ((911, 932), 'torch.cat', 'torch.cat', (['all'], {'dim': '(0)'}), '(all, dim=0)\n', (920, 932), False, 'import torch\n'), ((747, 769), 'torch.cat', 'torch.cat', (['all_'], {'dim': '(0)'}), '(all_, dim=0)\n', (756, 769), False, 'import torch\n'), ((805, 827), 'torch.cat', 'torch.cat', (['all_'], {'dim': '(1)'}), '(all_, dim=1)\n', (814, 827), False, 'import torch\n')] |
import numpy as np
import pandas as pd
from ..task_type import Task
class Parser(object):
    """Prepare a pandas DataFrame for modelling.

    Splits features from the target (assumed to be the last column),
    one-hot encodes categorical columns, fills missing numeric values,
    and infers the task type (classification vs. regression) from the
    target column. Per-column encoding state is kept on the instance so
    the same scheme can be reapplied later.
    """
    def __init__(self):
        # Task type (Task.CLASSIFICATION / Task.REGRESSION or 'infer');
        # set in parse() and refined while parsing the target column.
        self.ttype = None
        # Mapping from original target labels to integer class ids
        # (classification only); built in _parse_object().
        self.target_mapper = None
        # self.categorical_thres = 10
        # self.replace_strategy = 'median'
        # self.categorical_cols = {}
        # self.value_cols = {}
        # Dispatch table: column dtype name -> parser method.
        self._parser = {
            'object': self._parse_object,
            'int': self._parse_int,
            'int32': self._parse_int,
            'int64': self._parse_int,
            'float': self._parse_float,
            'float32': self._parse_float,
            'float64': self._parse_float
        }
    def reindex(self, df, target_name):
        """Move column *target_name* to the last position of *df*.

        If the name is not present, print a warning and return *df*
        unchanged.
        """
        header = list(df)
        try:
            target = header.pop(header.index(target_name))
        except ValueError:
            print('[Warning] '
                  'The name \'{}\' is not in the list. '
                  'Skipped.'.format(target_name))
            return df
        header.append(target)
        df = df.reindex(header, axis=1)
        return df
    def parse(self, df,
              ignore_cols=[],
              ttype='infer',
              categorical_thres=10,
              replace_strategy='median',
              replace_values=None,
              categorical_cols=None,
              value_cols=None,
              include_target=True,
              verbose=1):
        """Parse *df* and return an ``(X, y)`` tuple.

        ``X`` is the dummy-encoded, NaN-filled feature frame produced by
        ``parse_data``; ``y`` is the parsed target series, or ``None``
        when *include_target* is False. The encoding parameters are
        stored on the instance for reuse.
        """
        # NOTE(review): ignore_cols=[] is a mutable default argument; it
        # is only read here, but ignore_cols=None would be safer upstream.
        self.ttype = ttype
        self.categorical_thres = categorical_thres
        self.replace_strategy = replace_strategy
        self.categorical_cols = categorical_cols
        self.value_cols = value_cols
        if len(ignore_cols) > 0:
            df = self.drop_ignore_cols(df, ignore_cols)
        X = self.parse_data(df,
                            nan_to_category=False,
                            replace_strategy=replace_strategy,
                            replace_values=replace_values,
                            include_target=include_target,
                            verbose=verbose)
        if include_target:
            y = self.parse_target(df, verbose=verbose)
        else:
            y = None
        return (X, y)
    def drop_ignore_cols(self, df, ignore_cols):
        """Drop *ignore_cols* from *df* in place and return it.

        Integer entries are treated as positional indices; anything else
        as column labels (the type of the first entry decides for all).
        """
        dtype = type(ignore_cols[0]) # infer type of ignore_cols
        cols = df.columns[ignore_cols] if dtype is int else ignore_cols
        df.drop(cols, axis='columns', inplace=True)
        return df
    def parse_data(self, df,
                   nan_to_category=False,
                   replace_strategy='median',
                   replace_values=None,
                   include_target=True,
                   raise_error=True,
                   verbose=1):
        """Parse the feature columns of *df*.

        Categorical columns are recorded in ``self.categorical_cols``
        (with their dummy levels) and numeric columns get a fill value in
        ``self.value_cols``; everything is then dummy-encoded/NaN-filled
        via ``_convert_data``.
        """
        if include_target:
            # last column is the target; parse everything before it
            X = df.iloc[:, :-1]
            if self.categorical_cols is None:
                self.categorical_cols = {}
            if self.value_cols is None:
                self.value_cols = {}
        else:
            # NOTE(review): when include_target is False the per-column
            # dicts are not initialised here; self.value_cols must not be
            # None or the float branch below raises — confirm callers.
            X = df
        header = list(X)
        # TODO: drop columns with too-many-nan values
        for name in header:
            _x = X[name]
            _type_x = str(_x.dtype)
            if _type_x not in self._parser:
                if raise_error:
                    raise ValueError('Unknown dtype detected on '
                                     '{}: {}'.format(name, _type_x))
                else:
                    # silently drop columns with unsupported dtypes
                    X.drop(columns=[name], inplace=True)
                    continue
            _parser = self._parser[_type_x]
            _x = _parser(_x, which='data')
            X[name] = _x
            if _x.dtype == object or _x.dtype == int:
                # categorical column: remember its dummy levels
                _converted = list(pd.get_dummies(_x, dummy_na=True))
                if self.categorical_cols is not None:
                    self.categorical_cols[name] = _converted
            else: # float
                # numeric column: remember its NaN replacement value
                if replace_values is not None:
                    _value = replace_values[name]
                else:
                    if replace_strategy == 'mean':
                        _value = _x.mean()
                    else: # median
                        _value = _x.median()
                # print(name)
                self.value_cols[name] = _value
        X = self._convert_data(X,
                               nan_to_category=nan_to_category,
                               replace_strategy=replace_strategy,
                               replace_values=replace_values)
        return X
    def parse_target(self, df, verbose=1):
        """Parse the last column of *df* as the prediction target.

        Rows with NaN targets are dropped from the local copies only.
        """
        y = df.iloc[:, -1]
        invalid_count = y.isnull().sum()
        if invalid_count > 0:
            # NOTE(review): the drop below only rebinds the local df/y;
            # features returned earlier by parse_data keep those rows, so
            # X and y lengths can disagree — confirm intended behaviour.
            _isfinite = np.isfinite(y)
            df = df[_isfinite] # drop na
            y = y[_isfinite]
            if verbose:
                print('Dropped {} row(s) '
                      'with nan-output value.'.format(invalid_count))
        type_y = str(y.dtype)
        if type_y not in self._parser:
            raise ValueError('Unknown dtype specified on output: '
                             '{}'.format(type_y))
        parser = self._parser[type_y]
        y = parser(y, which='target')
        return y
    def _convert_data(self, df,
                      nan_to_category=False,
                      replace_strategy='median',
                      replace_values=None):
        """Dummy-encode recorded categorical columns and fill NaNs."""
        cols = list(self.categorical_cols.keys())
        if nan_to_category:
            # encode NaN as its own dummy level
            df = pd.get_dummies(df, dummy_na=True, columns=cols)
        else:
            # fill categorical NaNs first, then encode without a NaN level
            if replace_values is not None:
                _values = replace_values
            else:
                _values = df[cols].median()
            df[cols] = df[cols].fillna(value=_values)
            df = pd.get_dummies(df, dummy_na=False, columns=cols)
        # numeric columns: fill with the values recorded in parse_data
        cols = list(self.value_cols.keys())
        df[cols] = df[cols].fillna(value=self.value_cols)
        return df
    def _parse_object(self, df, which='data'):
        """Handle object-dtype columns; map target labels to class ids."""
        if which == 'data':
            # object feature columns pass through (dummy-encoded later)
            return df
        if self.ttype == Task.REGRESSION:
            # NOTE(review): missing space between 'float' and 'on' in the
            # concatenated message below.
            raise AttributeError('Output values must be integer or float'
                                 'on regression problem.')
        self.ttype = Task.CLASSIFICATION
        values = np.sort(df.dropna().unique())
        # stable label -> id mapping in sorted label order
        self.target_mapper = {key: value for value, key in enumerate(values)}
        return df.map(self.target_mapper)
    def _parse_int(self, df, which='data'):
        """Handle integer columns: categorical vs numeric, data vs target."""
        if which == 'data':
            _values = np.sort(df.dropna().unique())
            # few distinct values (or previously recorded as categorical)
            # -> treat as a categorical column encoded as strings
            if len(_values) <= self.categorical_thres \
                    or (self.categorical_cols is not None
                        and df.name in self.categorical_cols):
                df.update(df[~df.isnull()].astype(int).astype(str))
                return df
            else: # many distinct values -> treat as numeric
                return df.astype(float)
        if self.ttype == Task.CLASSIFICATION:
            return self._parse_object(df)
        elif self.ttype == Task.REGRESSION:
            return df.astype(float)
        else: # infer
            # low cardinality relative to row count -> classification
            if len(df.dropna().unique()) / len(df) < 0.3:
                self.ttype = Task.CLASSIFICATION
            else:
                self.ttype = Task.REGRESSION
            return df
    def _parse_float(self, df, which='data'):
        """Handle float columns; integer-valued floats fall back to int."""
        mods = np.mod(df, 1)
        mods = mods[~np.isnan(mods)] # drop na
        if mods.sum() > 0.:
            # genuine fractional values -> keep as float
            if which == 'target':
                if self.ttype != Task.CLASSIFICATION:
                    self.ttype = Task.REGRESSION
            return df
        else:
            # all values are whole numbers -> treat as an integer column
            if which == 'data':
                return self._parse_int(df)
            if which == 'target':
                self.ttype = Task.CLASSIFICATION
            return self._parse_int(df.astype(int))
| [
"pandas.get_dummies",
"numpy.isnan",
"numpy.isfinite",
"numpy.mod"
] | [((7180, 7193), 'numpy.mod', 'np.mod', (['df', '(1)'], {}), '(df, 1)\n', (7186, 7193), True, 'import numpy as np\n'), ((4561, 4575), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (4572, 4575), True, 'import numpy as np\n'), ((5332, 5379), 'pandas.get_dummies', 'pd.get_dummies', (['df'], {'dummy_na': '(True)', 'columns': 'cols'}), '(df, dummy_na=True, columns=cols)\n', (5346, 5379), True, 'import pandas as pd\n'), ((5611, 5659), 'pandas.get_dummies', 'pd.get_dummies', (['df'], {'dummy_na': '(False)', 'columns': 'cols'}), '(df, dummy_na=False, columns=cols)\n', (5625, 5659), True, 'import pandas as pd\n'), ((7215, 7229), 'numpy.isnan', 'np.isnan', (['mods'], {}), '(mods)\n', (7223, 7229), True, 'import numpy as np\n'), ((3602, 3635), 'pandas.get_dummies', 'pd.get_dummies', (['_x'], {'dummy_na': '(True)'}), '(_x, dummy_na=True)\n', (3616, 3635), True, 'import pandas as pd\n')] |
from torch import nn
import numpy as np
import torch
from utils import (
add_device,
get_logger,
)
logger = get_logger()
def train(model, dataloader, input_key, target_key, optimizer, loss_func,
          device=torch.device('cpu')):
    """Run one training epoch over *dataloader*, updating *model* in place.

    After every batch, the running mean loss is pushed to the (tqdm)
    dataloader's postfix for display.
    """
    running_loss = 0.0
    for batch_idx, batch in enumerate(dataloader, start=1):
        inputs = add_device(batch[input_key], device)
        targets = add_device(batch[target_key], device)
        optimizer.zero_grad()
        predictions = model(inputs)
        batch_loss = loss_func(predictions, targets)
        batch_loss.backward()
        optimizer.step()
        running_loss += batch_loss.item()
        dataloader.set_postfix(
            log={'train_loss': f'{running_loss / batch_idx:.5f}'})
def valid(model, dataloader, input_key, target_key,
          device=torch.device('cpu'), activation=None):
    """Collect model outputs and targets over *dataloader*.

    :param activation: optional callable applied to the raw model output
        (e.g. a sigmoid); skipped when falsy
    :return: ``(outputs_data, targets_data)`` as plain Python lists
    """
    outputs_data = []
    targets_data = []
    # note: the caller is expected to wrap this in torch.no_grad()/eval()
    for data in dataloader:  # enumerate index was unused; iterate directly
        inputs = add_device(data[input_key], device)
        targets = add_device(data[target_key], device)
        outputs = model(inputs)
        if activation:
            outputs = activation(outputs)
        outputs_data.extend(outputs.tolist())
        targets_data.extend(targets.tolist())
    return outputs_data, targets_data
def pre_train(epochs, model, dataloader,
              optimizer, loss_func,
              input_key, target_key,
              device=torch.device('cpu'),
              patience=5, metrics=None, activation=None):
    """Train *model* with per-epoch validation and early stopping.

    :param epochs: maximum number of epochs to run
    :param model: torch module to train (moved to *device* here)
    :param dataloader: loader whose ``dataset`` exposes ``train()`` /
        ``valid()`` methods to switch the active split
    :param optimizer: torch optimizer for *model*
    :param loss_func: criterion used for both training and validation
    :param input_key: key of the input field in each batch dict
    :param target_key: key of the target field in each batch dict
    :param device: device to run on (default CPU)
    :param patience: epochs without improvement before early stopping
    :param metrics: optional callable ``metrics(targets, outputs)`` whose
        score is only logged, not used for stopping
    :param activation: optional activation applied to model outputs during
        validation (forwarded to ``valid``)
    :return: value returned by the EarlyStopping callback for the last
        evaluated epoch (presumably the best model so far — the
        EarlyStopping implementation is defined elsewhere)
    """
    from tqdm import tqdm  # local import: tqdm only needed when training
    logger.info(model)
    model.to(device)
    early_stopping = EarlyStopping(patience=patience, verbose=True)
    criterion = loss_func
    # NOTE(review): if epochs == 0 the loop body never runs and
    # `best_model` is unbound at the return — confirm callers pass >= 1.
    for epoch in range(epochs):
        model.train()
        dataloader.dataset.train()  # switch dataset to the training split
        train_data = tqdm(dataloader)
        train_data.set_description(
            f"[Epoch:{epoch+1:04d}/{epochs:04d} " +
            f"lr:{optimizer.param_groups[0]['lr']:.5f}]"
        )
        train(model, train_data, input_key, target_key,
              optimizer, criterion, device)
        with torch.no_grad():
            model.eval()
            valid_loss = 0.0
            dataloader.dataset.valid()  # switch to the validation split
            outputs_data, targets_data = valid(model,
                                               dataloader,
                                               input_key,
                                               target_key,
                                               device=device,
                                               activation=activation)
            # loss is computed once over the whole epoch's predictions
            valid_loss = criterion(torch.tensor(outputs_data),
                                   torch.tensor(targets_data))
            if metrics is None:
                s = f'[Epoch:{epoch+1:04d}|valid| / '\
                    f'loss:{valid_loss:.6f}]'
            else:
                score = metrics(np.array(targets_data),
                                np.array(outputs_data))
                s = f'[Epoch:{epoch+1:04d}|valid| / '\
                    f'loss:{valid_loss:.6f} / '\
                    f'metrics:{score:.6f}]'
            logger.info(s)
        best_model = early_stopping(valid_loss, model)
        if early_stopping.early_stop:
            logger.info("Early stopping")
            break
    return best_model
def wrap_phi_to_2pi_torch(x):
    """Wrap the angles in tensor ``x`` into the interval [-pi, pi]."""
    import math
    two_pi = 2 * math.pi
    inner = torch.fmod(x + math.pi, two_pi)
    return torch.fmod(inner + two_pi, two_pi) - math.pi
def wrap_phi_to_2pi_numpy(x):
    """Wrap the angles in array ``x`` into the interval [-pi, pi]."""
    import math
    two_pi = 2 * math.pi
    inner = np.fmod(x + math.pi, two_pi)
    return np.fmod(inner + two_pi, two_pi) - math.pi
def set_phi_within_valid_range(x):
    """Wrap the phi column (index 2) of ``x`` into [-pi, pi].

    Works for both ``torch.Tensor`` and ``numpy.ndarray``; any other
    input type is returned untouched.  The result is the concatenation
    of columns 0:2 with the wrapped column 2, i.e. always three columns.
    """
    if isinstance(x, torch.Tensor):
        wrapped = wrap_phi_to_2pi_torch(x[:, 2]).unsqueeze(1)
        return torch.cat([x[:, 0:2], wrapped], axis=1)
    if isinstance(x, np.ndarray):
        wrapped = np.expand_dims(wrap_phi_to_2pi_numpy(x[:, 2]), 1)
        return np.concatenate([x[:, 0:2], wrapped], axis=1)
    return x
class EarlyStopping:
    """Track validation loss and flag when training should stop.

    Keeps a deep copy of the best model seen so far; after ``patience``
    consecutive non-improving epochs, ``early_stop`` is set to True.

    Parameters
    ----------
    patience : int
        Number of non-improving epochs tolerated before stopping.
    verbose : bool
        If True, log a message each time the model checkpoint improves.
    save : bool
        If True, also write the model state dict to ``path``.
    path : str
        Directory used for the checkpoint file when ``save`` is True.
    """

    def __init__(self, patience=7, verbose=False, save=False, path='./logs'):
        self.patience = patience
        self.verbose = verbose
        self.save = save
        self.path = path
        self.counter = 0
        self.best_score = None
        self.early_stop = False
        # np.inf: np.Inf was removed in NumPy 2.0 (same value, new spelling)
        self.val_loss_min = np.inf
        self.best_model = None

    def __call__(self, val_loss, model):
        """Record this epoch's loss; return the best model snapshot so far."""
        from copy import deepcopy
        score = -val_loss
        if self.best_score is not None and score <= self.best_score:
            # No improvement this epoch.
            self.counter += 1
            logger.info(f'EarlyStopping counter: {self.counter} out of {self.patience}')
            if self.counter >= self.patience:
                self.early_stop = True
        else:
            # New best (or very first) score: remember it and snapshot the model.
            self.best_score = score
            self.counter = 0
            self.best_model = deepcopy(
                self.save_checkpoint(val_loss, model)
            )
        return self.best_model

    def save_checkpoint(self, val_loss, model):
        """Record the improved loss (optionally persisting the model)."""
        if self.verbose:
            logger.info(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). ' +
                        'updating model ...')
        if self.save:
            from os.path import join
            save_path = join(self.path, 'checkpoint.pt')
            torch.save(model.state_dict(), save_path)
        self.val_loss_min = val_loss
        return model
class MLPBlock(nn.Module):
    """Fully connected block defined by a list of layer widths.

    Every transition ``layers[i] -> layers[i+1]`` becomes an
    ``nn.Linear`` (optionally followed by ``nn.BatchNorm1d``).  Hidden
    layers use ``activation``; the output layer uses ``activation_last``
    or ``nn.Identity`` when it is None.
    """

    def __init__(self,
                 layers,
                 activation,
                 activation_last=None,
                 batch_norm=False,
                 initialize=True,
                 *args,
                 **kwargs):
        super(MLPBlock, self).__init__(*args, **kwargs)
        from utils import get_module
        modules = []
        n_links = len(layers) - 1
        for idx in range(n_links):
            modules.append(nn.Linear(layers[idx], layers[idx + 1]))
            if batch_norm:
                modules.append(nn.BatchNorm1d(layers[idx + 1]))
            if idx == n_links - 1:
                # output layer: default to Identity when no explicit choice
                last_name = 'Identity' if activation_last is None else activation_last
                modules.append(get_module([nn], last_name)())
            else:
                modules.append(get_module([nn], activation)())
        self._layers = nn.Sequential(*modules)
        if initialize:
            self.apply(self._init_weights)

    @staticmethod
    def _init_weights(m):
        # Xavier init for every Linear layer; biases start at zero.
        if type(m) == nn.Linear:
            nn.init.xavier_uniform_(m.weight)
            nn.init.zeros_(m.bias)

    def forward(self, x):
        return self._layers(x)
class Tau4vec_MLPTask(nn.Module):
    """MLP-based tau four-vector regressor.

    An MLP over the flattened calorimeter image produces per-jet
    features, which a second MLP combines with the seed jet four-vector;
    the seed jet values act as a residual baseline that is added back to
    the network output.
    """

    def __init__(self,
                 layers_images=[768, 32, 32, 32, 4],
                 layers_calib=[8, 32, 32, 32, 4],
                 activation='ReLU',
                 batch_norm=False,
                 **kwargs):
        # NOTE: the list defaults are shared between instances but are
        # only read, never mutated.
        super(Tau4vec_MLPTask, self).__init__(**kwargs)
        self._mlp1 = MLPBlock(layers=layers_images,
                              activation=activation,
                              activation_last='Identity',
                              batch_norm=batch_norm)
        self._mlp2 = MLPBlock(layers=layers_calib,
                              activation=activation,
                              activation_last='Identity',
                              batch_norm=batch_norm)
        self._layers_calib = layers_calib
        self._len_output_vers = layers_calib[-1] * 2

    def forward(self, x):
        # x[0]: calorimeter images, reshaped to (-1, 3, 16, 16) then flattened
        fig = x[0].reshape(-1, 3, 16, 16)
        x_1 = fig.reshape(fig.size(0), -1)
        x_1 = self._mlp1(x_1)
        # x[1]: seed jet four-vectors, one row of 4 values per jet
        input_jet_reshape_4 = x[1].reshape(-1, 4)
        input_jet_reshape_3 = input_jet_reshape_4[:, :3]  # mass is not used
        x = torch.cat((x_1, input_jet_reshape_4), dim=1)
        x = self._mlp2(x)
        # residual connection: add the seed jet values back
        if self._layers_calib[-1] == 4:
            x = x + input_jet_reshape_4
        elif self._layers_calib[-1] == 3:
            x = x + input_jet_reshape_3
        x = set_phi_within_valid_range(x)
        # fold the per-jet axis back into the feature axis (factor 2 --
        # presumably two jets per event; confirm against the dataloader)
        output = x.reshape(-1, self._layers_calib[-1] * 2)
        return output
class Conv2DBlock(nn.Module):
    """Convolutional feature extractor described by a layer spec list.

    Each spec entry is ``('conv2d', kwargs)`` or ``('maxpooling2d',
    kwargs)``; missing kwargs fall back to per-layer-type defaults.
    Unknown layer names raise ``ValueError``.
    """

    def __init__(self, layers_conv2d=None, initialize=True, *args, **kwargs):
        super(Conv2DBlock, self).__init__(*args, **kwargs)
        from copy import copy
        from utils import get_module
        conv_defaults = {"stride": 1, "padding": 0, "activation": 'ReLU'}
        pool_defaults = {"kernel_size": 2, "stride": 2}
        modules = []
        for kind, overrides in layers_conv2d:
            if kind == 'conv2d':
                params = copy(conv_defaults)
                params.update(overrides)
                # activation is handled separately, not an nn.Conv2d kwarg
                act_name = params.pop('activation')
                modules.append(nn.Conv2d(**params))
                modules.append(get_module([nn], act_name)())
            elif kind == 'maxpooling2d':
                params = copy(pool_defaults)
                params.update(overrides)
                modules.append(nn.MaxPool2d(**params))
            else:
                raise ValueError(f"{kind} is not implemented")
        self._layers = nn.Sequential(*modules)
        if initialize:
            self.apply(self._init_weights)

    @staticmethod
    def _init_weights(m):
        # Xavier init for every Conv2d layer; biases start at zero.
        if type(m) == nn.Conv2d:
            nn.init.xavier_uniform_(m.weight)
            nn.init.zeros_(m.bias)

    def forward(self, x):
        return self._layers(x)
class Tau4vec_Conv2DTask(nn.Module):
    """CNN-based tau four-vector regressor.

    Same structure as ``Tau4vec_MLPTask`` but the calorimeter image is
    first passed through a ``Conv2DBlock`` feature extractor before the
    MLP stages; the seed jet four-vector again serves as a residual
    baseline added back to the network output.
    """

    def __init__(self,
                 layers_conv2d=[('conv2d', {'in_channels': 3, 'out_channels': 32, 'kernel_size': 3}),
                                ('conv2d', {'in_channels': 32, 'out_channels': 16, 'kernel_size': 3}),
                                ('maxpooling2d', {}),
                                ('conv2d', {'in_channels': 16, 'out_channels': 16, 'kernel_size': 2}),
                                ('conv2d', {'in_channels': 16, 'out_channels': 8, 'kernel_size': 2})],
                 layers_images=[128, 16, 16, 16, 4],
                 layers_calib=[8, 64, 64, 64, 4],
                 activation='ReLU',
                 batch_norm=False,
                 **kwargs):
        # NOTE: list/dict defaults are shared between instances but only read.
        super(Tau4vec_Conv2DTask, self).__init__(**kwargs)
        self._conv2d = Conv2DBlock(layers_conv2d=layers_conv2d)
        self._mlp1 = MLPBlock(layers=layers_images,
                              activation=activation,
                              activation_last='Identity',
                              batch_norm=batch_norm)
        self._mlp2 = MLPBlock(layers=layers_calib,
                              activation=activation,
                              activation_last='Identity',
                              batch_norm=batch_norm)
        self._layers_calib = layers_calib

    def forward(self, x):
        # x[0]: calorimeter images, reshaped to (-1, 3, 16, 16)
        fig = x[0].reshape(-1, 3, 16, 16)
        x_1 = self._conv2d(fig)
        x_1 = x_1.reshape(x_1.size(0), -1)  # flatten
        x_1 = self._mlp1(x_1)
        # x[1]: seed jet four-vectors, one row of 4 values per jet
        input_jet_reshape_4 = x[1].reshape(-1, 4)
        input_jet_reshape_3 = input_jet_reshape_4[:, :3]  # mass is not used
        x = torch.cat((x_1, input_jet_reshape_4), dim=1)
        x = self._mlp2(x)
        # residual connection back to the seed jet values
        if self._layers_calib[-1] == 4:
            x = x + input_jet_reshape_4
        elif self._layers_calib[-1] == 3:
            x = x + input_jet_reshape_3
        x = set_phi_within_valid_range(x)
        # fold the per-jet axis back into the feature axis (factor 2 --
        # presumably two jets per event; confirm against the dataloader)
        output = x.reshape(-1, self._layers_calib[-1] * 2)
        return output
class SF_layer(nn.Module):
    """Element-wise affine layer: ``y = x * sf + bias``.

    ``sf`` is initialised to ones and ``bias`` to zeros, so the layer
    starts out as the identity; both are learnable parameters of shape
    ``input_dim``.
    """

    def __init__(self, input_dim):
        super(SF_layer, self).__init__()
        # torch.ones/torch.zeros replace the legacy
        # torch.Tensor(np.ones(...)) round-trip through NumPy; both
        # produce float32 parameters of the same shape and values.
        self.sf = nn.Parameter(torch.ones(input_dim))
        self.bias = nn.Parameter(torch.zeros(input_dim))

    def forward(self, x):
        """Apply the learned scale factor and offset to ``x``."""
        return x * self.sf + self.bias
class Tau4vec_SFTask(nn.Module):
    """Scale-factor-only tau calibration.

    Applies a learnable per-variable affine correction (``SF_layer``)
    to the seed jet four-vectors; the image input (``x[0]``) is ignored.
    """

    def __init__(self, n_input_vars=8, n_output_vars=6, n_jets=2):
        super(Tau4vec_SFTask, self).__init__()
        # one scale/offset per output variable of a single jet
        self.sf_layer = SF_layer(input_dim=(1, n_output_vars//2))
        self.n_input_vars = n_input_vars
        self.n_output_vars = n_output_vars
        self.n_jets = n_jets

    def forward(self, x):
        # only x[1] (jet four-vectors) is used; one row per jet
        x = x[1].reshape(-1, self.n_input_vars//self.n_jets)
        if self.n_output_vars == 6:
            x = x[:, :3]  # mass is not used
        x = self.sf_layer(x)
        x = set_phi_within_valid_range(x)
        # fold the per-jet axis back into the feature axis
        x = x.reshape(-1, self.n_output_vars)
        return x
class HiggsID_MLPTask(nn.Module):
    """Higgs-identification head: a single MLP over the jet features."""

    def __init__(self,
                 layers=[8, 32, 32, 32, 1],
                 activation='ReLU',
                 activation_last='Identity',
                 batch_norm=False,
                 **kwargs):
        super(HiggsID_MLPTask, self).__init__(**kwargs)
        # NOTE: the list default is shared between instances but only read.
        self.mlp = MLPBlock(layers=layers,
                            activation=activation,
                            activation_last=activation_last,
                            batch_norm=batch_norm)

    def forward(self, x):
        """Return the classification output of the MLP for ``x``."""
        return self.mlp(x)
class LSTMBlock(nn.Module):
    """Stack of ``nn.LSTM`` layers described by a list of feature sizes.

    Optionally followed by ``nn.BatchNorm1d`` and a named activation.
    The forward pass iterates the modules manually so each LSTM's
    ``(output, state)`` tuple can be unpacked.
    """

    def __init__(self,
                 layers,
                 activation=None,
                 batch_norm=False,
                 initialize=True,
                 *args,
                 **kwargs):
        super(LSTMBlock, self).__init__(*args, **kwargs)
        from collections import OrderedDict
        from utils import get_module
        _layers = OrderedDict()
        # one LSTM per consecutive pair of sizes in `layers`
        for i, node in enumerate(layers):
            if i == len(layers) - 1:
                break
            else:
                _layers[f'LSTM{i}'] = nn.LSTM(layers[i], layers[i+1])
        if batch_norm:
            _layers['batchnorm1d'] = nn.BatchNorm1d(layers[-1])
        if activation is not None:
            _layers[activation] = get_module([nn], activation)()
        self._layers = nn.Sequential(_layers)
        if initialize:
            self.apply(self._init_weights)

    @staticmethod
    def _init_weights(m):
        # Xavier for input-hidden weights, orthogonal for hidden-hidden
        # weights, zeros for biases.
        if type(m) == nn.LSTM:
            for name, param in m.named_parameters():
                if 'weight_ih' in name:
                    nn.init.xavier_uniform_(param.data)
                elif 'weight_hh' in name:
                    nn.init.orthogonal_(param.data)
                elif 'bias' in name:
                    param.data.fill_(0)

    def forward(self, x):
        # nn.LSTM returns (output, (h, c)), which nn.Sequential cannot
        # chain on its own -- hence the manual iteration.
        for layer in self._layers:
            if type(layer) == nn.LSTM:
                x, _ = layer(x)
            else:
                x = layer(x)
        return x
class HiggsID_LSTMTask(nn.Module):
    """Higgs identification using an LSTM over the per-jet variables."""

    def __init__(self,
                 layers_lstm=[4, 32, 32, 32, 1],
                 layers_mlp=[1, 1],
                 activation_last='Identity',
                 batch_norm=False,
                 n_jets=2,
                 **kwargs):
        super(HiggsID_LSTMTask, self).__init__(**kwargs)
        self.layers_lstm = layers_lstm
        self.n_jets = n_jets
        self.lstm = LSTMBlock(layers=layers_lstm,
                              batch_norm=batch_norm)
        self.mlp = MLPBlock(layers=layers_mlp,
                            activation='Identity',
                            activation_last=activation_last,
                            batch_norm=batch_norm)

    def forward(self, x):
        # reshape to (batch, n_jets, features), then transpose to the
        # sequence-first layout (n_jets, batch, features) expected by LSTM
        x = torch.transpose(
            x.reshape(-1, self.n_jets, self.layers_lstm[0]),
            1, 0)
        # keep only the LSTM output at the last sequence position
        x = self.lstm(x)[-1]
        x = self.mlp(x)
        return x
class HiggsID_MassTask(nn.Module):
    """Higgs identification via the reconstructed invariant mass.

    The per-jet variables are combined into an invariant mass of the
    summed system, which (after rescaling by ``scale_mass``) is fed to
    an MLP classifier.
    """

    def __init__(self,
                 layers=[1, 64, 64, 1],
                 activation='ReLU',
                 activation_last='Identity',
                 batch_norm=False,
                 scale_mass=1./125.,
                 n_jets=2,
                 n_input_vars=8,
                 **kwargs):
        super(HiggsID_MassTask, self).__init__(**kwargs)
        self.scale_mass = scale_mass
        self.n_input_vars = n_input_vars
        self.n_jets = n_jets
        self.mlp = MLPBlock(layers=layers,
                            activation=activation,
                            activation_last=activation_last,
                            batch_norm=batch_norm)

    def forward(self, x):
        """Compute the rescaled invariant mass and classify it."""
        mass = self.mass_layer(x, self.n_jets, self.n_input_vars)
        return self.mlp(mass * self.scale_mass)

    @staticmethod
    def mass_layer(tau_4vec, n_jets, n_input_vars):
        """Invariant mass of the summed jet system.

        The first three per-jet columns are converted to (px, py, pz, e)
        with a fixed mass of 1.777 and summed over jets.
        """
        jets = tau_4vec.reshape(-1, n_jets, n_input_vars // n_jets)
        # column 0 is exponentiated and shifted by 0.1 (inverse of a
        # log(pt + 0.1) encoding, presumably); clamp keeps exp() finite
        pt = torch.exp(
            torch.clamp(jets[:, :, 0], min=-7., max=7.)
        ) - 0.1
        eta = jets[:, :, 1]
        phi = jets[:, :, 2]
        tau_mass = 1.777
        px = pt * torch.cos(phi)
        py = pt * torch.sin(phi)
        pz = pt * torch.sinh(torch.clamp(eta, min=-5, max=5))
        epsilon = 0.1  # avoid nan when e=0. sqrt(x)^' = -1/2 * 1/sqrt(x)
        e = torch.sqrt(
            epsilon + px**2 + py**2 + pz**2 + tau_mass**2
        )
        # sum the four-vectors over the jet axis, then split components
        summed = torch.sum(torch.stack([px, py, pz, e], dim=2), dim=1)
        px, py, pz, e = torch.chunk(summed, chunks=4, dim=1)
        return torch.sqrt(
            epsilon + e**2 - (px**2 + py**2 + pz**2)
        )
class SubTask_Gaussian(nn.Module):
    """Dummy task head producing sigmoid(Gaussian noise) per sample.

    The input values contribute nothing (they are summed and multiplied
    by zero, keeping the graph connected); with ``sigma == 0`` the
    output is exactly sigmoid(0) = 0.5 for every row.
    """

    def __init__(self, in_len=8, sigma=1.0):
        super(SubTask_Gaussian, self).__init__()
        self.sigma = sigma
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # collapse the feature axis to a zero scalar per sample
        z = x.sum(axis=1) * 0.0
        if self.sigma != 0:
            z = z + torch.empty_like(z).normal_() * self.sigma
        return self.sigmoid(z).reshape(-1, 1)
| [
"torch.sqrt",
"torch.cat",
"numpy.ones",
"utils.get_module",
"torch.cos",
"torch.empty_like",
"torch.device",
"torch.no_grad",
"os.path.join",
"utils.get_logger",
"torch.fmod",
"torch.nn.Linear",
"torch.nn.LSTM",
"tqdm.tqdm",
"numpy.fmod",
"torch.nn.init.xavier_uniform_",
"torch.nn.B... | [((118, 130), 'utils.get_logger', 'get_logger', ([], {}), '()\n', (128, 130), False, 'from utils import add_device, get_logger\n'), ((224, 243), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (236, 243), False, 'import torch\n'), ((792, 811), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (804, 811), False, 'import torch\n'), ((1416, 1435), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1428, 1435), False, 'import torch\n'), ((329, 364), 'utils.add_device', 'add_device', (['data[input_key]', 'device'], {}), '(data[input_key], device)\n', (339, 364), False, 'from utils import add_device, get_logger\n'), ((383, 419), 'utils.add_device', 'add_device', (['data[target_key]', 'device'], {}), '(data[target_key], device)\n', (393, 419), False, 'from utils import add_device, get_logger\n'), ((937, 972), 'utils.add_device', 'add_device', (['data[input_key]', 'device'], {}), '(data[input_key], device)\n', (947, 972), False, 'from utils import add_device, get_logger\n'), ((991, 1027), 'utils.add_device', 'add_device', (['data[target_key]', 'device'], {}), '(data[target_key], device)\n', (1001, 1027), False, 'from utils import add_device, get_logger\n'), ((1769, 1785), 'tqdm.tqdm', 'tqdm', (['dataloader'], {}), '(dataloader)\n', (1773, 1785), False, 'from tqdm import tqdm\n'), ((3875, 3912), 'torch.cat', 'torch.cat', (['[x[:, 0:2], x_phi]'], {'axis': '(1)'}), '([x[:, 0:2], x_phi], axis=1)\n', (3884, 3912), False, 'import torch\n'), ((6752, 6775), 'torch.nn.Sequential', 'nn.Sequential', (['*_layers'], {}), '(*_layers)\n', (6765, 6775), False, 'from torch import nn\n'), ((8185, 8229), 'torch.cat', 'torch.cat', (['(x_1, input_jet_reshape_4)'], {'dim': '(1)'}), '((x_1, input_jet_reshape_4), dim=1)\n', (8194, 8229), False, 'import torch\n'), ((9573, 9596), 'torch.nn.Sequential', 'nn.Sequential', (['*_layers'], {}), '(*_layers)\n', (9586, 9596), False, 'from torch import nn\n'), ((11532, 11576), 'torch.cat', 
'torch.cat', (['(x_1, input_jet_reshape_4)'], {'dim': '(1)'}), '((x_1, input_jet_reshape_4), dim=1)\n', (11541, 11576), False, 'import torch\n'), ((13843, 13856), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13854, 13856), False, 'from collections import OrderedDict\n'), ((14259, 14281), 'torch.nn.Sequential', 'nn.Sequential', (['_layers'], {}), '(_layers)\n', (14272, 14281), False, 'from torch import nn\n'), ((17282, 17343), 'torch.sqrt', 'torch.sqrt', (['(epsilon + px ** 2 + py ** 2 + pz ** 2 + mass ** 2)'], {}), '(epsilon + px ** 2 + py ** 2 + pz ** 2 + mass ** 2)\n', (17292, 17343), False, 'import torch\n'), ((17378, 17413), 'torch.stack', 'torch.stack', (['[px, py, pz, e]'], {'dim': '(2)'}), '([px, py, pz, e], dim=2)\n', (17389, 17413), False, 'import torch\n'), ((17433, 17459), 'torch.sum', 'torch.sum', (['tau_4vec'], {'dim': '(1)'}), '(tau_4vec, dim=1)\n', (17442, 17459), False, 'import torch\n'), ((17484, 17522), 'torch.chunk', 'torch.chunk', (['tau_4vec'], {'chunks': '(4)', 'dim': '(1)'}), '(tau_4vec, chunks=4, dim=1)\n', (17495, 17522), False, 'import torch\n'), ((17538, 17598), 'torch.sqrt', 'torch.sqrt', (['(epsilon + e ** 2 - (px ** 2 + py ** 2 + pz ** 2))'], {}), '(epsilon + e ** 2 - (px ** 2 + py ** 2 + pz ** 2))\n', (17548, 17598), False, 'import torch\n'), ((17814, 17826), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (17824, 17826), False, 'from torch import nn\n'), ((2055, 2070), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2068, 2070), False, 'import torch\n'), ((4034, 4058), 'numpy.expand_dims', 'np.expand_dims', (['x_phi', '(1)'], {}), '(x_phi, 1)\n', (4048, 4058), True, 'import numpy as np\n'), ((4071, 4113), 'numpy.concatenate', 'np.concatenate', (['[x[:, 0:2], x_phi]'], {'axis': '(1)'}), '([x[:, 0:2], x_phi], axis=1)\n', (4085, 4113), True, 'import numpy as np\n'), ((5605, 5637), 'os.path.join', 'join', (['self.path', '"""checkpoint.pt"""'], {}), "(self.path, 'checkpoint.pt')\n", (5609, 5637), False, 'from 
os.path import join\n'), ((6932, 6965), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['m.weight'], {}), '(m.weight)\n', (6955, 6965), False, 'from torch import nn\n'), ((6978, 7000), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (6992, 7000), False, 'from torch import nn\n'), ((9753, 9786), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['m.weight'], {}), '(m.weight)\n', (9776, 9786), False, 'from torch import nn\n'), ((9799, 9821), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (9813, 9821), False, 'from torch import nn\n'), ((14107, 14133), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['layers[-1]'], {}), '(layers[-1])\n', (14121, 14133), False, 'from torch import nn\n'), ((17086, 17100), 'torch.cos', 'torch.cos', (['phi'], {}), '(phi)\n', (17095, 17100), False, 'import torch\n'), ((17119, 17133), 'torch.sin', 'torch.sin', (['phi'], {}), '(phi)\n', (17128, 17133), False, 'import torch\n'), ((2562, 2588), 'torch.tensor', 'torch.tensor', (['outputs_data'], {}), '(outputs_data)\n', (2574, 2588), False, 'import torch\n'), ((2625, 2651), 'torch.tensor', 'torch.tensor', (['targets_data'], {}), '(targets_data)\n', (2637, 2651), False, 'import torch\n'), ((3437, 3463), 'torch.fmod', 'torch.fmod', (['(x + pi)', '(2 * pi)'], {}), '(x + pi, 2 * pi)\n', (3447, 3463), False, 'import torch\n'), ((3642, 3665), 'numpy.fmod', 'np.fmod', (['(x + pi)', '(2 * pi)'], {}), '(x + pi, 2 * pi)\n', (3649, 3665), True, 'import numpy as np\n'), ((9038, 9055), 'copy.copy', 'copy', (['conv2d_args'], {}), '(conv2d_args)\n', (9042, 9055), False, 'from copy import copy\n'), ((12052, 12070), 'numpy.ones', 'np.ones', (['input_dim'], {}), '(input_dim)\n', (12059, 12070), True, 'import numpy as np\n'), ((12141, 12160), 'numpy.zeros', 'np.zeros', (['input_dim'], {}), '(input_dim)\n', (12149, 12160), True, 'import numpy as np\n'), ((14014, 14047), 'torch.nn.LSTM', 'nn.LSTM', (['layers[i]', 'layers[i + 1]'], {}), 
'(layers[i], layers[i + 1])\n', (14021, 14047), False, 'from torch import nn\n'), ((14204, 14232), 'utils.get_module', 'get_module', (['[nn]', 'activation'], {}), '([nn], activation)\n', (14214, 14232), False, 'from utils import get_module\n'), ((16918, 16967), 'torch.clamp', 'torch.clamp', (['tau_4vec[:, :, 0]'], {'min': '(-7.0)', 'max': '(7.0)'}), '(tau_4vec[:, :, 0], min=-7.0, max=7.0)\n', (16929, 16967), False, 'import torch\n'), ((17163, 17194), 'torch.clamp', 'torch.clamp', (['eta'], {'min': '(-5)', 'max': '(5)'}), '(eta, min=-5, max=5)\n', (17174, 17194), False, 'import torch\n'), ((2836, 2858), 'numpy.array', 'np.array', (['targets_data'], {}), '(targets_data)\n', (2844, 2858), True, 'import numpy as np\n'), ((2892, 2914), 'numpy.array', 'np.array', (['outputs_data'], {}), '(outputs_data)\n', (2900, 2914), True, 'import numpy as np\n'), ((6281, 6316), 'torch.nn.Linear', 'nn.Linear', (['layers[i]', 'layers[i + 1]'], {}), '(layers[i], layers[i + 1])\n', (6290, 6316), False, 'from torch import nn\n'), ((6375, 6404), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['layers[i + 1]'], {}), '(layers[i + 1])\n', (6389, 6404), False, 'from torch import nn\n'), ((9185, 9208), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {}), '(**layer_args)\n', (9194, 9208), False, 'from torch import nn\n'), ((9344, 9367), 'copy.copy', 'copy', (['maxpooling2d_args'], {}), '(maxpooling2d_args)\n', (9348, 9367), False, 'from copy import copy\n'), ((14537, 14572), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['param.data'], {}), '(param.data)\n', (14560, 14572), False, 'from torch import nn\n'), ((6696, 6724), 'utils.get_module', 'get_module', (['[nn]', 'activation'], {}), '([nn], activation)\n', (6706, 6724), False, 'from utils import get_module\n'), ((9241, 9269), 'utils.get_module', 'get_module', (['[nn]', 'activation'], {}), '([nn], activation)\n', (9251, 9269), False, 'from utils import get_module\n'), ((9439, 9465), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {}), 
'(**layer_args)\n', (9451, 9465), False, 'from torch import nn\n'), ((14635, 14666), 'torch.nn.init.orthogonal_', 'nn.init.orthogonal_', (['param.data'], {}), '(param.data)\n', (14654, 14666), False, 'from torch import nn\n'), ((17942, 17961), 'torch.empty_like', 'torch.empty_like', (['x'], {}), '(x)\n', (17958, 17961), False, 'import torch\n'), ((6521, 6549), 'utils.get_module', 'get_module', (['[nn]', '"""Identity"""'], {}), "([nn], 'Identity')\n", (6531, 6549), False, 'from utils import get_module\n'), ((6610, 6643), 'utils.get_module', 'get_module', (['[nn]', 'activation_last'], {}), '([nn], activation_last)\n', (6620, 6643), False, 'from utils import get_module\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 23 10:49:20 2020
@author: dberke
A script to compare the results of fitting transition velocity offsets and pairs
as a function of stellar parameters using different functions.
"""
import argparse
import os
from pathlib import Path
import pickle
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from tqdm import tqdm
import unyt as u
import sys
import varconlib as vcl
# Plot colours keyed by fitting-function name.
colors = {'linear': 'Gold',
          'quadratic': 'ForestGreen',
          'cross_term': 'RoyalBlue',
          'quadratic_mag': 'FireBrick'}
# Plot colours for pre/post fiber-change, corrected/uncorrected series.
corr_colors = {'pre_uncorr': 'SaddleBrown',
               'pre_corr': 'LightSalmon',
               'post_uncorr': 'RoyalBlue',
               'post_corr': 'LightSkyBlue'}
def plot_histograms(target):
    """Create plots of histograms for the various quantities of interest.

    For every non-linear fitting function, histogram the per-item
    difference in sigma_sys relative to the linear fit, pre- and
    post-fiber change, and save the figure next to the input CSVs.

    Parameters
    ----------
    target : str, ['transitions', 'pairs']
        A string denoting whether to compare the results for transitions,
        or for pairs.

    Returns
    -------
    None.
    """
    # Column layout of the fit-results CSV files.
    cols = {'index': 0,
            'chi_squared_pre': 1,
            'sigma_pre': 2,
            'sigma_sys_pre': 3,
            'chi_squared_post': 4,
            'sigma_post': 5,
            'sigma_sys_post': 6}
    # Directory holding results for this target and sigma-clipping level.
    main_dir = vcl.output_dir /\
        f'stellar_parameter_fits_{target}_{args.sigma}sigma'
    functions = {'linear': 'Linear',
                 'cross_term': 'Linear, [Fe/H]/T$_{eff}$',
                 'quadratic': 'Quadratic',
                 'quad_cross_term': 'Quadratic, cross term',
                 'cubic': 'Cubic'}
    files = {x: main_dir / f'{x}/{x}_{target}_fit_results.csv' for
             x in functions.keys()}
    x_lims = {'left': -5, 'right': 5}
    fig = plt.figure(figsize=(12, 7), tight_layout=True)
    ax_pre = fig.add_subplot(1, 2, 1)
    # ax_pre.set_yscale('log')
    ax_pre.set_xlim(**x_lims)
    ax_pre.set_xlabel(r'Pre-change $\sigma_\mathrm{sys}-'
                      r'\sigma_\mathrm{sys,linear}$ (m/s)')
    ax_post = fig.add_subplot(1, 2, 2,
                              sharex=ax_pre, sharey=ax_pre)
    ax_post.set_xlabel(r'Post-change $\sigma_\mathrm{sys}-'
                       r'\sigma_\mathrm{sys,linear}$ (m/s)')
    for ax in (ax_pre, ax_post):
        ax.axvline(color='Black', linestyle='-')
        ax.xaxis.set_major_locator(ticker.MultipleLocator(base=1))
        ax.xaxis.grid(which='major', color='Gray', alpha=0.4)
    # Set the number of bins.
    bin_edges = np.linspace(x_lims['left'], x_lims['right'], num=40)
    data_dict = {}
    for function in functions.keys():
        with open(files[function], 'r', newline='') as f:
            data_dict[function] = np.loadtxt(f, delimiter=',')
    # The linear fit is the baseline every other model is compared to.
    linear_sigma_sys_pre = np.array(data_dict['linear']
                                    [:, cols['sigma_sys_pre']])
    linear_sigma_sys_post = np.array(data_dict['linear']
                                     [:, cols['sigma_sys_post']])
    # for function in ('cross_term', 'quadratic',
    #                  'quad_cross_term', 'cubic'):
    for function in functions.keys():
        if function == 'linear':
            continue
        data_pre = np.array(data_dict[function]
                            [:, cols['sigma_sys_pre']])
        data_post = np.array(data_dict[function]
                             [:, cols['sigma_sys_post']])
        # Per-item sigma_sys difference with respect to the linear fit.
        diffs_pre = data_pre - linear_sigma_sys_pre
        diffs_post = data_post - linear_sigma_sys_post
        ax_pre.hist(diffs_pre,
                    cumulative=False, histtype='step',
                    label=f'{function}: {np.median(diffs_pre):.2f} m/s',
                    bins=bin_edges)
        ax_post.hist(diffs_post,
                     cumulative=False, histtype='step',
                     label=f'{function}: {np.median(diffs_post):.2f} m/s',
                     bins=bin_edges)
    ax_pre.legend(loc='upper left')
    ax_post.legend(loc='upper left')
    file_name = main_dir /\
        f'Model_comparison_histograms_{target}_{args.sigma}sigma.png'
    fig.savefig(str(file_name))
    plt.close('all')
    # NOTE(review): this terminates the whole process after plotting --
    # confirm that is intentional before calling from other code.
    sys.exit()
def plot_per_transition(target=None):
    """Create plots showing quantities of interest based on transition index
    number.

    Parameters
    ----------
    target : str, optional
        Accepted for interface symmetry with ``plot_histograms``
        (``main`` passes 'transitions' or 'pairs').  The file paths used
        here are currently fixed, so the value is not yet consulted;
        previously the function took no parameters and the call in
        ``main`` raised a TypeError.

    Returns
    -------
    None.
    """
    plots_dir = Path('/Users/dberke/Pictures/fitting_comparisons')
    if not plots_dir.exists():
        os.mkdir(plots_dir)
    # Column layout of the fit-results CSV files.
    cols = {'index': 0,
            'chi_squared_pre': 1,
            'sigma_pre': 2,
            'sigma_sys_pre': 3,
            'chi_squared_post': 4,
            'sigma_post': 5,
            'sigma_sys_post': 6}
    quantities = {#'chi_squared': r'$\chi^2_\nu$',
                  'sigma': r'$\sigma$ (m/s)',
                  'sigma_sys': r'$\sigma_{\mathrm{sys}} (m/s)$'}
    main_dir = Path(vcl.config['PATHS']['output_dir']) /\
        'stellar_parameter_fits'
    functions = {'linear': 'Linear',
                 'quadratic': 'Quadratic',
                 'cross_term': 'Linear, [Fe/H]/T$_{eff}$',
                 'quadratic_mag': r'Linear, cross term, $\mathrm{M}_{v}^2$'}
    files = [main_dir / f'{x}/{x}_fit_results.csv' for x in functions.keys()]
    corr_files = [main_dir /
                  f'{x}_corrected/{x}_fit_results.csv' for x in
                  functions.keys()]
    # One figure per (quantity, fitting function): uncorrected vs.
    # outlier-corrected values per index, pre- and post-fiber change.
    for quantity in tqdm(quantities.keys()):
        for file, corr_file, function in tqdm(zip(files, corr_files,
                                                  functions.keys())):
            with open(file, 'r', newline='') as f:
                data = np.loadtxt(f, delimiter=',')
            with open(corr_file, 'r', newline='') as f:
                corr_data = np.loadtxt(f, delimiter=',')
            fig = plt.figure(figsize=(11, 7), tight_layout=True)
            ax_pre = fig.add_subplot(2, 1, 1)
            ax_post = fig.add_subplot(2, 1, 2)
            x = data[:, 0]
            corr_x = corr_data[:, 0]
            for ax, time in zip((ax_pre, ax_post), ('pre', 'post')):
                ax.set_xlabel(f'{time.capitalize()}-fiber change index')
                # ax.set_yscale('log')
                ax.set_ylabel(f'{quantities[quantity]} ({functions[function]})')
                ax.set_xlim(left=-1, right=len(x)+1)
                if quantity == 'sigma':
                    ax.set_ylim(bottom=0, top=250)
                elif quantity == 'sigma_sys':
                    ax.set_ylim(bottom=-1, top=85)
                ax.xaxis.set_major_locator(ticker.MultipleLocator(base=10))
                ax.xaxis.set_minor_locator(ticker.MultipleLocator(base=2))
                ax.xaxis.grid(which='both', color='Gray',
                              linestyle='-', alpha=0.6)
                ax.yaxis.grid(which='major', color='Gray',
                              linestyle='--', alpha=0.4)
                y = data[:, cols[quantity + f'_{time}']]
                corr_y = corr_data[:, cols[quantity + f'_{time}']]
                # Shade the gap between corrected and uncorrected values.
                ax.fill_between(x, y, corr_y,
                                color='Gray',
                                alpha=0.5)
                ax.plot(x, y, color=corr_colors[time + '_uncorr'],
                        marker='o',
                        label='No outlier rejection',
                        markeredgecolor='Black',
                        markersize=6)
                ax.plot(corr_x, corr_y, color=corr_colors[time + '_corr'],
                        marker='o',
                        label='Outlier rejection',
                        markeredgecolor='Black',
                        markersize=6)
            ax_pre.legend(loc='best')
            ax_post.legend(loc='best')
            file_name = plots_dir /\
                f'{quantity}_{function}_{args.sigma}sigma.png'
            # plt.show(fig)
            fig.savefig(str(file_name))
    # Second pass: plot the ratio sigma_sys / sigma per fitting function.
    for file, corr_file, function in tqdm(zip(files, corr_files,
                                              functions.keys())):
        with open(file, 'r', newline='') as f:
            data = np.loadtxt(f, delimiter=',')
        with open(corr_file, 'r', newline='') as f:
            corr_data = np.loadtxt(f, delimiter=',')
        fig = plt.figure(figsize=(11, 7), tight_layout=True)
        ax_pre = fig.add_subplot(2, 1, 1)
        ax_post = fig.add_subplot(2, 1, 2)
        x = data[:, 0]
        corr_x = corr_data[:, 0]
        for ax, time in zip((ax_pre, ax_post), ('pre', 'post')):
            ax.set_xlabel(f'{time.capitalize()}-fiber change index, {function}')
            ax.set_ylabel(r'$\sigma_\mathrm{sys}/\sigma$')
            ax.set_xlim(left=-1, right=len(x)+1)
            ax.axhline(y=1, color='Black')
            ax.xaxis.set_major_locator(ticker.MultipleLocator(base=10))
            ax.xaxis.set_minor_locator(ticker.MultipleLocator(base=2))
            ax.xaxis.grid(which='both', color='Gray',
                          linestyle='-', alpha=0.6)
            ax.yaxis.grid(which='major', color='Gray',
                          linestyle='--', alpha=0.4)
            y_sig = data[:, cols[f'sigma_{time}']]
            y_sig_sys = data[:, cols[f'sigma_sys_{time}']]
            ax.plot(x, y_sig_sys / y_sig, color='LightCoral',
                    marker='+',
                    label=r'$\sigma_\mathrm{sys}/\sigma$',
                    markeredgecolor='Black',
                    markersize=6)
        ax_pre.legend(loc='best')
        ax_post.legend(loc='best')
        file_name = plots_dir / f'sigma-sigma_sys_{function}.png'
        # plt.show(fig)
        fig.savefig(str(file_name))
    # NOTE(review): sys.exit() here kills the interpreter after plotting --
    # confirm that is intentional before calling from other code.
    sys.exit()
def main():
    """Dispatch to the requested plotting routine.

    Reads the module-level ``args`` namespace (populated in the
    ``__main__`` block): one of --transitions/--pairs chooses the
    target and one of --histogram/--per-transition chooses the plot.

    Returns
    -------
    None.
    """
    # argparse enforces exactly one of -T/-P, so this covers all cases
    target = 'transitions' if args.transitions else 'pairs'
    if args.histogram:
        plot_histograms(target)
    elif args.per_transition:
        plot_per_transition(target)
if __name__ == '__main__':
    # Command-line interface: a plot type (--histogram vs.
    # --per-transition) and a target (-T vs. -P) are both required.
    parser = argparse.ArgumentParser(description='Create various plots in order'
                                     ' to compare various fitting models with'
                                     ' each other.')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Print out more information about the script.')
    parser.add_argument('--sigma', type=float, default=2.5,
                        help='The sigma-clipping limit for which to plot'
                        ' the data.')
    plot_type = parser.add_mutually_exclusive_group(required=True)
    plot_type.add_argument('--histogram', action='store_true',
                           help='Plot histograms of various quantities for'
                           ' different fitting functions.')
    plot_type.add_argument('--per-transition', action='store_true',
                           help='Create plots which show various quantities as'
                           ' a function of transition number.')
    target_type = parser.add_mutually_exclusive_group(required=True)
    target_type.add_argument('-T', '--transitions', action='store_true',
                             help='Plot for individual transitions.')
    target_type.add_argument('-P', '--pairs', action='store_true',
                             help='Plot for pairs.')
    # args and vprint are module-level globals read by the plotting
    # functions defined above.
    args = parser.parse_args()
    vprint = vcl.verbose_print(args.verbose)
    main()
| [
"os.mkdir",
"argparse.ArgumentParser",
"numpy.median",
"matplotlib.pyplot.close",
"varconlib.verbose_print",
"matplotlib.pyplot.figure",
"pathlib.Path",
"numpy.array",
"numpy.loadtxt",
"numpy.linspace",
"matplotlib.ticker.MultipleLocator",
"sys.exit"
] | [((1820, 1866), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 7)', 'tight_layout': '(True)'}), '(figsize=(12, 7), tight_layout=True)\n', (1830, 1866), True, 'import matplotlib.pyplot as plt\n'), ((2563, 2615), 'numpy.linspace', 'np.linspace', (["x_lims['left']", "x_lims['right']"], {'num': '(40)'}), "(x_lims['left'], x_lims['right'], num=40)\n", (2574, 2615), True, 'import numpy as np\n'), ((2823, 2878), 'numpy.array', 'np.array', (["data_dict['linear'][:, cols['sigma_sys_pre']]"], {}), "(data_dict['linear'][:, cols['sigma_sys_pre']])\n", (2831, 2878), True, 'import numpy as np\n'), ((2944, 3000), 'numpy.array', 'np.array', (["data_dict['linear'][:, cols['sigma_sys_post']]"], {}), "(data_dict['linear'][:, cols['sigma_sys_post']])\n", (2952, 3000), True, 'import numpy as np\n'), ((4159, 4175), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4168, 4175), True, 'import matplotlib.pyplot as plt\n'), ((4180, 4190), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4188, 4190), False, 'import sys\n'), ((4371, 4421), 'pathlib.Path', 'Path', (['"""/Users/dberke/Pictures/fitting_comparisons"""'], {}), "('/Users/dberke/Pictures/fitting_comparisons')\n", (4375, 4421), False, 'from pathlib import Path\n'), ((10131, 10141), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10139, 10141), False, 'import sys\n'), ((10513, 10642), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create various plots in order to compare various fitting models with each other."""'}), "(description=\n 'Create various plots in order to compare various fitting models with each other.'\n )\n", (10536, 10642), False, 'import argparse\n'), ((11884, 11915), 'varconlib.verbose_print', 'vcl.verbose_print', (['args.verbose'], {}), '(args.verbose)\n', (11901, 11915), True, 'import varconlib as vcl\n'), ((3253, 3308), 'numpy.array', 'np.array', (["data_dict[function][:, cols['sigma_sys_pre']]"], {}), "(data_dict[function][:, 
cols['sigma_sys_pre']])\n", (3261, 3308), True, 'import numpy as np\n'), ((3358, 3414), 'numpy.array', 'np.array', (["data_dict[function][:, cols['sigma_sys_post']]"], {}), "(data_dict[function][:, cols['sigma_sys_post']])\n", (3366, 3414), True, 'import numpy as np\n'), ((4461, 4480), 'os.mkdir', 'os.mkdir', (['plots_dir'], {}), '(plots_dir)\n', (4469, 4480), False, 'import os\n'), ((4876, 4915), 'pathlib.Path', 'Path', (["vcl.config['PATHS']['output_dir']"], {}), "(vcl.config['PATHS']['output_dir'])\n", (4880, 4915), False, 'from pathlib import Path\n'), ((8384, 8430), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(11, 7)', 'tight_layout': '(True)'}), '(figsize=(11, 7), tight_layout=True)\n', (8394, 8430), True, 'import matplotlib.pyplot as plt\n'), ((2422, 2452), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', ([], {'base': '(1)'}), '(base=1)\n', (2444, 2452), True, 'import matplotlib.ticker as ticker\n'), ((2766, 2794), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (2776, 2794), True, 'import numpy as np\n'), ((5944, 5990), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(11, 7)', 'tight_layout': '(True)'}), '(figsize=(11, 7), tight_layout=True)\n', (5954, 5990), True, 'import matplotlib.pyplot as plt\n'), ((8235, 8263), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (8245, 8263), True, 'import numpy as np\n'), ((8340, 8368), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (8350, 8368), True, 'import numpy as np\n'), ((5783, 5811), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (5793, 5811), True, 'import numpy as np\n'), ((5896, 5924), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (5906, 5924), True, 'import numpy as np\n'), ((8911, 8942), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', ([], 
{'base': '(10)'}), '(base=10)\n', (8933, 8942), True, 'import matplotlib.ticker as ticker\n'), ((8983, 9013), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', ([], {'base': '(2)'}), '(base=2)\n', (9005, 9013), True, 'import matplotlib.ticker as ticker\n'), ((6696, 6727), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', ([], {'base': '(10)'}), '(base=10)\n', (6718, 6727), True, 'import matplotlib.ticker as ticker\n'), ((6772, 6802), 'matplotlib.ticker.MultipleLocator', 'ticker.MultipleLocator', ([], {'base': '(2)'}), '(base=2)\n', (6794, 6802), True, 'import matplotlib.ticker as ticker\n'), ((3681, 3701), 'numpy.median', 'np.median', (['diffs_pre'], {}), '(diffs_pre)\n', (3690, 3701), True, 'import numpy as np\n'), ((3880, 3901), 'numpy.median', 'np.median', (['diffs_post'], {}), '(diffs_post)\n', (3889, 3901), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 10 13:16:41 2020
@author: <NAME>
"""
# from .models.codegnngru import CodeGNNGRU
import argparse
import os
import pickle
import random
import sys
import time
import traceback
import numpy as np
# import tensorflow as tf
import torch
# from torchsummary import summary
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as Data
import copy
# from torch_scatter import scatter_add
# from torch_geometric.nn.conv import MessagePassing
# from keras.callbacks import ModelCheckpoint, Callback
# import keras.backend as K
from models.GCNLayer_pytorch import GraphConvolution
from timeit import default_timer as timer
from utils.myutils import batch_gen, init_tf, seq2sent
from models.HAConvGNN import HAConvGNN, TimeDistributed, Flatten
from utils.model import create_model
from utils.myutils import batch_gen, init_tf
def set_random_seed(seed=10, deterministic=False, benchmark=False):
    """Seed every relevant RNG (Python, NumPy, PyTorch CPU/CUDA) for reproducibility.

    The original only seeded PyTorch; the `random` and `numpy` generators used
    elsewhere in the pipeline are now seeded as well (both modules are already
    imported at the top of this file).

    Args:
        seed (int): seed value applied to every RNG.
        deterministic (bool): if True, force cuDNN to use deterministic algorithms.
        benchmark (bool): if True, let cuDNN auto-tune convolution algorithms
            (faster, but results may vary between runs).
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # no-op when CUDA is unavailable
    if deterministic:
        torch.backends.cudnn.deterministic = True
    if benchmark:
        torch.backends.cudnn.benchmark = True
def gen_pred(model, data, device, comstok, comlen, batchsize, config, fid_set, strat='greedy'):
    """Greedily decode comment token sequences for one batch.

    Args:
        model: trained HAConvGNN model, already placed on `device`.
        data (dict): fid -> (tdats, coms, wsmlnodes, wedge_1) input tuples.
        device: torch device on which the model runs.
        comstok: comment tokenizer used by `seq2sent` to detokenize ids.
        comlen (int): maximum comment length to decode.
        batchsize, config, fid_set, strat: kept for interface compatibility;
            unused here (only greedy decoding is implemented).

    Returns:
        dict: fid -> decoded comment string.
    """
    tdats, coms, wsmlnodes, wedge_1 = zip(*data.values())
    tdats = np.array(tdats)
    coms = np.array(coms)
    wsmlnodes = np.array(wsmlnodes)
    wedge_1 = np.array(wedge_1)
    # Move all model inputs onto the target device once, up front.
    tdats = torch.from_numpy(tdats).type(torch.LongTensor).to(device)
    coms = torch.from_numpy(coms).type(torch.LongTensor).to(device)
    wsmlnodes = torch.from_numpy(wsmlnodes).type(torch.LongTensor).to(device)
    wedge_1 = torch.from_numpy(wedge_1).type(torch.LongTensor).to(device)
    # Decode one position per step: run the model on the partial comment and
    # write the argmax token into position i.
    for i in range(1, comlen):
        if i > 1:
            # `coms` was converted to a numpy array at the end of the previous
            # step; convert it back to a tensor on the model's device.
            # Bug fix: the original omitted `.to(device)` here, leaving the
            # tensor on the CPU after the first iteration (breaks GPU runs).
            coms = torch.from_numpy(coms).type(torch.LongTensor).to(device)
        output = model([tdats, coms, wsmlnodes, wedge_1])
        output = output.cpu().detach().numpy()
        coms = coms.cpu().numpy()
        for c, s in enumerate(output):
            coms[c][i] = np.argmax(s)
    final_data = {}
    for fid, com in zip(data.keys(), coms):
        final_data[fid] = seq2sent(com, comstok)
    return final_data
if __name__ == '__main__':
    # ---- command-line interface -------------------------------------------
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('model', type=str, default=None)
    parser.add_argument('--gpu', dest='gpu', type=str, default='')
    parser.add_argument('--data', dest='dataprep', type=str, default='../data')
    parser.add_argument('--outdir', dest='outdir', type=str, default='modelout/')
    parser.add_argument('--batch-size', dest='batchsize', type=int, default=2)
    parser.add_argument('--outfile', dest='outfile', type=str, default=None)
    args = parser.parse_args()
    modelfile = args.model
    outdir = args.outdir
    dataprep = args.dataprep
    gpu = args.gpu
    batchsize = args.batchsize
    outfile = args.outfile  # NOTE(review): parsed but never used below
    config = dict()
    # User set parameters#
    config['maxastnodes'] = 300
    config['asthops'] = 2
    # Restrict CUDA to the requested device id(s); an empty string hides all GPUs.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    # ---- load tokenizers and the preprocessed dataset ---------------------
    codetok = pickle.load(open('{}/code_notebook.tok'.format(dataprep), 'rb'), encoding='UTF-8')
    comstok = pickle.load(open('{}/coms_notebook.tok'.format(dataprep), 'rb'), encoding='UTF-8')
    asttok = pickle.load(open('{}/ast_notebook.tok'.format(dataprep), 'rb'), encoding='UTF-8')
    seqdata = pickle.load(open('dataset_notebook.pkl', 'rb'))
    allfids = list(seqdata['ctest'].keys())
    codevocabsize = codetok.vocab_size
    comvocabsize = comstok.vocab_size
    astvocabsize = asttok.vocab_size
    config['codevocabsize'] = codevocabsize
    config['comvocabsize'] = comvocabsize
    config['astvocabsize'] = astvocabsize
    print('codevocabsize {}'.format(codevocabsize))
    print('comvocabsize {}'.format(comvocabsize))
    print('astvocabsize {}'.format(astvocabsize))
    # set sequence lengths
    config['codelen'] = 200
    config['comlen'] = 30
    config['batch_size'] = batchsize
    comlen = 30
    print('len', len(seqdata['ctest']))
    print('allfids', len(allfids))
    # ---- restore model and optimizer state from the checkpoint ------------
    model, device = create_model(config)
    checkpoint = torch.load(modelfile)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer = torch.optim.Adamax(model.parameters(), lr = 1e-3)
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    # NOTE(review): optimizer and loss_func are restored/created but unused
    # during inference; kept to mirror the training setup.
    loss_func = torch.nn.CrossEntropyLoss()
    print("MODEL LOADED")
    node_data = seqdata['stest_nodes']
    edgedata = seqdata['stest_edges']
    config['batch_maker'] = 'graph_multi_1'
    testgen = batch_gen(seqdata, 'test', config, nodedata=seqdata['stest_nodes'], edgedata=seqdata['stest_edges'])
    print(model)
    # set up prediction string and output file
    # Every test fid starts from the same "comment prefix" vector that begins
    # with the <s> start token.
    comstart = np.zeros(comlen)
    stk = comstok.w2i['<s>']
    comstart[0] = stk
    outfn = outdir+"/predictions/predict_notebook.txt"
    outf = open(outfn, 'w')
    print("writing to file: " + outfn)
    # Split the fid list into fixed-size batches.
    batch_sets = [allfids[i:i+batchsize] for i in range(0, len(allfids), batchsize)]
    #predict
    for c, fid_set in enumerate(batch_sets):
        st = timer()
        for fid in fid_set:
            seqdata['ctest'][fid] = comstart #np.asarray([stk])
        batch = testgen.make_batch(fid_set)
        batch_results = gen_pred(model, batch, device, comstok, comlen, batchsize, config, fid_set, strat='greedy')
        # Write one "fid<TAB>prediction" line per function id.
        for key, val in batch_results.items():
            outf.write("{}\t{}\n".format(key, val))
        outf.flush()
        end = timer ()
        print("{} processed, {} per second this batch".format((c+1)*batchsize, int(batchsize/(end-st))), end='\r')
    outf.close()
| [
"utils.model.create_model",
"argparse.ArgumentParser",
"numpy.argmax",
"torch.manual_seed",
"torch.load",
"timeit.default_timer",
"torch.nn.CrossEntropyLoss",
"numpy.zeros",
"utils.myutils.seq2sent",
"utils.myutils.batch_gen",
"torch.cuda.manual_seed_all",
"numpy.array",
"torch.from_numpy"
] | [((984, 1007), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1001, 1007), False, 'import torch\n'), ((1012, 1044), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (1038, 1044), False, 'import torch\n'), ((1347, 1362), 'numpy.array', 'np.array', (['tdats'], {}), '(tdats)\n', (1355, 1362), True, 'import numpy as np\n'), ((1374, 1388), 'numpy.array', 'np.array', (['coms'], {}), '(coms)\n', (1382, 1388), True, 'import numpy as np\n'), ((1405, 1424), 'numpy.array', 'np.array', (['wsmlnodes'], {}), '(wsmlnodes)\n', (1413, 1424), True, 'import numpy as np\n'), ((1439, 1456), 'numpy.array', 'np.array', (['wedge_1'], {}), '(wedge_1)\n', (1447, 1456), True, 'import numpy as np\n'), ((1469, 1492), 'torch.from_numpy', 'torch.from_numpy', (['tdats'], {}), '(tdats)\n', (1485, 1492), False, 'import torch\n'), ((1504, 1526), 'torch.from_numpy', 'torch.from_numpy', (['coms'], {}), '(coms)\n', (1520, 1526), False, 'import torch\n'), ((1543, 1570), 'torch.from_numpy', 'torch.from_numpy', (['wsmlnodes'], {}), '(wsmlnodes)\n', (1559, 1570), False, 'import torch\n'), ((1585, 1610), 'torch.from_numpy', 'torch.from_numpy', (['wedge_1'], {}), '(wedge_1)\n', (1601, 1610), False, 'import torch\n'), ((2474, 2513), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (2497, 2513), False, 'import argparse\n'), ((4379, 4399), 'utils.model.create_model', 'create_model', (['config'], {}), '(config)\n', (4391, 4399), False, 'from utils.model import create_model\n'), ((4417, 4438), 'torch.load', 'torch.load', (['modelfile'], {}), '(modelfile)\n', (4427, 4438), False, 'import torch\n'), ((4645, 4672), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (4670, 4672), False, 'import torch\n'), ((4835, 4939), 'utils.myutils.batch_gen', 'batch_gen', (['seqdata', '"""test"""', 'config'], {'nodedata': "seqdata['stest_nodes']", 'edgedata': 
"seqdata['stest_edges']"}), "(seqdata, 'test', config, nodedata=seqdata['stest_nodes'],\n edgedata=seqdata['stest_edges'])\n", (4844, 4939), False, 'from utils.myutils import batch_gen, init_tf\n'), ((5017, 5033), 'numpy.zeros', 'np.zeros', (['comlen'], {}), '(comlen)\n', (5025, 5033), True, 'import numpy as np\n'), ((2387, 2409), 'utils.myutils.seq2sent', 'seq2sent', (['com', 'comstok'], {}), '(com, comstok)\n', (2395, 2409), False, 'from utils.myutils import batch_gen, init_tf, seq2sent\n'), ((5365, 5372), 'timeit.default_timer', 'timer', ([], {}), '()\n', (5370, 5372), True, 'from timeit import default_timer as timer\n'), ((5772, 5779), 'timeit.default_timer', 'timer', ([], {}), '()\n', (5777, 5779), True, 'from timeit import default_timer as timer\n'), ((2011, 2033), 'torch.from_numpy', 'torch.from_numpy', (['coms'], {}), '(coms)\n', (2027, 2033), False, 'import torch\n'), ((2284, 2296), 'numpy.argmax', 'np.argmax', (['s'], {}), '(s)\n', (2293, 2296), True, 'import numpy as np\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs
import os
import yaml
import numpy as np
import paddleseg.transforms as T
from paddle.inference import create_predictor, PrecisionType
from paddle.inference import Config as PredictConfig
from paddleseg.cvlibs import manager
from paddleseg.utils import get_sys_env, logger
from paddleseg.utils.visualize import get_pseudo_color_map
class DeployConfig:
    """Wraps a PaddleSeg deploy YAML file.

    Exposes the preprocessing pipeline plus the model/params file paths,
    all resolved relative to the config file's own directory.
    """

    def __init__(self, path):
        with codecs.open(path, 'r', 'utf-8') as cfg_file:
            self.dic = yaml.load(cfg_file, Loader=yaml.FullLoader)
        self._transforms = self._load_transforms(self.dic['Deploy']['transforms'])
        self._dir = os.path.dirname(path)

    @property
    def transforms(self):
        """Composed preprocessing pipeline from the 'transforms' section."""
        return self._transforms

    @property
    def model(self):
        """Path to the serialized model, relative to the config directory."""
        return os.path.join(self._dir, self.dic['Deploy']['model'])

    @property
    def params(self):
        """Path to the model parameters, relative to the config directory."""
        return os.path.join(self._dir, self.dic['Deploy']['params'])

    def _load_transforms(self, t_list):
        # Each entry names its transform class via the 'type' key; the
        # remaining keys become constructor kwargs.
        registry = manager.TRANSFORMS
        built = [registry[cfg.pop('type')](**cfg) for cfg in t_list]
        return T.Compose(built)
class Predictor:
    """Runs PaddleSeg inference with the Paddle Inference engine.

    Configuration (model paths, preprocessing) comes from a DeployConfig;
    runtime options (GPU, TensorRT, batch size, save dir) from CLI args.
    """
    def __init__(self, args):
        self.cfg = DeployConfig(args.cfg)
        self.args = args
        pred_cfg = PredictConfig(self.cfg.model, self.cfg.params)
        # Silence Paddle's glog output during inference.
        pred_cfg.disable_glog_info()
        if self.args.use_gpu:
            # 100 MB initial GPU memory pool on device 0.
            pred_cfg.enable_use_gpu(100, 0)
        if self.args.use_trt:
            ptype = PrecisionType.Int8 if args.use_int8 else PrecisionType.Float32
            pred_cfg.enable_tensorrt_engine(
                workspace_size=1 << 30,
                max_batch_size=1,
                min_subgraph_size=3,
                precision_mode=ptype,
                use_static=False,
                use_calib_mode=False)
        self.predictor = create_predictor(pred_cfg)
    def preprocess(self, img):
        # The transform pipeline returns a tuple; element 0 is the image array.
        return self.cfg.transforms(img)[0]
    def run(self, imgs):
        """Run inference over `imgs` (a path or list of paths) in mini-batches."""
        if not isinstance(imgs, (list, tuple)):
            imgs = [imgs]
        num = len(imgs)
        input_names = self.predictor.get_input_names()
        input_handle = self.predictor.get_input_handle(input_names[0])
        results = []
        for i in range(0, num, self.args.batch_size):
            # Stack one preprocessed mini-batch into a single array.
            data = np.array([
                self.preprocess(img) for img in imgs[i:i + self.args.batch_size]
            ])
            input_handle.reshape(data.shape)
            input_handle.copy_from_cpu(data)
            self.predictor.run()
            output_names = self.predictor.get_output_names()
            output_handle = self.predictor.get_output_handle(output_names[0])
            results.append(output_handle.copy_to_cpu())
        self.postprocess(results, imgs)
    def postprocess(self, results, imgs):
        """Convert raw network outputs to pseudo-color PNGs in `args.save_dir`."""
        if not os.path.exists(self.args.save_dir):
            os.makedirs(self.args.save_dir)
        # Merge per-batch outputs back into one array over all images.
        results = np.concatenate(results, axis=0)
        for i in range(results.shape[0]):
            # argmax over axis 0 picks the best class per pixel
            # (assumes output layout is (num_classes, H, W) — TODO confirm).
            result = np.argmax(results[i], axis=0)
            result = get_pseudo_color_map(result)
            basename = os.path.basename(imgs[i])
            basename, _ = os.path.splitext(basename)
            basename = f'{basename}.png'
            result.save(os.path.join(self.args.save_dir, basename))
def parse_args():
    """Build and parse the command-line options for prediction."""
    arg_parser = argparse.ArgumentParser(description='Model training')
    # params of training
    arg_parser.add_argument(
        "--config", dest="cfg", type=str, default=None, required=True,
        help="The config file.")
    arg_parser.add_argument(
        '--image_path', dest='image_path', type=str, default=None, required=True,
        help='The directory or path of the image to be predicted.')
    arg_parser.add_argument(
        '--batch_size', dest='batch_size', type=int, default=1,
        help='Mini batch size of one gpu or cpu.')
    arg_parser.add_argument(
        '--save_dir', dest='save_dir', type=str, default='./output',
        help='The directory for saving the predict result.')
    arg_parser.add_argument(
        '--use_trt', dest='use_trt', action='store_true',
        help='Whether to use Nvidia TensorRT to accelerate prediction.')
    arg_parser.add_argument(
        '--use_int8', dest='use_int8', action='store_true',
        help='Whether to use Int8 prediction when using TensorRT prediction.')
    return arg_parser.parse_args()
def get_images(image_path, support_ext=".jpg|.jpeg|.png"):
    """Collect image file paths from a single file or a directory.

    Args:
        image_path (str): an image file, or a directory to scan (non-recursive).
        support_ext (str): '|'-separated list of accepted extensions.

    Returns:
        list[str]: paths of the matching images (a single-element list when
        `image_path` is itself a file, regardless of extension).

    Raises:
        Exception: if `image_path` does not exist.
    """
    if not os.path.exists(image_path):
        raise Exception(f"Image path {image_path} invalid")
    if os.path.isfile(image_path):
        return [image_path]
    # Bug fix: the original tested `ext in support_ext`, a substring match on
    # the whole "|"-joined string, which wrongly accepted partial extensions
    # such as ".jp" or ".pn". Compare against an exact set of extensions.
    valid_exts = {e.lstrip('.').lower() for e in support_ext.split('|') if e}
    imgs = []
    for item in os.listdir(image_path):
        ext = os.path.splitext(item)[1][1:].strip().lower()
        if ext and ext in valid_exts:
            imgs.append(os.path.join(image_path, item))
    return imgs
def main(args):
    """Run segmentation inference over every image found at `args.image_path`."""
    env_info = get_sys_env()
    # Use the GPU only when Paddle was built with CUDA and a GPU is visible.
    args.use_gpu = bool(env_info['Paddle compiled with cuda']
                        and env_info['GPUs used'])
    predictor = Predictor(args)
    predictor.run(get_images(args.image_path))
if __name__ == '__main__':
    # Entry point: parse CLI options, then run batch inference.
    args = parse_args()
    main(args)
| [
"paddle.inference.Config",
"yaml.load",
"argparse.ArgumentParser",
"codecs.open",
"os.makedirs",
"numpy.argmax",
"paddleseg.utils.visualize.get_pseudo_color_map",
"os.path.dirname",
"paddle.inference.create_predictor",
"os.path.exists",
"paddleseg.transforms.Compose",
"os.path.basename",
"pa... | [((4033, 4086), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Model training"""'}), "(description='Model training')\n", (4056, 4086), False, 'import argparse\n'), ((5375, 5401), 'os.path.isfile', 'os.path.isfile', (['image_path'], {}), '(image_path)\n', (5389, 5401), False, 'import os\n'), ((5462, 5484), 'os.listdir', 'os.listdir', (['image_path'], {}), '(image_path)\n', (5472, 5484), False, 'import os\n'), ((5735, 5748), 'paddleseg.utils.get_sys_env', 'get_sys_env', ([], {}), '()\n', (5746, 5748), False, 'from paddleseg.utils import get_sys_env, logger\n'), ((1259, 1280), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (1274, 1280), False, 'import os\n'), ((1405, 1457), 'os.path.join', 'os.path.join', (['self._dir', "self.dic['Deploy']['model']"], {}), "(self._dir, self.dic['Deploy']['model'])\n", (1417, 1457), False, 'import os\n'), ((1510, 1563), 'os.path.join', 'os.path.join', (['self._dir', "self.dic['Deploy']['params']"], {}), "(self._dir, self.dic['Deploy']['params'])\n", (1522, 1563), False, 'import os\n'), ((1784, 1805), 'paddleseg.transforms.Compose', 'T.Compose', (['transforms'], {}), '(transforms)\n', (1793, 1805), True, 'import paddleseg.transforms as T\n'), ((1942, 1988), 'paddle.inference.Config', 'PredictConfig', (['self.cfg.model', 'self.cfg.params'], {}), '(self.cfg.model, self.cfg.params)\n', (1955, 1988), True, 'from paddle.inference import Config as PredictConfig\n'), ((2542, 2568), 'paddle.inference.create_predictor', 'create_predictor', (['pred_cfg'], {}), '(pred_cfg)\n', (2558, 2568), False, 'from paddle.inference import create_predictor, PrecisionType\n'), ((3614, 3645), 'numpy.concatenate', 'np.concatenate', (['results'], {'axis': '(0)'}), '(results, axis=0)\n', (3628, 3645), True, 'import numpy as np\n'), ((5279, 5305), 'os.path.exists', 'os.path.exists', (['image_path'], {}), '(image_path)\n', (5293, 5305), False, 'import os\n'), ((1038, 1069), 'codecs.open', 'codecs.open', 
(['path', '"""r"""', '"""utf-8"""'], {}), "(path, 'r', 'utf-8')\n", (1049, 1069), False, 'import codecs\n'), ((1102, 1141), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'yaml.FullLoader'}), '(file, Loader=yaml.FullLoader)\n', (1111, 1141), False, 'import yaml\n'), ((3515, 3549), 'os.path.exists', 'os.path.exists', (['self.args.save_dir'], {}), '(self.args.save_dir)\n', (3529, 3549), False, 'import os\n'), ((3563, 3594), 'os.makedirs', 'os.makedirs', (['self.args.save_dir'], {}), '(self.args.save_dir)\n', (3574, 3594), False, 'import os\n'), ((3709, 3738), 'numpy.argmax', 'np.argmax', (['results[i]'], {'axis': '(0)'}), '(results[i], axis=0)\n', (3718, 3738), True, 'import numpy as np\n'), ((3760, 3788), 'paddleseg.utils.visualize.get_pseudo_color_map', 'get_pseudo_color_map', (['result'], {}), '(result)\n', (3780, 3788), False, 'from paddleseg.utils.visualize import get_pseudo_color_map\n'), ((3812, 3837), 'os.path.basename', 'os.path.basename', (['imgs[i]'], {}), '(imgs[i])\n', (3828, 3837), False, 'import os\n'), ((3864, 3890), 'os.path.splitext', 'os.path.splitext', (['basename'], {}), '(basename)\n', (3880, 3890), False, 'import os\n'), ((5620, 5650), 'os.path.join', 'os.path.join', (['image_path', 'item'], {}), '(image_path, item)\n', (5632, 5650), False, 'import os\n'), ((3956, 3998), 'os.path.join', 'os.path.join', (['self.args.save_dir', 'basename'], {}), '(self.args.save_dir, basename)\n', (3968, 3998), False, 'import os\n'), ((5500, 5522), 'os.path.splitext', 'os.path.splitext', (['item'], {}), '(item)\n', (5516, 5522), False, 'import os\n')] |
import argparse
import csv
import numpy as np
def compute_mse_error_csv(ground_truth_path: str, inference_output_path: str) -> np.ndarray:
    """Compute the per-timestep mean squared error between two prediction csvs.

    Both files share the layout: a header row, then one row per obstacle of
    three key fields (the third being `future_num_frames`), followed by
    `future_num_frames * 2` coordinate values and `future_num_frames`
    availability flags. Unavailable timesteps are masked out of the mean.

    Arguments:
        ground_truth_path (str): Path to the ground truth csv file.
        inference_output_path (str): Path to the csv file containing network output.

    Returns:
        np.ndarray: MSE per future frame, averaged over all available obstacles.

    Raises:
        ValueError: if the two csvs do not describe the same set of obstacles.
    """
    def extract_csv(csv_path: str) -> list:
        # Read every row; the caller strips the header.
        with open(csv_path, "r", newline="") as csv_file:
            return list(csv.reader(csv_file, delimiter=","))

    ground_truth_rows = extract_csv(ground_truth_path)[1:]
    inference_rows = extract_csv(inference_output_path)[1:]
    assert len(ground_truth_rows[0]) == len(inference_rows[0]), "number of fields do not match"

    future_num_frames = int(ground_truth_rows[0][2])  # read future_num_frames TODO: make this less mysterious

    def to_key(vals: list) -> str:
        # The first three fields jointly identify an obstacle.
        return str(",".join(vals))

    def parse_values(values: list, future_num_frames: int) -> dict:
        raw_value = np.array(values).astype(np.float64)
        coords = raw_value[: future_num_frames * 2].reshape(future_num_frames, 2).copy()
        avail = raw_value[future_num_frames * 2 :].copy()
        return {"coords": coords, "avail": avail}

    ground_truth = {to_key(i[0:3]): parse_values(i[3:], future_num_frames) for i in ground_truth_rows}
    inference = {to_key(i[0:3]): parse_values(i[3:], future_num_frames) for i in inference_rows}

    def validate(ground_truth: dict, inference: dict) -> bool:
        # Report every mismatch before failing so the user sees the full picture.
        valid = True
        if not (len(ground_truth.keys()) == len(inference.keys())):
            print(
                f"""Incorrect number of rows in inference csv. Expected {len(ground_truth.keys())},
                Got {len(inference.keys())}"""
            )
            valid = False
        missing_obstacles = ground_truth.keys() - inference.keys()
        if len(missing_obstacles):
            valid = False
            for missing_obstacle in missing_obstacles:
                print(f"Missing obstacle: {missing_obstacle}")
        unknown_obstacles = inference.keys() - ground_truth.keys()
        if len(unknown_obstacles):
            valid = False
            for unknown_obstacle in unknown_obstacles:
                print(f"Unknown obstacle: {unknown_obstacle}")
        return valid

    if not validate(ground_truth, inference):
        raise ValueError("Error validating csv, see above for details.")

    def compute_mse(A: np.ndarray, B: np.ndarray) -> np.ndarray:
        return ((A - B) ** 2).mean(axis=-1)  # reduce coords, keep steps

    errors = []
    gt_avails = []
    for key, ground_truth_value in ground_truth.items():
        gt_coords = ground_truth_value["coords"]
        pred_coords = inference[key]["coords"]
        errors.append(compute_mse(gt_coords, pred_coords))
        gt_avails.append(ground_truth_value["avail"])

    # use numpy masked package to get a mean where unavailable elements are masked out
    # Bug fix: `np.bool` was removed in NumPy 1.24 — use the builtin `bool`.
    mask = ~np.array(gt_avails).astype(bool)  # note the ~ here, masked seems to assume 0 means keep
    errors = np.ma.masked_array(np.array(errors), mask=mask)
    return np.mean(errors, axis=0).data
if __name__ == "__main__":
    # CLI wrapper: positional paths to the two csv files, then print the
    # per-timestep MSE array.
    parser = argparse.ArgumentParser(
        description="""Print mean squared error for a deep prediction run. Takes as input a csv file for ground truth,
        another as output from inference. """
    )
    parser.add_argument("ground_truth_csv", type=str, help="Path to the csv containing ground truth.")
    parser.add_argument("inference_csv", type=str, help="Path to the csv containing output from an inference run.")
    args = parser.parse_args()
    mse = compute_mse_error_csv(args.ground_truth_csv, args.inference_csv)
    print("mse", mse)
| [
"csv.reader",
"numpy.mean",
"numpy.array",
"argparse.ArgumentParser"
] | [((3201, 3421), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Print mean squared error for a deep prediction run. Takes as input a csv file for ground truth,\n another as output from inference. """'}), '(description=\n """Print mean squared error for a deep prediction run. Takes as input a csv file for ground truth,\n another as output from inference. """\n )\n', (3224, 3421), False, 'import argparse\n'), ((3090, 3106), 'numpy.array', 'np.array', (['errors'], {}), '(errors)\n', (3098, 3106), True, 'import numpy as np\n'), ((3130, 3153), 'numpy.mean', 'np.mean', (['errors'], {'axis': '(0)'}), '(errors, axis=0)\n', (3137, 3153), True, 'import numpy as np\n'), ((989, 1005), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (997, 1005), True, 'import numpy as np\n'), ((2966, 2985), 'numpy.array', 'np.array', (['gt_avails'], {}), '(gt_avails)\n', (2974, 2985), True, 'import numpy as np\n'), ((464, 499), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (474, 499), False, 'import csv\n')] |
import numpy as np
import time
from graph_tool.all import Graph, shortest_path, load_graph
from power_planner.utils.utils import angle, get_lg_donut
from power_planner.utils.utils_constraints import ConstraintUtils
from power_planner.utils.utils_costs import CostUtils
from .general_graph import GeneralGraph
class LineGraphFromGraph():
    """
    Class to build a line graph from a given weighted graph

    Each edge of the previous graph becomes a vertex of the line graph, so
    angle costs between consecutive edges can be encoded as edge weights.
    """

    def __init__(
        self,
        prev_graph,
        cost_instance,
        hard_constraints,
        directed=True,
        graphtool=1,
        verbose=1
    ):
        # prev_graph: the weighted raster graph to lift into a line graph.
        # cost_instance: 2D per-cell cost array; hard_constraints: boolean
        # 2D array of the same shape marking which cells are usable.
        tic = time.time()
        assert cost_instance.shape == hard_constraints.shape
        self.cost_instance = cost_instance
        self.hard_constraints = hard_constraints
        # Load graph
        GeneralGraph.load_graph(self, prev_graph)
        # self.weight_prev = prev_graph.ep.weight
        self.n_edges = len(list(self.g_prev.edges()))
        # node to pos mapping: node index n -> raster cell (i, j),
        # restricted to cells allowed by the hard constraints
        x_len, y_len = cost_instance.shape
        self.node_pos = [
            (i, j) for i in range(x_len) for j in range(y_len)
            if hard_constraints[i, j]
        ]
        # pos to node mapping: inverse of node_pos; -1 marks forbidden cells
        self.pos2node = np.ones(cost_instance.shape)
        self.pos2node *= -1
        for n, (i, j) in enumerate(self.node_pos):
            self.pos2node[i, j] = n
        print("initialized weighted graph (pos2node and node_pos)")
        # edge to node mapping: (source node, target node) -> index of that
        # edge, i.e. the corresponding line-graph vertex; -1 means no edge
        max_shape = (
            int(np.max(self.pos2node)) + 1, int(np.max(self.pos2node)) + 1
        )
        self.edge_to_node = np.ones(max_shape)
        self.edge_to_node *= -1
        for k, edge in enumerate(self.g_prev.edges()):
            (i, j) = tuple(edge)
            self.edge_to_node[int(i), int(j)] = k
        # initialize the (empty) line graph itself
        GeneralGraph.__init__(
            self, directed=directed, graphtool=graphtool, verbose=verbose
        )
        self.verbose = verbose
        self.time_logs = {}
        self.time_logs["init_graph"] = round(time.time() - tic, 3)

    def add_nodes(self):
        """Add one line-graph vertex per edge of the previous graph."""
        tic_function = time.time()
        GeneralGraph.add_nodes(self, self.n_edges)
        self.time_logs["add_nodes"] = round(time.time() - tic_function, 3)

    def add_edges(self, max_angle=0.5 * np.pi):
        """Connect line-graph vertices for each in-edge/out-edge pair of a node.

        For every vertex v of the previous graph, each pair of incoming and
        outgoing edges yields a candidate line-graph edge whose weight combines
        the turning-angle cost (normalized by max_angle, pairs sharper than
        max_angle are dropped) with the cell cost at v's position.
        """
        tic_edges = time.time()
        edges = []
        for i, v in enumerate(self.g_prev.vertices()):
            for in_nb in v.in_neighbours():
                for out_nb in v.out_neighbours():
                    in_nb_ind = self.node_pos[int(in_nb)]
                    out_nb_ind = self.node_pos[int(out_nb)]
                    pos = self.node_pos[i]
                    # vector between: subtract two pos tuples
                    vec1 = np.subtract(in_nb_ind, pos)
                    vec2 = np.subtract(pos, out_nb_ind)
                    angle_cost = angle(vec1, vec2) / (max_angle)
                    if angle_cost <= 1:
                        v1_line = self.edge_to_node[int(in_nb), i]
                        v2_line = self.edge_to_node[i, int(out_nb)]
                        cost_before = self.cost_instance[pos[0], pos[1]]
                        edges.append(
                            [v1_line, v2_line, 0.5 * angle_cost + cost_before]
                        )
        toc_edges = time.time()
        tic = time.time()
        self.graph.add_edge_list(edges, eprops=[self.weight])
        # time logs
        self.time_logs["add_edges"] = round(time.time() - tic, 3)
        self.time_logs["add_edges_times"] = 0
        self.time_logs["edge_list"] = round(toc_edges - tic_edges, 3)
        self.time_logs["edge_list_times"] = 0
        self.time_logs["add_all_edges"] = round(time.time() - tic_edges, 3)

    def add_start_and_dest(self, source_pos, dest_pos):
        """Add auxiliary source/dest vertices wired to all adjacent line vertices.

        Each out-edge of the source cell (and in-edge of the dest cell) in the
        previous graph corresponds to a line-graph vertex; connect the new
        auxiliary vertices to those with zero-weight edges so shortest-path
        search can start/end at a cell rather than an edge.

        Returns:
            (source_line, dest_line): the two newly added line-graph vertices.
        """
        tic = time.time()
        source = self.pos2node[source_pos[0], source_pos[1]]
        dest = self.pos2node[dest_pos[0], dest_pos[1]]
        source_line = self.graph.add_vertex()
        dest_line = self.graph.add_vertex()
        source_dest_edges = []
        for e_out in self.g_prev.vertex(source).out_edges():
            e_out = tuple(e_out)
            node_line = self.edge_to_node[int(e_out[0]), int(e_out[1])]
            source_dest_edges.append(
                [self.graph.vertex_index[source_line], node_line, 0]
            )
        for e_out in self.g_prev.vertex(dest).in_edges():
            e_out = tuple(e_out)
            node_line = self.edge_to_node[int(e_out[0]), int(e_out[1])]
            source_dest_edges.append(
                [node_line, self.graph.vertex_index[dest_line], 0]
            )
        self.graph.add_edge_list(source_dest_edges, eprops=[self.weight])
        self.time_logs["add_start_end"] = round(time.time() - tic, 3)
        return source_line, dest_line

    def get_shortest_path(self, source, dest):
        """Compute the shortest path in the line graph and map it back to cells.

        The auxiliary source/dest vertices are skipped; every remaining
        line-graph vertex is translated back to the edge of the previous graph
        it represents, and the edge endpoints yield the raster cell sequence.

        Returns:
            (path_line, []): list of (i, j) cell positions, plus an empty list
            kept for interface compatibility.
        """
        vertices_path = GeneralGraph.get_shortest_path(self, source, dest)
        path_line = []
        for i, v in enumerate(vertices_path[1:-1]):
            v_ind_line = self.graph.vertex_index[v]
            edge_actual = tuple(list(self.g_prev.edges())[v_ind_line])
            if i == 0:
                path_line.append(
                    self.node_pos[self.g_prev.vertex_index[edge_actual[0]]]
                )
            path_line.append(
                self.node_pos[self.g_prev.vertex_index[edge_actual[1]]]
            )
        return path_line, []
| [
"numpy.subtract",
"numpy.ones",
"time.time",
"numpy.max",
"power_planner.utils.utils.angle"
] | [((602, 613), 'time.time', 'time.time', ([], {}), '()\n', (611, 613), False, 'import time\n'), ((1208, 1236), 'numpy.ones', 'np.ones', (['cost_instance.shape'], {}), '(cost_instance.shape)\n', (1215, 1236), True, 'import numpy as np\n'), ((1587, 1605), 'numpy.ones', 'np.ones', (['max_shape'], {}), '(max_shape)\n', (1594, 1605), True, 'import numpy as np\n'), ((2094, 2105), 'time.time', 'time.time', ([], {}), '()\n', (2103, 2105), False, 'import time\n'), ((2301, 2312), 'time.time', 'time.time', ([], {}), '()\n', (2310, 2312), False, 'import time\n'), ((3291, 3302), 'time.time', 'time.time', ([], {}), '()\n', (3300, 3302), False, 'import time\n'), ((3318, 3329), 'time.time', 'time.time', ([], {}), '()\n', (3327, 3329), False, 'import time\n'), ((3789, 3800), 'time.time', 'time.time', ([], {}), '()\n', (3798, 3800), False, 'import time\n'), ((2023, 2034), 'time.time', 'time.time', ([], {}), '()\n', (2032, 2034), False, 'import time\n'), ((2201, 2212), 'time.time', 'time.time', ([], {}), '()\n', (2210, 2212), False, 'import time\n'), ((3457, 3468), 'time.time', 'time.time', ([], {}), '()\n', (3466, 3468), False, 'import time\n'), ((3690, 3701), 'time.time', 'time.time', ([], {}), '()\n', (3699, 3701), False, 'import time\n'), ((4734, 4745), 'time.time', 'time.time', ([], {}), '()\n', (4743, 4745), False, 'import time\n'), ((1490, 1511), 'numpy.max', 'np.max', (['self.pos2node'], {}), '(self.pos2node)\n', (1496, 1511), True, 'import numpy as np\n'), ((1522, 1543), 'numpy.max', 'np.max', (['self.pos2node'], {}), '(self.pos2node)\n', (1528, 1543), True, 'import numpy as np\n'), ((2731, 2758), 'numpy.subtract', 'np.subtract', (['in_nb_ind', 'pos'], {}), '(in_nb_ind, pos)\n', (2742, 2758), True, 'import numpy as np\n'), ((2786, 2814), 'numpy.subtract', 'np.subtract', (['pos', 'out_nb_ind'], {}), '(pos, out_nb_ind)\n', (2797, 2814), True, 'import numpy as np\n'), ((2848, 2865), 'power_planner.utils.utils.angle', 'angle', (['vec1', 'vec2'], {}), '(vec1, vec2)\n', (2853, 
2865), False, 'from power_planner.utils.utils import angle, get_lg_donut\n')] |
import discord
from discord.ext import commands
from Functions.data import DATA
from Functions.dates import DATES
import numpy as np
import asyncio
import os
class finales(commands.Cog):
"""
Shows a list of all of a player's finales - defined as rounds with only two players.
"""
FORMAT = "[player]"
USAGE = """Using `gl/finales PLAYER` outputs a list of all "finales" they've
competed in (defined as any round with only two people, not necessarily just
the final round of a season). /ln/ Long lists are divided into pages that you
can navigate using the ⬅️ and ➡️ reactions. /ln/ By default, the list is sorted
by oldest round. You can cycle through different sorting methods with the ⏺️
reaction, and you can reverse the current sorting with the ↕️ reaction. /ln/
Including 'file' on the end of the command, i.e. `gl/finales PLAYER file` outputs
the entire finale list in a text file.
""".replace("\n", "").replace("\t", "").replace(" /ln/ ", "\n")
    def __init__(self, BOT):
        # Keep a reference to the bot instance so command handlers can use it.
        self.BOT = BOT
@commands.command(name="finales", aliases=['f'])
async def output(self, ctx):
message = ctx.message
args = ctx.message.content.split(" ")
level = len(args)
server = ctx.guild
await self.run_command(message, args, level, server)
return
    async def run_command(self, message, args, level, server):
        """Post a paginated summary of a contestant's finale rounds.

        Looks the player up in DATA, collects every round whose record marks a
        finale, and sends an embed-style code block that can be paged and
        re-sorted via reactions. If the last argument is "file", the full
        (unpaginated) listing is written to a temp file, attached, and deleted.
        """
        # Needs at least 2 arguments: command, player
        if level < 2:
            await message.channel.send(
                "Include the name of the contestant whose finales you want to see!")
            return
        # Check if the user requests a file output
        make_file = False
        if args[-1].lower() == "file":
            make_file = True
            args = args[:-1]
            level = len(args)
        raw_cont_name = " ".join(args[1:])
        # Check if player exists
        if not (username := DATA.true_name(raw_cont_name)):
            await message.channel.send(
                f"Could not find a player named **`{raw_cont_name}`** in the data.")
            return
        sorting = [
            # Category, ascending label, descending label, whether default reversed
            ["name", "alphabetical order", "reverse-alphabetical order", False],
            ["RM change", "largest loss", "highest gain", True],
            ["rank", "best rank", "worst rank", False],
            ["strength", "weakest", "strongest", True],
            ["date", "oldest", "newest", False]
        ]
        # [index into `sorting`, reversed?] — mutated by the reaction loop below.
        sort_info = [4, False]
        finales = []
        round_count = 0
        for month in range(len(DATA.HISTORY)):
            if username not in DATA.HISTORY[month]:
                continue
            for round_name, round_info in DATA.HISTORY[month][username].items():
                round_count += 1
                # round_info[2] == 2 marks a finale round — others are skipped.
                if round_info[2] != 2:
                    continue
                strength, date = DATA.ROUNDS[month][round_name]
                NR = 2 - round_info[1]
                finales.append([round_name] + round_info[:2] + [strength, date, NR])
        # Finales are [name, gain, rank, strength, date, NR]
        # Player hasn't been in any finales
        if len(finales) == 0:
            await message.channel.send(
                f"""```diff
+ {username}``````md
# Rounds: {round_count}
# Finales: 0
# Finale Ratio: 0.00%```""".replace("\t", ""))
            return
        msg = f"```diff\n+ {username}```"
        per_page = 15
        finale_count = len(finales)
        total_pages = int(np.ceil(finale_count / per_page))
        # Wrapper function to customize page generation
        def gen_page(p_n, sort=4, rev=False):
            add_msg = "```md\n"
            add_msg += f"# Rounds: {round_count}\n"
            add_msg += f"# Finales: {finale_count}\n"
            add_msg += (
                f"# Finale Ratio: {100*finale_count/round_count:.2f}%\n\n")
            # Win/loss split: rank 1 counts as a win.
            w = len([f for f in finales if f[2] == 1])
            l = finale_count - w
            add_msg += f"# W/L Record: {w} / {l}\n"
            add_msg += (
                f"# W/L Ratio: {100 * w / (w + l):.2f}% / {100 * l / (w + l):.2f}%\n\n")
            # XOR combines the category's default direction with the user toggle.
            rev = sorting[sort][3] ^ rev
            subset = sorted(finales, reverse=rev, key=lambda m: m[sort])
            subset = subset[per_page * (p_n - 1) : per_page * (p_n)]
            add_msg += (
                "| Date || Round Name || RM Change || Ranks || N.R. || Round Str\n")
            for name, gain, rank, strength, date, NR in subset:
                full_date = DATES.as_YMD(date)
                gain_sign = "+" if gain >= 0 else "-"
                abs_gain = np.abs(round(gain, 2))
                nr_format = "100.0%" if NR == 1 else "0.000%"
                add_msg += (
                    f"[{full_date}]: {name:<26} || {gain_sign} {abs_gain:<7} || {rank:>3} / 2 || {nr_format} || {strength:.02f}\n")
            add_msg += "\n"
            # Add page info if there's more than one
            if total_pages > 1:
                bounds = [
                    per_page * (p_n - 1) + 1,
                    min(per_page * p_n, finale_count)
                ]
                add_msg += (
                    f"< Page [{p_n} / {total_pages}] -- Rounds [{bounds[0]} ~ {bounds[1]}] of [{finale_count}]>\n")
            # Add finale sorting info if there's more than one
            if finale_count > 1:
                add_msg += (
                    f"< [{sorting[sort][0].upper()}] type sorting -- ordered by [{sorting[sort][1 + int(rev)].upper()}] >\n")
            add_msg += "```"
            return msg + add_msg
        # File-writing routine
        if make_file:
            with open(f"{username} Finales {message.id}.txt", "a", encoding="utf-8") as file:
                sort, rev = sort_info
                file.write(f"# Rounds: {round_count}\n")
                file.write(f"# Finales: {finale_count}\n")
                file.write(
                    f"# Finale Ratio: {100*finale_count/round_count:.2f}%\n\n")
                w = len([f for f in finales if f[2] == 1])
                l = finale_count - w
                file.write(f"# W/L Record: {w} / {l}\n")
                file.write(
                    f"# W/L Ratio: {100 * w / (w + l):.2f}% / {100 * l / (w + l):.2f}%\n\n")
                rev = sorting[sort][3] ^ rev
                finales = sorted(finales, reverse=rev, key=lambda m: m[sort])
                file.write(
                    "| Date || Round Name || RM Change || Ranks || N.R. || Round Str\n")
                for name, gain, rank, strength, date, NR in finales:
                    full_date = DATES.as_YMD(date)
                    gain_sign = "+" if gain >= 0 else "-"
                    abs_gain = np.abs(round(gain, 2))
                    nr_format = "100.0%" if NR == 1 else "0.000%"
                    file.write(
                        f"[{full_date}]: {name:<26} || {gain_sign} {abs_gain:<7} || {rank:>3} / 2 || {nr_format} || {strength:.02f}\n")
            # Attach the finished file, then clean it up from disk.
            await message.channel.send(msg,
                file=discord.File(f"{username} Finales {message.id}.txt"))
            os.remove(f"{username} Finales {message.id}.txt")
            return
        page_number = 1
        page = gen_page(page_number, sort=sort_info[0], rev=sort_info[1])
        page_msg = await message.channel.send(page)
        await page_msg.edit(content=page)
        reaction_list = ['⬅️', '➡️', '⏺️', '↕️']
        if total_pages > 1:
            await page_msg.add_reaction('⬅️')
            await page_msg.add_reaction('➡️')
        if finale_count > 1:
            await page_msg.add_reaction('⏺️')
            await page_msg.add_reaction('↕️')
        # Check that the reaction is what we want
        def check(reaction, user):
            return (user == message.author
                    and str(reaction.emoji) in reaction_list
                    and reaction.message.id == page_msg.id)
        # Reaction loop: page with ⬅️/➡️, cycle sort category with ⏺️,
        # flip sort direction with ↕️; times out after 120 s of inactivity.
        while True:
            try:
                reaction, react_user = await self.BOT.wait_for(
                    'reaction_add', timeout=120.0, check=check)
            except asyncio.TimeoutError:
                try:
                    await page_msg.clear_reactions()
                except discord.errors.Forbidden:
                    # Lacking Manage Messages: remove only the bot's own reactions.
                    for r in reaction_list:
                        await page_msg.remove_reaction(r, self.BOT.user)
                break
            else:
                if str(reaction.emoji) == '⬅️':
                    try:
                        await page_msg.remove_reaction('⬅️', react_user)
                    except discord.errors.Forbidden:
                        pass
                    if page_number > 1:
                        page_number -= 1
                if str(reaction.emoji) == '➡️':
                    try:
                        await page_msg.remove_reaction('➡️', react_user)
                    except discord.errors.Forbidden:
                        pass
                    if page_number < total_pages:
                        page_number += 1
                if str(reaction.emoji) == '⏺️':
                    try:
                        await page_msg.remove_reaction('⏺️', react_user)
                    except discord.errors.Forbidden:
                        pass
                    sort_info[0] += 1
                    sort_info[0] %= len(sorting)
                if str(reaction.emoji) == '↕️':
                    try:
                        await page_msg.remove_reaction('↕️', react_user)
                    except discord.errors.Forbidden:
                        pass
                    sort_info[1] = not sort_info[1]
                page = gen_page(page_number, sort=sort_info[0], rev=sort_info[1])
                await page_msg.edit(content=page)
                continue
def setup(BOT):
    """Extension entry point: register the finales cog on the bot."""
    cog = finales(BOT)
    BOT.add_cog(cog)
"os.remove",
"discord.ext.commands.command",
"numpy.ceil",
"Functions.data.DATA.true_name",
"discord.File",
"Functions.dates.DATES.as_YMD"
] | [((1058, 1105), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""finales"""', 'aliases': "['f']"}), "(name='finales', aliases=['f'])\n", (1074, 1105), False, 'from discord.ext import commands\n'), ((3227, 3259), 'numpy.ceil', 'np.ceil', (['(finale_count / per_page)'], {}), '(finale_count / per_page)\n', (3234, 3259), True, 'import numpy as np\n'), ((6271, 6320), 'os.remove', 'os.remove', (['f"""{username} Finales {message.id}.txt"""'], {}), "(f'{username} Finales {message.id}.txt')\n", (6280, 6320), False, 'import os\n'), ((1828, 1857), 'Functions.data.DATA.true_name', 'DATA.true_name', (['raw_cont_name'], {}), '(raw_cont_name)\n', (1842, 1857), False, 'from Functions.data import DATA\n'), ((4123, 4141), 'Functions.dates.DATES.as_YMD', 'DATES.as_YMD', (['date'], {}), '(date)\n', (4135, 4141), False, 'from Functions.dates import DATES\n'), ((5858, 5876), 'Functions.dates.DATES.as_YMD', 'DATES.as_YMD', (['date'], {}), '(date)\n', (5870, 5876), False, 'from Functions.dates import DATES\n'), ((6211, 6263), 'discord.File', 'discord.File', (['f"""{username} Finales {message.id}.txt"""'], {}), "(f'{username} Finales {message.id}.txt')\n", (6223, 6263), False, 'import discord\n')] |
import numpy as np
from PIL import Image
import tensorflow as tf
from matplotlib import gridspec
from matplotlib import pyplot as plt
import tarfile
import os
import time
class DeepLabModel(object):
    """Class to load deeplab model and run inference."""

    INPUT_TENSOR_NAME = 'ImageTensor:0'
    OUTPUT_TENSOR_NAME = 'SemanticPredictions:0'
    INPUT_SIZE = 513
    FROZEN_GRAPH_NAME = 'frozen_inference_graph'

    def __init__(self, tarball_path):
        """Creates and loads pretrained deeplab model.

        Args:
            tarball_path: Path to a tar archive containing a member whose
                basename includes FROZEN_GRAPH_NAME.

        Raises:
            RuntimeError: If no frozen inference graph is found in the archive.
        """
        self.graph = tf.Graph()
        graph_def = None
        # Extract frozen graph from tar archive. try/finally guarantees the
        # tar handle is closed even if extraction/parsing raises.
        tar_file = tarfile.open(tarball_path)
        try:
            for tar_info in tar_file.getmembers():
                if self.FROZEN_GRAPH_NAME in os.path.basename(tar_info.name):
                    file_handle = tar_file.extractfile(tar_info)
                    graph_def = tf.compat.v1.GraphDef.FromString(file_handle.read())
                    break
        finally:
            tar_file.close()
        if graph_def is None:
            raise RuntimeError('Cannot find inference graph in tar archive.')
        with self.graph.as_default():
            tf.import_graph_def(graph_def, name='')
        self.sess = tf.compat.v1.Session(graph=self.graph)

    def run(self, image):
        """Runs inference on a single image.
        Args:
            image: A PIL.Image object, raw input image.
        Returns:
            resized_image: RGB image resized from original input image.
            seg_map: Segmentation map of `resized_image`.
        """
        width, height = image.size
        # Scale the longer side down to INPUT_SIZE, preserving aspect ratio.
        resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)
        target_size = (int(resize_ratio * width), int(resize_ratio * height))
        # Image.ANTIALIAS was removed in Pillow 10; use the Resampling enum
        # (Pillow >= 9.1) and fall back to the old constant on older versions.
        try:
            resample = Image.Resampling.LANCZOS
        except AttributeError:  # Pillow < 9.1
            resample = Image.ANTIALIAS
        resized_image = image.convert('RGB').resize(target_size, resample)
        batch_seg_map = self.sess.run(
            self.OUTPUT_TENSOR_NAME,
            feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})
        seg_map = batch_seg_map[0]
        return resized_image, seg_map
def create_ade20k_label_colormap():
    """Creates a label colormap used in ADE20K segmentation benchmark.
    Returns:
        A colormap for visualizing segmentation results.
    """
    # One RGB row per label index; row 0 ([0, 0, 0]) is the background/void
    # color. Indexing this array with an integer label map yields an RGB image.
    colormap = np.asarray([
        [0,0,0],
        [120, 120, 120],
        [180, 120, 120],
        [6, 230, 230],
        [80, 50, 50],
        [4, 200, 3],
        [120, 120, 80],
        [140, 140, 140],
        [204, 5, 255],
        [230, 230, 230],
        [4, 250, 7],
        [224, 5, 255],
        [235, 255, 7],
        [150, 5, 61],
        [120, 120, 70],
        [8, 255, 51],
        [255, 6, 82],
        [143, 255, 140],
        [204, 255, 4],
        [255, 51, 7],
        [204, 70, 3],
        [0, 102, 200],
        [61, 230, 250],
        [255, 6, 51],
        [11, 102, 255],
        [255, 7, 71],
        [255, 9, 224],
        [9, 7, 230],
        [220, 220, 220],
        [255, 9, 92],
        [112, 9, 255],
        [8, 255, 214],
        [7, 255, 224],
        [255, 184, 6],
        [10, 255, 71],
        [255, 41, 10],
        [7, 255, 255],
        [224, 255, 8],
        [102, 8, 255],
        [255, 61, 6],
        [255, 194, 7],
        [255, 122, 8],
        [0, 255, 20],
        [255, 8, 41],
        [255, 5, 153],
        [6, 51, 255],
        [235, 12, 255],
        [160, 150, 20],
        [0, 163, 255],
        [140, 140, 140],
        [250, 10, 15],
        [20, 255, 0],
        [31, 255, 0],
        [255, 31, 0],
        [255, 224, 0],
        [153, 255, 0],
        [0, 0, 255],
        [255, 71, 0],
        [0, 235, 255],
        [0, 173, 255],
        [31, 0, 255],
        [11, 200, 200],
        [255, 82, 0],
        [0, 255, 245],
        [0, 61, 255],
        [0, 255, 112],
        [0, 255, 133],
        [255, 0, 0],
        [255, 163, 0],
        [255, 102, 0],
        [194, 255, 0],
        [0, 143, 255],
        [51, 255, 0],
        [0, 82, 255],
        [0, 255, 41],
        [0, 255, 173],
        [10, 0, 255],
        [173, 255, 0],
        [0, 255, 153],
        [255, 92, 0],
        [255, 0, 255],
        [255, 0, 245],
        [255, 0, 102],
        [255, 173, 0],
        [255, 0, 20],
        [255, 184, 184],
        [0, 31, 255],
        [0, 255, 61],
        [0, 71, 255],
        [255, 0, 204],
        [0, 255, 194],
        [0, 255, 82],
        [0, 10, 255],
        [0, 112, 255],
        [51, 0, 255],
        [0, 194, 255],
        [0, 122, 255],
        [0, 255, 163],
        [255, 153, 0],
        [0, 255, 10],
        [255, 112, 0],
        [143, 255, 0],
        [82, 0, 255],
        [163, 255, 0],
        [255, 235, 0],
        [8, 184, 170],
        [133, 0, 255],
        [0, 255, 92],
        [184, 0, 255],
        [255, 0, 31],
        [0, 184, 255],
        [0, 214, 255],
        [255, 0, 112],
        [92, 255, 0],
        [0, 224, 255],
        [112, 224, 255],
        [70, 184, 160],
        [163, 0, 255],
        [153, 0, 255],
        [71, 255, 0],
        [255, 0, 163],
        [255, 204, 0],
        [255, 0, 143],
        [0, 255, 235],
        [133, 255, 0],
        [255, 0, 235],
        [245, 0, 255],
        [255, 0, 122],
        [255, 245, 0],
        [10, 190, 212],
        [214, 255, 0],
        [0, 204, 255],
        [20, 0, 255],
        [255, 255, 0],
        [0, 153, 255],
        [0, 41, 255],
        [0, 255, 204],
        [41, 0, 255],
        [41, 255, 0],
        [173, 0, 255],
        [0, 245, 255],
        [71, 0, 255],
        [122, 0, 255],
        [0, 255, 184],
        [0, 92, 255],
        [184, 255, 0],
        [0, 133, 255],
        [255, 214, 0],
        [25, 194, 194],
        [102, 255, 0],
        [92, 0, 255],
    ])
    return colormap
def label_to_color_image(label):
    """Map an integer label array to RGB colors via the ADE20K colormap."""
    return create_ade20k_label_colormap()[label]
def vis_segmentation(image, seg_map):
    """Visualizes input image, segmentation map and overlay view.

    Args:
        image: Input RGB image (anything plt.imshow accepts).
        seg_map: Integer label map aligned with `image`.

    Uses the module-level FULL_COLOR_MAP and LABEL_NAMES globals for the
    legend panel.
    """
    plt.figure(figsize=(15, 5))
    grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])

    plt.subplot(grid_spec[0])
    plt.imshow(image)
    plt.axis('off')
    plt.title('input image')

    plt.subplot(grid_spec[1])
    seg_image = label_to_color_image(seg_map).astype(np.uint8)
    plt.imshow(seg_image)
    plt.axis('off')
    plt.title('segmentation map')

    plt.subplot(grid_spec[2])
    plt.imshow(image)
    plt.imshow(seg_image, alpha=0.7)
    plt.axis('off')
    plt.title('segmentation overlay')

    # Legend panel: one color swatch per label actually present in the map.
    unique_labels = np.unique(seg_map)
    ax = plt.subplot(grid_spec[3])
    plt.imshow(
        FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')
    ax.yaxis.tick_right()
    plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
    plt.xticks([], [])
    ax.tick_params(width=0.0)
    # BUG FIX: plt.grid('off') passes a truthy string as the `visible`
    # argument, which *enables* the grid; False disables it.
    plt.grid(False)
    plt.show()
if __name__ == '__main__':
    # One label name per line; comma delimiter keeps names with spaces intact.
    LABEL_NAMES = np.genfromtxt('labels_ADE20K.txt', delimiter=',', dtype='U')
    print(list(LABEL_NAMES))
    # Precompute the full legend: every label index mapped to its RGB color.
    FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
    FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)
    img_path = "img.jpg"
    model_path = "deeplabv3_mnv2_ade20k_train_2018_12_03.tar.gz"
    # load model
    model = DeepLabModel(model_path)
    # Reduce image size if mobilenet model
    if "mnv2" in model_path:
        model.INPUT_SIZE = 257
    # read image
    original_im = Image.open(img_path)
    # inferences DeepLab model (timed)
    start_time = time.time()
    resized_im, seg_map = model.run(original_im)
    ellapsed_time = time.time() - start_time
    print("Ellapsed time: " + str(ellapsed_time) + "s")
    # show inference result
    vis_segmentation(resized_im, seg_map)
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"numpy.unique",
"matplotlib.pyplot.imshow",
"numpy.genfromtxt",
"tensorflow.compat.v1.Session",
"tarfile.open",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"os.path.basename",
"numpy.asarray",
"tensorflow.Graph",
"tensorflow.im... | [((2120, 4508), 'numpy.asarray', 'np.asarray', (['[[0, 0, 0], [120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],\n [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230,\n 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61], [120, \n 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140], [204, 255, 4], [\n 255, 51, 7], [204, 70, 3], [0, 102, 200], [61, 230, 250], [255, 6, 51],\n [11, 102, 255], [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, \n 220], [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, \n 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8], [\n 102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8], [0, 255, 20],\n [255, 8, 41], [255, 5, 153], [6, 51, 255], [235, 12, 255], [160, 150, \n 20], [0, 163, 255], [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, \n 255, 0], [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], [255,\n 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], [11, 200, 200], [\n 255, 82, 0], [0, 255, 245], [0, 61, 255], [0, 255, 112], [0, 255, 133],\n [255, 0, 0], [255, 163, 0], [255, 102, 0], [194, 255, 0], [0, 143, 255],\n [51, 255, 0], [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],\n [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], [255, 0, 245\n ], [255, 0, 102], [255, 173, 0], [255, 0, 20], [255, 184, 184], [0, 31,\n 255], [0, 255, 61], [0, 71, 255], [255, 0, 204], [0, 255, 194], [0, 255,\n 82], [0, 10, 255], [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122,\n 255], [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], [143, \n 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], [8, 184, 170], [\n 133, 0, 255], [0, 255, 92], [184, 0, 255], [255, 0, 31], [0, 184, 255],\n [0, 214, 255], [255, 0, 112], [92, 255, 0], [0, 224, 255], [112, 224, \n 255], [70, 184, 160], [163, 0, 255], [153, 0, 255], [71, 255, 0], [255,\n 0, 163], [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], [\n 255, 0, 235], 
[245, 0, 255], [255, 0, 122], [255, 245, 0], [10, 190, \n 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], [255, 255, 0], [0, \n 153, 255], [0, 41, 255], [0, 255, 204], [41, 0, 255], [41, 255, 0], [\n 173, 0, 255], [0, 245, 255], [71, 0, 255], [122, 0, 255], [0, 255, 184],\n [0, 92, 255], [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, \n 194], [102, 255, 0], [92, 0, 255]]'], {}), '([[0, 0, 0], [120, 120, 120], [180, 120, 120], [6, 230, 230], [80,\n 50, 50], [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], [\n 230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61],\n [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140], [204, 255,\n 4], [255, 51, 7], [204, 70, 3], [0, 102, 200], [61, 230, 250], [255, 6,\n 51], [11, 102, 255], [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, \n 220, 220], [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], [\n 255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8\n ], [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8], [0, 255, \n 20], [255, 8, 41], [255, 5, 153], [6, 51, 255], [235, 12, 255], [160, \n 150, 20], [0, 163, 255], [140, 140, 140], [250, 10, 15], [20, 255, 0],\n [31, 255, 0], [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],\n [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], [11, 200, 200\n ], [255, 82, 0], [0, 255, 245], [0, 61, 255], [0, 255, 112], [0, 255, \n 133], [255, 0, 0], [255, 163, 0], [255, 102, 0], [194, 255, 0], [0, 143,\n 255], [51, 255, 0], [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, \n 255], [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], [255, \n 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], [255, 184, 184], [\n 0, 31, 255], [0, 255, 61], [0, 71, 255], [255, 0, 204], [0, 255, 194],\n [0, 255, 82], [0, 10, 255], [0, 112, 255], [51, 0, 255], [0, 194, 255],\n [0, 122, 255], [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0\n ], [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], 
[8, 184, \n 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], [255, 0, 31], [0, 184,\n 255], [0, 214, 255], [255, 0, 112], [92, 255, 0], [0, 224, 255], [112, \n 224, 255], [70, 184, 160], [163, 0, 255], [153, 0, 255], [71, 255, 0],\n [255, 0, 163], [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, \n 0], [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], [10, \n 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], [255, 255, 0], [\n 0, 153, 255], [0, 41, 255], [0, 255, 204], [41, 0, 255], [41, 255, 0],\n [173, 0, 255], [0, 245, 255], [71, 0, 255], [122, 0, 255], [0, 255, 184\n ], [0, 92, 255], [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194,\n 194], [102, 255, 0], [92, 0, 255]])\n', (2130, 4508), True, 'import numpy as np\n'), ((5370, 5397), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (5380, 5397), True, 'from matplotlib import pyplot as plt\n'), ((5413, 5463), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(4)'], {'width_ratios': '[6, 6, 6, 1]'}), '(1, 4, width_ratios=[6, 6, 6, 1])\n', (5430, 5463), False, 'from matplotlib import gridspec\n'), ((5469, 5494), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid_spec[0]'], {}), '(grid_spec[0])\n', (5480, 5494), True, 'from matplotlib import pyplot as plt\n'), ((5498, 5515), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (5508, 5515), True, 'from matplotlib import pyplot as plt\n'), ((5519, 5534), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5527, 5534), True, 'from matplotlib import pyplot as plt\n'), ((5538, 5562), 'matplotlib.pyplot.title', 'plt.title', (['"""input image"""'], {}), "('input image')\n", (5547, 5562), True, 'from matplotlib import pyplot as plt\n'), ((5568, 5593), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid_spec[1]'], {}), '(grid_spec[1])\n', (5579, 5593), True, 'from matplotlib import pyplot as plt\n'), ((5659, 5680), 'matplotlib.pyplot.imshow', 
'plt.imshow', (['seg_image'], {}), '(seg_image)\n', (5669, 5680), True, 'from matplotlib import pyplot as plt\n'), ((5684, 5699), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5692, 5699), True, 'from matplotlib import pyplot as plt\n'), ((5703, 5732), 'matplotlib.pyplot.title', 'plt.title', (['"""segmentation map"""'], {}), "('segmentation map')\n", (5712, 5732), True, 'from matplotlib import pyplot as plt\n'), ((5738, 5763), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid_spec[2]'], {}), '(grid_spec[2])\n', (5749, 5763), True, 'from matplotlib import pyplot as plt\n'), ((5767, 5784), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (5777, 5784), True, 'from matplotlib import pyplot as plt\n'), ((5788, 5820), 'matplotlib.pyplot.imshow', 'plt.imshow', (['seg_image'], {'alpha': '(0.7)'}), '(seg_image, alpha=0.7)\n', (5798, 5820), True, 'from matplotlib import pyplot as plt\n'), ((5824, 5839), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5832, 5839), True, 'from matplotlib import pyplot as plt\n'), ((5843, 5876), 'matplotlib.pyplot.title', 'plt.title', (['"""segmentation overlay"""'], {}), "('segmentation overlay')\n", (5852, 5876), True, 'from matplotlib import pyplot as plt\n'), ((5898, 5916), 'numpy.unique', 'np.unique', (['seg_map'], {}), '(seg_map)\n', (5907, 5916), True, 'import numpy as np\n'), ((5925, 5950), 'matplotlib.pyplot.subplot', 'plt.subplot', (['grid_spec[3]'], {}), '(grid_spec[3])\n', (5936, 5950), True, 'from matplotlib import pyplot as plt\n'), ((6143, 6161), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]', '[]'], {}), '([], [])\n', (6153, 6161), True, 'from matplotlib import pyplot as plt\n'), ((6194, 6209), 'matplotlib.pyplot.grid', 'plt.grid', (['"""off"""'], {}), "('off')\n", (6202, 6209), True, 'from matplotlib import pyplot as plt\n'), ((6213, 6223), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6221, 6223), True, 'from matplotlib import pyplot as 
plt\n'), ((6273, 6333), 'numpy.genfromtxt', 'np.genfromtxt', (['"""labels_ADE20K.txt"""'], {'delimiter': '""","""', 'dtype': '"""U"""'}), "('labels_ADE20K.txt', delimiter=',', dtype='U')\n", (6286, 6333), True, 'import numpy as np\n'), ((6779, 6799), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (6789, 6799), False, 'from PIL import Image\n'), ((6848, 6859), 'time.time', 'time.time', ([], {}), '()\n', (6857, 6859), False, 'import time\n'), ((538, 548), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (546, 548), True, 'import tensorflow as tf\n'), ((635, 661), 'tarfile.open', 'tarfile.open', (['tarball_path'], {}), '(tarball_path)\n', (647, 661), False, 'import tarfile\n'), ((1147, 1185), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'self.graph'}), '(graph=self.graph)\n', (1167, 1185), True, 'import tensorflow as tf\n'), ((6927, 6938), 'time.time', 'time.time', ([], {}), '()\n', (6936, 6938), False, 'import time\n'), ((1088, 1127), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (1107, 1127), True, 'import tensorflow as tf\n'), ((742, 773), 'os.path.basename', 'os.path.basename', (['tar_info.name'], {}), '(tar_info.name)\n', (758, 773), False, 'import os\n'), ((1828, 1853), 'numpy.asarray', 'np.asarray', (['resized_image'], {}), '(resized_image)\n', (1838, 1853), True, 'import numpy as np\n')] |
# coding=utf-8
from typing import List
from sklearn.datasets import make_moons, make_blobs, make_circles
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
from magnn.loss import MSE
from magnn.nn import Net
from magnn.layer import Swish, Linear, Sigmoid, Tanh, Dropout, ReLu
from magnn.optimize import SGD
from magnn.train import train
from magnn.io import Scaler, BatchIterator
# Activation layer class used throughout the network.
Act = Swish
# Two-phase training: epochs_first at lr_init, then epochs_second at lr_init/10
# (see the two train(...) calls below).
lr_init = 0.005
epochs_first = 5000
epochs_second = 1000
# Validation is run every `val_frequency` epochs.
val_frequency = 10
n_data = 5000
early_stopping = False
dropout_prob = 0.2
# Hidden-layer widths; the third layer is currently commented out in the Net.
first_layer_width = 8
second_layer_width = 32
third_layer_width = 16
# Number of trailing epochs shown in the zoomed loss plots.
n_epochs_plot_1th = (epochs_first + epochs_second) // 10
# NOTE(review): batch_size is never used — both BatchIterator calls below
# hard-code batch_size=100.
batch_size = n_data // 10
losses = []
# CODE STARTS HERE
# ds = make_moons(n_samples=n_data, noise=0.3)
ds = make_circles(n_samples=n_data, noise=0.01, factor=0.3)
def encode_binary(y: int) -> List:
    """One-hot encode a binary label: 1 -> [0, 1]; anything else -> [1, 0]."""
    return [0, 1] if y == 1 else [1, 0]
X, y = ds
# One-hot encode the binary labels for the two-output network.
y = np.array([encode_binary(_y) for _y in y])
# Hold out 15% for test, then 10% of the remainder for validation.
X_t, X_test, y_t, y_test = train_test_split(X, y, test_size=0.15, shuffle=True)
X_train, X_val, y_train, y_val = train_test_split(
    X_t, y_t, test_size=0.10, shuffle=True
)
# NOTE(review): the scaler is fit on the full dataset but only X_train is
# transformed here; X_val/X_test are fed to the net unscaled below — confirm
# this is intentional.
scaler = Scaler()
scaler.fit(X)
X_train = scaler.transform(X_train)
net = Net(
    [
        Linear(input_size=2, output_size=first_layer_width),
        Act(),
        Dropout(prob=dropout_prob, size=first_layer_width),
        Linear(input_size=first_layer_width, output_size=second_layer_width),
        Act(),
        Dropout(prob=dropout_prob, size=second_layer_width),
        # Linear(input_size=second_layer_width, output_size=third_layer_width),
        # Act(),
        # Dropout(prob=dropout_prob, size=third_layer_width),
        Linear(input_size=second_layer_width, output_size=2),
        Sigmoid(),
    ]
)
# Phase 1: train at the initial learning rate.
loss_1, loss_1_val = train(
    net,
    X_train,
    y_train,
    val_data=(X_val, y_val),
    val_frequency=val_frequency,
    epochs=epochs_first,
    iterator=BatchIterator(batch_size=100),
    optim=SGD(lr=lr_init),
    early_stopping=early_stopping,
)
# Phase 2: fine-tune at a 10x smaller learning rate.
loss_2, loss_2_val = train(
    net,
    X_train,
    y_train,
    epochs=epochs_second,
    val_data=(X_val, y_val),
    val_frequency=val_frequency,
    iterator=BatchIterator(batch_size=100),
    optim=SGD(lr=lr_init * 0.1),
    early_stopping=early_stopping,
)
# fix validation graph
# (validation loss is only recorded every `val_frequency` epochs; repeat each
# sample so the curve lines up with the per-epoch training curve, then trim)
loss_1_val = np.concatenate([[i] * val_frequency for i in loss_1_val])
loss_2_val = np.concatenate([[i] * val_frequency for i in loss_2_val])
if len(loss_1) != len(loss_1_val):
    loss_1_val = loss_1_val[: len(loss_1)]
if len(loss_2) != len(loss_2_val):
    loss_2_val = loss_2_val[: len(loss_2)]
losses = [loss_1, loss_2]
losses = np.concatenate(losses)
loss_val = np.concatenate([loss_1_val, loss_2_val])
# Evaluate on the held-out test set: MSE loss plus argmax accuracy.
test_pred = net.forward(X_test)
test_loss = MSE().loss(predicted=test_pred, actual=y_test)
correct = np.sum(
    np.argmax(test_pred, axis=1) == np.argmax(y_test, axis=1)
) / len(test_pred)
print(f"Training Loss: {losses[-1]:.2f}")
print(f"Validation Loss: {loss_val[-1]:.2f}")
print(f"Test Loss: {test_loss:.2f}")
print(f"Percent Correct: {correct:.2%}")
# correct = 0
# for x, y in zip(X_test, y_test):
#     preds = net.forward(x)
#     # print(np.round(preds, decimals=0), y)
#     # print(f"pred:\t{np.argmax(preds):.0f} -> {np.argmax(y):.0f}\t:true")
#     # print(np.round(preds, decimals=2), y)
#     if np.argmax(preds) == np.argmax(y):
#         correct += 1
#
# print(1.0 * correct / len(X_test))
# Plot the full loss curves (log scale) plus zoomed views of the tail.
fig, axes = plt.subplots(nrows=3, figsize=(6, 6))
axes[0].plot(losses, label="Loss")
axes[0].plot(loss_val, label="Val loss", alpha=0.5, c="r", ls="--")
axes[0].axvline(len(loss_1), c="k", alpha=0.5, label="LR changed")
axes[0].legend()
axes[1].plot(losses[-n_epochs_plot_1th:], label="Loss")
axes[1].legend()
axes[2].plot(loss_val[-n_epochs_plot_1th:], label="Val loss", ls="--")
axes[0].set_yscale("log")
axes[1].set_yscale("log")
axes[2].set_yscale("log")
fig.savefig("./circles_training.png")
fig.clf()
def visualize_decisions(X):
    """Plot the trained net's decision surface with its predictions on X.

    Args:
        X: 2-D array of shape (n_samples, 2) — the points to scatter.

    Uses the module-level `net` for predictions and leaves the figure open
    for the caller to save.
    """
    from matplotlib.colors import ListedColormap

    h = 0.2  # mesh step size for the decision-surface grid
    fig, ax = plt.subplots()
    x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
    y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    cm_bright = ListedColormap(["#FF0000", "#0000FF"])
    # BUG FIX: scatter the points that were actually passed in (X), not the
    # module-level X_test, so the plotted points always match the predicted
    # colors. (The current caller passes X_test, so its output is unchanged.)
    ax.scatter(
        X[:, 0],
        X[:, 1],
        c=np.argmax(net.forward(X), axis=1),
        cmap=cm_bright,
        alpha=0.6,
        edgecolors="k",
    )
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    # Probability of class 1 over the grid, drawn as a filled contour.
    zz = net.forward(np.c_[xx.ravel(), yy.ravel()])[:, 1]
    # zz = np.argmax(net.forward(np.c_[xx.ravel(), yy.ravel()]),axis=1)
    zz = zz.reshape(xx.shape)
    ax.contourf(xx, yy, zz, cmap=plt.cm.RdBu, alpha=0.3)
# Render the decision surface for the test points and save to disk.
# NOTE(review): the filename says "moons" but the active dataset above is
# make_circles — confirm the intended name.
visualize_decisions(X_test,)
plt.savefig("./moons.png")
| [
"sklearn.datasets.make_circles",
"matplotlib.pyplot.savefig",
"magnn.io.BatchIterator",
"magnn.layer.Linear",
"numpy.argmax",
"magnn.optimize.SGD",
"sklearn.model_selection.train_test_split",
"magnn.io.Scaler",
"numpy.arange",
"magnn.layer.Sigmoid",
"magnn.layer.Dropout",
"magnn.loss.MSE",
"... | [((823, 877), 'sklearn.datasets.make_circles', 'make_circles', ([], {'n_samples': 'n_data', 'noise': '(0.01)', 'factor': '(0.3)'}), '(n_samples=n_data, noise=0.01, factor=0.3)\n', (835, 877), False, 'from sklearn.datasets import make_moons, make_blobs, make_circles\n'), ((1071, 1123), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.15)', 'shuffle': '(True)'}), '(X, y, test_size=0.15, shuffle=True)\n', (1087, 1123), False, 'from sklearn.model_selection import train_test_split\n'), ((1158, 1213), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_t', 'y_t'], {'test_size': '(0.1)', 'shuffle': '(True)'}), '(X_t, y_t, test_size=0.1, shuffle=True)\n', (1174, 1213), False, 'from sklearn.model_selection import train_test_split\n'), ((1231, 1239), 'magnn.io.Scaler', 'Scaler', ([], {}), '()\n', (1237, 1239), False, 'from magnn.io import Scaler, BatchIterator\n'), ((2408, 2467), 'numpy.concatenate', 'np.concatenate', (['[([i] * val_frequency) for i in loss_1_val]'], {}), '([([i] * val_frequency) for i in loss_1_val])\n', (2422, 2467), True, 'import numpy as np\n'), ((2479, 2538), 'numpy.concatenate', 'np.concatenate', (['[([i] * val_frequency) for i in loss_2_val]'], {}), '([([i] * val_frequency) for i in loss_2_val])\n', (2493, 2538), True, 'import numpy as np\n'), ((2731, 2753), 'numpy.concatenate', 'np.concatenate', (['losses'], {}), '(losses)\n', (2745, 2753), True, 'import numpy as np\n'), ((2765, 2805), 'numpy.concatenate', 'np.concatenate', (['[loss_1_val, loss_2_val]'], {}), '([loss_1_val, loss_2_val])\n', (2779, 2805), True, 'import numpy as np\n'), ((3539, 3576), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'figsize': '(6, 6)'}), '(nrows=3, figsize=(6, 6))\n', (3551, 3576), True, 'import matplotlib.pyplot as plt\n'), ((4962, 4988), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./moons.png"""'], {}), "('./moons.png')\n", (4973, 4988), True, 'import matplotlib.pyplot as 
plt\n'), ((4143, 4157), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4155, 4157), True, 'import matplotlib.pyplot as plt\n'), ((4376, 4414), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['#FF0000', '#0000FF']"], {}), "(['#FF0000', '#0000FF'])\n", (4390, 4414), False, 'from matplotlib.colors import ListedColormap\n'), ((1317, 1368), 'magnn.layer.Linear', 'Linear', ([], {'input_size': '(2)', 'output_size': 'first_layer_width'}), '(input_size=2, output_size=first_layer_width)\n', (1323, 1368), False, 'from magnn.layer import Swish, Linear, Sigmoid, Tanh, Dropout, ReLu\n'), ((1393, 1443), 'magnn.layer.Dropout', 'Dropout', ([], {'prob': 'dropout_prob', 'size': 'first_layer_width'}), '(prob=dropout_prob, size=first_layer_width)\n', (1400, 1443), False, 'from magnn.layer import Swish, Linear, Sigmoid, Tanh, Dropout, ReLu\n'), ((1453, 1521), 'magnn.layer.Linear', 'Linear', ([], {'input_size': 'first_layer_width', 'output_size': 'second_layer_width'}), '(input_size=first_layer_width, output_size=second_layer_width)\n', (1459, 1521), False, 'from magnn.layer import Swish, Linear, Sigmoid, Tanh, Dropout, ReLu\n'), ((1546, 1597), 'magnn.layer.Dropout', 'Dropout', ([], {'prob': 'dropout_prob', 'size': 'second_layer_width'}), '(prob=dropout_prob, size=second_layer_width)\n', (1553, 1597), False, 'from magnn.layer import Swish, Linear, Sigmoid, Tanh, Dropout, ReLu\n'), ((1766, 1818), 'magnn.layer.Linear', 'Linear', ([], {'input_size': 'second_layer_width', 'output_size': '(2)'}), '(input_size=second_layer_width, output_size=2)\n', (1772, 1818), False, 'from magnn.layer import Swish, Linear, Sigmoid, Tanh, Dropout, ReLu\n'), ((1828, 1837), 'magnn.layer.Sigmoid', 'Sigmoid', ([], {}), '()\n', (1835, 1837), False, 'from magnn.layer import Swish, Linear, Sigmoid, Tanh, Dropout, ReLu\n'), ((2011, 2040), 'magnn.io.BatchIterator', 'BatchIterator', ([], {'batch_size': '(100)'}), '(batch_size=100)\n', (2024, 2040), False, 'from magnn.io import Scaler, 
BatchIterator\n'), ((2052, 2067), 'magnn.optimize.SGD', 'SGD', ([], {'lr': 'lr_init'}), '(lr=lr_init)\n', (2055, 2067), False, 'from magnn.optimize import SGD\n'), ((2271, 2300), 'magnn.io.BatchIterator', 'BatchIterator', ([], {'batch_size': '(100)'}), '(batch_size=100)\n', (2284, 2300), False, 'from magnn.io import Scaler, BatchIterator\n'), ((2312, 2333), 'magnn.optimize.SGD', 'SGD', ([], {'lr': '(lr_init * 0.1)'}), '(lr=lr_init * 0.1)\n', (2315, 2333), False, 'from magnn.optimize import SGD\n'), ((2851, 2856), 'magnn.loss.MSE', 'MSE', ([], {}), '()\n', (2854, 2856), False, 'from magnn.loss import MSE\n'), ((4303, 4329), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'h'], {}), '(x_min, x_max, h)\n', (4312, 4329), True, 'import numpy as np\n'), ((4331, 4357), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'h'], {}), '(y_min, y_max, h)\n', (4340, 4357), True, 'import numpy as np\n'), ((2920, 2948), 'numpy.argmax', 'np.argmax', (['test_pred'], {'axis': '(1)'}), '(test_pred, axis=1)\n', (2929, 2948), True, 'import numpy as np\n'), ((2952, 2977), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (2961, 2977), True, 'import numpy as np\n')] |
import operator
from math import isinf, isnan
from typing import Callable, Optional, Sequence, SupportsFloat, SupportsIndex, Type, TypedDict, Union
import numpy as np
__all__ = ["ArrayLike", "OptimizerVariables", "type_check", "immutable_view"]
# Anything accepted where an array is expected: a single float, an ndarray,
# or float sequences nested up to five levels deep.
ArrayLike = Union[
    np.ndarray,
    float,
    Sequence[float],
    Sequence[Sequence[float]],
    Sequence[Sequence[Sequence[float]]],
    Sequence[Sequence[Sequence[Sequence[float]]]],
    Sequence[Sequence[Sequence[Sequence[Sequence[float]]]]],
]
class OptimizerVariables(TypedDict):
    """Dictionary of variables tracked across optimizer iterations.

    Converted from the keyword-argument form of ``TypedDict(...)``, which is
    deprecated since Python 3.11 and removed in 3.13; the class form is
    equivalent at runtime and forward-compatible.
    """

    x_best: np.ndarray
    y_best: np.ndarray
    x: np.ndarray
    y: float
    lr: float
    beta_noise: float
    beta1: float
    beta2: float
    noise: float
    gradient: np.ndarray
    slow_gradient: np.ndarray
    square_gradient: np.ndarray
def type_check(
    f: Callable[[np.ndarray], float],
    x: ArrayLike,
    adam: bool,
    iterations: int,
    lr: Optional[float],
    lr_decay: float,
    lr_power: float,
    px: Union[float, Type[int]],
    px_decay: float,
    px_power: float,
    momentum: float,
    beta: float,
    epsilon: float,
    /,
) -> np.ndarray:
    """Validate optimizer parameters and cast ``x`` to a numpy array.

    All parameters are positional-only, mirroring the optimizer entry
    points that delegate to this helper.

    Raises:
        TypeError: if ``f`` is not callable, ``x`` has an unsupported type,
            or a numeric parameter is not real / not index-like.
        ValueError: if a numeric parameter is nan, infinite, or not
            strictly positive; if a bounded parameter is not strictly
            below 1; or if ``x`` is empty or contains nan/inf.

    Returns:
        ``x`` converted to a float numpy array.
    """
    # Type-check.
    if not callable(f):
        raise TypeError(f"f must be callable, got {f!r}")
    elif not isinstance(x, (SupportsFloat, np.ndarray, Sequence)):
        raise TypeError(f"x must be either a real number, numpy array, or sequence, got {x!r}")
    elif not isinstance(iterations, SupportsIndex):
        raise TypeError(f"iterations cannot be interpreted as an integer, got {iterations!r}")
    elif not isinstance(adam, SupportsIndex):
        raise TypeError(f"adam cannot be interpreted as an integer, got {adam!r}")
    adam = bool(operator.index(adam))
    # Collect the (name, value) pairs that must be positive finite reals.
    # ``lr is None`` and ``px is int`` act as "use the default" sentinels,
    # so defaulted parameters are skipped.
    names = ("lr_decay", "lr_power")
    values = (lr_decay, lr_power)
    if lr is not None:
        names = ("lr", *names)
        values = (lr, *values)
    if px is not int:
        names = (*names, "px")
        values = (*values, px)
    names = (*names, "px_decay", "px_power", "momentum", "beta", "epsilon")
    values = (*values, px_decay, px_power, momentum, beta, epsilon)
    for name, value in zip(names, values):
        if not isinstance(value, SupportsFloat):
            raise TypeError(f"{name} must be real, got {value!r}")
        fv = float(value)  # hoisted: the original converted up to 3 times
        if isnan(fv):
            raise ValueError(f"{name} must not be nan, got {value!r}")
        elif isinf(fv):
            raise ValueError(f"{name} must not be infinite, got {value!r}")
        elif fv <= 0:
            # BUG FIX: the old message said "must not be negative" even
            # though 0 is rejected too; the requirement is strictly > 0.
            raise ValueError(f"{name} must be positive, got {value!r}")
    # Decay-rate-style parameters must additionally be strictly below 1.
    names = ("lr_power", "px_power", "momentum", "beta")
    values = (lr_power, px_power, momentum, beta)
    for name, value in zip(names, values):
        if float(value) >= 1:
            # BUG FIX: the old message said "must not be greater than 1"
            # even though exactly 1 is rejected too.
            raise ValueError(f"{name} must be less than 1, got {value!r}")
    # Cast to numpy array.
    x = np.array(x, dtype=float)
    # Value-check the contents of x.
    if x.size == 0:
        raise ValueError("cannot optimize with array of size 0")
    elif np.isnan(x).any():
        raise ValueError("x must not contain nan")
    elif np.isinf(x).any():
        raise ValueError("x must not contain infinity")
    return x
def immutable_view(x: np.ndarray, /) -> np.ndarray:
    """Return a read-only view of ``x``.

    The view shares memory with ``x`` (changes to ``x`` remain visible
    through it), but any attempt to write through the view raises.
    """
    frozen = np.asarray(x).view()
    frozen.flags.writeable = False
    return frozen
| [
"operator.index",
"numpy.asarray",
"numpy.isinf",
"numpy.isnan",
"numpy.array",
"typing.TypedDict"
] | [((523, 770), 'typing.TypedDict', 'TypedDict', (['"""OptimizerVariables"""'], {'x_best': 'np.ndarray', 'y_best': 'np.ndarray', 'x': 'np.ndarray', 'y': 'float', 'lr': 'float', 'beta_noise': 'float', 'beta1': 'float', 'beta2': 'float', 'noise': 'float', 'gradient': 'np.ndarray', 'slow_gradient': 'np.ndarray', 'square_gradient': 'np.ndarray'}), "('OptimizerVariables', x_best=np.ndarray, y_best=np.ndarray, x=np.\n ndarray, y=float, lr=float, beta_noise=float, beta1=float, beta2=float,\n noise=float, gradient=np.ndarray, slow_gradient=np.ndarray,\n square_gradient=np.ndarray)\n", (532, 770), False, 'from typing import Callable, Optional, Sequence, SupportsFloat, SupportsIndex, Type, TypedDict, Union\n'), ((2955, 2979), 'numpy.array', 'np.array', (['x'], {'dtype': 'float'}), '(x, dtype=float)\n', (2963, 2979), True, 'import numpy as np\n'), ((1770, 1790), 'operator.index', 'operator.index', (['adam'], {}), '(adam)\n', (1784, 1790), False, 'import operator\n'), ((3389, 3402), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (3399, 3402), True, 'import numpy as np\n'), ((3092, 3103), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (3100, 3103), True, 'import numpy as np\n'), ((3172, 3183), 'numpy.isinf', 'np.isinf', (['x'], {}), '(x)\n', (3180, 3183), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.