# coding=utf-8
'''
Show optical flow
USAGE: python opt_flow.py [<video_source>] (defaults to the webcam if no
video source is provided)
Keys:
1 - Show HSV
ESC - Exit
'''
from __future__ import print_function
import numpy as np
import cv2
import sys
def draw_flow(img, flow, step=10):
h, w = img.shape[:2]
    y, x = np.mgrid[step / 2:h:step, step / 2:w:step].reshape(2, -1).astype(int)
fx, fy = flow[y, x].T
lines = np.vstack([x, y, x + fx, y + fy]).T.reshape(-1, 2, 2)
lines = np.int32(lines + 0.5)
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.polylines(vis, lines, 0, (0, 255, 0))
for (x1, y1), (x2, y2) in lines:
cv2.circle(vis, (x1, y1), 1, (255, 0, 255), -1)
return vis
def draw_hsv(flow):
h, w = flow.shape[:2]
fx, fy = flow[:, :, 0], flow[:, :, 1]
ang = np.arctan2(fy, fx) + np.pi
v = np.sqrt(fx * fx + fy * fy)
hsv = np.zeros((h, w, 3), np.uint8)
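    # note: despite the "med_" prefix, np.average below takes the mean (not
    # the median) of the flow angles, and the whole frame is painted with
    # that single hue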
med_ang = np.average(np.ndarray.flatten(ang))
med_ang *= 180 / np.pi / 2
med_ang_arr = np.full(ang.shape, med_ang)
hsv[..., 0] = med_ang_arr
hsv[..., 1] = 255
hsv[..., 2] = np.minimum(v * 4, 255)
bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return bgr
if __name__ == '__main__':
print(__doc__)
try:
fn = sys.argv[1]
except IndexError:
fn = 0
cam = cv2.VideoCapture(fn)
ret, prev = cam.read()
prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
show_hsv = False
image_width = int(cam.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
image_height = int(cam.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
fps = int(cam.get(cv2.cv.CV_CAP_PROP_FPS))
out_flow = cv2.VideoWriter('flow.avi', cv2.cv.CV_FOURCC(*'DIVX'), fps,
(image_width, image_height))
out_hsv = cv2.VideoWriter('hsv.avi', cv2.cv.CV_FOURCC(*'DIVX'), fps,
(image_width, image_height))
while True:
ret, img = cam.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prevgray, gray, 0.5, 15, 25, 20, 7,
1.5, 0)
prevgray = gray
flow_img = draw_flow(gray, flow)
cv2.imshow('flow', flow_img)
out_flow.write(flow_img)
if show_hsv:
hsv_img = draw_hsv(flow)
cv2.imshow('flow HSV', hsv_img)
out_hsv.write(hsv_img)
ch = 0xFF & cv2.waitKey(5)
if ch == 27:
break
if ch == ord('1'):
show_hsv = not show_hsv
print('HSV flow visualization is', ['off', 'on'][show_hsv])
cv2.destroyAllWindows()
out_flow.release()
out_hsv.release()
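# Note: the cv2.cv.* constants and cv2.cv.CV_FOURCC used above are the
# OpenCV 2.x API. A minimal sketch of the same capture/writer setup on
# OpenCV 3+ (our adaptation, untested here):
#   image_width = int(cam.get(cv2.CAP_PROP_FRAME_WIDTH))
#   image_height = int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT))
#   fps = int(cam.get(cv2.CAP_PROP_FPS))
#   fourcc = cv2.VideoWriter_fourcc(*'DIVX')
#   out_flow = cv2.VideoWriter('flow.avi', fourcc, fps, (image_width, image_height))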
| [
"numpy.sqrt",
"numpy.minimum",
"cv2.polylines",
"numpy.int32",
"cv2.imshow",
"numpy.ndarray.flatten",
"numpy.zeros",
"cv2.circle",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"numpy.arctan2",
"cv2.cv.CV_FOURCC",
"numpy.full",
"cv2.calcOpticalFlowFarneback",
"numpy.vsta... | [((517, 538), 'numpy.int32', 'np.int32', (['(lines + 0.5)'], {}), '(lines + 0.5)\n', (525, 538), True, 'import numpy as np\n'), ((549, 586), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2BGR'], {}), '(img, cv2.COLOR_GRAY2BGR)\n', (561, 586), False, 'import cv2\n'), ((591, 632), 'cv2.polylines', 'cv2.polylines', (['vis', 'lines', '(0)', '(0, 255, 0)'], {}), '(vis, lines, 0, (0, 255, 0))\n', (604, 632), False, 'import cv2\n'), ((876, 902), 'numpy.sqrt', 'np.sqrt', (['(fx * fx + fy * fy)'], {}), '(fx * fx + fy * fy)\n', (883, 902), True, 'import numpy as np\n'), ((913, 942), 'numpy.zeros', 'np.zeros', (['(h, w, 3)', 'np.uint8'], {}), '((h, w, 3), np.uint8)\n', (921, 942), True, 'import numpy as np\n'), ((1043, 1070), 'numpy.full', 'np.full', (['ang.shape', 'med_ang'], {}), '(ang.shape, med_ang)\n', (1050, 1070), True, 'import numpy as np\n'), ((1142, 1164), 'numpy.minimum', 'np.minimum', (['(v * 4)', '(255)'], {}), '(v * 4, 255)\n', (1152, 1164), True, 'import numpy as np\n'), ((1176, 1212), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv', 'cv2.COLOR_HSV2BGR'], {}), '(hsv, cv2.COLOR_HSV2BGR)\n', (1188, 1212), False, 'import cv2\n'), ((1360, 1380), 'cv2.VideoCapture', 'cv2.VideoCapture', (['fn'], {}), '(fn)\n', (1376, 1380), False, 'import cv2\n'), ((1424, 1462), 'cv2.cvtColor', 'cv2.cvtColor', (['prev', 'cv2.COLOR_BGR2GRAY'], {}), '(prev, cv2.COLOR_BGR2GRAY)\n', (1436, 1462), False, 'import cv2\n'), ((2650, 2673), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2671, 2673), False, 'import cv2\n'), ((678, 725), 'cv2.circle', 'cv2.circle', (['vis', '(x1, y1)', '(1)', '(255, 0, 255)', '(-1)'], {}), '(vis, (x1, y1), 1, (255, 0, 255), -1)\n', (688, 725), False, 'import cv2\n'), ((841, 859), 'numpy.arctan2', 'np.arctan2', (['fy', 'fx'], {}), '(fy, fx)\n', (851, 859), True, 'import numpy as np\n'), ((969, 992), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['ang'], {}), '(ang)\n', (987, 992), True, 'import numpy as np\n'), ((1703, 1728), 'cv2.cv.CV_FOURCC', 'cv2.cv.CV_FOURCC', (["*'DIVX'"], {}), "(*'DIVX')\n", (1719, 1728), False, 'import cv2\n'), ((1836, 1861), 'cv2.cv.CV_FOURCC', 'cv2.cv.CV_FOURCC', (["*'DIVX'"], {}), "(*'DIVX')\n", (1852, 1861), False, 'import cv2\n'), ((1989, 2026), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2001, 2026), False, 'import cv2\n'), ((2042, 2114), 'cv2.calcOpticalFlowFarneback', 'cv2.calcOpticalFlowFarneback', (['prevgray', 'gray', '(0.5)', '(15)', '(25)', '(20)', '(7)', '(1.5)', '(0)'], {}), '(prevgray, gray, 0.5, 15, 25, 20, 7, 1.5, 0)\n', (2070, 2114), False, 'import cv2\n'), ((2233, 2261), 'cv2.imshow', 'cv2.imshow', (['"""flow"""', 'flow_img'], {}), "('flow', flow_img)\n", (2243, 2261), False, 'import cv2\n'), ((2366, 2397), 'cv2.imshow', 'cv2.imshow', (['"""flow HSV"""', 'hsv_img'], {}), "('flow HSV', hsv_img)\n", (2376, 2397), False, 'import cv2\n'), ((2454, 2468), 'cv2.waitKey', 'cv2.waitKey', (['(5)'], {}), '(5)\n', (2465, 2468), False, 'import cv2\n'), ((451, 484), 'numpy.vstack', 'np.vstack', (['[x, y, x + fx, y + fy]'], {}), '([x, y, x + fx, y + fy])\n', (460, 484), True, 'import numpy as np\n')] |
# -*- coding:utf8 -*-
# ==============================================================================
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This module implements data processing strategies.
"""
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import os
import json
import logging
import numpy as np
from collections import Counter
import jieba
import re
class Dataset(object):
"""
    This class implements the APIs for loading and using the Baidu reading comprehension dataset.
"""
def __init__(self, args):
self.logger = logging.getLogger("alibaba")
self.args = args
if self.args.predict:
self.test_sets = self._load_test_dataset(args.preposs_file)
else:
self.data_sets = self._load_dataset(args.preposs_file)
self.train_set, self.dev_set = self._shuffle_and_split_data_set(self.data_sets)
def _load_dataset(self, data_path):
"""
Loads the dataset
Args:
data_path: the data file to load
"""
with open(data_path, "r") as fin:
data_set = []
for idx, line in enumerate(fin):
line = unicode(line, encoding="utf8")
sample = {}
line_list = str(line).strip().split("|")
if len(line_list) != 4:
self.logger.warning("第{}行数据格式错误".format(idx + 1))
continue
else:
sample["id"] = line_list[0]
sample["document1"] = [
unicode(_, "utf8") for _ in line_list[1].split(" ")
]
sample["document1_character"] = self._add_character(
line_list[1].split(" ")
)
sample["document2"] = [
unicode(_, "utf8") for _ in line_list[2].split(" ")
]
sample["document2_character"] = self._add_character(
line_list[2].split(" ")
)
sample["label"] = self._label_2_list(int(line_list[3]))
data_set.append(sample)
self.logger.info("DataSet size {} sample".format(len(data_set)))
return data_set
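    # A sketch of the input line format _load_dataset expects, inferred from
    # the parsing above (the field values are made up):
    #   id|tokenized document1|tokenized document2|label
    #   e.g. "12|how to repay|how do i repay|1"
    # where label is 0 or 1 (see _label_2_list below).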
def _load_test_dataset(self, data_path):
"""
Loads the dataset
Args:
data_path: the data file to load
"""
with open(data_path, "r") as fin:
data_set = []
for idx, line in enumerate(fin):
line = unicode(line, encoding="utf8")
sample = {}
line_list = str(line).strip().split("|")
if len(line_list) != 3:
self.logger.warning("第{}行数据格式错误".format(idx + 1))
continue
else:
sample["id"] = line_list[0]
sample["document1"] = [
unicode(_, "utf8") for _ in line_list[1].split(" ")
]
sample["document1_character"] = self._add_character(
line_list[1].split(" ")
)
sample["document2"] = [
unicode(_, "utf8") for _ in line_list[2].split(" ")
]
sample["document2_character"] = self._add_character(
line_list[2].split(" ")
)
data_set.append(sample)
self.logger.info("DataSet size {} sample".format(len(data_set)))
return data_set
def _add_character(self, word_list):
"""
Add the characters
Args:
word_list: list of words
Returns:
list of characters
"""
character_list = []
for word in word_list:
character_list.append([character for character in unicode(word, "utf8")])
return character_list
def _shuffle_and_split_data_set(self, data_set):
"""
        Shuffle the data set and split it into train and dev sets.
"""
data_size = len(data_set)
indices = np.arange(data_size)
np.random.shuffle(indices)
index = int(data_size * (1 - self.args.dev))
train_indices = indices[0:index]
        dev_indices = indices[index:]
train_set = []
dev_set = []
for idx in train_indices:
train_set.append(data_set[idx])
for idx in dev_indices:
dev_set.append(data_set[idx])
return train_set, dev_set
def get_mini_batchs(self, batch_size, set_name="train", shuffle=False, predict=False):
# self.train_set, self.dev_set = self._shuffle_and_split_data_set(self.data_sets)
if set_name == "train":
data_set = self.train_set
elif set_name == "dev":
data_set = self.dev_set
elif set_name == 'test':
data_set = self.test_sets
else:
raise NotImplementedError("No data set named as {}".format(set_name))
data_size = len(data_set)
indices = np.arange(data_size)
if shuffle:
np.random.shuffle(indices)
for batch_start in np.arange(0, data_size, batch_size):
batch_indices = indices[batch_start : batch_start + batch_size]
yield self._one_mini_batch(data_set, batch_indices, predict=predict)
def _one_mini_batch(self, data, batch_indices, predict=False):
"""
Get one mini batch
Args:
data: all data
batch_indices: the indices of the samples to be selected
Returns:
one batch of data
"""
if predict:
batch_data = {
"raw_data": [data[i] for i in batch_indices],
"document1_ids": [],
"document2_ids": [],
"document1_character_ids": [],
"document2_character_ids": [],
"id": [],
}
else:
batch_data = {
"raw_data": [data[i] for i in batch_indices],
"document1_ids": [],
"document2_ids": [],
"document1_character_ids": [],
"document2_character_ids": [],
"label": [],
"id": [],
}
for data in batch_data["raw_data"]:
try:
batch_data["document1_ids"].append(data["document1_ids"])
batch_data["document2_ids"].append(data["document2_ids"])
batch_data["document1_character_ids"].append(
data["document1_character_ids"]
)
batch_data["document2_character_ids"].append(
data["document2_character_ids"]
)
batch_data["id"].append(data["id"])
if predict:
continue
batch_data["label"].append(data["label"])
except KeyError:
print(" ")
return batch_data
def word_iter(self, set_name=None, character=False):
"""
Iterates over all the words in the dataset
Args:
set_name: if it is set, then the specific set will be used
Returns:
a generator
"""
if set_name is None:
data_set = self.train_set + self.dev_set
elif set_name == "train":
data_set = self.train_set
elif set_name == "dev":
data_set = self.dev_set
else:
raise NotImplementedError("No data set named as {}".format(set_name))
if data_set is not None:
for sample in data_set:
                if character:
                    for token in sample["document1_character"]:
                        for ch in token:
                            yield ch
                    for token in sample["document2_character"]:
                        for ch in token:
                            yield ch
else:
for token in sample["document1"]:
yield token
for token in sample["document2"]:
yield token
def convert_to_ids(self, vocab, character=False, set_name=None):
"""
Convert the question and passage in the original dataset to ids
Args:
vocab: the vocabulary on this dataset
"""
if set_name is None:
data_sets = [self.train_set, self.dev_set]
elif set_name == 'test':
data_sets = [self.test_sets]
for data_set in data_sets:
if data_set is None:
continue
for sample in data_set:
if character:
sample["document1_character_ids"] = vocab.convert_character_to_ids(
sample["document1_character"],
self.args.max_document_len,
self.args.max_word_len,
)
sample["document2_character_ids"] = vocab.convert_character_to_ids(
sample["document2_character"],
self.args.max_document_len,
self.args.max_word_len,
)
else:
sample["document1_ids"] = vocab.convert_to_ids(
sample["document1"], self.args.max_document_len
)
sample["document2_ids"] = vocab.convert_to_ids(
sample["document2"], self.args.max_document_len
)
def _label_2_list(self, label):
label_list = [0 for _ in range(2)]
label_list[label] = 1
return label_list
| [
"logging.getLogger",
"sys.setdefaultencoding",
"numpy.arange",
"numpy.random.shuffle"
] | [((809, 840), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (831, 840), False, 'import sys\n'), ((1140, 1168), 'logging.getLogger', 'logging.getLogger', (['"""alibaba"""'], {}), "('alibaba')\n", (1157, 1168), False, 'import logging\n'), ((4710, 4730), 'numpy.arange', 'np.arange', (['data_size'], {}), '(data_size)\n', (4719, 4730), True, 'import numpy as np\n'), ((4739, 4765), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (4756, 4765), True, 'import numpy as np\n'), ((5669, 5689), 'numpy.arange', 'np.arange', (['data_size'], {}), '(data_size)\n', (5678, 5689), True, 'import numpy as np\n'), ((5776, 5811), 'numpy.arange', 'np.arange', (['(0)', 'data_size', 'batch_size'], {}), '(0, data_size, batch_size)\n', (5785, 5811), True, 'import numpy as np\n'), ((5722, 5748), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (5739, 5748), True, 'import numpy as np\n')] |
# yawing moment due to rudder in F-16 model
import numpy as np
def dndr(alpha, beta):
A = np.asarray([[-.018, -.052, -.052, -.052, -.054, -.049, -.059, -.051, -.030, -.037, -.026, -.013],
[-.028, -.051, -.043, -.046, -.045, -.049, -.057, -.052, -.030, -.033, -.030, -.008],
[-.037, -.041, -.038, -.040, -.040, -.038, -.037, -.030, -.027, -.024, -.019, -.013],
[-.048, -.045, -.045, -.045, -.044, -.045, -.047, -.048, -.049, -.045, -.033, -.016],
[-.043, -.044, -.041, -.041, -.040, -.038, -.034, -.035, -.035, -.029, -.022, -.009],
[-.052, -.034, -.036, -.036, -.035, -.028, -.024, -.023, -.020, -.016, -.010, -.014],
[-.062, -.034, -.027, -.028, -.027, -.027, -.023, -.023, -.019, -.009, -.025, -.010]])
A = np.transpose(A)
row = 2
col = 3
s = 0.2 * alpha
k = np.fix(s)
if k <= -2:
k = -1
if k >= 9:
k = 8
da = s - k
l = k + np.fix(np.sign(da) * 1.1)
if l < -2:
l = -2
elif l > 9:
l = 9
s = 0.1 * beta
m = np.fix(s)
if m <= -3:
m = -2
if m >= 3:
m = 2
db = s - m
n = m + np.fix(np.sign(db) * 1.1)
if n < -3:
n = -3
elif n > 3:
n = 3
t = A[int(k) + row, int(m) + col]
u = A[int(k) + row, int(n) + col]
v = t + abs(da) * (A[int(l) + row, int(m) + col] - t)
w = u + abs(da) * (A[int(l) + row, int(n) + col] - u)
dndr_value = v + (w - v) * abs(db)
return dndr_value
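# A hypothetical call (values made up): dndr bilinearly interpolates the
# 7x12 table above over angle of attack and sideslip, presumably in degrees
# given the 0.2*alpha and 0.1*beta index scaling:
#   cn_dr = dndr(alpha=10.0, beta=-5.0)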
| [
"numpy.fix",
"numpy.transpose",
"numpy.asarray",
"numpy.sign"
] | [((101, 828), 'numpy.asarray', 'np.asarray', (['[[-0.018, -0.052, -0.052, -0.052, -0.054, -0.049, -0.059, -0.051, -0.03, -\n 0.037, -0.026, -0.013], [-0.028, -0.051, -0.043, -0.046, -0.045, -0.049,\n -0.057, -0.052, -0.03, -0.033, -0.03, -0.008], [-0.037, -0.041, -0.038,\n -0.04, -0.04, -0.038, -0.037, -0.03, -0.027, -0.024, -0.019, -0.013], [\n -0.048, -0.045, -0.045, -0.045, -0.044, -0.045, -0.047, -0.048, -0.049,\n -0.045, -0.033, -0.016], [-0.043, -0.044, -0.041, -0.041, -0.04, -0.038,\n -0.034, -0.035, -0.035, -0.029, -0.022, -0.009], [-0.052, -0.034, -\n 0.036, -0.036, -0.035, -0.028, -0.024, -0.023, -0.02, -0.016, -0.01, -\n 0.014], [-0.062, -0.034, -0.027, -0.028, -0.027, -0.027, -0.023, -0.023,\n -0.019, -0.009, -0.025, -0.01]]'], {}), '([[-0.018, -0.052, -0.052, -0.052, -0.054, -0.049, -0.059, -0.051,\n -0.03, -0.037, -0.026, -0.013], [-0.028, -0.051, -0.043, -0.046, -0.045,\n -0.049, -0.057, -0.052, -0.03, -0.033, -0.03, -0.008], [-0.037, -0.041,\n -0.038, -0.04, -0.04, -0.038, -0.037, -0.03, -0.027, -0.024, -0.019, -\n 0.013], [-0.048, -0.045, -0.045, -0.045, -0.044, -0.045, -0.047, -0.048,\n -0.049, -0.045, -0.033, -0.016], [-0.043, -0.044, -0.041, -0.041, -0.04,\n -0.038, -0.034, -0.035, -0.035, -0.029, -0.022, -0.009], [-0.052, -\n 0.034, -0.036, -0.036, -0.035, -0.028, -0.024, -0.023, -0.02, -0.016, -\n 0.01, -0.014], [-0.062, -0.034, -0.027, -0.028, -0.027, -0.027, -0.023,\n -0.023, -0.019, -0.009, -0.025, -0.01]])\n', (111, 828), True, 'import numpy as np\n'), ((851, 866), 'numpy.transpose', 'np.transpose', (['A'], {}), '(A)\n', (863, 866), True, 'import numpy as np\n'), ((925, 934), 'numpy.fix', 'np.fix', (['s'], {}), '(s)\n', (931, 934), True, 'import numpy as np\n'), ((1155, 1164), 'numpy.fix', 'np.fix', (['s'], {}), '(s)\n', (1161, 1164), True, 'import numpy as np\n'), ((1039, 1050), 'numpy.sign', 'np.sign', (['da'], {}), '(da)\n', (1046, 1050), True, 'import numpy as np\n'), ((1269, 1280), 'numpy.sign', 'np.sign', (['db'], {}), '(db)\n', (1276, 1280), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from emukit.examples.fabolas import FabolasModel
@pytest.fixture
def model():
rng = np.random.RandomState(42)
x_init = rng.rand(5, 2)
s_min = 10
s_max = 10000
s = np.random.uniform(s_min, s_max, x_init.shape[0])
x_init = np.concatenate((x_init, s[:, None]), axis=1)
y_init = rng.rand(5, 1)
model = FabolasModel(X_init=x_init, Y_init=y_init, s_min=s_min, s_max=s_max)
return model
def test_predict_shape(model):
rng = np.random.RandomState(43)
x_test = rng.rand(10, 2)
s = np.random.uniform(model.s_min, model.s_max, x_test.shape[0])
x_test = np.concatenate((x_test, s[:, None]), axis=1)
m, v = model.predict(x_test)
assert m.shape == (10, 1)
assert v.shape == (10, 1)
def test_update_data(model):
rng = np.random.RandomState(43)
x_new = rng.rand(5, 2)
s = np.random.uniform(model.s_min, model.s_max, x_new.shape[0])
x_new = np.concatenate((x_new, s[:, None]), axis=1)
y_new = rng.rand(5, 1)
model.set_data(x_new, y_new)
assert model.X.shape == x_new.shape
assert model.Y.shape == y_new.shape
| [
"numpy.concatenate",
"emukit.examples.fabolas.FabolasModel",
"numpy.random.RandomState",
"numpy.random.uniform"
] | [((124, 149), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (145, 149), True, 'import numpy as np\n'), ((219, 267), 'numpy.random.uniform', 'np.random.uniform', (['s_min', 's_max', 'x_init.shape[0]'], {}), '(s_min, s_max, x_init.shape[0])\n', (236, 267), True, 'import numpy as np\n'), ((281, 325), 'numpy.concatenate', 'np.concatenate', (['(x_init, s[:, None])'], {'axis': '(1)'}), '((x_init, s[:, None]), axis=1)\n', (295, 325), True, 'import numpy as np\n'), ((367, 435), 'emukit.examples.fabolas.FabolasModel', 'FabolasModel', ([], {'X_init': 'x_init', 'Y_init': 'y_init', 's_min': 's_min', 's_max': 's_max'}), '(X_init=x_init, Y_init=y_init, s_min=s_min, s_max=s_max)\n', (379, 435), False, 'from emukit.examples.fabolas import FabolasModel\n'), ((496, 521), 'numpy.random.RandomState', 'np.random.RandomState', (['(43)'], {}), '(43)\n', (517, 521), True, 'import numpy as np\n'), ((560, 620), 'numpy.random.uniform', 'np.random.uniform', (['model.s_min', 'model.s_max', 'x_test.shape[0]'], {}), '(model.s_min, model.s_max, x_test.shape[0])\n', (577, 620), True, 'import numpy as np\n'), ((634, 678), 'numpy.concatenate', 'np.concatenate', (['(x_test, s[:, None])'], {'axis': '(1)'}), '((x_test, s[:, None]), axis=1)\n', (648, 678), True, 'import numpy as np\n'), ((814, 839), 'numpy.random.RandomState', 'np.random.RandomState', (['(43)'], {}), '(43)\n', (835, 839), True, 'import numpy as np\n'), ((875, 934), 'numpy.random.uniform', 'np.random.uniform', (['model.s_min', 'model.s_max', 'x_new.shape[0]'], {}), '(model.s_min, model.s_max, x_new.shape[0])\n', (892, 934), True, 'import numpy as np\n'), ((947, 990), 'numpy.concatenate', 'np.concatenate', (['(x_new, s[:, None])'], {'axis': '(1)'}), '((x_new, s[:, None]), axis=1)\n', (961, 990), True, 'import numpy as np\n')] |
import argparse
from itertools import count
import numpy as np
import h5py
from traits.api import HasTraits, Range, Instance, Bool, Int, on_trait_change
from traitsui.api import View, Item, HGroup, RangeEditor
from tvtk.api import tvtk
from tvtk.pyface.scene_editor import SceneEditor
from tvtk.common import configure_input, configure_input_data
from mayavi.tools.mlab_scene_model import MlabSceneModel
from mayavi.core.ui.mayavi_scene import MayaviScene
from pyface.timer.api import Timer
from util import veclen
from inout import load_splocs
class Visualization(HasTraits):
component = Int(0)
_max_component_index = Int()
activation = Range(-1., 1.)
oscillate = Bool(True)
allow_negative = Bool(False)
pd = Instance(tvtk.PolyData)
normals = Instance(tvtk.PolyDataNormals)
actor = Instance(tvtk.Actor)
scene = Instance(MlabSceneModel, (), kw=dict(background=(1,1,1)))
timer = Instance(Timer)
def __init__(self, Xmean, tris, components):
HasTraits.__init__(self)
self._components = components
self._max_component_index = len(components)
self._Xmean = Xmean
self.pd = tvtk.PolyData(points=Xmean, polys=tris)
self.normals = tvtk.PolyDataNormals(splitting=False)
configure_input_data(self.normals, self.pd)
mapper = tvtk.PolyDataMapper(immediate_mode_rendering=True)
self.actor = tvtk.Actor(mapper=mapper)
configure_input(self.actor.mapper, self.normals)
        self.actor.mapper.lookup_table = tvtk.LookupTable(
            hue_range=(0.45, 0.6),
            saturation_range=(0., 0.8),
            value_range=(.6, 1.),
        )
self.scene.add_actor(self.actor)
self.timer = Timer(40, self.animate().next)
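    # Note: Timer(40, self.animate().next) above relies on the Python 2
    # iterator protocol; on Python 3 the equivalent would be the generator's
    # bound __next__ method, e.g. Timer(40, self.animate().__next__).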
def animate(self):
for i in count():
if self.oscillate:
frame = i % 30
alpha = np.sin(frame/30. * np.pi*2)
if not self.allow_negative:
alpha = np.abs(alpha)
self.activation = alpha
yield
@on_trait_change('activation, component')
def update_plot(self):
c = self._components[self.component]
self.pd.points = self._Xmean + self.activation * c
magnitude = veclen(c)
self.pd.point_data.scalars = magnitude
self.actor.mapper.scalar_range = (0, magnitude.max())
self.scene.render()
view = View(
Item('scene', editor=SceneEditor(scene_class=MayaviScene),
height=600, width=800, show_label=False),
HGroup(
Item('component', editor=RangeEditor(
is_float=False, low=0, high_name='_max_component_index', mode='spinner')),
'activation',
'oscillate',
'allow_negative',
),
resizable=True, title="View SPLOC's",
)
def main(component_hdf5_file):
Xmean, tris, components, names = load_splocs(component_hdf5_file)
visualization = Visualization(Xmean, tris, components)
visualization.configure_traits()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Viewer for sparse localized deformation components')
parser.add_argument('input_sploc_file')
args = parser.parse_args()
main(args.input_sploc_file)
| [
"numpy.sin",
"inout.load_splocs",
"argparse.ArgumentParser",
"tvtk.api.tvtk.PolyDataMapper",
"tvtk.api.tvtk.LookupTable",
"traits.api.Range",
"tvtk.api.tvtk.PolyDataNormals",
"traits.api.Bool",
"tvtk.common.configure_input",
"numpy.abs",
"traitsui.api.RangeEditor",
"tvtk.api.tvtk.PolyData",
... | [((596, 602), 'traits.api.Int', 'Int', (['(0)'], {}), '(0)\n', (599, 602), False, 'from traits.api import HasTraits, Range, Instance, Bool, Int, on_trait_change\n'), ((630, 635), 'traits.api.Int', 'Int', ([], {}), '()\n', (633, 635), False, 'from traits.api import HasTraits, Range, Instance, Bool, Int, on_trait_change\n'), ((653, 669), 'traits.api.Range', 'Range', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (658, 669), False, 'from traits.api import HasTraits, Range, Instance, Bool, Int, on_trait_change\n'), ((684, 694), 'traits.api.Bool', 'Bool', (['(True)'], {}), '(True)\n', (688, 694), False, 'from traits.api import HasTraits, Range, Instance, Bool, Int, on_trait_change\n'), ((716, 727), 'traits.api.Bool', 'Bool', (['(False)'], {}), '(False)\n', (720, 727), False, 'from traits.api import HasTraits, Range, Instance, Bool, Int, on_trait_change\n'), ((737, 760), 'traits.api.Instance', 'Instance', (['tvtk.PolyData'], {}), '(tvtk.PolyData)\n', (745, 760), False, 'from traits.api import HasTraits, Range, Instance, Bool, Int, on_trait_change\n'), ((775, 805), 'traits.api.Instance', 'Instance', (['tvtk.PolyDataNormals'], {}), '(tvtk.PolyDataNormals)\n', (783, 805), False, 'from traits.api import HasTraits, Range, Instance, Bool, Int, on_trait_change\n'), ((818, 838), 'traits.api.Instance', 'Instance', (['tvtk.Actor'], {}), '(tvtk.Actor)\n', (826, 838), False, 'from traits.api import HasTraits, Range, Instance, Bool, Int, on_trait_change\n'), ((921, 936), 'traits.api.Instance', 'Instance', (['Timer'], {}), '(Timer)\n', (929, 936), False, 'from traits.api import HasTraits, Range, Instance, Bool, Int, on_trait_change\n'), ((2072, 2112), 'traits.api.on_trait_change', 'on_trait_change', (['"""activation, component"""'], {}), "('activation, component')\n", (2087, 2112), False, 'from traits.api import HasTraits, Range, Instance, Bool, Int, on_trait_change\n'), ((2923, 2955), 'inout.load_splocs', 'load_splocs', (['component_hdf5_file'], {}), '(component_hdf5_file)\n', (2934, 2955), False, 'from inout import load_splocs\n'), ((3095, 3189), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Viewer for sparse localized deformation components"""'}), "(description=\n 'Viewer for sparse localized deformation components')\n", (3118, 3189), False, 'import argparse\n'), ((995, 1019), 'traits.api.HasTraits.__init__', 'HasTraits.__init__', (['self'], {}), '(self)\n', (1013, 1019), False, 'from traits.api import HasTraits, Range, Instance, Bool, Int, on_trait_change\n'), ((1156, 1195), 'tvtk.api.tvtk.PolyData', 'tvtk.PolyData', ([], {'points': 'Xmean', 'polys': 'tris'}), '(points=Xmean, polys=tris)\n', (1169, 1195), False, 'from tvtk.api import tvtk\n'), ((1219, 1256), 'tvtk.api.tvtk.PolyDataNormals', 'tvtk.PolyDataNormals', ([], {'splitting': '(False)'}), '(splitting=False)\n', (1239, 1256), False, 'from tvtk.api import tvtk\n'), ((1265, 1308), 'tvtk.common.configure_input_data', 'configure_input_data', (['self.normals', 'self.pd'], {}), '(self.normals, self.pd)\n', (1285, 1308), False, 'from tvtk.common import configure_input, configure_input_data\n'), ((1326, 1376), 'tvtk.api.tvtk.PolyDataMapper', 'tvtk.PolyDataMapper', ([], {'immediate_mode_rendering': '(True)'}), '(immediate_mode_rendering=True)\n', (1345, 1376), False, 'from tvtk.api import tvtk\n'), ((1398, 1423), 'tvtk.api.tvtk.Actor', 'tvtk.Actor', ([], {'mapper': 'mapper'}), '(mapper=mapper)\n', (1408, 1423), False, 'from tvtk.api import tvtk\n'), ((1432, 1480), 'tvtk.common.configure_input', 'configure_input', 
(['self.actor.mapper', 'self.normals'], {}), '(self.actor.mapper, self.normals)\n', (1447, 1480), False, 'from tvtk.common import configure_input, configure_input_data\n'), ((1522, 1618), 'tvtk.api.tvtk.LookupTable', 'tvtk.LookupTable', ([], {'hue_range': '(0.45, 0.6)', 'saturation_range': '(0.0, 0.8)', 'value_range': '(0.6, 1.0)'}), '(hue_range=(0.45, 0.6), saturation_range=(0.0, 0.8),\n value_range=(0.6, 1.0))\n', (1538, 1618), False, 'from tvtk.api import tvtk\n'), ((1799, 1806), 'itertools.count', 'count', ([], {}), '()\n', (1804, 1806), False, 'from itertools import count\n'), ((2264, 2273), 'util.veclen', 'veclen', (['c'], {}), '(c)\n', (2270, 2273), False, 'from util import veclen\n'), ((1894, 1926), 'numpy.sin', 'np.sin', (['(frame / 30.0 * np.pi * 2)'], {}), '(frame / 30.0 * np.pi * 2)\n', (1900, 1926), True, 'import numpy as np\n'), ((2458, 2494), 'tvtk.pyface.scene_editor.SceneEditor', 'SceneEditor', ([], {'scene_class': 'MayaviScene'}), '(scene_class=MayaviScene)\n', (2469, 2494), False, 'from tvtk.pyface.scene_editor import SceneEditor\n'), ((1994, 2007), 'numpy.abs', 'np.abs', (['alpha'], {}), '(alpha)\n', (2000, 2007), True, 'import numpy as np\n'), ((2604, 2693), 'traitsui.api.RangeEditor', 'RangeEditor', ([], {'is_float': '(False)', 'low': '(0)', 'high_name': '"""_max_component_index"""', 'mode': '"""spinner"""'}), "(is_float=False, low=0, high_name='_max_component_index', mode=\n 'spinner')\n", (2615, 2693), False, 'from traitsui.api import View, Item, HGroup, RangeEditor\n')] |
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from typing import Dict
from mo.front.tf.graph_utils import create_op_with_const_inputs
from mo.graph.graph import Graph, Node
from mo.middle.replacement import MiddleReplacementPattern
from mo.ops.broadcast import Broadcast
class RandomUniformReplacer(MiddleReplacementPattern):
"""
Replaces RandomUniform operation with Broadcast of ones in sub-graph:
ShapeOf ---> RandomUniform ---> Mul
"""
enabled = True
@staticmethod
def pattern():
return dict(
nodes=[
('shape', dict(op='ShapeOf')),
('shape_data', dict()),
('random_uniform', dict(op='RandomUniform')),
('random_uniform_data', dict()),
('mul', dict(op='Mul')),
('mul_const', dict(op='Const')),
('mul_const_data', dict())
],
edges=[
('shape', 'shape_data'),
('shape_data', 'random_uniform'),
('random_uniform', 'random_uniform_data'),
('random_uniform_data', 'mul'),
('mul_const', 'mul_const_data'),
('mul_const_data', 'mul')
]
)
@staticmethod
def replace_pattern(graph: Graph, match: Dict[str, Node]):
node = match['random_uniform']
node_name = node.soft_get('name', node.id)
data_type = match['mul_const'].out_port(0).get_data_type()
broadcast_node = create_op_with_const_inputs(graph, Broadcast, port_value_dict={0: np.array([1], dtype=data_type)},
op_attrs={'name': node_name + '/Broadcast', 'mode': 'numpy'})
node.in_port(0).get_connection().set_destination(broadcast_node.in_port(1))
node.out_port(0).get_connection().set_source(broadcast_node.out_port(0))
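        # The original ShapeOf output now feeds Broadcast's target-shape
        # input (port 1), so the constant ones are broadcast to exactly the
        # shape the replaced RandomUniform would have produced.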
| [
"numpy.array"
] | [((2129, 2159), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'data_type'}), '([1], dtype=data_type)\n', (2137, 2159), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import os
def get_mean_and_std(imgs_path, img_h, img_w):
# img_h, img_w = 32, 32
means, stdevs = [], []
img_list = []
imgs_path_list = os.listdir(imgs_path)
len_ = len(imgs_path_list)
i = 0
for item in imgs_path_list:
img = cv2.imread(os.path.join(imgs_path, item))
img = cv2.resize(img, (img_w, img_h))
img = img[:, :, :, np.newaxis]
img_list.append(img)
i += 1
print(i, '/', len_)
imgs = np.concatenate(img_list, axis=3)
imgs = imgs.astype(np.float32) / 255.
for i in range(3):
        pixels = imgs[:, :, i, :].ravel()  # flatten to a 1-D array
means.append(np.mean(pixels))
stdevs.append(np.std(pixels))
    # BGR --> RGB: OpenCV reads images as BGR, so reverse the channel order; PIL-loaded images would not need this
means.reverse()
stdevs.reverse()
print("normMean = {}".format(means))
print("normStd = {}".format(stdevs))
if __name__ == '__main__':
    imgs_path = './base_images'  # image directory
    img_h = 400
    img_w = 600  # adjust to your own dataset; don't make it too big, stubbornly starting with 4000 and 6000 was extremely slow
    get_mean_and_std(imgs_path, img_h, img_w)
| [
"numpy.mean",
"os.listdir",
"os.path.join",
"numpy.concatenate",
"numpy.std",
"cv2.resize"
] | [((188, 209), 'os.listdir', 'os.listdir', (['imgs_path'], {}), '(imgs_path)\n', (198, 209), False, 'import os\n'), ((521, 553), 'numpy.concatenate', 'np.concatenate', (['img_list'], {'axis': '(3)'}), '(img_list, axis=3)\n', (535, 553), True, 'import numpy as np\n'), ((360, 391), 'cv2.resize', 'cv2.resize', (['img', '(img_w, img_h)'], {}), '(img, (img_w, img_h))\n', (370, 391), False, 'import cv2\n'), ((314, 343), 'os.path.join', 'os.path.join', (['imgs_path', 'item'], {}), '(imgs_path, item)\n', (326, 343), False, 'import os\n'), ((696, 711), 'numpy.mean', 'np.mean', (['pixels'], {}), '(pixels)\n', (703, 711), True, 'import numpy as np\n'), ((736, 750), 'numpy.std', 'np.std', (['pixels'], {}), '(pixels)\n', (742, 750), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2019 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
import os
import gzip
import numpy as np
import logging
from recordclass import recordclass
from collections import defaultdict
from transformers import AutoTokenizer
from pytorch_pretrained_bert import BertTokenizer
from deep_morphology.data.base_data import BaseDataset, Vocab, DataFields
SentenceProbeFields = recordclass(
'SentenceProbeFields',
['sentence', 'sentence_len', 'target_idx', 'label']
)
class SentenceTokenPairFields(DataFields):
_fields = ('raw_sentence', 'idx1', 'idx2', 'token_starts',
'subwords', 'sentence_subword_len', 'label')
_alias = {
'tgt': 'label',
'input': 'subwords',
'input_len': 'sentence_subword_len'
}
_needs_vocab = ('label', 'subwords', )
class WordOnlyFields(DataFields):
_fields = ('sentence', 'target_word', 'target_word_len', 'target_idx',
'label')
_alias = {
'input': 'target_word',
'input_len': 'target_word_len',
'src_len': 'target_word_len',
'tgt': 'label',
}
_needs_vocab = ('target_word', 'label')
class EmbeddingOnlyFields(DataFields):
_fields = ('sentence', 'target_word', 'target_word_idx', 'label')
_alias = {
'tgt': 'label',
'src': 'target_word',
}
_needs_vocab = ('label', )
class EmbeddingOnlyPairFields(DataFields):
_fields = (
'left_sentence', 'left_target_word', 'left_target_idx',
'right_sentence', 'right_target_word', 'right_target_idx',
'label',
)
_alias = {
'tgt': 'label',
}
_needs_vocab = ('label', )
class BERTProberFields(DataFields):
_fields = (
'sentence', 'tokens', 'target', 'idx',
'sentence_len', 'target_idx', 'label',
)
_alias = {'tgt': 'label'}
_needs_vocab = ('label', )
class TokenInSequenceProberFields(DataFields):
_fields = (
'raw_sentence', 'raw_target', 'raw_idx',
'tokens', 'num_tokens', 'target_idx', 'label', 'token_starts',
)
_alias = {
'tgt': 'label',
# 'src_len': 'num_tokens',
'input_len': 'num_tokens'}
# token_starts needs a vocabulary because we manually set PAD=1000
_needs_vocab = ('tokens', 'label', 'token_starts')
_needs_constants = ('tokens', )
class MidSequenceProberFields(DataFields):
_fields = (
'raw_sentence', 'raw_target', 'raw_idx',
'input', 'input_len', 'target_idx', 'label', 'target_ids',
)
_alias = {'tgt': 'label', 'src_len': 'input_len'}
_needs_vocab = ('input', 'label', 'target_ids')
_needs_constants = ('input', )
class SequenceClassificationWithSubwordsDataFields(DataFields):
_fields = (
'raw_sentence', 'labels',
'sentence_len', 'subwords', 'sentence_subword_len', 'token_starts',
)
_alias = {'input': 'subwords',
'input_len': 'sentence_subword_len',
'tgt': 'labels'}
_needs_vocab = ('labels', )
class SentencePairFields(DataFields):
_fields = (
'left_sentence', 'left_sentence_len',
'left_target_word', 'left_target_idx',
'right_sentence', 'right_sentence_len',
'right_target_word', 'right_target_idx',
'label',
)
_alias = {'tgt': 'label'}
_needs_vocab = ('label', )
class WordOnlySentencePairFields(DataFields):
_fields = (
'left_sentence', 'left_target_word',
'left_target_word_len', 'left_target_idx',
'right_sentence', 'right_target_word',
'right_target_word_len', 'right_target_idx',
'label',
)
_alias = {'tgt': 'label'}
_needs_vocab = ('left_target_word', 'right_target_word', 'label', )
class BERTSentencePairFields(DataFields):
_fields = (
'left_sentence', 'left_tokens', 'left_sentence_len',
'left_target_word', 'left_target_first', 'left_target_last',
'right_sentence', 'right_tokens', 'right_sentence_len',
'right_target_word', 'right_target_first', 'right_target_last',
'label',
)
_alias = {'tgt': 'label'}
_needs_vocab = ('label', )
class Embedding:
def __init__(self, embedding_file, filter=None):
self.filter_ = filter
if embedding_file.endswith('.gz'):
with gzip.open(embedding_file, 'rt') as f:
self.load_stream(f)
else:
with open(embedding_file, 'rt') as f:
self.load_stream(f)
def load_stream(self, stream):
self.mtx = []
self.vocab = {}
for line in stream:
fd = line.strip().split(" ")
if len(fd) == 2:
continue
word = fd[0]
if self.filter_ and word not in self.filter_:
continue
self.vocab[word] = len(self.mtx)
self.mtx.append(list(map(float, fd[1:])))
self.mtx = np.array(self.mtx)
def __len__(self):
return self.mtx.shape[0]
def __getitem__(self, key):
if key not in self.vocab:
return self.mtx[0]
return self.mtx[self.vocab[key]]
@property
def embedding_dim(self):
return self.mtx.shape[1]
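# A hypothetical usage sketch for Embedding (the file name is made up):
#   emb = Embedding('vectors.vec.gz', filter={'cat', 'dog'})
#   vec = emb['cat']       # a row of emb.mtx, shape (embedding_dim,)
#   oov = emb['platypus']  # unknown words fall back to row 0 (see __getitem__)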
class EmbeddingProberDataset(BaseDataset):
unlabeled_data_class = 'UnlabeledEmbeddingProberDataset'
constants = []
data_recordclass = EmbeddingOnlyFields
def load_or_create_vocabs(self):
vocab_pre = os.path.join(self.config.experiment_dir, 'vocab_')
needs_vocab = getattr(self.data_recordclass, '_needs_vocab',
self.data_recordclass._fields)
self.vocabs = self.data_recordclass()
for field in needs_vocab:
vocab_fn = getattr(self.config, 'vocab_{}'.format(field),
vocab_pre+field)
if field == 'label':
constants = []
else:
constants = ['SOS', 'EOS', 'PAD', 'UNK']
if os.path.exists(vocab_fn):
setattr(self.vocabs, field, Vocab(file=vocab_fn, frozen=True))
else:
setattr(self.vocabs, field, Vocab(constants=constants))
def to_idx(self):
vocab = set(r.target_word for r in self.raw)
if self.config.embedding == 'discover':
language = self.config.train_file.split("/")[-2]
emb_fn = os.path.join(os.environ['HOME'], 'resources',
'fasttext', language, 'common.vec')
self.config.embedding = emb_fn
else:
emb_fn = self.config.embedding
self.embedding = Embedding(emb_fn, filter=vocab)
self.embedding_size = self.embedding.embedding_dim
if getattr(self.config, 'permute_embedding', False):
self.embedding.mtx = np.random.permutation(self.embedding.mtx)
if getattr(self.config, 'randomize_embedding', False):
self.embedding.mtx = np.random.random(self.embedding.mtx.shape)
word_vecs = []
labels = []
for r in self.raw:
word_vecs.append(self.embedding[r.target_word])
labels.append(self.vocabs.label[r.label])
self.mtx = EmbeddingOnlyFields(
target_word=word_vecs,
label=labels
)
def extract_sample_from_line(self, line):
fd = line.rstrip("\n").split("\t")
sent, target, idx = fd[:3]
if len(fd) > 3:
label = fd[3]
else:
label = None
return EmbeddingOnlyFields(
sentence=sent,
target_word=target,
target_word_idx=int(idx),
label=label
)
def print_sample(self, sample, stream):
stream.write("{}\t{}\t{}\t{}\n".format(
sample.sentence, sample.target_word,
sample.target_word_idx, sample.label
))
def decode(self, model_output):
for i, sample in enumerate(self.raw):
output = model_output[i].argmax().item()
sample.label = self.vocabs.label.inv_lookup(output)
def batched_iter(self, batch_size):
starts = list(range(0, len(self), batch_size))
if self.is_unlabeled is False and self.config.shuffle_batches:
np.random.shuffle(starts)
for start in starts:
end = start + batch_size
yield EmbeddingOnlyFields(
target_word=self.mtx.target_word[start:end],
label=self.mtx.label[start:end]
)
class UnlabeledEmbeddingProberDataset(EmbeddingProberDataset):
pass
class EmbeddingPairDataset(BaseDataset):
unlabeled_data_class = 'UnlabeledEmbeddingPairDataset'
constants = []
data_recordclass = EmbeddingOnlyPairFields
def load_or_create_vocabs(self):
vocab_pre = os.path.join(self.config.experiment_dir, 'vocab_')
self.vocabs = self.data_recordclass()
for field in ('left_target_word', 'label'):
vocab_fn = getattr(self.config, 'vocab_{}'.format(field),
vocab_pre+field)
constants = []
if os.path.exists(vocab_fn):
setattr(self.vocabs, field, Vocab(file=vocab_fn, frozen=True))
else:
setattr(self.vocabs, field, Vocab(constants=constants))
self.vocabs.right_target_word = self.vocabs.left_target_word
def to_idx(self):
vocab = set(r.left_target_word for r in self.raw) | \
set(r.right_target_word for r in self.raw)
if self.config.embedding == 'discover':
language = self.config.train_file.split("/")[-2]
emb_fn = os.path.join(os.environ['HOME'], 'resources',
'fasttext', language, 'common.vec')
self.config.embedding = emb_fn
else:
emb_fn = self.config.embedding
self.embedding = Embedding(emb_fn, filter=vocab)
self.embedding_size = self.embedding.embedding_dim
left_vecs = []
right_vecs = []
labels = []
for r in self.raw:
left_vecs.append(self.embedding[r.left_target_word])
right_vecs.append(self.embedding[r.right_target_word])
labels.append(self.vocabs.label[r.label])
self.mtx = EmbeddingOnlyPairFields(
left_target_word=left_vecs,
right_target_word=right_vecs,
label=labels,
)
def extract_sample_from_line(self, line):
fd = line.rstrip("\n").split("\t")
if len(fd) > 6:
label = fd[6]
else:
label = None
return EmbeddingOnlyPairFields(
left_sentence=fd[0],
left_target_word=fd[1],
left_target_idx=fd[2],
right_sentence=fd[3],
right_target_word=fd[4],
right_target_idx=fd[5],
label=label
)
def decode(self, model_output):
for i, sample in enumerate(self.raw):
output = model_output[i].argmax().item()
sample.label = self.vocabs.label.inv_lookup(output)
def print_sample(self, sample, stream):
stream.write("{}\n".format("\t".join(map(str, (
sample.left_sentence,
sample.left_target_word,
sample.left_target_idx,
sample.right_sentence,
sample.right_target_word,
sample.right_target_idx,
sample.label)
))))
def batched_iter(self, batch_size):
starts = list(range(0, len(self), batch_size))
if self.is_unlabeled is False and self.config.shuffle_batches:
np.random.shuffle(starts)
for start in starts:
end = start + batch_size
yield EmbeddingOnlyPairFields(
left_target_word=self.mtx.left_target_word[start:end],
right_target_word=self.mtx.right_target_word[start:end],
label=self.mtx.label[start:end]
)
class UnlabeledEmbeddingPairDataset(EmbeddingPairDataset):
pass
class ELMOSentencePairDataset(BaseDataset):
data_recordclass = SentencePairFields
unlabeled_data_class = 'UnlabeledELMOSentencePairDataset'
constants = []
# FIXME this is a copy of WordOnlySentenceProberDataset's method
# should be removed along with recordclass
def load_or_create_vocabs(self):
# FIXME this should be init or more like nowhere
self.tgt_field_idx = -1
vocab_pre = os.path.join(self.config.experiment_dir, 'vocab_')
needs_vocab = getattr(self.data_recordclass, '_needs_vocab',
self.data_recordclass._fields)
self.vocabs = self.data_recordclass()
for field in needs_vocab:
vocab_fn = getattr(self.config, 'vocab_{}'.format(field),
vocab_pre+field)
if field == 'label':
constants = []
else:
constants = ['SOS', 'EOS', 'PAD', 'UNK']
if os.path.exists(vocab_fn):
setattr(self.vocabs, field, Vocab(file=vocab_fn, frozen=True))
else:
setattr(self.vocabs, field, Vocab(constants=constants))
def extract_sample_from_line(self, line):
fd = line.rstrip("\n").split("\t")
left_sen = fd[0].split(" ")
right_sen = fd[3].split(" ")
lidx = int(fd[2])
ridx = int(fd[5])
assert left_sen[lidx] == fd[1]
assert right_sen[ridx] == fd[4]
if len(fd) > 6:
label = fd[6]
else:
label = None
return SentencePairFields(
left_sentence=left_sen,
left_sentence_len=len(left_sen),
left_target_word=left_sen[lidx],
left_target_idx=lidx,
right_sentence=right_sen,
right_sentence_len=len(right_sen),
right_target_word=right_sen[ridx],
right_target_idx=ridx,
label=label
)
def to_idx(self):
mtx = SentencePairFields.initialize_all(list)
for sample in self.raw:
for field, value in sample._asdict().items():
if field == 'label':
mtx.label.append(self.vocabs.label[value])
else:
getattr(mtx, field).append(value)
self.mtx = mtx
def batched_iter(self, batch_size):
starts = list(range(0, len(self), batch_size))
if self.is_unlabeled is False and self.config.shuffle_batches:
np.random.shuffle(starts)
PAD = '<pad>'
for start in starts:
self._start = start
end = min(start + batch_size, len(self.raw))
batch = SentencePairFields.initialize_all(list)
# pad left sentences
maxlen = max(self.mtx.left_sentence_len[start:end])
sents = [self.mtx.left_sentence[i] +
[PAD] * (maxlen - self.mtx.left_sentence_len[i])
for i in range(start, end)]
batch.left_sentence = sents
batch.left_target_idx = self.mtx.left_target_idx[start:end]
# pad right sentences
maxlen = max(self.mtx.right_sentence_len[start:end])
sents = [self.mtx.right_sentence[i] +
[PAD] * (maxlen - self.mtx.right_sentence_len[i])
for i in range(start, end)]
batch.right_sentence = sents
batch.right_target_idx = self.mtx.right_target_idx[start:end]
batch.label = self.mtx.label[start:end]
yield batch
def decode(self, model_output):
for i, sample in enumerate(self.raw):
output = model_output[i].argmax().item()
sample.label = self.vocabs.label.inv_lookup(output)
def print_sample(self, sample, stream):
stream.write("{}\n".format("\t".join(map(str, (
" ".join(sample.left_sentence),
sample.left_target_word,
sample.left_target_idx,
" ".join(sample.right_sentence),
sample.right_target_word,
sample.right_target_idx,
sample.label)
))))
class UnlabeledELMOSentencePairDataset(ELMOSentencePairDataset):
pass
class BERTSentencePairDataset(ELMOSentencePairDataset):
unlabeled_data_class = 'UnlabeledBERTSentencePairDataset'
def __init__(self, config, stream_or_file, **kwargs):
model_name = getattr(config, 'bert_model', 'bert-base-multilingual-cased')
if 'bert_tokenizer' in globals():
self.tokenizer = globals()['bert_tokenizer']
else:
self.tokenizer = BertTokenizer.from_pretrained(
model_name, do_lower_case=False)
globals()['bert_tokenizer'] = self.tokenizer
super().__init__(config, stream_or_file, **kwargs)
def extract_sample_from_line(self, line):
fd = line.rstrip("\n").split("\t")
left_sen, left_first, left_last = self.parse_sentence(fd[:3])
right_sen, right_first, right_last = self.parse_sentence(fd[3:6])
if len(fd) > 6:
label = fd[6]
else:
label = None
return BERTSentencePairFields(
left_sentence=fd[0],
left_tokens=left_sen,
left_sentence_len=len(left_sen),
left_target_word=fd[1],
left_target_first=left_first,
left_target_last=left_last,
right_sentence=fd[3],
right_tokens=right_sen,
right_sentence_len=len(right_sen),
right_target_word=fd[4],
right_target_first=right_first,
right_target_last=right_last,
label=label,
)
def parse_sentence(self, fields):
sent, target, idx = fields
idx = int(idx)
bert_tokens = ['[CLS]']
for i, t in enumerate(sent.split(" ")):
bt = self.tokenizer.tokenize(t)
if i == idx:
first = len(bert_tokens)
last = len(bert_tokens) + len(bt) - 1
bert_tokens.extend(bt)
bert_tokens.append('[SEP]')
return bert_tokens, first, last
def to_idx(self):
self.mtx = BERTSentencePairFields.initialize_all(list)
for sample in self.raw:
# left fields
self.mtx.left_sentence_len.append(sample.left_sentence_len)
tok_idx = self.tokenizer.convert_tokens_to_ids(sample.left_tokens)
self.mtx.left_tokens.append(tok_idx)
self.mtx.left_target_first.append(sample.left_target_first)
self.mtx.left_target_last.append(sample.left_target_last)
# right fields
self.mtx.right_sentence_len.append(sample.right_sentence_len)
tok_idx = self.tokenizer.convert_tokens_to_ids(sample.right_tokens)
self.mtx.right_tokens.append(tok_idx)
self.mtx.right_target_first.append(sample.right_target_first)
self.mtx.right_target_last.append(sample.right_target_last)
# label if labeled
if sample.label is None:
self.mtx.label.append(None)
else:
self.mtx.label.append(self.vocabs.label[sample.label])
def __len__(self):
return len(self.raw)
def batched_iter(self, batch_size):
starts = list(range(0, len(self), batch_size))
if self.is_unlabeled is False and self.config.shuffle_batches:
np.random.shuffle(starts)
PAD = 0
for start in starts:
self._start = start
end = min(start + batch_size, len(self.raw))
batch = BERTSentencePairFields.initialize_all(list)
# pad left sentences
maxlen = max(self.mtx.left_sentence_len[start:end])
sents = [self.mtx.left_tokens[i] +
[PAD] * (maxlen - self.mtx.left_sentence_len[i])
for i in range(start, end)]
batch.left_tokens = sents
batch.left_sentence_len = self.mtx.left_sentence_len[start:end]
batch.left_target_first = self.mtx.left_target_first[start:end]
batch.left_target_last = self.mtx.left_target_last[start:end]
# pad right sentences
maxlen = max(self.mtx.right_sentence_len[start:end])
sents = [self.mtx.right_tokens[i] +
[PAD] * (maxlen - self.mtx.right_sentence_len[i])
for i in range(start, end)]
batch.right_tokens = sents
batch.right_sentence_len = self.mtx.right_sentence_len[start:end]
batch.right_target_first = self.mtx.right_target_first[start:end]
batch.right_target_last = self.mtx.right_target_last[start:end]
batch.label = self.mtx.label[start:end]
yield batch
def print_sample(self, sample, stream):
stream.write("{}\n".format("\t".join(map(str, (
sample.left_sentence,
sample.left_target_word,
sample.left_target_first,
sample.right_sentence,
sample.right_target_word,
sample.right_target_first,
sample.label)
))))
class UnlabeledBERTSentencePairDataset(BERTSentencePairDataset):
pass
class WordOnlySentenceProberDataset(BaseDataset):
data_recordclass = WordOnlyFields
unlabeled_data_class = 'UnlabeledWordOnlySentenceProberDataset'
constants = []
def load_or_create_vocabs(self):
vocab_pre = os.path.join(self.config.experiment_dir, 'vocab_')
needs_vocab = getattr(self.data_recordclass, '_needs_vocab',
self.data_recordclass._fields)
self.vocabs = self.data_recordclass()
for field in needs_vocab:
vocab_fn = getattr(self.config, 'vocab_{}'.format(field),
vocab_pre+field)
if field == 'label':
constants = []
else:
constants = ['SOS', 'EOS', 'PAD', 'UNK']
if os.path.exists(vocab_fn):
setattr(self.vocabs, field, Vocab(file=vocab_fn, frozen=True))
else:
setattr(self.vocabs, field, Vocab(constants=constants))
def extract_sample_from_line(self, line):
fd = line.rstrip("\n").split("\t")
        if len(fd) > 3:
sent, target, idx, label = fd[:4]
else:
sent, target, idx = fd[:3]
label = None
idx = int(idx)
return WordOnlyFields(
sentence=sent,
target_word=target,
target_idx=idx,
target_word_len=len(target),
label=label,
)
def to_idx(self):
words = []
lens = []
labels = []
if self.config.use_global_padding:
maxlen = self.get_max_seqlen()
longer = sum(s.target_word_len > maxlen for s in self.raw)
if longer > 0:
logging.warning('{} elements longer than maxlen'.format(longer))
for sample in self.raw:
idx = list(self.vocabs.target_word[c] for c in sample.target_word)
if self.config.use_global_padding:
idx = idx[:maxlen-2]
idx = [self.vocabs.target_word.SOS] + \
idx + [self.vocabs.target_word.EOS]
idx = idx + [self.vocabs.target_word.PAD] * (maxlen - len(idx))
lens.append(maxlen)
else:
idx = [self.vocabs.target_word.SOS] + \
idx + [self.vocabs.target_word.EOS]
lens.append(len(idx))
words.append(idx)
labels.append(self.vocabs.label[sample.label])
self.mtx = WordOnlyFields(
target_word=words, target_word_len=lens, label=labels
)
def print_sample(self, sample, stream):
stream.write("{}\t{}\t{}\t{}\n".format(
sample.sentence, sample.target_word,
sample.target_idx, sample.label
))
def decode(self, model_output):
for i, sample in enumerate(self.raw):
output = model_output[i].argmax().item()
sample.label = self.vocabs.label.inv_lookup(output)
def __len__(self):
return len(self.raw)
def get_max_seqlen(self):
if hasattr(self.config, 'max_seqlen'):
return self.config.max_seqlen
return max(s.target_word_len for s in self.raw) + 2
class UnlabeledWordOnlySentenceProberDataset(WordOnlySentenceProberDataset):
def is_unlabeled(self):
return True
class BERTRandomTokenizer:
def __init__(self, tokenizer, keep_until=106, mix_initial_and_cont=False):
self.bert_tokenizer = tokenizer
start_rand = keep_until
bert_size = len(self.bert_tokenizer.vocab)
self.bert2rand = {}
if mix_initial_and_cont:
rand_range = np.arange(start_rand, bert_size)
np.random.shuffle(rand_range)
full_range = np.concatenate((np.arange(start_rand), rand_range))
for tok, idx in self.bert_tokenizer.vocab.items():
j = full_range[idx]
self.bert2rand[tok] = self.bert_tokenizer.ids_to_tokens[j]
else:
continuation = []
initial = []
for tok, idx in self.bert_tokenizer.vocab.items():
if idx < start_rand:
continue
if tok.startswith('##'):
continuation.append(tok)
else:
initial.append(tok)
crand = np.array(continuation)
np.random.shuffle(crand)
cmap = dict(zip(*(continuation, crand)))
irand = np.array(initial)
np.random.shuffle(irand)
imap = dict(zip(*(initial, irand)))
for tok, idx in self.bert_tokenizer.vocab.items():
if idx < start_rand:
self.bert2rand[tok] = tok
elif tok in cmap:
self.bert2rand[tok] = cmap[tok]
elif tok in imap:
self.bert2rand[tok] = imap[tok]
else:
raise ValueError("Token [{}] not found".format(tok))
def load(self, fn):
self.bert2rand = {}
with open(fn) as f:
for line in f:
src, tgt = line.rstrip("\n").split("\t")
self.bert2rand[src] = tgt
def save(self, fn):
with open(fn, 'w') as f:
for src, tgt in self.bert2rand.items():
f.write("{}\t{}\n".format(src, tgt))
def tokenize(self, text):
bert_tokens = self.bert_tokenizer.tokenize(text)
replaced = []
for b in bert_tokens:
replaced.append(self.bert2rand[b])
return replaced
def convert_tokens_to_ids(self, tokens):
return self.bert_tokenizer.convert_tokens_to_ids(tokens)
@property
def rand2bert(self):
if not hasattr(self, '_rand2bert'):
self._rand2bert = {v: k for k, v in self.bert2rand.items()}
return self._rand2bert
def convert_to_orig(self, tokens):
return [self.rand2bert[t] for t in tokens]
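# A sketch of the intended use of BERTRandomTokenizer (our reading of the
# class above): segmentation is unchanged, but wordpiece identities above
# `keep_until` are permuted, giving a randomized-vocabulary control.
#   base = BertTokenizer.from_pretrained('bert-base-multilingual-cased', do_lower_case=False)
#   rand = BERTRandomTokenizer(base, keep_until=106)
#   rand.tokenize("hello world")  # same piece boundaries, shuffled identities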
# TODO replace MidSentenceProberDataset with TokenInSequenceProberFields
class MidSentenceProberDataset(BaseDataset):
unlabeled_data_class = 'UnlabeledMidSentenceProberDataset'
data_recordclass = MidSequenceProberFields
constants = ['SOS', 'EOS', 'UNK', 'PAD']
def extract_sample_from_line(self, line):
raw_sent, raw_target, raw_idx, label = line.rstrip("\n").split("\t")
raw_idx = int(raw_idx)
input = list(raw_sent)
words = raw_sent.split(' ')
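        # target_idx below is a character offset into raw_sent: the lengths
        # of the preceding words plus raw_idx separating spaces, landing on
        # the first or last character of the target word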
if self.config.probe_first_char:
target_idx = sum(len(w) for w in words[:raw_idx]) + raw_idx
else:
target_idx = sum(len(w) for w in words[:raw_idx]) + raw_idx + len(raw_target) - 1
return self.data_recordclass(
raw_sentence=raw_sent,
raw_target=raw_target,
raw_idx=raw_idx,
input=input,
input_len=len(input),
target_idx=target_idx,
label=label,
)
def to_idx(self):
mtx = self.data_recordclass(input=[], input_len=[],
target_idx=[], label=[])
SOS = self.vocabs.input['SOS']
EOS = self.vocabs.input['EOS']
for sample in self.raw:
mtx.label.append(self.vocabs.label[sample.label])
mtx.input_len.append(sample.input_len)
mtx.target_idx.append(sample.target_idx)
mtx.input.append(
[SOS] + [self.vocabs.input[s] for s in sample.input] + [EOS]
)
self.mtx = mtx
def decode(self, model_output):
for i, sample in enumerate(self.raw):
output = np.argmax(model_output[i])
self.raw[i].label = self.vocabs.label.inv_lookup(output)
def print_sample(self, sample, stream):
stream.write("{}\t{}\t{}\t{}\n".format(
sample.raw_sentence, sample.raw_target, sample.raw_idx, sample.label
))
class UnlabeledMidSentenceProberDataset(MidSentenceProberDataset):
@property
def is_unlabeled(self):
return True
class BERTSentenceProberDataset(BaseDataset):
unlabeled_data_class = 'UnlabeledBERTSentenceProberDataset'
data_recordclass = BERTProberFields
constants = []
def __init__(self, config, stream_or_file, share_vocabs_with=None,
max_samples=None, **kwargs):
self.config = config
self.max_samples = max_samples
model_name = getattr(self.config, 'bert_model',
'bert-base-multilingual-cased')
if 'bert_tokenizer' in globals():
self.tokenizer = globals()['bert_tokenizer']
else:
self.tokenizer = BertTokenizer.from_pretrained(
model_name, do_lower_case=False)
globals()['bert_tokenizer'] = self.tokenizer
if self.config.randomize_wordpieces:
logging.info("Randomizing WordPiece vocabulary")
self.tokenizer = BERTRandomTokenizer(
self.tokenizer,
keep_until=self.config.keep_wp_until,
mix_initial_and_cont=self.config.mix_initial_and_continuation_wp)
self.load_or_create_vocabs()
self.load_stream_or_file(stream_or_file)
self.to_idx()
self.tgt_field_idx = -1
self._cache = {}
def load_or_create_vocabs(self):
existing = os.path.join(self.config.experiment_dir, 'vocab_label')
if self.config.randomize_wordpieces is True:
fn = os.path.join(self.config.experiment_dir, 'random_bert_vocab')
if os.path.exists(fn):
self.tokenizer.load(fn)
else:
self.tokenizer.save(fn)
if os.path.exists(existing):
vocab = Vocab(file=existing, frozen=True)
else:
vocab = Vocab(constants=[])
self.vocabs = BERTProberFields(label=vocab)
def load_stream(self, stream):
if self.is_unlabeled:
permutations = self.config.test_permutations
else:
permutations = self.config.train_permutations
if permutations == 0:
super().load_stream(stream)
else:
self.raw = []
for line in stream:
sent, target, tgt_idx, label = line.rstrip("\n").split("\t")
tgt_idx = int(tgt_idx)
tokens = sent.split(" ")
for n in range(permutations):
perm_idx = np.arange(len(tokens))
np.random.shuffle(perm_idx)
inv_idx = np.argsort(perm_idx)
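                    # inv_idx is the inverse permutation: the token at
                    # original position j lands at position perm_idx[j], so
                    # perm_idx[tgt_idx] below tracks the target word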
perm_tokens = [tokens[inv_idx[i]] for i in range(len(tokens))]
perm_tgt_idx = perm_idx[tgt_idx]
bert_tokens, bert_tok_idx = self.perturb_sentence(
perm_tokens, target, perm_tgt_idx)
self.raw.append(BERTProberFields(
sentence=sent,
tokens=bert_tokens,
sentence_len=len(bert_tokens),
idx=tgt_idx,
target_idx=bert_tok_idx[perm_tgt_idx],
target=target,
label=label,
))
def perturb_sentence(self, sentence, target, tgt_idx):
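        """Tokenize the sentence with WordPiece, optionally masking the target
        and/or its context, and record one subword index per original word
        (its first or last piece, depending on config)."""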
tokens = ['[CLS]']
tok_idx = []
for i, t in enumerate(sentence):
bert_tokens = self.tokenizer.tokenize(t)
if i == tgt_idx:
if self.config.mask_target:
if self.config.mask_each_wordpiece:
bert_tokens = ['[MASK]'] * len(bert_tokens)
else:
bert_tokens = ['[MASK]']
else:
if self.config.mask_all_context:
bert_tokens = ['[MASK]']
elif abs(i-tgt_idx) <= self.config.mask_context:
bert_tokens = ['[MASK]']
elif 0 < tgt_idx-i <= self.config.mask_left_context:
bert_tokens = ['[MASK]']
elif 0 < i-tgt_idx <= self.config.mask_right_context:
bert_tokens = ['[MASK]']
if self.config.use_wordpiece_unit == 'first':
tok_idx.append(len(tokens))
else:
tok_idx.append(len(tokens) + len(bert_tokens)-1)
tokens.extend(bert_tokens)
tokens.append('[SEP]')
if self.config.mask_target is True:
assert tokens[tok_idx[tgt_idx]] == '[MASK]'
elif self.config.randomize_wordpieces is False:
if not tokens[tok_idx[tgt_idx]] == '[UNK]':
assert set(tokens[tok_idx[tgt_idx]]) & set(target)
return tokens, tok_idx
def extract_sample_from_line(self, line):
sent, target, idx, label = line.rstrip("\n").split("\t")
idx = int(idx)
bert_tokens, bert_tok_idx = self.perturb_sentence(
sent.split(" "), target, idx)
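        # shift_target probes a neighbouring word's representation; at a sentence boundary fall back to [CLS]/[SEP].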
if self.config.shift_target == -1:
if idx == 0:
# [CLS] symbol
bert_target_idx = 0
else:
bert_target_idx = bert_tok_idx[idx-1]
elif self.config.shift_target == 1:
if idx == len(bert_tok_idx) - 1:
# [SEP] symbol
bert_target_idx = len(bert_tokens) - 1
else:
bert_target_idx = bert_tok_idx[idx+1]
else:
bert_target_idx = bert_tok_idx[idx]
return BERTProberFields(
sentence=sent,
tokens=bert_tokens,
sentence_len=len(bert_tokens),
idx=idx,
target_idx=bert_target_idx,
target=target,
label=label,
)
def to_idx(self):
mtx = BERTProberFields.initialize_all(list)
for sample in self.raw:
# int fields
mtx.sentence_len.append(sample.sentence_len)
mtx.target_idx.append(sample.target_idx)
# sentence
idx = self.tokenizer.convert_tokens_to_ids(sample.tokens)
mtx.sentence.append(idx)
# label
if sample.label is None:
mtx.label.append(None)
else:
mtx.label.append(self.vocabs.label[sample.label])
self.mtx = mtx
if not self.is_unlabeled:
if self.config.sort_data_by_length:
self.sort_data_by_length(sort_field='sentence_len')
@property
def is_unlabeled(self):
return False
def batched_iter(self, batch_size):
starts = list(range(0, len(self), batch_size))
if self.is_unlabeled is False and self.config.shuffle_batches:
np.random.shuffle(starts)
for start in starts:
end = start + batch_size
batch = []
for i, mtx in enumerate(self.mtx):
if i == 0:
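                    # Field 0 holds the subword id sequences; right-pad each one to the batch maximum.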
sents = mtx[start:end]
maxlen = max(len(s) for s in sents)
sents = [
s + [0] * (maxlen-len(s))
for s in sents
]
batch.append(sents)
else:
batch.append(mtx[start:end])
self._start = start
yield self.create_recordclass(*batch)
def decode(self, model_output):
for i, sample in enumerate(self.raw):
output = model_output[i].argmax().item()
sample.label = self.vocabs.label.inv_lookup(output)
def print_sample(self, sample, stream):
stream.write("{}\t{}\t{}\t{}\n".format(
sample.sentence, sample.target, sample.idx, sample.label
))
class UnlabeledBERTSentenceProberDataset(BERTSentenceProberDataset):
@property
def is_unlabeled(self):
return True
class BERTSentenceProberDatasetWithPOS(BERTSentenceProberDataset):
unlabeled_data_class = 'UnlabeledBERTSentenceProberDatasetWithPOS'
def extract_sample_from_line(self, line):
sentence, target, idx, label = line.rstrip("\n").split("\t")
idx = int(idx)
tokens = []
pos_list = []
left_mask_idx = defaultdict(list)
right_mask_idx = defaultdict(list)
        sent_split = sentence.split(" ")
        for ti, t in enumerate(sent_split):
fd = t.split("_")
pos = fd[-1]
token = "_".join(fd[:-1])
pos_list.append(pos)
tokens.append(token)
for ti in range(idx-1, -1, -1):
pos = pos_list[ti]
if len(left_mask_idx[pos]) < self.config.mask_left_pos.get(pos, 0):
left_mask_idx[pos].append(ti)
for ti in range(idx+1, len(pos_list)):
pos = pos_list[ti]
if len(right_mask_idx[pos]) < self.config.mask_right_pos.get(pos, 0):
right_mask_idx[pos].append(ti)
mask_idx = set()
for v in left_mask_idx.values():
mask_idx |= set(v)
for v in right_mask_idx.values():
mask_idx |= set(v)
bert_sentence = ['[CLS]']
for ti, token in enumerate(tokens):
if ti == idx and self.config.use_wordpiece_unit == 'first':
bert_target_idx = len(bert_sentence)
if ti in mask_idx:
bert_sentence.append('[MASK]')
else:
bert_sentence.extend(self.tokenizer.tokenize(token))
if ti == idx and self.config.use_wordpiece_unit == 'last':
bert_target_idx = len(bert_sentence) - 1
bert_sentence.append('[SEP]')
# check the target symbol
if not bert_sentence[bert_target_idx] == '[UNK]':
assert set(bert_sentence[bert_target_idx]) & set(target)
return BERTProberFields(
sentence=sentence,
tokens=bert_sentence,
sentence_len=len(bert_sentence),
idx=idx,
target_idx=bert_target_idx,
target=target,
label=label,
)
class UnlabeledBERTSentenceProberDatasetWithPOS(BERTSentenceProberDatasetWithPOS):
@property
def is_unlabeled(self):
return True
class ELMOSentenceProberDataset(BaseDataset):
unlabeled_data_class = 'UnlabeledELMOSentenceProberDataset'
data_recordclass = SentenceProbeFields
constants = []
def __init__(self, config, stream_or_file, share_vocabs_with=None, **kwargs):
self.config = config
self.load_or_create_vocabs()
self.load_stream_or_file(stream_or_file)
self.to_idx()
self.tgt_field_idx = -1
self._cache = {}
def load_or_create_vocabs(self):
existing = os.path.join(self.config.experiment_dir, 'vocab_label')
if os.path.exists(existing):
vocab = Vocab(file=existing, frozen=True)
else:
vocab = Vocab(constants=[])
self.vocabs = SentenceProbeFields(
None, None, None, vocab
)
def extract_sample_from_line(self, line):
sent, target, idx, label = line.rstrip("\n").split("\t")
if self.config.word_only:
sent = sent.split(" ")[int(idx)]
idx = 0
sent = sent.split(" ")
return SentenceProbeFields(
sentence=sent,
sentence_len=len(sent),
target_idx=int(idx),
label=label,
)
def to_idx(self):
mtx = SentenceProbeFields(
[], [], [], []
)
for sample in self.raw:
# int fields
mtx.sentence_len.append(sample.sentence_len)
mtx.target_idx.append(sample.target_idx)
# sentence
mtx.sentence.append(sample.sentence)
# label
if sample.label is None:
mtx.label.append(None)
else:
mtx.label.append(self.vocabs.label[sample.label])
self.mtx = mtx
if not self.is_unlabeled:
if self.config.sort_data_by_length:
self.sort_data_by_length(sort_field='sentence_len')
def batched_iter(self, batch_size):
starts = list(range(0, len(self), batch_size))
if self.is_unlabeled is False and self.config.shuffle_batches:
np.random.shuffle(starts)
PAD = '<pad>'
for start in starts:
self._start = start
end = start + batch_size
batch = []
for i, mtx in enumerate(self.mtx):
if i == 0:
sents = mtx[start:end]
maxlen = max(len(s) for s in sents)
sents = [
s + [PAD] * (maxlen-len(s))
for s in sents
]
batch.append(sents)
else:
batch.append(mtx[start:end])
yield self.create_recordclass(*batch)
@property
def is_unlabeled(self):
return False
class UnlabeledELMOSentenceProberDataset(ELMOSentenceProberDataset):
@property
def is_unlabeled(self):
return True
def extract_sample_from_line(self, line):
sent, target, idx = line.rstrip("\n").split("\t")[:3]
if self.config.word_only:
sent = sent.split(" ")[int(idx)]
idx = 0
sent = sent.split(" ")
return SentenceProbeFields(
sentence=sent,
sentence_len=len(sent),
target_idx=int(idx),
label=None,
)
def decode(self, model_output):
for i, sample in enumerate(self.raw):
output = model_output[i].argmax().item()
sample.label = self.vocabs.label.inv_lookup(output)
def print_sample(self, sample, stream):
stream.write("{}\t{}\t{}\t{}\n".format(
" ".join(sample.sentence), sample.sentence[sample.target_idx],
sample.target_idx, sample.label
))
class WordOnlySentencePairDataset(BaseDataset):
data_recordclass = WordOnlySentencePairFields
unlabeled_data_class = 'UnlabeledWordOnlySentencePairDataset'
def load_or_create_vocabs(self):
vocab_pre = os.path.join(self.config.experiment_dir, 'vocab_')
self.vocabs = self.data_recordclass()
for field in ['left_target_word', 'label']:
vocab_fn = getattr(self.config, 'vocab_{}'.format(field),
vocab_pre+field)
if field == 'label':
constants = []
else:
constants = ['SOS', 'EOS', 'PAD', 'UNK']
if os.path.exists(vocab_fn):
setattr(self.vocabs, field, Vocab(file=vocab_fn, frozen=True))
else:
setattr(self.vocabs, field, Vocab(constants=constants))
self.vocabs.right_target_word = self.vocabs.left_target_word
def extract_sample_from_line(self, line):
fd = line.rstrip("\n").split("\t")
left_sen = fd[0].split(" ")
right_sen = fd[3].split(" ")
lidx = int(fd[2])
ridx = int(fd[5])
lw = left_sen[lidx]
rw = right_sen[ridx]
assert lw == fd[1]
assert rw == fd[4]
if len(fd) > 6:
label = fd[6]
else:
label = None
return WordOnlySentencePairFields(
left_sentence=fd[0],
left_target_word=lw,
left_target_word_len=len(lw),
left_target_idx=int(fd[2]),
right_sentence=fd[3],
right_target_word=rw,
right_target_word_len=len(rw),
right_target_idx=int(fd[5]),
label=label
)
def to_idx(self):
left = []
left_lens = []
right = []
right_lens = []
labels = []
SOS = self.vocabs.left_target_word.SOS
EOS = self.vocabs.left_target_word.EOS
PAD = self.vocabs.left_target_word.PAD
if self.config.use_global_padding:
maxlen = self.get_max_seqlen()
longer = sum(s.left_target_word_len > maxlen for s in self.raw) + \
sum(s.right_target_word_len > maxlen for s in self.raw)
if longer > 0:
logging.warning('{} elements longer than maxlen'.format(longer))
for sample in self.raw:
left_idx = list(self.vocabs.left_target_word[c]
for c in sample.left_target_word)
right_idx = list(self.vocabs.right_target_word[c]
for c in sample.right_target_word)
if self.config.use_global_padding:
left_idx = left_idx[:maxlen-2]
right_idx = right_idx[:maxlen-2]
left_idx = [SOS] + left_idx + [EOS]
left_idx = left_idx + [PAD] * (maxlen - len(left_idx))
left.append(left_idx)
left_lens.append(maxlen)
right_idx = [SOS] + right_idx + [EOS]
right_idx = right_idx + [PAD] * (maxlen - len(right_idx))
right.append(right_idx)
right_lens.append(maxlen)
else:
left_idx = [SOS] + left_idx + [EOS]
left.append(left_idx)
left_lens.append(len(left_idx))
right_idx = [SOS] + right_idx + [EOS]
right.append(right_idx)
right_lens.append(len(right_idx))
labels.append(self.vocabs.label[sample.label])
self.mtx = WordOnlySentencePairFields(
left_target_word=left,
left_target_word_len=left_lens,
right_target_word=right,
right_target_word_len=right_lens,
label=labels,
)
def get_max_seqlen(self):
if hasattr(self.config, 'max_seqlen'):
return self.config.max_seqlen
return max(
max(s.left_target_word_len, s.right_target_word_len)
for s in self.raw
) + 2
def get_input_vocab_size(self):
return len(self.vocabs.left_target_word)
def decode(self, model_output):
for i, sample in enumerate(self.raw):
output = model_output[i].argmax().item()
sample.label = self.vocabs.label.inv_lookup(output)
def print_sample(self, sample, stream):
stream.write("{}\n".format("\t".join(map(str, (
sample.left_sentence,
sample.left_target_word,
sample.left_target_idx,
sample.right_sentence,
sample.right_target_word,
sample.right_target_idx,
sample.label)
))))
class UnlabeledWordOnlySentencePairDataset(WordOnlySentencePairDataset):
pass
class SequenceClassificationWithSubwords(BaseDataset):
unlabeled_data_class = 'UnlabeledSequenceClassificationWithSubwords'
data_recordclass = SequenceClassificationWithSubwordsDataFields
constants = ['UNK']
def __init__(self, config, stream_or_file, max_samples=None,
share_vocabs_with=None, **kwargs):
self.config = config
self.max_samples = max_samples
if share_vocabs_with is None:
self.load_or_create_vocabs()
else:
self.vocabs = share_vocabs_with.vocabs
for vocab in self.vocabs:
if vocab:
vocab.frozen = True
global_key = f'{self.config.model_name}_tokenizer'
if global_key in globals():
self.tokenizer = globals()[global_key]
else:
self.tokenizer = AutoTokenizer.from_pretrained(self.config.model_name)
globals()[global_key] = self.tokenizer
        # Vocabs were already created (or shared) above; calling load_or_create_vocabs() again would overwrite them.
self.load_stream_or_file(stream_or_file)
self.to_idx()
self.PAD = self.tokenizer.convert_tokens_to_ids([self.tokenizer.pad_token])[0]
def load_stream(self, stream):
self.raw = []
sent = []
for line in stream:
if not line.strip():
if sent:
sample = self.create_sentence_from_lines(sent)
if not self.ignore_sample(sample):
self.raw.append(sample)
if self.max_samples and len(self.raw) >= self.max_samples:
break
sent = []
else:
sent.append(line.rstrip("\n"))
if sent:
if self.max_samples is None or len(self.raw) < self.max_samples:
sample = self.create_sentence_from_lines(sent)
if not self.ignore_sample(sample):
self.raw.append(sample)
def create_sentence_from_lines(self, lines):
sent = []
labels = []
token_starts = [0]
subwords = [self.tokenizer.cls_token]
for line in lines:
fd = line.rstrip("\n").split("\t")
sent.append(fd[0])
if len(fd) > 1:
labels.append(fd[1])
token_starts.append(len(subwords))
pieces = self.tokenizer.tokenize(fd[0])
subwords.extend(pieces)
token_starts.append(len(subwords))
subwords.append(self.tokenizer.sep_token)
if len(labels) == 0:
labels = None
return self.data_recordclass(
raw_sentence=sent, labels=labels,
sentence_len=len(sent),
subwords=subwords,
sentence_subword_len=len(subwords),
token_starts=token_starts,
)
def ignore_sample(self, sample):
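        # Drop sentences that would not fit within BERT's 512-subword limit (500 leaves some margin).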
return sample.sentence_subword_len > 500
def to_idx(self):
mtx = self.data_recordclass.initialize_all(list)
for sample in self.raw:
mtx.sentence_len.append(sample.sentence_len)
mtx.sentence_subword_len.append(sample.sentence_subword_len)
mtx.token_starts.append(sample.token_starts)
mtx.subwords.append(self.tokenizer.convert_tokens_to_ids(sample.subwords))
if sample.labels is None:
mtx.labels.append(None)
else:
mtx.labels.append([self.vocabs.labels[l] for l in sample.labels])
self.mtx = mtx
if not self.is_unlabeled:
if self.config.sort_data_by_length:
self.sort_data_by_length(sort_field='sentence_subword_len')
def batched_iter(self, batch_size):
starts = list(range(0, len(self), batch_size))
if self.is_unlabeled is False and self.config.shuffle_batches:
np.random.shuffle(starts)
for start in starts:
self._start = start
end = start + batch_size
batch = self.data_recordclass()
maxlen = max(self.mtx.sentence_subword_len[start:end])
subwords = [
s + [self.PAD] * (maxlen-len(s))
for s in self.mtx.subwords[start:end]]
batch.subwords = subwords
if self.mtx.labels[0] is not None:
batch.labels = np.concatenate(self.mtx.labels[start:end])
else:
batch.labels = None
batch.sentence_len = self.mtx.sentence_len[start:end]
padded_token_starts = []
# Include [CLS] and [SEP].
token_maxcount = max(batch.sentence_len) + 2
for si in range(start, min(len(self.mtx.token_starts), end)):
                # Copy before padding so the stored token_starts are not mutated
                # across epochs, and avoid shadowing the outer ``starts`` list.
                tok_starts = list(self.mtx.token_starts[si])
                pad_count = token_maxcount - len(tok_starts)
                tok_starts.extend([0] * pad_count)
                padded_token_starts.append(tok_starts)
batch.token_starts = np.array(padded_token_starts)
batch.sentence_subword_len = self.mtx.sentence_subword_len[start:end]
yield batch
def decode(self, model_output):
offset = 0
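        # model_output is flat over all tokens of all sentences; offset marks where each sentence begins.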
for si, sample in enumerate(self.raw):
labels = []
for ti in range(sample.sentence_len):
label_idx = model_output[offset + ti].argmax()
labels.append(self.vocabs.labels.inv_lookup(label_idx))
sample.labels = labels
offset += sample.sentence_len
def print_sample(self, sample, stream):
stream.write("\n".join(
"{}\t{}".format(sample.raw_sentence[i], sample.labels[i])
for i in range(sample.sentence_len)
))
stream.write("\n")
def print_raw(self, stream):
for si, sample in enumerate(self.raw):
self.print_sample(sample, stream)
if si < len(self.raw) - 1:
stream.write("\n")
class UnlabeledSequenceClassificationWithSubwords(SequenceClassificationWithSubwords):
@property
def is_unlabeled(self):
return True
class SentenceProberDataset(BaseDataset):
unlabeled_data_class = 'UnlabeledSentenceProberDataset'
data_recordclass = TokenInSequenceProberFields
constants = []
def __init__(self, config, stream_or_file, max_samples=None, **kwargs):
self.config = config
self.max_samples = max_samples
global_key = f'{self.config.model_name}_tokenizer'
if global_key in globals():
self.tokenizer = globals()[global_key]
else:
self.tokenizer = AutoTokenizer.from_pretrained(
self.config.model_name)
globals()[global_key] = self.tokenizer
self.MASK = self.tokenizer.mask_token
self.mask_positions = set(self.config.mask_positions)
self.load_or_create_vocabs()
self.load_stream_or_file(stream_or_file)
self.to_idx()
self.tgt_field_idx = -1
        self.max_seqlen = max(s.num_tokens for s in self.raw)  # samples record their length in num_tokens
def load_or_create_vocabs(self):
super().load_or_create_vocabs()
self.vocabs.tokens.PAD = self.tokenizer.convert_tokens_to_ids(
[self.tokenizer.pad_token])[0]
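        # 1000 acts as an out-of-range sentinel used when padding token_starts in batched_iter.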
self.vocabs.token_starts.PAD = 1000
def batched_iter(self, batch_size):
for batch in super().batched_iter(batch_size):
batch.token_starts = np.array(batch.token_starts)
yield batch
def extract_sample_from_line(self, line):
raw_sent, raw_target, raw_idx, label = line.rstrip("\n").split("\t")
raw_idx = int(raw_idx)
# Build a list-of-lists from the tokenized words.
# This allows shuffling it later.
tokenized = [[self.tokenizer.cls_token]]
for ti, token in enumerate(raw_sent.split(" ")):
if ti - raw_idx in self.mask_positions:
pieces = [self.MASK]
else:
pieces = self.tokenizer.tokenize(token)
tokenized.append(pieces)
# Add [SEP] token start.
tokenized.append([self.tokenizer.sep_token])
# Perform BOW.
if self.config.bow:
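            # Bag-of-words control: shuffle the words between [CLS] and [SEP];
            # target_map[j] gives the post-shuffle position of original word j.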
all_idx = np.arange(1, len(tokenized) - 1)
np.random.shuffle(all_idx)
all_idx = np.concatenate(([0], all_idx, [len(tokenized)-1]))
tokenized = [tokenized[i] for i in all_idx]
target_map = np.argsort(all_idx)
# Add 1 to include [CLS].
target_idx = target_map[raw_idx + 1]
else:
# Add 1 to include [CLS].
target_idx = raw_idx + 1
merged = []
token_starts = []
for pieces in tokenized:
token_starts.append(len(merged))
merged.extend(pieces)
return self.data_recordclass(
raw_sentence=raw_sent,
raw_target=raw_target,
raw_idx=raw_idx,
tokens=merged,
num_tokens=len(merged),
target_idx=target_idx,
token_starts=token_starts,
label=label,
)
def ignore_sample(self, sample):
if self.config.exclude_short_sentences is False or self.is_unlabeled:
return False
sent_len = len(sample.raw_sentence.split(" "))
for pi in self.mask_positions:
if sample.raw_idx + pi < 0:
return True
if sample.raw_idx + pi >= sent_len:
return True
return False
def to_idx(self):
mtx = self.data_recordclass.initialize_all(list)
for sample in self.raw:
# int fields
mtx.num_tokens.append(sample.num_tokens)
mtx.target_idx.append(sample.target_idx)
mtx.raw_idx.append(sample.raw_idx)
# int list
mtx.token_starts.append(sample.token_starts)
# sentence
encoded_tokens = self.tokenizer.convert_tokens_to_ids(
sample.tokens)
mtx.tokens.append(encoded_tokens)
# label
if sample.label is None:
mtx.label.append(None)
else:
mtx.label.append(self.vocabs.label[sample.label])
self.mtx = mtx
if not self.is_unlabeled:
if self.config.sort_data_by_length:
                self.sort_data_by_length(sort_field='num_tokens')
@property
def is_unlabeled(self):
return False
def decode(self, model_output):
for i, sample in enumerate(self.raw):
output = model_output[i].argmax().item()
sample.label = self.vocabs.label.inv_lookup(output)
def print_sample(self, sample, stream):
stream.write("{}\t{}\t{}\t{}\n".format(
sample.raw_sentence, sample.raw_target, sample.raw_idx, sample.label
))
class UnlabeledSentenceProberDataset(SentenceProberDataset):
@property
def is_unlabeled(self):
return True
class SentenceRepresentationTokenPairProberDataset(BaseDataset):
unlabeled_data_class = 'UnlabeledSentenceRepresentationTokenPairProberDataset'
data_recordclass = SentenceTokenPairFields
constants = []
def __init__(self, config, stream_or_file, max_samples=None, **kwargs):
self.config = config
self.max_samples = max_samples
global_key = f'{self.config.model_name}_tokenizer'
if global_key in globals():
self.tokenizer = globals()[global_key]
else:
self.tokenizer = AutoTokenizer.from_pretrained(self.config.model_name)
globals()[global_key] = self.tokenizer
self.load_or_create_vocabs()
self.load_stream_or_file(stream_or_file)
self.to_idx()
self.tgt_field_idx = -1
        self.max_seqlen = max(s.sentence_subword_len for s in self.raw)
def load_or_create_vocabs(self):
super().load_or_create_vocabs()
self.vocabs.input.PAD = self.tokenizer.convert_tokens_to_ids(
[self.tokenizer.pad_token])[0]
def extract_sample_from_line(self, line):
fd = line.rstrip("\n").split("\t")
raw_sent = fd[0]
raw_tokens = raw_sent.split(" ")
idx1 = int(fd[3])
idx2 = int(fd[4])
tok1 = fd[1]
tok2 = fd[2]
assert raw_tokens[idx1] == tok1
assert raw_tokens[idx2] == tok2
subwords = [self.tokenizer.cls_token]
token_starts = [0]
if len(fd) > 5:
label = fd[5]
else:
label = None
for ti, token in enumerate(raw_tokens):
token_starts.append(len(subwords))
pieces = self.tokenizer.tokenize(token)
subwords.extend(pieces)
token_starts.append(len(subwords))
subwords.append(self.tokenizer.sep_token)
return self.data_recordclass(
raw_sentence=raw_sent,
idx1=idx1,
idx2=idx2,
token_starts=token_starts,
subwords=subwords,
sentence_subword_len=len(subwords),
label=label,
)
def to_idx(self):
mtx = self.data_recordclass.initialize_all(list)
for sample in self.raw:
# integer fields
for field in ('idx1', 'idx2', 'sentence_subword_len'):
getattr(mtx, field).append(getattr(sample, field))
mtx.subwords.append(
self.tokenizer.convert_tokens_to_ids(sample.subwords)
)
mtx.token_starts.append(sample.token_starts)
if sample.label is None:
mtx.label.append(None)
else:
mtx.label.append(self.vocabs.label[sample.label])
self.mtx = mtx
if not self.is_unlabeled:
if self.config.sort_data_by_length:
self.sort_data_by_length(sort_field='sentence_subword_len')
def batched_iter(self, batch_size):
starts = list(range(0, len(self), batch_size))
if self.is_unlabeled is False and self.config.shuffle_batches:
np.random.shuffle(starts)
for start in starts:
self._start = start
end = start + batch_size
batch = self.data_recordclass.initialize_all(list)
for field in ('idx1', 'idx2', 'sentence_subword_len'):
setattr(batch, field, np.array(getattr(self.mtx, field)[start:end]))
token_starts = self.mtx.token_starts[start:end]
max_ts = max(len(s) for s in token_starts)
token_starts = [
toks + [1000] * (max_ts - len(toks))
for toks in token_starts
]
batch.token_starts = np.array(token_starts)
subwords = self.mtx.subwords[start:end]
maxlen = max(len(s) for s in subwords)
PAD = self.vocabs.input.PAD
subwords = [
subw + [PAD] * (maxlen - len(subw))
for subw in subwords
]
batch.subwords = subwords
batch.label = self.mtx.label[start:end]
yield batch
@property
def is_unlabeled(self):
return False
def decode(self, model_output):
for i, sample in enumerate(self.raw):
output = model_output[i].argmax().item()
sample.label = self.vocabs.label.inv_lookup(output)
def print_sample(self, sample, stream):
toks = sample.raw_sentence.split(" ")
stream.write("\t".join(map(str, (
sample.raw_sentence, toks[sample.idx1], toks[sample.idx2],
sample.idx1, sample.idx2, sample.label
))) + "\n")
class UnlabeledSentenceRepresentationTokenPairProberDataset(SentenceRepresentationTokenPairProberDataset):
@property
def is_unlabeled(self):
return True
| [
"os.path.exists",
"pytorch_pretrained_bert.BertTokenizer.from_pretrained",
"numpy.random.shuffle",
"deep_morphology.data.base_data.Vocab",
"gzip.open",
"numpy.random.random",
"os.path.join",
"numpy.argmax",
"numpy.argsort",
"numpy.array",
"collections.defaultdict",
"transformers.AutoTokenizer.... | [((452, 543), 'recordclass.recordclass', 'recordclass', (['"""SentenceProbeFields"""', "['sentence', 'sentence_len', 'target_idx', 'label']"], {}), "('SentenceProbeFields', ['sentence', 'sentence_len',\n 'target_idx', 'label'])\n", (463, 543), False, 'from recordclass import recordclass\n'), ((4960, 4978), 'numpy.array', 'np.array', (['self.mtx'], {}), '(self.mtx)\n', (4968, 4978), True, 'import numpy as np\n'), ((5478, 5528), 'os.path.join', 'os.path.join', (['self.config.experiment_dir', '"""vocab_"""'], {}), "(self.config.experiment_dir, 'vocab_')\n", (5490, 5528), False, 'import os\n'), ((8829, 8879), 'os.path.join', 'os.path.join', (['self.config.experiment_dir', '"""vocab_"""'], {}), "(self.config.experiment_dir, 'vocab_')\n", (8841, 8879), False, 'import os\n'), ((12490, 12540), 'os.path.join', 'os.path.join', (['self.config.experiment_dir', '"""vocab_"""'], {}), "(self.config.experiment_dir, 'vocab_')\n", (12502, 12540), False, 'import os\n'), ((21514, 21564), 'os.path.join', 'os.path.join', (['self.config.experiment_dir', '"""vocab_"""'], {}), "(self.config.experiment_dir, 'vocab_')\n", (21526, 21564), False, 'import os\n'), ((30587, 30642), 'os.path.join', 'os.path.join', (['self.config.experiment_dir', '"""vocab_label"""'], {}), "(self.config.experiment_dir, 'vocab_label')\n", (30599, 30642), False, 'import os\n'), ((30919, 30943), 'os.path.exists', 'os.path.exists', (['existing'], {}), '(existing)\n', (30933, 30943), False, 'import os\n'), ((37403, 37420), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (37414, 37420), False, 'from collections import defaultdict\n'), ((37446, 37463), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (37457, 37463), False, 'from collections import defaultdict\n'), ((39912, 39967), 'os.path.join', 'os.path.join', (['self.config.experiment_dir', '"""vocab_label"""'], {}), "(self.config.experiment_dir, 'vocab_label')\n", (39924, 39967), False, 'import os\n'), ((39979, 40003), 'os.path.exists', 'os.path.exists', (['existing'], {}), '(existing)\n', (39993, 40003), False, 'import os\n'), ((43373, 43423), 'os.path.join', 'os.path.join', (['self.config.experiment_dir', '"""vocab_"""'], {}), "(self.config.experiment_dir, 'vocab_')\n", (43385, 43423), False, 'import os\n'), ((6011, 6035), 'os.path.exists', 'os.path.exists', (['vocab_fn'], {}), '(vocab_fn)\n', (6025, 6035), False, 'import os\n'), ((6412, 6497), 'os.path.join', 'os.path.join', (["os.environ['HOME']", '"""resources"""', '"""fasttext"""', 'language', '"""common.vec"""'], {}), "(os.environ['HOME'], 'resources', 'fasttext', language,\n 'common.vec')\n", (6424, 6497), False, 'import os\n'), ((6838, 6879), 'numpy.random.permutation', 'np.random.permutation', (['self.embedding.mtx'], {}), '(self.embedding.mtx)\n', (6859, 6879), True, 'import numpy as np\n'), ((6976, 7018), 'numpy.random.random', 'np.random.random', (['self.embedding.mtx.shape'], {}), '(self.embedding.mtx.shape)\n', (6992, 7018), True, 'import numpy as np\n'), ((8275, 8300), 'numpy.random.shuffle', 'np.random.shuffle', (['starts'], {}), '(starts)\n', (8292, 8300), True, 'import numpy as np\n'), ((9138, 9162), 'os.path.exists', 'os.path.exists', (['vocab_fn'], {}), '(vocab_fn)\n', (9152, 9162), False, 'import os\n'), ((9672, 9757), 'os.path.join', 'os.path.join', (["os.environ['HOME']", '"""resources"""', '"""fasttext"""', 'language', '"""common.vec"""'], {}), "(os.environ['HOME'], 'resources', 'fasttext', language,\n 'common.vec')\n", (9684, 9757), False, 
'import os\n'), ((11646, 11671), 'numpy.random.shuffle', 'np.random.shuffle', (['starts'], {}), '(starts)\n', (11663, 11671), True, 'import numpy as np\n'), ((13023, 13047), 'os.path.exists', 'os.path.exists', (['vocab_fn'], {}), '(vocab_fn)\n', (13037, 13047), False, 'import os\n'), ((14542, 14567), 'numpy.random.shuffle', 'np.random.shuffle', (['starts'], {}), '(starts)\n', (14559, 14567), True, 'import numpy as np\n'), ((16663, 16725), 'pytorch_pretrained_bert.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['model_name'], {'do_lower_case': '(False)'}), '(model_name, do_lower_case=False)\n', (16692, 16725), False, 'from pytorch_pretrained_bert import BertTokenizer\n'), ((19476, 19501), 'numpy.random.shuffle', 'np.random.shuffle', (['starts'], {}), '(starts)\n', (19493, 19501), True, 'import numpy as np\n'), ((22047, 22071), 'os.path.exists', 'os.path.exists', (['vocab_fn'], {}), '(vocab_fn)\n', (22061, 22071), False, 'import os\n'), ((24910, 24942), 'numpy.arange', 'np.arange', (['start_rand', 'bert_size'], {}), '(start_rand, bert_size)\n', (24919, 24942), True, 'import numpy as np\n'), ((24955, 24984), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_range'], {}), '(rand_range)\n', (24972, 24984), True, 'import numpy as np\n'), ((25602, 25624), 'numpy.array', 'np.array', (['continuation'], {}), '(continuation)\n', (25610, 25624), True, 'import numpy as np\n'), ((25637, 25661), 'numpy.random.shuffle', 'np.random.shuffle', (['crand'], {}), '(crand)\n', (25654, 25661), True, 'import numpy as np\n'), ((25736, 25753), 'numpy.array', 'np.array', (['initial'], {}), '(initial)\n', (25744, 25753), True, 'import numpy as np\n'), ((25766, 25790), 'numpy.random.shuffle', 'np.random.shuffle', (['irand'], {}), '(irand)\n', (25783, 25790), True, 'import numpy as np\n'), ((28874, 28900), 'numpy.argmax', 'np.argmax', (['model_output[i]'], {}), '(model_output[i])\n', (28883, 28900), True, 'import numpy as np\n'), ((29904, 29966), 'pytorch_pretrained_bert.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['model_name'], {'do_lower_case': '(False)'}), '(model_name, do_lower_case=False)\n', (29933, 29966), False, 'from pytorch_pretrained_bert import BertTokenizer\n'), ((30098, 30146), 'logging.info', 'logging.info', (['"""Randomizing WordPiece vocabulary"""'], {}), "('Randomizing WordPiece vocabulary')\n", (30110, 30146), False, 'import logging\n'), ((30713, 30774), 'os.path.join', 'os.path.join', (['self.config.experiment_dir', '"""random_bert_vocab"""'], {}), "(self.config.experiment_dir, 'random_bert_vocab')\n", (30725, 30774), False, 'import os\n'), ((30790, 30808), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (30804, 30808), False, 'import os\n'), ((30965, 30998), 'deep_morphology.data.base_data.Vocab', 'Vocab', ([], {'file': 'existing', 'frozen': '(True)'}), '(file=existing, frozen=True)\n', (30970, 30998), False, 'from deep_morphology.data.base_data import BaseDataset, Vocab, DataFields\n'), ((31033, 31052), 'deep_morphology.data.base_data.Vocab', 'Vocab', ([], {'constants': '[]'}), '(constants=[])\n', (31038, 31052), False, 'from deep_morphology.data.base_data import BaseDataset, Vocab, DataFields\n'), ((35929, 35954), 'numpy.random.shuffle', 'np.random.shuffle', (['starts'], {}), '(starts)\n', (35946, 35954), True, 'import numpy as np\n'), ((40025, 40058), 'deep_morphology.data.base_data.Vocab', 'Vocab', ([], {'file': 'existing', 'frozen': '(True)'}), '(file=existing, frozen=True)\n', (40030, 40058), False, 'from deep_morphology.data.base_data 
import BaseDataset, Vocab, DataFields\n'), ((40093, 40112), 'deep_morphology.data.base_data.Vocab', 'Vocab', ([], {'constants': '[]'}), '(constants=[])\n', (40098, 40112), False, 'from deep_morphology.data.base_data import BaseDataset, Vocab, DataFields\n'), ((41477, 41502), 'numpy.random.shuffle', 'np.random.shuffle', (['starts'], {}), '(starts)\n', (41494, 41502), True, 'import numpy as np\n'), ((43794, 43818), 'os.path.exists', 'os.path.exists', (['vocab_fn'], {}), '(vocab_fn)\n', (43808, 43818), False, 'import os\n'), ((48724, 48777), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self.config.model_name'], {}), '(self.config.model_name)\n', (48753, 48777), False, 'from transformers import AutoTokenizer\n'), ((51681, 51706), 'numpy.random.shuffle', 'np.random.shuffle', (['starts'], {}), '(starts)\n', (51698, 51706), True, 'import numpy as np\n'), ((52784, 52813), 'numpy.array', 'np.array', (['padded_token_starts'], {}), '(padded_token_starts)\n', (52792, 52813), True, 'import numpy as np\n'), ((54402, 54455), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self.config.model_name'], {}), '(self.config.model_name)\n', (54431, 54455), False, 'from transformers import AutoTokenizer\n'), ((55198, 55226), 'numpy.array', 'np.array', (['batch.token_starts'], {}), '(batch.token_starts)\n', (55206, 55226), True, 'import numpy as np\n'), ((56016, 56042), 'numpy.random.shuffle', 'np.random.shuffle', (['all_idx'], {}), '(all_idx)\n', (56033, 56042), True, 'import numpy as np\n'), ((56197, 56216), 'numpy.argsort', 'np.argsort', (['all_idx'], {}), '(all_idx)\n', (56207, 56216), True, 'import numpy as np\n'), ((59271, 59324), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self.config.model_name'], {}), '(self.config.model_name)\n', (59300, 59324), False, 'from transformers import AutoTokenizer\n'), ((61777, 61802), 'numpy.random.shuffle', 'np.random.shuffle', (['starts'], {}), '(starts)\n', (61794, 61802), True, 'import numpy as np\n'), ((62401, 62423), 'numpy.array', 'np.array', (['token_starts'], {}), '(token_starts)\n', (62409, 62423), True, 'import numpy as np\n'), ((4355, 4386), 'gzip.open', 'gzip.open', (['embedding_file', '"""rt"""'], {}), "(embedding_file, 'rt')\n", (4364, 4386), False, 'import gzip\n'), ((52161, 52203), 'numpy.concatenate', 'np.concatenate', (['self.mtx.labels[start:end]'], {}), '(self.mtx.labels[start:end])\n', (52175, 52203), True, 'import numpy as np\n'), ((6081, 6114), 'deep_morphology.data.base_data.Vocab', 'Vocab', ([], {'file': 'vocab_fn', 'frozen': '(True)'}), '(file=vocab_fn, frozen=True)\n', (6086, 6114), False, 'from deep_morphology.data.base_data import BaseDataset, Vocab, DataFields\n'), ((6178, 6204), 'deep_morphology.data.base_data.Vocab', 'Vocab', ([], {'constants': 'constants'}), '(constants=constants)\n', (6183, 6204), False, 'from deep_morphology.data.base_data import BaseDataset, Vocab, DataFields\n'), ((9208, 9241), 'deep_morphology.data.base_data.Vocab', 'Vocab', ([], {'file': 'vocab_fn', 'frozen': '(True)'}), '(file=vocab_fn, frozen=True)\n', (9213, 9241), False, 'from deep_morphology.data.base_data import BaseDataset, Vocab, DataFields\n'), ((9305, 9331), 'deep_morphology.data.base_data.Vocab', 'Vocab', ([], {'constants': 'constants'}), '(constants=constants)\n', (9310, 9331), False, 'from deep_morphology.data.base_data import BaseDataset, Vocab, DataFields\n'), ((13093, 13126), 'deep_morphology.data.base_data.Vocab', 'Vocab', ([], {'file': 
'vocab_fn', 'frozen': '(True)'}), '(file=vocab_fn, frozen=True)\n', (13098, 13126), False, 'from deep_morphology.data.base_data import BaseDataset, Vocab, DataFields\n'), ((13190, 13216), 'deep_morphology.data.base_data.Vocab', 'Vocab', ([], {'constants': 'constants'}), '(constants=constants)\n', (13195, 13216), False, 'from deep_morphology.data.base_data import BaseDataset, Vocab, DataFields\n'), ((22117, 22150), 'deep_morphology.data.base_data.Vocab', 'Vocab', ([], {'file': 'vocab_fn', 'frozen': '(True)'}), '(file=vocab_fn, frozen=True)\n', (22122, 22150), False, 'from deep_morphology.data.base_data import BaseDataset, Vocab, DataFields\n'), ((22214, 22240), 'deep_morphology.data.base_data.Vocab', 'Vocab', ([], {'constants': 'constants'}), '(constants=constants)\n', (22219, 22240), False, 'from deep_morphology.data.base_data import BaseDataset, Vocab, DataFields\n'), ((25026, 25047), 'numpy.arange', 'np.arange', (['start_rand'], {}), '(start_rand)\n', (25035, 25047), True, 'import numpy as np\n'), ((31719, 31746), 'numpy.random.shuffle', 'np.random.shuffle', (['perm_idx'], {}), '(perm_idx)\n', (31736, 31746), True, 'import numpy as np\n'), ((31777, 31797), 'numpy.argsort', 'np.argsort', (['perm_idx'], {}), '(perm_idx)\n', (31787, 31797), True, 'import numpy as np\n'), ((43864, 43897), 'deep_morphology.data.base_data.Vocab', 'Vocab', ([], {'file': 'vocab_fn', 'frozen': '(True)'}), '(file=vocab_fn, frozen=True)\n', (43869, 43897), False, 'from deep_morphology.data.base_data import BaseDataset, Vocab, DataFields\n'), ((43961, 43987), 'deep_morphology.data.base_data.Vocab', 'Vocab', ([], {'constants': 'constants'}), '(constants=constants)\n', (43966, 43987), False, 'from deep_morphology.data.base_data import BaseDataset, Vocab, DataFields\n')] |
"""
This module implements several agents in which the Q function is approximated
"""
from agent import IndQLearningAgentSoftmax, Level2QAgent
import numpy as np
import jax
import jax.numpy as jnp
from jax.experimental import optimizers
from jax.experimental import stax
from jax.experimental.stax import Dense, Relu, LogSoftmax
from numpy.random import choice
from scipy.special import softmax
from scipy.signal import convolve
def stable_softmax(x):
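    # Shift by the max before exponentiating: softmax is shift-invariant and this prevents overflow in exp().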
z = x - max(x)
numerator = np.exp(z)
denominator = np.sum(numerator)
softmax = numerator/denominator
return softmax
class RegressionIndQLearningSoftmax(IndQLearningAgentSoftmax):
def __init__(self, action_space, n_states, learning_rate, epsilon, gamma, enemy_action_space=None):
IndQLearningAgentSoftmax.__init__(self, action_space, n_states, learning_rate, epsilon, gamma, enemy_action_space)
# Regression weights
self.n_a = len(action_space)
self.w = 0.001*np.random.randn(9*4, len(action_space))
#self.grad_fn = jax.jit(jax.grad(Q_val))
def act(self, obs=None):
obs_flat = obs.flatten()
Q = np.dot(obs_flat, self.w)
p = stable_softmax(Q)
#print(Q)
#print(p)
#return np.argmax(np.dot(obs_flat, self.w))
return choice(self.action_space, p=p)
def update(self, obs, actions, rewards, new_obs):
"""The vanilla Q-learning update rule"""
a0, _ = actions
r0, _ = rewards
Qp = np.dot(new_obs.flatten(), self.w)
Q = np.dot(obs.flatten(), self.w)
#w_jax = jnp.array(self.w)
#grad = self.grad_fn(w_jax, obs, a0)[:, a0]
#print(grad)
grad = obs.flatten()
#print(grad)
#grad = np.clip(grad, -2, 2)
#print(grad.shape)
#print((r0 + self.gamma*jnp.max(Qp) - Q[a0]).shape)
self.w[:, a0] = self.w[:, a0] + self.alpha*(r0 + self.gamma*np.max(Qp) - Q[a0])*grad
#self.Q[obs, a0] = (1 - self.alpha)*self.Q[obs, a0] + self.alpha*(r0 + self.gamma*np.max(self.Q[new_obs, :]))
class DQN(IndQLearningAgentSoftmax):
def __init__(self, action_space, n_states, learning_rate, epsilon, gamma, enemy_action_space=None):
IndQLearningAgentSoftmax.__init__(self, action_space, n_states, learning_rate, epsilon, gamma, enemy_action_space)
# Regression weights
self.n_a = len(action_space)
self.w = 0.001*np.random.randn(9*4, len(action_space))
self.W1 = np.random.normal(0, 2 / np.sqrt(4*2 * 2), size=(4, 2, 2))
def relu(self, x):
return np.where(x>0,x,0)
def relu_prime(self, x):
return np.where(x>0,1,0)
    def forward(self, W1, W2, obs, y):
        # Partial convolutional forward pass (W2 and y are currently unused).
        l0 = np.einsum('ijk->kji', obs)  # transpose the observation axes
        l0_conv = convolve(l0, W1[::-1, ::-1], 'same', 'direct')
        l1 = self.relu(l0_conv)
        return l1
def act(self, obs=None):
self.forward(self.W1, self.W1, obs, None)
obs_flat = obs.flatten()
Q = np.dot(obs_flat, self.w)
p = stable_softmax(Q)
#print(Q)
#print(p)
#return np.argmax(np.dot(obs_flat, self.w))
return choice(self.action_space, p=p)
def update(self, obs, actions, rewards, new_obs):
"""The vanilla Q-learning update rule"""
a0, _ = actions
r0, _ = rewards
Qp = np.dot(new_obs.flatten(), self.w)
Q = np.dot(obs.flatten(), self.w)
#w_jax = jnp.array(self.w)
#grad = self.grad_fn(w_jax, obs, a0)[:, a0]
#print(grad)
grad = obs.flatten()
#print(grad)
#grad = np.clip(grad, -2, 2)
#print(grad.shape)
#print((r0 + self.gamma*jnp.max(Qp) - Q[a0]).shape)
self.w[:, a0] = self.w[:, a0] + self.alpha*(r0 + self.gamma*np.max(Qp) - Q[a0])*grad
#self.Q[obs, a0] = (1 - self.alpha)*self.Q[obs, a0] + self.alpha*(r0 + self.gamma*np.max(self.Q[new_obs, :]))
class RegressionLevel2QAgentSoftmax(Level2QAgent):
"""
A Q-learning agent that treats the other player as a level 1 agent.
She learns from other's actions, estimating their Q function.
She represents Q-values in a tabular fashion, i.e., using a matrix Q.
"""
def __init__(self, action_space, enemy_action_space, n_states, learning_rate, epsilon, gamma):
Level2QAgent.__init__(self, action_space, enemy_action_space, n_states, learning_rate, epsilon, gamma)
self.enemy = RegressionIndQLearningSoftmax(self.enemy_action_space, self.n_states,
learning_rate=self.alphaB, epsilon=self.epsilonB, gamma=self.gammaB)
self.w = 0.001*np.random.randn(9*4, len(action_space), len(enemy_action_space))
def act(self, obs=None):
b = self.enemy.act(obs)
obs_flat = obs.flatten()
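        # Level-2 reasoning: condition our Q-values on the opponent's predicted action b.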
Q = np.einsum('i,ijk->jk', obs_flat, self.w)[:, b]
#Q = np.dot(obs_flat, self.w)
p = stable_softmax(Q)
return choice(self.action_space, p=p)
def update(self, obs, actions, rewards, new_obs):
"""The vanilla Q-learning update rule"""
a, b = actions
rA, rB = rewards
self.enemy.update(obs, [b,a], [rB, rA], new_obs )
# We obtain opponent's next action using Q_B
bb = self.enemy.act(obs)
#Qp = np.dot(new_obs.flatten(), self.w)
Qp = np.einsum('i,ijk->jk', new_obs.flatten(), self.w)
#Q = np.dot(obs.flatten(), self.w)
Q = np.einsum('i,ijk->jk', obs.flatten(), self.w)
grad = obs.flatten()
# Finally we update the supported agent's Q-function
self.w[:, a, b] = self.w[:, a, b] + self.alphaA*(rA + self.gammaA*np.max(Qp[:, bb]) - Q[a, b])*grad
#self.QA[obs, a, b] = (1 - self.alphaA)*self.QA[obs, a, b] + self.alphaA*(rA + self.gammaA*np.max(self.QA[new_obs, :, bb])) | [
"agent.IndQLearningAgentSoftmax.__init__",
"scipy.signal.convolve",
"numpy.sqrt",
"agent.Level2QAgent.__init__",
"numpy.random.choice",
"numpy.where",
"numpy.max",
"numpy.exp",
"numpy.sum",
"numpy.dot",
"numpy.einsum"
] | [((491, 500), 'numpy.exp', 'np.exp', (['z'], {}), '(z)\n', (497, 500), True, 'import numpy as np\n'), ((519, 536), 'numpy.sum', 'np.sum', (['numerator'], {}), '(numerator)\n', (525, 536), True, 'import numpy as np\n'), ((770, 888), 'agent.IndQLearningAgentSoftmax.__init__', 'IndQLearningAgentSoftmax.__init__', (['self', 'action_space', 'n_states', 'learning_rate', 'epsilon', 'gamma', 'enemy_action_space'], {}), '(self, action_space, n_states,\n learning_rate, epsilon, gamma, enemy_action_space)\n', (803, 888), False, 'from agent import IndQLearningAgentSoftmax, Level2QAgent\n'), ((1156, 1180), 'numpy.dot', 'np.dot', (['obs_flat', 'self.w'], {}), '(obs_flat, self.w)\n', (1162, 1180), True, 'import numpy as np\n'), ((1314, 1344), 'numpy.random.choice', 'choice', (['self.action_space'], {'p': 'p'}), '(self.action_space, p=p)\n', (1320, 1344), False, 'from numpy.random import choice\n'), ((2241, 2359), 'agent.IndQLearningAgentSoftmax.__init__', 'IndQLearningAgentSoftmax.__init__', (['self', 'action_space', 'n_states', 'learning_rate', 'epsilon', 'gamma', 'enemy_action_space'], {}), '(self, action_space, n_states,\n learning_rate, epsilon, gamma, enemy_action_space)\n', (2274, 2359), False, 'from agent import IndQLearningAgentSoftmax, Level2QAgent\n'), ((2601, 2622), 'numpy.where', 'np.where', (['(x > 0)', 'x', '(0)'], {}), '(x > 0, x, 0)\n', (2609, 2622), True, 'import numpy as np\n'), ((2668, 2689), 'numpy.where', 'np.where', (['(x > 0)', '(1)', '(0)'], {}), '(x > 0, 1, 0)\n', (2676, 2689), True, 'import numpy as np\n'), ((2740, 2766), 'numpy.einsum', 'np.einsum', (['"""ijk->kji"""', 'obs'], {}), "('ijk->kji', obs)\n", (2749, 2766), True, 'import numpy as np\n'), ((2812, 2858), 'scipy.signal.convolve', 'convolve', (['l0', 'W1[::-1, ::-1]', '"""same"""', '"""direct"""'], {}), "(l0, W1[::-1, ::-1], 'same', 'direct')\n", (2820, 2858), False, 'from scipy.signal import convolve\n'), ((3043, 3067), 'numpy.dot', 'np.dot', (['obs_flat', 'self.w'], {}), '(obs_flat, self.w)\n', (3049, 3067), True, 'import numpy as np\n'), ((3201, 3231), 'numpy.random.choice', 'choice', (['self.action_space'], {'p': 'p'}), '(self.action_space, p=p)\n', (3207, 3231), False, 'from numpy.random import choice\n'), ((4368, 4474), 'agent.Level2QAgent.__init__', 'Level2QAgent.__init__', (['self', 'action_space', 'enemy_action_space', 'n_states', 'learning_rate', 'epsilon', 'gamma'], {}), '(self, action_space, enemy_action_space, n_states,\n learning_rate, epsilon, gamma)\n', (4389, 4474), False, 'from agent import IndQLearningAgentSoftmax, Level2QAgent\n'), ((4978, 5008), 'numpy.random.choice', 'choice', (['self.action_space'], {'p': 'p'}), '(self.action_space, p=p)\n', (4984, 5008), False, 'from numpy.random import choice\n'), ((4848, 4888), 'numpy.einsum', 'np.einsum', (['"""i,ijk->jk"""', 'obs_flat', 'self.w'], {}), "('i,ijk->jk', obs_flat, self.w)\n", (4857, 4888), True, 'import numpy as np\n'), ((2528, 2546), 'numpy.sqrt', 'np.sqrt', (['(4 * 2 * 2)'], {}), '(4 * 2 * 2)\n', (2535, 2546), True, 'import numpy as np\n'), ((1946, 1956), 'numpy.max', 'np.max', (['Qp'], {}), '(Qp)\n', (1952, 1956), True, 'import numpy as np\n'), ((3833, 3843), 'numpy.max', 'np.max', (['Qp'], {}), '(Qp)\n', (3839, 3843), True, 'import numpy as np\n'), ((5685, 5702), 'numpy.max', 'np.max', (['Qp[:, bb]'], {}), '(Qp[:, bb])\n', (5691, 5702), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Train and track model on MLFlow.
Usage
-----
To be used as part of scheduled Continuous Training workflow.
"""
import os
import pickle
import re
import json
from datetime import datetime as dt
from datetime import date, timedelta
import datetime
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_validate
from sklearn.model_selection import ParameterSampler
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import BernoulliNB
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import make_scorer
from imblearn.over_sampling import SMOTE
from textblob import TextBlob
from textblob.sentiments import NaiveBayesAnalyzer
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from nltk.stem import WordNetLemmatizer
from google.oauth2 import service_account
from google.api_core.client_options import ClientOptions
from google.auth.transport.requests import AuthorizedSession
import mlflow
import mlflow.sklearn
import nltk
import pandas as pd
import numpy as np
import gspread
import gdown
import requests
import google.cloud.storage
from app import load_data_gdrive, load_google_worksheet
from text_preprocessing import remove_link
def load_google_worksheet_from_info(private_key_id, private_key, client_id, sheet_url):
"""
This function directly:
- Obtains credentials from Github secrets
- Builds credentials object to connect with Googlesheet containing collected user input via streamlit app
- Accesses the Googlesheet
- Loads all data from the Googlesheet into pandas
See app.build_connection() for more info.
Parameters
----------
private_key_id : str
Required credential to access Google Drive. Stored in Github secrets.
private_key : str
Required credential to access Google Drive. Stored in Github secrets.
client_id : str
Required credential to access Google Drive. Stored in Github secrets.
sheet_url : str
Link to Google sheet. Stored in Github secrets.
Returns
-------
pandas.core.frame.DataFrame
Dataframe containing all rows from google worksheet returned by app.load_google_worksheet().
"""
info = {
'type': "service_account",
'project_id': "quixotic-card-325716",
'private_key_id': private_key_id,
'private_key': private_key,
'client_email': "<EMAIL>",
'client_id': client_id,
'auth_uri': "https://accounts.google.com/o/oauth2/auth",
'token_uri': "https://oauth2.googleapis.com/token",
'auth_provider_x509_cert_url': "https://www.googleapis.com/oauth2/v1/certs",
'client_x509_cert_url': "https://www.googleapis.com/robot/v1/metadata/x509/tweet-sentiment%40quixotic-card-325716.iam.gserviceaccount.com"
}
# Create credential object
credentials = service_account.Credentials.from_service_account_info(info)
scoped_credentials = credentials.with_scopes(
['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive'])
# Initialize gspread Client instance
gc = gspread.Client(auth=scoped_credentials)
gc.session = AuthorizedSession(scoped_credentials)
# Get Googlesheet url from stored github secrets
sheet_url = sheet_url
# Access the Googlesheet via shared link
sheet = gc.open_by_url(sheet_url)
# Return dataframe containing data in 1st worksheet of the accessed Googlesheet
return load_google_worksheet(sheet.get_worksheet(0))
def load_data(private_key_id, private_key, client_id, sheet_url):
"""
Load data stored in Google Drive and user input data stored in Googlesheet
Parameters
----------
private_key_id : str
Required credential to access Google Drive. Stored in Github secrets.
private_key : str
Required credential to access Google Drive. Stored in Github secrets.
client_id : str
Required credential to access Google Drive. Stored in Github secrets.
sheet_url : str
Link to Google sheet. Stored in Github secrets.
Returns
-------
df : pandas.core.frame.DataFrame
Dataframe containing all scraped and previously cleaned data.
df_googlesheet : pandas.core.frame.DataFrame
Dataframe containing all rows from google worksheet returned by app.load_google_worksheet().
"""
# Load data
data_file_id = '1XiABfco1-NpSwSjl32BAUS_HqPrBFYzD' # File ID for Tweet data stored in Google Drive
# Load all scraped data
df = load_data_gdrive(data_file_id)[['tweet']]
# Load data from user-validation google sheet (collected from streamlit app)
df_googlesheet = load_google_worksheet_from_info(private_key_id, private_key, client_id, sheet_url)
# Remove duplicates
df_googlesheet = df_googlesheet.drop_duplicates(subset=['tweet', 'polarity'])
return df, df_googlesheet
def preprocess_data(df, df_googlesheet):
"""
Preprocess data prior to training by performing:
- Classification using TextBlob and VaderSentiment to obtain class label
- Class label consolidation
- Removing 'neutral' polarity
- Removing unused columns
Parameters
----------
df : pandas.core.frame.DataFrame
Dataframe containing all scraped and previously cleaned data.
df_googlesheet : pandas.core.frame.DataFrame
Dataframe containing all rows from google worksheet returned by app.load_google_worksheet().
Returns
-------
df_final : pandas.core.frame.DataFrame
Preprocessed data.
"""
# Amend polarity column (0 = negative, 1 = positive)
# If original predicted polarity is positive, change it to 0 (negative)
df_googlesheet['user_validated'] = df_googlesheet['polarity'].apply(lambda x: 0 if x == 'positive' else 1)
# Keep only required columns
df_googlesheet_final = df_googlesheet[['tweet', 'user_validated']]
## Classification using TextBlob sentiment
# Get polarity score using TextBlob sentiment
df['textblob_polarity'] = df['tweet'].apply(lambda x: TextBlob(x).sentiment.polarity)
# Extract polarity from scores
"""
negative sentiment: (polarity score < 0) and (polarity score >= -1)
positive sentiment: (polarity score >= 0) and (polarity score <= 1)
"""
# Create a list of polarity conditions
textblob_conditions = [
(df['textblob_polarity'] < 0) & (df['textblob_polarity'][0] >= -1),
(df['textblob_polarity'] == 0),
(df['textblob_polarity'][0] >= 0) & (df['textblob_polarity'][0] <= 1)
]
# Create a list of values to assign to each polarity conditions (0 = negative, -1 = neutral, 1 = positive)
textblob_values = [0, -1, 1]
# Create a new column in the original df and use np.select to assign values to it using the lists as arguments
df['textblob'] = np.select(textblob_conditions, textblob_values)
## Classification using Vader sentiment
# Initialize Vader sentiment
vader = SentimentIntensityAnalyzer()
# Get polarity scores using VaderSentiment
df['vader_polarity'] = df['tweet'].apply(lambda x: vader.polarity_scores(x)['compound'])
# Extract polarity from scores
"""
negative sentiment: compound score <= -0.05
neutral sentiment: (compound score > -0.05) and (compound score < 0.05)
positive sentiment: compound score >= 0.05
"""
# Create a list of polarity conditions
vader_conditions = [
(df['vader_polarity'] <= -0.05),
(df['vader_polarity'] > -0.05) & (df['vader_polarity'] < 0.05),
(df['vader_polarity'] >= 0.05)
]
# Create a list of values to assign to each polarity conditions (0 = negative, -1 = neutral, 1 = positive)
vader_values = [0, -1, 1]
# Create a new column in the original df and use np.select to assign values to it using the lists as arguments
df['vader'] = np.select(vader_conditions, vader_values)
## Class label consolidation
# Keep only required cols & remove rows where vader and textblob gives different polarity
df2 = df[df['vader'] == df['textblob']][['tweet', 'textblob', 'vader']]
# Remove rows with neutral polarity
df2 = df2[df2['vader'] != -1]
# Add data from googlesheet
df_final = pd.concat([df2, df_googlesheet_final])
# Consolidate polarity column
# If value from textblob col is null, get value from user_validated, otherwise get value from textblob col
df_final['polarity'] = np.where(pd.isna(df_final['textblob']), df_final['user_validated'], df_final['textblob'])
# Keep only required columns
df_final = df_final[['tweet', 'polarity']]
return df_final
def resample_data(df_final, k_neighbors):
"""
Resamples data using SMOTE and split resampled data into training and testing sets.
Parameters
----------
    df_final : pandas.core.frame.DataFrame
        Preprocessed data with 'tweet' and 'polarity' columns.
    k_neighbors : int
        Value to set for the SMOTE() k_neighbors parameter.
Returns
-------
X_train : scipy.sparse.csr.csr_matrix
Split array containing training set.
X_test : scipy.sparse.csr.csr_matrix
Split array containing test set.
y_train : pandas.core.series.Series
Split array containing class label of training set.
y_test : pandas.core.series.Series
Split array containing class label of testing set.
tfidf : sklearn.feature_extraction.text.TfidfVectorizer
Initialized vectorizer with custom preprocessor.
"""
# Select X and Y variable
# (Since both vader and textblob now have the same polarity, either one is choosen as Y variable)
X = df_final['tweet']
y = df_final['polarity']
# Vectorize using bigram tfidf
tfidf = TfidfVectorizer(lowercase=False, ngram_range=(1, 1), preprocessor=remove_link)
# Fit and transform
Xtfidf = tfidf.fit_transform(X)
# Create artificial datapoints for minority class label using smote
smote = SMOTE(random_state=1, k_neighbors=k_neighbors)
X_smote, y_smote = smote.fit_resample(Xtfidf, y)
# Split Train and Test set
X_train, X_test, y_train, y_test = train_test_split(X_smote, y_smote, test_size=0.2, random_state=1)
return X_train, X_test, y_train, y_test, tfidf
if __name__ == '__main__':
# Get secrets from env
private_key_id = os.environ['GSA_PRIVATE_KEY_ID']
private_key = os.environ['GSA_PRIVATE_KEY'].replace('\\n', '\n')
client_id = os.environ['GSA_CLIENT_ID']
sheet_url = os.environ["GSA_PRIVATE_GSHEETS_URL"]
# Set mlflow tracking config
experiment_name = "SentimentAnalysis"
tracking_uri = os.environ.get('MLFLOW_TRACKING_URI')
# Set experiment name
mlflow.set_experiment(experiment_name)
experiment = mlflow.get_experiment_by_name(experiment_name)
# Set path to log
mlflow.set_tracking_uri(tracking_uri)
# Set metrics
metric = {'matthews_corrcoef': make_scorer(matthews_corrcoef),
'accuracy': 'accuracy',
'f1': 'f1',
'precision': 'precision',
'recall': 'recall',
'neg_log_loss': 'neg_log_loss'}
# Set hyperparams and params
hyperparams = {'k_neighbors': range(1, 21), # SMOTE
'alpha': range(0, 3)}
params = {'cv_folds': 5,
'n_iter': 6}
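    # Randomly sample n_iter hyperparameter combinations (random search over the grid).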
param_list = list(ParameterSampler(hyperparams, n_iter=params['n_iter'], random_state=0))
# Load data
df, df_googlesheet = load_data(private_key_id, private_key, client_id, sheet_url)
# Preprocess data
df_final = preprocess_data(df, df_googlesheet)
for run in range(params['n_iter']):
run_hyperparams = param_list[run]
with mlflow.start_run(experiment_id=experiment.experiment_id):
# Resample data
            X_train, X_test, y_train, y_test, tfidf = resample_data(df_final, run_hyperparams['k_neighbors'])
# Build pipeline
model = Pipeline([('nb', BernoulliNB(alpha=run_hyperparams['alpha']))])
# Train and score
model.fit(X_train, y_train)
model.score(X_test, y_test)
# Assemble final pipe
pipe_final = Pipeline([('vectorizer', tfidf),
('nb', model.steps[0][1])])
# Perform cross validation
scores = cross_validate(model, X_train, y_train, scoring=metric)
metrics_dict = {}
for m in metric:
metrics_dict[m] = np.mean(scores['test_' + m])
# Log model parameters
mlflow.log_params(run_hyperparams)
# Log model metrics
mlflow.log_metrics(metrics_dict)
# Log model and create version
mlflow.sklearn.log_model(
sk_model=pipe_final,
artifact_path="model",
registered_model_name="SentimentAnalysisClassifier")
| [
"google.oauth2.service_account.Credentials.from_service_account_info",
"mlflow.set_experiment",
"mlflow.sklearn.log_model",
"sklearn.naive_bayes.BernoulliNB",
"numpy.mean",
"textblob.TextBlob",
"numpy.select",
"mlflow.set_tracking_uri",
"gspread.Client",
"sklearn.model_selection.ParameterSampler",... | [((3069, 3128), 'google.oauth2.service_account.Credentials.from_service_account_info', 'service_account.Credentials.from_service_account_info', (['info'], {}), '(info)\n', (3122, 3128), False, 'from google.oauth2 import service_account\n'), ((3331, 3370), 'gspread.Client', 'gspread.Client', ([], {'auth': 'scoped_credentials'}), '(auth=scoped_credentials)\n', (3345, 3370), False, 'import gspread\n'), ((3388, 3425), 'google.auth.transport.requests.AuthorizedSession', 'AuthorizedSession', (['scoped_credentials'], {}), '(scoped_credentials)\n', (3405, 3425), False, 'from google.auth.transport.requests import AuthorizedSession\n'), ((7081, 7128), 'numpy.select', 'np.select', (['textblob_conditions', 'textblob_values'], {}), '(textblob_conditions, textblob_values)\n', (7090, 7128), True, 'import numpy as np\n'), ((7219, 7247), 'vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (7245, 7247), False, 'from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n'), ((8120, 8161), 'numpy.select', 'np.select', (['vader_conditions', 'vader_values'], {}), '(vader_conditions, vader_values)\n', (8129, 8161), True, 'import numpy as np\n'), ((8489, 8527), 'pandas.concat', 'pd.concat', (['[df2, df_googlesheet_final]'], {}), '([df2, df_googlesheet_final])\n', (8498, 8527), True, 'import pandas as pd\n'), ((9885, 9963), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'lowercase': '(False)', 'ngram_range': '(1, 1)', 'preprocessor': 'remove_link'}), '(lowercase=False, ngram_range=(1, 1), preprocessor=remove_link)\n', (9900, 9963), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((10110, 10156), 'imblearn.over_sampling.SMOTE', 'SMOTE', ([], {'random_state': '(1)', 'k_neighbors': 'k_neighbors'}), '(random_state=1, k_neighbors=k_neighbors)\n', (10115, 10156), False, 'from imblearn.over_sampling import SMOTE\n'), ((10281, 10346), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_smote', 'y_smote'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(X_smote, y_smote, test_size=0.2, random_state=1)\n', (10297, 10346), False, 'from sklearn.model_selection import train_test_split\n'), ((10772, 10809), 'os.environ.get', 'os.environ.get', (['"""MLFLOW_TRACKING_URI"""'], {}), "('MLFLOW_TRACKING_URI')\n", (10786, 10809), False, 'import os\n'), ((10841, 10879), 'mlflow.set_experiment', 'mlflow.set_experiment', (['experiment_name'], {}), '(experiment_name)\n', (10862, 10879), False, 'import mlflow\n'), ((10897, 10943), 'mlflow.get_experiment_by_name', 'mlflow.get_experiment_by_name', (['experiment_name'], {}), '(experiment_name)\n', (10926, 10943), False, 'import mlflow\n'), ((10971, 11008), 'mlflow.set_tracking_uri', 'mlflow.set_tracking_uri', (['tracking_uri'], {}), '(tracking_uri)\n', (10994, 11008), False, 'import mlflow\n'), ((4743, 4773), 'app.load_data_gdrive', 'load_data_gdrive', (['data_file_id'], {}), '(data_file_id)\n', (4759, 4773), False, 'from app import load_data_gdrive, load_google_worksheet\n'), ((8710, 8739), 'pandas.isna', 'pd.isna', (["df_final['textblob']"], {}), "(df_final['textblob'])\n", (8717, 8739), True, 'import pandas as pd\n'), ((11063, 11093), 'sklearn.metrics.make_scorer', 'make_scorer', (['matthews_corrcoef'], {}), '(matthews_corrcoef)\n', (11074, 11093), False, 'from sklearn.metrics import make_scorer\n'), ((11491, 11561), 'sklearn.model_selection.ParameterSampler', 'ParameterSampler', (['hyperparams'], 
{'n_iter': "params['n_iter']", 'random_state': '(0)'}), "(hyperparams, n_iter=params['n_iter'], random_state=0)\n", (11507, 11561), False, 'from sklearn.model_selection import ParameterSampler\n'), ((11836, 11892), 'mlflow.start_run', 'mlflow.start_run', ([], {'experiment_id': 'experiment.experiment_id'}), '(experiment_id=experiment.experiment_id)\n', (11852, 11892), False, 'import mlflow\n'), ((12308, 12368), 'sklearn.pipeline.Pipeline', 'Pipeline', (["[('vectorizer', tfidf), ('nb', model.steps[0][1])]"], {}), "([('vectorizer', tfidf), ('nb', model.steps[0][1])])\n", (12316, 12368), False, 'from sklearn.pipeline import Pipeline\n'), ((12465, 12520), 'sklearn.model_selection.cross_validate', 'cross_validate', (['model', 'X_train', 'y_train'], {'scoring': 'metric'}), '(model, X_train, y_train, scoring=metric)\n', (12479, 12520), False, 'from sklearn.model_selection import cross_validate\n'), ((12692, 12726), 'mlflow.log_params', 'mlflow.log_params', (['run_hyperparams'], {}), '(run_hyperparams)\n', (12709, 12726), False, 'import mlflow\n'), ((12772, 12804), 'mlflow.log_metrics', 'mlflow.log_metrics', (['metrics_dict'], {}), '(metrics_dict)\n', (12790, 12804), False, 'import mlflow\n'), ((12861, 12986), 'mlflow.sklearn.log_model', 'mlflow.sklearn.log_model', ([], {'sk_model': 'pipe_final', 'artifact_path': '"""model"""', 'registered_model_name': '"""SentimentAnalysisClassifier"""'}), "(sk_model=pipe_final, artifact_path='model',\n registered_model_name='SentimentAnalysisClassifier')\n", (12885, 12986), False, 'import mlflow\n'), ((12615, 12643), 'numpy.mean', 'np.mean', (["scores['test_' + m]"], {}), "(scores['test_' + m])\n", (12622, 12643), True, 'import numpy as np\n'), ((6296, 6307), 'textblob.TextBlob', 'TextBlob', (['x'], {}), '(x)\n', (6304, 6307), False, 'from textblob import TextBlob\n'), ((12090, 12133), 'sklearn.naive_bayes.BernoulliNB', 'BernoulliNB', ([], {'alpha': "run_hyperparams['alpha']"}), "(alpha=run_hyperparams['alpha'])\n", (12101, 12133), False, 'from sklearn.naive_bayes import BernoulliNB\n')] |
"""Common wrapper for the ValueBot-logic
The real input generation still depends on the actual bot, but the game logic
on how to play, given the output of some value network, is the same
"""
import copy
import numpy as np
from src.learn.BaseNNBot import BaseNNBot
from src.play.model.Board import BLACK, WHITE
from src.play.model.Move import Move
class ValueBot(BaseNNBot):
def _genmove(self, color, game, flat_board):
"""Generate a move - ValueBot logic
The logic of this bot is basically:
1. Evaluate current probability of winning
2. Evaluate the probabilities of winning for each move
        3. Make the best move if there is a valid move that raises the probability
"""
color = WHITE if color == 'w' else BLACK
flat_board = flat_board.reshape(1, len(flat_board))
my_value = color
# 1. Get current Win Probability
inp = self.generate_nn_input(flat_board, color)
current_prob = self.model.predict(inp)
assert np.sum(current_prob) == 1, np.sum(current_prob)
# print(current_prob)
# 2. Evaluate all possible moves
best_win_prob = current_prob[0, 0]
best_move = Move(is_pass=True)
playable_locations = game.get_playable_locations(color)
for move in playable_locations:
if move.is_pass:
continue
# Play the move and evaluate the resulting board
test_board = copy.deepcopy(game.board)
test_board.place_stone_and_capture_if_applicable_default_values(
move.to_matrix_location(), my_value)
inp = self.generate_nn_input(test_board.flatten(), color)
pred_result = self.model.predict(inp)[0, 0]
if pred_result > best_win_prob:
best_move = move
best_win_prob = pred_result
return best_move
| [
"numpy.sum",
"src.play.model.Move.Move",
"copy.deepcopy"
] | [((1040, 1060), 'numpy.sum', 'np.sum', (['current_prob'], {}), '(current_prob)\n', (1046, 1060), True, 'import numpy as np\n'), ((1196, 1214), 'src.play.model.Move.Move', 'Move', ([], {'is_pass': '(True)'}), '(is_pass=True)\n', (1200, 1214), False, 'from src.play.model.Move import Move\n'), ((1013, 1033), 'numpy.sum', 'np.sum', (['current_prob'], {}), '(current_prob)\n', (1019, 1033), True, 'import numpy as np\n'), ((1461, 1486), 'copy.deepcopy', 'copy.deepcopy', (['game.board'], {}), '(game.board)\n', (1474, 1486), False, 'import copy\n')] |
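A minimal, self-contained sketch of the greedy value-search pattern used in `_genmove` above. The board, moves, and value function here are made-up stand-ins for the real model and game objects, not part of the row:

import numpy as np

def pick_best_move(board, candidate_moves, value_fn):
    # baseline: estimated win probability if we simply pass
    best_prob = value_fn(board)
    best_move = None                   # None plays the role of Move(is_pass=True)
    for move in candidate_moves:
        test_board = board.copy()     # never mutate the live board
        test_board[move] = 1           # place our stone on the copy
        prob = value_fn(test_board)
        if prob > best_prob:           # keep only strictly better moves
            best_prob, best_move = prob, move
    return best_move

board = np.zeros((9, 9))
print(pick_best_move(board, [(4, 4), (0, 0)], lambda b: b.mean()))  # -> (4, 4)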
""" Define a mixture ARMA model. """
import numpy as np
from bioslds import sources
from bioslds.utils import rle_encode
from bioslds.markov import SemiMarkov
from typing import Sequence, Tuple, Union, Callable, Optional
def sample_switching_models(
models: Sequence,
usage_seq: Sequence,
X: Union[None, Sequence, Callable] = None,
initial_conditions: Optional[Tuple[Sequence, Sequence]] = None,
return_input: bool = False,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
""" Sample from a non-stationary stochastic processes that switches between
different ARMA models at given times.
    This function sets the models' `history_` attribute appropriately to ensure
consistency across time.
Parameters
----------
models
Sequence of models to use.
usage_seq
        Sequence identifying the model to use at each time step. Models are
labeled from `0` to `len(models) - 1`.
X
If given, this overrides the input source for the models. If it is a
sequence, it should be at least as long as `len(usage_seq)`.
initial_conditions
A tuple, `(initial_y, initial_x)`, of recent samples of the output and
input sequences used to seed the simulation. If these are not provided,
they are assumed equal to zero.
return_input
If true, returns both output and input. If false (the default), returns only
the output.
Returns a sequence `Y` of generated samples. If `return_input` is true,
returns a tuple `(Y, X)` of generated output samples and input samples. If
    the `X` parameter was used and was a sequence, the output `X` simply mirrors
the input.
"""
# check the inputs
if len(models) == 0:
raise ValueError("No models given.")
if np.min(usage_seq) < 0 or np.max(usage_seq) >= len(models):
raise ValueError("Invalid entry in usage_seq vector.")
# handle vector X
if X is not None and not callable(X):
if len(X) < len(usage_seq):
raise ValueError("Not enough input values in X.")
X_ret = X
X = sources.Stream(X)
have_X_ret = True
else:
X_ret = np.zeros(len(usage_seq))
have_X_ret = False
# handle default initial conditions
if initial_conditions is None:
initial_conditions = ([], [])
# generate the samples
Y_ret = np.zeros(len(usage_seq))
usage_rle = rle_encode(usage_seq)
ptr = 0
for model_id, n_samples in usage_rle:
model = models[model_id]
# ensure proper history
if ptr >= model.p:
history_y = np.copy(Y_ret[ptr - model.p : ptr])
else:
n_left = model.p - ptr
if len(initial_conditions[0]) >= n_left:
history_y = np.hstack((initial_conditions[0][-n_left:], Y_ret[:ptr]))
else:
history_y = np.hstack(
(
np.zeros(n_left - len(initial_conditions[0])),
initial_conditions[0],
Y_ret[:ptr],
)
)
if ptr >= model.q:
history_x = np.copy(X_ret[ptr - model.q : ptr])
else:
n_left = model.q - ptr
if len(initial_conditions[1]) >= n_left:
history_x = np.hstack((initial_conditions[1][-n_left:], X_ret[:ptr]))
else:
history_x = np.hstack(
(
np.zeros(n_left - len(initial_conditions[1])),
initial_conditions[1],
X_ret[:ptr],
)
)
model.history_ = (history_y, history_x)
# generate and store the samples from this model
crt_y, crt_x = model.transform(n_samples, X=X, return_input=True)
Y_ret[ptr : ptr + n_samples] = crt_y
if not have_X_ret:
X_ret[ptr : ptr + n_samples] = crt_x
ptr += n_samples
if return_input:
return Y_ret, X_ret
else:
return Y_ret
class ArmaHSMM(object):
""" A hidden semi-Markov model with ARMA emissions.
This class can be used to generate samples from a non-stationary stochastic
process that stochastically switches between several ARMA processes based on
a hidden semi-Markov model.
Attributes
==========
n_features : int
Number of input dimensions. This is always equal to 1.
n_components : int
Number of output dimensions.This is always equal to 1.
models
Sequence of models to use.
smm
Semi-Markov model used to generate ARMA states.
"""
def __init__(self, models: Sequence, **kwargs):
""" Initialize the ARMA HSMM.
Parameters
----------
models
Sequence of models to use. This sets the number of states in the
semi-Markov model.
All other keyword arguments are passed to the semi-Markov model
constructor.
"""
self.models = models
self.smm = SemiMarkov(len(models), **kwargs)
self.n_features = 1
self.n_components = 1
def transform(
self,
n_samples: Optional[int] = None,
X: Union[None, Sequence, Callable] = None,
initial_conditions: Optional[Tuple[Sequence, Sequence]] = None,
return_input: bool = False,
return_usage_seq: bool = False,
) -> Union[
np.ndarray,
Tuple[np.ndarray, np.ndarray],
Tuple[np.ndarray, np.ndarray, np.ndarray],
]:
""" Process input samples.
The function uses exactly `n_samples` input samples.
If no input source is explicitly provided, the default source for each
of the ARMAs is used. An exception is raised if a process needs to be
used that does not have a default source.
Parameters
----------
n_samples
            Number of samples to generate. If not provided, `X` must be provided
and it must be a sequence.
X
Input samples or input generator. See `Arma.transform`.
initial_conditions
A tuple, `(initial_y, initial_x)`, of recent samples of the output
and input sequences used to seed the simulation. If these are not
provided, they are assumed equal to zero.
return_input
If true, returns both output and input. If false (the default), returns only
the output.
return_usage_seq
If true, returns the `usage_seq` in addition to output (and potentially
input).
Returns either a single array (`Y`) if `return_input` and `return_usage_seq` are
        both false; or a tuple `(Y, X)` or `(Y, usage_seq)` if only `return_input` or
only `return_usage_seq` is true, respectively; or a tuple `(Y, X, usage_seq)` if
both are true. Here `Y` is an array of generated `y`; `X` contains the input `x`
samples; and `usage_seq` is an integer array indicating which model was used at
each time step. If the `X` parameter was used and was a sequence, the output `X`
simply mirrors the input `X`.
"""
# check inputs
if n_samples is None:
if X is None or not hasattr(X, "__len__"):
raise ValueError("Need either n_samples or sequence U.")
n_samples = len(X)
# generate usage sequence, then use sample_switching_models
usage_seq = self.smm.sample(n_samples)
y, x = sample_switching_models(
self.models,
usage_seq,
X=X,
initial_conditions=initial_conditions,
return_input=True,
)
res = (y,)
if return_input:
res = res + (x,)
if return_usage_seq:
res = res + (usage_seq,)
if len(res) == 1:
return res[0]
else:
return res
def __repr__(self) -> str:
r = f"ArmaHSMM(models={repr(self.models)}, smm={repr(self.smm)})"
return r
def __str__(self) -> str:
s = f"ArmaHSMM(models={str(self.models)}, smm={str(self.smm)})"
return s
| [
"numpy.copy",
"bioslds.utils.rle_encode",
"numpy.hstack",
"numpy.max",
"numpy.min",
"bioslds.sources.Stream"
] | [((2443, 2464), 'bioslds.utils.rle_encode', 'rle_encode', (['usage_seq'], {}), '(usage_seq)\n', (2453, 2464), False, 'from bioslds.utils import rle_encode\n'), ((2126, 2143), 'bioslds.sources.Stream', 'sources.Stream', (['X'], {}), '(X)\n', (2140, 2143), False, 'from bioslds import sources\n'), ((1810, 1827), 'numpy.min', 'np.min', (['usage_seq'], {}), '(usage_seq)\n', (1816, 1827), True, 'import numpy as np\n'), ((1835, 1852), 'numpy.max', 'np.max', (['usage_seq'], {}), '(usage_seq)\n', (1841, 1852), True, 'import numpy as np\n'), ((2636, 2669), 'numpy.copy', 'np.copy', (['Y_ret[ptr - model.p:ptr]'], {}), '(Y_ret[ptr - model.p:ptr])\n', (2643, 2669), True, 'import numpy as np\n'), ((3185, 3218), 'numpy.copy', 'np.copy', (['X_ret[ptr - model.q:ptr]'], {}), '(X_ret[ptr - model.q:ptr])\n', (3192, 3218), True, 'import numpy as np\n'), ((2802, 2859), 'numpy.hstack', 'np.hstack', (['(initial_conditions[0][-n_left:], Y_ret[:ptr])'], {}), '((initial_conditions[0][-n_left:], Y_ret[:ptr]))\n', (2811, 2859), True, 'import numpy as np\n'), ((3351, 3408), 'numpy.hstack', 'np.hstack', (['(initial_conditions[1][-n_left:], X_ret[:ptr])'], {}), '((initial_conditions[1][-n_left:], X_ret[:ptr]))\n', (3360, 3408), True, 'import numpy as np\n')] |
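The switching logic in `sample_switching_models` above carries each model's recent outputs across segment boundaries. A numpy-only sketch of the same idea, with two hand-picked AR(1) coefficients standing in for the package's model objects (coefficients and run lengths are invented):

import numpy as np

rng = np.random.default_rng(0)
ar_coeffs = [0.95, -0.5]                              # one AR(1) "model" each
usage_seq = np.repeat([0, 1, 0], [200, 150, 200])   # which model drives each step
y = np.zeros(len(usage_seq))
for t in range(1, len(usage_seq)):
    a = ar_coeffs[usage_seq[t]]
    # y[t-1] is reused across switches, mirroring how the function seeds
    # each model's history_ from the already-generated output
    y[t] = a * y[t - 1] + rng.standard_normal()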
# Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
# from memory_profiler import profile
import os
from toast_planck.reproc_modules.destripe_tools import (fast_hit_binning,
fast_binning)
import scipy.signal
from toast import qarray as qa
from toast.mpi import MPI
from toast_planck.preproc_modules import MapSampler, flagged_running_average
import astropy.io.fits as pf
import numpy as np
import toast.fod as tf
import toast.timing as timing
class OpNoiseEstim():
def __init__(
self, signal=None, flags=None, detmask=1, commonmask=3, out=None,
maskfile=None, mapfile=None, rimo=None, pol=True, nbin_psd=1000,
lagmax=100000, stationary_period=86400., nosingle=False,
no_spin_harmonics=False, calibrate_signal_estimate=False,
nsum=10, naverage=100):
self._signal = signal
self._flags = flags
self._detmask = detmask
self._commonmask = commonmask
self._out = out
self._maskfile = maskfile
self._mapfile = mapfile
if rimo is None:
raise RuntimeError('OpNoiseEstim: You must provide a RIMO')
self._rimo = rimo
self._pol = pol
self._nbin_psd = nbin_psd
self._lagmax = lagmax
self._stationary_period = stationary_period
self._nosingle = nosingle
self._no_spin_harmonics = no_spin_harmonics
self._calibrate_signal_estimate = calibrate_signal_estimate
# Parameters for downsampling the data
self._nsum = nsum
self._naverage = naverage
def exec(self, data):
cworld = data.comm.comm_world
rank = cworld.Get_rank()
masksampler = None
if self._maskfile:
masksampler = MapSampler(self._maskfile, comm=cworld)
mapsampler = None
if self._mapfile:
mapsampler = MapSampler(self._mapfile, comm=cworld, pol=True)
for obs in data.obs:
tod = obs['tod']
local_intervals = tod.local_intervals(obs['intervals'])
dets = tod.local_dets
ndet = len(dets)
timestamps = tod.local_timestamps()
commonflags = tod.local_common_flags()
commonflags = (commonflags & self._commonmask != 0)
fsample = self.subtract_signal(
tod, cworld, rank, masksampler, mapsampler, local_intervals)
# Extend the gap between intervals to prevent sample pairs
# that cross the gap.
intervals = obs['intervals']
gap_min = np.int(self._lagmax) + 1
# Downsampled data requires longer gaps
gap_min_nsum = np.int(self._lagmax * self._nsum) + 1
offset, nsamp = tod.local_samples
gapflags = np.zeros_like(commonflags)
gapflags_nsum = np.zeros_like(commonflags)
for ival1, ival2 in zip(intervals[:-1], intervals[1:]):
gap_start = ival1.last + 1
gap_stop = max(gap_start + gap_min, ival2.first)
gap_stop_nsum = max(gap_start + gap_min_nsum, ival2.first)
if gap_start < offset + nsamp and gap_stop > offset:
gap_start = max(0, gap_start - offset)
gap_stop = min(offset + nsamp, gap_stop - offset)
gapflags[gap_start:gap_stop] = True
gap_stop_nsum = min(offset + nsamp, gap_stop_nsum - offset)
gapflags_nsum[gap_start:gap_stop_nsum] = True
for idet1 in range(ndet):
for idet2 in range(idet1, ndet):
det1 = dets[idet1]
det2 = dets[idet2]
if det1 == det2 and self._nosingle:
continue
signal1 = tod.local_signal(det1)
flags1 = tod.local_flags(det1, name=self._flags)
flags = (flags1 & self._detmask != 0)
signal2 = None
flags2 = None
if det1 != det2:
signal2 = tod.local_signal(det2)
flags2 = tod.local_flags(det2, name=self._flags)
flags[flags2 & self._detmask != 0] = True
flags[commonflags] = True
self.process_noise_estimate(
signal1, signal2, flags, gapflags, gapflags_nsum,
timestamps, fsample, cworld, rank, 'noise', det1, det2,
local_intervals)
return
def subtract_signal(self, tod, cworld, rank, masksampler, mapsampler,
local_intervals):
""" Subtract a signal estimate from the TOD and update the
flags for noise estimation.
"""
start_signal_subtract = MPI.Wtime()
for det in tod.local_dets:
if rank == 0:
print('Subtracting signal for {}'.format(det), flush=True)
tod.cache.report()
fsample = self._rimo[det].fsample
epsilon = self._rimo[det].epsilon
eta = (1 - epsilon) / (1 + epsilon)
signal = tod.local_signal(det, name=self._signal)
flags = tod.local_flags(det, name=self._flags)
flags &= self._detmask
for ival in local_intervals:
ind = slice(ival.first, ival.last + 1)
sig = signal[ind]
flg = flags[ind]
quat = tod.local_pointing(det)[ind]
if self._pol:
theta, phi, psi = qa.to_angles(quat)
iw = np.ones_like(theta)
qw = eta * np.cos(2 * psi)
uw = eta * np.sin(2 * psi)
iquw = np.column_stack([iw, qw, uw])
else:
theta, phi = qa.to_position(quat)
if masksampler is not None:
maskflg = masksampler.at(theta, phi) < 0.5
flg[maskflg] |= 255
if mapsampler is not None:
if self._pol:
bg = mapsampler.atpol(theta, phi, iquw)
else:
bg = mapsampler.at(theta, phi)
if self._calibrate_signal_estimate:
good = flg == 0
ngood = np.sum(good)
if ngood > 1:
templates = np.vstack([np.ones(ngood), bg[good]])
invcov = np.dot(templates, templates.T)
cov = np.linalg.inv(invcov)
proj = np.dot(templates, sig[good])
coeff = np.dot(cov, proj)
bg = coeff[0] + coeff[1] * bg
sig -= bg
cworld.barrier()
stop_signal_subtract = MPI.Wtime()
if rank == 0:
print('TOD signal-subtracted in {:.2f} s'.format(
stop_signal_subtract - start_signal_subtract),
flush=True)
return fsample
def decimate(self, x, flg, gapflg, local_intervals):
# Low-pass filter with running average, then downsample
xx = x.copy()
flags = flg.copy()
for ival in local_intervals:
ind = slice(ival.first, ival.last + 1)
xx[ind], flags[ind] = flagged_running_average(
x[ind], flg[ind], self._naverage,
return_flags=True)
return xx[::self._nsum].copy(), (flags + gapflg)[::self._nsum].copy()
"""
def highpass(self, x, flg):
# Flagged real-space high pass filter
xx = x.copy()
j = 0
while j < x.size and flg[j]: j += 1
alpha = .999
for i in range(j+1, x.size):
if flg[i]:
xx[i] = x[j]
else:
xx[i] = alpha*(xx[j] + x[i] - x[j])
j = i
xx /= alpha
return xx
"""
def log_bin(self, freq, nbin=100, fmin=None, fmax=None):
if np.any(freq == 0):
raise Exception('Logarithmic binning should not include '
'zero frequency')
if fmin is None:
fmin = np.amin(freq)
if fmax is None:
fmax = np.amax(freq)
bins = np.logspace(np.log(fmin), np.log(fmax), num=nbin + 1,
endpoint=True, base=np.e)
        bins[-1] *= 1.01  # widen the last bin so the top frequency does not form a one-entry bin
locs = np.digitize(freq, bins).astype(np.int32)
hits = np.zeros(nbin + 2, dtype=np.int32)
fast_hit_binning(locs, hits)
return locs, hits
def bin_psds(self, my_psds, fmin=None, fmax=None):
my_binned_psds = []
my_times = []
binfreq0 = None
for i in range(len(my_psds)):
t0, _, freq, psd = my_psds[i]
good = freq != 0
if self._no_spin_harmonics:
# Discard the bins containing spin harmonics and their
# immediate neighbors
for i0 in range(1, 3):
f0 = i0 / 60.
for i in range(1, 30):
fmask = f0 * i
imin = np.argmin(np.abs(freq - fmask))
if i == 1:
# The first bin has a wider neighborhood
good[imin - 2:imin + 3] = False
else:
good[imin - 1:imin + 2] = False
if self._nbin_psd is not None:
locs, hits = self.log_bin(freq[good], nbin=self._nbin_psd,
fmin=fmin, fmax=fmax)
binfreq = np.zeros(hits.size)
fast_binning(freq[good], locs, binfreq)
binfreq = binfreq[hits != 0] / hits[hits != 0]
else:
binfreq = freq
hits = np.ones(len(binfreq))
if binfreq0 is None:
binfreq0 = binfreq
else:
if np.any(binfreq != binfreq0):
raise Exception('Binned PSD frequencies change')
if self._nbin_psd is not None:
binpsd = np.zeros(hits.size)
fast_binning(psd[good], locs, binpsd)
binpsd = binpsd[hits != 0] / hits[hits != 0]
else:
binpsd = psd
my_times.append(t0)
my_binned_psds.append(binpsd)
return my_binned_psds, my_times, binfreq0
def discard_spin_harmonics(self, binfreq, all_psds):
ind = binfreq != 0
for i0 in range(1, 3):
f0 = i0 / 60.
for i in range(1, 10):
fmask = f0 * i
imin = np.argmin(np.abs(binfreq - fmask))
if i == 1:
ind[imin - 1:imin + 2] = False
else:
ind[imin] = False
binfreq = binfreq[ind]
all_psds = all_psds[:, ind]
return binfreq, all_psds
def discard_outliers(self, binfreq, all_psds, all_times):
all_psds = list(all_psds)
all_times = list(all_times)
nrow, ncol = np.shape(all_psds)
# Discard empty PSDs
i = 1
nbad = 0
all_psds = list(all_psds)
all_times = list(all_times)
while i < nrow:
p = all_psds[i]
if np.all(p == 0) or np.any(np.isnan(p)):
del all_psds[i]
del all_times[i]
nrow -= 1
nbad += 1
else:
i += 1
if nbad > 0:
print('Discarded {} empty or NaN psds'.format(nbad), flush=True)
# Throw away outlier PSDs by comparing the PSDs in specific bins
if nrow > 10:
all_good = np.isfinite(np.sum(all_psds, 1))
for col in range(ncol - 1):
if binfreq[col] < .001:
continue
# Local outliers
psdvalues = np.array([x[col] for x in all_psds])
smooth_values = scipy.signal.medfilt(psdvalues, 11)
good = np.ones(psdvalues.size, dtype=np.bool)
good[psdvalues == 0] = False
for i in range(10):
# Local test
diff = np.zeros(psdvalues.size)
diff[good] = np.log(psdvalues[good]) - \
np.log(smooth_values[good])
sdev = np.std(diff[good])
good[np.abs(diff) > 5 * sdev] = False
# Global test
diff = np.zeros(psdvalues.size)
diff[good] = np.log(psdvalues[good]) - \
np.mean(np.log(psdvalues[good]))
sdev = np.std(diff[good])
good[np.abs(diff) > 5 * sdev] = False
all_good[np.logical_not(good)] = False
bad = np.logical_not(all_good)
nbad = np.sum(bad)
if nbad > 0:
for ii in np.argwhere(bad).ravel()[::-1]:
del all_psds[ii]
del all_times[ii]
if nbad > 0:
print('Masked extra {} psds due to outliers.'
''.format(nbad))
return all_psds, all_times
def save_psds(self, binfreq, all_psds, all_times, det1, det2, fsample,
rootname):
if det1 == det2:
fn_out = os.path.join(self._out,
'{}_{}.fits'.format(rootname, det1))
else:
fn_out = os.path.join(self._out,
'{}_{}_{}.fits'.format(rootname, det1, det2))
all_psds = np.vstack([binfreq, all_psds])
cols = []
cols.append(pf.Column(name='OBT', format='D', array=all_times))
coldefs = pf.ColDefs(cols)
hdu1 = pf.BinTableHDU.from_columns(coldefs)
hdu1.header['RATE'] = fsample, 'Sampling rate'
cols = []
cols.append(pf.Column(name='PSD', format='{}E'.format(binfreq.size),
array=all_psds))
coldefs = pf.ColDefs(cols)
hdu2 = pf.BinTableHDU.from_columns(coldefs)
hdu2.header['EXTNAME'] = det1, 'Detector'
hdu2.header['DET1'] = det1, 'Detector1'
hdu2.header['DET2'] = det2, 'Detector2'
hdu0 = pf.PrimaryHDU()
hdulist = pf.HDUList([hdu0, hdu1, hdu2])
if os.path.isfile(fn_out):
os.remove(fn_out)
hdulist.writeto(fn_out)
print('Detector {} vs. {} PSDs stored in {}'.format(
det1, det2, fn_out))
return
def process_noise_estimate(
self, signal1, signal2, flags, gapflags, gapflags_nsum,
timestamps, fsample, cworld, rank, fileroot, det1, det2,
local_intervals):
# High pass filter the signal to avoid aliasing
# self.highpass(signal1, noise_flags)
# self.highpass(signal2, noise_flags)
# Compute the autocovariance function and the matching
# PSD for each stationary interval
start = MPI.Wtime()
if signal2 is None:
my_psds1 = tf.autocov_psd(
timestamps, signal1, flags + gapflags, self._lagmax,
self._stationary_period, fsample, comm=cworld)
else:
my_psds1 = tf.crosscov_psd(
timestamps, signal1, signal2, flags + gapflags, self._lagmax,
self._stationary_period, fsample, comm=cworld)
# Get another PSD for a down-sampled TOD to measure the
# low frequency power
timestamps_decim = timestamps[::self._nsum]
# decimate() will smooth and downsample the signal in
# each valid interval separately
signal1_decim, flags_decim = self.decimate(
signal1, flags, gapflags_nsum, local_intervals)
if signal2 is not None:
signal2_decim, flags_decim = self.decimate(
signal2, flags, gapflags_nsum, local_intervals)
if signal2 is None:
my_psds2 = tf.autocov_psd(
timestamps_decim, signal1_decim, flags_decim,
min(self._lagmax, timestamps_decim.size),
self._stationary_period, fsample / self._nsum, comm=cworld)
else:
my_psds2 = tf.crosscov_psd(
timestamps_decim, signal1_decim, signal2_decim, flags_decim,
min(self._lagmax, timestamps_decim.size),
self._stationary_period, fsample / self._nsum, comm=cworld)
# Ensure the two sets of PSDs are of equal length
my_new_psds1 = []
my_new_psds2 = []
i = 0
while i < min(len(my_psds1), len(my_psds2)):
t1 = my_psds1[i][0]
t2 = my_psds2[i][0]
if np.isclose(t1, t2):
my_new_psds1.append(my_psds1[i])
my_new_psds2.append(my_psds2[i])
i += 1
else:
if t1 < t2:
del my_psds1[i]
else:
del my_psds2[i]
my_psds1 = my_new_psds1
my_psds2 = my_new_psds2
if len(my_psds1) != len(my_psds2):
while my_psds1[-1][0] > my_psds2[-1][0]:
del my_psds1[-1]
while my_psds1[-1][0] < my_psds2[-1][0]:
del my_psds2[-1]
# frequencies that are usable in the down-sampled PSD
fcut = fsample / 2 / self._naverage / 100
stop = MPI.Wtime()
if rank == 0:
print('Correlators and PSDs computed in {:.2f} s'
''.format(stop - start), flush=True)
# Now bin the PSDs
fmin = 1 / self._stationary_period
fmax = fsample / 2
start = MPI.Wtime()
my_binned_psds1, my_times1, binfreq10 = self.bin_psds(
my_psds1, fmin, fmax)
my_binned_psds2, _, binfreq20 = self.bin_psds(
my_psds2, fmin, fmax)
stop = MPI.Wtime()
"""
# DEBUG begin
import pdb
import matplotlib.pyplot as plt
plt.figure()
plt.loglog(my_psds2[0][2], my_psds2[0][3], 'r.')
plt.loglog(my_psds1[0][2], my_psds1[0][3], 'b.')
plt.loglog(binfreq20, my_binned_psds2[0], 'r-')
plt.loglog(binfreq10, my_binned_psds1[0], 'b-')
plt.gca().axvline(fcut, color='k')
plt.draw()
plt.show()
pdb.set_trace()
# DEBUG end
"""
# concatenate
if binfreq10 is None or binfreq20 is None:
my_times = []
my_binned_psds = []
binfreq0 = None
else:
my_times = my_times1
ind1 = binfreq10 > fcut
ind2 = binfreq20 <= fcut
binfreq0 = np.hstack([binfreq20[ind2], binfreq10[ind1]])
my_binned_psds = []
for psd1, psd2 in zip(my_binned_psds1, my_binned_psds2):
my_binned_psds.append(np.hstack([psd2[ind2], psd1[ind1]]))
# Collect and write the PSDs. Start by determining the first
# process to have a valid PSD to determine binning
start = MPI.Wtime()
have_bins = binfreq0 is not None
have_bins_all = cworld.allgather(have_bins)
root = 0
if np.any(have_bins_all):
while not have_bins_all[root]:
root += 1
else:
raise RuntimeError('None of the processes have valid PSDs')
binfreq = cworld.bcast(binfreq0, root=root)
if binfreq0 is not None and np.any(binfreq != binfreq0):
raise Exception(
'{:4} : Binned PSD frequencies change. len(binfreq0)={}'
', len(binfreq)={}, binfreq0={}, binfreq={}. '
'len(my_psds)={}'.format(
rank, binfreq0.size, binfreq.size, binfreq0,
binfreq, len(my_psds1)))
if len(my_times) != len(my_binned_psds):
raise Exception(
'ERROR: Process {} has len(my_times) = {}, len(my_binned_psds)'
' = {}'.format(rank, len(my_times), len(my_binned_psds)))
all_times = cworld.gather(my_times, root=0)
all_psds = cworld.gather(my_binned_psds, root=0)
stop = MPI.Wtime()
if rank == 0:
if len(all_times) != len(all_psds):
raise Exception(
'ERROR: Process {} has len(all_times) = {}, len(all_psds)'
' = {} before deglitch'.format(
rank, len(all_times), len(all_psds)))
# De-glitch the binned PSDs and write them to file
i = 0
while i < len(all_times):
if len(all_times[i]) == 0:
del all_times[i]
del all_psds[i]
else:
i += 1
all_times = np.hstack(all_times)
all_psds = np.vstack(all_psds)
if len(all_times) != len(all_psds):
raise Exception(
'ERROR: Process {} has len(all_times) = {}, len(all_psds)'
' = {} AFTER deglitch'.format(
rank, len(all_times), len(all_psds)))
# if self._no_spin_harmonics:
# binfreq, all_psds = self.discard_spin_harmonics(binfreq, all_psds)
good_psds, good_times = self.discard_outliers(
binfreq, all_psds, all_times)
self.save_psds(
binfreq, all_psds, all_times, det1, det2, fsample, fileroot)
self.save_psds(
binfreq, good_psds, good_times, det1, det2, fsample,
fileroot + '_good')
return
| [
"astropy.io.fits.ColDefs",
"numpy.hstack",
"numpy.log",
"numpy.logical_not",
"numpy.column_stack",
"numpy.array",
"toast.qarray.to_angles",
"numpy.sin",
"astropy.io.fits.BinTableHDU.from_columns",
"toast_planck.reproc_modules.destripe_tools.fast_binning",
"os.remove",
"toast.fod.crosscov_psd",... | [((5004, 5015), 'toast.mpi.MPI.Wtime', 'MPI.Wtime', ([], {}), '()\n', (5013, 5015), False, 'from toast.mpi import MPI\n'), ((7069, 7080), 'toast.mpi.MPI.Wtime', 'MPI.Wtime', ([], {}), '()\n', (7078, 7080), False, 'from toast.mpi import MPI\n'), ((8252, 8269), 'numpy.any', 'np.any', (['(freq == 0)'], {}), '(freq == 0)\n', (8258, 8269), True, 'import numpy as np\n'), ((8780, 8814), 'numpy.zeros', 'np.zeros', (['(nbin + 2)'], {'dtype': 'np.int32'}), '(nbin + 2, dtype=np.int32)\n', (8788, 8814), True, 'import numpy as np\n'), ((8823, 8851), 'toast_planck.reproc_modules.destripe_tools.fast_hit_binning', 'fast_hit_binning', (['locs', 'hits'], {}), '(locs, hits)\n', (8839, 8851), False, 'from toast_planck.reproc_modules.destripe_tools import fast_hit_binning, fast_binning\n'), ((11423, 11441), 'numpy.shape', 'np.shape', (['all_psds'], {}), '(all_psds)\n', (11431, 11441), True, 'import numpy as np\n'), ((13977, 14007), 'numpy.vstack', 'np.vstack', (['[binfreq, all_psds]'], {}), '([binfreq, all_psds])\n', (13986, 14007), True, 'import numpy as np\n'), ((14117, 14133), 'astropy.io.fits.ColDefs', 'pf.ColDefs', (['cols'], {}), '(cols)\n', (14127, 14133), True, 'import astropy.io.fits as pf\n'), ((14149, 14185), 'astropy.io.fits.BinTableHDU.from_columns', 'pf.BinTableHDU.from_columns', (['coldefs'], {}), '(coldefs)\n', (14176, 14185), True, 'import astropy.io.fits as pf\n'), ((14402, 14418), 'astropy.io.fits.ColDefs', 'pf.ColDefs', (['cols'], {}), '(cols)\n', (14412, 14418), True, 'import astropy.io.fits as pf\n'), ((14434, 14470), 'astropy.io.fits.BinTableHDU.from_columns', 'pf.BinTableHDU.from_columns', (['coldefs'], {}), '(coldefs)\n', (14461, 14470), True, 'import astropy.io.fits as pf\n'), ((14633, 14648), 'astropy.io.fits.PrimaryHDU', 'pf.PrimaryHDU', ([], {}), '()\n', (14646, 14648), True, 'import astropy.io.fits as pf\n'), ((14667, 14697), 'astropy.io.fits.HDUList', 'pf.HDUList', (['[hdu0, hdu1, hdu2]'], {}), '([hdu0, hdu1, hdu2])\n', (14677, 14697), True, 'import astropy.io.fits as pf\n'), ((14710, 14732), 'os.path.isfile', 'os.path.isfile', (['fn_out'], {}), '(fn_out)\n', (14724, 14732), False, 'import os\n'), ((15377, 15388), 'toast.mpi.MPI.Wtime', 'MPI.Wtime', ([], {}), '()\n', (15386, 15388), False, 'from toast.mpi import MPI\n'), ((17775, 17786), 'toast.mpi.MPI.Wtime', 'MPI.Wtime', ([], {}), '()\n', (17784, 17786), False, 'from toast.mpi import MPI\n'), ((18043, 18054), 'toast.mpi.MPI.Wtime', 'MPI.Wtime', ([], {}), '()\n', (18052, 18054), False, 'from toast.mpi import MPI\n'), ((18256, 18267), 'toast.mpi.MPI.Wtime', 'MPI.Wtime', ([], {}), '()\n', (18265, 18267), False, 'from toast.mpi import MPI\n'), ((19419, 19430), 'toast.mpi.MPI.Wtime', 'MPI.Wtime', ([], {}), '()\n', (19428, 19430), False, 'from toast.mpi import MPI\n'), ((19552, 19573), 'numpy.any', 'np.any', (['have_bins_all'], {}), '(have_bins_all)\n', (19558, 19573), True, 'import numpy as np\n'), ((20520, 20531), 'toast.mpi.MPI.Wtime', 'MPI.Wtime', ([], {}), '()\n', (20529, 20531), False, 'from toast.mpi import MPI\n'), ((1939, 1978), 'toast_planck.preproc_modules.MapSampler', 'MapSampler', (['self._maskfile'], {'comm': 'cworld'}), '(self._maskfile, comm=cworld)\n', (1949, 1978), False, 'from toast_planck.preproc_modules import MapSampler, flagged_running_average\n'), ((2056, 2104), 'toast_planck.preproc_modules.MapSampler', 'MapSampler', (['self._mapfile'], {'comm': 'cworld', 'pol': '(True)'}), '(self._mapfile, comm=cworld, pol=True)\n', (2066, 2104), False, 'from toast_planck.preproc_modules import 
MapSampler, flagged_running_average\n'), ((2962, 2988), 'numpy.zeros_like', 'np.zeros_like', (['commonflags'], {}), '(commonflags)\n', (2975, 2988), True, 'import numpy as np\n'), ((3017, 3043), 'numpy.zeros_like', 'np.zeros_like', (['commonflags'], {}), '(commonflags)\n', (3030, 3043), True, 'import numpy as np\n'), ((7572, 7648), 'toast_planck.preproc_modules.flagged_running_average', 'flagged_running_average', (['x[ind]', 'flg[ind]', 'self._naverage'], {'return_flags': '(True)'}), '(x[ind], flg[ind], self._naverage, return_flags=True)\n', (7595, 7648), False, 'from toast_planck.preproc_modules import MapSampler, flagged_running_average\n'), ((8432, 8445), 'numpy.amin', 'np.amin', (['freq'], {}), '(freq)\n', (8439, 8445), True, 'import numpy as np\n'), ((8490, 8503), 'numpy.amax', 'np.amax', (['freq'], {}), '(freq)\n', (8497, 8503), True, 'import numpy as np\n'), ((8532, 8544), 'numpy.log', 'np.log', (['fmin'], {}), '(fmin)\n', (8538, 8544), True, 'import numpy as np\n'), ((8546, 8558), 'numpy.log', 'np.log', (['fmax'], {}), '(fmax)\n', (8552, 8558), True, 'import numpy as np\n'), ((13197, 13221), 'numpy.logical_not', 'np.logical_not', (['all_good'], {}), '(all_good)\n', (13211, 13221), True, 'import numpy as np\n'), ((13241, 13252), 'numpy.sum', 'np.sum', (['bad'], {}), '(bad)\n', (13247, 13252), True, 'import numpy as np\n'), ((14047, 14097), 'astropy.io.fits.Column', 'pf.Column', ([], {'name': '"""OBT"""', 'format': '"""D"""', 'array': 'all_times'}), "(name='OBT', format='D', array=all_times)\n", (14056, 14097), True, 'import astropy.io.fits as pf\n'), ((14746, 14763), 'os.remove', 'os.remove', (['fn_out'], {}), '(fn_out)\n', (14755, 14763), False, 'import os\n'), ((15440, 15559), 'toast.fod.autocov_psd', 'tf.autocov_psd', (['timestamps', 'signal1', '(flags + gapflags)', 'self._lagmax', 'self._stationary_period', 'fsample'], {'comm': 'cworld'}), '(timestamps, signal1, flags + gapflags, self._lagmax, self.\n _stationary_period, fsample, comm=cworld)\n', (15454, 15559), True, 'import toast.fod as tf\n'), ((15625, 15754), 'toast.fod.crosscov_psd', 'tf.crosscov_psd', (['timestamps', 'signal1', 'signal2', '(flags + gapflags)', 'self._lagmax', 'self._stationary_period', 'fsample'], {'comm': 'cworld'}), '(timestamps, signal1, signal2, flags + gapflags, self.\n _lagmax, self._stationary_period, fsample, comm=cworld)\n', (15640, 15754), True, 'import toast.fod as tf\n'), ((17085, 17103), 'numpy.isclose', 'np.isclose', (['t1', 't2'], {}), '(t1, t2)\n', (17095, 17103), True, 'import numpy as np\n'), ((19050, 19095), 'numpy.hstack', 'np.hstack', (['[binfreq20[ind2], binfreq10[ind1]]'], {}), '([binfreq20[ind2], binfreq10[ind1]])\n', (19059, 19095), True, 'import numpy as np\n'), ((19818, 19845), 'numpy.any', 'np.any', (['(binfreq != binfreq0)'], {}), '(binfreq != binfreq0)\n', (19824, 19845), True, 'import numpy as np\n'), ((21138, 21158), 'numpy.hstack', 'np.hstack', (['all_times'], {}), '(all_times)\n', (21147, 21158), True, 'import numpy as np\n'), ((21182, 21201), 'numpy.vstack', 'np.vstack', (['all_psds'], {}), '(all_psds)\n', (21191, 21201), True, 'import numpy as np\n'), ((2751, 2771), 'numpy.int', 'np.int', (['self._lagmax'], {}), '(self._lagmax)\n', (2757, 2771), True, 'import numpy as np\n'), ((2855, 2888), 'numpy.int', 'np.int', (['(self._lagmax * self._nsum)'], {}), '(self._lagmax * self._nsum)\n', (2861, 2888), True, 'import numpy as np\n'), ((8723, 8746), 'numpy.digitize', 'np.digitize', (['freq', 'bins'], {}), '(freq, bins)\n', (8734, 8746), True, 'import numpy as np\n'), ((9950, 
9969), 'numpy.zeros', 'np.zeros', (['hits.size'], {}), '(hits.size)\n', (9958, 9969), True, 'import numpy as np\n'), ((9986, 10025), 'toast_planck.reproc_modules.destripe_tools.fast_binning', 'fast_binning', (['freq[good]', 'locs', 'binfreq'], {}), '(freq[good], locs, binfreq)\n', (9998, 10025), False, 'from toast_planck.reproc_modules.destripe_tools import fast_hit_binning, fast_binning\n'), ((10289, 10316), 'numpy.any', 'np.any', (['(binfreq != binfreq0)'], {}), '(binfreq != binfreq0)\n', (10295, 10316), True, 'import numpy as np\n'), ((10456, 10475), 'numpy.zeros', 'np.zeros', (['hits.size'], {}), '(hits.size)\n', (10464, 10475), True, 'import numpy as np\n'), ((10492, 10529), 'toast_planck.reproc_modules.destripe_tools.fast_binning', 'fast_binning', (['psd[good]', 'locs', 'binpsd'], {}), '(psd[good], locs, binpsd)\n', (10504, 10529), False, 'from toast_planck.reproc_modules.destripe_tools import fast_hit_binning, fast_binning\n'), ((11641, 11655), 'numpy.all', 'np.all', (['(p == 0)'], {}), '(p == 0)\n', (11647, 11655), True, 'import numpy as np\n'), ((12070, 12089), 'numpy.sum', 'np.sum', (['all_psds', '(1)'], {}), '(all_psds, 1)\n', (12076, 12089), True, 'import numpy as np\n'), ((12263, 12299), 'numpy.array', 'np.array', (['[x[col] for x in all_psds]'], {}), '([x[col] for x in all_psds])\n', (12271, 12299), True, 'import numpy as np\n'), ((12391, 12429), 'numpy.ones', 'np.ones', (['psdvalues.size'], {'dtype': 'np.bool'}), '(psdvalues.size, dtype=np.bool)\n', (12398, 12429), True, 'import numpy as np\n'), ((5766, 5784), 'toast.qarray.to_angles', 'qa.to_angles', (['quat'], {}), '(quat)\n', (5778, 5784), True, 'from toast import qarray as qa\n'), ((5810, 5829), 'numpy.ones_like', 'np.ones_like', (['theta'], {}), '(theta)\n', (5822, 5829), True, 'import numpy as np\n'), ((5951, 5980), 'numpy.column_stack', 'np.column_stack', (['[iw, qw, uw]'], {}), '([iw, qw, uw])\n', (5966, 5980), True, 'import numpy as np\n'), ((6036, 6056), 'toast.qarray.to_position', 'qa.to_position', (['quat'], {}), '(quat)\n', (6050, 6056), True, 'from toast import qarray as qa\n'), ((11004, 11027), 'numpy.abs', 'np.abs', (['(binfreq - fmask)'], {}), '(binfreq - fmask)\n', (11010, 11027), True, 'import numpy as np\n'), ((11666, 11677), 'numpy.isnan', 'np.isnan', (['p'], {}), '(p)\n', (11674, 11677), True, 'import numpy as np\n'), ((12572, 12596), 'numpy.zeros', 'np.zeros', (['psdvalues.size'], {}), '(psdvalues.size)\n', (12580, 12596), True, 'import numpy as np\n'), ((12737, 12755), 'numpy.std', 'np.std', (['diff[good]'], {}), '(diff[good])\n', (12743, 12755), True, 'import numpy as np\n'), ((12875, 12899), 'numpy.zeros', 'np.zeros', (['psdvalues.size'], {}), '(psdvalues.size)\n', (12883, 12899), True, 'import numpy as np\n'), ((13045, 13063), 'numpy.std', 'np.std', (['diff[good]'], {}), '(diff[good])\n', (13051, 13063), True, 'import numpy as np\n'), ((13148, 13168), 'numpy.logical_not', 'np.logical_not', (['good'], {}), '(good)\n', (13162, 13168), True, 'import numpy as np\n'), ((19235, 19270), 'numpy.hstack', 'np.hstack', (['[psd2[ind2], psd1[ind1]]'], {}), '([psd2[ind2], psd1[ind1]])\n', (19244, 19270), True, 'import numpy as np\n'), ((5861, 5876), 'numpy.cos', 'np.cos', (['(2 * psi)'], {}), '(2 * psi)\n', (5867, 5876), True, 'import numpy as np\n'), ((5908, 5923), 'numpy.sin', 'np.sin', (['(2 * psi)'], {}), '(2 * psi)\n', (5914, 5923), True, 'import numpy as np\n'), ((6554, 6566), 'numpy.sum', 'np.sum', (['good'], {}), '(good)\n', (6560, 6566), True, 'import numpy as np\n'), ((12630, 12653), 'numpy.log', 
'np.log', (['psdvalues[good]'], {}), '(psdvalues[good])\n', (12636, 12653), True, 'import numpy as np\n'), ((12682, 12709), 'numpy.log', 'np.log', (['smooth_values[good]'], {}), '(smooth_values[good])\n', (12688, 12709), True, 'import numpy as np\n'), ((12933, 12956), 'numpy.log', 'np.log', (['psdvalues[good]'], {}), '(psdvalues[good])\n', (12939, 12956), True, 'import numpy as np\n'), ((6720, 6750), 'numpy.dot', 'np.dot', (['templates', 'templates.T'], {}), '(templates, templates.T)\n', (6726, 6750), True, 'import numpy as np\n'), ((6785, 6806), 'numpy.linalg.inv', 'np.linalg.inv', (['invcov'], {}), '(invcov)\n', (6798, 6806), True, 'import numpy as np\n'), ((6842, 6870), 'numpy.dot', 'np.dot', (['templates', 'sig[good]'], {}), '(templates, sig[good])\n', (6848, 6870), True, 'import numpy as np\n'), ((6907, 6924), 'numpy.dot', 'np.dot', (['cov', 'proj'], {}), '(cov, proj)\n', (6913, 6924), True, 'import numpy as np\n'), ((9465, 9485), 'numpy.abs', 'np.abs', (['(freq - fmask)'], {}), '(freq - fmask)\n', (9471, 9485), True, 'import numpy as np\n'), ((12781, 12793), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (12787, 12793), True, 'import numpy as np\n'), ((12993, 13016), 'numpy.log', 'np.log', (['psdvalues[good]'], {}), '(psdvalues[good])\n', (12999, 13016), True, 'import numpy as np\n'), ((13089, 13101), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (13095, 13101), True, 'import numpy as np\n'), ((13304, 13320), 'numpy.argwhere', 'np.argwhere', (['bad'], {}), '(bad)\n', (13315, 13320), True, 'import numpy as np\n'), ((6656, 6670), 'numpy.ones', 'np.ones', (['ngood'], {}), '(ngood)\n', (6663, 6670), True, 'import numpy as np\n')] |
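The `log_bin`/`bin_psds` steps above compress a raw PSD onto logarithmically spaced frequency bins. A numpy-only sketch of the same binning, in which `np.bincount` stands in for the module's `fast_hit_binning`/`fast_binning` helpers:

import numpy as np

def log_bin_psd(freq, psd, nbin=100):
    # freq must be strictly positive, as in the class's log_bin method
    bins = np.logspace(np.log(freq.min()), np.log(freq.max()),
                       num=nbin + 1, base=np.e)
    bins[-1] *= 1.01                 # keep the top frequency inside the last bin
    locs = np.digitize(freq, bins)
    hits = np.bincount(locs, minlength=nbin + 2)
    binfreq = np.bincount(locs, weights=freq, minlength=nbin + 2)
    binpsd = np.bincount(locs, weights=psd, minlength=nbin + 2)
    good = hits > 0                  # drop empty bins, as bin_psds does
    return binfreq[good] / hits[good], binpsd[good] / hits[good]

freq = np.linspace(1e-3, 100.0, 10_000)
f_bin, p_bin = log_bin_psd(freq, 1.0 / freq)       # toy 1/f spectrum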
import pathlib
import astropy.table as at
import astropy.units as u
from matplotlib.path import Path
import numpy as np
from pyia import GaiaData
from .config import (apogee_parent_filename, galah_parent_filename,
cache_path, plot_path)
class Dataset:
_id_column = None
_radial_velocity_name = None
_elem_err_fmt = None
def __init_subclass__(cls, **kwargs):
if not hasattr(cls, '_radial_velocity_name'):
cls._radial_velocity_name = 'radial_velocity'
for name in ['_id_column', '_elem_err_fmt']:
if getattr(cls, name) is None:
raise ValueError(f'You must specify class param: {name}')
super().__init_subclass__(**kwargs)
def __init__(self, filename_or_tbl):
if (isinstance(filename_or_tbl, str)
or isinstance(filename_or_tbl, pathlib.Path)):
self.t = at.QTable.read(filename_or_tbl)
else:
self.t = at.QTable(filename_or_tbl)
self.t = self._init_mask()
# Abundance ratios should be all caps:
for col in self.t.colnames:
if ((col.upper().endswith('_FE') or
col.upper().startswith('FE_') or
col.upper().endswith('_H')) and
not col.upper().startswith('FLAG')):
self.t.rename_column(col, col.upper())
# Abundance error columns should be _ERR like APOGEE:
for elem in self.elem_ratios:
col1 = self._elem_err_fmt.format(elem_name=elem)
col2 = APOGEEDataset._elem_err_fmt.format(elem_name=elem)
if col1 in self.t.colnames:
self.t.rename_column(col1, col2)
self.g = GaiaData(self.t)
# Use Gaia RV if not defined at dataset subclass level
rv_name = self._radial_velocity_name
rv = u.Quantity(self.t[rv_name])
if rv.unit.is_equivalent(u.one):
rv = rv * u.km/u.s
self.c = self.g.get_skycoord(radial_velocity=rv)
def _init_mask(self):
# TODO: implement on subclasses
return self.t
def __len__(self):
return len(self.t)
@property
def elem_ratios(self):
if not hasattr(self, '_elem_ratios'):
self._elem_ratios = ['FE_H'] + sorted([x for x in self.t.colnames
if x.endswith('_FE') and
not x.startswith('E_') and
not x.startswith('FLAG_') and
not x.startswith('CHI_') and
not x.startswith('FLUX_') and
not x.startswith('NR_')])
return self._elem_ratios
@property
def elem_names(self):
if not hasattr(self, '_elem_names'):
elem_list = ([x.split('_')[0] for x in self.elem_ratios] +
[x.split('_')[1] for x in self.elem_ratios])
elem_list.pop(elem_list.index('H'))
self._elem_names = set(elem_list)
return self._elem_names
def get_elem_ratio(self, elem1, elem2=None):
# Passed in an elem ratio provided by the table, e.g., FE_H
if elem2 is None and elem1 in self.t.colnames:
return self.t[elem1]
if elem2 is None:
try:
elem1, elem2 = elem1.split('_')
except Exception:
raise RuntimeError("If passing a single elem ratio string, "
"it must have the form ELEM_ELEM, not "
f"{elem1}")
elem1 = str(elem1).upper()
elem2 = str(elem2).upper()
if elem2 == 'H':
i1 = self.elem_ratios.index(elem1 + '_FE')
i2 = self.elem_ratios.index('FE_H')
            # [X/H] = [X/Fe] + [Fe/H], so the hydrogen branch sums the columns
            return (self.t[self.elem_ratios[i1]] +
                    self.t[self.elem_ratios[i2]])
else:
i1 = self.elem_ratios.index(elem1 + '_FE')
i2 = self.elem_ratios.index(elem2 + '_FE')
return (self.t[self.elem_ratios[i1]] -
self.t[self.elem_ratios[i2]])
def get_mh_am_mask(self):
# TODO: implement on subclasses
return np.ones(len(self.t), dtype=bool)
def filter(self, filters, low_alpha=True):
mask = np.ones(len(self.t), dtype=bool)
for k, (x1, x2) in filters.items():
if x1 is None and x2 is None:
raise ValueError("Doh")
arr = u.Quantity(self.t[k]).value
if x1 is None:
mask &= arr < x2
elif x2 is None:
mask &= arr >= x1
else:
mask &= (arr >= x1) & (arr < x2)
if low_alpha is not None:
alpha_mask = self.get_mh_am_mask(low_alpha)
else:
alpha_mask = np.ones(len(self.t), dtype=bool)
return self[mask & alpha_mask]
def __getitem__(self, slc):
if isinstance(slc, int):
slc = slice(slc, slc+1)
return self.__class__(self.t[slc])
class APOGEEDataset(Dataset):
_id_column = 'APOGEE_ID'
_radial_velocity_name = 'VHELIO_AVG'
_elem_err_fmt = '{elem_name}_ERR'
# See: 2-High-alpha-Low-alpha.ipynb
_mh_alpham_nodes = np.array([
[0.6, -0.05],
[0.6, 0.04],
[0.15, 0.04],
[-0.5, 0.13],
[-0.9, 0.13],
[-1., 0.07],
[-0.2, -0.1],
[0.2, -0.1],
[0.6, -0.05]]
)
def _init_mask(self):
aspcap_bitmask = np.sum(2 ** np.array([
7, # STAR_WARN
23 # STAR_BAD
]))
quality_mask = (
(self.t['SNR'] > 20) &
((self.t['ASPCAPFLAG'] & aspcap_bitmask) == 0)
)
# Remove stars targeted in known clusters or dwarf galaxies:
mask_bits = {
'APOGEE_TARGET1': np.array([9, 18, 24, 26]),
'APOGEE_TARGET2': np.array([10, 18]),
'APOGEE2_TARGET1': np.array([9, 18, 20, 21, 22, 23, 24, 26]),
'APOGEE2_TARGET2': np.array([10]),
'APOGEE2_TARGET3': np.array([5, 14, 15])
}
target_mask = np.ones(len(self.t), dtype=bool)
for name, bits in mask_bits.items():
target_mask &= (self.t[name] & np.sum(2**bits)) == 0
return self.t[quality_mask & target_mask]
def get_mh_am_mask(self, low_alpha=True):
mh_alpham_path = Path(self._mh_alpham_nodes[:-1])
low_alpha_mask = mh_alpham_path.contains_points(
np.stack((self.t['M_H'], self.t['ALPHA_M'])).T)
if low_alpha:
return low_alpha_mask
else:
return ((~low_alpha_mask) &
(self.t['M_H'] > -1) &
(self.t['ALPHA_M'] > 0))
class GALAHDataset(Dataset):
_id_column = 'star_id'
_radial_velocity_name = 'rv_galah'
_elem_err_fmt = 'E_{elem_name}'
# See: 2-High-alpha-Low-alpha.ipynb
_mh_alpham_nodes = np.array([
[0.6, -0.01],
[0.6, 0.08],
[0.15, 0.08],
[-0.5, 0.17],
[-0.9, 0.17],
[-1., 0.11],
[-0.2, -0.11],
[0.2, -0.11],
[0.6, -0.03]])
def _init_mask(self):
quality_mask = (
(self.t['flag_sp'] == 0) &
(self.t['flag_fe_h'] == 0)
)
# Remove stars targeted in known clusters or dwarf galaxies:
# TODO: how to do this for GALAH??
return self.t[quality_mask]
def get_mh_am_mask(self, low_alpha=True):
mh_alpham_path = Path(self._mh_alpham_nodes)
low_alpha_mask = mh_alpham_path.contains_points(
np.stack((np.array(self.t['FE_H']),
np.array(self.t['ALPHA_FE']))).T)
if low_alpha:
return low_alpha_mask
else:
            return (~low_alpha_mask) & (self.t['FE_H'] > -1)
apogee = APOGEEDataset(apogee_parent_filename)
galah = GALAHDataset(galah_parent_filename)
teff_ref = -382.5 * apogee.t['FE_H'] + 4607
rc_logg_max = 0.0018 * (apogee.t['TEFF'] - teff_ref) + 2.4
datasets = {
'apogee-rgb-loalpha': apogee.filter({'LOGG': (1, 3.4),
'TEFF': (3500, 6500),
'FE_H': (-3, 1)},
low_alpha=True),
'apogee-rc-loalpha': apogee.filter({'LOGG': (1.9, rc_logg_max),
'TEFF': (4200, 5400),
'FE_H': (-3, 1)},
low_alpha=True),
'apogee-rgb-hialpha': apogee.filter({'LOGG': (1, 3.4),
'TEFF': (3500, 6500),
'FE_H': (-3, 1)},
low_alpha=False),
'apogee-ms-loalpha': apogee.filter({'LOGG': (3.75, 5),
'TEFF': (5000, 6000),
'FE_H': (-3, 1)},
low_alpha=True),
'galah-rgb-loalpha': galah.filter({'logg': (1, 3.5),
'teff': (3500, 5500),
'FE_H': (-3, 1)},
low_alpha=True),
'galah-ms-loalpha': galah.filter({'logg': (3.5, 5),
'teff': (5000, 6000),
'FE_H': (-3, 1)},
low_alpha=True)
}
# From visual inspection of the z-vz grid plots!
elem_names = {
'apogee-rgb-loalpha': ['FE_H', 'AL_FE', 'C_FE', 'MG_FE', 'MN_FE', 'NI_FE',
'N_FE', 'O_FE', 'P_FE', 'SI_FE'],
'apogee-ms-loalpha': ['FE_H', 'AL_FE', 'C_FE', 'MG_FE', 'MN_FE', 'NI_FE',
'N_FE', 'O_FE', 'P_FE', 'SI_FE', 'TI_FE'],
'galah-rgb-loalpha': ['FE_H', 'AL_FE', 'BA_FE', 'CA_FE', 'CO_FE', 'CU_FE',
'MG_FE', 'MN_FE', 'NA_FE', 'O_FE', 'SC_FE', 'Y_FE',
'ZN_FE'],
'galah-ms-loalpha': ['FE_H', 'AL_FE', 'CA_FE', 'K_FE', 'MG_FE', 'MN_FE',
'NA_FE', 'SC_FE', 'TI_FE', 'Y_FE']
}
elem_names['apogee-rgb-hialpha'] = elem_names['apogee-rgb-loalpha']
elem_names['apogee-rc-loalpha'] = elem_names['apogee-rgb-loalpha']
for name in datasets:
for path in [plot_path, cache_path]:
this_path = path / name
this_path.mkdir(exist_ok=True)
| [
"matplotlib.path.Path",
"pyia.GaiaData",
"astropy.table.QTable.read",
"numpy.array",
"numpy.stack",
"numpy.sum",
"astropy.table.QTable",
"astropy.units.Quantity"
] | [((5365, 5504), 'numpy.array', 'np.array', (['[[0.6, -0.05], [0.6, 0.04], [0.15, 0.04], [-0.5, 0.13], [-0.9, 0.13], [-1.0,\n 0.07], [-0.2, -0.1], [0.2, -0.1], [0.6, -0.05]]'], {}), '([[0.6, -0.05], [0.6, 0.04], [0.15, 0.04], [-0.5, 0.13], [-0.9, \n 0.13], [-1.0, 0.07], [-0.2, -0.1], [0.2, -0.1], [0.6, -0.05]])\n', (5373, 5504), True, 'import numpy as np\n'), ((7066, 7207), 'numpy.array', 'np.array', (['[[0.6, -0.01], [0.6, 0.08], [0.15, 0.08], [-0.5, 0.17], [-0.9, 0.17], [-1.0,\n 0.11], [-0.2, -0.11], [0.2, -0.11], [0.6, -0.03]]'], {}), '([[0.6, -0.01], [0.6, 0.08], [0.15, 0.08], [-0.5, 0.17], [-0.9, \n 0.17], [-1.0, 0.11], [-0.2, -0.11], [0.2, -0.11], [0.6, -0.03]])\n', (7074, 7207), True, 'import numpy as np\n'), ((1716, 1732), 'pyia.GaiaData', 'GaiaData', (['self.t'], {}), '(self.t)\n', (1724, 1732), False, 'from pyia import GaiaData\n'), ((1856, 1883), 'astropy.units.Quantity', 'u.Quantity', (['self.t[rv_name]'], {}), '(self.t[rv_name])\n', (1866, 1883), True, 'import astropy.units as u\n'), ((6519, 6551), 'matplotlib.path.Path', 'Path', (['self._mh_alpham_nodes[:-1]'], {}), '(self._mh_alpham_nodes[:-1])\n', (6523, 6551), False, 'from matplotlib.path import Path\n'), ((7637, 7664), 'matplotlib.path.Path', 'Path', (['self._mh_alpham_nodes'], {}), '(self._mh_alpham_nodes)\n', (7641, 7664), False, 'from matplotlib.path import Path\n'), ((899, 930), 'astropy.table.QTable.read', 'at.QTable.read', (['filename_or_tbl'], {}), '(filename_or_tbl)\n', (913, 930), True, 'import astropy.table as at\n'), ((966, 992), 'astropy.table.QTable', 'at.QTable', (['filename_or_tbl'], {}), '(filename_or_tbl)\n', (975, 992), True, 'import astropy.table as at\n'), ((5970, 5995), 'numpy.array', 'np.array', (['[9, 18, 24, 26]'], {}), '([9, 18, 24, 26])\n', (5978, 5995), True, 'import numpy as np\n'), ((6027, 6045), 'numpy.array', 'np.array', (['[10, 18]'], {}), '([10, 18])\n', (6035, 6045), True, 'import numpy as np\n'), ((6078, 6119), 'numpy.array', 'np.array', (['[9, 18, 20, 21, 22, 23, 24, 26]'], {}), '([9, 18, 20, 21, 22, 23, 24, 26])\n', (6086, 6119), True, 'import numpy as np\n'), ((6152, 6166), 'numpy.array', 'np.array', (['[10]'], {}), '([10])\n', (6160, 6166), True, 'import numpy as np\n'), ((6199, 6220), 'numpy.array', 'np.array', (['[5, 14, 15]'], {}), '([5, 14, 15])\n', (6207, 6220), True, 'import numpy as np\n'), ((4592, 4613), 'astropy.units.Quantity', 'u.Quantity', (['self.t[k]'], {}), '(self.t[k])\n', (4602, 4613), True, 'import astropy.units as u\n'), ((5641, 5658), 'numpy.array', 'np.array', (['[7, 23]'], {}), '([7, 23])\n', (5649, 5658), True, 'import numpy as np\n'), ((6621, 6665), 'numpy.stack', 'np.stack', (["(self.t['M_H'], self.t['ALPHA_M'])"], {}), "((self.t['M_H'], self.t['ALPHA_M']))\n", (6629, 6665), True, 'import numpy as np\n'), ((6374, 6391), 'numpy.sum', 'np.sum', (['(2 ** bits)'], {}), '(2 ** bits)\n', (6380, 6391), True, 'import numpy as np\n'), ((7744, 7768), 'numpy.array', 'np.array', (["self.t['FE_H']"], {}), "(self.t['FE_H'])\n", (7752, 7768), True, 'import numpy as np\n'), ((7792, 7820), 'numpy.array', 'np.array', (["self.t['ALPHA_FE']"], {}), "(self.t['ALPHA_FE'])\n", (7800, 7820), True, 'import numpy as np\n')] |
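A toy illustration of the abundance-ratio arithmetic in `get_elem_ratio` above — ratios against hydrogen add [Fe/H], while ratios between two elements difference the tabulated [X/Fe] columns (the numbers are made up):

import astropy.table as at

t = at.QTable({'FE_H': [-0.10, 0.20],
           'MG_FE': [0.05, 0.00],
           'O_FE': [0.10, -0.02]})
mg_h = t['MG_FE'] + t['FE_H']    # [Mg/H] = [Mg/Fe] + [Fe/H]
mg_o = t['MG_FE'] - t['O_FE']    # [Mg/O] = [Mg/Fe] - [O/Fe]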
"""Test converting an image to a pyramid.
"""
import numpy as np
import napari
from skimage.transform import pyramid_gaussian
image = np.random.random((2000, 2000))
pyramid = list(pyramid_gaussian(image, downscale=2, multichannel=False))[:-4]
print(image.shape)
print(len(pyramid))
print([p.shape for p in pyramid])
with napari.gui_qt():
viewer = napari.Viewer()
viewer.add_image(pyramid, contrast_limits=[0, 1])
| [
"numpy.random.random",
"napari.gui_qt",
"napari.Viewer",
"skimage.transform.pyramid_gaussian"
] | [((136, 166), 'numpy.random.random', 'np.random.random', (['(2000, 2000)'], {}), '((2000, 2000))\n', (152, 166), True, 'import numpy as np\n'), ((325, 340), 'napari.gui_qt', 'napari.gui_qt', ([], {}), '()\n', (338, 340), False, 'import napari\n'), ((355, 370), 'napari.Viewer', 'napari.Viewer', ([], {}), '()\n', (368, 370), False, 'import napari\n'), ((182, 238), 'skimage.transform.pyramid_gaussian', 'pyramid_gaussian', (['image'], {'downscale': '(2)', 'multichannel': '(False)'}), '(image, downscale=2, multichannel=False)\n', (198, 238), False, 'from skimage.transform import pyramid_gaussian\n')] |
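Each Gaussian pyramid level halves the previous one, so the pyramid above runs 2000 -> 1000 -> 500 -> ... down to the [:-4] truncation. A quick shape check without the GUI, using the same call on a smaller image (the multichannel keyword matches older scikit-image releases; newer ones use channel_axis instead):

import numpy as np
from skimage.transform import pyramid_gaussian

small = np.random.random((256, 256))
levels = list(pyramid_gaussian(small, downscale=2, multichannel=False))
print([lvl.shape for lvl in levels])   # (256, 256), (128, 128), ..., (1, 1)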
import sys
import time
import collections
import numpy as np
from collections import OrderedDict
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / (.0001 + self.count)
def __str__(self):
"""String representation for logging
"""
# for values that should be recorded exactly e.g. iteration number
if self.count == 0:
return str(self.val)
# for stats
return '%.4f (%.4f)' % (self.val, self.avg)
class LogCollector(object):
"""A collection of logging objects that can change from train to val"""
def __init__(self):
# to keep the order of logged variables deterministic
self.meters = OrderedDict()
def update(self, k, v, n=1):
# create a new meter if previously not recorded
if k not in self.meters:
self.meters[k] = AverageMeter()
self.meters[k].update(v, n)
def __str__(self):
"""Concatenate the meters in one log line
"""
s = ''
for i, (k, v) in enumerate(self.meters.items()):
if i > 0:
s += ' '
s += k + ' ' + str(v)
return s
def tb_log(self, tb_logger, prefix='', step=None):
"""Log using tensorboard
"""
for k, v in self.meters.items():
tb_logger.log_value(prefix + k, v.val, step=step)
def read_dict(filepath):
f = open(filepath,'r')
a = f.read()
dict_data = eval(a)
f.close()
return dict_data
def write_dict(filepath, dict_data):
f = open(filepath,'w')
f.write(str(dict_data))
f.close()
# get image id from caption id
def getVideoId(cap_id):
vid_id = cap_id.split('#')[0]
if vid_id.endswith('.jpg') or vid_id.endswith('.mp4'):
vid_id = vid_id[:-4]
return vid_id
class Progbar(object):
"""Displays a progress bar.
# Arguments
target: Total number of steps expected, None if unknown.
width: Progress bar width on screen.
verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over time. Metrics in this list
will be displayed as-is. All others will be averaged
by the progbar before display.
interval: Minimum visual progress update interval (in seconds).
"""
def __init__(self, target, width=30, verbose=1, interval=0.05,
stateful_metrics=None):
self.target = target
self.width = width
self.verbose = verbose
self.interval = interval
if stateful_metrics:
self.stateful_metrics = set(stateful_metrics)
else:
self.stateful_metrics = set()
self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and
sys.stdout.isatty()) or
'ipykernel' in sys.modules)
self._total_width = 0
self._seen_so_far = 0
self._values = collections.OrderedDict()
self._start = time.time()
self._last_update = 0
def update(self, current, values=None):
"""Updates the progress bar.
# Arguments
current: Index of current step.
values: List of tuples:
`(name, value_for_last_step)`.
If `name` is in `stateful_metrics`,
`value_for_last_step` will be displayed as-is.
Else, an average of the metric over time will be displayed.
"""
values = values or []
for k, v in values:
if k not in self.stateful_metrics:
if k not in self._values:
self._values[k] = [v * (current - self._seen_so_far),
current - self._seen_so_far]
else:
self._values[k][0] += v * (current - self._seen_so_far)
self._values[k][1] += (current - self._seen_so_far)
else:
self._values[k] = v
self._seen_so_far = current
now = time.time()
info = ' - %.0fs' % (now - self._start)
if self.verbose == 1:
if (now - self._last_update < self.interval and
self.target is not None and current < self.target):
return
prev_total_width = self._total_width
if self._dynamic_display:
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
if self.target is not None:
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%d [' % (numdigits, self.target)
bar = barstr % current
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
else:
bar = '%7d/Unknown' % current
self._total_width = len(bar)
sys.stdout.write(bar)
if current:
time_per_unit = (now - self._start) / current
else:
time_per_unit = 0
if self.target is not None and current < self.target:
eta = time_per_unit * (self.target - current)
if eta > 3600:
eta_format = '%d:%02d:%02d' % (eta // 3600, (eta % 3600) // 60, eta % 60)
elif eta > 60:
eta_format = '%d:%02d' % (eta // 60, eta % 60)
else:
eta_format = '%ds' % eta
info = ' - ETA: %s' % eta_format
else:
if time_per_unit >= 1:
info += ' %.0fs/step' % time_per_unit
elif time_per_unit >= 1e-3:
info += ' %.0fms/step' % (time_per_unit * 1e3)
else:
info += ' %.0fus/step' % (time_per_unit * 1e6)
for k in self._values:
info += ' - %s:' % k
if isinstance(self._values[k], list):
avg = np.mean(
self._values[k][0] / max(1, self._values[k][1]))
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
info += ' %s' % self._values[k]
self._total_width += len(info)
if prev_total_width > self._total_width:
info += (' ' * (prev_total_width - self._total_width))
if self.target is not None and current >= self.target:
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
elif self.verbose == 2:
if self.target is None or current >= self.target:
for k in self._values:
info += ' - %s:' % k
avg = np.mean(
self._values[k][0] / max(1, self._values[k][1]))
                    if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
self._last_update = now
def add(self, n, values=None):
self.update(self._seen_so_far + n, values)
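
# Hedged usage sketch (not part of the original module): drives the bar
# through 100 steps with a fake 'loss' metric; relies only on the `time`
# import already used above.
if __name__ == '__main__':
    bar = Progbar(target=100, width=30, verbose=1)
    for step in range(1, 101):
        time.sleep(0.01)  # simulate work so the ETA estimate is visible
        bar.update(step, values=[('loss', 1.0 / step)])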
| [
"collections.OrderedDict",
"numpy.log10",
"sys.stdout.isatty",
"sys.stdout.flush",
"time.time",
"sys.stdout.write"
] | [((1042, 1055), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1053, 1055), False, 'from collections import OrderedDict\n'), ((3470, 3495), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (3493, 3495), False, 'import collections\n'), ((3519, 3530), 'time.time', 'time.time', ([], {}), '()\n', (3528, 3530), False, 'import time\n'), ((4585, 4596), 'time.time', 'time.time', ([], {}), '()\n', (4594, 4596), False, 'import time\n'), ((5853, 5874), 'sys.stdout.write', 'sys.stdout.write', (['bar'], {}), '(bar)\n', (5869, 5874), False, 'import sys\n'), ((7585, 7607), 'sys.stdout.write', 'sys.stdout.write', (['info'], {}), '(info)\n', (7601, 7607), False, 'import sys\n'), ((7621, 7639), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7637, 7639), False, 'import sys\n'), ((3298, 3317), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (3315, 3317), False, 'import sys\n'), ((4943, 4986), 'sys.stdout.write', 'sys.stdout.write', (["('\\x08' * prev_total_width)"], {}), "('\\x08' * prev_total_width)\n", (4959, 4986), False, 'import sys\n'), ((5002, 5024), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (5018, 5024), False, 'import sys\n'), ((5061, 5083), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (5077, 5083), False, 'import sys\n'), ((8136, 8158), 'sys.stdout.write', 'sys.stdout.write', (['info'], {}), '(info)\n', (8152, 8158), False, 'import sys\n'), ((8176, 8194), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8192, 8194), False, 'import sys\n'), ((5169, 5190), 'numpy.log10', 'np.log10', (['self.target'], {}), '(self.target)\n', (5177, 5190), True, 'import numpy as np\n')] |
'''
Created on Jan 16, 2015
@author: <NAME> <<EMAIL>>
Module that monkey patches classes in other modules with equivalent, but faster
methods.
'''
from __future__ import division
import numba
import numpy as np
# these methods are used in getattr calls
from . import lib_exp_numeric
from utils.numba.patcher import NumbaPatcher, check_return_value_approx
NUMBA_NOPYTHON = True #< globally decide whether we use the nopython mode
NUMBA_NOGIL = True
# initialize the numba patcher and add methods one by one
numba_patcher = NumbaPatcher(module=lib_exp_numeric)
@numba.jit(nopython=NUMBA_NOPYTHON, nogil=NUMBA_NOGIL)
def LibraryExponentialNumeric_receptor_activity_numba(steps, S_ni, c_means,
alpha, count_a):
""" calculate the mutual information using a monte carlo strategy. The
number of steps is given by the model parameter 'monte_carlo_steps' """
Nr, Ns = S_ni.shape
# sample mixtures according to the probabilities of finding
# substrates
for _ in range(steps):
# choose a mixture vector according to substrate probabilities
alpha[:] = 0 #< activity pattern of this mixture
for i in range(Ns):
ci = np.random.exponential() * c_means[i]
for a in range(Nr):
alpha[a] += S_ni[a, i] * ci
# calculate the activity pattern id
for a in range(Nr):
if alpha[a] >= 1:
count_a[a] += 1
def LibraryExponentialNumeric_receptor_activity(self):
""" calculate the mutual information by constructing all possible
mixtures """
count_a = np.zeros(self.Nr, np.uint32)
steps = self._sample_steps
# call the jitted function
LibraryExponentialNumeric_receptor_activity_numba(
self._sample_steps, self.sens_mat,
self.concentration_means, #< c_means
np.empty(self.Nr, np.double), #< alpha
count_a
)
return count_a / steps
numba_patcher.register_method(
'LibraryExponentialNumeric.receptor_activity',
LibraryExponentialNumeric_receptor_activity,
check_return_value_approx
)
@numba.jit(nopython=NUMBA_NOPYTHON, nogil=NUMBA_NOGIL)
def LibraryExponentialNumeric_mutual_information_numba(steps, S_ni, c_means,
alpha, prob_a):
""" calculate the mutual information using a monte carlo strategy. The
number of steps is given by the model parameter 'monte_carlo_steps' """
Nr, Ns = S_ni.shape
# sample mixtures according to the probabilities of finding
# substrates
for _ in range(steps):
# choose a mixture vector according to substrate probabilities
alpha[:] = 0 #< activity pattern of this mixture
for i in range(Ns):
ci = np.random.exponential() * c_means[i]
for a in range(Nr):
alpha[a] += S_ni[a, i] * ci
# calculate the activity pattern id
a_id, base = 0, 1
for a in range(Nr):
if alpha[a] >= 1:
a_id += base
base *= 2
# increment counter for this output
prob_a[a_id] += 1
# normalize the probabilities by the number of steps we did
for k in range(len(prob_a)):
prob_a[k] /= steps
# calculate the mutual information from the observed probabilities
MI = 0
for pa in prob_a:
if pa > 0:
MI -= pa*np.log2(pa)
return MI
def LibraryExponentialNumeric_mutual_information(self, ret_prob_activity=False):
""" calculate the mutual information by constructing all possible
mixtures """
prob_a = np.zeros(2**self.Nr)
# call the jitted function
MI = LibraryExponentialNumeric_mutual_information_numba(
self._sample_steps,
self.sens_mat,
self.concentration_means, #< c_means
np.empty(self.Nr, np.double), #< alpha
prob_a
)
if ret_prob_activity:
return MI, prob_a
else:
return MI
numba_patcher.register_method(
'LibraryExponentialNumeric.mutual_information',
LibraryExponentialNumeric_mutual_information,
check_return_value_approx
)
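
# Hedged reference sketch (pure numpy, not registered with the patcher):
# the Monte Carlo estimate above reduces to the entropy of the activity
# pattern distribution, MI = -sum_a p(a) * log2(p(a)).
def _mutual_information_from_prob_a(prob_a):
    p = np.asarray(prob_a)
    p = p[p > 0]  # 0 * log(0) contributes nothing
    return -np.sum(p * np.log2(p))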
| [
"numpy.random.exponential",
"utils.numba.patcher.NumbaPatcher",
"numpy.zeros",
"numba.jit",
"numpy.empty",
"numpy.log2"
] | [((530, 566), 'utils.numba.patcher.NumbaPatcher', 'NumbaPatcher', ([], {'module': 'lib_exp_numeric'}), '(module=lib_exp_numeric)\n', (542, 566), False, 'from utils.numba.patcher import NumbaPatcher, check_return_value_approx\n'), ((571, 624), 'numba.jit', 'numba.jit', ([], {'nopython': 'NUMBA_NOPYTHON', 'nogil': 'NUMBA_NOGIL'}), '(nopython=NUMBA_NOPYTHON, nogil=NUMBA_NOGIL)\n', (580, 624), False, 'import numba\n'), ((2164, 2217), 'numba.jit', 'numba.jit', ([], {'nopython': 'NUMBA_NOPYTHON', 'nogil': 'NUMBA_NOGIL'}), '(nopython=NUMBA_NOPYTHON, nogil=NUMBA_NOGIL)\n', (2173, 2217), False, 'import numba\n'), ((1657, 1685), 'numpy.zeros', 'np.zeros', (['self.Nr', 'np.uint32'], {}), '(self.Nr, np.uint32)\n', (1665, 1685), True, 'import numpy as np\n'), ((3713, 3735), 'numpy.zeros', 'np.zeros', (['(2 ** self.Nr)'], {}), '(2 ** self.Nr)\n', (3721, 3735), True, 'import numpy as np\n'), ((1902, 1930), 'numpy.empty', 'np.empty', (['self.Nr', 'np.double'], {}), '(self.Nr, np.double)\n', (1910, 1930), True, 'import numpy as np\n'), ((3934, 3962), 'numpy.empty', 'np.empty', (['self.Nr', 'np.double'], {}), '(self.Nr, np.double)\n', (3942, 3962), True, 'import numpy as np\n'), ((1239, 1262), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (1260, 1262), True, 'import numpy as np\n'), ((2833, 2856), 'numpy.random.exponential', 'np.random.exponential', ([], {}), '()\n', (2854, 2856), True, 'import numpy as np\n'), ((3495, 3506), 'numpy.log2', 'np.log2', (['pa'], {}), '(pa)\n', (3502, 3506), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/4/25 10:31
# @Author : Darren
# @Site :
# @File : data_generator.py
# @Software: PyCharm
import cv2
import numpy as np
import random
import os
from config.config import *
def generator(img_path):
    # read the input image first
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
img = cv2.resize(img, (180, 32))
    # N pairs of base control points (top and bottom edge)
N = 5
points = []
dx = int(180 / (N - 1))
for i in range(2 * N):
points.append((dx * i, 4))
points.append((dx * i, 36))
    # pad a replicated border around the image
img = cv2.copyMakeBorder(img, 4, 4, 0, 0, cv2.BORDER_REPLICATE)
    # draw green circles at the control points (kept commented out)
# for point in points:
# cv2.circle(img, point, 1, (0, 255, 0), 2)
tps = cv2.createThinPlateSplineShapeTransformer()
sourceshape = np.array(points, np.int32)
sourceshape = sourceshape.reshape(1, -1, 2)
    # one identity match per control point, so the source and target shapes
    # stay index-aligned for estimateTransformation
    matches = []
    for i in range(2 * N):
        matches.append(cv2.DMatch(i, i, 0))
    # randomly jitter every control point within a small padding window
    newpoints = []
    PADDINGSIZ = 10
    for i in range(2 * N):
        nx = points[i][0] + random.randint(0, PADDINGSIZ) - PADDINGSIZ / 2
        ny = points[i][1] + random.randint(0, PADDINGSIZ) - PADDINGSIZ / 2
        newpoints.append((nx, ny))
print(points, newpoints)
targetshape = np.array(newpoints, np.int32)
targetshape = targetshape.reshape(1, -1, 2)
tps.estimateTransformation(sourceshape, targetshape, matches)
img = tps.warpImage(img)
# path process
(path, file_name) = os.path.split(img_path)
(file, ext) = os.path.splitext(file_name)
path = str(file + '_' + 'gen' + ext)
# save img
cv2.imwrite(fake_tps_image_path + path, img)
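
# Hedged usage note: `fake_tps_image_path` comes from config.config and must
# point at an existing directory; e.g. generator(real_image_path + 'img_21.jpg')
# writes a TPS-warped copy named 'img_21_gen.jpg' into that directory.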
def perspective_transform(path):
img = cv2.imread(path, 1)
rows, cols, channels = img.shape
scale_x = 0.3
scale_y = 1 - scale_x
p1 = np.float32([[0, 0], [cols, 0], [cols, rows], [0, rows]])
    # top, bottom, left and right views
points = [np.float32([[int(cols / 3), int(rows / 2)], [int(cols * 2 / 3), int(rows / 2)], [cols, rows], [0, rows]]),
np.float32(
[[0, 0], [cols, 0], [int(cols * 2 / 3), int(rows * 2 / 3)], [int(cols / 3), int(rows * 2 / 3)]]),
np.float32([[int(cols / 4), int(rows / 3)], [cols, 0], [cols, rows], [int(cols / 4), int(rows * 2 / 3)]]),
np.float32(
[[0, 0], [int(cols * 3 / 4), int(rows / 3)], [int(cols * 3 / 4), int(rows * 2 / 3)], [0, rows]])]
mark = ['up', 'down', 'left', 'right']
i = 0
# path process
(path, file_name) = os.path.split(path)
(file, ext) = os.path.splitext(file_name)
for pts in points:
M = cv2.getPerspectiveTransform(p1, pts)
dst = cv2.warpPerspective(img, M, (cols, rows))
path = str(file + '_' + mark[i] + ext)
i += 1
# save img
cv2.imwrite(fake_transform_image_path + path, dst)
print(fake_transform_image_path + path)
# image_size = 288
#
#
# def order_points(pts):
# center = pts.sum(axis=0) / 4
# deltaxy = pts - np.tile(center, (4, 1))
# rad = np.arctan2(deltaxy[:, 1], deltaxy[:, 0])
# sortidx = np.argsort(rad)
# return pts[sortidx]
#
#
# def four_point_transform(image, pts, w, h):
# src = order_points(pts)
# dst = np.array([[0, 0], [w, 0], [w, h], [0, h]], dtype="float32")
# M = cv2.getPerspectiveTransform(src, dst)
# warped = cv2.warpPerspective(image, M, (w, h))
# return warped
def dataloader(file_path):
lists = os.listdir(file_path)
    for fname in lists:
        if "train" in file_path:
            generator(real_image_path + fname)
        else:
            perspective_transform(file_path + fname)
if __name__ == '__main__':
# perspective_transform(real_image_path + 'img_21.jpg')
dataloader(fake_tps_image_path)
# dataloader(fake_tps_image_path)
# perspective_transform(fake_image_path + 'img_21_gen.jpg')
| [
"cv2.createThinPlateSplineShapeTransformer",
"cv2.imwrite",
"os.listdir",
"random.randint",
"cv2.getPerspectiveTransform",
"cv2.copyMakeBorder",
"os.path.splitext",
"os.path.split",
"numpy.array",
"cv2.warpPerspective",
"cv2.resize",
"cv2.DMatch",
"cv2.imread",
"numpy.float32"
] | [((314, 352), 'cv2.imread', 'cv2.imread', (['img_path', 'cv2.IMREAD_COLOR'], {}), '(img_path, cv2.IMREAD_COLOR)\n', (324, 352), False, 'import cv2\n'), ((364, 390), 'cv2.resize', 'cv2.resize', (['img', '(180, 32)'], {}), '(img, (180, 32))\n', (374, 390), False, 'import cv2\n'), ((589, 646), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['img', '(4)', '(4)', '(0)', '(0)', 'cv2.BORDER_REPLICATE'], {}), '(img, 4, 4, 0, 0, cv2.BORDER_REPLICATE)\n', (607, 646), False, 'import cv2\n'), ((751, 794), 'cv2.createThinPlateSplineShapeTransformer', 'cv2.createThinPlateSplineShapeTransformer', ([], {}), '()\n', (792, 794), False, 'import cv2\n'), ((816, 842), 'numpy.array', 'np.array', (['points', 'np.int32'], {}), '(points, np.int32)\n', (824, 842), True, 'import numpy as np\n'), ((1304, 1333), 'numpy.array', 'np.array', (['newpoints', 'np.int32'], {}), '(newpoints, np.int32)\n', (1312, 1333), True, 'import numpy as np\n'), ((1525, 1548), 'os.path.split', 'os.path.split', (['img_path'], {}), '(img_path)\n', (1538, 1548), False, 'import os\n'), ((1568, 1595), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (1584, 1595), False, 'import os\n'), ((1659, 1703), 'cv2.imwrite', 'cv2.imwrite', (['(fake_tps_image_path + path)', 'img'], {}), '(fake_tps_image_path + path, img)\n', (1670, 1703), False, 'import cv2\n'), ((1753, 1772), 'cv2.imread', 'cv2.imread', (['path', '(1)'], {}), '(path, 1)\n', (1763, 1772), False, 'import cv2\n'), ((1867, 1923), 'numpy.float32', 'np.float32', (['[[0, 0], [cols, 0], [cols, rows], [0, rows]]'], {}), '([[0, 0], [cols, 0], [cols, rows], [0, rows]])\n', (1877, 1923), True, 'import numpy as np\n'), ((2579, 2598), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (2592, 2598), False, 'import os\n'), ((2618, 2645), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (2634, 2645), False, 'import os\n'), ((3549, 3570), 'os.listdir', 'os.listdir', (['file_path'], {}), '(file_path)\n', (3559, 3570), False, 'import os\n'), ((2683, 2719), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['p1', 'pts'], {}), '(p1, pts)\n', (2710, 2719), False, 'import cv2\n'), ((2735, 2776), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'M', '(cols, rows)'], {}), '(img, M, (cols, rows))\n', (2754, 2776), False, 'import cv2\n'), ((2870, 2920), 'cv2.imwrite', 'cv2.imwrite', (['(fake_transform_image_path + path)', 'dst'], {}), '(fake_transform_image_path + path, dst)\n', (2881, 2920), False, 'import cv2\n'), ((965, 984), 'cv2.DMatch', 'cv2.DMatch', (['i', 'i', '(0)'], {}), '(i, i, 0)\n', (975, 984), False, 'import cv2\n'), ((1096, 1125), 'random.randint', 'random.randint', (['(0)', 'PADDINGSIZ'], {}), '(0, PADDINGSIZ)\n', (1110, 1125), False, 'import random\n'), ((1172, 1201), 'random.randint', 'random.randint', (['(0)', 'PADDINGSIZ'], {}), '(0, PADDINGSIZ)\n', (1186, 1201), False, 'import random\n')] |
"""Test mesh """
import numpy as np
def test_empty_model(mapdl):
mapdl.clear()
assert mapdl.mesh.nnum.size == 0
assert mapdl.mesh.enum.size == 0
assert mapdl.mesh.n_elem == 0
assert mapdl.mesh.n_node == 0
def test_mesh_attributes(mapdl, cube_solve):
mapdl.allsel()
assert mapdl.mesh.n_node == mapdl.get("__par__", "node", "0", "count")
assert mapdl.mesh.n_elem == mapdl.get("__par__", "elem", "0", "count")
assert len(mapdl.mesh.nnum) == mapdl.get("__par__", "node", "0", "count")
assert len(mapdl.mesh.enum) == mapdl.get("__par__", "elem", "0", "count")
mapdl.dim("par", "", len(mapdl.mesh.nnum))
mapdl.starvget("par", "NODE", "0", "NLIST")
assert np.allclose(mapdl.parameters["par"].flatten(), mapdl.mesh.nnum)
mapdl.dim("par", "", len(mapdl.mesh.enum))
mapdl.starvget("par", "ELEM", "0", "ELIST")
assert np.allclose(mapdl.parameters["par"].flatten(), mapdl.mesh.enum)
def test_elem_num_in_mesh_elem(mapdl, cube_solve):
enums = np.array([each[8] for each in mapdl.mesh.elem])
assert np.allclose(mapdl.mesh.enum, enums)
| [
"numpy.array",
"numpy.allclose"
] | [((1009, 1056), 'numpy.array', 'np.array', (['[each[8] for each in mapdl.mesh.elem]'], {}), '([each[8] for each in mapdl.mesh.elem])\n', (1017, 1056), True, 'import numpy as np\n'), ((1068, 1103), 'numpy.allclose', 'np.allclose', (['mapdl.mesh.enum', 'enums'], {}), '(mapdl.mesh.enum, enums)\n', (1079, 1103), True, 'import numpy as np\n')] |
# Third-party modules
import pytest
import numpy as np
import matplotlib.pyplot as plt
# kalmus module being tested
import kalmus.utils.visualization_utils as visualization_utils
def test_show_color():
color = np.array([1, 2, 3])
color_image = visualization_utils.show_color(color, return_color=True)
assert color_image.shape == (30, 30, 3)
def test_show_colors_in_sequence():
color_sequence = np.array([[1, 2, 3], [4, 5, 6]])
color_sequence_image = visualization_utils.show_colors_in_sequence(color_sequence, return_color_sequence=True)
assert color_sequence_image.shape == (30, 30 * color_sequence.shape[0], 3)
def test_show_color_matrix(get_test_color_image):
color_2d_image = get_test_color_image
color_matrix = visualization_utils.show_color_matrix(color_2d_image, return_matrix=True, mode="padding")
assert color_matrix.shape == (color_2d_image.shape[1], color_2d_image.shape[0] + 1, color_2d_image.shape[2])
color_matrix = visualization_utils.show_color_matrix(color_2d_image, return_matrix=True, mode="truncate")
assert color_matrix.shape == (color_2d_image.shape[1], color_2d_image.shape[0], color_2d_image.shape[2])
figure = visualization_utils.show_color_matrix(color_2d_image, return_figure=True)
assert isinstance(figure, plt.Figure)
def test_show_colors_in_cube(get_test_color_image):
flatten_image = get_test_color_image.reshape(-1, 3)
sampled_colors = visualization_utils.show_colors_in_cube(flatten_image, sampling=-1, return_sampled_colors=True)
assert sampled_colors.size == flatten_image.size
samples = 100
sampled_colors = visualization_utils.show_colors_in_cube(flatten_image, sampling=samples, return_sampled_colors=True)
assert sampled_colors.size == samples * sampled_colors.shape[-1]
fig, ax = visualization_utils.show_colors_in_cube(flatten_image, sampling=samples, return_figure=True)
assert isinstance(fig, plt.Figure)
assert isinstance(ax, plt.Axes)
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_show_high_contrast_region(get_test_color_image):
color_image = get_test_color_image
image_with_high_contrast_region = visualization_utils.show_high_contrast_region(color_image,
return_region_image=True)
assert color_image.shape == image_with_high_contrast_region.shape
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_show_low_contrast_region(get_test_color_image):
color_image = get_test_color_image
image_with_low_contrast_region = visualization_utils.show_low_contrast_region(color_image,
return_region_image=True)
assert color_image.shape == image_with_low_contrast_region.shape
def test_extract_region_with_index(get_test_color_image):
color_image = get_test_color_image
# Make an artificial label mask with three regions (indexed with 0, 1, 2)
mask = np.zeros(shape=color_image.shape[:2])
mask[:, mask.shape[1] // 3: mask.shape[1] * 2 // 3] = 1
mask[:, mask.shape[1] * 2 // 3:] = 2
region_index = 1
image_with_extracted_region_only = visualization_utils.extract_region_with_index(color_image, region_index, mask)
assert image_with_extracted_region_only.shape == color_image.shape
assert np.all(image_with_extracted_region_only[mask == region_index] == color_image[mask == region_index])
| [
"kalmus.utils.visualization_utils.show_color_matrix",
"pytest.mark.filterwarnings",
"kalmus.utils.visualization_utils.show_low_contrast_region",
"kalmus.utils.visualization_utils.show_colors_in_sequence",
"kalmus.utils.visualization_utils.extract_region_with_index",
"numpy.array",
"numpy.zeros",
"kalm... | [((1984, 2040), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::DeprecationWarning"""'], {}), "('ignore::DeprecationWarning')\n", (2010, 2040), False, 'import pytest\n'), ((2418, 2474), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore::DeprecationWarning"""'], {}), "('ignore::DeprecationWarning')\n", (2444, 2474), False, 'import pytest\n'), ((217, 236), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (225, 236), True, 'import numpy as np\n'), ((255, 311), 'kalmus.utils.visualization_utils.show_color', 'visualization_utils.show_color', (['color'], {'return_color': '(True)'}), '(color, return_color=True)\n', (285, 311), True, 'import kalmus.utils.visualization_utils as visualization_utils\n'), ((415, 447), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (423, 447), True, 'import numpy as np\n'), ((475, 566), 'kalmus.utils.visualization_utils.show_colors_in_sequence', 'visualization_utils.show_colors_in_sequence', (['color_sequence'], {'return_color_sequence': '(True)'}), '(color_sequence,\n return_color_sequence=True)\n', (518, 566), True, 'import kalmus.utils.visualization_utils as visualization_utils\n'), ((755, 848), 'kalmus.utils.visualization_utils.show_color_matrix', 'visualization_utils.show_color_matrix', (['color_2d_image'], {'return_matrix': '(True)', 'mode': '"""padding"""'}), "(color_2d_image, return_matrix=True,\n mode='padding')\n", (792, 848), True, 'import kalmus.utils.visualization_utils as visualization_utils\n'), ((978, 1072), 'kalmus.utils.visualization_utils.show_color_matrix', 'visualization_utils.show_color_matrix', (['color_2d_image'], {'return_matrix': '(True)', 'mode': '"""truncate"""'}), "(color_2d_image, return_matrix=True,\n mode='truncate')\n", (1015, 1072), True, 'import kalmus.utils.visualization_utils as visualization_utils\n'), ((1192, 1265), 'kalmus.utils.visualization_utils.show_color_matrix', 'visualization_utils.show_color_matrix', (['color_2d_image'], {'return_figure': '(True)'}), '(color_2d_image, return_figure=True)\n', (1229, 1265), True, 'import kalmus.utils.visualization_utils as visualization_utils\n'), ((1439, 1538), 'kalmus.utils.visualization_utils.show_colors_in_cube', 'visualization_utils.show_colors_in_cube', (['flatten_image'], {'sampling': '(-1)', 'return_sampled_colors': '(True)'}), '(flatten_image, sampling=-1,\n return_sampled_colors=True)\n', (1478, 1538), True, 'import kalmus.utils.visualization_utils as visualization_utils\n'), ((1628, 1732), 'kalmus.utils.visualization_utils.show_colors_in_cube', 'visualization_utils.show_colors_in_cube', (['flatten_image'], {'sampling': 'samples', 'return_sampled_colors': '(True)'}), '(flatten_image, sampling=samples,\n return_sampled_colors=True)\n', (1667, 1732), True, 'import kalmus.utils.visualization_utils as visualization_utils\n'), ((1813, 1909), 'kalmus.utils.visualization_utils.show_colors_in_cube', 'visualization_utils.show_colors_in_cube', (['flatten_image'], {'sampling': 'samples', 'return_figure': '(True)'}), '(flatten_image, sampling=samples,\n return_figure=True)\n', (1852, 1909), True, 'import kalmus.utils.visualization_utils as visualization_utils\n'), ((2176, 2264), 'kalmus.utils.visualization_utils.show_high_contrast_region', 'visualization_utils.show_high_contrast_region', (['color_image'], {'return_region_image': '(True)'}), '(color_image,\n return_region_image=True)\n', (2221, 2264), True, 'import kalmus.utils.visualization_utils as visualization_utils\n'), 
((2608, 2695), 'kalmus.utils.visualization_utils.show_low_contrast_region', 'visualization_utils.show_low_contrast_region', (['color_image'], {'return_region_image': '(True)'}), '(color_image,\n return_region_image=True)\n', (2652, 2695), True, 'import kalmus.utils.visualization_utils as visualization_utils\n'), ((3031, 3068), 'numpy.zeros', 'np.zeros', ([], {'shape': 'color_image.shape[:2]'}), '(shape=color_image.shape[:2])\n', (3039, 3068), True, 'import numpy as np\n'), ((3231, 3309), 'kalmus.utils.visualization_utils.extract_region_with_index', 'visualization_utils.extract_region_with_index', (['color_image', 'region_index', 'mask'], {}), '(color_image, region_index, mask)\n', (3276, 3309), True, 'import kalmus.utils.visualization_utils as visualization_utils\n'), ((3392, 3495), 'numpy.all', 'np.all', (['(image_with_extracted_region_only[mask == region_index] == color_image[mask ==\n region_index])'], {}), '(image_with_extracted_region_only[mask == region_index] ==\n color_image[mask == region_index])\n', (3398, 3495), True, 'import numpy as np\n')] |
from numpy.polynomial import Polynomial
def omega(roots):
"""
Finds w_n = (x-x_1) * (x-x_2) * ... * (x-x_n),
where roots = [x_1 ... x_n]
"""
result = 1
for x_i in roots:
cur_poly = Polynomial([-x_i, 1])
result *= cur_poly
return result
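
if __name__ == '__main__':
    # Illustrative check (not original code): (x - 1)(x - 2) = x**2 - 3x + 2,
    # i.e. coefficients [2, -3, 1] in numpy's low-to-high order.
    print(omega([1, 2]))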
| [
"numpy.polynomial.Polynomial"
] | [((215, 236), 'numpy.polynomial.Polynomial', 'Polynomial', (['[-x_i, 1]'], {}), '([-x_i, 1])\n', (225, 236), False, 'from numpy.polynomial import Polynomial\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
def scatter_nd_impl(data, indices, updates):
# type: (np.ndarray, np.ndarray, np.ndarray) -> np.ndarray
# Check tensor shapes
assert indices.shape[-1] <= len(data.shape)
assert updates.shape == indices.shape[:-1] + data.shape[indices.shape[-1]:]
# Compute output
output = np.copy(data)
for i in np.ndindex(indices.shape[:-1]):
# NOTE: The order of iteration in this loop is not specified.
# In particular, indices should not have duplicate entries: that is, if idx1 != idx2, then indices[idx1] != indices[idx2].
# This ensures that the output value does not depend on the iteration order.
output[indices[i]] = updates[i]
return output
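
# Illustrative 1-D sketch (values are made up, not part of the ONNX test data):
# with data=[1, 2, 3, 4], indices=[[1], [3]] and updates=[10, 20], each index
# row selects a slice of `data` to overwrite, giving output [1, 10, 3, 20].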
class ScatterND(Base):
@staticmethod
def export_scatternd(): # type: () -> None
node = onnx.helper.make_node(
'ScatterND',
inputs=['data', 'indices', 'updates'],
outputs=['y'],
)
data = np.array(
[[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
[[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]],
[[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)
indices = np.array([[0], [2]], dtype=np.int64)
updates = np.array(
[[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]], dtype=np.float32)
# Expecting output as np.array(
# [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
# [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]],
# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]],
# [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]], dtype=np.float32)
output = scatter_nd_impl(data, indices, updates)
expect(node, inputs=[data, indices, updates], outputs=[output],
name='test_scatternd')
| [
"numpy.copy",
"numpy.array",
"onnx.helper.make_node",
"numpy.ndindex"
] | [((543, 556), 'numpy.copy', 'np.copy', (['data'], {}), '(data)\n', (550, 556), True, 'import numpy as np\n'), ((570, 600), 'numpy.ndindex', 'np.ndindex', (['indices.shape[:-1]'], {}), '(indices.shape[:-1])\n', (580, 600), True, 'import numpy as np\n'), ((1053, 1145), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""ScatterND"""'], {'inputs': "['data', 'indices', 'updates']", 'outputs': "['y']"}), "('ScatterND', inputs=['data', 'indices', 'updates'],\n outputs=['y'])\n", (1074, 1145), False, 'import onnx\n'), ((1204, 1476), 'numpy.array', 'np.array', (['[[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], [[1, 2, 3, 4], [\n 5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], [[8, 7, 6, 5], [4, 3, 2, 1],\n [1, 2, 3, 4], [5, 6, 7, 8]], [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4],\n [5, 6, 7, 8]]]'], {'dtype': 'np.float32'}), '([[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], [[1, 2,\n 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], [[8, 7, 6, 5], [4, 3,\n 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]], [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2,\n 3, 4], [5, 6, 7, 8]]], dtype=np.float32)\n', (1212, 1476), True, 'import numpy as np\n'), ((1535, 1571), 'numpy.array', 'np.array', (['[[0], [2]]'], {'dtype': 'np.int64'}), '([[0], [2]], dtype=np.int64)\n', (1543, 1571), True, 'import numpy as np\n'), ((1590, 1738), 'numpy.array', 'np.array', (['[[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], [[1, 1, 1, 1], [\n 2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]]'], {'dtype': 'np.float32'}), '([[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], [[1, 1,\n 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]], dtype=np.float32)\n', (1598, 1738), True, 'import numpy as np\n')] |
import numpy as np
class robot:
    def __init__(self):
        pass
def take_action(self, wallet, current_price):
action = self.decide_action()
asset = "BTC"
if action == 1:
amount = self.decide_amount(wallet,'USD')
if amount > 0 :
asset_amount_to_be_purchased = round(amount/current_price[asset],10)
print(f"Robot has decided to buy {asset_amount_to_be_purchased} amount of {asset} with {amount} USD")
decision = [1,asset_amount_to_be_purchased, -amount]
else:
print(f"Robot has decided buy but the funds are not sufficient")
decision = [0,0,0]
return decision
elif action == 0:
print(f"Robot has decided not to take any action")
decision = [0,0,0]
return decision
        elif action == -1:
            amount = self.decide_amount(wallet, 'BTC')
            if amount > 0:
                # `amount` is denominated in BTC here, so the USD proceeds
                # are amount * price rather than amount / price
                usd_to_be_received = round(amount * current_price[asset], 10)
                print(f"Robot has decided to sell {amount} amount of {asset} for {usd_to_be_received} USD")
                decision = [-1, -amount, usd_to_be_received]
            else:
                print(f"Robot has decided to sell but the funds are not sufficient")
                decision = [0, 0, 0]
            return decision
def decide_action(self):
        # action space: -1 = sell, 0 = hold, 1 = buy
        action_space = [-1,0,1]
selected_action = np.random.randint(-1,2)
return selected_action
def decide_amount(self,wallet,name):
minimum_amount = 0.001
maximum_amount = wallet.asset_amount(name)
if maximum_amount>0:
selected_amount = np.random.uniform(minimum_amount,maximum_amount)
return selected_amount
else:
return 0 | [
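
# Hedged usage sketch (the `wallet` object is an assumption: anything
# exposing asset_amount(name) for 'USD' and 'BTC' works):
#   bot = robot()
#   decision = bot.take_action(wallet, {'BTC': 30000.0})
#   # decision == [action, btc_delta, usd_delta], e.g. [1, 0.01, -300.0]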
"numpy.random.randint",
"numpy.random.uniform"
] | [((1536, 1560), 'numpy.random.randint', 'np.random.randint', (['(-1)', '(2)'], {}), '(-1, 2)\n', (1553, 1560), True, 'import numpy as np\n'), ((1776, 1825), 'numpy.random.uniform', 'np.random.uniform', (['minimum_amount', 'maximum_amount'], {}), '(minimum_amount, maximum_amount)\n', (1793, 1825), True, 'import numpy as np\n')] |
import shutil
import subprocess
import sys
import pytest
from numpy.distutils import mingw32ccompiler
@pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test')
def test_build_import():
'''Test the mingw32ccompiler.build_import_library, which builds a
`python.a` from the MSVC `python.lib`
'''
# make sure `nm.exe` exists and supports the current python version. This
# can get mixed up when the PATH has a 64-bit nm but the python is 32-bit
try:
out = subprocess.check_output(['nm.exe', '--help'])
except FileNotFoundError:
pytest.skip("'nm.exe' not on path, is mingw installed?")
supported = out[out.find(b'supported targets:'):]
if sys.maxsize < 2**32:
if b'pe-i386' not in supported:
raise ValueError("'nm.exe' found but it does not support 32-bit "
"dlls when using 32-bit python. Supported "
"formats: '%s'" % supported)
elif b'pe-x86-64' not in supported:
raise ValueError("'nm.exe' found but it does not support 64-bit "
"dlls when using 64-bit python. Supported "
"formats: '%s'" % supported)
# Hide the import library to force a build
has_import_lib, fullpath = mingw32ccompiler._check_for_import_lib()
if has_import_lib:
shutil.move(fullpath, fullpath + '.bak')
try:
# Whew, now we can actually test the function
mingw32ccompiler.build_import_library()
finally:
if has_import_lib:
shutil.move(fullpath + '.bak', fullpath)
| [
"subprocess.check_output",
"numpy.distutils.mingw32ccompiler.build_import_library",
"shutil.move",
"numpy.distutils.mingw32ccompiler._check_for_import_lib",
"pytest.mark.skipif",
"pytest.skip"
] | [((114, 183), 'pytest.mark.skipif', 'pytest.mark.skipif', (["(sys.platform != 'win32')"], {'reason': '"""win32 only test"""'}), "(sys.platform != 'win32', reason='win32 only test')\n", (132, 183), False, 'import pytest\n'), ((1319, 1359), 'numpy.distutils.mingw32ccompiler._check_for_import_lib', 'mingw32ccompiler._check_for_import_lib', ([], {}), '()\n', (1357, 1359), False, 'from numpy.distutils import mingw32ccompiler\n'), ((518, 563), 'subprocess.check_output', 'subprocess.check_output', (["['nm.exe', '--help']"], {}), "(['nm.exe', '--help'])\n", (541, 563), False, 'import subprocess\n'), ((1394, 1434), 'shutil.move', 'shutil.move', (['fullpath', "(fullpath + '.bak')"], {}), "(fullpath, fullpath + '.bak')\n", (1405, 1434), False, 'import shutil\n'), ((1512, 1551), 'numpy.distutils.mingw32ccompiler.build_import_library', 'mingw32ccompiler.build_import_library', ([], {}), '()\n', (1549, 1551), False, 'from numpy.distutils import mingw32ccompiler\n'), ((604, 660), 'pytest.skip', 'pytest.skip', (['"""\'nm.exe\' not on path, is mingw installed?"""'], {}), '("\'nm.exe\' not on path, is mingw installed?")\n', (615, 660), False, 'import pytest\n'), ((1609, 1649), 'shutil.move', 'shutil.move', (["(fullpath + '.bak')", 'fullpath'], {}), "(fullpath + '.bak', fullpath)\n", (1620, 1649), False, 'import shutil\n')] |
# coding: utf-8
import argparse
import gc
import os
import pickle
import random
import sys
from pathlib import Path
from timeit import default_timer
import matplotlib.pyplot as plt
import numpy
from common import (DATA_DIR, DEFAULT_METHOD, FIGURES_DIR, METHODS, RESULTS_DIR)
from make_figures import (
plot_results
)
import embfile
from embfile import (BinaryEmbFile, TextEmbFile, VVMEmbFile)
parser = argparse.ArgumentParser()
input_file = parser.add_mutually_exclusive_group(required=True)
input_file.add_argument(
'--file-path', '-i', help='path to the file with no extension')
input_file.add_argument(
'--generate', '-g', nargs=2, type=int, default=[None, None],
help='vocab_size and vector_size (space-separated)')
parser.add_argument(
'--fmts', '-f', nargs='+', default=embfile.FORMATS.format_ids(),
help='Formats to test (IDs)')
parser.add_argument(
'--query-sizes', '-q', nargs='+', type=int,
default=[1_000, 250_000, 500_000])
parser.add_argument(
'--missing-words', type=int, default=1)
parser.add_argument(
'--repeat', '-r', type=int, default=5)
parser.add_argument(
'--method', '-m', choices=METHODS, default=DEFAULT_METHOD)
parser.add_argument('--no-plot', action='store_true')
args = parser.parse_args()
classes_to_test = [embfile.FORMATS.id_to_class[fid] for fid in args.fmts]
vocab_size, vector_size = args.generate
num_missing_words = args.missing_words
query_sizes = sorted(args.query_sizes)
if vocab_size is not None:  # vocab_size is only known up front with --generate
    query_sizes = [s for s in query_sizes if s <= vocab_size]
if not query_sizes:
    sys.exit('Error: no query size smaller than vocab_size in --query-sizes')
os.makedirs(FIGURES_DIR, exist_ok=True)
os.makedirs(RESULTS_DIR, exist_ok=True)
os.makedirs(DATA_DIR, exist_ok=True)
# Generate dummy data
def generate_vocab(vocab_size):
return ['word_%d_abc' % i for i in range(vocab_size)]
def generate_pairs(vocab, vector_size, dtype='float32', seed=12345):
    # generator that always yields exactly the same word vectors for a given seed
    numpy.random.seed(seed)
    return ((word, numpy.random.rand(vector_size).astype(dtype))
            for word in vocab)
def dummy_file_path(cls, vocab_size=vocab_size, vector_size=vector_size):
basename = DATA_DIR / '{}_{}_{}'.format(cls.__name__, vocab_size, vector_size)
return basename.with_suffix(cls.DEFAULT_EXTENSION)
def real_file_path(cls):
return Path(args.file_path).with_suffix(cls.DEFAULT_EXTENSION)
if args.file_path: # use real files
benchmark_name = Path(args.file_path).name
file_path = real_file_path
for cls in classes_to_test:
if not file_path(cls).exists():
sys.exit('The file %s does not exist' % file_path(cls))
    for cls in [VVMEmbFile, BinaryEmbFile, TextEmbFile]:  # VVM is the fastest for reading vocab
        if file_path(cls).exists():
            with cls(file_path(cls)) as f:
                vocab = list(f.words())
                vocab_size = len(vocab)
                vector_size = f.vector_size
            break  # stop at the first (fastest) available format
else: # use generated files
benchmark_name = '%d_%d__%d_reps' % (vocab_size, vector_size, args.repeat)
file_path = dummy_file_path
print('Generating the vocabulary')
vocab = generate_vocab(vocab_size)
for cls in classes_to_test:
cls_path = dummy_file_path(cls)
if not cls_path.exists():
print('File for class %s is missing.' % cls.__name__)
print('Creating %s file' % cls.DEFAULT_EXTENSION)
pairs = generate_pairs(vocab, vector_size) # generate always the same vectors
cls.create(cls_path, pairs, vocab_size=vocab_size)
print('Generating queries...')
full_query = random.sample(vocab, k=min(query_sizes[-1], len(vocab)))
missing_words = ['<<missing_%d>>' % i for i in range(num_missing_words)]
# Table
COLUMNS = ['Class', 'Query Size', 'Best', 'Median', 'MedianAbsDev']
class_col_width = 2 + max(len(cls.__name__) for cls in classes_to_test)
WIDTHS = (class_col_width,) + tuple(4 + len(c) for c in COLUMNS[1:])
HEADER_FMT = '{:<%ds} {:^%ds} {:^%ds} {:^%ds} {:^%ds}' % WIDTHS
ROW_FMT = '{:<%ds} {:^%dd} {:^%d.2f} {:^%d.2f} {:^%d.2f}' % WIDTHS
HARD_LINE = ' '.join('=' * w for w in WIDTHS)
SOFT_LINE = ' '.join('-' * w for w in WIDTHS)
def print_header():
print(HARD_LINE)
print(HEADER_FMT.format(*COLUMNS))
print(HARD_LINE)
def print_row(row):
assert len(row) == len(COLUMNS)
print(ROW_FMT.format(*row))
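
# Illustrative rendering (column widths depend on the classes under test):
#   print_row(['VVMEmbFile', 1000, 0.12, 0.15, 0.01])
# prints one fixed-width line such as
#   VVMEmbFile        1000      0.12      0.15      0.01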
print_header()
result = {(cls, size): None for size in query_sizes for cls in classes_to_test}
for query_size in query_sizes:
if query_size > vocab_size:
print('Skipping query size %d (> vocab size)' % query_size)
continue
query = full_query[:query_size]
query += missing_words
for cls in classes_to_test:
path = file_path(cls)
times = []
for i in range(args.repeat):
gc.collect()
start = default_timer()
with cls(path, verbose=0) as file:
file.find(query)
elapsed = default_timer() - start
times.append(elapsed)
result[cls, query_size] = times
median = numpy.median(times)
median_abs_dev = numpy.median([abs(t - median) for t in times])
print_row([cls.__name__, query_size,
min(times), median, median_abs_dev])
if query_size == query_sizes[-1]:
print(HARD_LINE)
else:
print(SOFT_LINE)
# Save results
pkl_path = (RESULTS_DIR / benchmark_name).with_suffix('.pkl')
summary = dict(data=result, vocab_size=vocab_size, vector_size=vector_size, repeat=args.repeat)
with open(pkl_path, 'wb') as f:
pickle.dump(summary, f)
if not args.no_plot:
plot_results(summary)
plt.show()
| [
"random.sample",
"numpy.median",
"pickle.dump",
"numpy.random.rand",
"os.makedirs",
"argparse.ArgumentParser",
"pathlib.Path",
"timeit.default_timer",
"embfile.FORMATS.format_ids",
"make_figures.plot_results",
"numpy.random.seed",
"gc.collect",
"sys.exit",
"matplotlib.pyplot.show"
] | [((410, 435), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (433, 435), False, 'import argparse\n'), ((1602, 1641), 'os.makedirs', 'os.makedirs', (['FIGURES_DIR'], {'exist_ok': '(True)'}), '(FIGURES_DIR, exist_ok=True)\n', (1613, 1641), False, 'import os\n'), ((1642, 1681), 'os.makedirs', 'os.makedirs', (['RESULTS_DIR'], {'exist_ok': '(True)'}), '(RESULTS_DIR, exist_ok=True)\n', (1653, 1681), False, 'import os\n'), ((1682, 1718), 'os.makedirs', 'os.makedirs', (['DATA_DIR'], {'exist_ok': '(True)'}), '(DATA_DIR, exist_ok=True)\n', (1693, 1718), False, 'import os\n'), ((3610, 3649), 'random.sample', 'random.sample', (['vocab'], {'k': 'query_sizes[-1]'}), '(vocab, k=query_sizes[-1])\n', (3623, 3649), False, 'import random\n'), ((1527, 1600), 'sys.exit', 'sys.exit', (['"""Error: no query size smaller than vocab_size in --query-sizes"""'], {}), "('Error: no query size smaller than vocab_size in --query-sizes')\n", (1535, 1600), False, 'import sys\n'), ((1975, 1998), 'numpy.random.seed', 'numpy.random.seed', (['seed'], {}), '(seed)\n', (1992, 1998), False, 'import numpy\n'), ((5568, 5591), 'pickle.dump', 'pickle.dump', (['summary', 'f'], {}), '(summary, f)\n', (5579, 5591), False, 'import pickle\n'), ((5618, 5639), 'make_figures.plot_results', 'plot_results', (['summary'], {}), '(summary)\n', (5630, 5639), False, 'from make_figures import plot_results\n'), ((5644, 5654), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5652, 5654), True, 'import matplotlib.pyplot as plt\n'), ((803, 831), 'embfile.FORMATS.format_ids', 'embfile.FORMATS.format_ids', ([], {}), '()\n', (829, 831), False, 'import embfile\n'), ((2467, 2487), 'pathlib.Path', 'Path', (['args.file_path'], {}), '(args.file_path)\n', (2471, 2487), False, 'from pathlib import Path\n'), ((5066, 5085), 'numpy.median', 'numpy.median', (['times'], {}), '(times)\n', (5078, 5085), False, 'import numpy\n'), ((2351, 2371), 'pathlib.Path', 'Path', (['args.file_path'], {}), '(args.file_path)\n', (2355, 2371), False, 'from pathlib import Path\n'), ((4799, 4811), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4809, 4811), False, 'import gc\n'), ((4832, 4847), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (4845, 4847), False, 'from timeit import default_timer\n'), ((4950, 4965), 'timeit.default_timer', 'default_timer', ([], {}), '()\n', (4963, 4965), False, 'from timeit import default_timer\n'), ((2018, 2048), 'numpy.random.rand', 'numpy.random.rand', (['vector_size'], {}), '(vector_size)\n', (2035, 2048), False, 'import numpy\n')] |
import os
import string
import numpy as np
from PIL import Image
import torch as th
from torchvision.transforms.functional import to_tensor
from . import utils, templates
class FontsDataset(th.utils.data.Dataset):
def __init__(self, root, chamfer, n_samples_per_curve, val=False):
self.root = root
self.chamfer = chamfer
self.n_samples_per_curve = n_samples_per_curve
self.files = [f[:-4] for f in os.listdir(os.path.join(self.root, 'pngs')) if f.endswith('.png')]
np.random.shuffle(self.files)
cutoff = int(0.9*len(self.files))
if val:
self.files = self.files[cutoff:]
else:
self.files = self.files[:cutoff]
self.n_loops_dict = templates.n_loops
def __repr__(self):
return "FontsDataset | {} entries".format(len(self))
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
fname = self.files[idx]
im = Image.open(os.path.join(self.root, 'pngs', fname + '.png')).convert('L')
distance_fields = th.from_numpy(
np.load(os.path.join(self.root, 'distances', fname + '.npy'))[31:-31,31:-31].astype(np.float32)) ** 2
alignment_fields = utils.compute_alignment_fields(distance_fields)
distance_fields = distance_fields[1:-1,1:-1]
occupancy_fields = utils.compute_occupancy_fields(distance_fields)
points = th.Tensor([])
if self.chamfer:
points = th.from_numpy(np.load(os.path.join(self.root, 'points', fname + '.npy')).astype(np.float32))
points = points[:self.n_samples_per_curve*sum(templates.topology)]
return {
'fname': fname,
'im': to_tensor(im),
'distance_fields': distance_fields,
'alignment_fields': alignment_fields,
'occupancy_fields': occupancy_fields,
'points': points,
'letter_idx': string.ascii_uppercase.index(fname[0]),
'n_loops': self.n_loops_dict[fname[0]]
}
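
# Hedged usage sketch (the root path and DataLoader settings are assumptions):
#   dataset = FontsDataset('data/fonts', chamfer=True, n_samples_per_curve=5)
#   loader = th.utils.data.DataLoader(dataset, batch_size=8, shuffle=True)
#   batch = next(iter(loader))  # dict with 'im', 'distance_fields', ...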
| [
"torchvision.transforms.functional.to_tensor",
"string.ascii_uppercase.index",
"torch.Tensor",
"os.path.join",
"numpy.random.shuffle"
] | [((513, 542), 'numpy.random.shuffle', 'np.random.shuffle', (['self.files'], {}), '(self.files)\n', (530, 542), True, 'import numpy as np\n'), ((1422, 1435), 'torch.Tensor', 'th.Tensor', (['[]'], {}), '([])\n', (1431, 1435), True, 'import torch as th\n'), ((1718, 1731), 'torchvision.transforms.functional.to_tensor', 'to_tensor', (['im'], {}), '(im)\n', (1727, 1731), False, 'from torchvision.transforms.functional import to_tensor\n'), ((1937, 1975), 'string.ascii_uppercase.index', 'string.ascii_uppercase.index', (['fname[0]'], {}), '(fname[0])\n', (1965, 1975), False, 'import string\n'), ((449, 480), 'os.path.join', 'os.path.join', (['self.root', '"""pngs"""'], {}), "(self.root, 'pngs')\n", (461, 480), False, 'import os\n'), ((981, 1028), 'os.path.join', 'os.path.join', (['self.root', '"""pngs"""', "(fname + '.png')"], {}), "(self.root, 'pngs', fname + '.png')\n", (993, 1028), False, 'import os\n'), ((1504, 1553), 'os.path.join', 'os.path.join', (['self.root', '"""points"""', "(fname + '.npy')"], {}), "(self.root, 'points', fname + '.npy')\n", (1516, 1553), False, 'import os\n'), ((1108, 1160), 'os.path.join', 'os.path.join', (['self.root', '"""distances"""', "(fname + '.npy')"], {}), "(self.root, 'distances', fname + '.npy')\n", (1120, 1160), False, 'import os\n')] |
# -*- coding=utf-8 -*-
# Use the tuned models to generate stacking features for the second layer
import best_models as bm
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
import getpass
from sklearn.metrics import roc_auc_score
import time
time_begin = time.time()
SEED=36
# ===============data==================
DATA_DIR = '../../data/data_4/'
print('Loading data...')
print('Location:', DATA_DIR)
train_df = pd.read_csv(DATA_DIR + 'train_preprocessed1.csv', encoding='gbk')
test_df = pd.read_csv(DATA_DIR + 'test_preprocessed1.csv', encoding='gbk')
if getpass.getuser() == 'stone':
train_df = train_df[:20]
# ==============END data==================
kf = KFold(n_splits=5, shuffle=True, random_state=SEED)
def get_oof(model, x_train, y_train, x_test, model_name):
oof_train = np.zeros((x_train.shape[0],))
oof_test = np.zeros((x_test.shape[0],))
oof_test_skf = np.empty((5, x_test.shape[0]))
for i,(train_index, test_index) in enumerate(kf.split(x_train)):
kf_x_train = x_train[train_index]
kf_y_train = y_train[train_index]
kf_x_test = x_train[test_index]
        print(model_name, 'training... data shape: {}, {}'.format(kf_x_train.shape, kf_y_train.shape))
model.fit(kf_x_train, kf_y_train)
oof_train[test_index] = model.predict_proba(kf_x_test)[:,1]
oof_test_skf[i,:] = model.predict_proba(x_test)[:,1]
oof_test[:] = oof_test_skf.mean(axis=0)
oof_train = oof_train.reshape(-1, 1)
oof_test = oof_test.reshape(-1, 1)
print('{}-CV roc_auc_score: {}'.format(model_name, roc_auc_score(y_train, oof_train)))
return oof_train, oof_test
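
# Shape sketch: with 5 folds, oof_train holds one held-out prediction per
# training row (n_train x 1), while oof_test is the mean of the 5 per-fold
# test-set predictions (n_test x 1).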
# Initialize each tuned model and fetch its corresponding data
xgb_model = bm.get_tuned_xgb()
x_train_xgb, y_train_xgb, x_test_xgb = bm.get_data(train_df=train_df,
test_df=test_df,
DATA_DIR=DATA_DIR,
model_name='xgb')
rf_model = bm.get_tuned_rf()
x_train_rf, y_train_rf, x_test_rf = bm.get_data(train_df=train_df,
test_df=test_df,
DATA_DIR=DATA_DIR,
model_name='rf')
# Generate out-of-fold prediction columns on the training set and averaged predictions on the test set
xgb_oof_train, xgb_oof_test = get_oof(xgb_model,
x_train=x_train_xgb,
y_train=y_train_xgb,
x_test=x_test_xgb,
model_name='xgb')
rf_oof_train, rf_oof_test = get_oof(rf_model,
x_train=x_train_rf,
y_train=y_train_rf,
x_test=x_test_rf,
model_name='rf')
# Build the new train/test sets: each model's out-of-fold prediction columns stacked side by side
z_train = np.concatenate((xgb_oof_train,
rf_oof_train,), axis=1)
z_test = np.concatenate((xgb_oof_test,
rf_oof_test,), axis=1)
print("\nz_train:{}, z_test:{}".format(z_train.shape, z_test.shape))
# Save the new train and test sets
print("\nSaving data:")
print('Location:', DATA_DIR)
z_train_pd = pd.DataFrame(z_train, columns=['XGB', 'RF'])
z_test_pd = pd.DataFrame(z_test, columns=['XGB', 'RF'])
z_train_pd.to_csv(DATA_DIR + 'z_train1.csv',encoding='gbk',index=False)
z_test_pd.to_csv(DATA_DIR + 'z_test1.csv',encoding='gbk',index=False)
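
# Hedged second-layer sketch (the meta-model choice here is an assumption,
# not part of this script):
#   from sklearn.linear_model import LogisticRegression
#   meta = LogisticRegression().fit(z_train, y_train_xgb)
#   final_pred = meta.predict_proba(z_test)[:, 1]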
# ------------ print the elapsed time; no need to change ---------------
time_spend = time.time() - time_begin
print('\nElapsed time: %d seconds, about %d minutes\n' % (time_spend, time_spend // 60))
| [
"best_models.get_tuned_rf",
"best_models.get_tuned_xgb",
"pandas.read_csv",
"getpass.getuser",
"best_models.get_data",
"sklearn.metrics.roc_auc_score",
"numpy.zeros",
"numpy.empty",
"numpy.concatenate",
"pandas.DataFrame",
"sklearn.model_selection.KFold",
"time.time"
] | [((241, 252), 'time.time', 'time.time', ([], {}), '()\n', (250, 252), False, 'import time\n'), ((385, 450), 'pandas.read_csv', 'pd.read_csv', (["(DATA_DIR + 'train_preprocessed1.csv')"], {'encoding': '"""gbk"""'}), "(DATA_DIR + 'train_preprocessed1.csv', encoding='gbk')\n", (396, 450), True, 'import pandas as pd\n'), ((461, 525), 'pandas.read_csv', 'pd.read_csv', (["(DATA_DIR + 'test_preprocessed1.csv')"], {'encoding': '"""gbk"""'}), "(DATA_DIR + 'test_preprocessed1.csv', encoding='gbk')\n", (472, 525), True, 'import pandas as pd\n'), ((638, 688), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': 'SEED'}), '(n_splits=5, shuffle=True, random_state=SEED)\n', (643, 688), False, 'from sklearn.model_selection import KFold\n'), ((1634, 1652), 'best_models.get_tuned_xgb', 'bm.get_tuned_xgb', ([], {}), '()\n', (1650, 1652), True, 'import best_models as bm\n'), ((1692, 1780), 'best_models.get_data', 'bm.get_data', ([], {'train_df': 'train_df', 'test_df': 'test_df', 'DATA_DIR': 'DATA_DIR', 'model_name': '"""xgb"""'}), "(train_df=train_df, test_df=test_df, DATA_DIR=DATA_DIR,\n model_name='xgb')\n", (1703, 1780), True, 'import best_models as bm\n'), ((1942, 1959), 'best_models.get_tuned_rf', 'bm.get_tuned_rf', ([], {}), '()\n', (1957, 1959), True, 'import best_models as bm\n'), ((1996, 2083), 'best_models.get_data', 'bm.get_data', ([], {'train_df': 'train_df', 'test_df': 'test_df', 'DATA_DIR': 'DATA_DIR', 'model_name': '"""rf"""'}), "(train_df=train_df, test_df=test_df, DATA_DIR=DATA_DIR,\n model_name='rf')\n", (2007, 2083), True, 'import best_models as bm\n'), ((2844, 2897), 'numpy.concatenate', 'np.concatenate', (['(xgb_oof_train, rf_oof_train)'], {'axis': '(1)'}), '((xgb_oof_train, rf_oof_train), axis=1)\n', (2858, 2897), True, 'import numpy as np\n'), ((2934, 2985), 'numpy.concatenate', 'np.concatenate', (['(xgb_oof_test, rf_oof_test)'], {'axis': '(1)'}), '((xgb_oof_test, rf_oof_test), axis=1)\n', (2948, 2985), True, 'import numpy as np\n'), ((3152, 3196), 'pandas.DataFrame', 'pd.DataFrame', (['z_train'], {'columns': "['XGB', 'RF']"}), "(z_train, columns=['XGB', 'RF'])\n", (3164, 3196), True, 'import pandas as pd\n'), ((3209, 3252), 'pandas.DataFrame', 'pd.DataFrame', (['z_test'], {'columns': "['XGB', 'RF']"}), "(z_test, columns=['XGB', 'RF'])\n", (3221, 3252), True, 'import pandas as pd\n'), ((530, 547), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (545, 547), False, 'import getpass\n'), ((765, 794), 'numpy.zeros', 'np.zeros', (['(x_train.shape[0],)'], {}), '((x_train.shape[0],))\n', (773, 794), True, 'import numpy as np\n'), ((810, 838), 'numpy.zeros', 'np.zeros', (['(x_test.shape[0],)'], {}), '((x_test.shape[0],))\n', (818, 838), True, 'import numpy as np\n'), ((858, 888), 'numpy.empty', 'np.empty', (['(5, x_test.shape[0])'], {}), '((5, x_test.shape[0]))\n', (866, 888), True, 'import numpy as np\n'), ((3450, 3461), 'time.time', 'time.time', ([], {}), '()\n', (3459, 3461), False, 'import time\n'), ((1532, 1565), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_train', 'oof_train'], {}), '(y_train, oof_train)\n', (1545, 1565), False, 'from sklearn.metrics import roc_auc_score\n')] |
import numpy as np
def mypolyval(p, x):
_p = list(p)
res = _p.pop(0)
while _p:
res = res * x + _p.pop(0)
return res
# 'p' is excluded from broadcasting so the full coefficient list reaches each call
vpolyval = np.vectorize(mypolyval, excluded=['p'])
print(vpolyval(p=[1, 2, 3], x=[0, 1]))  # evaluates x**2 + 2x + 3 at 0 and 1 -> [3 6]
"numpy.vectorize"
] | [((136, 175), 'numpy.vectorize', 'np.vectorize', (['mypolyval'], {'excluded': "['p']"}), "(mypolyval, excluded=['p'])\n", (148, 175), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os
import numpy as np
myfilename = "wine.data.txt"
with open(myfilename, 'r') as file_handle:
mylist = []
for line in file_handle.readlines():
        line_clean = line.replace('  ', ' ').replace('  ', ' ')
line_clean = line_clean.strip()
values = line_clean.split(',')
#print(values)
linelist = [int,float,float,float,float,int,float,float,float,float,float,float,float,int]
newlist = [t(x) for t,x in zip(linelist,values)]
mylist += [newlist]
#print(mylist[0])
#print("")#spacer
# transpose: rows (wine samples) -> columns (attributes), so each inner
# list collects every value of one attribute
rotated_list = [[mylist[jdx][idx]
                for jdx, row in enumerate(mylist)]
                for idx, column in enumerate(mylist[0])]
#print(rotated_list[0])
mean_list = []
sd_list = []
for x in rotated_list:
mean_list.append(np.mean(x))
sd_list.append(np.std(x))
print("")
print("List of Means:")
print("")
print(mean_list)
print("")
print("List of Standard Deviations:")
print("")
print(sd_list)
print("")
print('finished!')
| [
"numpy.mean",
"numpy.std"
] | [((811, 821), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (818, 821), True, 'import numpy as np\n'), ((842, 851), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (848, 851), True, 'import numpy as np\n')] |
# Copyright 2022 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Tuple
from cirq.ops.fsim_gate import PhasedFSimGate
import numpy as np
import pytest
import cirq, cirq_google
from cirq_google.devices.google_noise_properties import (
SYMMETRIC_TWO_QUBIT_GATES,
SINGLE_QUBIT_GATES,
)
from cirq.devices.noise_utils import (
OpIdentifier,
PHYSICAL_GATE_TAG,
)
from cirq_google.devices.google_noise_properties import (
GoogleNoiseProperties,
NoiseModelFromGoogleNoiseProperties,
)
DEFAULT_GATE_NS: Dict[type, float] = {
cirq.ZPowGate: 25.0,
cirq.MeasurementGate: 4000.0,
cirq.ResetChannel: 250.0,
cirq.PhasedXZGate: 25.0,
cirq.FSimGate: 32.0,
# SYC is normally 12ns, but setting it equal to other two-qubit gates
# simplifies the tests.
cirq_google.SycamoreGate: 32.0,
cirq.PhasedFSimGate: 32.0,
cirq.ISwapPowGate: 32.0,
cirq.CZPowGate: 32.0,
# cirq.WaitGate is a special case.
}
# Mock pauli error rates for 1- and 2-qubit gates.
SINGLE_QUBIT_ERROR = 0.001
TWO_QUBIT_ERROR = 0.01
# These properties are for testing purposes only - they are not representative
# of device behavior for any existing hardware.
def sample_noise_properties(
system_qubits: List[cirq.Qid], qubit_pairs: List[Tuple[cirq.Qid, cirq.Qid]]
):
# Known false positive: https://github.com/PyCQA/pylint/issues/5857
return GoogleNoiseProperties( # pylint: disable=unexpected-keyword-arg
gate_times_ns=DEFAULT_GATE_NS,
t1_ns={q: 1e5 for q in system_qubits},
tphi_ns={q: 2e5 for q in system_qubits},
readout_errors={q: np.array([SINGLE_QUBIT_ERROR, TWO_QUBIT_ERROR]) for q in system_qubits},
gate_pauli_errors={
**{OpIdentifier(g, q): 0.001 for g in SINGLE_QUBIT_GATES for q in system_qubits},
**{
OpIdentifier(g, q0, q1): 0.01
for g in SYMMETRIC_TWO_QUBIT_GATES
for q0, q1 in qubit_pairs
},
},
fsim_errors={
OpIdentifier(g, q0, q1): cirq.PhasedFSimGate(0.01, 0.03, 0.04, 0.05, 0.02)
for g in SYMMETRIC_TWO_QUBIT_GATES
for q0, q1 in qubit_pairs
},
)
def test_zphase_gates():
q0 = cirq.LineQubit(0)
props = sample_noise_properties([q0], [])
model = NoiseModelFromGoogleNoiseProperties(props)
circuit = cirq.Circuit(cirq.Z(q0) ** 0.3)
noisy_circuit = circuit.with_noise(model)
assert noisy_circuit == circuit
@pytest.mark.parametrize(
'op',
[
(cirq.Z(cirq.LineQubit(0)) ** 0.3).with_tags(cirq_google.PhysicalZTag),
cirq.PhasedXZGate(x_exponent=0.8, z_exponent=0.2, axis_phase_exponent=0.1).on(
cirq.LineQubit(0)
),
],
)
def test_single_qubit_gates(op):
q0 = cirq.LineQubit(0)
props = sample_noise_properties([q0], [])
model = NoiseModelFromGoogleNoiseProperties(props)
circuit = cirq.Circuit(op)
noisy_circuit = circuit.with_noise(model)
assert len(noisy_circuit.moments) == 3
assert len(noisy_circuit.moments[0].operations) == 1
assert noisy_circuit.moments[0].operations[0] == op.with_tags(PHYSICAL_GATE_TAG)
# Depolarizing noise
assert len(noisy_circuit.moments[1].operations) == 1
depol_op = noisy_circuit.moments[1].operations[0]
assert isinstance(depol_op.gate, cirq.DepolarizingChannel)
assert np.isclose(depol_op.gate.p, 0.00081252)
# Thermal noise
assert len(noisy_circuit.moments[2].operations) == 1
thermal_op = noisy_circuit.moments[2].operations[0]
assert isinstance(thermal_op.gate, cirq.KrausChannel)
thermal_choi = cirq.kraus_to_choi(cirq.kraus(thermal_op))
assert np.allclose(
thermal_choi,
[
[1, 0, 0, 9.99750031e-01],
[0, 2.49968753e-04, 0, 0],
[0, 0, 0, 0],
[9.99750031e-01, 0, 0, 9.99750031e-01],
],
)
# Pauli error for depol_op + thermal_op == total (0.001)
depol_pauli_err = 1 - cirq.qis.measures.entanglement_fidelity(depol_op)
thermal_pauli_err = 1 - cirq.qis.measures.entanglement_fidelity(thermal_op)
total_err = depol_pauli_err + thermal_pauli_err
assert np.isclose(total_err, SINGLE_QUBIT_ERROR)
@pytest.mark.parametrize(
'op',
[
cirq.ISWAP(*cirq.LineQubit.range(2)) ** 0.6,
cirq.CZ(*cirq.LineQubit.range(2)) ** 0.3,
cirq_google.SYC(*cirq.LineQubit.range(2)),
],
)
def test_two_qubit_gates(op):
q0, q1 = cirq.LineQubit.range(2)
props = sample_noise_properties([q0, q1], [(q0, q1), (q1, q0)])
model = NoiseModelFromGoogleNoiseProperties(props)
circuit = cirq.Circuit(op)
noisy_circuit = circuit.with_noise(model)
assert len(noisy_circuit.moments) == 4
assert len(noisy_circuit.moments[0].operations) == 1
assert noisy_circuit.moments[0].operations[0] == op.with_tags(PHYSICAL_GATE_TAG)
# Depolarizing noise
assert len(noisy_circuit.moments[1].operations) == 1
depol_op = noisy_circuit.moments[1].operations[0]
assert isinstance(depol_op.gate, cirq.DepolarizingChannel)
assert np.isclose(depol_op.gate.p, 0.00719705)
# FSim angle corrections
assert len(noisy_circuit.moments[2].operations) == 1
fsim_op = noisy_circuit.moments[2].operations[0]
assert isinstance(fsim_op.gate, cirq.PhasedFSimGate)
assert fsim_op == PhasedFSimGate(theta=0.01, zeta=0.03, chi=0.04, gamma=0.05, phi=0.02).on(
q0, q1
)
# Thermal noise
assert len(noisy_circuit.moments[3].operations) == 2
thermal_op_0 = noisy_circuit.moments[3].operation_at(q0)
thermal_op_1 = noisy_circuit.moments[3].operation_at(q1)
assert isinstance(thermal_op_0.gate, cirq.KrausChannel)
assert isinstance(thermal_op_1.gate, cirq.KrausChannel)
thermal_choi_0 = cirq.kraus_to_choi(cirq.kraus(thermal_op_0))
thermal_choi_1 = cirq.kraus_to_choi(cirq.kraus(thermal_op_1))
expected_thermal_choi = np.array(
[
[1, 0, 0, 9.99680051e-01],
[0, 3.19948805e-04, 0, 0],
[0, 0, 0, 0],
[9.99680051e-01, 0, 0, 9.99680051e-01],
]
)
assert np.allclose(thermal_choi_0, expected_thermal_choi)
assert np.allclose(thermal_choi_1, expected_thermal_choi)
# Pauli error for depol_op + fsim_op + thermal_op_(0|1) == total (0.01)
depol_pauli_err = 1 - cirq.qis.measures.entanglement_fidelity(depol_op)
fsim_pauli_err = 1 - cirq.qis.measures.entanglement_fidelity(fsim_op)
thermal0_pauli_err = 1 - cirq.qis.measures.entanglement_fidelity(thermal_op_0)
thermal1_pauli_err = 1 - cirq.qis.measures.entanglement_fidelity(thermal_op_1)
total_err = depol_pauli_err + thermal0_pauli_err + thermal1_pauli_err + fsim_pauli_err
assert np.isclose(total_err, TWO_QUBIT_ERROR)
def test_supertype_match():
# Verifies that ops in gate_pauli_errors which only appear as their
# supertypes in fsim_errors are properly accounted for.
q0, q1 = cirq.LineQubit.range(2)
op_id = OpIdentifier(cirq_google.SycamoreGate, q0, q1)
test_props = sample_noise_properties([q0, q1], [(q0, q1), (q1, q0)])
expected_err = test_props._depolarizing_error[op_id]
props = sample_noise_properties([q0, q1], [(q0, q1), (q1, q0)])
props.fsim_errors = {
k: cirq.PhasedFSimGate(0.5, 0.4, 0.3, 0.2, 0.1)
for k in [OpIdentifier(cirq.FSimGate, q0, q1), OpIdentifier(cirq.FSimGate, q1, q0)]
}
assert props._depolarizing_error[op_id] != expected_err
def test_measure_gates():
q00, q01, q10, q11 = cirq.GridQubit.rect(2, 2)
qubits = [q00, q01, q10, q11]
props = sample_noise_properties(
qubits,
[
(q00, q01),
(q01, q00),
(q10, q11),
(q11, q10),
(q00, q10),
(q10, q00),
(q01, q11),
(q11, q01),
],
)
model = NoiseModelFromGoogleNoiseProperties(props)
op = cirq.measure(*qubits, key='m')
circuit = cirq.Circuit(cirq.measure(*qubits, key='m'))
noisy_circuit = circuit.with_noise(model)
# Measurement gates are prepended by amplitude damping, and nothing else.
assert len(noisy_circuit.moments) == 2
# Amplitude damping before measurement
assert len(noisy_circuit.moments[0].operations) == 4
for q in qubits:
op = noisy_circuit.moments[0].operation_at(q)
assert isinstance(op.gate, cirq.GeneralizedAmplitudeDampingChannel), q
assert np.isclose(op.gate.p, 0.90909090), q
assert np.isclose(op.gate.gamma, 0.011), q
# Original measurement is after the noise.
assert len(noisy_circuit.moments[1].operations) == 1
# Measurements are untagged during reconstruction.
assert noisy_circuit.moments[1] == circuit.moments[0]
def test_wait_gates():
q0 = cirq.LineQubit(0)
props = sample_noise_properties([q0], [])
model = NoiseModelFromGoogleNoiseProperties(props)
op = cirq.wait(q0, nanos=100)
circuit = cirq.Circuit(op)
noisy_circuit = circuit.with_noise(model)
assert len(noisy_circuit.moments) == 2
assert noisy_circuit.moments[0].operations[0] == op.with_tags(PHYSICAL_GATE_TAG)
# No depolarizing noise because WaitGate has none.
assert len(noisy_circuit.moments[1].operations) == 1
thermal_op = noisy_circuit.moments[1].operations[0]
assert isinstance(thermal_op.gate, cirq.KrausChannel)
thermal_choi = cirq.kraus_to_choi(cirq.kraus(thermal_op))
assert np.allclose(
thermal_choi,
[
[1, 0, 0, 9.990005e-01],
[0, 9.99500167e-04, 0, 0],
[0, 0, 0, 0],
[9.990005e-01, 0, 0, 9.990005e-01],
],
)
| [
"cirq.wait",
"cirq.GridQubit.rect",
"cirq.PhasedFSimGate",
"numpy.allclose",
"numpy.isclose",
"cirq.qis.measures.entanglement_fidelity",
"cirq.devices.noise_utils.OpIdentifier",
"cirq.LineQubit.range",
"cirq.Z",
"cirq.ops.fsim_gate.PhasedFSimGate",
"cirq.LineQubit",
"cirq.Circuit",
"numpy.ar... | [((2778, 2795), 'cirq.LineQubit', 'cirq.LineQubit', (['(0)'], {}), '(0)\n', (2792, 2795), False, 'import cirq, cirq_google\n'), ((2854, 2896), 'cirq_google.devices.google_noise_properties.NoiseModelFromGoogleNoiseProperties', 'NoiseModelFromGoogleNoiseProperties', (['props'], {}), '(props)\n', (2889, 2896), False, 'from cirq_google.devices.google_noise_properties import GoogleNoiseProperties, NoiseModelFromGoogleNoiseProperties\n'), ((3328, 3345), 'cirq.LineQubit', 'cirq.LineQubit', (['(0)'], {}), '(0)\n', (3342, 3345), False, 'import cirq, cirq_google\n'), ((3404, 3446), 'cirq_google.devices.google_noise_properties.NoiseModelFromGoogleNoiseProperties', 'NoiseModelFromGoogleNoiseProperties', (['props'], {}), '(props)\n', (3439, 3446), False, 'from cirq_google.devices.google_noise_properties import GoogleNoiseProperties, NoiseModelFromGoogleNoiseProperties\n'), ((3461, 3477), 'cirq.Circuit', 'cirq.Circuit', (['op'], {}), '(op)\n', (3473, 3477), False, 'import cirq, cirq_google\n'), ((3920, 3959), 'numpy.isclose', 'np.isclose', (['depol_op.gate.p', '(0.00081252)'], {}), '(depol_op.gate.p, 0.00081252)\n', (3930, 3959), True, 'import numpy as np\n'), ((4225, 4356), 'numpy.allclose', 'np.allclose', (['thermal_choi', '[[1, 0, 0, 0.999750031], [0, 0.000249968753, 0, 0], [0, 0, 0, 0], [\n 0.999750031, 0, 0, 0.999750031]]'], {}), '(thermal_choi, [[1, 0, 0, 0.999750031], [0, 0.000249968753, 0, 0\n ], [0, 0, 0, 0], [0.999750031, 0, 0, 0.999750031]])\n', (4236, 4356), True, 'import numpy as np\n'), ((4724, 4765), 'numpy.isclose', 'np.isclose', (['total_err', 'SINGLE_QUBIT_ERROR'], {}), '(total_err, SINGLE_QUBIT_ERROR)\n', (4734, 4765), True, 'import numpy as np\n'), ((5016, 5039), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (5036, 5039), False, 'import cirq, cirq_google\n'), ((5120, 5162), 'cirq_google.devices.google_noise_properties.NoiseModelFromGoogleNoiseProperties', 'NoiseModelFromGoogleNoiseProperties', (['props'], {}), '(props)\n', (5155, 5162), False, 'from cirq_google.devices.google_noise_properties import GoogleNoiseProperties, NoiseModelFromGoogleNoiseProperties\n'), ((5177, 5193), 'cirq.Circuit', 'cirq.Circuit', (['op'], {}), '(op)\n', (5189, 5193), False, 'import cirq, cirq_google\n'), ((5636, 5675), 'numpy.isclose', 'np.isclose', (['depol_op.gate.p', '(0.00719705)'], {}), '(depol_op.gate.p, 0.00719705)\n', (5646, 5675), True, 'import numpy as np\n'), ((6470, 6583), 'numpy.array', 'np.array', (['[[1, 0, 0, 0.999680051], [0, 0.000319948805, 0, 0], [0, 0, 0, 0], [\n 0.999680051, 0, 0, 0.999680051]]'], {}), '([[1, 0, 0, 0.999680051], [0, 0.000319948805, 0, 0], [0, 0, 0, 0],\n [0.999680051, 0, 0, 0.999680051]])\n', (6478, 6583), True, 'import numpy as np\n'), ((6673, 6723), 'numpy.allclose', 'np.allclose', (['thermal_choi_0', 'expected_thermal_choi'], {}), '(thermal_choi_0, expected_thermal_choi)\n', (6684, 6723), True, 'import numpy as np\n'), ((6735, 6785), 'numpy.allclose', 'np.allclose', (['thermal_choi_1', 'expected_thermal_choi'], {}), '(thermal_choi_1, expected_thermal_choi)\n', (6746, 6785), True, 'import numpy as np\n'), ((7281, 7319), 'numpy.isclose', 'np.isclose', (['total_err', 'TWO_QUBIT_ERROR'], {}), '(total_err, TWO_QUBIT_ERROR)\n', (7291, 7319), True, 'import numpy as np\n'), ((7495, 7518), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (7515, 7518), False, 'import cirq, cirq_google\n'), ((7531, 7577), 'cirq.devices.noise_utils.OpIdentifier', 'OpIdentifier', (['cirq_google.SycamoreGate', 'q0', 'q1'], {}), 
'(cirq_google.SycamoreGate, q0, q1)\n', (7543, 7577), False, 'from cirq.devices.noise_utils import OpIdentifier, PHYSICAL_GATE_TAG\n'), ((8070, 8095), 'cirq.GridQubit.rect', 'cirq.GridQubit.rect', (['(2)', '(2)'], {}), '(2, 2)\n', (8089, 8095), False, 'import cirq, cirq_google\n'), ((8414, 8456), 'cirq_google.devices.google_noise_properties.NoiseModelFromGoogleNoiseProperties', 'NoiseModelFromGoogleNoiseProperties', (['props'], {}), '(props)\n', (8449, 8456), False, 'from cirq_google.devices.google_noise_properties import GoogleNoiseProperties, NoiseModelFromGoogleNoiseProperties\n'), ((8466, 8496), 'cirq.measure', 'cirq.measure', (['*qubits'], {'key': '"""m"""'}), "(*qubits, key='m')\n", (8478, 8496), False, 'import cirq, cirq_google\n'), ((9333, 9350), 'cirq.LineQubit', 'cirq.LineQubit', (['(0)'], {}), '(0)\n', (9347, 9350), False, 'import cirq, cirq_google\n'), ((9409, 9451), 'cirq_google.devices.google_noise_properties.NoiseModelFromGoogleNoiseProperties', 'NoiseModelFromGoogleNoiseProperties', (['props'], {}), '(props)\n', (9444, 9451), False, 'from cirq_google.devices.google_noise_properties import GoogleNoiseProperties, NoiseModelFromGoogleNoiseProperties\n'), ((9461, 9485), 'cirq.wait', 'cirq.wait', (['q0'], {'nanos': '(100)'}), '(q0, nanos=100)\n', (9470, 9485), False, 'import cirq, cirq_google\n'), ((9500, 9516), 'cirq.Circuit', 'cirq.Circuit', (['op'], {}), '(op)\n', (9512, 9516), False, 'import cirq, cirq_google\n'), ((9992, 10116), 'numpy.allclose', 'np.allclose', (['thermal_choi', '[[1, 0, 0, 0.9990005], [0, 0.000999500167, 0, 0], [0, 0, 0, 0], [0.9990005,\n 0, 0, 0.9990005]]'], {}), '(thermal_choi, [[1, 0, 0, 0.9990005], [0, 0.000999500167, 0, 0],\n [0, 0, 0, 0], [0.9990005, 0, 0, 0.9990005]])\n', (10003, 10116), True, 'import numpy as np\n'), ((4190, 4212), 'cirq.kraus', 'cirq.kraus', (['thermal_op'], {}), '(thermal_op)\n', (4200, 4212), False, 'import cirq, cirq_google\n'), ((4531, 4580), 'cirq.qis.measures.entanglement_fidelity', 'cirq.qis.measures.entanglement_fidelity', (['depol_op'], {}), '(depol_op)\n', (4570, 4580), False, 'import cirq, cirq_google\n'), ((4609, 4660), 'cirq.qis.measures.entanglement_fidelity', 'cirq.qis.measures.entanglement_fidelity', (['thermal_op'], {}), '(thermal_op)\n', (4648, 4660), False, 'import cirq, cirq_google\n'), ((6350, 6374), 'cirq.kraus', 'cirq.kraus', (['thermal_op_0'], {}), '(thermal_op_0)\n', (6360, 6374), False, 'import cirq, cirq_google\n'), ((6416, 6440), 'cirq.kraus', 'cirq.kraus', (['thermal_op_1'], {}), '(thermal_op_1)\n', (6426, 6440), False, 'import cirq, cirq_google\n'), ((6889, 6938), 'cirq.qis.measures.entanglement_fidelity', 'cirq.qis.measures.entanglement_fidelity', (['depol_op'], {}), '(depol_op)\n', (6928, 6938), False, 'import cirq, cirq_google\n'), ((6964, 7012), 'cirq.qis.measures.entanglement_fidelity', 'cirq.qis.measures.entanglement_fidelity', (['fsim_op'], {}), '(fsim_op)\n', (7003, 7012), False, 'import cirq, cirq_google\n'), ((7042, 7095), 'cirq.qis.measures.entanglement_fidelity', 'cirq.qis.measures.entanglement_fidelity', (['thermal_op_0'], {}), '(thermal_op_0)\n', (7081, 7095), False, 'import cirq, cirq_google\n'), ((7125, 7178), 'cirq.qis.measures.entanglement_fidelity', 'cirq.qis.measures.entanglement_fidelity', (['thermal_op_1'], {}), '(thermal_op_1)\n', (7164, 7178), False, 'import cirq, cirq_google\n'), ((7814, 7858), 'cirq.PhasedFSimGate', 'cirq.PhasedFSimGate', (['(0.5)', '(0.4)', '(0.3)', '(0.2)', '(0.1)'], {}), '(0.5, 0.4, 0.3, 0.2, 0.1)\n', (7833, 7858), False, 'import cirq, cirq_google\n'), 
((8524, 8554), 'cirq.measure', 'cirq.measure', (['*qubits'], {'key': '"""m"""'}), "(*qubits, key='m')\n", (8536, 8554), False, 'import cirq, cirq_google\n'), ((8993, 9025), 'numpy.isclose', 'np.isclose', (['op.gate.p', '(0.9090909)'], {}), '(op.gate.p, 0.9090909)\n', (9003, 9025), True, 'import numpy as np\n'), ((9045, 9077), 'numpy.isclose', 'np.isclose', (['op.gate.gamma', '(0.011)'], {}), '(op.gate.gamma, 0.011)\n', (9055, 9077), True, 'import numpy as np\n'), ((9957, 9979), 'cirq.kraus', 'cirq.kraus', (['thermal_op'], {}), '(thermal_op)\n', (9967, 9979), False, 'import cirq, cirq_google\n'), ((2924, 2934), 'cirq.Z', 'cirq.Z', (['q0'], {}), '(q0)\n', (2930, 2934), False, 'import cirq, cirq_google\n'), ((3248, 3265), 'cirq.LineQubit', 'cirq.LineQubit', (['(0)'], {}), '(0)\n', (3262, 3265), False, 'import cirq, cirq_google\n'), ((2155, 2202), 'numpy.array', 'np.array', (['[SINGLE_QUBIT_ERROR, TWO_QUBIT_ERROR]'], {}), '([SINGLE_QUBIT_ERROR, TWO_QUBIT_ERROR])\n', (2163, 2202), True, 'import numpy as np\n'), ((2565, 2588), 'cirq.devices.noise_utils.OpIdentifier', 'OpIdentifier', (['g', 'q0', 'q1'], {}), '(g, q0, q1)\n', (2577, 2588), False, 'from cirq.devices.noise_utils import OpIdentifier, PHYSICAL_GATE_TAG\n'), ((2590, 2639), 'cirq.PhasedFSimGate', 'cirq.PhasedFSimGate', (['(0.01)', '(0.03)', '(0.04)', '(0.05)', '(0.02)'], {}), '(0.01, 0.03, 0.04, 0.05, 0.02)\n', (2609, 2639), False, 'import cirq, cirq_google\n'), ((3157, 3231), 'cirq.PhasedXZGate', 'cirq.PhasedXZGate', ([], {'x_exponent': '(0.8)', 'z_exponent': '(0.2)', 'axis_phase_exponent': '(0.1)'}), '(x_exponent=0.8, z_exponent=0.2, axis_phase_exponent=0.1)\n', (3174, 3231), False, 'import cirq, cirq_google\n'), ((5895, 5964), 'cirq.ops.fsim_gate.PhasedFSimGate', 'PhasedFSimGate', ([], {'theta': '(0.01)', 'zeta': '(0.03)', 'chi': '(0.04)', 'gamma': '(0.05)', 'phi': '(0.02)'}), '(theta=0.01, zeta=0.03, chi=0.04, gamma=0.05, phi=0.02)\n', (5909, 5964), False, 'from cirq.ops.fsim_gate import PhasedFSimGate\n'), ((4938, 4961), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (4958, 4961), False, 'import cirq, cirq_google\n'), ((7877, 7912), 'cirq.devices.noise_utils.OpIdentifier', 'OpIdentifier', (['cirq.FSimGate', 'q0', 'q1'], {}), '(cirq.FSimGate, q0, q1)\n', (7889, 7912), False, 'from cirq.devices.noise_utils import OpIdentifier, PHYSICAL_GATE_TAG\n'), ((7914, 7949), 'cirq.devices.noise_utils.OpIdentifier', 'OpIdentifier', (['cirq.FSimGate', 'q1', 'q0'], {}), '(cirq.FSimGate, q1, q0)\n', (7926, 7949), False, 'from cirq.devices.noise_utils import OpIdentifier, PHYSICAL_GATE_TAG\n'), ((2271, 2289), 'cirq.devices.noise_utils.OpIdentifier', 'OpIdentifier', (['g', 'q'], {}), '(g, q)\n', (2283, 2289), False, 'from cirq.devices.noise_utils import OpIdentifier, PHYSICAL_GATE_TAG\n'), ((2382, 2405), 'cirq.devices.noise_utils.OpIdentifier', 'OpIdentifier', (['g', 'q0', 'q1'], {}), '(g, q0, q1)\n', (2394, 2405), False, 'from cirq.devices.noise_utils import OpIdentifier, PHYSICAL_GATE_TAG\n'), ((4830, 4853), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (4850, 4853), False, 'import cirq, cirq_google\n'), ((4880, 4903), 'cirq.LineQubit.range', 'cirq.LineQubit.range', (['(2)'], {}), '(2)\n', (4900, 4903), False, 'import cirq, cirq_google\n'), ((3085, 3102), 'cirq.LineQubit', 'cirq.LineQubit', (['(0)'], {}), '(0)\n', (3099, 3102), False, 'import cirq, cirq_google\n')] |
import numbers
import tempfile
import imageio
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import numpy.linalg as linalg
import seaborn as sns
import pandas as pd
from matplotlib.colors import ListedColormap, to_hex
from matplotlib.patches import Ellipse, Rectangle, FancyArrowPatch
from scipy.stats import norm
from scipy.special import expit
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import check_random_state
from dynetlsm.plots import get_colors
__all__ = ['plot_network', 'make_network_animation',
           'plot_static_sociability', 'plot_lambda', 'plot_node_trajectories',
'plot_pairwise_distances', 'plot_pairwise_probabilities']
def normal_contour(mean, cov, n_std=2, ax=None, **kwargs):
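    """Draw the n_std standard-deviation contour(s) of a bivariate normal."""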
if cov.shape[0] != 2:
raise ValueError('Only for bivariate normal densities.')
eigenvalues, eigenvectors = linalg.eigh(cov)
# sort the eigenvalues and eigenvectors in descending order
order = eigenvalues.argsort()[::-1]
eigenvalues = eigenvalues[order]
eigenvectors = eigenvectors[:, order]
# determine the angle of rotation
angle = np.degrees(np.arctan2(*eigenvectors[:, 0][::-1]))
if ax is None:
ax = plt.gca()
if isinstance(n_std, numbers.Integral):
# the diameter of the ellipse is twice the square root of the evalues
width, height = 2 * n_std * np.sqrt(eigenvalues)
ellipse = Ellipse(xy=mean, width=width, height=height, angle=angle,
**kwargs)
ax.add_artist(ellipse)
return ellipse
ellipses = []
for std in n_std:
width, height = 2 * std * np.sqrt(eigenvalues)
ellipse = Ellipse(xy=mean, width=width, height=height, angle=angle,
**kwargs)
ax.add_artist(ellipse)
ellipses.append(ellipse)
return ellipses
def plot_network(Y, X, X_sigma=None, delta=None,
z=None, tau_sq=None, normalize=False, figsize=(8, 6),
node_color='orangered', color_distance=False,
colors=None, alpha=1.0, contour_alpha=0.25,
size=300, edgecolors='w',
edge_width=0.25, node_labels=None,
font_size=12, legend_fontsize=12,
with_labels=False, ax=None):
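    """Plot a network with nodes placed at the latent positions X.

    Optionally draws credible-interval ellipses (X_sigma), community colors
    (z), node sizes from sociality (delta), and a prior contour (tau_sq).
    Returns ``(fig, ax)``; ``fig`` is None when an existing ``ax`` is given.
    """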
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
else:
fig = None
r = np.sqrt((X ** 2).sum(axis=1)).reshape(-1, 1)
if normalize:
X = X / r
cmap = None
if not isinstance(node_color, np.ndarray):
cmap = ListedColormap(
sns.light_palette(node_color, n_colors=np.unique(r).shape[0]))
G = nx.from_numpy_array(Y)
if node_labels is not None:
labels = {node_id : label for node_id, label in enumerate(node_labels)}
else:
labels = None
if z is None:
if not isinstance(node_color, np.ndarray):
if color_distance:
node_color = r.ravel() / r.min()
else:
node_color = np.asarray([node_color] * X.shape[0])
else:
encoder = LabelEncoder().fit(z)
if colors is None:
colors = get_colors(z.ravel())
node_color = colors[encoder.transform(z)]
# add a legend
for i in range(encoder.classes_.shape[0]):
ax.plot([0], [0], 'o', c=colors[i], label=encoder.classes_[i],
markeredgecolor='w', zorder=0)
ax.plot([0], [0], 'o', markeredgecolor='w', c='w', zorder=0)
# draw latent position credible interval ellipses
if X_sigma is not None:
for i in range(X.shape[0]):
if isinstance(contour_alpha, np.ndarray):
calpha = contour_alpha[i]
else:
calpha = contour_alpha
normal_contour(X[i], X_sigma[i], edgecolor='gray',
facecolor=node_color[i] if isinstance(node_color, np.ndarray) else 'gray',
alpha=calpha, ax=ax, n_std=[2])
nx.draw_networkx(G, X, edge_color='gray', width=edge_width,
node_color=node_color,
node_size=size if delta is None else 0,
alpha=alpha,
cmap=cmap,
labels=labels,
font_size=font_size,
with_labels=with_labels,
edgecolors=edgecolors,
ax=ax)
if delta is not None:
sizes = (delta - delta.min()) / (delta.max() - delta.min())
ax.scatter(X[:, 0], X[:, 1], s=size * sizes,
c='gray')
if X_sigma is not None:
ax.collections[0].set_edgecolor(None)
else:
ax.collections[0].set_edgecolor('white')
ax.axis('equal')
ax.axis('off')
# draw normal contour if available
if tau_sq is not None:
# draw center of latent space
ax.scatter(0, 0, color='k', marker='+', s=200)
# draw two standard deviation contour
normal_contour([0, 0], tau_sq * np.eye(X.shape[1]), n_std=[1],
linestyle='--', edgecolor='k',
facecolor='none', zorder=1, ax=ax)
if z is not None:
ax.legend(loc='lower center', bbox_to_anchor=(0.5, -0.05), ncol=6,
fontsize=legend_fontsize)
return fig, ax
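# Usage sketch (illustrative only; `model` stands for a fitted estimator that
# exposes the X_ / X_sigma_ attributes referenced throughout this module):
#   fig, ax = plot_network(Y[k, t], model.X_[t], X_sigma=model.X_sigma_[t])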
def make_network_animation(filename, Y, X, X_sigma=None,
k=0, z=None, tau_sq=None, normalize=True,
figsize=(8, 6), node_color='orangered',
alpha=1.0, contour_alpha=0.25,
size=300, edge_width=0.25,
node_labels=None, font_size=12, with_labels=False,
layer_labels=None, time_labels=None,
title_fmt='{}, {}', border=0.5, duration=1):
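    """Render one frame per time step for layer k and write them out with
    imageio as an animation."""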
# XXX: hack to shut off plotting within a jupyter notebook...
plt.ioff()
n_layers, n_time_steps, _, _ = Y.shape
if layer_labels is None:
layer_labels = ["k = {}".format(k) for k in range(n_layers)]
if time_labels is None:
time_labels = ["t = {}".format(t) for t in range(n_time_steps)]
with tempfile.TemporaryDirectory() as tempdir:
x_max, y_max = X.max(axis=(0, 1))
x_min, y_min = X.min(axis=(0, 1))
pngs = []
for t in range(Y.shape[1]):
fig, ax = plot_network(Y[k, t], X[t],
X_sigma=X_sigma[t] if X_sigma is not None else None,
z=z, tau_sq=tau_sq,
normalize=normalize, figsize=figsize, node_color=node_color,
alpha=alpha, contour_alpha=contour_alpha,
size=size, edge_width=edge_width,
node_labels=node_labels, font_size=font_size,
with_labels=with_labels,)
ax.set_title(title_fmt.format(layer_labels[k], time_labels[t]))
ax.set_xlim(x_min - border, x_max + border)
ax.set_ylim(y_min - border, y_max + border)
fname = tempfile.TemporaryFile(dir=tempdir, suffix='.png')
fig.savefig(fname, dpi=100)
fname.seek(0)
plt.close(fig) # necessary to free memory
pngs.append(fname)
images = []
for png in pngs:
images.append(imageio.imread(png))
imageio.mimsave(filename, images, duration=duration)
plt.ion()
def plot_static_sociability(model, k=0, node_labels=None, layer_label=None,
ax=None, figsize=(10, 12), color_code=False):
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
else:
fig = None
if node_labels is None:
node_labels = [str(i + 1) for i in range(model.X_.shape[1])]
node_labels = np.asarray(node_labels)
order = np.argsort(model.delta_[k])
odds = np.exp(model.delta_[k][order])
y_pos = np.arange(node_labels.shape[0])
if color_code:
colors = ['steelblue' if odds[i] >= 1. else 'gray' for i in
range(len(odds))]
else:
colors = 'gray'
ax.barh(y_pos, odds, align='center', color=colors)
ax.set_yticks(y_pos)
ax.set_yticklabels(node_labels[order])
    ax.set_xlabel(r'odds [$\exp(\delta_k^i)$]')
if layer_label is not None:
ax.set_title(layer_label)
else:
ax.set_title('k = {}'.format(k))
return fig, ax
def plot_social_trajectories(
model, k=0, q_alpha=0.05, node_list=None, node_colors=None,
node_labels=None, layer_label=None, ref_value=0,
ref_label=None,
plot_hline=True, xlabel='Time', alpha=0.15, fill_alpha=0.2,
line_width=3, ax=None, figsize=(10, 6), label_offset=1,
fontsize=12, color_code=False):
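    """Plot per-node sociality trajectories for layer k, highlighting the
    nodes in ``node_list`` with colored, annotated curves."""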
n_layers, n_time_steps, n_nodes = model.delta_.shape
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
else:
fig = None
ax.set_clip_on(False)
if node_labels is None:
node_labels = [str(i + 1) for i in range(model.delta_.shape[2])]
node_labels = np.asarray(node_labels)
for i in range(n_nodes):
if model.X_ is None:
ax.plot(model.delta_[k, :, i].T, 'k-', alpha=alpha)
else:
ax.plot(model.gamma_[k, :, i].T, 'k-', alpha=alpha)
if node_list is not None:
node_list = np.asarray(node_list)
if node_colors is None:
node_colors = get_colors(np.arange(len(node_list)))
for i, node_label in enumerate(node_list):
node_id = np.where(node_labels == node_label)[0].item()
if model.X_ is None:
ax.plot(model.delta_[k, :, node_id].T, '--',
lw=line_width, c=node_colors[i])
else:
ax.plot(model.gamma_[k, :, node_id].T, '--',
lw=line_width, c=node_colors[i])
ax.annotate(node_label,
xy=(n_time_steps + label_offset,
model.delta_[k, -1, node_id]),
color=node_colors[i], fontsize=fontsize,
annotation_clip=False)
if q_alpha is not None and model.X_ is None:
x_upp = np.zeros(n_time_steps)
x_low = np.zeros(n_time_steps)
z_alpha = norm.ppf(1 - q_alpha / 2.)
ts = np.arange(n_time_steps)
for t in range(n_time_steps):
se = z_alpha * np.sqrt(model.delta_sigma_[k, t, node_id])
x_upp[t] = model.delta_[k, t, node_id] + se
x_low[t] = model.delta_[k, t, node_id] - se
ax.fill_between(
ts, x_low, x_upp, alpha=fill_alpha, color=node_colors[i])
elif q_alpha is not None:
gamma_ci = np.quantile(
model.gammas_, [q_alpha/2., 1 - q_alpha/2.], axis=0)
ax.fill_between(
np.arange(n_time_steps), gamma_ci[0, k, :, node_id],
gamma_ci[1, k, :, node_id],
alpha=fill_alpha, color=node_colors[i])
if plot_hline:
ax.hlines(
ref_value, 0, n_time_steps - 1, lw=2, linestyles='--', color='k')
if ref_label:
ax.annotate(ref_label,
xy=(n_time_steps + label_offset, ref_value),
color='k', fontsize=fontsize)
# remove spines
#ax.spines['right'].set_visible(False)
#ax.spines['left'].set_visible(False)
#ax.spines['top'].set_visible(False)
#ax.spines['bottom'].set_visible(False)
# axis-labels
ax.set_ylabel('Sociality', fontsize=fontsize)
ax.set_xlabel(xlabel, fontsize=fontsize)
if layer_label is not None:
ax.set_title(layer_label, fontsize=fontsize)
else:
ax.set_title('k = {}'.format(k), fontsize=fontsize)
return fig, ax
def plot_node_trajectories(model, node_list, q_alpha=0.05, node_labels=None,
node_colors=None, nrows=None, ncols=1, alpha=0.2,
linestyle='o--', fontsize=12,
figsize=(10, 8)):
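    """Plot each latent coordinate of the listed nodes over time, with
    pointwise credible bands."""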
if nrows is None:
nrows = model.X_.shape[2]
fig, axes = plt.subplots(nrows, ncols, figsize=figsize)
ax = axes.flat
if node_labels is None:
node_labels = [i for i in range(model.X_.shape[1])]
node_labels = np.asarray(node_labels)
if node_colors is None:
node_colors = get_colors(np.arange(len(node_list)))
n_time_steps, n_nodes, n_features = model.Z_.shape
z_alpha = norm.ppf(1 - q_alpha / 2.)
ts = np.arange(n_time_steps)
for i, node_label in enumerate(node_list):
node_id = np.where(node_labels == node_label)[0].item()
x_upp = np.zeros(n_time_steps)
x_low = np.zeros(n_time_steps)
for p in range(n_features):
ax[p].plot(ts, model.Z_[:, node_id, p], linestyle,
label=node_labels[node_id], c=node_colors[i])
for t in range(n_time_steps):
se = z_alpha * np.sqrt(model.Z_sigma_[t, node_id, p, p])
x_upp[t] = model.Z_[t, node_id, p] + se
x_low[t] = model.Z_[t, node_id, p] - se
ax[p].fill_between(
ts, x_low, x_upp, alpha=alpha, color=node_colors[i])
    # accommodate legends and title
ax[0].legend(bbox_to_anchor=(1.04, 1), loc='upper left', fontsize=fontsize)
ax[-1].set_xlabel('t')
for p in range(n_features):
#ax[p].set_title('p = {}'.format(p + 1), fontsize=fontsize)
ax[p].hlines(0, 1, n_time_steps, lw=2, linestyles='dotted', color='k', alpha=0)
ax[p].set_ylabel('Latent Position [h = {}]'.format(p + 1),
fontsize=fontsize)
ax[p].tick_params(axis='x', labelsize=fontsize)
ax[p].tick_params(axis='y', labelsize=fontsize)
plt.subplots_adjust(right=0.7)
return fig, ax
def sample_distances(model, k, t, i, j, n_reps=1000, random_state=123):
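    """Monte Carlo samples of the bilinear term lambda_k . (X_i * X_j) at time t.

    Latent positions and homophily parameters are drawn from their
    (approximate) posteriors; returns an array of n_reps samples.
    """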
rng = check_random_state(random_state)
Xi = rng.multivariate_normal(model.X_[t, i], model.X_sigma_[t, i],
size=n_reps)
Xj = rng.multivariate_normal(model.X_[t, j], model.X_sigma_[t, j],
size=n_reps)
if k == 0:
lmbdak = np.zeros((n_reps, model.lambda_.shape[1]))
for p in range(model.lambda_.shape[1]):
lmbdak[:, p] = (
2 * rng.binomial(1, model.lambda_proba_[p], size=n_reps) - 1)
else:
lmbdak = rng.multivariate_normal(
model.lambda_[k], model.lambda_sigma_[k], size=n_reps)
return np.sum(lmbdak * Xi * Xj, axis=1)
def plot_pairwise_distances(model, node_i, node_j,
node_labels=None,
layer_labels=None, q_alpha=0.05, n_reps=1000,
random_state=123, alpha=0.2, linestyle='--',
figsize=(10, 8), ax=None):
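    """Plot posterior summaries of ``sample_distances`` for a node pair over
    time, one curve per layer."""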
if ax is not None:
fig = None
else:
fig, ax = plt.subplots(figsize=figsize)
if node_labels is None:
node_labels = [i for i in range(model.X_.shape[1])]
node_labels = np.asarray(node_labels)
n_layers, n_time_steps, n_nodes, _ = model.dist_.shape
ts = np.arange(n_time_steps)
i = np.where(node_labels == node_i)[0].item()
j = np.where(node_labels == node_j)[0].item()
for k in range(n_layers):
if layer_labels is None:
label = 'k = {}'.format(k)
else:
label = layer_labels[k]
if q_alpha is None:
ax.plot(ts, model.dist_[k, :, i, j], linestyle,
label=label)
else:
dist_mean = np.zeros(n_time_steps)
dist_low = np.zeros(n_time_steps)
dist_upp = np.zeros(n_time_steps)
for t in range(n_time_steps):
dist = sample_distances(
model, k, t, i, j, n_reps=n_reps, random_state=random_state)
dist_mean[t] = dist.mean()
dist_low[t] = np.quantile(dist, q=q_alpha / 2.)
dist_upp[t] = np.quantile(dist, q=1 - q_alpha / 2.)
ax.plot(ts, dist_mean, linestyle, label=label)
ax.fill_between(ts, dist_low, dist_upp, alpha=alpha)
    # accommodate legends and title
ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left')
ax.set_xlabel('t')
ax.set_ylabel('Distances ({} - {})'.format(node_i, node_j))
return fig, ax
def sample_link_probability(model, k, t, i, j, n_reps=1000, random_state=123):
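    """Monte Carlo samples of the link probability for pair (i, j) at time t.

    Each sample is expit(delta_i + delta_j + lambda_k . (X_i * X_j)) with all
    quantities drawn from their (approximate) posteriors.
    """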
rng = check_random_state(random_state)
deltai = rng.normal(
loc=model.delta_[k, t, i], scale=np.sqrt(model.delta_sigma_[k, t, i]),
size=n_reps)
deltaj = rng.normal(
loc=model.delta_[k, t, j], scale=np.sqrt(model.delta_sigma_[k, t, j]),
size=n_reps)
Xi = rng.multivariate_normal(model.X_[t, i], model.X_sigma_[t, i],
size=n_reps)
Xj = rng.multivariate_normal(model.X_[t, j], model.X_sigma_[t, j],
size=n_reps)
if k == 0:
lmbdak = np.zeros((n_reps, model.lambda_.shape[1]))
for p in range(model.lambda_.shape[1]):
lmbdak[:, p] = (
2 * rng.binomial(1, model.lambda_proba_[p], size=n_reps) - 1)
else:
lmbdak = rng.multivariate_normal(
model.lambda_[k], model.lambda_sigma_[k], size=n_reps)
return expit(deltai + deltaj + np.sum(lmbdak * Xi * Xj, axis=1))
def forecast_link_probability(model, k, i, j, horizon=1, n_reps=1000,
random_state=123):
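    """Forecast link probabilities `horizon` steps past the last observation.

    Socialities and latent positions follow Gaussian random walks, so each
    forecast step adds one Gaussian innovation before applying the link
    function.
    """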
rng = check_random_state(random_state)
n_features = model.X_.shape[-1]
if k == 0:
lmbdak = np.zeros((n_reps, model.lambda_.shape[1]))
for p in range(model.lambda_.shape[1]):
lmbdak[:, p] = (
2 * rng.binomial(1, model.lambda_proba_[p], size=n_reps) - 1)
else:
lmbdak = rng.multivariate_normal(
model.lambda_[k], model.lambda_sigma_[k], size=n_reps)
deltai = rng.normal(
loc=model.delta_[k, -1, i], scale=np.sqrt(model.delta_sigma_[k, -1, i]),
size=n_reps)
deltaj = rng.normal(
loc=model.delta_[k, -1, j], scale=np.sqrt(model.delta_sigma_[k, -1, j]),
size=n_reps)
Xi = rng.multivariate_normal(model.X_[-1, i], model.X_sigma_[-1, i],
size=n_reps)
Xj = rng.multivariate_normal(model.X_[-1, j], model.X_sigma_[-1, j],
size=n_reps)
pis = np.zeros((horizon, n_reps))
for h in range(horizon):
deltai = deltai + rng.normal(
loc=0, scale=np.sqrt(model.sigma_sq_delta_), size=n_reps)
deltaj = deltaj + rng.normal(
loc=0, scale=np.sqrt(model.sigma_sq_delta_), size=n_reps)
Xi = Xi + rng.multivariate_normal(
np.zeros(n_features), model.sigma_sq_ * np.eye(n_features),
size=n_reps)
Xj = Xj + rng.multivariate_normal(
np.zeros(n_features), model.sigma_sq_ * np.eye(n_features),
size=n_reps)
pis[h] = expit(deltai + deltaj + np.sum(lmbdak * Xi * Xj, axis=1))
return pis
def plot_pairwise_probabilities(model, node_i, node_j, horizon=0,
node_labels=None,
layer_labels=None, q_alpha=0.05, n_reps=1000,
random_state=123, alpha=0.2, linestyle='--',
fontsize=16, figsize=(10, 8), ax=None):
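    """Plot posterior link probabilities for a node pair over time, optionally
    forecasting `horizon` additional steps."""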
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
else:
fig = None
if node_labels is None:
node_labels = [i for i in range(model.X_.shape[1])]
node_labels = np.asarray(node_labels)
n_layers, n_time_steps, n_nodes, _ = model.probas_.shape
ts = np.arange(n_time_steps + horizon)
i = np.where(node_labels == node_i)[0].item()
j = np.where(node_labels == node_j)[0].item()
for k in range(n_layers):
if layer_labels is None:
label = 'k = {}'.format(k)
else:
label = layer_labels[k]
if q_alpha is None:
ax.plot(ts, model.probas_[k, :, i, j], linestyle,
label=label)
else:
pi_mean = np.zeros(n_time_steps + horizon)
pi_low = np.zeros(n_time_steps + horizon)
pi_upp = np.zeros(n_time_steps + horizon)
for t in range(n_time_steps):
pis = sample_link_probability(
model, k, t, i, j, n_reps=n_reps, random_state=random_state)
pi_mean[t] = pis.mean()
pi_low[t] = np.quantile(pis, q=q_alpha / 2.)
pi_upp[t] = np.quantile(pis, q=1 - q_alpha / 2.)
if horizon > 0:
pis = forecast_link_probability(
model, k, i, j, horizon=horizon, n_reps=n_reps,
random_state=random_state)
for h in range(horizon):
pi_mean[n_time_steps + h] = pis[h].mean()
pi_low[n_time_steps + h] = (
np.quantile(pis[h], q=q_alpha / 2.))
pi_upp[n_time_steps + h] = (
np.quantile(pis[h], q=1 - q_alpha / 2.))
ax.plot(ts, pi_mean, linestyle, label=label)
ax.fill_between(ts, pi_low, pi_upp, alpha=alpha)
    # accommodate legends and title
ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left', fontsize=fontsize)
ax.set_xlabel('t')
ax.set_ylabel('Link Probability ({} - {})'.format(node_i, node_j), fontsize=fontsize)
return fig, ax
def plot_homophily_matrix(model, q_alpha=0.05,
layer_labels=None, height=0.5, hspace=1.,
fontsize=12, figsize=(12, 6)):
n_layers, n_features = model.lambda_.shape
if layer_labels is None:
layer_labels = ['k = {}'.format(k + 1) for k in range(n_layers)]
fig, axes = plt.subplots(n_layers, 1, figsize=figsize, sharex=True)
z_alpha = norm.ppf(1 - q_alpha / 2.)
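    # Symmetric (Wald-style) credible intervals: mean +/- z_{1-alpha/2} * sd.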
for p in range(n_features):
xerr = z_alpha * np.sqrt(model.lambda_sigma_[:, p, p])
for k in range(n_layers):
colors = 'red' if model.lambda_[k, p] > 0 else 'blue'
axes[k].hlines(k + hspace * p, 0, model.lambda_[k, p], lw=1,
color=colors, linestyles='--')
axes[k].errorbar(model.lambda_[k, p], k + hspace * p,
fmt='o',
xerr=xerr[k], ecolor='k', capsize=5,
color='k', markersize=9, markeredgecolor='w')
# add text
for k in range(n_layers):
align = 'right' if model.lambda_[k, p] >= 0 else 'left'
lmbda = model.lambda_[k, p]
if k == 0:
txt = '{} (d = {})'.format(lmbda, p+1)
else:
txt = '{:.3f} ({:.3f}, {:.3f})'.format(
lmbda, lmbda - xerr[k], lmbda + xerr[k])
axes[k].text(lmbda, k + hspace * p - 0.1,
txt, horizontalalignment=align)
for k in range(n_layers):
axes[k].set_yticks([k + hspace / n_features])
axes[k].set_yticklabels([layer_labels[k]], fontsize=fontsize)
axes[k].invert_yaxis()
if k != (n_layers - 1):
axes[k].spines['bottom'].set_visible(False)
axes[k].tick_params(bottom=False)
    axes[-1].set_xlabel(r'Homophily Parameter ($\lambda_{kd}$)',
fontsize=fontsize)
x_max = max([ax.get_xlim()[1] for ax in axes.flat])
for k in range(n_layers):
if np.all(model.lambda_ >= 0):
axes[k].set_xlim(0, axes[k].get_xlim()[1])
else:
axes[k].vlines(0, k, k + hspace * (n_features - 1),
linestyles='dotted', color='k')
sns.despine(ax=axes[k], bottom=True)
sns.set_style('white')
plt.subplots_adjust(hspace=0.5)
return fig, axes
#def plot_homophily_matrix(model, q_alpha=0.05, colors=None,
# layer_labels=None, height=0.5,
# fontsize=12, figsize=(12, 6)):
# n_layers, n_features = model.lambda_.shape
#
# if layer_labels is None:
# layer_labels = ['k = {}'.format(k + 1) for k in range(n_layers)]
#
# fig, ax = plt.subplots(figsize=figsize)
#
# if colors is None:
# colors = get_colors(np.arange(n_layers))
#
# z_alpha = norm.ppf(1 - q_alpha / 2.)
# for p in range(n_features):
# xerr = z_alpha * np.sqrt(model.lambda_sigma_[:, p, p])
#
# #colors = ['red' if model.lambda_[k, p] > 0 else 'blue' for
# # k in range(n_layers)]
# ax.hlines(np.arange(n_layers) + 0.5 * p, 0, model.lambda_[:, p], lw=1,
# color=colors, linestyles='--')
# ax.errorbar(model.lambda_[:, p], np.arange(n_layers) + 0.5 * p,
# fmt='o',
# xerr=xerr, ecolor='k', capsize=5,
# color='k', markersize=9, markeredgecolor='w')
#
# # add text
# for k in range(n_layers):
# align = 'right' if model.lambda_[k, p] >= 0 else 'left'
#
# lmbda = model.lambda_[k, p]
# if k == 0:
# txt = '{}'.format(lmbda)
# else:
# txt = '{:.3f} ({:.3f}, {:.3f})'.format(
# lmbda, lmbda - xerr[k], lmbda + xerr[k])
# ax.text(lmbda, k + 0.5 * p - 0.1, txt, horizontalalignment=align)
#
# ax.set_yticks(np.arange(n_layers) + 0.25)
# ax.set_yticklabels(layer_labels, fontsize=fontsize)
# ax.invert_yaxis()
# #ax.set_title('p = {}'.format(p + 1), fontsize=fontsize)
#
# ax.set_xlabel('Homophily Parameter ($\lambda_{kp}$)',
# fontsize=fontsize)
#
# #x_max = max([ax.get_xlim()[1] for ax in axes.flat])
# if np.all(model.lambda_ >= 0):
# ax.set_xlim(0, ax.get_xlim()[1])
# else:
# ax.vlines(0, 0, n_layers - 0.5 * (n_features - 1), linestyles='dotted', color='k')
# sns.despine(ax=ax, bottom=True)
#
# sns.set_style('white')
#
# return fig, ax
def plot_lambda(model, q_alpha=0.05, layer_labels=None, height=0.5,
fontsize=12,
figsize=(12, 6), include_gridlines=False):
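    """Plot homophily estimates with credible intervals, one panel per latent
    dimension and one errorbar per layer."""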
n_layers, n_features = model.lambda_.shape
if layer_labels is None:
layer_labels = ['k = {}'.format(k + 1) for k in range(n_layers)]
if include_gridlines:
sns.set_style('whitegrid')
fig, axes = plt.subplots(n_features, 1, figsize=figsize, sharex=True)
colors = [to_hex(c) for c in sns.color_palette(
'muted', n_colors=n_layers, desat=0.75)]
z_alpha = norm.ppf(1 - q_alpha / 2.)
for p, ax in enumerate(axes.flat):
xerr = z_alpha * np.sqrt(model.lambda_sigma_[:, p, p])
colors = ['red' if model.lambda_[k, p] > 0 else 'blue' for
k in range(n_layers)]
ax.hlines(np.arange(n_layers), 0, model.lambda_[:, p], lw=1,
color=colors, linestyles='--')
ax.errorbar(model.lambda_[:, p], np.arange(n_layers), fmt='o',
xerr=xerr, ecolor='k', capsize=5,
color='k', markersize=9, markeredgecolor='w')
# add text
for k in range(n_layers):
align = 'right' if model.lambda_[k, p] >= 0 else 'left'
lmbda = model.lambda_[k, p]
if k == 0:
txt = '{}'.format(lmbda)
else:
txt = '{:.3f} ({:.3f}, {:.3f})'.format(
lmbda, lmbda - xerr[k], lmbda + xerr[k])
ax.text(lmbda, k - 0.1, txt, horizontalalignment=align)
ax.set_yticks(np.arange(n_layers))
ax.set_yticklabels(layer_labels, fontsize=fontsize)
ax.invert_yaxis()
ax.set_title('h = {}'.format(p + 1), fontsize=fontsize)
    axes.flat[-1].set_xlabel(r'Homophily Parameter ($\lambda_{kh}$)',
fontsize=fontsize)
x_max = max([ax.get_xlim()[1] for ax in axes.flat])
for ax in axes.flat:
if np.all(model.lambda_ >= 0):
ax.set_xlim(0, x_max)
else:
ax.vlines(0, 0, n_layers - 1, linestyles='dotted', color='k')
sns.despine(ax=ax, bottom=True)
sns.set_style('white')
return fig, axes
def plot_network_statistics(stat_sim, stat_obs=None, nrow=1, ncol=None,
time_labels=None, stat_label='Statistic',
time_step=1,
layer_labels=None, figsize=(16, 10),
xlabel='Time'):
n_layers, n_time_steps, _ = stat_sim.shape
if ncol is None:
ncol = n_layers
fig, axes = plt.subplots(nrow, ncol, sharey=True, figsize=figsize)
if time_labels is None:
time_labels = np.arange(n_time_steps) + 1
if layer_labels is None:
layer_labels = np.arange(n_layers) + 1
for k, ax in enumerate(axes.flat):
data = pd.DataFrame()
for t in range(n_time_steps):
data[time_labels[t]] = stat_sim[k, t]
if stat_obs is not None:
ax.plot(np.arange(n_time_steps), stat_obs[k], 'o--', c='k')
sns.boxplot(x='variable', y='value', data=pd.melt(data),
ax=ax, color='white')
ax.set_xticklabels(time_labels[::time_step], rotation=45, fontsize=12)
plt.setp(ax.artists, edgecolor='black')
plt.setp(ax.lines, color='black')
ax.set_xticks([i for i in range(0, n_time_steps, time_step)])
ax.tick_params(axis='y', labelsize=16)
ax.tick_params(axis='x', labelsize=16)
ax.grid(axis='x')
ax.set_title(layer_labels[k], fontsize=24)
ax.set_xlabel(xlabel, fontsize=24)
if k == 0:
ax.set_ylabel(stat_label, fontsize=24)
else:
ax.set_ylabel('')
return fig, axes
| [
"sklearn.preprocessing.LabelEncoder",
"numpy.sqrt",
"numpy.argsort",
"seaborn.set_style",
"numpy.arctan2",
"numpy.arange",
"seaborn.despine",
"seaborn.color_palette",
"numpy.where",
"numpy.asarray",
"numpy.exp",
"matplotlib.pyplot.close",
"networkx.from_numpy_array",
"numpy.linalg.eigh",
... | [((885, 901), 'numpy.linalg.eigh', 'linalg.eigh', (['cov'], {}), '(cov)\n', (896, 901), True, 'import numpy.linalg as linalg\n'), ((2674, 2696), 'networkx.from_numpy_array', 'nx.from_numpy_array', (['Y'], {}), '(Y)\n', (2693, 2696), True, 'import networkx as nx\n'), ((4018, 4269), 'networkx.draw_networkx', 'nx.draw_networkx', (['G', 'X'], {'edge_color': '"""gray"""', 'width': 'edge_width', 'node_color': 'node_color', 'node_size': '(size if delta is None else 0)', 'alpha': 'alpha', 'cmap': 'cmap', 'labels': 'labels', 'font_size': 'font_size', 'with_labels': 'with_labels', 'edgecolors': 'edgecolors', 'ax': 'ax'}), "(G, X, edge_color='gray', width=edge_width, node_color=\n node_color, node_size=size if delta is None else 0, alpha=alpha, cmap=\n cmap, labels=labels, font_size=font_size, with_labels=with_labels,\n edgecolors=edgecolors, ax=ax)\n", (4034, 4269), True, 'import networkx as nx\n'), ((5941, 5951), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (5949, 5951), True, 'import matplotlib.pyplot as plt\n'), ((7403, 7412), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (7410, 7412), True, 'import matplotlib.pyplot as plt\n'), ((7777, 7800), 'numpy.asarray', 'np.asarray', (['node_labels'], {}), '(node_labels)\n', (7787, 7800), True, 'import numpy as np\n'), ((7814, 7841), 'numpy.argsort', 'np.argsort', (['model.delta_[k]'], {}), '(model.delta_[k])\n', (7824, 7841), True, 'import numpy as np\n'), ((7853, 7883), 'numpy.exp', 'np.exp', (['model.delta_[k][order]'], {}), '(model.delta_[k][order])\n', (7859, 7883), True, 'import numpy as np\n'), ((7896, 7927), 'numpy.arange', 'np.arange', (['node_labels.shape[0]'], {}), '(node_labels.shape[0])\n', (7905, 7927), True, 'import numpy as np\n'), ((9127, 9150), 'numpy.asarray', 'np.asarray', (['node_labels'], {}), '(node_labels)\n', (9137, 9150), True, 'import numpy as np\n'), ((12283, 12326), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrows', 'ncols'], {'figsize': 'figsize'}), '(nrows, ncols, figsize=figsize)\n', (12295, 12326), True, 'import matplotlib.pyplot as plt\n'), ((12453, 12476), 'numpy.asarray', 'np.asarray', (['node_labels'], {}), '(node_labels)\n', (12463, 12476), True, 'import numpy as np\n'), ((12636, 12663), 'scipy.stats.norm.ppf', 'norm.ppf', (['(1 - q_alpha / 2.0)'], {}), '(1 - q_alpha / 2.0)\n', (12644, 12663), False, 'from scipy.stats import norm\n'), ((12672, 12695), 'numpy.arange', 'np.arange', (['n_time_steps'], {}), '(n_time_steps)\n', (12681, 12695), True, 'import numpy as np\n'), ((13940, 13970), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'right': '(0.7)'}), '(right=0.7)\n', (13959, 13970), True, 'import matplotlib.pyplot as plt\n'), ((14075, 14107), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (14093, 14107), False, 'from sklearn.utils import check_random_state\n'), ((14705, 14737), 'numpy.sum', 'np.sum', (['(lmbdak * Xi * Xj)'], {'axis': '(1)'}), '(lmbdak * Xi * Xj, axis=1)\n', (14711, 14737), True, 'import numpy as np\n'), ((15247, 15270), 'numpy.asarray', 'np.asarray', (['node_labels'], {}), '(node_labels)\n', (15257, 15270), True, 'import numpy as np\n'), ((15340, 15363), 'numpy.arange', 'np.arange', (['n_time_steps'], {}), '(n_time_steps)\n', (15349, 15363), True, 'import numpy as np\n'), ((16662, 16694), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (16680, 16694), False, 'from sklearn.utils import check_random_state\n'), ((17732, 17764), 
'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (17750, 17764), False, 'from sklearn.utils import check_random_state\n'), ((18657, 18684), 'numpy.zeros', 'np.zeros', (['(horizon, n_reps)'], {}), '((horizon, n_reps))\n', (18665, 18684), True, 'import numpy as np\n'), ((19854, 19877), 'numpy.asarray', 'np.asarray', (['node_labels'], {}), '(node_labels)\n', (19864, 19877), True, 'import numpy as np\n'), ((19949, 19982), 'numpy.arange', 'np.arange', (['(n_time_steps + horizon)'], {}), '(n_time_steps + horizon)\n', (19958, 19982), True, 'import numpy as np\n'), ((22101, 22156), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_layers', '(1)'], {'figsize': 'figsize', 'sharex': '(True)'}), '(n_layers, 1, figsize=figsize, sharex=True)\n', (22113, 22156), True, 'import matplotlib.pyplot as plt\n'), ((22172, 22199), 'scipy.stats.norm.ppf', 'norm.ppf', (['(1 - q_alpha / 2.0)'], {}), '(1 - q_alpha / 2.0)\n', (22180, 22199), False, 'from scipy.stats import norm\n'), ((24061, 24083), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (24074, 24083), True, 'import seaborn as sns\n'), ((24088, 24119), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.5)'}), '(hspace=0.5)\n', (24107, 24119), True, 'import matplotlib.pyplot as plt\n'), ((26666, 26723), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_features', '(1)'], {'figsize': 'figsize', 'sharex': '(True)'}), '(n_features, 1, figsize=figsize, sharex=True)\n', (26678, 26723), True, 'import matplotlib.pyplot as plt\n'), ((26846, 26873), 'scipy.stats.norm.ppf', 'norm.ppf', (['(1 - q_alpha / 2.0)'], {}), '(1 - q_alpha / 2.0)\n', (26854, 26873), False, 'from scipy.stats import norm\n'), ((28431, 28453), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (28444, 28453), True, 'import seaborn as sns\n'), ((28879, 28933), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrow', 'ncol'], {'sharey': '(True)', 'figsize': 'figsize'}), '(nrow, ncol, sharey=True, figsize=figsize)\n', (28891, 28933), True, 'import matplotlib.pyplot as plt\n'), ((1148, 1185), 'numpy.arctan2', 'np.arctan2', (['*eigenvectors[:, 0][::-1]'], {}), '(*eigenvectors[:, 0][::-1])\n', (1158, 1185), True, 'import numpy as np\n'), ((1220, 1229), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1227, 1229), True, 'import matplotlib.pyplot as plt\n'), ((1428, 1495), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'mean', 'width': 'width', 'height': 'height', 'angle': 'angle'}), '(xy=mean, width=width, height=height, angle=angle, **kwargs)\n', (1435, 1495), False, 'from matplotlib.patches import Ellipse, Rectangle, FancyArrowPatch\n'), ((1691, 1758), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'mean', 'width': 'width', 'height': 'height', 'angle': 'angle'}), '(xy=mean, width=width, height=height, angle=angle, **kwargs)\n', (1698, 1758), False, 'from matplotlib.patches import Ellipse, Rectangle, FancyArrowPatch\n'), ((2346, 2375), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (2358, 2375), True, 'import matplotlib.pyplot as plt\n'), ((6207, 6236), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (6234, 6236), False, 'import tempfile\n'), ((7345, 7397), 'imageio.mimsave', 'imageio.mimsave', (['filename', 'images'], {'duration': 'duration'}), '(filename, images, duration=duration)\n', (7360, 7397), False, 'import imageio\n'), ((7602, 7631), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (7614, 7631), True, 'import matplotlib.pyplot as plt\n'), ((8921, 8950), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (8933, 8950), True, 'import matplotlib.pyplot as plt\n'), ((9404, 9425), 'numpy.asarray', 'np.asarray', (['node_list'], {}), '(node_list)\n', (9414, 9425), True, 'import numpy as np\n'), ((12823, 12845), 'numpy.zeros', 'np.zeros', (['n_time_steps'], {}), '(n_time_steps)\n', (12831, 12845), True, 'import numpy as np\n'), ((12862, 12884), 'numpy.zeros', 'np.zeros', (['n_time_steps'], {}), '(n_time_steps)\n', (12870, 12884), True, 'import numpy as np\n'), ((14376, 14418), 'numpy.zeros', 'np.zeros', (['(n_reps, model.lambda_.shape[1])'], {}), '((n_reps, model.lambda_.shape[1]))\n', (14384, 14418), True, 'import numpy as np\n'), ((15109, 15138), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (15121, 15138), True, 'import matplotlib.pyplot as plt\n'), ((17214, 17256), 'numpy.zeros', 'np.zeros', (['(n_reps, model.lambda_.shape[1])'], {}), '((n_reps, model.lambda_.shape[1]))\n', (17222, 17256), True, 'import numpy as np\n'), ((17835, 17877), 'numpy.zeros', 'np.zeros', (['(n_reps, model.lambda_.shape[1])'], {}), '((n_reps, model.lambda_.shape[1]))\n', (17843, 17877), True, 'import numpy as np\n'), ((19687, 19716), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (19699, 19716), True, 'import matplotlib.pyplot as plt\n'), ((23790, 23816), 'numpy.all', 'np.all', (['(model.lambda_ >= 0)'], {}), '(model.lambda_ >= 0)\n', (23796, 23816), True, 'import numpy as np\n'), ((24018, 24054), 'seaborn.despine', 'sns.despine', ([], {'ax': 'axes[k]', 'bottom': '(True)'}), '(ax=axes[k], bottom=True)\n', (24029, 24054), True, 'import seaborn as sns\n'), ((26622, 26648), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (26635, 26648), True, 'import seaborn as sns\n'), ((26738, 26747), 'matplotlib.colors.to_hex', 'to_hex', (['c'], {}), '(c)\n', (26744, 26747), False, 'from matplotlib.colors import ListedColormap, to_hex\n'), ((28236, 28262), 'numpy.all', 'np.all', (['(model.lambda_ >= 0)'], {}), '(model.lambda_ >= 0)\n', (28242, 28262), True, 'import numpy as np\n'), ((28394, 28425), 'seaborn.despine', 'sns.despine', ([], {'ax': 'ax', 'bottom': '(True)'}), '(ax=ax, bottom=True)\n', (28405, 28425), True, 'import seaborn as sns\n'), ((29145, 29159), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (29157, 29159), True, 'import pandas as pd\n'), ((29549, 29588), 'matplotlib.pyplot.setp', 'plt.setp', (['ax.artists'], {'edgecolor': '"""black"""'}), "(ax.artists, edgecolor='black')\n", (29557, 29588), True, 'import matplotlib.pyplot as plt\n'), ((29597, 29630), 'matplotlib.pyplot.setp', 'plt.setp', (['ax.lines'], {'color': '"""black"""'}), "(ax.lines, color='black')\n", (29605, 29630), True, 'import matplotlib.pyplot as plt\n'), ((1389, 1409), 'numpy.sqrt', 'np.sqrt', (['eigenvalues'], {}), '(eigenvalues)\n', (1396, 1409), True, 'import numpy as np\n'), ((1652, 1672), 'numpy.sqrt', 'np.sqrt', (['eigenvalues'], {}), '(eigenvalues)\n', (1659, 1672), True, 'import numpy as np\n'), ((7041, 7091), 'tempfile.TemporaryFile', 'tempfile.TemporaryFile', ([], {'dir': 'tempdir', 'suffix': '""".png"""'}), "(dir=tempdir, suffix='.png')\n", (7063, 7091), False, 'import tempfile\n'), ((7170, 7184), 'matplotlib.pyplot.close', 
'plt.close', (['fig'], {}), '(fig)\n', (7179, 7184), True, 'import matplotlib.pyplot as plt\n'), ((15777, 15799), 'numpy.zeros', 'np.zeros', (['n_time_steps'], {}), '(n_time_steps)\n', (15785, 15799), True, 'import numpy as np\n'), ((15823, 15845), 'numpy.zeros', 'np.zeros', (['n_time_steps'], {}), '(n_time_steps)\n', (15831, 15845), True, 'import numpy as np\n'), ((15869, 15891), 'numpy.zeros', 'np.zeros', (['n_time_steps'], {}), '(n_time_steps)\n', (15877, 15891), True, 'import numpy as np\n'), ((16762, 16798), 'numpy.sqrt', 'np.sqrt', (['model.delta_sigma_[k, t, i]'], {}), '(model.delta_sigma_[k, t, i])\n', (16769, 16798), True, 'import numpy as np\n'), ((16887, 16923), 'numpy.sqrt', 'np.sqrt', (['model.delta_sigma_[k, t, j]'], {}), '(model.delta_sigma_[k, t, j])\n', (16894, 16923), True, 'import numpy as np\n'), ((17567, 17599), 'numpy.sum', 'np.sum', (['(lmbdak * Xi * Xj)'], {'axis': '(1)'}), '(lmbdak * Xi * Xj, axis=1)\n', (17573, 17599), True, 'import numpy as np\n'), ((18220, 18257), 'numpy.sqrt', 'np.sqrt', (['model.delta_sigma_[k, -1, i]'], {}), '(model.delta_sigma_[k, -1, i])\n', (18227, 18257), True, 'import numpy as np\n'), ((18347, 18384), 'numpy.sqrt', 'np.sqrt', (['model.delta_sigma_[k, -1, j]'], {}), '(model.delta_sigma_[k, -1, j])\n', (18354, 18384), True, 'import numpy as np\n'), ((20396, 20428), 'numpy.zeros', 'np.zeros', (['(n_time_steps + horizon)'], {}), '(n_time_steps + horizon)\n', (20404, 20428), True, 'import numpy as np\n'), ((20450, 20482), 'numpy.zeros', 'np.zeros', (['(n_time_steps + horizon)'], {}), '(n_time_steps + horizon)\n', (20458, 20482), True, 'import numpy as np\n'), ((20504, 20536), 'numpy.zeros', 'np.zeros', (['(n_time_steps + horizon)'], {}), '(n_time_steps + horizon)\n', (20512, 20536), True, 'import numpy as np\n'), ((22256, 22293), 'numpy.sqrt', 'np.sqrt', (['model.lambda_sigma_[:, p, p]'], {}), '(model.lambda_sigma_[:, p, p])\n', (22263, 22293), True, 'import numpy as np\n'), ((26757, 26814), 'seaborn.color_palette', 'sns.color_palette', (['"""muted"""'], {'n_colors': 'n_layers', 'desat': '(0.75)'}), "('muted', n_colors=n_layers, desat=0.75)\n", (26774, 26814), True, 'import seaborn as sns\n'), ((26937, 26974), 'numpy.sqrt', 'np.sqrt', (['model.lambda_sigma_[:, p, p]'], {}), '(model.lambda_sigma_[:, p, p])\n', (26944, 26974), True, 'import numpy as np\n'), ((27101, 27120), 'numpy.arange', 'np.arange', (['n_layers'], {}), '(n_layers)\n', (27110, 27120), True, 'import numpy as np\n'), ((27242, 27261), 'numpy.arange', 'np.arange', (['n_layers'], {}), '(n_layers)\n', (27251, 27261), True, 'import numpy as np\n'), ((27846, 27865), 'numpy.arange', 'np.arange', (['n_layers'], {}), '(n_layers)\n', (27855, 27865), True, 'import numpy as np\n'), ((28985, 29008), 'numpy.arange', 'np.arange', (['n_time_steps'], {}), '(n_time_steps)\n', (28994, 29008), True, 'import numpy as np\n'), ((29066, 29085), 'numpy.arange', 'np.arange', (['n_layers'], {}), '(n_layers)\n', (29075, 29085), True, 'import numpy as np\n'), ((3038, 3075), 'numpy.asarray', 'np.asarray', (['([node_color] * X.shape[0])'], {}), '([node_color] * X.shape[0])\n', (3048, 3075), True, 'import numpy as np\n'), ((3104, 3118), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (3116, 3118), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((5044, 5062), 'numpy.eye', 'np.eye', (['X.shape[1]'], {}), '(X.shape[1])\n', (5050, 5062), True, 'import numpy as np\n'), ((7316, 7335), 'imageio.imread', 'imageio.imread', (['png'], {}), '(png)\n', (7330, 7335), False, 'import 
imageio\n'), ((10275, 10297), 'numpy.zeros', 'np.zeros', (['n_time_steps'], {}), '(n_time_steps)\n', (10283, 10297), True, 'import numpy as np\n'), ((10322, 10344), 'numpy.zeros', 'np.zeros', (['n_time_steps'], {}), '(n_time_steps)\n', (10330, 10344), True, 'import numpy as np\n'), ((10371, 10398), 'scipy.stats.norm.ppf', 'norm.ppf', (['(1 - q_alpha / 2.0)'], {}), '(1 - q_alpha / 2.0)\n', (10379, 10398), False, 'from scipy.stats import norm\n'), ((10419, 10442), 'numpy.arange', 'np.arange', (['n_time_steps'], {}), '(n_time_steps)\n', (10428, 10442), True, 'import numpy as np\n'), ((15372, 15403), 'numpy.where', 'np.where', (['(node_labels == node_i)'], {}), '(node_labels == node_i)\n', (15380, 15403), True, 'import numpy as np\n'), ((15422, 15453), 'numpy.where', 'np.where', (['(node_labels == node_j)'], {}), '(node_labels == node_j)\n', (15430, 15453), True, 'import numpy as np\n'), ((16129, 16163), 'numpy.quantile', 'np.quantile', (['dist'], {'q': '(q_alpha / 2.0)'}), '(dist, q=q_alpha / 2.0)\n', (16140, 16163), True, 'import numpy as np\n'), ((16193, 16231), 'numpy.quantile', 'np.quantile', (['dist'], {'q': '(1 - q_alpha / 2.0)'}), '(dist, q=1 - q_alpha / 2.0)\n', (16204, 16231), True, 'import numpy as np\n'), ((18986, 19006), 'numpy.zeros', 'np.zeros', (['n_features'], {}), '(n_features)\n', (18994, 19006), True, 'import numpy as np\n'), ((19126, 19146), 'numpy.zeros', 'np.zeros', (['n_features'], {}), '(n_features)\n', (19134, 19146), True, 'import numpy as np\n'), ((19254, 19286), 'numpy.sum', 'np.sum', (['(lmbdak * Xi * Xj)'], {'axis': '(1)'}), '(lmbdak * Xi * Xj, axis=1)\n', (19260, 19286), True, 'import numpy as np\n'), ((19991, 20022), 'numpy.where', 'np.where', (['(node_labels == node_i)'], {}), '(node_labels == node_i)\n', (19999, 20022), True, 'import numpy as np\n'), ((20041, 20072), 'numpy.where', 'np.where', (['(node_labels == node_j)'], {}), '(node_labels == node_j)\n', (20049, 20072), True, 'import numpy as np\n'), ((20775, 20808), 'numpy.quantile', 'np.quantile', (['pis'], {'q': '(q_alpha / 2.0)'}), '(pis, q=q_alpha / 2.0)\n', (20786, 20808), True, 'import numpy as np\n'), ((20836, 20873), 'numpy.quantile', 'np.quantile', (['pis'], {'q': '(1 - q_alpha / 2.0)'}), '(pis, q=1 - q_alpha / 2.0)\n', (20847, 20873), True, 'import numpy as np\n'), ((29302, 29325), 'numpy.arange', 'np.arange', (['n_time_steps'], {}), '(n_time_steps)\n', (29311, 29325), True, 'import numpy as np\n'), ((29404, 29417), 'pandas.melt', 'pd.melt', (['data'], {}), '(data)\n', (29411, 29417), True, 'import pandas as pd\n'), ((10871, 10941), 'numpy.quantile', 'np.quantile', (['model.gammas_', '[q_alpha / 2.0, 1 - q_alpha / 2.0]'], {'axis': '(0)'}), '(model.gammas_, [q_alpha / 2.0, 1 - q_alpha / 2.0], axis=0)\n', (10882, 10941), True, 'import numpy as np\n'), ((12761, 12796), 'numpy.where', 'np.where', (['(node_labels == node_label)'], {}), '(node_labels == node_label)\n', (12769, 12796), True, 'import numpy as np\n'), ((13126, 13167), 'numpy.sqrt', 'np.sqrt', (['model.Z_sigma_[t, node_id, p, p]'], {}), '(model.Z_sigma_[t, node_id, p, p])\n', (13133, 13167), True, 'import numpy as np\n'), ((18777, 18807), 'numpy.sqrt', 'np.sqrt', (['model.sigma_sq_delta_'], {}), '(model.sigma_sq_delta_)\n', (18784, 18807), True, 'import numpy as np\n'), ((18885, 18915), 'numpy.sqrt', 'np.sqrt', (['model.sigma_sq_delta_'], {}), '(model.sigma_sq_delta_)\n', (18892, 18915), True, 'import numpy as np\n'), ((19026, 19044), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (19032, 19044), True, 'import numpy as 
np\n'), ((19166, 19184), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (19172, 19184), True, 'import numpy as np\n'), ((21243, 21279), 'numpy.quantile', 'np.quantile', (['pis[h]'], {'q': '(q_alpha / 2.0)'}), '(pis[h], q=q_alpha / 2.0)\n', (21254, 21279), True, 'import numpy as np\n'), ((21353, 21393), 'numpy.quantile', 'np.quantile', (['pis[h]'], {'q': '(1 - q_alpha / 2.0)'}), '(pis[h], q=1 - q_alpha / 2.0)\n', (21364, 21393), True, 'import numpy as np\n'), ((9596, 9631), 'numpy.where', 'np.where', (['(node_labels == node_label)'], {}), '(node_labels == node_label)\n', (9604, 9631), True, 'import numpy as np\n'), ((10524, 10566), 'numpy.sqrt', 'np.sqrt', (['model.delta_sigma_[k, t, node_id]'], {}), '(model.delta_sigma_[k, t, node_id])\n', (10531, 10566), True, 'import numpy as np\n'), ((11010, 11033), 'numpy.arange', 'np.arange', (['n_time_steps'], {}), '(n_time_steps)\n', (11019, 11033), True, 'import numpy as np\n'), ((2641, 2653), 'numpy.unique', 'np.unique', (['r'], {}), '(r)\n', (2650, 2653), True, 'import numpy as np\n')] |
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ferminet.mcmc."""
from absl.testing import parameterized
from ferminet import hamiltonian
from ferminet import mcmc
from ferminet.utils import system
import numpy as np
import tensorflow.compat.v1 as tf
def _hydrogen(xs):
"""Hydrogen (3D) atom ground state (1s) wavefunction.
Energy: -1/2 hartrees.
Args:
xs: tensor (batch_size, 3) of electron positions.
Returns:
tensor (batch_size, 1) of the (unnormalised) wavefunction at each position.
"""
with tf.name_scope('Psi_H'):
return tf.exp(-tf.norm(xs, axis=1, keepdims=True))
def _helium(xs):
"""Compact parametrized Helium wavefunction.
See https://opencommons.uconn.edu/chem_educ/30/ and Phys Rev A 74, 014501
(2006).
Energy: -2.901188 hartrees (compared to -2.9037243770 hartrees for the exact
ground state).
Args:
xs: tensor (batch_size, 6) of electron positions.
Returns:
tensor (batch_size, 1) of the (unnormalised) wavefunction at each pair of
positions.
"""
with tf.name_scope('Psi_He'):
x1, x2 = tf.split(xs, 2, axis=1)
x1n = tf.norm(x1, axis=1, keepdims=True)
x2n = tf.norm(x2, axis=1, keepdims=True)
s = x1n + x2n
t = x1n - x2n
u = tf.norm(x1 - x2, axis=1, keepdims=True)
return (tf.exp(-2*s)
* (1 + 0.5*u*tf.exp(-1.013*u))
* (1 + 0.2119*s*u + 0.1406*t*t - 0.003*u*u))
def _run_mcmc(atoms,
nelectrons,
net,
batch_size=1024,
steps=10,
dtype=tf.float32):
gen = mcmc.MCMC(
net,
batch_size,
[0] * 3 * nelectrons,
1.0,
0.1,
dtype=dtype)
kin, pot = hamiltonian.operators(atoms, nelectrons, 0.0)
walkers = tf.squeeze(gen.walkers)
psi, _ = net(walkers)
e_loc = tf.reduce_sum(kin(psi, walkers) + pot(walkers)) / batch_size
e = []
mcmc_step = gen.step()
with tf.train.MonitoredSession() as session:
for _ in range(steps):
session.run(mcmc_step)
e.append(session.run(e_loc))
return np.array(e)
class McmcTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
      {'dtype': tf.float32},
{'dtype': tf.float64},
)
def test_hydrogen_atom(self, dtype):
atoms = [system.Atom(symbol='H', coords=(0, 0, 0))]
def net(x):
psi = _hydrogen(x)
return (tf.log(tf.abs(psi)), tf.sign(psi))
e = _run_mcmc(atoms, 1, net, dtype=dtype)
np.testing.assert_allclose(e, -np.ones_like(e) / 2)
def test_helium_atom(self):
atoms = [system.Atom(symbol='He', coords=(0, 0, 0))]
def net(x):
psi = _helium(x)
return (tf.log(tf.abs(psi)), tf.sign(psi))
e = _run_mcmc(atoms, 2, net, steps=500)
np.testing.assert_allclose(e[100:].mean(), -2.901188, atol=5.e-3)
if __name__ == '__main__':
tf.test.main()
| [
"tensorflow.compat.v1.name_scope",
"numpy.ones_like",
"tensorflow.compat.v1.split",
"tensorflow.compat.v1.exp",
"tensorflow.compat.v1.norm",
"absl.testing.parameterized.parameters",
"tensorflow.compat.v1.abs",
"tensorflow.compat.v1.sign",
"numpy.array",
"ferminet.hamiltonian.operators",
"tensorf... | [((2160, 2231), 'ferminet.mcmc.MCMC', 'mcmc.MCMC', (['net', 'batch_size', '([0] * 3 * nelectrons)', '(1.0)', '(0.1)'], {'dtype': 'dtype'}), '(net, batch_size, [0] * 3 * nelectrons, 1.0, 0.1, dtype=dtype)\n', (2169, 2231), False, 'from ferminet import mcmc\n'), ((2282, 2327), 'ferminet.hamiltonian.operators', 'hamiltonian.operators', (['atoms', 'nelectrons', '(0.0)'], {}), '(atoms, nelectrons, 0.0)\n', (2303, 2327), False, 'from ferminet import hamiltonian\n'), ((2340, 2363), 'tensorflow.compat.v1.squeeze', 'tf.squeeze', (['gen.walkers'], {}), '(gen.walkers)\n', (2350, 2363), True, 'import tensorflow.compat.v1 as tf\n'), ((2640, 2651), 'numpy.array', 'np.array', (['e'], {}), '(e)\n', (2648, 2651), True, 'import numpy as np\n'), ((2716, 2786), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["{'dtype': tf.float64}", "{'dtype': tf.float64}"], {}), "({'dtype': tf.float64}, {'dtype': tf.float64})\n", (2740, 2786), False, 'from absl.testing import parameterized\n'), ((3411, 3425), 'tensorflow.compat.v1.test.main', 'tf.test.main', ([], {}), '()\n', (3423, 3425), True, 'import tensorflow.compat.v1 as tf\n'), ((1125, 1147), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""Psi_H"""'], {}), "('Psi_H')\n", (1138, 1147), True, 'import tensorflow.compat.v1 as tf\n'), ((1635, 1658), 'tensorflow.compat.v1.name_scope', 'tf.name_scope', (['"""Psi_He"""'], {}), "('Psi_He')\n", (1648, 1658), True, 'import tensorflow.compat.v1 as tf\n'), ((1673, 1696), 'tensorflow.compat.v1.split', 'tf.split', (['xs', '(2)'], {'axis': '(1)'}), '(xs, 2, axis=1)\n', (1681, 1696), True, 'import tensorflow.compat.v1 as tf\n'), ((1707, 1741), 'tensorflow.compat.v1.norm', 'tf.norm', (['x1'], {'axis': '(1)', 'keepdims': '(True)'}), '(x1, axis=1, keepdims=True)\n', (1714, 1741), True, 'import tensorflow.compat.v1 as tf\n'), ((1752, 1786), 'tensorflow.compat.v1.norm', 'tf.norm', (['x2'], {'axis': '(1)', 'keepdims': '(True)'}), '(x2, axis=1, keepdims=True)\n', (1759, 1786), True, 'import tensorflow.compat.v1 as tf\n'), ((1831, 1870), 'tensorflow.compat.v1.norm', 'tf.norm', (['(x1 - x2)'], {'axis': '(1)', 'keepdims': '(True)'}), '(x1 - x2, axis=1, keepdims=True)\n', (1838, 1870), True, 'import tensorflow.compat.v1 as tf\n'), ((2500, 2527), 'tensorflow.compat.v1.train.MonitoredSession', 'tf.train.MonitoredSession', ([], {}), '()\n', (2525, 2527), True, 'import tensorflow.compat.v1 as tf\n'), ((2856, 2897), 'ferminet.utils.system.Atom', 'system.Atom', ([], {'symbol': '"""H"""', 'coords': '(0, 0, 0)'}), "(symbol='H', coords=(0, 0, 0))\n", (2867, 2897), False, 'from ferminet.utils import system\n'), ((3135, 3177), 'ferminet.utils.system.Atom', 'system.Atom', ([], {'symbol': '"""He"""', 'coords': '(0, 0, 0)'}), "(symbol='He', coords=(0, 0, 0))\n", (3146, 3177), False, 'from ferminet.utils import system\n'), ((1168, 1202), 'tensorflow.compat.v1.norm', 'tf.norm', (['xs'], {'axis': '(1)', 'keepdims': '(True)'}), '(xs, axis=1, keepdims=True)\n', (1175, 1202), True, 'import tensorflow.compat.v1 as tf\n'), ((1883, 1897), 'tensorflow.compat.v1.exp', 'tf.exp', (['(-2 * s)'], {}), '(-2 * s)\n', (1889, 1897), True, 'import tensorflow.compat.v1 as tf\n'), ((2975, 2987), 'tensorflow.compat.v1.sign', 'tf.sign', (['psi'], {}), '(psi)\n', (2982, 2987), True, 'import tensorflow.compat.v1 as tf\n'), ((3253, 3265), 'tensorflow.compat.v1.sign', 'tf.sign', (['psi'], {}), '(psi)\n', (3260, 3265), True, 'import tensorflow.compat.v1 as tf\n'), ((2961, 2972), 'tensorflow.compat.v1.abs', 'tf.abs', (['psi'], {}), '(psi)\n', 
(2967, 2972), True, 'import tensorflow.compat.v1 as tf\n'), ((3070, 3085), 'numpy.ones_like', 'np.ones_like', (['e'], {}), '(e)\n', (3082, 3085), True, 'import numpy as np\n'), ((3239, 3250), 'tensorflow.compat.v1.abs', 'tf.abs', (['psi'], {}), '(psi)\n', (3245, 3250), True, 'import tensorflow.compat.v1 as tf\n'), ((1921, 1939), 'tensorflow.compat.v1.exp', 'tf.exp', (['(-1.013 * u)'], {}), '(-1.013 * u)\n', (1927, 1939), True, 'import tensorflow.compat.v1 as tf\n')] |
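# Editor's sketch: a minimal, NumPy-only Metropolis sampler for the hydrogen 1s
# wavefunction in the test above, illustrating what `mcmc.MCMC` drives. For
# psi = exp(-r) the local energy is exactly -1/2 hartree everywhere, which is
# why the test asserts e == -1/2. All names below are illustrative; this is not
# the ferminet API.
import numpy as np

def metropolis_hydrogen(n_walkers=1024, n_steps=100, step_size=0.5, seed=0):
    rng = np.random.default_rng(seed)
    walkers = rng.normal(size=(n_walkers, 3))        # initial electron positions
    log_psi = -np.linalg.norm(walkers, axis=1)      # log|psi| = -r for the 1s state
    for _ in range(n_steps):
        proposal = walkers + step_size * rng.normal(size=walkers.shape)
        log_psi_new = -np.linalg.norm(proposal, axis=1)
        # accept with probability |psi_new / psi_old|^2
        accept = rng.random(n_walkers) < np.exp(2 * (log_psi_new - log_psi))
        walkers[accept] = proposal[accept]
        log_psi[accept] = log_psi_new[accept]
    return walkers

samples = metropolis_hydrogen()
r = np.linalg.norm(samples, axis=1)
print('mean radius, exact value is 1.5 bohr:', r.mean())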
import open3d as o3d
import teaserpp_python
import numpy as np
import copy
from helpers import *
VOXEL_SIZE = 0.05
VISUALIZE = True
# Load and visualize two point clouds from 3DMatch dataset
A_pcd_raw = o3d.io.read_point_cloud('./data/cloud_bin_0.ply')
B_pcd_raw = o3d.io.read_point_cloud('./data/cloud_bin_4.ply')
A_pcd_raw.paint_uniform_color([0.0, 0.0, 1.0]) # show A_pcd in blue
B_pcd_raw.paint_uniform_color([1.0, 0.0, 0.0]) # show B_pcd in red
if VISUALIZE:
o3d.visualization.draw_geometries([A_pcd_raw,B_pcd_raw]) # plot A and B
# voxel downsample both clouds
A_pcd = A_pcd_raw.voxel_down_sample(voxel_size=VOXEL_SIZE)
B_pcd = B_pcd_raw.voxel_down_sample(voxel_size=VOXEL_SIZE)
if VISUALIZE:
o3d.visualization.draw_geometries([A_pcd,B_pcd]) # plot downsampled A and B
A_xyz = pcd2xyz(A_pcd) # np array of size 3 by N
B_xyz = pcd2xyz(B_pcd) # np array of size 3 by M
# extract FPFH features
A_feats = extract_fpfh(A_pcd,VOXEL_SIZE)
B_feats = extract_fpfh(B_pcd,VOXEL_SIZE)
# establish correspondences by nearest neighbour search in feature space
corrs_A, corrs_B = find_correspondences(
A_feats, B_feats, mutual_filter=True)
A_corr = A_xyz[:,corrs_A] # np array of size 3 by num_corrs
B_corr = B_xyz[:,corrs_B] # np array of size 3 by num_corrs
num_corrs = A_corr.shape[1]
print(f'FPFH generates {num_corrs} putative correspondences.')
# visualize the point clouds together with feature correspondences
points = np.concatenate((A_corr.T,B_corr.T),axis=0)
lines = []
for i in range(num_corrs):
lines.append([i,i+num_corrs])
colors = [[0, 1, 0] for i in range(len(lines))] # lines are shown in green
line_set = o3d.geometry.LineSet(
points=o3d.utility.Vector3dVector(points),
lines=o3d.utility.Vector2iVector(lines),
)
line_set.colors = o3d.utility.Vector3dVector(colors)
o3d.visualization.draw_geometries([A_pcd,B_pcd,line_set])
# robust global registration using TEASER++
NOISE_BOUND = VOXEL_SIZE
teaser_solver = get_teaser_solver(NOISE_BOUND)
teaser_solver.solve(A_corr,B_corr)
solution = teaser_solver.getSolution()
R_teaser = solution.rotation
t_teaser = solution.translation
T_teaser = Rt2T(R_teaser,t_teaser)
# Visualize the registration results
A_pcd_T_teaser = copy.deepcopy(A_pcd).transform(T_teaser)
o3d.visualization.draw_geometries([A_pcd_T_teaser,B_pcd])
# local refinement using ICP
icp_sol = o3d.pipelines.registration.registration_icp(
A_pcd, B_pcd, NOISE_BOUND, T_teaser,
o3d.pipelines.registration.TransformationEstimationPointToPoint(),
o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration=100))
T_icp = icp_sol.transformation
# visualize the registration after ICP refinement
A_pcd_T_icp = copy.deepcopy(A_pcd).transform(T_icp)
o3d.visualization.draw_geometries([A_pcd_T_icp,B_pcd])
| [
"copy.deepcopy",
"open3d.utility.Vector2iVector",
"open3d.pipelines.registration.TransformationEstimationPointToPoint",
"open3d.pipelines.registration.ICPConvergenceCriteria",
"open3d.visualization.draw_geometries",
"numpy.concatenate",
"open3d.io.read_point_cloud",
"open3d.utility.Vector3dVector"
] | [((206, 255), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['"""./data/cloud_bin_0.ply"""'], {}), "('./data/cloud_bin_0.ply')\n", (229, 255), True, 'import open3d as o3d\n'), ((268, 317), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['"""./data/cloud_bin_4.ply"""'], {}), "('./data/cloud_bin_4.ply')\n", (291, 317), True, 'import open3d as o3d\n'), ((1441, 1485), 'numpy.concatenate', 'np.concatenate', (['(A_corr.T, B_corr.T)'], {'axis': '(0)'}), '((A_corr.T, B_corr.T), axis=0)\n', (1455, 1485), True, 'import numpy as np\n'), ((1776, 1810), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (1802, 1810), True, 'import open3d as o3d\n'), ((1811, 1870), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[A_pcd, B_pcd, line_set]'], {}), '([A_pcd, B_pcd, line_set])\n', (1844, 1870), True, 'import open3d as o3d\n'), ((2252, 2310), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[A_pcd_T_teaser, B_pcd]'], {}), '([A_pcd_T_teaser, B_pcd])\n', (2285, 2310), True, 'import open3d as o3d\n'), ((2721, 2776), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[A_pcd_T_icp, B_pcd]'], {}), '([A_pcd_T_icp, B_pcd])\n', (2754, 2776), True, 'import open3d as o3d\n'), ((471, 528), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[A_pcd_raw, B_pcd_raw]'], {}), '([A_pcd_raw, B_pcd_raw])\n', (504, 528), True, 'import open3d as o3d\n'), ((712, 761), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[A_pcd, B_pcd]'], {}), '([A_pcd, B_pcd])\n', (745, 761), True, 'import open3d as o3d\n'), ((2444, 2509), 'open3d.pipelines.registration.TransformationEstimationPointToPoint', 'o3d.pipelines.registration.TransformationEstimationPointToPoint', ([], {}), '()\n', (2507, 2509), True, 'import open3d as o3d\n'), ((2517, 2585), 'open3d.pipelines.registration.ICPConvergenceCriteria', 'o3d.pipelines.registration.ICPConvergenceCriteria', ([], {'max_iteration': '(100)'}), '(max_iteration=100)\n', (2566, 2585), True, 'import open3d as o3d\n'), ((1675, 1709), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (1701, 1709), True, 'import open3d as o3d\n'), ((1721, 1754), 'open3d.utility.Vector2iVector', 'o3d.utility.Vector2iVector', (['lines'], {}), '(lines)\n', (1747, 1754), True, 'import open3d as o3d\n'), ((2211, 2231), 'copy.deepcopy', 'copy.deepcopy', (['A_pcd'], {}), '(A_pcd)\n', (2224, 2231), False, 'import copy\n'), ((2683, 2703), 'copy.deepcopy', 'copy.deepcopy', (['A_pcd'], {}), '(A_pcd)\n', (2696, 2703), False, 'import copy\n')] |
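# Editor's sketch: the script above imports `pcd2xyz`, `extract_fpfh`,
# `find_correspondences`, `get_teaser_solver` and `Rt2T` from a local `helpers`
# module that is not shown. The two simplest helpers can be reconstructed from
# the inline comments ("np array of size 3 by N"); the bodies below are
# assumptions, not the original TEASER++ example code.
import numpy as np

def pcd2xyz(pcd):
    # Open3D stores points as an (N, 3) Vector3dVector; transpose to 3 x N.
    return np.asarray(pcd.points).T

def Rt2T(R, t):
    # Assemble a 4x4 homogeneous transform from a 3x3 rotation and a 3-vector translation.
    T = np.identity(4)
    T[:3, :3] = R
    T[:3, 3] = t
    return T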
# Create the expert sample for Radio Galaxy Zoo.
# We'll see how this compares to the user classifications done so far.
# <NAME>, 10 Jun 2014
import rgz
from numpy.random import random
from random import shuffle
# Paths
rgz_dir = '/Users/willettk/Astronomy/Research/GalaxyZoo/rgz-analysis'
# Curated subjects from http://radiogalaxyzoo.pbworks.com/w/page/81017963/Expert%20classifications
curated_zid = open('%s/expert/expert_curated_zooniverse_ids.txt' % rgz_dir).read().splitlines()
# Note - <NAME> recommends chunking it into 25 or so at once.
# Not sure if the browser will like preloading and caching 100 subjects all at once.
# Formal max limit from the API is 100 subjects
# Select random galaxies to fill out sample; total will be 100 subjects
n_random = 100 - len(curated_zid)
# Load in the RGZ database through pymongo
subjects,classifications = rgz.load_rgz_data()
# Completed RGZ subjects
batch = subjects.find({'state':'complete','classification_count':20}).limit(n_random)
# Select random subjects from the completed database
random_zid = []
random_numbers = random(n_random)
for r in random_numbers:
sf = subjects.find({'random':{'$gte':random()}}).sort([("random", 1)]).limit(1)
s = list(sf)[0]
random_zid.append(s['zooniverse_id'])
# Combine the curated and randomly selected subjects
result = curated_zid + random_zid
# Randomize the order that they appear in
shuffle(result)
# Write out the URLs to a text file, batched in units of 25
# Note: these are later put through a URL shortener for ease in copying/pasting
with open('%s/expert/expert_urls.txt' % rgz_dir,'wb') as f:
for i in range(4):
url_str = 'http://radio.galaxyzoo.org/?subjects='
for r in result[i::4]:
url_str += '%s,' % r
f.write(url_str[:-1]+'\n')
# Write the Zooniverse IDs to a text file as a sorted list
with open('%s/expert/expert_all_zooniverse_ids.txt' % rgz_dir,'wb') as f:
    result.sort()
for r in result:
f.write(r+'\n')
| [
"numpy.random.random",
"random.shuffle",
"rgz.load_rgz_data"
] | [((869, 888), 'rgz.load_rgz_data', 'rgz.load_rgz_data', ([], {}), '()\n', (886, 888), False, 'import rgz\n'), ((1089, 1105), 'numpy.random.random', 'random', (['n_random'], {}), '(n_random)\n', (1095, 1105), False, 'from numpy.random import random\n'), ((1409, 1424), 'random.shuffle', 'shuffle', (['result'], {}), '(result)\n', (1416, 1424), False, 'from random import shuffle\n'), ((1173, 1181), 'numpy.random.random', 'random', ([], {}), '()\n', (1179, 1181), False, 'from numpy.random import random\n')] |
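# Editor's sketch: the slicing `result[i::4]` above deals the 100 shuffled
# subjects into 4 interleaved batches of 25, matching the recommendation to
# chunk ~25 subjects per URL. A standalone illustration with toy IDs:
ids = ['ARG%04d' % n for n in range(100)]        # stand-ins for Zooniverse IDs
batches = [ids[i::4] for i in range(4)]
assert all(len(batch) == 25 for batch in batches)
assert sorted(sum(batches, [])) == sorted(ids)   # every subject appears exactly once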
from rtgraph.ui.mainWindow_ui import *
import cProfile
from rtgraph.core.worker import Worker
from rtgraph.core.constants import Constants, SourceType
from rtgraph.ui.popUp import PopUp
from rtgraph.common.logger import Logger as Log
TAG = "MainWindow"
TIME = 10
class MainWindow(QtGui.QMainWindow):
"""
Handles the ui elements and connects to worker service to execute processes.
"""
def __init__(self,
port=None,
bd=115200,
samples=Constants.argument_default_samples):
"""
Initializes values for the UI.
:param port: Default port name to be used. It will also disable scanning available ports.
:type port: str.
:param bd: Default baud rate to be used. It will be added to the common baud rate list if not available.
:type bd: int.
        :param samples: Default samples per second to be shown in the plot.
:type samples: int.
"""
QtGui.QMainWindow.__init__(self)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# Shared variables, initial values
self._plt = None
self._timer_plot = None
self.worker = Worker()
# plotting
self.curves = []
# configures
self.ui.cBox_Source.addItems(Constants.app_sources)
self._configure_plot()
self._configure_timers()
self._configure_signals()
import numpy as np
self._time_array = np.linspace(-10, 0, samples)
# populate combo box for serial ports
self._source_changed()
self.ui.cBox_Source.setCurrentIndex(SourceType.SocketServer.value)
self.ui.sBox_Samples.setValue(samples)
# enable ui
self._enable_ui(True)
def start(self):
"""
Starts the acquisition of the selected serial port.
This function is connected to the clicked signal of the Start button.
:return:
"""
Log.i(TAG, "Clicked start")
self.worker = Worker(port=self.ui.cBox_Port.currentText(),
speed=float(self.ui.cBox_Speed.currentText()),
samples=self.ui.sBox_Samples.value(),
source=self._get_source(),
export_enabled=self.ui.chBox_export.isChecked())
if self.worker.start():
self._timer_plot.start(Constants.plot_update_ms)
self._enable_ui(False)
else:
Log.i(TAG, "Port is not available")
PopUp.warning(
self, Constants.app_title,
"Selected port \"{}\" is not available".format(
self.ui.cBox_Port.currentText()))
def stop(self):
"""
Stops the acquisition of the selected serial port.
This function is connected to the clicked signal of the Stop button.
:return:
"""
Log.i(TAG, "Clicked stop")
self._timer_plot.stop()
self._enable_ui(True)
self.worker.stop()
def closeEvent(self, evnt):
"""
Overrides the QTCloseEvent.
This function is connected to the clicked signal of the close button of the window.
:param evnt: QT evnt.
:return:
"""
if self.worker.is_running():
Log.i(TAG, "Window closed without stopping capture, stopping it")
self.stop()
def _enable_ui(self, enabled):
"""
Enables or disables the UI elements of the window.
:param enabled: The value to be set at the enabled characteristic of the UI elements.
:type enabled: bool
:return:
"""
self.ui.cBox_Port.setEnabled(enabled)
self.ui.cBox_Speed.setEnabled(enabled)
self.ui.pButton_Start.setEnabled(enabled)
self.ui.chBox_export.setEnabled(enabled)
self.ui.cBox_Source.setEnabled(enabled)
self.ui.pButton_Stop.setEnabled(not enabled)
def _configure_plot(self):
"""
Configures specific elements of the PyQtGraph plots.
:return:
"""
self.ui.plt.setBackground(background=None)
self.ui.plt.setAntialiasing(True)
self._plt = self.ui.plt.addPlot(row=1, col=1)
self._plt.setLabel('bottom', Constants.plot_xlabel_title,
Constants.plot_xlabel_unit)
# TODO: make previous time an option
# this is in seconds, take sample freq into account
self._plt.setXRange(-TIME, 0)
self._plt.setYRange(0, Constants.plot_y_max)
self._lines = []
for i in range(Constants.default_num_lines):
self._lines.append(self._plt.plot())
def _configure_timers(self):
"""
Configures specific elements of the QTimers.
:return:
"""
self._timer_plot = QtCore.QTimer(self)
self._timer_plot.timeout.connect(self._update_plot)
def _configure_signals(self):
"""
Configures the connections between signals and UI elements.
:return:
"""
self.ui.pButton_Start.clicked.connect(self.start)
self.ui.pButton_Stop.clicked.connect(self.stop)
self.ui.sBox_Samples.valueChanged.connect(self._update_sample_size)
self.ui.cBox_Source.currentIndexChanged.connect(self._source_changed)
def _update_sample_size(self):
"""
Updates the sample size of the plot.
This function is connected to the valueChanged signal of the sample Spin Box.
:return:
"""
if self.worker is not None:
Log.i(TAG, "Changing sample size")
self.worker.reset_buffers(self.ui.sBox_Samples.value())
def _update_plot(self):
"""
Updates and redraws the graphics in the plot.
This function is connected to the timeout signal of a QTimer.
:return:
"""
self.worker.consume_queue()
num_times = len(self.worker.get_time_buffer())
time = self._time_array[-num_times:]
# plot data
for idx in range(Constants.default_num_lines):
data = self.worker.get_values_buffer(idx)
self._lines[idx].setData(x=time,
y=data,
pen=Constants.plot_colors[idx])
def _source_changed(self):
"""
Updates the source and depending boxes on change.
        This function is connected to the currentIndexChanged signal of the Source ComboBox.
:return:
"""
Log.i(TAG, "Scanning source {}".format(self._get_source().name))
# clear boxes before adding new
self.ui.cBox_Port.clear()
self.ui.cBox_Speed.clear()
source = self._get_source()
ports = self.worker.get_source_ports(source)
speeds = self.worker.get_source_speeds(source)
if ports is not None:
self.ui.cBox_Port.addItems(ports)
if speeds is not None:
self.ui.cBox_Speed.addItems(speeds)
if self._get_source() == SourceType.serial:
self.ui.cBox_Speed.setCurrentIndex(len(speeds) - 1)
def _get_source(self):
"""
Gets the current source type.
:return: Current Source type.
:rtype: SourceType.
"""
return SourceType(self.ui.cBox_Source.currentIndex())
| [
"numpy.linspace",
"rtgraph.common.logger.Logger.i",
"rtgraph.core.worker.Worker"
] | [((1198, 1206), 'rtgraph.core.worker.Worker', 'Worker', ([], {}), '()\n', (1204, 1206), False, 'from rtgraph.core.worker import Worker\n'), ((1487, 1515), 'numpy.linspace', 'np.linspace', (['(-10)', '(0)', 'samples'], {}), '(-10, 0, samples)\n', (1498, 1515), True, 'import numpy as np\n'), ((1977, 2004), 'rtgraph.common.logger.Logger.i', 'Log.i', (['TAG', '"""Clicked start"""'], {}), "(TAG, 'Clicked start')\n", (1982, 2004), True, 'from rtgraph.common.logger import Logger as Log\n'), ((2933, 2959), 'rtgraph.common.logger.Logger.i', 'Log.i', (['TAG', '"""Clicked stop"""'], {}), "(TAG, 'Clicked stop')\n", (2938, 2959), True, 'from rtgraph.common.logger import Logger as Log\n'), ((2503, 2538), 'rtgraph.common.logger.Logger.i', 'Log.i', (['TAG', '"""Port is not available"""'], {}), "(TAG, 'Port is not available')\n", (2508, 2538), True, 'from rtgraph.common.logger import Logger as Log\n'), ((3330, 3395), 'rtgraph.common.logger.Logger.i', 'Log.i', (['TAG', '"""Window closed without stopping capture, stopping it"""'], {}), "(TAG, 'Window closed without stopping capture, stopping it')\n", (3335, 3395), True, 'from rtgraph.common.logger import Logger as Log\n'), ((5600, 5634), 'rtgraph.common.logger.Logger.i', 'Log.i', (['TAG', '"""Changing sample size"""'], {}), "(TAG, 'Changing sample size')\n", (5605, 5634), True, 'from rtgraph.common.logger import Logger as Log\n')] |
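# Editor's sketch: how the fixed time axis used by `_update_plot` above works.
# The axis spans [-10, 0] seconds; while the ring buffer is still filling, only
# the most recent `num_times` axis values are used, so fresh data grows in from
# the right edge of the plot. Standalone with toy numbers:
import numpy as np

samples = 500
time_array = np.linspace(-10, 0, samples)   # same construction as in __init__
buffer = list(range(120))               # pretend 120 samples have arrived so far
time = time_array[-len(buffer):]        # x-values for just the filled part
assert time.shape == (120,) and time[-1] == 0.0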
from flask import Flask, render_template, request, send_file, Response
import cv2
import numpy as np
import json
import requests
import base64
import matplotlib.pyplot as plt
import time
import os
app = Flask(__name__)
app.config['static'] = os.path.join('static', 'images')
app.config['upload_folder'] = os.path.join('static', 'results')
secret_file = "api_address.json"
with open(secret_file) as f:
addresses = json.loads(f.read())
@app.route('/')
def home():
return render_template('index.html')
########################################################
@app.route('/upload/strawberries')
def upload_straw():
return render_template('uploadstrawberries.html')
@app.route('/upload/tomatoes')
def upload_tomato():
return render_template('uploadtomatoes.html')
@app.route('/upload/paprika')
def upload_pap():
return render_template('uploadpaprika.html')
@app.route('/upload/melon')
def upload_mel():
return render_template('uploadmelon.html')
###########################################################
@app.route('/tomato', methods=['GET', 'POST'])
def result_toma():
try:
# Set content_type to header.
content_type = 'application/json'
headers = {'content-type': content_type}
# upload image string array data.
img_file = request.files['file'].stream.read()
img = cv2.imdecode(np.fromstring(img_file, np.uint8), cv2.IMREAD_COLOR)
# map to json.
send = base64.b64encode(np.array(img))
request_json = json.dumps({'input_img': send.decode(),
'info': {
'height': img.shape[0],
'width': img.shape[1],
'channel': img.shape[2]
}
})
# http request.
# print(request_json)
response = requests.post(addresses["TOMATO_SERVER"], data=request_json, headers=headers)
# print(response)
# ['data', 'time', 'is_gpu']
response_json = response.json()
print(response_json)
cost = response_json['time']
is_gpu = response_json['is_gpu']
# change to numpy array.
r = base64.decodebytes(response_json['data'].encode())
response_dat = np.fromstring(r, dtype=np.float)
response_dat = response_dat.reshape((response_json['info']['height'],
response_json['info']['width'],
response_json['info']['channel']))
# decodeed numpy image.
plt.figure(figsize=(int(7 * (response_json['info']['width'] / response_json['info']['height'])), 7))
plt.imshow(response_dat)
timenow = str(time.time())
fname = os.path.join('static', 'results', 'tomato', timenow + '.png')
plt.axis('off')
plt.savefig(fname)
return render_template('tom_result.html', outimg=fname, timenow=timenow, cost=cost, is_gpu=is_gpu)
except:
        return render_template('uploadtomatoes.html', alertflag="The image is too large or the file is invalid.")
@app.route('/strawberry', methods=['GET', 'POST'])
def result_straw():
try:
# Set content_type to header.
content_type = 'application/json'
headers = {'content-type': content_type}
# upload image string array data.
img_file = request.files['file'].stream.read()
img = cv2.imdecode(np.fromstring(img_file, np.uint8), cv2.IMREAD_COLOR)
# map to json.
send = base64.b64encode(np.array(img))
request_json = json.dumps({'input_img': send.decode(),
'info': {
'height': img.shape[0],
'width': img.shape[1],
'channel': img.shape[2]
}
})
# http request.
response = requests.post(addresses["STRAWBERRY_SERVER"], data=request_json, headers=headers)
# print(response)
# ['data', 'time', 'is_gpu']
response_json = response.json()
cost = response_json['time']
is_gpu = response_json['is_gpu']
# change to numpy array.
r = base64.decodebytes(response_json['data'].encode())
response_dat = np.fromstring(r, np.float)
response_dat = response_dat.reshape((response_json['info']['height'],
response_json['info']['width'],
response_json['info']['channel']))
# decodeed numpy image.
plt.figure(figsize=(7, 7))
plt.imshow(response_dat)
timenow = str(time.time())
fname = os.path.join('static', 'results', 'strawberry', timenow + '.png')
plt.axis('off')
plt.savefig(fname)
return render_template('str_result.html', outimg=fname, timenow=timenow, cost=cost, is_gpu=is_gpu)
except:
        return render_template('uploadstrawberries.html', alertflag="The image is too large or the file is invalid.")
@app.route('/paprika', methods=['GET', 'POST'])
def result_pap():
try:
# Set content_type to header.
content_type = 'application/json'
headers = {'content-type': content_type}
# upload image string array data.
img_file = request.files['file'].stream.read()
img = cv2.imdecode(np.fromstring(img_file, np.uint8), cv2.IMREAD_COLOR)
# map to json.
send = base64.b64encode(np.array(img))
request_json = json.dumps({'input_img': send.decode(),
'info': {
'height': img.shape[0],
'width': img.shape[1],
'channel': img.shape[2]
}
})
# http request.
response = requests.post(addresses["PAPRIKA_SERVER"], data=request_json, headers=headers)
# print(response)
# ['data', 'time', 'is_gpu']
response_json = response.json()
cost = response_json['time']
is_gpu = response_json['is_gpu']
# change to numpy array.
r = base64.decodebytes(response_json['data'].encode())
response_dat = np.fromstring(r, dtype=np.float)
response_dat = response_dat.reshape((response_json['info']['height'],
response_json['info']['width'],
response_json['info']['channel']))
# decodeed numpy image.
plt.figure(figsize=(7, 7))
plt.imshow(response_dat)
timenow = str(time.time())
fname = os.path.join('static', 'results', 'paprika', timenow + '.png')
plt.axis('off')
plt.savefig(fname)
return render_template('pap_result.html', outimg=fname, timenow=timenow, cost=cost, is_gpu=is_gpu)
except:
        return render_template('uploadpaprika.html', alertflag="The image is too large or the file is invalid.")
@app.route('/melon', methods=['GET', 'POST'])
def result_mel():
""" 아직 모델이 없는 관계로 파프리카 모델 임시 사용 """
try:
# Set content_type to header.
content_type = 'application/json'
headers = {'content-type': content_type}
# upload image string array data.
img_file = request.files['file'].stream.read()
img = cv2.imdecode(np.fromstring(img_file, np.uint8), cv2.IMREAD_COLOR)
# map to json.
send = base64.b64encode(np.array(img))
request_json = json.dumps({'input_img': send.decode(),
'info': {
'height': img.shape[0],
'width': img.shape[1],
'channel': img.shape[2]
}
})
# http request.
response = requests.post(addresses["PAPRIKA_SERVER"], data=request_json, headers=headers)
# print(response)
# ['data', 'time', 'is_gpu']
response_json = response.json()
cost = response_json['time']
is_gpu = response_json['is_gpu']
# change to numpy array.
r = base64.decodebytes(response_json['data'].encode())
response_dat = np.fromstring(r, dtype=np.float)
response_dat = response_dat.reshape((response_json['info']['height'],
response_json['info']['width'],
response_json['info']['channel']))
# decodeed numpy image.
plt.figure(figsize=(7, 7))
plt.imshow(response_dat)
timenow = str(time.time())
fname = os.path.join('static', 'results', 'paprika', timenow + '.png')
plt.axis('off')
plt.savefig(fname)
return render_template('mel_result.html', outimg=fname, timenow=timenow, cost=cost, is_gpu=is_gpu)
except:
        return render_template('uploadmelon.html', alertflag="The image is too large or the file is invalid.")
################################################################################
@app.route('/delete_file', methods=['POST'])
def delete():
if request.method == "POST":
filename = request.form['filename']
if os.path.isfile(filename):
os.remove(filename)
return Response(status="ok")
################################################################################
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=5000)
| [
"flask.render_template",
"matplotlib.pyplot.imshow",
"requests.post",
"matplotlib.pyplot.savefig",
"flask.Flask",
"os.path.join",
"os.path.isfile",
"numpy.array",
"matplotlib.pyplot.figure",
"flask.Response",
"matplotlib.pyplot.axis",
"numpy.fromstring",
"time.time",
"os.remove"
] | [((204, 219), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (209, 219), False, 'from flask import Flask, render_template, request, send_file, Response\n'), ((243, 275), 'os.path.join', 'os.path.join', (['"""static"""', '"""images"""'], {}), "('static', 'images')\n", (255, 275), False, 'import os\n'), ((306, 339), 'os.path.join', 'os.path.join', (['"""static"""', '"""results"""'], {}), "('static', 'results')\n", (318, 339), False, 'import os\n'), ((481, 510), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (496, 510), False, 'from flask import Flask, render_template, request, send_file, Response\n'), ((637, 679), 'flask.render_template', 'render_template', (['"""uploadstrawberries.html"""'], {}), "('uploadstrawberries.html')\n", (652, 679), False, 'from flask import Flask, render_template, request, send_file, Response\n'), ((745, 783), 'flask.render_template', 'render_template', (['"""uploadtomatoes.html"""'], {}), "('uploadtomatoes.html')\n", (760, 783), False, 'from flask import Flask, render_template, request, send_file, Response\n'), ((845, 882), 'flask.render_template', 'render_template', (['"""uploadpaprika.html"""'], {}), "('uploadpaprika.html')\n", (860, 882), False, 'from flask import Flask, render_template, request, send_file, Response\n'), ((941, 976), 'flask.render_template', 'render_template', (['"""uploadmelon.html"""'], {}), "('uploadmelon.html')\n", (956, 976), False, 'from flask import Flask, render_template, request, send_file, Response\n'), ((9529, 9550), 'flask.Response', 'Response', ([], {'status': '"""ok"""'}), "(status='ok')\n", (9537, 9550), False, 'from flask import Flask, render_template, request, send_file, Response\n'), ((1941, 2018), 'requests.post', 'requests.post', (["addresses['TOMATO_SERVER']"], {'data': 'request_json', 'headers': 'headers'}), "(addresses['TOMATO_SERVER'], data=request_json, headers=headers)\n", (1954, 2018), False, 'import requests\n'), ((2350, 2382), 'numpy.fromstring', 'np.fromstring', (['r'], {'dtype': 'np.float'}), '(r, dtype=np.float)\n', (2363, 2382), True, 'import numpy as np\n'), ((2769, 2793), 'matplotlib.pyplot.imshow', 'plt.imshow', (['response_dat'], {}), '(response_dat)\n', (2779, 2793), True, 'import matplotlib.pyplot as plt\n'), ((2845, 2906), 'os.path.join', 'os.path.join', (['"""static"""', '"""results"""', '"""tomato"""', "(timenow + '.png')"], {}), "('static', 'results', 'tomato', timenow + '.png')\n", (2857, 2906), False, 'import os\n'), ((2915, 2930), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2923, 2930), True, 'import matplotlib.pyplot as plt\n'), ((2939, 2957), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (2950, 2957), True, 'import matplotlib.pyplot as plt\n'), ((2974, 3069), 'flask.render_template', 'render_template', (['"""tom_result.html"""'], {'outimg': 'fname', 'timenow': 'timenow', 'cost': 'cost', 'is_gpu': 'is_gpu'}), "('tom_result.html', outimg=fname, timenow=timenow, cost=cost,\n is_gpu=is_gpu)\n", (2989, 3069), False, 'from flask import Flask, render_template, request, send_file, Response\n'), ((4051, 4137), 'requests.post', 'requests.post', (["addresses['STRAWBERRY_SERVER']"], {'data': 'request_json', 'headers': 'headers'}), "(addresses['STRAWBERRY_SERVER'], data=request_json, headers=\n headers)\n", (4064, 4137), False, 'import requests\n'), ((4435, 4461), 'numpy.fromstring', 'np.fromstring', (['r', 'np.float'], {}), '(r, np.float)\n', (4448, 4461), True, 'import numpy as np\n'), 
((4739, 4765), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (4749, 4765), True, 'import matplotlib.pyplot as plt\n'), ((4774, 4798), 'matplotlib.pyplot.imshow', 'plt.imshow', (['response_dat'], {}), '(response_dat)\n', (4784, 4798), True, 'import matplotlib.pyplot as plt\n'), ((4850, 4915), 'os.path.join', 'os.path.join', (['"""static"""', '"""results"""', '"""strawberry"""', "(timenow + '.png')"], {}), "('static', 'results', 'strawberry', timenow + '.png')\n", (4862, 4915), False, 'import os\n'), ((4924, 4939), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4932, 4939), True, 'import matplotlib.pyplot as plt\n'), ((4948, 4966), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (4959, 4966), True, 'import matplotlib.pyplot as plt\n'), ((4983, 5078), 'flask.render_template', 'render_template', (['"""str_result.html"""'], {'outimg': 'fname', 'timenow': 'timenow', 'cost': 'cost', 'is_gpu': 'is_gpu'}), "('str_result.html', outimg=fname, timenow=timenow, cost=cost,\n is_gpu=is_gpu)\n", (4998, 5078), False, 'from flask import Flask, render_template, request, send_file, Response\n'), ((6060, 6138), 'requests.post', 'requests.post', (["addresses['PAPRIKA_SERVER']"], {'data': 'request_json', 'headers': 'headers'}), "(addresses['PAPRIKA_SERVER'], data=request_json, headers=headers)\n", (6073, 6138), False, 'import requests\n'), ((6441, 6473), 'numpy.fromstring', 'np.fromstring', (['r'], {'dtype': 'np.float'}), '(r, dtype=np.float)\n', (6454, 6473), True, 'import numpy as np\n'), ((6751, 6777), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (6761, 6777), True, 'import matplotlib.pyplot as plt\n'), ((6786, 6810), 'matplotlib.pyplot.imshow', 'plt.imshow', (['response_dat'], {}), '(response_dat)\n', (6796, 6810), True, 'import matplotlib.pyplot as plt\n'), ((6862, 6924), 'os.path.join', 'os.path.join', (['"""static"""', '"""results"""', '"""paprika"""', "(timenow + '.png')"], {}), "('static', 'results', 'paprika', timenow + '.png')\n", (6874, 6924), False, 'import os\n'), ((6933, 6948), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (6941, 6948), True, 'import matplotlib.pyplot as plt\n'), ((6957, 6975), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (6968, 6975), True, 'import matplotlib.pyplot as plt\n'), ((6992, 7087), 'flask.render_template', 'render_template', (['"""pap_result.html"""'], {'outimg': 'fname', 'timenow': 'timenow', 'cost': 'cost', 'is_gpu': 'is_gpu'}), "('pap_result.html', outimg=fname, timenow=timenow, cost=cost,\n is_gpu=is_gpu)\n", (7007, 7087), False, 'from flask import Flask, render_template, request, send_file, Response\n'), ((8100, 8178), 'requests.post', 'requests.post', (["addresses['PAPRIKA_SERVER']"], {'data': 'request_json', 'headers': 'headers'}), "(addresses['PAPRIKA_SERVER'], data=request_json, headers=headers)\n", (8113, 8178), False, 'import requests\n'), ((8481, 8513), 'numpy.fromstring', 'np.fromstring', (['r'], {'dtype': 'np.float'}), '(r, dtype=np.float)\n', (8494, 8513), True, 'import numpy as np\n'), ((8791, 8817), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (8801, 8817), True, 'import matplotlib.pyplot as plt\n'), ((8826, 8850), 'matplotlib.pyplot.imshow', 'plt.imshow', (['response_dat'], {}), '(response_dat)\n', (8836, 8850), True, 'import matplotlib.pyplot as plt\n'), ((8902, 8964), 'os.path.join', 'os.path.join', 
(['"""static"""', '"""results"""', '"""paprika"""', "(timenow + '.png')"], {}), "('static', 'results', 'paprika', timenow + '.png')\n", (8914, 8964), False, 'import os\n'), ((8973, 8988), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8981, 8988), True, 'import matplotlib.pyplot as plt\n'), ((8997, 9015), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (9008, 9015), True, 'import matplotlib.pyplot as plt\n'), ((9032, 9127), 'flask.render_template', 'render_template', (['"""mel_result.html"""'], {'outimg': 'fname', 'timenow': 'timenow', 'cost': 'cost', 'is_gpu': 'is_gpu'}), "('mel_result.html', outimg=fname, timenow=timenow, cost=cost,\n is_gpu=is_gpu)\n", (9047, 9127), False, 'from flask import Flask, render_template, request, send_file, Response\n'), ((9459, 9483), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (9473, 9483), False, 'import os\n'), ((1371, 1404), 'numpy.fromstring', 'np.fromstring', (['img_file', 'np.uint8'], {}), '(img_file, np.uint8)\n', (1384, 1404), True, 'import numpy as np\n'), ((1480, 1493), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1488, 1493), True, 'import numpy as np\n'), ((2816, 2827), 'time.time', 'time.time', ([], {}), '()\n', (2825, 2827), False, 'import time\n'), ((3094, 3172), 'flask.render_template', 'render_template', (['"""uploadtomatoes.html"""'], {'alertflag': '"""이미지가 너무 크거나 올바르지 않은 파일입니다."""'}), "('uploadtomatoes.html', alertflag='이미지가 너무 크거나 올바르지 않은 파일입니다.')\n", (3109, 3172), False, 'from flask import Flask, render_template, request, send_file, Response\n'), ((3511, 3544), 'numpy.fromstring', 'np.fromstring', (['img_file', 'np.uint8'], {}), '(img_file, np.uint8)\n', (3524, 3544), True, 'import numpy as np\n'), ((3620, 3633), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (3628, 3633), True, 'import numpy as np\n'), ((4821, 4832), 'time.time', 'time.time', ([], {}), '()\n', (4830, 4832), False, 'import time\n'), ((5103, 5190), 'flask.render_template', 'render_template', (['"""uploadstrawberries.html"""'], {'alertflag': '"""이미지가 너무 크거나 올바르지 않은 파일입니다."""'}), "('uploadstrawberries.html', alertflag=\n '이미지가 너무 크거나 올바르지 않은 파일입니다.')\n", (5118, 5190), False, 'from flask import Flask, render_template, request, send_file, Response\n'), ((5519, 5552), 'numpy.fromstring', 'np.fromstring', (['img_file', 'np.uint8'], {}), '(img_file, np.uint8)\n', (5532, 5552), True, 'import numpy as np\n'), ((5628, 5641), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (5636, 5641), True, 'import numpy as np\n'), ((6833, 6844), 'time.time', 'time.time', ([], {}), '()\n', (6842, 6844), False, 'import time\n'), ((7112, 7189), 'flask.render_template', 'render_template', (['"""uploadpaprika.html"""'], {'alertflag': '"""이미지가 너무 크거나 올바르지 않은 파일입니다."""'}), "('uploadpaprika.html', alertflag='이미지가 너무 크거나 올바르지 않은 파일입니다.')\n", (7127, 7189), False, 'from flask import Flask, render_template, request, send_file, Response\n'), ((7559, 7592), 'numpy.fromstring', 'np.fromstring', (['img_file', 'np.uint8'], {}), '(img_file, np.uint8)\n', (7572, 7592), True, 'import numpy as np\n'), ((7668, 7681), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (7676, 7681), True, 'import numpy as np\n'), ((8873, 8884), 'time.time', 'time.time', ([], {}), '()\n', (8882, 8884), False, 'import time\n'), ((9152, 9227), 'flask.render_template', 'render_template', (['"""uploadmelon.html"""'], {'alertflag': '"""이미지가 너무 크거나 올바르지 않은 파일입니다."""'}), "('uploadmelon.html', alertflag='이미지가 너무 크거나 올바르지 않은 파일입니다.')\n", (9167, 
9227), False, 'from flask import Flask, render_template, request, send_file, Response\n'), ((9497, 9516), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (9506, 9516), False, 'import os\n')] |
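# Editor's sketch: the JSON payload pattern shared by all four endpoints above,
# shown as a standalone round trip. The server and client must agree on dtype:
# the upload side encodes the raw uint8 image buffer, the response side decodes
# float pixels. `np.frombuffer` is the non-deprecated spelling of the
# `np.fromstring` calls used above.
import base64, json
import numpy as np

img = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
payload = json.dumps({'input_img': base64.b64encode(img.tobytes()).decode(),
                      'info': {'height': 4, 'width': 4, 'channel': 3}})

decoded = json.loads(payload)
raw = base64.decodebytes(decoded['input_img'].encode())
restored = np.frombuffer(raw, dtype=np.uint8).reshape(4, 4, 3)
assert (restored == img).all()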
# coding=utf-8
# Copyright 2018-2020 EVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from eva.catalog.catalog_manager import CatalogManager
from eva.server.command_handler import execute_query_fetch_all
from test.util import create_sample_video, file_remove
class InsertExecutorTest(unittest.TestCase):
def setUp(self):
# reset the catalog manager before running each test
CatalogManager().reset()
create_sample_video()
def tearDown(self):
file_remove('dummy.avi')
# integration test
@unittest.skip('Not supported in current version')
def test_should_load_video_in_table(self):
query = """LOAD DATA INFILE 'dummy.avi' INTO MyVideo;"""
execute_query_fetch_all(query)
insert_query = """ INSERT INTO MyVideo (id, data) VALUES (40,
[[[40, 40, 40] , [40, 40, 40]],
[[40, 40, 40], [40, 40, 40]]]);"""
execute_query_fetch_all(insert_query)
insert_query_2 = """ INSERT INTO MyVideo (id, data) VALUES (41,
[[[41, 41, 41] , [41, 41, 41]],
[[41, 41, 41], [41, 41, 41]]]);"""
execute_query_fetch_all(insert_query_2)
query = 'SELECT id, data FROM MyVideo WHERE id = 40'
batch = execute_query_fetch_all(query)
self.assertIsNone(np.testing.assert_array_equal(
batch.frames['data'][0],
np.array([[[40, 40, 40], [40, 40, 40]],
[[40, 40, 40], [40, 40, 40]]])))
query = 'SELECT id, data FROM MyVideo WHERE id = 41;'
batch = execute_query_fetch_all(query)
self.assertIsNone(np.testing.assert_array_equal(
batch.frames['data'][0],
np.array([[[41, 41, 41], [41, 41, 41]],
[[41, 41, 41], [41, 41, 41]]])))
| [
"test.util.file_remove",
"eva.catalog.catalog_manager.CatalogManager",
"numpy.array",
"eva.server.command_handler.execute_query_fetch_all",
"test.util.create_sample_video",
"unittest.skip"
] | [((1077, 1126), 'unittest.skip', 'unittest.skip', (['"""Not supported in current version"""'], {}), "('Not supported in current version')\n", (1090, 1126), False, 'import unittest\n'), ((968, 989), 'test.util.create_sample_video', 'create_sample_video', ([], {}), '()\n', (987, 989), False, 'from test.util import create_sample_video, file_remove\n'), ((1023, 1047), 'test.util.file_remove', 'file_remove', (['"""dummy.avi"""'], {}), "('dummy.avi')\n", (1034, 1047), False, 'from test.util import create_sample_video, file_remove\n'), ((1247, 1277), 'eva.server.command_handler.execute_query_fetch_all', 'execute_query_fetch_all', (['query'], {}), '(query)\n', (1270, 1277), False, 'from eva.server.command_handler import execute_query_fetch_all\n'), ((1480, 1517), 'eva.server.command_handler.execute_query_fetch_all', 'execute_query_fetch_all', (['insert_query'], {}), '(insert_query)\n', (1503, 1517), False, 'from eva.server.command_handler import execute_query_fetch_all\n'), ((1722, 1761), 'eva.server.command_handler.execute_query_fetch_all', 'execute_query_fetch_all', (['insert_query_2'], {}), '(insert_query_2)\n', (1745, 1761), False, 'from eva.server.command_handler import execute_query_fetch_all\n'), ((1840, 1870), 'eva.server.command_handler.execute_query_fetch_all', 'execute_query_fetch_all', (['query'], {}), '(query)\n', (1863, 1870), False, 'from eva.server.command_handler import execute_query_fetch_all\n'), ((2151, 2181), 'eva.server.command_handler.execute_query_fetch_all', 'execute_query_fetch_all', (['query'], {}), '(query)\n', (2174, 2181), False, 'from eva.server.command_handler import execute_query_fetch_all\n'), ((935, 951), 'eva.catalog.catalog_manager.CatalogManager', 'CatalogManager', ([], {}), '()\n', (949, 951), False, 'from eva.catalog.catalog_manager import CatalogManager\n'), ((1977, 2047), 'numpy.array', 'np.array', (['[[[40, 40, 40], [40, 40, 40]], [[40, 40, 40], [40, 40, 40]]]'], {}), '([[[40, 40, 40], [40, 40, 40]], [[40, 40, 40], [40, 40, 40]]])\n', (1985, 2047), True, 'import numpy as np\n'), ((2288, 2358), 'numpy.array', 'np.array', (['[[[41, 41, 41], [41, 41, 41]], [[41, 41, 41], [41, 41, 41]]]'], {}), '([[[41, 41, 41], [41, 41, 41]], [[41, 41, 41], [41, 41, 41]]])\n', (2296, 2358), True, 'import numpy as np\n')] |
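# Editor's sketch: `np.testing.assert_array_equal` returns None when the arrays
# match and raises AssertionError otherwise, which is why the (skipped) test
# above can wrap it in `assertIsNone`. Standalone illustration:
import numpy as np

frame = np.full((2, 2, 3), 40)   # the 2x2 RGB frame inserted for id 40
result = np.testing.assert_array_equal(
    frame, np.array([[[40, 40, 40], [40, 40, 40]],
                     [[40, 40, 40], [40, 40, 40]]]))
assert result is None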
"""
Module containing all the routines to process input settings from the dict-like configuration. Those routines are
intended for internal use only, and are implicitly exported by the ``process_settings`` function.
Each function with a @parameter decorator is registered in the private module-level {__parameter_registry}
dict, which makes it visible to the ``process_settings`` function.
"""
import numbers
import numpy as np
import typing as T
import collections
import collections.abc
from functools import partial
from attrdict import AttrDict
from operator import itemgetter as item
from sqsgenerator.io import read_structure_from_file
from sqsgenerator.settings.exceptions import BadSettings
from sqsgenerator.core import IterationMode, Structure, make_supercell
from sqsgenerator.compat import Feature, have_mpi_support, have_feature
from sqsgenerator.adapters import from_ase_atoms, from_pymatgen_structure
from sqsgenerator.settings.functional import parameter as parameter_, if_, isa
from sqsgenerator.settings.defaults import defaults, random_mode, num_shells, num_species
from sqsgenerator.settings.utils import ensure_array_shape, ensure_array_symmetric, convert, int_safe, \
build_structure, to_internal_composition_specs
__parameter_registry = collections.OrderedDict({})
# the parameter decorator registers all the "processor" function with their names in __parameter_registry
# the ordering will be according to their definition in this file
parameter = partial(parameter_, registry=__parameter_registry)
@parameter('atol', default=defaults.atol)
def read_atol(settings: AttrDict):
if not isinstance(settings.atol, float) or settings.atol < 0:
        raise BadSettings('The absolute tolerance can only be a positive floating point number')
return settings.atol
@parameter('rtol', default=defaults.rtol)
def read_rtol(settings: AttrDict):
if not isinstance(settings.rtol, float) or settings.rtol < 0:
        raise BadSettings('The relative tolerance can only be a positive floating point number')
return settings.rtol
@parameter('mode', default=defaults.mode, required=True)
def read_mode(settings: AttrDict):
if isinstance(settings.mode, IterationMode):
return settings.mode
if settings.mode not in IterationMode.names:
raise BadSettings(f'Unknown iteration mode "{settings.mode}". '
f'Available iteration modes are {list(IterationMode.names.keys())}')
return IterationMode.names[settings.mode]
@parameter('iterations', default=defaults.iterations, required=if_(random_mode)(True)(False))
def read_iterations(settings: AttrDict):
num_iterations = convert(settings.iterations, converter=int_safe,
message=f'Cannot convert "{settings.iterations}" to int')
if num_iterations < 0:
raise BadSettings('"iterations" must be positive')
return num_iterations
@parameter('max_output_configurations', default=defaults.max_output_configurations)
def read_max_output_configurations(settings: AttrDict):
num_confs = convert(settings.max_output_configurations, converter=int_safe,
message=f'Cannot convert "{settings.max_output_configurations}" to int')
if num_confs < 0:
raise BadSettings('"max_output_configurations" must be positive')
return num_confs
@parameter('structure')
def read_structure(settings: AttrDict) -> Structure:
needed_fields = {'lattice', 'coords', 'species'}
s = settings.structure
structure = None
if isinstance(s, Structure):
structure = s
if have_feature(Feature.ase) and structure is None:
from ase import Atoms
if isinstance(s, Atoms):
structure = from_ase_atoms(s)
if have_feature(Feature.pymatgen) and structure is None:
from pymatgen.core import Structure as PymatgenStructure, PeriodicSite
if isinstance(s, PymatgenStructure):
structure = from_pymatgen_structure(s)
elif isinstance(s, collections.abc.Iterable):
site_list = list(s)
if site_list and all(map(isa(PeriodicSite), site_list)):
structure = from_pymatgen_structure(PymatgenStructure.from_sites(site_list))
if isinstance(s, dict) and structure is None:
if 'file' in s:
structure = read_structure_from_file(settings)
elif all(field in s for field in needed_fields):
lattice = np.array(s['lattice'])
coords = np.array(s['coords'])
species = list(s['species'])
structure = Structure(lattice, coords, species, (True, True, True))
else:
raise BadSettings(f'A structure dictionary needs the following fields {needed_fields}')
if structure is None:
raise BadSettings(f'Cannot read structure from the settings, "{type(s)}"')
if isinstance(s, dict) and 'supercell' in s:
sizes = settings.structure.supercell
if len(sizes) != 3:
raise BadSettings('To create a supercell you need to specify three lengths')
structure = make_supercell(structure, *sizes)
del settings.structure['supercell']
return structure
@parameter('which', default=defaults.which, required=True)
def read_which(settings: AttrDict):
structure = settings.structure
if isinstance(settings.which, str):
sublattice = settings.which
allowed_sublattices = {'all', }.union(structure.unique_species)
if sublattice not in allowed_sublattices:
raise BadSettings(f'The structure does not have an "{sublattice}" sublattice. '
f'Possible values would be {allowed_sublattices}')
if sublattice == 'all':
mask = tuple(range(structure.num_atoms))
else:
mask = tuple(i for i, sp in enumerate(structure.species) if sp.symbol == sublattice)
which = mask
elif isinstance(settings.which, (list, tuple)):
sublattice = tuple(set(settings.which)) # eliminate duplicates
if len(sublattice) < 2:
            raise BadSettings('You need to specify at least two different lattice positions to define a sublattice')
if not all(map(isa(int), sublattice)):
            raise BadSettings('I only understand integer lists to specify a sublattice')
if not all(map(lambda _: 0 <= _ < structure.num_atoms, sublattice)):
raise BadSettings(f'All indices in the list must be 0 <= index < {structure.num_atoms}')
which = tuple(settings.which)
else:
raise BadSettings('I do not understand your composition specification')
# we remove the which clause from the settings, since we are replacing the structure with the sublattice structure
return which
@parameter('composition', default=defaults.composition, required=True)
def read_composition(settings: AttrDict):
structure = settings.structure[settings.which]
if not isinstance(settings.composition, dict):
raise BadSettings(f'Cannot interpret "composition" settings. I expect a dictionary')
build_structure(settings.composition, structure)
return settings.composition
@parameter('shell_distances', default=defaults.shell_distances, required=True)
def read_shell_distances(settings: AttrDict):
if not isinstance(settings.shell_distances, (list, tuple, set)):
        raise BadSettings('I only understand lists, sets, and tuples for the shell-distances parameter')
if not all(map(isa(numbers.Real), settings.shell_distances)):
raise BadSettings('All shell distances must be real values')
distances = list(filter(lambda d: not np.isclose(d, 0.0), settings.shell_distances))
if len(distances) < 1:
raise BadSettings('You need to specify at least one shell-distance')
for distance in distances:
if distance < 0.0:
raise BadSettings(f'A distance can never be less than zero. You specified "{distance}"')
sorted_distances = list(sorted(distances))
sorted_distances.insert(0, 0.0)
return sorted_distances
@parameter('shell_weights', default=defaults.shell_weights, required=True)
def read_shell_weights(settings: AttrDict):
if not isinstance(settings.shell_weights, dict):
raise BadSettings('I only understand dictionaries for the shell-weight parameter')
if len(settings.shell_weights) < 1:
raise BadSettings('You have to include at least one coordination shell to carry out the optimization')
allowed_indices = set(range(1, len(settings.shell_distances)))
parsed_weights = {
convert(shell, to=int, message=f'A shell must be an integer. You specified {shell}'):
convert(weight, to=float, message=f'A weight must be a floating point number. You specified {weight}')
for shell, weight in settings.shell_weights.items()
}
for shell in parsed_weights.keys():
if shell not in allowed_indices:
raise BadSettings(f'The shell {shell} you specified is not allowed. Allowed values are {allowed_indices}')
return settings.shell_weights
@parameter('pair_weights', default=defaults.pair_weights, required=True)
def read_pair_weights(settings: AttrDict):
nums = num_species(settings)
nshells = num_shells(settings)
if isinstance(settings.pair_weights, (list, tuple, np.ndarray)):
w = np.array(settings.pair_weights).astype(float)
if w.ndim in {2, 3}:
expected_shape = (nshells, nums, nums) if w.ndim == 3 else (nums, nums)
ensure_array_shape(w, expected_shape, f'The 3D "pair_weights" you have specified '
f'has a wrong shape ({w.shape}). Expected {expected_shape}')
ensure_array_symmetric(w, f'The "pair_weights" parameters are not symmetric')
sorted_weights = sorted(settings.shell_weights.items(), key=item(0))
weights = w if w.ndim == 3 else np.stack([w * shell_weight for _, shell_weight in sorted_weights])
return weights
raise BadSettings(f'As "pair_weights" I do expect a {nums}x{nums} matrix, '
f'since your structure contains {nums} different species')
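def _example_pair_weight_broadcast():  # editor's sketch, not part of the original module
    # Illustrates the np.stack branch above: a single (nums x nums) matrix is
    # expanded to one layer per coordination shell by scaling it with the
    # shell weights, sorted by shell index.
    w = np.array([[0.0, 1.0], [1.0, 0.0]])               # 2 species
    shell_weights = {1: 1.0, 2: 0.5}                      # 2 shells
    stacked = np.stack([w * sw for _, sw in sorted(shell_weights.items(), key=item(0))])
    assert stacked.shape == (2, 2, 2) and stacked[1, 0, 1] == 0.5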
@parameter('target_objective', default=defaults.target_objective, required=True)
def read_target_objective(settings: AttrDict):
nums = num_species(settings)
nshells = num_shells(settings)
if isinstance(settings.target_objective, (int, float)):
return np.ones((nshells, nums, nums)).astype(float) * float(settings.target_objective)
if isinstance(settings.target_objective, (list, tuple, np.ndarray)):
o = np.array(settings.target_objective).astype(float)
if o.ndim in {2, 3}:
expected_shape = (nshells, nums, nums) if o.ndim == 3 else (nums, nums)
ensure_array_shape(o, expected_shape, f'The 3D "target_objective" you have specified '
f'has a wrong shape ({o.shape}). Expected {expected_shape}')
ensure_array_symmetric(o, f'The "target_objective" parameters are not symmetric')
objectives = o if o.ndim == 3 else np.stack([o] * nshells)
return objectives
raise BadSettings(f'The "target_objective" you have specified has a {o.ndim} dimensions. '
f'I only can cope with 2 and 3')
raise BadSettings(f'Cannot interpret "target_objective" setting. '
f'Acceptable values are a single number, {nums}x{nums} or {nshells}x{nums}x{nums} matrices!')
@parameter('threads_per_rank', default=defaults.threads_per_rank, required=True)
def read_threads_per_rank(settings: AttrDict):
converter = partial(convert, to=int, converter=int_safe, message='Cannot parse "threads_per_rank" argument')
if isinstance(settings.threads_per_rank, (float, int)):
return [converter(settings.threads_per_rank)]
if isinstance(settings.threads_per_rank, (list, tuple, np.ndarray)):
if len(settings.threads_per_rank) != 1:
if not have_mpi_support():
raise BadSettings(f'The module sqsgenerator.core.iteration was not compiled with MPI support')
        return list(map(converter, settings.threads_per_rank))
raise BadSettings(f'Cannot interpret "threads_per_rank" setting.')
def process_settings(settings: AttrDict, params: T.Optional[T.Set[str]] = None, ignore: T.Iterable[str]=()) -> AttrDict:
"""
    Process a dict-like set of input parameters, according to the rules specified in the
    `Input parameter documentation <https://sqsgenerator.readthedocs.io/en/latest/input_parameters.html>`_. This function
    should be used for processing user input; it therefore applies the parser functions defined in
    ``sqsgenerator.settings.readers``. To process only a specific subset of parameters, the {params} argument is used.
    To {ignore} specific parameters, pass a list of parameter names.
:param settings: the dict-like user configuration
:type settings: AttrDict
:param params: If specified only the subset of {params} is processed (default is ``None``)
:type params: Optional[Set[``None``]]
:param ignore: a list/iterable of params to ignore (default is ``()``)
:type ignore: Iterable[``str``]
:return: the processed settings dictionary
:rtype: AttrDict
"""
params = params if params is not None else set(parameter_list())
last_needed_parameter = max(params, key=parameter_index)
ignore = set(ignore)
for index, (param, processor) in enumerate(__parameter_registry.items()):
if param not in params:
# we can only skip this parameter if None of the other parameters depends on param
if parameter_index(param) > parameter_index(last_needed_parameter):
continue
if param in ignore:
continue
settings[param] = processor(settings)
return settings
def parameter_list() -> T.List[str]:
return list(__parameter_registry.keys())
def parameter_index(p: str) -> int:
return parameter_list().index(p)
| [
"numpy.array",
"sqsgenerator.compat.have_feature",
"sqsgenerator.adapters.from_ase_atoms",
"operator.itemgetter",
"sqsgenerator.settings.utils.convert",
"sqsgenerator.core.Structure",
"sqsgenerator.settings.utils.ensure_array_shape",
"sqsgenerator.settings.utils.ensure_array_symmetric",
"numpy.stack... | [((1277, 1304), 'collections.OrderedDict', 'collections.OrderedDict', (['{}'], {}), '({})\n', (1300, 1304), False, 'import collections\n'), ((1490, 1540), 'functools.partial', 'partial', (['parameter_'], {'registry': '__parameter_registry'}), '(parameter_, registry=__parameter_registry)\n', (1497, 1540), False, 'from functools import partial\n'), ((2669, 2780), 'sqsgenerator.settings.utils.convert', 'convert', (['settings.iterations'], {'converter': 'int_safe', 'message': 'f"""Cannot convert "{settings.iterations}" to int"""'}), '(settings.iterations, converter=int_safe, message=\n f\'Cannot convert "{settings.iterations}" to int\')\n', (2676, 2780), False, 'from sqsgenerator.settings.utils import ensure_array_shape, ensure_array_symmetric, convert, int_safe, build_structure, to_internal_composition_specs\n'), ((3075, 3216), 'sqsgenerator.settings.utils.convert', 'convert', (['settings.max_output_configurations'], {'converter': 'int_safe', 'message': 'f"""Cannot convert "{settings.max_output_configurations}" to int"""'}), '(settings.max_output_configurations, converter=int_safe, message=\n f\'Cannot convert "{settings.max_output_configurations}" to int\')\n', (3082, 3216), False, 'from sqsgenerator.settings.utils import ensure_array_shape, ensure_array_symmetric, convert, int_safe, build_structure, to_internal_composition_specs\n'), ((7079, 7127), 'sqsgenerator.settings.utils.build_structure', 'build_structure', (['settings.composition', 'structure'], {}), '(settings.composition, structure)\n', (7094, 7127), False, 'from sqsgenerator.settings.utils import ensure_array_shape, ensure_array_symmetric, convert, int_safe, build_structure, to_internal_composition_specs\n'), ((9204, 9225), 'sqsgenerator.settings.defaults.num_species', 'num_species', (['settings'], {}), '(settings)\n', (9215, 9225), False, 'from sqsgenerator.settings.defaults import defaults, random_mode, num_shells, num_species\n'), ((9240, 9260), 'sqsgenerator.settings.defaults.num_shells', 'num_shells', (['settings'], {}), '(settings)\n', (9250, 9260), False, 'from sqsgenerator.settings.defaults import defaults, random_mode, num_shells, num_species\n'), ((10330, 10351), 'sqsgenerator.settings.defaults.num_species', 'num_species', (['settings'], {}), '(settings)\n', (10341, 10351), False, 'from sqsgenerator.settings.defaults import defaults, random_mode, num_shells, num_species\n'), ((10366, 10386), 'sqsgenerator.settings.defaults.num_shells', 'num_shells', (['settings'], {}), '(settings)\n', (10376, 10386), False, 'from sqsgenerator.settings.defaults import defaults, random_mode, num_shells, num_species\n'), ((11368, 11528), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['f"""Cannot interpret "target_objective" setting. Acceptable values are a single number, {nums}x{nums} or {nshells}x{nums}x{nums} matrices!"""'], {}), '(\n f\'Cannot interpret "target_objective" setting. 
Acceptable values are a single number, {nums}x{nums} or {nshells}x{nums}x{nums} matrices!\'\n )\n', (11379, 11528), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((11691, 11792), 'functools.partial', 'partial', (['convert'], {'to': 'int', 'converter': 'int_safe', 'message': '"""Cannot parse "threads_per_rank" argument"""'}), '(convert, to=int, converter=int_safe, message=\n \'Cannot parse "threads_per_rank" argument\')\n', (11698, 11792), False, 'from functools import partial\n'), ((12247, 12307), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['f"""Cannot interpret "threads_per_rank" setting."""'], {}), '(f\'Cannot interpret "threads_per_rank" setting.\')\n', (12258, 12307), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((1700, 1788), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['f"""The absolute tolerance can be only a positive floating point number"""'], {}), "(\n f'The absolute tolerance can be only a positive floating point number')\n", (1711, 1788), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((1968, 2056), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['f"""The relative tolerance can be only a positive floating point number"""'], {}), "(\n f'The relative tolerance can be only a positive floating point number')\n", (1979, 2056), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((2846, 2890), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['""""iterations" must be positive"""'], {}), '(\'"iterations" must be positive\')\n', (2857, 2890), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((3272, 3331), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['""""max_output_configurations" must be positive"""'], {}), '(\'"max_output_configurations" must be positive\')\n', (3283, 3331), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((3596, 3621), 'sqsgenerator.compat.have_feature', 'have_feature', (['Feature.ase'], {}), '(Feature.ase)\n', (3608, 3621), False, 'from sqsgenerator.compat import Feature, have_mpi_support, have_feature\n'), ((3757, 3787), 'sqsgenerator.compat.have_feature', 'have_feature', (['Feature.pymatgen'], {}), '(Feature.pymatgen)\n', (3769, 3787), False, 'from sqsgenerator.compat import Feature, have_mpi_support, have_feature\n'), ((5089, 5122), 'sqsgenerator.core.make_supercell', 'make_supercell', (['structure', '*sizes'], {}), '(structure, *sizes)\n', (5103, 5122), False, 'from sqsgenerator.core import IterationMode, Structure, make_supercell\n'), ((6995, 7073), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['f"""Cannot interpret "composition" settings. I expect a dictionary"""'], {}), '(f\'Cannot interpret "composition" settings. 
I expect a dictionary\')\n', (7006, 7073), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((7370, 7469), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['"""I only understand list, sets, and tuples for the shell-distances parameter"""'], {}), "(\n 'I only understand list, sets, and tuples for the shell-distances parameter'\n )\n", (7381, 7469), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((7540, 7594), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['"""All shell distances must be real values"""'], {}), "('All shell distances must be real values')\n", (7551, 7594), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((7725, 7787), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['"""You need to specify at least one shell-distance"""'], {}), "('You need to specify at least one shell-distance')\n", (7736, 7787), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((8246, 8322), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['"""I only understand dictionaries for the shell-weight parameter"""'], {}), "('I only understand dictionaries for the shell-weight parameter')\n", (8257, 8322), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((8377, 8483), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['"""You have to include at least one coordination shell to carry out the optimization"""'], {}), "(\n 'You have to include at least one coordination shell to carry out the optimization'\n )\n", (8388, 8483), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((8573, 8662), 'sqsgenerator.settings.utils.convert', 'convert', (['shell'], {'to': 'int', 'message': 'f"""A shell must be an integer. You specified {shell}"""'}), "(shell, to=int, message=\n f'A shell must be an integer. You specified {shell}')\n", (8580, 8662), False, 'from sqsgenerator.settings.utils import ensure_array_shape, ensure_array_symmetric, convert, int_safe, build_structure, to_internal_composition_specs\n'), ((8671, 8778), 'sqsgenerator.settings.utils.convert', 'convert', (['weight'], {'to': 'float', 'message': 'f"""A weight must be a floating point number. You specified {weight}"""'}), "(weight, to=float, message=\n f'A weight must be a floating point number. You specified {weight}')\n", (8678, 8778), False, 'from sqsgenerator.settings.utils import ensure_array_shape, ensure_array_symmetric, convert, int_safe, build_structure, to_internal_composition_specs\n'), ((10034, 10168), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['f"""As "pair_weights" I do expect a {nums}x{nums} matrix, since your structure contains {nums} different species"""'], {}), '(\n f\'As "pair_weights" I do expect a {nums}x{nums} matrix, since your structure contains {nums} different species\'\n )\n', (10045, 10168), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((11213, 11336), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['f"""The "target_objective" you have specified has a {o.ndim} dimensions. I only can cope with 2 and 3"""'], {}), '(\n f\'The "target_objective" you have specified has a {o.ndim} dimensions. 
I only can cope with 2 and 3\'\n )\n', (11224, 11336), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((3732, 3749), 'sqsgenerator.adapters.from_ase_atoms', 'from_ase_atoms', (['s'], {}), '(s)\n', (3746, 3749), False, 'from sqsgenerator.adapters import from_ase_atoms, from_pymatgen_structure\n'), ((3959, 3985), 'sqsgenerator.adapters.from_pymatgen_structure', 'from_pymatgen_structure', (['s'], {}), '(s)\n', (3982, 3985), False, 'from sqsgenerator.adapters import from_ase_atoms, from_pymatgen_structure\n'), ((4333, 4367), 'sqsgenerator.io.read_structure_from_file', 'read_structure_from_file', (['settings'], {}), '(settings)\n', (4357, 4367), False, 'from sqsgenerator.io import read_structure_from_file\n'), ((4998, 5068), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['"""To create a supercell you need to specify three lengths"""'], {}), "('To create a supercell you need to specify three lengths')\n", (5009, 5068), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((5537, 5667), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['f"""The structure does not have an "{sublattice}" sublattice. Possible values would be {allowed_sublattices}"""'], {}), '(\n f\'The structure does not have an "{sublattice}" sublattice. Possible values would be {allowed_sublattices}\'\n )\n', (5548, 5667), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((6562, 6627), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['"""I do not understand your composition specification"""'], {}), "('I do not understand your composition specification')\n", (6573, 6627), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((7864, 7951), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['f"""A distance can never be less than zero. You specified "{distance}\\""""'], {}), '(\n f\'A distance can never be less than zero. You specified "{distance}"\')\n', (7875, 7951), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((8940, 9050), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['f"""The shell {shell} you specified is not allowed. Allowed values are {allowed_indices}"""'], {}), "(\n f'The shell {shell} you specified is not allowed. Allowed values are {allowed_indices}'\n )\n", (8951, 9050), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((9515, 9663), 'sqsgenerator.settings.utils.ensure_array_shape', 'ensure_array_shape', (['w', 'expected_shape', 'f"""The 3D "pair_weights" you have specified has a wrong shape ({w.shape}). Expected {expected_shape}"""'], {}), '(w, expected_shape,\n f\'The 3D "pair_weights" you have specified has a wrong shape ({w.shape}). 
Expected {expected_shape}\'\n )\n', (9533, 9663), False, 'from sqsgenerator.settings.utils import ensure_array_shape, ensure_array_symmetric, convert, int_safe, build_structure, to_internal_composition_specs\n'), ((9721, 9798), 'sqsgenerator.settings.utils.ensure_array_symmetric', 'ensure_array_symmetric', (['w', 'f"""The "pair_weights" parameters are not symmetric"""'], {}), '(w, f\'The "pair_weights" parameters are not symmetric\')\n', (9743, 9798), False, 'from sqsgenerator.settings.utils import ensure_array_shape, ensure_array_symmetric, convert, int_safe, build_structure, to_internal_composition_specs\n'), ((10804, 10956), 'sqsgenerator.settings.utils.ensure_array_shape', 'ensure_array_shape', (['o', 'expected_shape', 'f"""The 3D "target_objective" you have specified has a wrong shape ({o.shape}). Expected {expected_shape}"""'], {}), '(o, expected_shape,\n f\'The 3D "target_objective" you have specified has a wrong shape ({o.shape}). Expected {expected_shape}\'\n )\n', (10822, 10956), False, 'from sqsgenerator.settings.utils import ensure_array_shape, ensure_array_symmetric, convert, int_safe, build_structure, to_internal_composition_specs\n'), ((11015, 11100), 'sqsgenerator.settings.utils.ensure_array_symmetric', 'ensure_array_symmetric', (['o', 'f"""The "target_objective" parameters are not symmetric"""'], {}), '(o,\n f\'The "target_objective" parameters are not symmetric\')\n', (11037, 11100), False, 'from sqsgenerator.settings.utils import ensure_array_shape, ensure_array_symmetric, convert, int_safe, build_structure, to_internal_composition_specs\n'), ((2576, 2592), 'sqsgenerator.settings.functional.if_', 'if_', (['random_mode'], {}), '(random_mode)\n', (2579, 2592), False, 'from sqsgenerator.settings.functional import parameter as parameter_, if_, isa\n'), ((4447, 4469), 'numpy.array', 'np.array', (["s['lattice']"], {}), "(s['lattice'])\n", (4455, 4469), True, 'import numpy as np\n'), ((4491, 4512), 'numpy.array', 'np.array', (["s['coords']"], {}), "(s['coords'])\n", (4499, 4512), True, 'import numpy as np\n'), ((4578, 4633), 'sqsgenerator.core.Structure', 'Structure', (['lattice', 'coords', 'species', '(True, True, True)'], {}), '(lattice, coords, species, (True, True, True))\n', (4587, 4633), False, 'from sqsgenerator.core import IterationMode, Structure, make_supercell\n'), ((4666, 4752), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['f"""A structure dictionary needs the following fields {needed_fields}"""'], {}), "(\n f'A structure dictionary needs the following fields {needed_fields}')\n", (4677, 4752), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((6083, 6191), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['"""You need to at least specify two different lattice positions to define a sublattice"""'], {}), "(\n 'You need to at least specify two different lattice positions to define a sublattice'\n )\n", (6094, 6191), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((6247, 6321), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['f"""I do only understand integer lists to specify a sublattice"""'], {}), "(f'I do only understand integer lists to specify a sublattice')\n", (6258, 6321), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((6417, 6504), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['f"""All indices in the list must be 0 <= index < {structure.num_atoms}"""'], {}), "(\n f'All indices in the list must be 0 <= index < 
{structure.num_atoms}')\n", (6428, 6504), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((7479, 7496), 'sqsgenerator.settings.functional.isa', 'isa', (['numbers.Real'], {}), '(numbers.Real)\n', (7482, 7496), False, 'from sqsgenerator.settings.functional import parameter as parameter_, if_, isa\n'), ((9343, 9374), 'numpy.array', 'np.array', (['settings.pair_weights'], {}), '(settings.pair_weights)\n', (9351, 9374), True, 'import numpy as np\n'), ((9925, 9993), 'numpy.stack', 'np.stack', (['[(w * shell_weight) for _, shell_weight in sorted_weights]'], {}), '([(w * shell_weight) for _, shell_weight in sorted_weights])\n', (9933, 9993), True, 'import numpy as np\n'), ((10629, 10664), 'numpy.array', 'np.array', (['settings.target_objective'], {}), '(settings.target_objective)\n', (10637, 10664), True, 'import numpy as np\n'), ((11144, 11167), 'numpy.stack', 'np.stack', (['([o] * nshells)'], {}), '([o] * nshells)\n', (11152, 11167), True, 'import numpy as np\n'), ((12042, 12060), 'sqsgenerator.compat.have_mpi_support', 'have_mpi_support', ([], {}), '()\n', (12058, 12060), False, 'from sqsgenerator.compat import Feature, have_mpi_support, have_feature\n'), ((12084, 12182), 'sqsgenerator.settings.exceptions.BadSettings', 'BadSettings', (['f"""The module sqsgenerator.core.iteration was not compiled with MPI support"""'], {}), "(\n f'The module sqsgenerator.core.iteration was not compiled with MPI support'\n )\n", (12095, 12182), False, 'from sqsgenerator.settings.exceptions import BadSettings\n'), ((7637, 7655), 'numpy.isclose', 'np.isclose', (['d', '(0.0)'], {}), '(d, 0.0)\n', (7647, 7655), True, 'import numpy as np\n'), ((9872, 9879), 'operator.itemgetter', 'item', (['(0)'], {}), '(0)\n', (9876, 9879), True, 'from operator import itemgetter as item\n'), ((10463, 10493), 'numpy.ones', 'np.ones', (['(nshells, nums, nums)'], {}), '((nshells, nums, nums))\n', (10470, 10493), True, 'import numpy as np\n'), ((2434, 2460), 'sqsgenerator.core.IterationMode.names.keys', 'IterationMode.names.keys', ([], {}), '()\n', (2458, 2460), False, 'from sqsgenerator.core import IterationMode, Structure, make_supercell\n'), ((4193, 4232), 'pymatgen.core.Structure.from_sites', 'PymatgenStructure.from_sites', (['site_list'], {}), '(site_list)\n', (4221, 4232), True, 'from pymatgen.core import Structure as PymatgenStructure, PeriodicSite\n'), ((6205, 6213), 'sqsgenerator.settings.functional.isa', 'isa', (['int'], {}), '(int)\n', (6208, 6213), False, 'from sqsgenerator.settings.functional import parameter as parameter_, if_, isa\n'), ((4109, 4126), 'sqsgenerator.settings.functional.isa', 'isa', (['PeriodicSite'], {}), '(PeriodicSite)\n', (4112, 4126), False, 'from sqsgenerator.settings.functional import parameter as parameter_, if_, isa\n')] |
import io
from PIL import Image
from torchvision import models, transforms
from torch.autograd import Variable
from torch.nn import functional as F
import numpy as np
import cv2
import json
import torch
import pandas as pd
import os
import csv
data_dir = "D:\\Users\\Anude\\Documents\\CS 497\\CAM\\celebA\\img_align_celeba\\img_align_celeba"
test_df = pd.read_pickle('test.pickle')
test_file_endings = list(test_df.drop('labels', 1).index)
test_file_names_full = [os.path.join(data_dir, name) for name in test_file_endings]
# networks such as googlenet, resnet, densenet already use global average pooling at the end, so CAM could be used directly.
model_id = 2
if model_id == 1:
net = models.squeezenet1_1(pretrained=True)
finalconv_name = 'features' # this is the last conv layer of the network
elif model_id == 2:
net = models.resnet18(pretrained=True)
net = torch.load('model_training/resnet_bsize8_epoch5_full_1_bal.pt')
finalconv_name = 'layer4'
elif model_id == 3:
net = models.densenet161(pretrained=True)
finalconv_name = 'features'
net.eval()
# hook the feature extractor
features_blobs = []
def hook_feature(module, input, output):
features_blobs.append(output.data.cpu().numpy())
net._modules["0"]._modules.get(finalconv_name).register_forward_hook(hook_feature)
# get the softmax weight
params = list(net.parameters())
weight_softmax = np.squeeze(params[-4].data.numpy())
def returnCAM(feature_conv, weight_softmax, class_idx):
# generate the class activation maps upsample to 256x256
size_upsample = (256, 256)
bz, nc, h, w = feature_conv.shape
output_cam = []
for idx in class_idx:
cam = weight_softmax[idx].dot(feature_conv.reshape((nc, h*w)))
cam = cam.reshape(h, w)
cam = cam - np.min(cam)
cam_img = cam / np.max(cam)
cam_img = np.uint8(255 * cam_img)
output_cam.append(cv2.resize(cam_img, size_upsample))
return output_cam
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
preprocess = transforms.Compose([
transforms.Resize((224,224)),
transforms.ToTensor(),
normalize
])
classes = ['Female','Male']
predictions = []
for i, image_file in enumerate(test_file_names_full):
# load test image
img_pil = Image.open(image_file)
img_tensor = preprocess(img_pil)
img_variable = Variable(img_tensor.unsqueeze(0))
logit = net(img_variable)
index_pred = torch.argmax(logit).item()
predictions.append(index_pred)
h_x = F.softmax(logit, dim=1).data.squeeze()
probs, idx = h_x.sort(0, True)
probs = probs.numpy()
idx = idx.numpy()
CAMs = returnCAM(features_blobs[0], weight_softmax, [idx[0]])
# render the CAM and output
print('output CAM.jpg for the top1 prediction: %s'%classes[idx[0]])
img = cv2.imread(image_file)
height, width, _ = img.shape
heatmap = cv2.applyColorMap(cv2.resize(CAMs[0],(width, height)), cv2.COLORMAP_JET)
result = heatmap * 0.3 + img * 0.5
cv2.imwrite(f'./results_test_balance/{test_file_endings[i]}', result)
with open("model_bias_analysis/predictions_test_balance.csv", "w") as f:
wr = csv.writer(f, delimiter="\n")
wr.writerow(predictions) | [
"numpy.uint8",
"torchvision.models.densenet161",
"torchvision.models.resnet18",
"torch.nn.functional.softmax",
"pandas.read_pickle",
"numpy.max",
"numpy.min",
"torchvision.transforms.ToTensor",
"torch.argmax",
"csv.writer",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
... | [((353, 382), 'pandas.read_pickle', 'pd.read_pickle', (['"""test.pickle"""'], {}), "('test.pickle')\n", (367, 382), True, 'import pandas as pd\n'), ((1964, 2039), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1984, 2039), False, 'from torchvision import models, transforms\n'), ((465, 493), 'os.path.join', 'os.path.join', (['data_dir', 'name'], {}), '(data_dir, name)\n', (477, 493), False, 'import os\n'), ((692, 729), 'torchvision.models.squeezenet1_1', 'models.squeezenet1_1', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (712, 729), False, 'from torchvision import models, transforms\n'), ((2294, 2316), 'PIL.Image.open', 'Image.open', (['image_file'], {}), '(image_file)\n', (2304, 2316), False, 'from PIL import Image\n'), ((2839, 2861), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (2849, 2861), False, 'import cv2\n'), ((3025, 3094), 'cv2.imwrite', 'cv2.imwrite', (['f"""./results_test_balance/{test_file_endings[i]}"""', 'result'], {}), "(f'./results_test_balance/{test_file_endings[i]}', result)\n", (3036, 3094), False, 'import cv2\n'), ((3183, 3212), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '"""\n"""'}), "(f, delimiter='\\n')\n", (3193, 3212), False, 'import csv\n'), ((837, 869), 'torchvision.models.resnet18', 'models.resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (852, 869), False, 'from torchvision import models, transforms\n'), ((880, 943), 'torch.load', 'torch.load', (['"""model_training/resnet_bsize8_epoch5_full_1_bal.pt"""'], {}), "('model_training/resnet_bsize8_epoch5_full_1_bal.pt')\n", (890, 943), False, 'import torch\n'), ((1843, 1866), 'numpy.uint8', 'np.uint8', (['(255 * cam_img)'], {}), '(255 * cam_img)\n', (1851, 1866), True, 'import numpy as np\n'), ((2085, 2114), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (2102, 2114), False, 'from torchvision import models, transforms\n'), ((2118, 2139), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2137, 2139), False, 'from torchvision import models, transforms\n'), ((2927, 2963), 'cv2.resize', 'cv2.resize', (['CAMs[0]', '(width, height)'], {}), '(CAMs[0], (width, height))\n', (2937, 2963), False, 'import cv2\n'), ((1004, 1039), 'torchvision.models.densenet161', 'models.densenet161', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1022, 1039), False, 'from torchvision import models, transforms\n'), ((1777, 1788), 'numpy.min', 'np.min', (['cam'], {}), '(cam)\n', (1783, 1788), True, 'import numpy as np\n'), ((1813, 1824), 'numpy.max', 'np.max', (['cam'], {}), '(cam)\n', (1819, 1824), True, 'import numpy as np\n'), ((1893, 1927), 'cv2.resize', 'cv2.resize', (['cam_img', 'size_upsample'], {}), '(cam_img, size_upsample)\n', (1903, 1927), False, 'import cv2\n'), ((2454, 2473), 'torch.argmax', 'torch.argmax', (['logit'], {}), '(logit)\n', (2466, 2473), False, 'import torch\n'), ((2527, 2550), 'torch.nn.functional.softmax', 'F.softmax', (['logit'], {'dim': '(1)'}), '(logit, dim=1)\n', (2536, 2550), True, 'from torch.nn import functional as F\n')] |
from __future__ import annotations
import warnings
import numpy as np
import napari
import os
from ..arrays import *
from ..frame import *
from .._const import Const
from ..core import imread, lazy_imread
def copy_layer(layer):
args, kwargs, *_ = layer.as_layer_data_tuple()
# linear interpolation is valid only in 3D mode.
if kwargs.get("interpolation", None) == "linear":
kwargs = kwargs.copy()
kwargs["interpolation"] = "nearest"
# This is necessarry for text bound layers.
kwargs.pop("properties", None)
kwargs.pop("property_choices", None)
copy = layer.__class__(args, **kwargs)
return copy
def iter_layer(viewer:"napari.Viewer", layer_type:str):
"""
Iterate over layers and yield only certain type of layers.
Parameters
----------
layer_type : str, {"shape", "image", "point"}
Type of layer.
Yields
-------
napari.layers
Layers specified by layer_type
"""
if isinstance(layer_type, str):
layer_type = [layer_type]
layer_type = tuple(getattr(napari.layers, t) for t in layer_type)
for layer in viewer.layers:
if isinstance(layer, layer_type):
yield layer
def iter_selected_layer(viewer:"napari.Viewer", layer_type:str|list[str]):
if isinstance(layer_type, str):
layer_type = [layer_type]
layer_type = tuple(getattr(napari.layers, t) for t in layer_type)
for layer in viewer.layers.selection:
if isinstance(layer, layer_type):
yield layer
def front_image(viewer:"napari.Viewer"):
"""
From list of image layers return the most front visible image.
"""
front = None
for img in iter_layer(viewer, "Image"):
if img.visible:
front = img # This is ImgArray
if front is None:
raise ValueError("There is no visible image layer.")
return front
def to_labels(layer:napari.layers.Shapes, labels_shape, zoom_factor=1):
return layer._data_view.to_labels(labels_shape=labels_shape, zoom_factor=zoom_factor)
def make_world_scale(obj):
scale = []
for a in obj._axes:
if a in "zyx":
scale.append(obj.scale[a])
elif a == "c":
pass
else:
scale.append(1)
return scale
def upon_add_layer(event):
try:
new_layer = event.sources[0][-1]
except IndexError:
return None
new_layer.translate = new_layer.translate.astype(np.float64)
if isinstance(new_layer, napari.layers.Shapes):
_text_bound_init(new_layer)
new_layer._rotation_handle_length = 20/np.mean(new_layer.scale[-2:])
@new_layer.bind_key("Left", overwrite=True)
def left(layer):
_translate_shape(layer, -1, -1)
@new_layer.bind_key("Right", overwrite=True)
def right(layer):
_translate_shape(layer, -1, 1)
@new_layer.bind_key("Up", overwrite=True)
def up(layer):
_translate_shape(layer, -2, -1)
@new_layer.bind_key("Down", overwrite=True)
def down(layer):
_translate_shape(layer, -2, 1)
elif isinstance(new_layer, napari.layers.Points):
_text_bound_init(new_layer)
new_layer.metadata["init_translate"] = new_layer.translate.copy()
new_layer.metadata["init_scale"] = new_layer.scale.copy()
return None
def image_tuple(input:"napari.layers.Image", out:ImgArray, translate="inherit", **kwargs):
data = input.data
scale = make_world_scale(data)
if out.dtype.kind == "c":
out = np.abs(out)
contrast_limits = [float(x) for x in out.range]
if data.ndim == out.ndim:
if isinstance(translate, str) and translate == "inherit":
translate = input.translate
elif data.ndim > out.ndim:
if isinstance(translate, str) and translate == "inherit":
translate = [input.translate[i] for i in range(data.ndim) if data.axes[i] in out.axes]
scale = [scale[i] for i in range(data.ndim) if data.axes[i] in out.axes]
else:
if isinstance(translate, str) and translate == "inherit":
translate = [0.0] + list(input.translate)
scale = [1.0] + list(scale)
kw = dict(scale=scale, colormap=input.colormap, translate=translate,
blending=input.blending, contrast_limits=contrast_limits)
kw.update(kwargs)
return (out, kw, "image")
def label_tuple(input:"napari.layers.Labels", out:Label, translate="inherit", **kwargs):
data = input.data
scale = make_world_scale(data)
if isinstance(translate, str) and translate == "inherit":
translate = input.translate
kw = dict(opacity=0.3, scale=scale, translate=translate)
kw.update(kwargs)
return (out, kw, "labels")
def _translate_shape(layer, ind, direction):
data = layer.data
selected = layer.selected_data
for i in selected:
data[i][:, ind] += direction
layer.data = data
layer.selected_data = selected
layer._set_highlight()
return None
def _text_bound_init(new_layer):
@new_layer.bind_key("Alt-A", overwrite=True)
def select_all(layer):
layer.selected_data = set(np.arange(len(layer.data)))
layer._set_highlight()
@new_layer.bind_key("Control-Shift-<", overwrite=True)
def size_down(layer):
if layer.text.size > 4:
layer.text.size -= 1.0
else:
layer.text.size *= 0.8
@new_layer.bind_key("Control-Shift->", overwrite=True)
def size_up(layer):
if layer.text.size < 4:
layer.text.size += 1.0
else:
layer.text.size /= 0.8
return None
def viewer_imread(viewer:"napari.Viewer", path:str):
if "*" in path or os.path.getsize(path)/1e9 < Const["MAX_GB"]:
img = imread(path)
else:
img = lazy_imread(path)
layer = add_labeledarray(viewer, img)
viewer.text_overlay.font_size = 4 * Const["FONT_SIZE_FACTOR"]
viewer.text_overlay.visible = True
viewer.text_overlay.color = "white"
viewer.text_overlay.text = repr(img)
return layer
def add_labeledarray(viewer:"napari.Viewer", img:LabeledArray, **kwargs):
if not img.axes.is_sorted() and img.ndim > 2:
msg = f"Input image has axes that are not correctly sorted: {img.axes}. "\
"This may cause unexpected results."
warnings.warn(msg, UserWarning)
chn_ax = img.axisof("c") if "c" in img.axes else None
if isinstance(img, PhaseArray) and not "colormap" in kwargs.keys():
kwargs["colormap"] = "hsv"
kwargs["contrast_limits"] = img.border
elif img.dtype.kind == "c" and not "colormap" in kwargs.keys():
kwargs["colormap"] = "plasma"
scale = make_world_scale(img)
if "name" in kwargs:
name = kwargs.pop("name")
else:
name = "No-Name" if img.name is None else img.name
if chn_ax is not None:
name = [f"[C{i}]{name}" for i in range(img.shape.c)]
else:
name = [name]
if img.dtype.kind == "c":
img = np.abs(img)
layer = viewer.add_image(img, channel_axis=chn_ax, scale=scale,
name=name if len(name)>1 else name[0],
**kwargs)
if viewer.scale_bar.unit:
if viewer.scale_bar.unit != img.scale_unit:
msg = f"Incompatible scales. Viewer is {viewer.scale_bar.unit} while image is {img.scale_unit}."
warnings.warn(msg)
else:
viewer.scale_bar.unit = img.scale_unit
new_axes = [a for a in img.axes if a != "c"]
# add axis labels to slide bars and image orientation.
if len(new_axes) >= len(viewer.dims.axis_labels):
viewer.dims.axis_labels = new_axes
return layer
def add_labels(viewer:"napari.Viewer", labels:Label, opacity:float=0.3, name:str|list[str]=None,
**kwargs):
scale = make_world_scale(labels)
# prepare label list
if "c" in labels.axes:
lbls = labels.split("c")
else:
lbls = [labels]
# prepare name list
if isinstance(name, list):
names = [f"[L]{n}" for n in name]
elif isinstance(name, str):
names = [f"[L]{name}"] * len(lbls)
else:
names = [labels.name]
kw = dict(opacity=opacity, scale=scale)
kw.update(kwargs)
out_layers = []
for lbl, name in zip(lbls, names):
layer = viewer.add_labels(lbl.value, name=name, **kw)
out_layers.append(layer)
return out_layers
def add_dask(viewer:"napari.Viewer", img:LazyImgArray, **kwargs):
chn_ax = img.axisof("c") if "c" in img.axes else None
scale = make_world_scale(img)
if "contrast_limits" not in kwargs.keys():
# contrast limits should be determined quickly.
leny, lenx = img.shape[-2:]
sample = img.img[..., ::leny//min(10, leny), ::lenx//min(10, lenx)]
kwargs["contrast_limits"] = [float(sample.min().compute()),
float(sample.max().compute())]
name = "No-Name" if img.name is None else img.name
if chn_ax is not None:
name = [f"[Lazy][C{i}]{name}" for i in range(img.shape.c)]
else:
name = ["[Lazy]" + name]
layer = viewer.add_image(img, channel_axis=chn_ax, scale=scale,
name=name if len(name)>1 else name[0], **kwargs)
viewer.scale_bar.unit = img.scale_unit
new_axes = [a for a in img.axes if a != "c"]
# add axis labels to slide bars and image orientation.
if len(new_axes) >= len(viewer.dims.axis_labels):
viewer.dims.axis_labels = new_axes
return layer
def add_points(viewer:"napari.Viewer", points, **kwargs):
if isinstance(points, MarkerFrame):
scale = make_world_scale(points)
points = points.get_coords()
else:
scale=None
if "c" in points._axes:
pnts = points.split("c")
else:
pnts = [points]
for each in pnts:
metadata = {"axes": str(each._axes), "scale": each.scale}
kw = dict(size=3.2, face_color=[0,0,0,0], metadata=metadata, edge_color=viewer.window.cmap())
kw.update(kwargs)
viewer.add_points(each.values, scale=scale, **kw)
return None
def add_tracks(viewer:"napari.Viewer", track:TrackFrame, **kwargs):
if "c" in track._axes:
track_list = track.split("c")
else:
track_list = [track]
scale = make_world_scale(track[[a for a in track._axes if a != Const["ID_AXIS"]]])
for tr in track_list:
metadata = {"axes": str(tr._axes), "scale": tr.scale}
viewer.add_tracks(tr, scale=scale, metadata=metadata, **kwargs)
return None
def add_paths(viewer:"napari.Viewer", paths:PathFrame, **kwargs):
if "c" in paths._axes:
path_list = paths.split("c")
else:
path_list = [paths]
scale = make_world_scale(paths[[a for a in paths._axes if a != Const["ID_AXIS"]]])
kw = {"edge_color":"lime", "edge_width":0.3, "shape_type":"path"}
kw.update(kwargs)
for path in path_list:
metadata = {"axes": str(path._axes), "scale": path.scale}
paths = [single_path.values for single_path in path.split(Const["ID_AXIS"])]
viewer.add_shapes(paths, scale=scale, metadata=metadata, **kw)
return None
def add_table(viewer:"napari.Viewer", data=None, columns=None, name=None):
from .widgets import TableWidget
table = TableWidget(viewer, data, columns=columns, name=name)
viewer.window.add_dock_widget(table, area="right", name=table.name)
return table
def get_viewer_scale(viewer:"napari.Viewer"):
return {a: r[2] for a, r in zip(viewer.dims.axis_labels, viewer.dims.range)}
def layer_to_impy_object(viewer:"napari.Viewer", layer):
"""
Convert layer to real data.
Parameters
----------
layer : napari.layers.Layer
Input layer.
Returns
-------
ImgArray, Label, MarkerFrame or TrackFrame, or Shape features.
"""
data = layer.data
axes = "".join(viewer.dims.axis_labels)
scale = get_viewer_scale(viewer)
if isinstance(layer, (napari.layers.Image, napari.layers.Labels)):
# manually drawn ones are np.ndarray, need conversion
if type(data) is np.ndarray:
ndim = data.ndim
axes = axes[-ndim:]
if isinstance(layer, napari.layers.Image):
data = ImgArray(data, name=layer.name, axes=axes, dtype=layer.data.dtype)
else:
try:
data = layer.metadata["destination_image"].labels
except (KeyError, AttributeError):
data = Label(data, name=layer.name, axes=axes)
data.set_scale({k: v for k, v in scale.items() if k in axes})
return data
elif isinstance(layer, napari.layers.Shapes):
return data
elif isinstance(layer, napari.layers.Points):
ndim = data.shape[1]
axes = axes[-ndim:]
df = MarkerFrame(data, columns=layer.metadata.get("axes", axes))
df.set_scale(layer.metadata.get("scale",
{k: v for k, v in scale.items() if k in axes}))
return df.as_standard_type()
elif isinstance(layer, napari.layers.Tracks):
ndim = data.shape[1]
axes = axes[-ndim:]
df = TrackFrame(data, columns=layer.metadata.get("axes", axes))
df.set_scale(layer.metadata.get("scale",
{k: v for k, v in scale.items() if k in axes}))
return df.as_standard_type()
else:
raise NotImplementedError(type(layer))
def get_a_selected_layer(viewer:"napari.Viewer"):
selected = list(viewer.layers.selection)
if len(selected) == 0:
raise ValueError("No layer is selected.")
elif len(selected) > 1:
raise ValueError("More than one layers are selected.")
return selected[0]
def crop_rotated_rectangle(img:LabeledArray, crds:np.ndarray, dims="yx"):
translate = np.min(crds, axis=0)
# check is sorted
ids = [img.axisof(a) for a in dims]
if sorted(ids) == ids:
cropped_img = img.rotated_crop(crds[1], crds[0], crds[2], dims=dims)
else:
crds = np.fliplr(crds)
cropped_img = img.rotated_crop(crds[3], crds[0], crds[2], dims=dims)
return cropped_img, translate
def crop_rectangle(img:LabeledArray, crds:np.ndarray, dims="yx") -> tuple[LabeledArray, np.ndarray]:
start = crds[0]
end = crds[2]
sl = []
translate = np.empty(2)
for i in [0, 1]:
sl0 = sorted([start[i], end[i]])
x0 = max(int(sl0[0]), 0)
x1 = min(int(sl0[1]), img.sizeof(dims[i]))
sl.append(f"{dims[i]}={x0}:{x1}")
translate[i] = x0
area_to_crop = ";".join(sl)
cropped_img = img[area_to_crop]
return cropped_img, translate
class ColorCycle:
def __init__(self, cmap="rainbow") -> None:
import matplotlib.pyplot as plt
self.cmap = plt.get_cmap(cmap, 16)
self.color_id = 0
def __call__(self):
"""return next colormap"""
self.color_id += 1
return list(self.cmap(self.color_id * (self.cmap.N//2+1) % self.cmap.N))
| [
"numpy.abs",
"numpy.mean",
"os.path.getsize",
"numpy.fliplr",
"numpy.empty",
"numpy.min",
"warnings.warn",
"matplotlib.pyplot.get_cmap"
] | [((14183, 14203), 'numpy.min', 'np.min', (['crds'], {'axis': '(0)'}), '(crds, axis=0)\n', (14189, 14203), True, 'import numpy as np\n'), ((14700, 14711), 'numpy.empty', 'np.empty', (['(2)'], {}), '(2)\n', (14708, 14711), True, 'import numpy as np\n'), ((3668, 3679), 'numpy.abs', 'np.abs', (['out'], {}), '(out)\n', (3674, 3679), True, 'import numpy as np\n'), ((6473, 6504), 'warnings.warn', 'warnings.warn', (['msg', 'UserWarning'], {}), '(msg, UserWarning)\n', (6486, 6504), False, 'import warnings\n'), ((7190, 7201), 'numpy.abs', 'np.abs', (['img'], {}), '(img)\n', (7196, 7201), True, 'import numpy as np\n'), ((14400, 14415), 'numpy.fliplr', 'np.fliplr', (['crds'], {}), '(crds)\n', (14409, 14415), True, 'import numpy as np\n'), ((15166, 15188), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap', '(16)'], {}), '(cmap, 16)\n', (15178, 15188), True, 'import matplotlib.pyplot as plt\n'), ((2648, 2677), 'numpy.mean', 'np.mean', (['new_layer.scale[-2:]'], {}), '(new_layer.scale[-2:])\n', (2655, 2677), True, 'import numpy as np\n'), ((7586, 7604), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (7599, 7604), False, 'import warnings\n'), ((5849, 5870), 'os.path.getsize', 'os.path.getsize', (['path'], {}), '(path)\n', (5864, 5870), False, 'import os\n')] |
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, <NAME> and <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import numpy as np
from numpy.testing import assert_equal, assert_, run_module_suite
import unittest
from qutip import *
import qutip.settings as qset
if qset.has_openmp:
from qutip.cy.openmp.benchmark import _spmvpy, _spmvpy_openmp
@unittest.skipIf(qset.has_openmp == False, 'OPENMP not available.')
def test_openmp_spmv():
"OPENMP : spmvpy_openmp == spmvpy"
for k in range(100):
L = rand_herm(10,0.25).data
vec = rand_ket(L.shape[0],0.25).full().ravel()
out = np.zeros_like(vec)
out_openmp = np.zeros_like(vec)
_spmvpy(L.data, L.indices, L.indptr, vec, 1, out)
_spmvpy_openmp(L.data, L.indices, L.indptr, vec, 1, out_openmp, 2)
assert_(np.allclose(out, out_openmp, 1e-15))
@unittest.skipIf(qset.has_openmp == False, 'OPENMP not available.')
def test_openmp_mesolve():
"OPENMP : mesolve"
N = 100
wc = 1.0 * 2 * np.pi # cavity frequency
wa = 1.0 * 2 * np.pi # atom frequency
g = 0.05 * 2 * np.pi # coupling strength
kappa = 0.005 # cavity dissipation rate
gamma = 0.05 # atom dissipation rate
n_th_a = 1 # temperature in frequency units
use_rwa = 0
# operators
a = tensor(destroy(N), qeye(2))
sm = tensor(qeye(N), destroy(2))
# Hamiltonian
if use_rwa:
H = wc * a.dag() * a + wa * sm.dag() * sm + g * (a.dag() * sm + a * sm.dag())
else:
H = wc * a.dag() * a + wa * sm.dag() * sm + g * (a.dag() + a) * (sm + sm.dag())
c_op_list = []
rate = kappa * (1 + n_th_a)
if rate > 0.0:
c_op_list.append(np.sqrt(rate) * a)
rate = kappa * n_th_a
if rate > 0.0:
c_op_list.append(np.sqrt(rate) * a.dag())
rate = gamma
if rate > 0.0:
c_op_list.append(np.sqrt(rate) * sm)
n = N - 2
psi0 = tensor(basis(N, n), basis(2, 1))
tlist = np.linspace(0, 1, 100)
opts = Options(use_openmp=False)
out = mesolve(H, psi0, tlist, c_op_list, [a.dag() * a, sm.dag() * sm], options=opts)
opts = Options(use_openmp=True)
out_omp = mesolve(H, psi0, tlist, c_op_list, [a.dag() * a, sm.dag() * sm], options=opts)
assert_(np.allclose(out.expect[0],out_omp.expect[0]))
assert_(np.allclose(out.expect[1],out_omp.expect[1]))
@unittest.skipIf(qset.has_openmp == False, 'OPENMP not available.')
def test_openmp_mesolve_td():
"OPENMP : mesolve (td)"
N = 100
wc = 1.0 * 2 * np.pi # cavity frequency
wa = 1.0 * 2 * np.pi # atom frequency
g = 0.5 * 2 * np.pi # coupling strength
kappa = 0.005 # cavity dissipation rate
gamma = 0.05 # atom dissipation rate
n_th_a = 1 # temperature in frequency units
use_rwa = 0
# operators
a = tensor(destroy(N), qeye(2))
sm = tensor(qeye(N), destroy(2))
# Hamiltonian
H0 = wc * a.dag() * a + wa * sm.dag() * sm
H1 = g * (a.dag() + a) * (sm + sm.dag())
H = [H0, [H1,'sin(t)']]
c_op_list = []
rate = kappa * (1 + n_th_a)
if rate > 0.0:
c_op_list.append(np.sqrt(rate) * a)
rate = kappa * n_th_a
if rate > 0.0:
c_op_list.append(np.sqrt(rate) * a.dag())
rate = gamma
if rate > 0.0:
c_op_list.append(np.sqrt(rate) * sm)
n = N - 10
psi0 = tensor(basis(N, n), basis(2, 1))
tlist = np.linspace(0, 1, 100)
opts = Options(use_openmp=True)
out_omp = mesolve(H, psi0, tlist, c_op_list, [a.dag() * a, sm.dag() * sm], options=opts)
opts = Options(use_openmp=False)
out = mesolve(H, psi0, tlist, c_op_list, [a.dag() * a, sm.dag() * sm], options=opts)
assert_(np.allclose(out.expect[0],out_omp.expect[0]))
assert_(np.allclose(out.expect[1],out_omp.expect[1]))
if __name__ == "__main__":
run_module_suite()
| [
"numpy.allclose",
"numpy.sqrt",
"unittest.skipIf",
"numpy.linspace",
"qutip.cy.openmp.benchmark._spmvpy",
"numpy.testing.run_module_suite",
"numpy.zeros_like",
"qutip.cy.openmp.benchmark._spmvpy_openmp"
] | [((2038, 2104), 'unittest.skipIf', 'unittest.skipIf', (['(qset.has_openmp == False)', '"""OPENMP not available."""'], {}), "(qset.has_openmp == False, 'OPENMP not available.')\n", (2053, 2104), False, 'import unittest\n'), ((2545, 2611), 'unittest.skipIf', 'unittest.skipIf', (['(qset.has_openmp == False)', '"""OPENMP not available."""'], {}), "(qset.has_openmp == False, 'OPENMP not available.')\n", (2560, 2611), False, 'import unittest\n'), ((4062, 4128), 'unittest.skipIf', 'unittest.skipIf', (['(qset.has_openmp == False)', '"""OPENMP not available."""'], {}), "(qset.has_openmp == False, 'OPENMP not available.')\n", (4077, 4128), False, 'import unittest\n'), ((3665, 3687), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (3676, 3687), True, 'import numpy as np\n'), ((5120, 5142), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (5131, 5142), True, 'import numpy as np\n'), ((5546, 5564), 'numpy.testing.run_module_suite', 'run_module_suite', ([], {}), '()\n', (5562, 5564), False, 'from numpy.testing import assert_equal, assert_, run_module_suite\n'), ((2298, 2316), 'numpy.zeros_like', 'np.zeros_like', (['vec'], {}), '(vec)\n', (2311, 2316), True, 'import numpy as np\n'), ((2338, 2356), 'numpy.zeros_like', 'np.zeros_like', (['vec'], {}), '(vec)\n', (2351, 2356), True, 'import numpy as np\n'), ((2365, 2414), 'qutip.cy.openmp.benchmark._spmvpy', '_spmvpy', (['L.data', 'L.indices', 'L.indptr', 'vec', '(1)', 'out'], {}), '(L.data, L.indices, L.indptr, vec, 1, out)\n', (2372, 2414), False, 'from qutip.cy.openmp.benchmark import _spmvpy, _spmvpy_openmp\n'), ((2423, 2489), 'qutip.cy.openmp.benchmark._spmvpy_openmp', '_spmvpy_openmp', (['L.data', 'L.indices', 'L.indptr', 'vec', '(1)', 'out_openmp', '(2)'], {}), '(L.data, L.indices, L.indptr, vec, 1, out_openmp, 2)\n', (2437, 2489), False, 'from qutip.cy.openmp.benchmark import _spmvpy, _spmvpy_openmp\n'), ((3955, 4000), 'numpy.allclose', 'np.allclose', (['out.expect[0]', 'out_omp.expect[0]'], {}), '(out.expect[0], out_omp.expect[0])\n', (3966, 4000), True, 'import numpy as np\n'), ((4013, 4058), 'numpy.allclose', 'np.allclose', (['out.expect[1]', 'out_omp.expect[1]'], {}), '(out.expect[1], out_omp.expect[1])\n', (4024, 4058), True, 'import numpy as np\n'), ((5410, 5455), 'numpy.allclose', 'np.allclose', (['out.expect[0]', 'out_omp.expect[0]'], {}), '(out.expect[0], out_omp.expect[0])\n', (5421, 5455), True, 'import numpy as np\n'), ((5468, 5513), 'numpy.allclose', 'np.allclose', (['out.expect[1]', 'out_omp.expect[1]'], {}), '(out.expect[1], out_omp.expect[1])\n', (5479, 5513), True, 'import numpy as np\n'), ((2506, 2541), 'numpy.allclose', 'np.allclose', (['out', 'out_openmp', '(1e-15)'], {}), '(out, out_openmp, 1e-15)\n', (2517, 2541), True, 'import numpy as np\n'), ((3393, 3406), 'numpy.sqrt', 'np.sqrt', (['rate'], {}), '(rate)\n', (3400, 3406), True, 'import numpy as np\n'), ((3483, 3496), 'numpy.sqrt', 'np.sqrt', (['rate'], {}), '(rate)\n', (3490, 3496), True, 'import numpy as np\n'), ((3570, 3583), 'numpy.sqrt', 'np.sqrt', (['rate'], {}), '(rate)\n', (3577, 3583), True, 'import numpy as np\n'), ((4847, 4860), 'numpy.sqrt', 'np.sqrt', (['rate'], {}), '(rate)\n', (4854, 4860), True, 'import numpy as np\n'), ((4937, 4950), 'numpy.sqrt', 'np.sqrt', (['rate'], {}), '(rate)\n', (4944, 4950), True, 'import numpy as np\n'), ((5024, 5037), 'numpy.sqrt', 'np.sqrt', (['rate'], {}), '(rate)\n', (5031, 5037), True, 'import numpy as np\n')] |
# coding: utf-8
import numpy as np
import math
import functools
import json
import datetime
import uuid
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import Variable, serializers
from chainer.backends import cuda
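# Baseline policy used throughout the playouts: pick one of the legal moves
# uniformly at random.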
def choice_random(player):
_, _, putable_position_nums = player
choice_data = { 'position_num': np.random.choice(putable_position_nums) }
return choice_data
class Reversi:
@classmethod
def get_init_board(cls):
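        # 8x8 board flattened into a length-64 array: 1 = black, -1 = white, 0 = empty.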
board = np.array([0] * 64, dtype=np.float32)
board[28] = board[35] = 1
board[27] = board[36] = -1
return board
@classmethod
def get_flip_positions_by_row_column(cls, board, is_black, row, column, row_add, column_add):
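        # Walk from (row, column) in the direction (row_add, column_add),
        # collecting opponent stones; they flip only if the run is terminated
        # by one of the current player's own stones.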
position_nums = []
own, pair = (1, -1) if is_black else (-1, 1)
row += row_add
column += column_add
exists = False
valid = False
while row >= 0 and row < 8 and column >= 0 and column < 8:
position_num = row * 8 + column
if exists == True and board[position_num] == own:
valid = True
break
if board[position_num] != pair:
break
position_nums.append(position_num)
exists = True
row += row_add
column += column_add
if valid == False:
position_nums = []
return position_nums
@classmethod
def get_flip_positions(cls, board, is_black, position_num):
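        # Union of the stones flipped in all eight directions when the given
        # player puts a stone at position_num; an empty list means the move is illegal.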
position_nums = []
if not (position_num >= 0 and position_num <= 63):
return position_nums
if board[position_num] != 0:
return position_nums
column = position_num % 8
row = int(position_num / 8)
row_column_adds = ((0,1),(0,-1),(1,0),(-1,0),(1,1),(1,-1),(-1,1),(-1,-1))
for row_add, column_add in row_column_adds:
position_nums.extend(cls.get_flip_positions_by_row_column(board, is_black, row, column, row_add, column_add))
return position_nums
@classmethod
def is_putable_position_num(cls, board, is_black, position_num):
position_nums = cls.get_flip_positions(board, is_black, position_num)
return len(position_nums) != 0
@classmethod
def get_putable_position_nums(cls, board, is_black):
return [num for num in range(64) if cls.is_putable_position_num(board, is_black, num)]
@classmethod
def get_player(cls, board, is_black = True):
return (board, is_black, cls.get_putable_position_nums(board, is_black))
@classmethod
def is_end_board(cls, board):
return len(np.where(board == 0)[0]) == 0
@classmethod
def get_stone_num(cls, board):
black_num = len(np.where(board == 1)[0])
white_num = len(np.where(board == -1)[0])
return black_num, white_num
@classmethod
def put(cls, player, position_num):
board, is_black, _ = player
board = board.copy()
if position_num is not None:
own = 1 if is_black else -1
position_nums = cls.get_flip_positions(board, is_black, position_num)
if len(position_nums) > 0:
board[position_num] = own
                for flip_position_num in position_nums:
                    board[flip_position_num] = own
return board
@classmethod
def playout(cls, player, position_num):
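        # Play position_num, finish the game with uniformly random moves on both
        # sides, and report whether the player to move ends up winning.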
_, is_black, _ = player
board = cls.put(player, position_num)
puts = cls.game(choice_random, choice_random, board, False)
is_win = cls.is_win_game(puts, is_black)
return is_win
@classmethod
def is_putable(cls, player):
_, _, putable_position_nums = player
return len(putable_position_nums) > 0
@classmethod
def render_board(cls, player):
board, is_black, putable_position_nums = player
black, white = "○", "●" # 1, -1
black_num, white_num = cls.get_stone_num(board)
display_board = [i if v == 0 else " {} ".format(black if v == 1 else white) for i, v in enumerate(board)]
row = " {:>3} | {:>3} | {:>3} | {:>3} | {:>3} | {:>3} | {:>3} | {:>3} "
hr = "\n------------------------------------------------\n"
layout = row + hr + row + hr + row + hr + row + hr + row + hr + row + hr + row + hr + row
print((layout).format(*display_board))
print("{}:{} {}:{}".format(black, black_num, white, white_num))
print("{}: putable position numbers are {}".format(black if is_black else white, putable_position_nums))
@classmethod
def is_pass_last_put(cls, game):
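        # A pass is recorded as a step whose chosen position_num is None.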
if len(game) == 0:
return False
_, choice_data = game[-1]
position_num = choice_data['position_num']
return position_num == None
@classmethod
def is_win_game(cls, game, is_black):
is_win = False
if len(game) > 0:
player_last, _ = game[-1]
board_last, _, _ = player_last
black_num, white_num = cls.get_stone_num(board_last)
if (is_black and black_num > white_num) or (not is_black and black_num < white_num):
is_win = True
return is_win
@classmethod
def is_end_game(cls, game, player):
board, _, _ = player
return cls.is_end_board(board) or (cls.is_pass_last_put(game) and not cls.is_putable(player))
@classmethod
def game(cls, choice_black, choice_white, board = None, is_render = True, limit_step_num = None):
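        # Play out a full game and return the list of (player_state, choice_data)
        # steps; the game ends when the board is full or both sides pass in a row.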
steps = []
if board is None:
board = cls.get_init_board()
player = cls.get_player(board)
step_num = 0
while True:
if limit_step_num is not None and step_num >= limit_step_num:
break
board, is_black, _ = player
if is_render:
cls.render_board(player)
if cls.is_end_game(steps, player):
break
position_num = None
if cls.is_putable(player):
choice = choice_black if is_black else choice_white
choice_data = choice(player)
position_num = choice_data["position_num"]
if is_render:
print(position_num)
board = cls.put(player, position_num)
            else:
                # Record the pass explicitly: without this, the stale choice_data
                # from the previous move would be appended to steps, so
                # is_pass_last_put could never detect a double pass and the loop
                # could not terminate early when neither side can move.
                choice_data = { 'position_num': None }
                if is_render:
                    print("pass")
steps.append((player, choice_data))
player = cls.get_player(board, not is_black)
step_num += 1
return steps
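
# Example usage (a sketch): play one random-vs-random game, rendering each position.
#   steps = Reversi.game(choice_random, choice_random)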
class DualNet(chainer.Chain):
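    # Dual-headed network in the AlphaZero style: a shared convolutional trunk
    # feeding a 64-way policy head (one logit per square) and a scalar value
    # head squashed into [-1, 1].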
def __init__(self):
super(DualNet, self).__init__()
with self.init_scope():
self.conv0 = L.Convolution2D(4, 48, 3, pad=1)
self.conv1 = L.Convolution2D(48, 48, 3, pad=1)
self.conv2 = L.Convolution2D(48, 48, 3, pad=1)
self.conv3 = L.Convolution2D(48, 48, 3, pad=1)
self.conv4 = L.Convolution2D(48, 48, 3, pad=1)
self.bn0 = L.BatchNormalization(48)
self.bn1 = L.BatchNormalization(48)
self.bn2 = L.BatchNormalization(48)
self.bn3 = L.BatchNormalization(48)
self.bn4 = L.BatchNormalization(48)
self.conv_p1 = L.Convolution2D(48, 2, 1)
self.bn_p1 = L.BatchNormalization(2)
self.fc_p2 = L.Linear(8 * 8 * 2, 8 * 8)
self.conv_v1 = L.Convolution2D(48, 1, 1)
self.bn_v1 = L.BatchNormalization(1)
self.fc_v2 = L.Linear(8 * 8, 48)
self.fc_v3 = L.Linear(48, 1)
def __call__(self, x):
# tiny ResNet
h0 = F.relu(self.bn0(self.conv0(x)))
h1 = F.relu(self.bn1(self.conv1(h0)))
h2 = F.relu(self.bn2(self.conv2(h1)) + h0)
h3 = F.relu(self.bn3(self.conv3(h2)))
h4 = F.relu(self.bn4(self.conv4(h3)) + h2)
h_p1 = F.relu(self.bn_p1(self.conv_p1(h4)))
policy = self.fc_p2(h_p1)
h_v1 = F.relu(self.bn_v1(self.conv_v1(h4)))
h_v2 = F.relu(self.fc_v2(h_v1))
value = F.tanh(self.fc_v3(h_v2))
return policy, value
def load(self, filename):
serializers.load_npz(filename, self)
def save(self, filename):
serializers.save_npz(filename, self)
@classmethod
def get_input_data(cls, player):
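        # Encode the position as four 8x8 binary planes: own stones, opponent
        # stones, empty squares, and legal moves for the player to move.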
board, is_black, putable_position_nums = player
mine = np.array([1 if (is_black and v == 1) or (not is_black and v == -1) else 0 for v in board], dtype=np.float32)
yours = np.array([1 if (is_black and v == -1) or (not is_black and v == 1) else 0 for v in board], dtype=np.float32)
blank = np.array([1 if v == 0 else 0 for v in board], dtype=np.float32)
putable = np.array([1 if i in putable_position_nums else 0 for i in range(64)], dtype=np.float32)
# 64 + 64 + 64 + 64
x = np.concatenate((mine, yours, blank, putable)).reshape((1, 4, 8, 8))
return x
class ChoiceReplaySteps:
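    # Replays a recorded sequence of moves; -1 entries are skipped, and a recorded
    # move that is no longer legal falls back to a random legal move.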
def __init__(self, steps):
self._i = 0
self._steps = steps
def __call__(self, player):
_, _, putable_position_nums = player
# skip -1
while True:
step = self._steps[self._i]
if step != -1:
break
self._i += 1
if step not in putable_position_nums:
step = np.random.choice(putable_position_nums)
self._i += 1
choice_data = { 'position_num': step }
return choice_data
def choice_primitive_monte_carlo(player, try_num = 150):
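    # Flat Monte Carlo: run try_num random playouts from every legal move and
    # choose the move that won most often.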
_, _, putable_position_nums = player
position_scores = np.zeros(len(putable_position_nums))
for _ in range(try_num):
playouts = [Reversi.playout(player, position_num) for position_num in putable_position_nums]
position_scores += np.array([1 if is_win else 0 for is_win in playouts])
index = np.random.choice(np.where(position_scores == position_scores.max())[0])
choice_data = { 'position_num': putable_position_nums[index] }
return choice_data
class ChoiceMonteCarloTreeSearch:
def _get_node(self, player, position_num):
return {
'player': player,
'position_num': position_num,
'try_num': 0,
'win_num': 0,
'child_nodes': None
}
def _get_initial_nodes(self, player):
board, is_black, putable_position_nums = player
nodes = [self._get_node(Reversi.get_player(Reversi.put(player, position_num), not is_black), position_num) for position_num in putable_position_nums]
if len(putable_position_nums) == 0:
nodes = [self._get_node(Reversi.get_player(board, not is_black), None)]
return nodes
def _get_ucb1(self, node, total_num):
return (node['win_num'] / node['try_num']) + math.sqrt(2 * math.log(total_num) / node['try_num'])
def _selection_node_index(self, nodes):
total_num = functools.reduce(lambda total_num, node: total_num + node['try_num'], nodes, 0)
ucb1s = np.array([self._get_ucb1(node, total_num) if node['try_num'] != 0 else -1 for node in nodes])
indexs = np.where(ucb1s == -1)[0] # -1 is infinity
if len(indexs) == 0:
indexs = np.where(ucb1s == ucb1s.max())[0]
index = np.random.choice(indexs)
return index
def _selection_expansion(self, nodes, expansion_num):
game = []
node, path = None, []
target_nodes = nodes
while True:
index = self._selection_node_index(target_nodes)
path.append(index)
node = target_nodes[index]
if node['child_nodes'] is None:
if node['try_num'] >= expansion_num:
if Reversi.is_end_game(game, node['player']):
break
# expansion
node['child_nodes'] = self._get_initial_nodes(node['player'])
else:
break
target_nodes = node['child_nodes']
choice_data = { 'position_num': node['position_num'] }
game.append((node['player'], choice_data))
return nodes, node, path
def _evaluation(self, node, is_black):
is_win = Reversi.playout(node['player'], node['position_num'])
_, node_is_black, _ = node['player']
node_is_win = (is_black == node_is_black and is_win) or (is_black != node_is_black and not is_win)
return node_is_win
def _backup(self, nodes, path, is_win):
target_nodes = nodes
for index in path:
target_nodes[index]['try_num'] += 1
if is_win:
target_nodes[index]['win_num'] += 1
target_nodes = target_nodes[index]['child_nodes']
return nodes
def _choice_node_index(self, nodes):
try_nums = np.array([node['try_num'] for node in nodes])
indexs = np.where(try_nums == try_nums.max())[0]
index = np.random.choice(indexs)
return index
def __call__(self, player, try_num = 1500, expansion_num = 5):
_, is_black, _ = player
nodes = self._get_initial_nodes(player)
for _ in range(try_num):
nodes, node, path = self._selection_expansion(nodes, expansion_num)
is_win = self._evaluation(node, is_black)
nodes = self._backup(nodes, path, is_win)
index = self._choice_node_index(nodes)
choice = nodes[index]['position_num']
choice_data = { 'position_num': choice }
return choice_data
class ChoiceSupervisedLearningPolicyNetwork:
def __init__(self, model):
self.model = model
def __call__(self, player):
_, _, putable_position_nums = player
policy, _ = self.model(DualNet.get_input_data(player))
putable_position_probabilities = np.array([policy[0].data[num] for num in putable_position_nums])
indexs = np.where(putable_position_probabilities == putable_position_probabilities.max())[0]
index = np.random.choice(indexs)
choice = putable_position_nums[index]
choice_data = { 'position_num': choice }
return choice_data
class ChoiceAsynchronousPolicyAndValueMonteCarloTreeSearch:
def __init__(self, model, is_strict_choice = True, try_num = 1500):
self.model = model
self.is_strict_choice = is_strict_choice
self.default_params = {
'try_num': try_num
}
def _get_node(self, player, position_num, probability):
return {
'player': player,
'position_num': position_num,
'try_num': 0,
'win_num': 0,
'probability': probability,
'value': None,
'child_nodes': None
}
def _get_initial_nodes(self, player):
board, is_black, putable_position_nums = player
policy, value = self.model(DualNet.get_input_data(player))
putable_position_probabilities = np.array([policy[0].data[num] for num in putable_position_nums])
putable_position_probabilities /= putable_position_probabilities.sum()
v = value[0][0].data
nodes = [self._get_node(Reversi.get_player(Reversi.put(player, position_num), not is_black), position_num, putable_position_probabilities[i]) for i, position_num in enumerate(putable_position_nums)]
if len(putable_position_nums) == 0:
nodes = [self._get_node(Reversi.get_player(board, not is_black), None, 1.0)]
return v, nodes
def _get_score(self, node, total_num):
return (node['win_num'] / (1 + node['try_num'])) + node['probability'] * (math.sqrt(total_num) / (1 + node['try_num']))
def _selection_node_index(self, nodes):
total_num = functools.reduce(lambda total_num, node: total_num + node['try_num'], nodes, 0)
scores = np.array([self._get_score(node, total_num) for node in nodes])
indexs = np.where(scores == scores.max())[0]
index = np.random.choice(indexs)
return index
def _selection_expansion(self, nodes):
game = []
node, path = None, []
target_nodes = nodes
while True:
index = self._selection_node_index(target_nodes)
path.append(index)
node = target_nodes[index]
if node['child_nodes'] is None:
# expansion
value, child_nodes = self._get_initial_nodes(node['player'])
node['value'] = value
if not Reversi.is_end_game(game, node['player']):
node['child_nodes'] = child_nodes
break
target_nodes = node['child_nodes']
choice_data = { 'position_num': node['position_num'] }
game.append((node['player'], choice_data))
return nodes, node, path
def _backup(self, nodes, path, value):
target_nodes = nodes
for index in path:
target_nodes[index]['try_num'] += 1
target_nodes[index]['win_num'] += value
target_nodes = target_nodes[index]['child_nodes']
return nodes
def _choice_node_index(self, nodes):
try_nums = np.array([node['try_num'] for node in nodes])
try_nums_sum = try_nums.sum()
index = 0
if self.is_strict_choice or try_nums_sum == 0:
indexs = np.where(try_nums == try_nums.max())[0]
index = np.random.choice(indexs)
else:
try_nums = try_nums.astype(np.float32)
try_nums /= try_nums_sum # to probability
index = np.random.choice(range(len(try_nums)), p = try_nums)
return index
def __call__(self, player, try_num = None):
try_num = self.default_params['try_num'] if try_num is None else try_num
_, nodes = self._get_initial_nodes(player)
for _ in range(try_num):
nodes, node, path = self._selection_expansion(nodes)
nodes = self._backup(nodes, path, node['value'])
index = self._choice_node_index(nodes)
choice = nodes[index]['position_num']
candidates = [{ 'position_num': "{}".format(node['position_num']), 'try_num': "{}".format(node['try_num']) } for node in nodes]
choice_data = { 'position_num': choice, 'candidates': candidates }
return choice_data
class DualNetTrainer:
def __init__(self, model = None, self_play_try_num = 2500, create_new_model_epoch_num = 100, evaluation_try_num = 400, evaluation_win_num = 220, try_num = 100, apv_mcts_try_num = 1500, gpu_device = -1):
if model is None:
model = DualNet()
self.default_params = {
'self_play_try_num': self_play_try_num,
'create_new_model_epoch_num': create_new_model_epoch_num,
'evaluation_try_num': evaluation_try_num,
'evaluation_win_num': evaluation_win_num,
'try_num': try_num,
'apv_mcts_try_num': apv_mcts_try_num
}
self.gpu_device = gpu_device
self._set_model(model)
def _set_model(self, model):
self.model = model
date_str = datetime.date.today().strftime("%Y%m%d")
unique_str = str(uuid.uuid4())
self.model_filename = 'data/model_{}_{}.dat'.format(date_str, unique_str)
self.model.save(self.model_filename)
print("[set model] model_filename: {}".format(self.model_filename))
def _save_self_playdata(self, steps, filename):
self_playdata = []
is_black_win = Reversi.is_win_game(steps, is_black = True)
is_white_win = Reversi.is_win_game(steps, is_black = False)
is_black = True
for step in steps:
_, choice_data = step
position_num = choice_data['position_num']
win_score = -1
if is_black_win and is_white_win:
win_score = 0
elif (is_black and is_black_win) or (not is_black and is_white_win):
win_score = 1
self_playdata.append({
"position_num": "{}".format(position_num if position_num is not None else -1),
"win_score": "{}".format(win_score),
'candidates': choice_data['candidates']
})
is_black = not is_black
with open(filename, 'a') as f:
f.write("{}\n".format(json.dumps(self_playdata)))
def _self_play(self, model1, model2, try_num = None, is_save_data = True, is_strict_choice = True):
try_num = self.default_params['self_play_try_num'] if try_num is None else try_num
player1 = {
'is_model1': True,
'choice': ChoiceAsynchronousPolicyAndValueMonteCarloTreeSearch(model1, is_strict_choice, try_num = self.default_params['apv_mcts_try_num']),
'win_num': 0
}
player2 = {
'is_model1': False,
'choice': ChoiceAsynchronousPolicyAndValueMonteCarloTreeSearch(model2, is_strict_choice, try_num = self.default_params['apv_mcts_try_num']),
'win_num': 0
}
date_str = datetime.date.today().strftime("%Y%m%d")
unique_str = str(uuid.uuid4())
data_filename = 'data/self_playdata_{}_{}.dat'.format(date_str, unique_str)
print("[self play] data_filename: {}".format(data_filename))
for i in range(try_num):
steps = Reversi.game(player1['choice'], player2['choice'], is_render = False)
if Reversi.is_win_game(steps, is_black = True):
player1['win_num'] += 1
if Reversi.is_win_game(steps, is_black = False):
player2['win_num'] += 1
if is_save_data:
self._save_self_playdata(steps, data_filename)
player1, player2 = player2, player1
(model1_player, model2_player) = (player1, player2) if player1['is_model1'] else (player2, player1)
print("[self play] epoch: {} / {}, model1_win_num: {}, model2_win_num: {}".format(i + 1, try_num, model1_player['win_num'], model2_player['win_num']))
(model1_win_num, model2_win_num) = (player1['win_num'], player2['win_num']) if player1['is_model1'] else (player2['win_num'], player1['win_num'])
return model1_win_num, model2_win_num, data_filename
def _get_train_y_policy(self, candidates, temperature = 0.5):
y_policy = np.array([0] * 64, dtype=np.float32)
sum_try_num = np.array([int(candidate['try_num']) ** (1 / temperature) for candidate in candidates]).sum()
for candidate in candidates:
y_policy[int(candidate['position_num'])] = (int(candidate['try_num']) ** (1 / temperature)) / sum_try_num
return y_policy
def _get_train_x(self, steps, step_num):
position_nums = [int(step['position_num']) for step in steps]
choice1 = ChoiceReplaySteps(np.array(position_nums, dtype=np.int32)[::2])
choice2 = ChoiceReplaySteps(np.array(position_nums, dtype=np.int32)[1::2])
steps = Reversi.game(choice1, choice2, is_render = False, limit_step_num = step_num)
player, _ = steps[-1]
x = DualNet.get_input_data(player)
return x
def _get_train_random(self, steps_list):
steps = json.loads(np.random.choice(steps_list))
step_index = np.random.randint(0, len(steps) - 1)
y_policy = self._get_train_y_policy(steps[step_index]['candidates'])
y_value = np.array([[int(steps[step_index]['win_score'])]], dtype=np.float32)
x = self._get_train_x(steps, (step_index + 1))
return x, y_policy, y_value
def _get_train_batch(self, steps_list, batch_size):
batch_x, batch_y_policy, batch_y_value = [], [], []
for _ in range(batch_size):
x, y_policy, y_value = self._get_train_random(steps_list)
batch_x.append(x)
batch_y_policy.append(y_policy)
batch_y_value.append(y_value)
xp = np
if self.gpu_device >= 0:
xp = cuda.cupy
x_train = Variable(xp.array(batch_x)).reshape(-1, 4, 8, 8)
y_train_policy = Variable(xp.array(batch_y_policy)).reshape(-1, 64)
y_train_value = Variable(xp.array(batch_y_value)).reshape(-1, 1)
return x_train, y_train_policy, y_train_value
def _create_new_model(self, steps_list, epoch_num = None, batch_size = 2048):
epoch_num = self.default_params['create_new_model_epoch_num'] if epoch_num is None else epoch_num
model = DualNet()
model.load(self.model_filename)
if self.gpu_device >= 0:
cuda.get_device(self.gpu_device).use()
model.to_gpu(self.gpu_device)
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)
for i in range(epoch_num):
x_train, y_train_policy, y_train_value = self._get_train_batch(steps_list, batch_size)
y_policy, y_value = model(x_train)
model.cleargrads()
loss = F.mean_squared_error(y_policy, y_train_policy) + F.mean_squared_error(y_value, y_train_value)
loss.backward()
optimizer.update()
print("[new nodel] epoch: {} / {}, loss: {}".format(i + 1, epoch_num, loss))
if self.gpu_device >= 0:
model.to_cpu()
return model
def _evaluation(self, new_model, try_num = None, win_num = None):
try_num = self.default_params['evaluation_try_num'] if try_num is None else try_num
win_num = self.default_params['evaluation_win_num'] if win_num is None else win_num
_, new_model_win_num, _ = self._self_play(self.model, new_model, try_num = try_num, is_save_data = False)
if new_model_win_num >= win_num:
self._set_model(new_model)
def __call__(self, try_num = None):
try_num = self.default_params['try_num'] if try_num is None else try_num
for i in range(try_num):
_, _, data_filename = self._self_play(self.model, self.model, is_strict_choice = False)
steps_list = []
with open(data_filename, 'r') as f:
steps_list = f.readlines()
new_model = self._create_new_model(steps_list)
self._evaluation(new_model)
print("[train] epoch: {} / {}".format(i + 1, try_num))
return self.model, self.model_filename
def choice_human(player):
_, _, putable_position_nums = player
choice = None
while True:
try:
choice = input("Please enter number in {}: ".format(putable_position_nums))
choice = int(choice)
if choice in putable_position_nums:
break
else:
print("{} is invalid".format(choice))
except Exception:
print("{} is invalid".format(choice))
choice_data = { 'position_num': choice }
return choice_data
| [
"chainer.functions.mean_squared_error",
"numpy.random.choice",
"functools.reduce",
"chainer.serializers.save_npz",
"chainer.optimizers.Adam",
"numpy.where",
"math.sqrt",
"chainer.links.BatchNormalization",
"uuid.uuid4",
"json.dumps",
"numpy.array",
"chainer.links.Linear",
"math.log",
"chai... | [((357, 396), 'numpy.random.choice', 'np.random.choice', (['putable_position_nums'], {}), '(putable_position_nums)\n', (373, 396), True, 'import numpy as np\n'), ((500, 536), 'numpy.array', 'np.array', (['([0] * 64)'], {'dtype': 'np.float32'}), '([0] * 64, dtype=np.float32)\n', (508, 536), True, 'import numpy as np\n'), ((8168, 8204), 'chainer.serializers.load_npz', 'serializers.load_npz', (['filename', 'self'], {}), '(filename, self)\n', (8188, 8204), False, 'from chainer import Variable, serializers\n'), ((8244, 8280), 'chainer.serializers.save_npz', 'serializers.save_npz', (['filename', 'self'], {}), '(filename, self)\n', (8264, 8280), False, 'from chainer import Variable, serializers\n'), ((8411, 8521), 'numpy.array', 'np.array', (['[(1 if is_black and v == 1 or not is_black and v == -1 else 0) for v in board]'], {'dtype': 'np.float32'}), '([(1 if is_black and v == 1 or not is_black and v == -1 else 0) for\n v in board], dtype=np.float32)\n', (8419, 8521), True, 'import numpy as np\n'), ((8538, 8648), 'numpy.array', 'np.array', (['[(1 if is_black and v == -1 or not is_black and v == 1 else 0) for v in board]'], {'dtype': 'np.float32'}), '([(1 if is_black and v == -1 or not is_black and v == 1 else 0) for\n v in board], dtype=np.float32)\n', (8546, 8648), True, 'import numpy as np\n'), ((8665, 8730), 'numpy.array', 'np.array', (['[(1 if v == 0 else 0) for v in board]'], {'dtype': 'np.float32'}), '([(1 if v == 0 else 0) for v in board], dtype=np.float32)\n', (8673, 8730), True, 'import numpy as np\n'), ((9811, 9866), 'numpy.array', 'np.array', (['[(1 if is_win else 0) for is_win in playouts]'], {}), '([(1 if is_win else 0) for is_win in playouts])\n', (9819, 9866), True, 'import numpy as np\n'), ((10924, 11003), 'functools.reduce', 'functools.reduce', (["(lambda total_num, node: total_num + node['try_num'])", 'nodes', '(0)'], {}), "(lambda total_num, node: total_num + node['try_num'], nodes, 0)\n", (10940, 11003), False, 'import functools\n'), ((11274, 11298), 'numpy.random.choice', 'np.random.choice', (['indexs'], {}), '(indexs)\n', (11290, 11298), True, 'import numpy as np\n'), ((12826, 12871), 'numpy.array', 'np.array', (["[node['try_num'] for node in nodes]"], {}), "([node['try_num'] for node in nodes])\n", (12834, 12871), True, 'import numpy as np\n'), ((12945, 12969), 'numpy.random.choice', 'np.random.choice', (['indexs'], {}), '(indexs)\n', (12961, 12969), True, 'import numpy as np\n'), ((13817, 13881), 'numpy.array', 'np.array', (['[policy[0].data[num] for num in putable_position_nums]'], {}), '([policy[0].data[num] for num in putable_position_nums])\n', (13825, 13881), True, 'import numpy as np\n'), ((13999, 14023), 'numpy.random.choice', 'np.random.choice', (['indexs'], {}), '(indexs)\n', (14015, 14023), True, 'import numpy as np\n'), ((14948, 15012), 'numpy.array', 'np.array', (['[policy[0].data[num] for num in putable_position_nums]'], {}), '([policy[0].data[num] for num in putable_position_nums])\n', (14956, 15012), True, 'import numpy as np\n'), ((15725, 15804), 'functools.reduce', 'functools.reduce', (["(lambda total_num, node: total_num + node['try_num'])", 'nodes', '(0)'], {}), "(lambda total_num, node: total_num + node['try_num'], nodes, 0)\n", (15741, 15804), False, 'import functools\n'), ((15954, 15978), 'numpy.random.choice', 'np.random.choice', (['indexs'], {}), '(indexs)\n', (15970, 15978), True, 'import numpy as np\n'), ((17147, 17192), 'numpy.array', 'np.array', (["[node['try_num'] for node in nodes]"], {}), "([node['try_num'] for node in nodes])\n", 
(17155, 17192), True, 'import numpy as np\n'), ((22299, 22335), 'numpy.array', 'np.array', (['([0] * 64)'], {'dtype': 'np.float32'}), '([0] * 64, dtype=np.float32)\n', (22307, 22335), True, 'import numpy as np\n'), ((24622, 24647), 'chainer.optimizers.Adam', 'chainer.optimizers.Adam', ([], {}), '()\n', (24645, 24647), False, 'import chainer\n'), ((6722, 6754), 'chainer.links.Convolution2D', 'L.Convolution2D', (['(4)', '(48)', '(3)'], {'pad': '(1)'}), '(4, 48, 3, pad=1)\n', (6737, 6754), True, 'import chainer.links as L\n'), ((6781, 6814), 'chainer.links.Convolution2D', 'L.Convolution2D', (['(48)', '(48)', '(3)'], {'pad': '(1)'}), '(48, 48, 3, pad=1)\n', (6796, 6814), True, 'import chainer.links as L\n'), ((6840, 6873), 'chainer.links.Convolution2D', 'L.Convolution2D', (['(48)', '(48)', '(3)'], {'pad': '(1)'}), '(48, 48, 3, pad=1)\n', (6855, 6873), True, 'import chainer.links as L\n'), ((6899, 6932), 'chainer.links.Convolution2D', 'L.Convolution2D', (['(48)', '(48)', '(3)'], {'pad': '(1)'}), '(48, 48, 3, pad=1)\n', (6914, 6932), True, 'import chainer.links as L\n'), ((6958, 6991), 'chainer.links.Convolution2D', 'L.Convolution2D', (['(48)', '(48)', '(3)'], {'pad': '(1)'}), '(48, 48, 3, pad=1)\n', (6973, 6991), True, 'import chainer.links as L\n'), ((7016, 7040), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(48)'], {}), '(48)\n', (7036, 7040), True, 'import chainer.links as L\n'), ((7064, 7088), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(48)'], {}), '(48)\n', (7084, 7088), True, 'import chainer.links as L\n'), ((7112, 7136), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(48)'], {}), '(48)\n', (7132, 7136), True, 'import chainer.links as L\n'), ((7160, 7184), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(48)'], {}), '(48)\n', (7180, 7184), True, 'import chainer.links as L\n'), ((7208, 7232), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(48)'], {}), '(48)\n', (7228, 7232), True, 'import chainer.links as L\n'), ((7261, 7286), 'chainer.links.Convolution2D', 'L.Convolution2D', (['(48)', '(2)', '(1)'], {}), '(48, 2, 1)\n', (7276, 7286), True, 'import chainer.links as L\n'), ((7314, 7337), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(2)'], {}), '(2)\n', (7334, 7337), True, 'import chainer.links as L\n'), ((7365, 7391), 'chainer.links.Linear', 'L.Linear', (['(8 * 8 * 2)', '(8 * 8)'], {}), '(8 * 8 * 2, 8 * 8)\n', (7373, 7391), True, 'import chainer.links as L\n'), ((7420, 7445), 'chainer.links.Convolution2D', 'L.Convolution2D', (['(48)', '(1)', '(1)'], {}), '(48, 1, 1)\n', (7435, 7445), True, 'import chainer.links as L\n'), ((7473, 7496), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(1)'], {}), '(1)\n', (7493, 7496), True, 'import chainer.links as L\n'), ((7524, 7543), 'chainer.links.Linear', 'L.Linear', (['(8 * 8)', '(48)'], {}), '(8 * 8, 48)\n', (7532, 7543), True, 'import chainer.links as L\n'), ((7571, 7586), 'chainer.links.Linear', 'L.Linear', (['(48)', '(1)'], {}), '(48, 1)\n', (7579, 7586), True, 'import chainer.links as L\n'), ((9361, 9400), 'numpy.random.choice', 'np.random.choice', (['putable_position_nums'], {}), '(putable_position_nums)\n', (9377, 9400), True, 'import numpy as np\n'), ((11131, 11152), 'numpy.where', 'np.where', (['(ucb1s == -1)'], {}), '(ucb1s == -1)\n', (11139, 11152), True, 'import numpy as np\n'), ((17385, 17409), 'numpy.random.choice', 'np.random.choice', (['indexs'], {}), '(indexs)\n', (17401, 17409), True, 'import numpy as np\n'), ((19151, 
19163), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (19161, 19163), False, 'import uuid\n'), ((21092, 21104), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (21102, 21104), False, 'import uuid\n'), ((23172, 23200), 'numpy.random.choice', 'np.random.choice', (['steps_list'], {}), '(steps_list)\n', (23188, 23200), True, 'import numpy as np\n'), ((2777, 2797), 'numpy.where', 'np.where', (['(board == 1)'], {}), '(board == 1)\n', (2785, 2797), True, 'import numpy as np\n'), ((2827, 2848), 'numpy.where', 'np.where', (['(board == -1)'], {}), '(board == -1)\n', (2835, 2848), True, 'import numpy as np\n'), ((8876, 8921), 'numpy.concatenate', 'np.concatenate', (['(mine, yours, blank, putable)'], {}), '((mine, yours, blank, putable))\n', (8890, 8921), True, 'import numpy as np\n'), ((19085, 19106), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (19104, 19106), False, 'import datetime\n'), ((21026, 21047), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (21045, 21047), False, 'import datetime\n'), ((22782, 22821), 'numpy.array', 'np.array', (['position_nums'], {'dtype': 'np.int32'}), '(position_nums, dtype=np.int32)\n', (22790, 22821), True, 'import numpy as np\n'), ((22864, 22903), 'numpy.array', 'np.array', (['position_nums'], {'dtype': 'np.int32'}), '(position_nums, dtype=np.int32)\n', (22872, 22903), True, 'import numpy as np\n'), ((24910, 24956), 'chainer.functions.mean_squared_error', 'F.mean_squared_error', (['y_policy', 'y_train_policy'], {}), '(y_policy, y_train_policy)\n', (24930, 24956), True, 'import chainer.functions as F\n'), ((24959, 25003), 'chainer.functions.mean_squared_error', 'F.mean_squared_error', (['y_value', 'y_train_value'], {}), '(y_value, y_train_value)\n', (24979, 25003), True, 'import chainer.functions as F\n'), ((2670, 2690), 'numpy.where', 'np.where', (['(board == 0)'], {}), '(board == 0)\n', (2678, 2690), True, 'import numpy as np\n'), ((15614, 15634), 'math.sqrt', 'math.sqrt', (['total_num'], {}), '(total_num)\n', (15623, 15634), False, 'import math\n'), ((20301, 20326), 'json.dumps', 'json.dumps', (['self_playdata'], {}), '(self_playdata)\n', (20311, 20326), False, 'import json\n'), ((24520, 24552), 'chainer.backends.cuda.get_device', 'cuda.get_device', (['self.gpu_device'], {}), '(self.gpu_device)\n', (24535, 24552), False, 'from chainer.backends import cuda\n'), ((10820, 10839), 'math.log', 'math.log', (['total_num'], {}), '(total_num)\n', (10828, 10839), False, 'import math\n')] |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
""" Tests constant folding with globals. """
import dace
import numpy as np
from dace.frontend.python import astutils
from dace.frontend.python.newast import (GlobalResolver,
ConditionalCodeResolver,
DeadCodeEliminator)
from dace.frontend.python.parser import DaceProgram
class MyConfiguration:
def __init__(self, parameter):
self.p = parameter * 2
@property
def q(self):
return self.p * 2
def get_parameter(self):
return self.p // 2
@staticmethod
def get_random_number():
return 4
@property
def cloned(self):
return MyConfiguration(self.get_parameter())
N = 2
cfg = MyConfiguration(N)
val = 5
# Confuse AST parser with global of the same name as array
A = 5
@dace.program
def instantiated_global(A):
A[cfg.q] = (A[cfg.get_parameter()] * MyConfiguration.get_random_number() +
cfg.p) + val
def test_instantiated_global():
"""
Tests constant/symbolic values with predetermined global values.
"""
A = np.random.rand(10)
reg_A = np.copy(A)
reg_A[cfg.q] = (reg_A[cfg.get_parameter()] *
MyConfiguration.get_random_number() + cfg.p) + val
instantiated_global(A)
assert np.allclose(A, reg_A)
def test_nested_globals():
"""
Tests constant/symbolic values with multiple nesting levels.
"""
@dace.program
def instantiated_global2(A):
A[cfg.q] = cfg.cloned.p
A = np.random.rand(10)
reg_A = np.copy(A)
reg_A[cfg.q] = cfg.cloned.p
instantiated_global2(A)
assert np.allclose(A, reg_A)
def _analyze_and_unparse_code(func: DaceProgram) -> str:
src_ast, _, _, _ = astutils.function_to_ast(func.f)
resolved = {
k: v
for k, v in func.global_vars.items() if k not in func.argnames
}
src_ast = GlobalResolver(resolved).visit(src_ast)
src_ast = ConditionalCodeResolver(resolved).visit(src_ast)
src_ast = DeadCodeEliminator().visit(src_ast)
return astutils.unparse(src_ast)
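# The three passes above run in sequence: GlobalResolver substitutes
# module-level globals into the AST, ConditionalCodeResolver evaluates
# tests that have become compile-time constants, and DeadCodeEliminator
# drops branches and unreachable statements that can never run, so the
# unparsed source shows only the surviving code.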
def test_dead_code_elimination_if():
"""
Tests dead code elimination with compile-time if conditions.
"""
sym = dace.symbol('sym', positive=True)
cfg_symbolic = MyConfiguration(sym)
@dace.program
def test(A):
if cfg_symbolic.q > sym:
return 2 * A
else:
return 4 * A
parsed_code = _analyze_and_unparse_code(test)
assert '4' not in parsed_code
assert '2' in parsed_code
def test_dead_code_elimination_ifexp():
"""
Tests dead code elimination with compile-time ternary expressions.
"""
sym = dace.symbol('sym', positive=True)
cfg_symbolic = MyConfiguration(sym)
@dace.program
def test(A):
return 2 * A if cfg_symbolic.q > sym else 4 * A
parsed_code = _analyze_and_unparse_code(test)
assert '4' not in parsed_code
assert '2' in parsed_code
def test_dead_code_elimination_noelse():
"""
Tests dead code elimination with compile-time if conditions (without else).
"""
scale = None
@dace.program
def test(A):
if scale is None:
return 2 * A
return scale * A
parsed_code = _analyze_and_unparse_code(test)
assert 'scale' not in parsed_code
assert '2' in parsed_code
def test_dead_code_elimination_unreachable():
"""
Tests dead code elimination with unreachable code.
"""
@dace.program
def test(A):
if A[5] > 1:
return 3 * A
return 6 * A
return 2 * A
return 4 * A
parsed_code = _analyze_and_unparse_code(test)
assert '6' not in parsed_code and '4' not in parsed_code # Dead code
assert '5' in parsed_code and '1' in parsed_code # Condition
assert '3' in parsed_code and '2' in parsed_code # Reachable code
# TODO: dace.constant should signal that argument evaluation is deferred to
# (nested) call time
# dace.constant = lambda x: None
# def test_constant_parameter():
# """
# Tests nested functions with constant parameters passed in as arguments.
# """
# @dace.program
# def nested_func(cfg: dace.constant(MyConfiguration), A: dace.float64[20]):
# return A[cfg.p]
# @dace.program
# def constant_parameter(
# cfg: dace.constant(MyConfiguration),
# cfg2: dace.constant(MyConfiguration), A: dace.float64[20]):
# A[cfg.q] = nested_func(cfg, A)
# A[MyConfiguration.get_random_number()] = nested_func(cfg2, A)
# cfg1 = MyConfiguration(3)
# cfg2 = MyConfiguration(4)
# A = np.random.rand(20)
# reg_A = np.copy(A)
# reg_A[12] = reg_A[6]
# reg_A[4] = reg_A[8]
# constant_parameter(cfg1, cfg2, A)
# assert np.allclose(A, reg_A)
if __name__ == '__main__':
test_instantiated_global()
test_nested_globals()
test_dead_code_elimination_if()
test_dead_code_elimination_ifexp()
test_dead_code_elimination_noelse()
test_dead_code_elimination_unreachable()
# test_constant_parameter()
| [
"numpy.copy",
"numpy.allclose",
"numpy.random.rand",
"dace.frontend.python.newast.DeadCodeEliminator",
"dace.symbol",
"dace.frontend.python.astutils.unparse",
"dace.frontend.python.astutils.function_to_ast",
"dace.frontend.python.newast.GlobalResolver",
"dace.frontend.python.newast.ConditionalCodeRe... | [((1178, 1196), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (1192, 1196), True, 'import numpy as np\n'), ((1209, 1219), 'numpy.copy', 'np.copy', (['A'], {}), '(A)\n', (1216, 1219), True, 'import numpy as np\n'), ((1380, 1401), 'numpy.allclose', 'np.allclose', (['A', 'reg_A'], {}), '(A, reg_A)\n', (1391, 1401), True, 'import numpy as np\n'), ((1604, 1622), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (1618, 1622), True, 'import numpy as np\n'), ((1635, 1645), 'numpy.copy', 'np.copy', (['A'], {}), '(A)\n', (1642, 1645), True, 'import numpy as np\n'), ((1719, 1740), 'numpy.allclose', 'np.allclose', (['A', 'reg_A'], {}), '(A, reg_A)\n', (1730, 1740), True, 'import numpy as np\n'), ((1823, 1855), 'dace.frontend.python.astutils.function_to_ast', 'astutils.function_to_ast', (['func.f'], {}), '(func.f)\n', (1847, 1855), False, 'from dace.frontend.python import astutils\n'), ((2142, 2167), 'dace.frontend.python.astutils.unparse', 'astutils.unparse', (['src_ast'], {}), '(src_ast)\n', (2158, 2167), False, 'from dace.frontend.python import astutils\n'), ((2298, 2331), 'dace.symbol', 'dace.symbol', (['"""sym"""'], {'positive': '(True)'}), "('sym', positive=True)\n", (2309, 2331), False, 'import dace\n'), ((2759, 2792), 'dace.symbol', 'dace.symbol', (['"""sym"""'], {'positive': '(True)'}), "('sym', positive=True)\n", (2770, 2792), False, 'import dace\n'), ((1977, 2001), 'dace.frontend.python.newast.GlobalResolver', 'GlobalResolver', (['resolved'], {}), '(resolved)\n', (1991, 2001), False, 'from dace.frontend.python.newast import GlobalResolver, ConditionalCodeResolver, DeadCodeEliminator\n'), ((2031, 2064), 'dace.frontend.python.newast.ConditionalCodeResolver', 'ConditionalCodeResolver', (['resolved'], {}), '(resolved)\n', (2054, 2064), False, 'from dace.frontend.python.newast import GlobalResolver, ConditionalCodeResolver, DeadCodeEliminator\n'), ((2094, 2114), 'dace.frontend.python.newast.DeadCodeEliminator', 'DeadCodeEliminator', ([], {}), '()\n', (2112, 2114), False, 'from dace.frontend.python.newast import GlobalResolver, ConditionalCodeResolver, DeadCodeEliminator\n')] |
"""Example with fitting a 32 triangles soup to an image."""
import copy
import os
import cv2
import deodr
from deodr import differentiable_renderer_cython
from deodr.differentiable_renderer import Scene2D
from imageio import imread
import matplotlib.pyplot as plt
import numpy as np
def create_example_scene(n_tri=30, width=200, height=200, clockwise=False):
material = np.double(imread(os.path.join(deodr.data_path, "trefle.jpg"))) / 255
height_material = material.shape[0]
width_material = material.shape[1]
scale_matrix = np.array([[height, 0], [0, width]])
scale_material = np.array([[height_material - 1, 0], [0, width_material - 1]])
triangles = []
for _ in range(n_tri):
tmp = scale_matrix.dot(
np.random.rand(2, 1).dot(np.ones((1, 3)))
+ 0.5 * (-0.5 + np.random.rand(2, 3))
)
while np.abs(np.linalg.det(np.vstack((tmp, np.ones((3)))))) < 1500:
tmp = scale_matrix.dot(
np.random.rand(2, 1).dot(np.ones((1, 3)))
+ 0.5 * (-0.5 + np.random.rand(2, 3))
)
if np.linalg.det(np.vstack((tmp, np.ones((3))))) > 0:
tmp = np.fliplr(tmp)
triangle = {}
triangle["ij"] = tmp.T
triangle["depths"] = np.random.rand(1) * np.ones(
(3, 1)
) # constant depth triangles to avoid collisions
triangle["textured"] = np.random.rand(1) > 0.5
if triangle["textured"]:
triangle["uv"] = (
scale_material.dot(np.array([[0, 1, 0.2], [0, 0.2, 1]])).T + 1
) # texture coordinate of the vertices
triangle["shade"] = np.random.rand(3, 1) # shade intensity at each vertex
triangle["colors"] = np.zeros((3, 3))
triangle["shaded"] = True
else:
triangle["uv"] = np.zeros((3, 2))
triangle["shade"] = np.zeros((3, 1))
triangle["colors"] = np.random.rand(3, 3)
# colors of the vertices (can be gray, rgb color,or even other dimension
# vectors) when using simple linear interpolation across triangles
triangle["shaded"] = False
triangle["edgeflags"] = np.array(
[True, True, True]
) # all edges are discontinuity edges as no triangle pair share an edge
triangles.append(triangle)
scene = {}
for key in triangles[0].keys():
scene[key] = np.squeeze(
np.vstack([np.array(triangle[key]) for triangle in triangles])
)
scene["faces"] = np.arange(3 * n_tri).reshape(-1, 3).astype(np.uint32)
scene["faces_uv"] = np.arange(3 * n_tri).reshape(-1, 3).astype(np.uint32)
if clockwise:
scene["faces"] = np.fliplr(scene["faces"])
scene["faces_uv"] = np.fliplr(scene["faces_uv"])
scene["clockwise"] = clockwise
scene["height"] = height
scene["width"] = width
scene["texture"] = material
scene["nb_colors"] = 3
scene["background_color"] = None
scene["background_image"] = np.tile(
np.array([0.3, 0.5, 0.7])[None, None, :], (height, width, 1)
)
scene["perspective_correct"] = False
scene["backface_culling"] = True
return Scene2D(**scene)
def run(nb_max_iter=500, display=True, clockwise=False):
print("process id=%d" % os.getpid())
np.random.seed(2)
scene_gt = create_example_scene(clockwise=clockwise)
antialiase_error = False
sigma = 1
image_target = np.zeros((scene_gt.height, scene_gt.width, scene_gt.nb_colors))
z_buffer = np.zeros((scene_gt.height, scene_gt.width))
differentiable_renderer_cython.renderScene(scene_gt, sigma, image_target, z_buffer)
n_vertices = len(scene_gt.depths)
displacement_magnitude_ij = 10
displacement_magnitude_uv = 0
displacement_magnitude_colors = 0
alpha_ij = 0.01
beta_ij = 0.80
alpha_uv = 0.03
beta_uv = 0.80
alpha_color = 0.001
beta_color = 0.70
max_uv = np.array(scene_gt.texture.shape[:2]) - 1
scene_init = copy.deepcopy(scene_gt)
scene_init.ij = (
scene_gt.ij + np.random.randn(n_vertices, 2) * displacement_magnitude_ij
)
scene_init.uv = (
scene_gt.uv + np.random.randn(n_vertices, 2) * displacement_magnitude_uv
)
scene_init.uv = np.maximum(scene_init.uv, 0)
scene_init.uv = np.minimum(scene_init.uv, max_uv)
scene_init.colors = (
scene_gt.colors + np.random.randn(n_vertices, 3) * displacement_magnitude_colors
)
final_loss = {}
for antialiase_error in [True, False]:
np.random.seed(2)
scene_iter = copy.deepcopy(scene_init)
speed_ij = np.zeros((n_vertices, 2))
speed_uv = np.zeros((n_vertices, 2))
speed_color = np.zeros((n_vertices, 3))
losses = []
for niter in range(nb_max_iter):
image, depth, loss_image, loss = scene_iter.render_compare_and_backward(
sigma, antialiase_error, image_target
)
print(f"iter {niter} loss = {loss}")
# imsave(os.path.join(iterfolder,f'soup_{niter}.png'), combinedIMage)
losses.append(loss)
if loss_image.ndim == 2:
loss_image = np.broadcast_to(loss_image[:, :, None], image.shape)
if display:
cv2.waitKey(1)
cv2.imshow(
"animation",
np.column_stack((image_target, image, loss_image))[:, :, ::-1],
)
if displacement_magnitude_ij > 0:
speed_ij = beta_ij * speed_ij - scene_iter.ij_b * alpha_ij
scene_iter.ij = scene_iter.ij + speed_ij
if displacement_magnitude_colors > 0:
speed_color = (
beta_color * speed_color - scene_iter.colors_b * alpha_color
)
scene_iter.colors = scene_iter.colors + speed_color
if displacement_magnitude_uv > 0:
speed_uv = beta_uv * speed_uv - scene_iter.uv_b * alpha_uv
scene_iter.uv = scene_iter.uv + speed_uv
                scene_iter.uv = np.maximum(scene_iter.uv, 0)
                scene_iter.uv = np.minimum(scene_iter.uv, max_uv)
if display:
plt.plot(losses, label="antialiaseError=%d" % antialiase_error)
final_loss[antialiase_error] = loss
if display:
plt.legend()
plt.show()
return final_loss
if __name__ == "__main__":
run()
| [
"numpy.random.rand",
"numpy.column_stack",
"numpy.array",
"copy.deepcopy",
"numpy.arange",
"matplotlib.pyplot.plot",
"numpy.random.seed",
"os.getpid",
"numpy.maximum",
"cv2.waitKey",
"numpy.ones",
"numpy.fliplr",
"numpy.random.randn",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",... | [((552, 587), 'numpy.array', 'np.array', (['[[height, 0], [0, width]]'], {}), '([[height, 0], [0, width]])\n', (560, 587), True, 'import numpy as np\n'), ((609, 670), 'numpy.array', 'np.array', (['[[height_material - 1, 0], [0, width_material - 1]]'], {}), '([[height_material - 1, 0], [0, width_material - 1]])\n', (617, 670), True, 'import numpy as np\n'), ((3213, 3229), 'deodr.differentiable_renderer.Scene2D', 'Scene2D', ([], {}), '(**scene)\n', (3220, 3229), False, 'from deodr.differentiable_renderer import Scene2D\n'), ((3335, 3352), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (3349, 3352), True, 'import numpy as np\n'), ((3473, 3536), 'numpy.zeros', 'np.zeros', (['(scene_gt.height, scene_gt.width, scene_gt.nb_colors)'], {}), '((scene_gt.height, scene_gt.width, scene_gt.nb_colors))\n', (3481, 3536), True, 'import numpy as np\n'), ((3553, 3596), 'numpy.zeros', 'np.zeros', (['(scene_gt.height, scene_gt.width)'], {}), '((scene_gt.height, scene_gt.width))\n', (3561, 3596), True, 'import numpy as np\n'), ((3601, 3688), 'deodr.differentiable_renderer_cython.renderScene', 'differentiable_renderer_cython.renderScene', (['scene_gt', 'sigma', 'image_target', 'z_buffer'], {}), '(scene_gt, sigma, image_target,\n z_buffer)\n', (3643, 3688), False, 'from deodr import differentiable_renderer_cython\n'), ((4030, 4053), 'copy.deepcopy', 'copy.deepcopy', (['scene_gt'], {}), '(scene_gt)\n', (4043, 4053), False, 'import copy\n'), ((4292, 4320), 'numpy.maximum', 'np.maximum', (['scene_init.uv', '(0)'], {}), '(scene_init.uv, 0)\n', (4302, 4320), True, 'import numpy as np\n'), ((4341, 4374), 'numpy.minimum', 'np.minimum', (['scene_init.uv', 'max_uv'], {}), '(scene_init.uv, max_uv)\n', (4351, 4374), True, 'import numpy as np\n'), ((2213, 2241), 'numpy.array', 'np.array', (['[True, True, True]'], {}), '([True, True, True])\n', (2221, 2241), True, 'import numpy as np\n'), ((2737, 2762), 'numpy.fliplr', 'np.fliplr', (["scene['faces']"], {}), "(scene['faces'])\n", (2746, 2762), True, 'import numpy as np\n'), ((2791, 2819), 'numpy.fliplr', 'np.fliplr', (["scene['faces_uv']"], {}), "(scene['faces_uv'])\n", (2800, 2819), True, 'import numpy as np\n'), ((3971, 4007), 'numpy.array', 'np.array', (['scene_gt.texture.shape[:2]'], {}), '(scene_gt.texture.shape[:2])\n', (3979, 4007), True, 'import numpy as np\n'), ((4569, 4586), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (4583, 4586), True, 'import numpy as np\n'), ((4608, 4633), 'copy.deepcopy', 'copy.deepcopy', (['scene_init'], {}), '(scene_init)\n', (4621, 4633), False, 'import copy\n'), ((4654, 4679), 'numpy.zeros', 'np.zeros', (['(n_vertices, 2)'], {}), '((n_vertices, 2))\n', (4662, 4679), True, 'import numpy as np\n'), ((4699, 4724), 'numpy.zeros', 'np.zeros', (['(n_vertices, 2)'], {}), '((n_vertices, 2))\n', (4707, 4724), True, 'import numpy as np\n'), ((4747, 4772), 'numpy.zeros', 'np.zeros', (['(n_vertices, 3)'], {}), '((n_vertices, 3))\n', (4755, 4772), True, 'import numpy as np\n'), ((6374, 6386), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6384, 6386), True, 'import matplotlib.pyplot as plt\n'), ((6395, 6405), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6403, 6405), True, 'import matplotlib.pyplot as plt\n'), ((1184, 1198), 'numpy.fliplr', 'np.fliplr', (['tmp'], {}), '(tmp)\n', (1193, 1198), True, 'import numpy as np\n'), ((1281, 1298), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (1295, 1298), True, 'import numpy as np\n'), ((1301, 1316), 
'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (1308, 1316), True, 'import numpy as np\n'), ((1418, 1435), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (1432, 1435), True, 'import numpy as np\n'), ((1670, 1690), 'numpy.random.rand', 'np.random.rand', (['(3)', '(1)'], {}), '(3, 1)\n', (1684, 1690), True, 'import numpy as np\n'), ((1759, 1775), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1767, 1775), True, 'import numpy as np\n'), ((1857, 1873), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {}), '((3, 2))\n', (1865, 1873), True, 'import numpy as np\n'), ((1906, 1922), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (1914, 1922), True, 'import numpy as np\n'), ((1956, 1976), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)'], {}), '(3, 3)\n', (1970, 1976), True, 'import numpy as np\n'), ((3057, 3082), 'numpy.array', 'np.array', (['[0.3, 0.5, 0.7]'], {}), '([0.3, 0.5, 0.7])\n', (3065, 3082), True, 'import numpy as np\n'), ((3317, 3328), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3326, 3328), False, 'import os\n'), ((4098, 4128), 'numpy.random.randn', 'np.random.randn', (['n_vertices', '(2)'], {}), '(n_vertices, 2)\n', (4113, 4128), True, 'import numpy as np\n'), ((4207, 4237), 'numpy.random.randn', 'np.random.randn', (['n_vertices', '(2)'], {}), '(n_vertices, 2)\n', (4222, 4237), True, 'import numpy as np\n'), ((4427, 4457), 'numpy.random.randn', 'np.random.randn', (['n_vertices', '(3)'], {}), '(n_vertices, 3)\n', (4442, 4457), True, 'import numpy as np\n'), ((6242, 6305), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {'label': "('antialiaseError=%d' % antialiase_error)"}), "(losses, label='antialiaseError=%d' % antialiase_error)\n", (6250, 6305), True, 'import matplotlib.pyplot as plt\n'), ((401, 444), 'os.path.join', 'os.path.join', (['deodr.data_path', '"""trefle.jpg"""'], {}), "(deodr.data_path, 'trefle.jpg')\n", (413, 444), False, 'import os\n'), ((5218, 5270), 'numpy.broadcast_to', 'np.broadcast_to', (['loss_image[:, :, None]', 'image.shape'], {}), '(loss_image[:, :, None], image.shape)\n', (5233, 5270), True, 'import numpy as np\n'), ((5311, 5325), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5322, 5325), False, 'import cv2\n'), ((788, 803), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (795, 803), True, 'import numpy as np\n'), ((2478, 2501), 'numpy.array', 'np.array', (['triangle[key]'], {}), '(triangle[key])\n', (2486, 2501), True, 'import numpy as np\n'), ((2561, 2581), 'numpy.arange', 'np.arange', (['(3 * n_tri)'], {}), '(3 * n_tri)\n', (2570, 2581), True, 'import numpy as np\n'), ((2639, 2659), 'numpy.arange', 'np.arange', (['(3 * n_tri)'], {}), '(3 * n_tri)\n', (2648, 2659), True, 'import numpy as np\n'), ((763, 783), 'numpy.random.rand', 'np.random.rand', (['(2)', '(1)'], {}), '(2, 1)\n', (777, 783), True, 'import numpy as np\n'), ((833, 853), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)'], {}), '(2, 3)\n', (847, 853), True, 'import numpy as np\n'), ((1018, 1033), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (1025, 1033), True, 'import numpy as np\n'), ((1145, 1155), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1152, 1155), True, 'import numpy as np\n'), ((1542, 1578), 'numpy.array', 'np.array', (['[[0, 1, 0.2], [0, 0.2, 1]]'], {}), '([[0, 1, 0.2], [0, 0.2, 1]])\n', (1550, 1578), True, 'import numpy as np\n'), ((5407, 5457), 'numpy.column_stack', 'np.column_stack', (['(image_target, image, loss_image)'], {}), '((image_target, image, loss_image))\n', (5422, 5457), True, 
'import numpy as np\n'), ((916, 926), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (923, 926), True, 'import numpy as np\n'), ((993, 1013), 'numpy.random.rand', 'np.random.rand', (['(2)', '(1)'], {}), '(2, 1)\n', (1007, 1013), True, 'import numpy as np\n'), ((1067, 1087), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)'], {}), '(2, 3)\n', (1081, 1087), True, 'import numpy as np\n')] |
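The fitting loop above applies the same heavy-ball momentum update to vertex positions, colors, and UVs: a velocity accumulates the negated gradient and the parameters move along it. A generic NumPy sketch of that update, with illustrative constants:

import numpy as np

def momentum_step(x, grad, speed, alpha=0.01, beta=0.8):
    speed = beta * speed - alpha * grad  # decay old velocity, add new step
    return x + speed, speed

x, speed = np.zeros(2), np.zeros(2)
target = np.array([1.0, -1.0])
for _ in range(200):
    grad = 2.0 * (x - target)  # gradient of ||x - target||^2
    x, speed = momentum_step(x, grad, speed)
# x is now close to target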
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops.operations import _grad_ops as G
class ResizeBilinearGradNet(nn.Cell):
def __init__(self, align_corners=False):
super(ResizeBilinearGradNet, self).__init__()
self.rb1 = G.ResizeBilinearGrad(align_corners=align_corners)
def construct(self, dy, size):
return self.rb1(dy, size)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_resize_bilinear_grad_align_corners():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
dy = np.array([[[[1, 2], [3, 4]]]]).astype(np.float16)
x = np.array([[[[1.1, 2.2, 3.2, 2.5],
[3.3, 4.4, 5.7, 8.1],
[3.3, 4.4, 5.7, 8.1],
[3.3, 4.4, 5.7, 8.1]]]]).astype(np.float16)
expect = np.array([[[[1., 0., 0., 2.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[3., 0., 0., 4.]]]]).astype(np.float16)
net = ResizeBilinearGradNet(align_corners=True)
output = net(Tensor(dy), Tensor(x))
assert np.all(output.asnumpy() == expect)
dy = np.array([[[[1, 2], [3, 4]]]]).astype(np.float32)
x = np.array([[[[1.1, 2.2, 3.2, 2.5],
[3.3, 4.4, 5.7, 8.1],
[3.3, 4.4, 5.7, 8.1],
[3.3, 4.4, 5.7, 8.1]]]]).astype(np.float32)
expect = np.array([[[[1., 0., 0., 2.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[3., 0., 0., 4.]]]]).astype(np.float32)
net = ResizeBilinearGradNet(align_corners=True)
output = net(Tensor(dy), Tensor(x))
assert np.all(output.asnumpy() == expect)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_resize_bilinear_grad():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
dy = np.array([[[[1, 1, 0, 0],
[1, 1, 0, 0],
[0, 0, 1, 1],
[0, 0, 1, 1]]]]).astype(np.float16)
x = np.array([[[[1.1, 2.2], [3.3, 4.4]]]]).astype(np.float16)
expect = np.array([[[[2.25, 0.75],
[0.75, 4.25]]]]).astype(np.float16)
net = ResizeBilinearGradNet()
output = net(Tensor(dy), Tensor(x))
assert np.all(output.asnumpy() == expect)
dy = np.array([[[[1, 1, 0, 0],
[1, 1, 0, 0],
[0, 0, 1, 1],
[0, 0, 1, 1]]]]).astype(np.float32)
x = np.array([[[[1.1, 2.2], [3.3, 4.4]]]]).astype(np.float32)
expect = np.array([[[[2.25, 0.75],
[0.75, 4.25]]]]).astype(np.float32)
net = ResizeBilinearGradNet()
output = net(Tensor(dy), Tensor(x))
assert np.all(output.asnumpy() == expect)
| [
"numpy.array",
"mindspore.ops.operations._grad_ops.ResizeBilinearGrad",
"mindspore.context.set_context",
"mindspore.Tensor"
] | [((1260, 1325), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': '"""GPU"""'}), "(mode=context.GRAPH_MODE, device_target='GPU')\n", (1279, 1325), True, 'import mindspore.context as context\n'), ((2613, 2678), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': '"""GPU"""'}), "(mode=context.GRAPH_MODE, device_target='GPU')\n", (2632, 2678), True, 'import mindspore.context as context\n'), ((1003, 1052), 'mindspore.ops.operations._grad_ops.ResizeBilinearGrad', 'G.ResizeBilinearGrad', ([], {'align_corners': 'align_corners'}), '(align_corners=align_corners)\n', (1023, 1052), True, 'from mindspore.ops.operations import _grad_ops as G\n'), ((1839, 1849), 'mindspore.Tensor', 'Tensor', (['dy'], {}), '(dy)\n', (1845, 1849), False, 'from mindspore import Tensor\n'), ((1851, 1860), 'mindspore.Tensor', 'Tensor', (['x'], {}), '(x)\n', (1857, 1860), False, 'from mindspore import Tensor\n'), ((2421, 2431), 'mindspore.Tensor', 'Tensor', (['dy'], {}), '(dy)\n', (2427, 2431), False, 'from mindspore import Tensor\n'), ((2433, 2442), 'mindspore.Tensor', 'Tensor', (['x'], {}), '(x)\n', (2439, 2442), False, 'from mindspore import Tensor\n'), ((3059, 3069), 'mindspore.Tensor', 'Tensor', (['dy'], {}), '(dy)\n', (3065, 3069), False, 'from mindspore import Tensor\n'), ((3071, 3080), 'mindspore.Tensor', 'Tensor', (['x'], {}), '(x)\n', (3077, 3080), False, 'from mindspore import Tensor\n'), ((3508, 3518), 'mindspore.Tensor', 'Tensor', (['dy'], {}), '(dy)\n', (3514, 3518), False, 'from mindspore import Tensor\n'), ((3520, 3529), 'mindspore.Tensor', 'Tensor', (['x'], {}), '(x)\n', (3526, 3529), False, 'from mindspore import Tensor\n'), ((1335, 1365), 'numpy.array', 'np.array', (['[[[[1, 2], [3, 4]]]]'], {}), '([[[[1, 2], [3, 4]]]])\n', (1343, 1365), True, 'import numpy as np\n'), ((1394, 1501), 'numpy.array', 'np.array', (['[[[[1.1, 2.2, 3.2, 2.5], [3.3, 4.4, 5.7, 8.1], [3.3, 4.4, 5.7, 8.1], [3.3, \n 4.4, 5.7, 8.1]]]]'], {}), '([[[[1.1, 2.2, 3.2, 2.5], [3.3, 4.4, 5.7, 8.1], [3.3, 4.4, 5.7, 8.1\n ], [3.3, 4.4, 5.7, 8.1]]]])\n', (1402, 1501), True, 'import numpy as np\n'), ((1589, 1696), 'numpy.array', 'np.array', (['[[[[1.0, 0.0, 0.0, 2.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [3.0, \n 0.0, 0.0, 4.0]]]]'], {}), '([[[[1.0, 0.0, 0.0, 2.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0\n ], [3.0, 0.0, 0.0, 4.0]]]])\n', (1597, 1696), True, 'import numpy as np\n'), ((1917, 1947), 'numpy.array', 'np.array', (['[[[[1, 2], [3, 4]]]]'], {}), '([[[[1, 2], [3, 4]]]])\n', (1925, 1947), True, 'import numpy as np\n'), ((1976, 2083), 'numpy.array', 'np.array', (['[[[[1.1, 2.2, 3.2, 2.5], [3.3, 4.4, 5.7, 8.1], [3.3, 4.4, 5.7, 8.1], [3.3, \n 4.4, 5.7, 8.1]]]]'], {}), '([[[[1.1, 2.2, 3.2, 2.5], [3.3, 4.4, 5.7, 8.1], [3.3, 4.4, 5.7, 8.1\n ], [3.3, 4.4, 5.7, 8.1]]]])\n', (1984, 2083), True, 'import numpy as np\n'), ((2171, 2278), 'numpy.array', 'np.array', (['[[[[1.0, 0.0, 0.0, 2.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [3.0, \n 0.0, 0.0, 4.0]]]]'], {}), '([[[[1.0, 0.0, 0.0, 2.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0\n ], [3.0, 0.0, 0.0, 4.0]]]])\n', (2179, 2278), True, 'import numpy as np\n'), ((2688, 2758), 'numpy.array', 'np.array', (['[[[[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]]]'], {}), '([[[[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]]])\n', (2696, 2758), True, 'import numpy as np\n'), ((2850, 2888), 'numpy.array', 'np.array', (['[[[[1.1, 2.2], [3.3, 4.4]]]]'], {}), 
'([[[[1.1, 2.2], [3.3, 4.4]]]])\n', (2858, 2888), True, 'import numpy as np\n'), ((2921, 2963), 'numpy.array', 'np.array', (['[[[[2.25, 0.75], [0.75, 4.25]]]]'], {}), '([[[[2.25, 0.75], [0.75, 4.25]]]])\n', (2929, 2963), True, 'import numpy as np\n'), ((3138, 3208), 'numpy.array', 'np.array', (['[[[[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]]]'], {}), '([[[[1, 1, 0, 0], [1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 1, 1]]]])\n', (3146, 3208), True, 'import numpy as np\n'), ((3299, 3337), 'numpy.array', 'np.array', (['[[[[1.1, 2.2], [3.3, 4.4]]]]'], {}), '([[[[1.1, 2.2], [3.3, 4.4]]]])\n', (3307, 3337), True, 'import numpy as np\n'), ((3370, 3412), 'numpy.array', 'np.array', (['[[[[2.25, 0.75], [0.75, 4.25]]]]'], {}), '([[[[2.25, 0.75], [0.75, 4.25]]]])\n', (3378, 3412), True, 'import numpy as np\n')] |
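The expected gradients above follow from how bilinear resizing maps output indices back to input coordinates. With align_corners=True and a 4x4 input resized to 2x2, the two output rows/columns land exactly on input rows/columns 0 and 3, so the whole gradient concentrates on the four corner pixels. A NumPy sketch of the coordinate mapping (the non-aligned branch shown here uses the half-pixel convention, which is one common choice; frameworks differ):

import numpy as np

def src_coords(out_size, in_size, align_corners):
    i = np.arange(out_size, dtype=np.float64)
    if align_corners:
        return i * (in_size - 1) / (out_size - 1)  # endpoints map to endpoints
    return (i + 0.5) * in_size / out_size - 0.5    # half-pixel convention

print(src_coords(2, 4, align_corners=True))   # [0. 3.] -> exact corners
print(src_coords(2, 4, align_corners=False))  # [0.5 2.5] -> interpolated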
import cv2
import csv
import numpy as np
import os
import sklearn
from keras.models import Sequential, Model
from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D
from keras.layers.pooling import MaxPooling2D
from sklearn.model_selection import train_test_split
def model():
model = Sequential()
model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=(160,320,3)))
model.add(Cropping2D(cropping=((50,20), (0,0))))
model.add(Convolution2D(16,3,3, subsample=(2,2), activation='relu'))
model.add(Convolution2D(32,3,3, subsample=(2,2), activation='relu'))
model.add(Convolution2D(48,3,3, subsample=(2,2), activation='relu'))
model.add(Convolution2D(64,3,3, activation='relu'))
model.add(Convolution2D(128,3,3, activation='relu'))
model.add(Convolution2D(128,3,3, activation='relu'))
model.add(Flatten())
model.add(Dense(320))
model.add(Dense(160))
model.add(Dense(80))
model.add(Dense(40))
model.add(Dense(20))
model.add(Dense(10))
model.add(Dense(1))
return model
def generator(data):
num_data = len(data)
bs = 64
while 1:
samples = sklearn.utils.shuffle(data)
for offset in range(0, num_data, bs):
            minibatch = samples[offset:offset+bs]  # iterate over the shuffled copy
images = []
measurements = []
for image, measurement in minibatch:
images.append(image)
measurements.append(measurement)
            inputs = np.array(images)  # renamed from `input`, which shadowed the builtin
            labels = np.array(measurements)
            yield sklearn.utils.shuffle(inputs, labels)
lines = []
with open('/root/Desktop/data/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
lines.append(line)
images = []
measurements = []
for line in lines:
source_path_center = line[0]
source_path_left = line[1]
source_path_right = line[2]
source_path_measurement = line[3]
filename_center = source_path_center.split('/')[-1]
filename_left = source_path_left.split('/')[-1]
filename_right = source_path_right.split('/')[-1]
filename_measurement = source_path_measurement
current_path_center = '/root/Desktop/data/IMG/' + filename_center
current_path_left = '/root/Desktop/data/IMG/' + filename_left
current_path_right = '/root/Desktop/data/IMG/' + filename_right
current_path_measurement = filename_measurement
image_center = cv2.imread(current_path_center)
image_left = cv2.imread(current_path_left)
image_right = cv2.imread(current_path_right)
measurement = float(current_path_measurement)
images.append(image_center)
measurements.append(measurement)
images.append(image_left)
measurements.append(measurement+0.2)
images.append(image_right)
measurements.append(measurement-0.2)
images.append(cv2.flip(image_center,1))
measurements.append(measurement*-1.0)
data = list(zip(images, measurements))
# print('Total samples: {}'.format( samples))
train_data, val_data = train_test_split(data, test_size=0.2)
train_gen = generator(train_data)
val_gen = generator(val_data)
model = model()
model.compile(loss='mse', optimizer='adam')
result = model.fit_generator(train_gen, samples_per_epoch=len(train_data), validation_data=val_gen, nb_val_samples=len(val_data), nb_epoch=3, verbose=1)
model.save('model.h5')
| [
"keras.layers.Convolution2D",
"keras.layers.Flatten",
"cv2.flip",
"sklearn.model_selection.train_test_split",
"sklearn.utils.shuffle",
"keras.layers.Lambda",
"keras.models.Sequential",
"numpy.array",
"keras.layers.Cropping2D",
"csv.reader",
"keras.layers.Dense",
"cv2.imread"
] | [((3038, 3075), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data'], {'test_size': '(0.2)'}), '(data, test_size=0.2)\n', (3054, 3075), False, 'from sklearn.model_selection import train_test_split\n'), ((309, 321), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (319, 321), False, 'from keras.models import Sequential, Model\n'), ((1697, 1716), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (1707, 1716), False, 'import csv\n'), ((2453, 2484), 'cv2.imread', 'cv2.imread', (['current_path_center'], {}), '(current_path_center)\n', (2463, 2484), False, 'import cv2\n'), ((2502, 2531), 'cv2.imread', 'cv2.imread', (['current_path_left'], {}), '(current_path_left)\n', (2512, 2531), False, 'import cv2\n'), ((2550, 2580), 'cv2.imread', 'cv2.imread', (['current_path_right'], {}), '(current_path_right)\n', (2560, 2580), False, 'import cv2\n'), ((336, 396), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (342, 396), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D\n'), ((412, 451), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((50, 20), (0, 0))'}), '(cropping=((50, 20), (0, 0)))\n', (422, 451), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D\n'), ((470, 530), 'keras.layers.Convolution2D', 'Convolution2D', (['(16)', '(3)', '(3)'], {'subsample': '(2, 2)', 'activation': '"""relu"""'}), "(16, 3, 3, subsample=(2, 2), activation='relu')\n", (483, 530), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D\n'), ((543, 603), 'keras.layers.Convolution2D', 'Convolution2D', (['(32)', '(3)', '(3)'], {'subsample': '(2, 2)', 'activation': '"""relu"""'}), "(32, 3, 3, subsample=(2, 2), activation='relu')\n", (556, 603), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D\n'), ((616, 676), 'keras.layers.Convolution2D', 'Convolution2D', (['(48)', '(3)', '(3)'], {'subsample': '(2, 2)', 'activation': '"""relu"""'}), "(48, 3, 3, subsample=(2, 2), activation='relu')\n", (629, 676), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D\n'), ((689, 731), 'keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(3)', '(3)'], {'activation': '"""relu"""'}), "(64, 3, 3, activation='relu')\n", (702, 731), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D\n'), ((745, 788), 'keras.layers.Convolution2D', 'Convolution2D', (['(128)', '(3)', '(3)'], {'activation': '"""relu"""'}), "(128, 3, 3, activation='relu')\n", (758, 788), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D\n'), ((802, 845), 'keras.layers.Convolution2D', 'Convolution2D', (['(128)', '(3)', '(3)'], {'activation': '"""relu"""'}), "(128, 3, 3, activation='relu')\n", (815, 845), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D\n'), ((859, 868), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (866, 868), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D\n'), ((884, 894), 'keras.layers.Dense', 'Dense', (['(320)'], {}), '(320)\n', (889, 894), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D\n'), ((910, 920), 'keras.layers.Dense', 'Dense', (['(160)'], {}), '(160)\n', (915, 920), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D\n'), ((936, 945), 
'keras.layers.Dense', 'Dense', (['(80)'], {}), '(80)\n', (941, 945), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D\n'), ((961, 970), 'keras.layers.Dense', 'Dense', (['(40)'], {}), '(40)\n', (966, 970), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D\n'), ((986, 995), 'keras.layers.Dense', 'Dense', (['(20)'], {}), '(20)\n', (991, 995), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D\n'), ((1011, 1020), 'keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (1016, 1020), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D\n'), ((1036, 1044), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (1041, 1044), False, 'from keras.layers import Flatten, Dense, Lambda, Convolution2D, Cropping2D\n'), ((1154, 1181), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', (['data'], {}), '(data)\n', (1175, 1181), False, 'import sklearn\n'), ((2861, 2886), 'cv2.flip', 'cv2.flip', (['image_center', '(1)'], {}), '(image_center, 1)\n', (2869, 2886), False, 'import cv2\n'), ((1485, 1501), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (1493, 1501), True, 'import numpy as np\n'), ((1522, 1544), 'numpy.array', 'np.array', (['measurements'], {}), '(measurements)\n', (1530, 1544), True, 'import numpy as np\n'), ((1563, 1599), 'sklearn.utils.shuffle', 'sklearn.utils.shuffle', (['inputs', 'label'], {}), '(inputs, label)\n', (1584, 1599), False, 'import sklearn\n')] |
"""
This module provides the `PerformanceMetrics` class and supporting
functionality for tracking and computing model performance.
"""
from collections import defaultdict, namedtuple
import logging
import os
import warnings
import pandas as pd
import numpy as np
from sklearn.metrics import average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from scipy.stats import rankdata
logger = logging.getLogger("selene")
Metric = namedtuple("Metric", ["fn", "transform", "data"])
"""
A tuple containing a metric function and the results from applying that
metric to some values.
Parameters
----------
fn : types.FunctionType
A metric.
transform : types.FunctionType
A transform function that should be applied to the data before the metric is measured.
data : list(float)
A list holding the results from applying the metric.
Attributes
----------
fn : types.FunctionType
A metric.
transform : types.FunctionType
A transform function that should be applied to the data before the metric is measured.
data : list(float)
A list holding the results from applying the metric.
"""
def visualize_roc_curves(prediction,
target,
output_dir,
target_mask=None,
report_gt_feature_n_positives=50,
style="seaborn-colorblind",
fig_title="Feature ROC curves",
dpi=500):
"""
Output the ROC curves for each feature predicted by a model
as an SVG.
Parameters
----------
prediction : numpy.ndarray
Value predicted by user model.
target : numpy.ndarray
True value that the user model was trying to predict.
output_dir : str
The path to the directory to output the figures. Directories that
do not currently exist will be automatically created.
    target_mask : numpy.ndarray, optional
        A mask of shape `target.shape` that indicates which values
        should be considered when computing the scores.
    report_gt_feature_n_positives : int, optional
        Default is 50. Do not visualize an ROC curve for a feature with
        fewer than 50 positive examples in `target`.
style : str, optional
Default is "seaborn-colorblind". Specify a style available in
`matplotlib.pyplot.style.available` to use.
fig_title : str, optional
Default is "Feature ROC curves". Set the figure title.
dpi : int, optional
Default is 500. Specify dots per inch (resolution) of the figure.
Returns
-------
None
Outputs the figure in `output_dir`.
"""
os.makedirs(output_dir, exist_ok=True)
import matplotlib
backend = matplotlib.get_backend()
if "inline" not in backend:
matplotlib.use("SVG")
import matplotlib.pyplot as plt
plt.style.use(style)
plt.figure()
n_features = prediction.shape[-1]
for index in range(n_features):
feature_preds = prediction[..., index]
feature_targets = target[..., index]
if target_mask is not None:
feature_mask = target_mask[..., index]
# if mask is n_samples x n_cell_types,
# feature_targets and feature_preds get flattened but that's ok
# b/c each item is a separate sample anyway
feature_targets = feature_targets[feature_mask]
feature_preds = feature_preds[feature_mask]
if len(np.unique(feature_targets)) > 1 and \
np.sum(feature_targets) > report_gt_feature_n_positives:
fpr, tpr, _ = roc_curve(feature_targets, feature_preds)
plt.plot(fpr, tpr, 'r-', color="black", alpha=0.3, lw=1)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
if fig_title:
plt.title(fig_title)
plt.savefig(os.path.join(output_dir, "roc_curves.svg"),
format="svg",
dpi=dpi)
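# Illustrative usage sketch for visualize_roc_curves (array shapes assumed from
# the indexing above; all names and values are hypothetical):
#
#     # prediction = np.random.rand(1000, 10)            # n_samples x n_features
#     # target = np.random.randint(0, 2, (1000, 10))
#     # visualize_roc_curves(prediction, target, "./figs")
#     # -> writes ./figs/roc_curves.svg, one curve per qualifying feature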
def visualize_precision_recall_curves(
prediction,
target,
output_dir,
target_mask=None,
report_gt_feature_n_positives=50,
style="seaborn-colorblind",
fig_title="Feature precision-recall curves",
dpi=500):
"""
Output the precision-recall (PR) curves for each feature predicted by
a model as an SVG.
Parameters
----------
prediction : numpy.ndarray
Value predicted by user model.
target : numpy.ndarray
True value that the user model was trying to predict.
output_dir : str
The path to the directory to output the figures. Directories that
do not currently exist will be automatically created.
    target_mask : numpy.ndarray, optional
        A mask of shape `target.shape` that indicates which values
        should be considered when computing the scores.
    report_gt_feature_n_positives : int, optional
        Default is 50. Do not visualize a PR curve for a feature with
        fewer than 50 positive examples in `target`.
style : str, optional
Default is "seaborn-colorblind". Specify a style available in
`matplotlib.pyplot.style.available` to use.
fig_title : str, optional
Default is "Feature precision-recall curves". Set the figure title.
dpi : int, optional
Default is 500. Specify dots per inch (resolution) of the figure.
Returns
-------
None
Outputs the figure in `output_dir`.
"""
os.makedirs(output_dir, exist_ok=True)
# TODO: fix this
import matplotlib
backend = matplotlib.get_backend()
if "inline" not in backend:
matplotlib.use("SVG")
import matplotlib.pyplot as plt
plt.style.use(style)
plt.figure()
n_features = prediction.shape[-1]
for index in range(n_features):
feature_preds = prediction[..., index]
feature_targets = target[..., index]
if target_mask is not None:
feature_mask = target_mask[..., index]
# if mask is n_samples x n_cell_types,
# feature_targets and feature_preds get flattened but that's ok
# b/c each item is a separate sample anyway
feature_targets = feature_targets[feature_mask]
feature_preds = feature_preds[feature_mask]
if len(np.unique(feature_targets)) > 1 and \
np.sum(feature_targets) > report_gt_feature_n_positives:
precision, recall, _ = precision_recall_curve(
feature_targets, feature_preds)
plt.step(
recall, precision, 'r-',
color="black", alpha=0.3, lw=1, where="post")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
if fig_title:
plt.title(fig_title)
plt.savefig(os.path.join(output_dir, "precision_recall_curves.svg"),
format="svg",
dpi=dpi)
def compute_score(prediction, target, metric_fn, target_mask=None,
report_gt_feature_n_positives=10):
"""
Using a user-specified metric, computes the distance between
two tensors.
Parameters
----------
prediction : numpy.ndarray
Value predicted by user model.
target : numpy.ndarray
True value that the user model was trying to predict.
metric_fn : types.FunctionType
A metric that can measure the distance between the prediction
and target variables.
target_mask: numpy.ndarray, optional
A mask of shape `target.shape` that indicates which values
should be considered when computing the scores.
report_gt_feature_n_positives : int, optional
Default is 10. The minimum number of positive examples for a
feature in order to compute the score for it.
Returns
-------
average_score, feature_scores : tuple(float, numpy.ndarray)
A tuple containing the average of all feature scores, and a
vector containing the scores for each feature. If there were
no features meeting our filtering thresholds, will return
`(None, [])`.
"""
# prediction_shape:
# batch_size*n_batches, n_cell_types, n_features
n_features = prediction.shape[-1]
n_cell_types = prediction.shape[1]
track_scores = np.ones(shape=(n_cell_types,n_features)) * np.nan
for feature_index in range(n_features):
for cell_type_index in range(n_cell_types):
feature_preds = np.ravel(prediction[:, cell_type_index, feature_index])
feature_targets = np.ravel(target[:, cell_type_index, feature_index])
if target_mask is not None:
track_masks_arr = target_mask[:, cell_type_index, feature_index]
# we assume that if track is masked, it is masked for all sequences
track_mask = np.ravel(track_masks_arr)[0]
assert np.all(track_masks_arr==track_mask)
if not track_mask: # track was not measured or is masked
# should put nan into feature_scores:
# feature_scores[cell_type_index,feature_index] = np.nan
# but it's already filled with nans so just continue
continue
if len(np.unique(feature_targets)) > 0 and \
np.count_nonzero(feature_targets) > report_gt_feature_n_positives:
try:
track_scores[cell_type_index,feature_index] = metric_fn(
feature_targets, feature_preds)
except ValueError: # do I need to make this more generic?
continue
# now we compute average score for all features
# if all elements of feature_scores are nans
# following will produce warning and return np.nan,
# which we just ignore
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
average_score = np.nanmean(track_scores)
if np.isnan(average_score):
return None, track_scores
else:
return average_score, track_scores
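# Sketch of the compute_score contract (shapes inferred from the indexing
# above; names hypothetical):
#
#     # prediction/target: (n_batches * batch_size, n_cell_types, n_features)
#     # avg, per_track = compute_score(prediction, target, roc_auc_score)
#     # per_track has shape (n_cell_types, n_features); masked tracks and
#     # tracks with too few positives stay np.nan and are excluded from the
#     # np.nanmean that yields `avg`.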
def get_feature_specific_scores(data,
get_feature_from_index_fn,
get_ct_from_index_fn):
"""
Generates a dictionary mapping feature names to feature scores from
an intermediate representation.
Parameters
----------
data : list(tuple(int, float))
A list of tuples, where each tuple contains a feature's index
and the score for that feature.
get_feature_from_index_fn : types.FunctionType
A function that takes an index (`int`) and returns a feature
name (`str`).
get_ct_from_index_fn : types.FunctionType
A function that takes an index (`int`) and returns a cell type
name (`str`).
Returns
-------
dict
A dictionary mapping feature names (`str`) to scores (`float`).
If there was no score for a feature, its score will be set to
`None`.
"""
feature_score_dict = {}
for index, score in enumerate(data):
feature = get_feature_from_index_fn(index)
if not np.isnan(score):
feature_score_dict[feature] = score
else:
feature_score_dict[feature] = None
return feature_score_dict
def auc_u_test(labels, predictions):
"""
    Outputs the area under the ROC curve associated with a given
    set of labels and the predictions produced by the trained model.
Computed from the U statistic.
Parameters
----------
    labels: numpy.ndarray
        Known labels of the values predicted by the model. Must be
        one-dimensional.
    predictions: numpy.ndarray
        Values predicted by the user model. Must be one-dimensional,
        with the same length as `labels`.
    Returns
    -------
    float
        AUC value for the given label-prediction pairs.
"""
len_pos = int(np.sum(labels))
len_neg = len(labels) - len_pos
rank_sum = np.sum(rankdata(predictions)[labels == 1])
u_value = rank_sum - (len_pos * (len_pos + 1)) / 2
auc = u_value / (len_pos * len_neg)
return auc
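# Hand-checked sketch of the U-statistic identity above (toy values, not from
# the original source):
#
#     # labels = np.array([0, 0, 1, 1]); predictions = np.array([0.1, 0.4, 0.35, 0.8])
#     # rankdata(predictions) -> [1, 3, 2, 4]; positives hold ranks 2 and 4
#     # rank_sum = 6, u_value = 6 - 2 * 3 / 2 = 3, auc = 3 / (2 * 2) = 0.75,
#     # which matches sklearn.metrics.roc_auc_score on the same inputs.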
class PerformanceMetrics(object):
"""
Tracks and calculates metrics to evaluate how closely a model's
predictions match the true values it was designed to predict.
Parameters
----------
get_feature_from_index_fn : types.FunctionType
A function that takes an index (`int`) and returns a feature
name (`str`).
get_ct_from_index_fn : types.FunctionType
A function that takes an index (`int`) and returns a cell type
name (`str`).
report_gt_feature_n_positives : int, optional
Default is 10. The minimum number of positive examples for a
feature in order to compute the score for it.
metrics : dict
A dictionary that maps metric names (`str`) to metric functions.
By default, this contains `"roc_auc"`, which maps to
`sklearn.metrics.roc_auc_score`, and `"average_precision"`,
which maps to `sklearn.metrics.average_precision_score`.
Attributes
----------
skip_threshold : int
The minimum number of positive examples of a feature that must
be included in an update for a metric score to be
calculated for it.
get_feature_from_index : types.FunctionType
A function that takes an index (`int`) and returns a feature
name (`str`).
    get_ct_from_index : types.FunctionType
A function that takes an index (`int`) and returns a cell type
name (`str`).
metrics : dict
A dictionary that maps metric names (`str`) to metric objects
(`Metric`). By default, this contains `"roc_auc"` and
`"average_precision"`.
    metrics_transforms : dict
        A dictionary mapping each metric name to a transformation function,
        which should be applied to the data prior to metric computation.
"""
def __init__(self,
get_feature_from_index_fn,
get_ct_from_index_fn,
report_gt_feature_n_positives=10,
metrics=dict(roc_auc=roc_auc_score, average_precision=average_precision_score),
metrics_transforms=dict(roc_auc=None,
average_precision=None)):
"""
Creates a new object of the `PerformanceMetrics` class.
"""
self.skip_threshold = report_gt_feature_n_positives
self.get_feature_from_index = get_feature_from_index_fn
self.get_ct_from_index = get_ct_from_index_fn
self.metrics = dict()
for k, v in metrics.items():
if k in metrics_transforms:
self.metrics[k] = Metric(fn=v,
transform=metrics_transforms[k],
data=[])
else:
self.metrics[k] = Metric(fn=v,
transform=None,
data=[])
    def add_metric(self, name, metric_fn, transform_function=None):
"""
Begins tracking of the specified metric.
Parameters
----------
name : str
The name of the metric.
metric_fn : types.FunctionType
A metric function.
        transform_function : types.FunctionType, optional
            A transform function which should be applied to the data
            before metric computation. If None, no transform is
            applied.
"""
self.metrics[name] = Metric(fn=metric_fn,
transform=transform_function,
data=[])
def remove_metric(self, name):
"""
Ends the tracking of the specified metric, and returns the
previous scores associated with that metric.
Parameters
----------
name : str
The name of the metric.
Returns
-------
list(float)
The list of feature-specific scores obtained by previous
uses of the specified metric.
"""
data = self.metrics[name].data
del self.metrics[name]
return data
def update(self, prediction, target, target_mask=None):
"""
Evaluates the tracked metrics on a model prediction and its
target value, and adds this to the metric histories.
Parameters
----------
prediction : numpy.ndarray
Value predicted by user model.
target : numpy.ndarray
True value that the user model was trying to predict.
target_mask : numpy.ndarray, optional
A mask of shape `target.shape` that indicates which values
should be considered when computing the scores.
Returns
-------
dict
A dictionary mapping each metric names (`str`) to the
average score of that metric across all features
(`float`).
"""
metric_scores = {}
for name, metric in self.metrics.items():
if metric.transform is not None:
tr_prediction, tr_target, tr_target_mask = metric.transform((prediction, target, target_mask))
else:
tr_prediction, tr_target, tr_target_mask = prediction, target, target_mask
            if tr_target_mask is not None:
                assert tr_prediction.shape == tr_target.shape == tr_target_mask.shape, (
                    "shape of prediction, target and mask are not equal: "
                    + "\n".join(map(str, [tr_prediction.shape, tr_target.shape, tr_target_mask.shape])))
avg_score, track_scores = compute_score(
tr_prediction, tr_target, metric.fn, target_mask=tr_target_mask,
report_gt_feature_n_positives=self.skip_threshold)
metric.data.append(track_scores)
metric_scores[name] = avg_score
return metric_scores
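    # Illustrative update-loop sketch (hypothetical names, not original code):
    #
    #     # metrics = PerformanceMetrics(get_feat_fn, get_ct_fn)
    #     # scores = metrics.update(preds, targets, target_mask=mask)
    #     # scores -> e.g. {"roc_auc": 0.91, "average_precision": 0.47}
    #     # per-track arrays accumulate in metrics.metrics[name].data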
def visualize(self, prediction, target, output_dir, target_mask=None, **kwargs):
"""
Outputs ROC and PR curves. Does not support other metrics
currently.
Parameters
----------
prediction : numpy.ndarray
Value predicted by user model.
target : numpy.ndarray
True value that the user model was trying to predict.
output_dir : str
The path to the directory to output the figures. Directories that
do not currently exist will be automatically created.
**kwargs : dict
Keyword arguments to pass to each visualization function. Each
function accepts the following args:
* style : str - Default is "seaborn-colorblind". Specify a \
style available in \
`matplotlib.pyplot.style.available` to use.
* dpi : int - Default is 500. Specify dots per inch \
(resolution) of the figure.
Returns
-------
None
Outputs figures to `output_dir`.
"""
        print("This function is not consistent with the new transform functions")
raise NotImplementedError
os.makedirs(output_dir, exist_ok=True)
if "roc_auc" in self.metrics:
visualize_roc_curves(
prediction, target, output_dir, target_mask,
report_gt_feature_n_positives=self.skip_threshold,
**kwargs)
if "average_precision" in self.metrics:
visualize_precision_recall_curves(
prediction, target, output_dir, target_mask,
report_gt_feature_n_positives=self.skip_threshold,
**kwargs)
def write_feature_scores_to_file(self, output_path):
"""
Writes each metric's score for each feature to a specified
file.
Parameters
----------
output_path : str
The path to the output file where performance metrics will
be written.
Returns
-------
        pd.DataFrame
            A DataFrame with the columns:
            cell_type, feature, metric_name, value
"""
feature_scores = defaultdict(dict)
full_metrics_results = []
for name, metric in self.metrics.items():
# metric.data contains n_cell_type x n_features array
# of metric value computed for track[i,j]
n_cell_types = metric.data[-1].shape[0]
n_features = metric.data[-1].shape[1]
cell_type_names = [self.get_ct_from_index(ct) for ct in range(n_cell_types)]
feature_names = [self.get_feature_from_index(f) for f in range(n_features)]
metric_df = pd.DataFrame(metric.data[-1],
index=cell_type_names,
columns=feature_names).reset_index()
metric_df = pd.melt(metric_df, id_vars='index').rename(
columns={"index":"cell_type","variable":"feature"}
)
metric_df["metric_name"] = [name]*len(metric_df)
full_metrics_results.append(metric_df)
full_metrics_results = pd.concat(full_metrics_results)
full_metrics_results[["metric_name","cell_type","feature","value"]].to_csv(output_path,
sep="\t", index=False)
return full_metrics_results
| [
"logging.getLogger",
"matplotlib.pyplot.ylabel",
"numpy.count_nonzero",
"numpy.nanmean",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.get_backend",
"matplotlib.pyplot.style.use",
"warnings.simplefilter",
"matplotlib.pyplot.ylim",
"pandas.DataFr... | [((491, 518), 'logging.getLogger', 'logging.getLogger', (['"""selene"""'], {}), "('selene')\n", (508, 518), False, 'import logging\n'), ((530, 579), 'collections.namedtuple', 'namedtuple', (['"""Metric"""', "['fn', 'transform', 'data']"], {}), "('Metric', ['fn', 'transform', 'data'])\n", (540, 579), False, 'from collections import defaultdict, namedtuple\n'), ((2576, 2614), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (2587, 2614), False, 'import os\n'), ((2652, 2676), 'matplotlib.get_backend', 'matplotlib.get_backend', ([], {}), '()\n', (2674, 2676), False, 'import matplotlib\n'), ((2780, 2800), 'matplotlib.pyplot.style.use', 'plt.style.use', (['style'], {}), '(style)\n', (2793, 2800), True, 'import matplotlib.pyplot as plt\n'), ((2805, 2817), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2815, 2817), True, 'import matplotlib.pyplot as plt\n'), ((3638, 3658), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (3646, 3658), True, 'import matplotlib.pyplot as plt\n'), ((3663, 3684), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (3671, 3684), True, 'import matplotlib.pyplot as plt\n'), ((3689, 3722), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (3699, 3722), True, 'import matplotlib.pyplot as plt\n'), ((3727, 3759), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (3737, 3759), True, 'import matplotlib.pyplot as plt\n'), ((5262, 5300), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (5273, 5300), False, 'import os\n'), ((5359, 5383), 'matplotlib.get_backend', 'matplotlib.get_backend', ([], {}), '()\n', (5381, 5383), False, 'import matplotlib\n'), ((5487, 5507), 'matplotlib.pyplot.style.use', 'plt.style.use', (['style'], {}), '(style)\n', (5500, 5507), True, 'import matplotlib.pyplot as plt\n'), ((5512, 5524), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5522, 5524), True, 'import matplotlib.pyplot as plt\n'), ((6440, 6460), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (6448, 6460), True, 'import matplotlib.pyplot as plt\n'), ((6465, 6486), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (6473, 6486), True, 'import matplotlib.pyplot as plt\n'), ((6491, 6511), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {}), "('Recall')\n", (6501, 6511), True, 'import matplotlib.pyplot as plt\n'), ((6516, 6539), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {}), "('Precision')\n", (6526, 6539), True, 'import matplotlib.pyplot as plt\n'), ((9817, 9840), 'numpy.isnan', 'np.isnan', (['average_score'], {}), '(average_score)\n', (9825, 9840), True, 'import numpy as np\n'), ((2717, 2738), 'matplotlib.use', 'matplotlib.use', (['"""SVG"""'], {}), "('SVG')\n", (2731, 2738), False, 'import matplotlib\n'), ((3786, 3806), 'matplotlib.pyplot.title', 'plt.title', (['fig_title'], {}), '(fig_title)\n', (3795, 3806), True, 'import matplotlib.pyplot as plt\n'), ((3823, 3865), 'os.path.join', 'os.path.join', (['output_dir', '"""roc_curves.svg"""'], {}), "(output_dir, 'roc_curves.svg')\n", (3835, 3865), False, 'import os\n'), ((5424, 5445), 'matplotlib.use', 'matplotlib.use', (['"""SVG"""'], {}), "('SVG')\n", (5438, 5445), False, 'import matplotlib\n'), ((6566, 6586), 
'matplotlib.pyplot.title', 'plt.title', (['fig_title'], {}), '(fig_title)\n', (6575, 6586), True, 'import matplotlib.pyplot as plt\n'), ((6603, 6658), 'os.path.join', 'os.path.join', (['output_dir', '"""precision_recall_curves.svg"""'], {}), "(output_dir, 'precision_recall_curves.svg')\n", (6615, 6658), False, 'import os\n'), ((8085, 8126), 'numpy.ones', 'np.ones', ([], {'shape': '(n_cell_types, n_features)'}), '(shape=(n_cell_types, n_features))\n', (8092, 8126), True, 'import numpy as np\n'), ((9669, 9694), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (9692, 9694), False, 'import warnings\n'), ((9704, 9760), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (9725, 9760), False, 'import warnings\n'), ((9785, 9809), 'numpy.nanmean', 'np.nanmean', (['track_scores'], {}), '(track_scores)\n', (9795, 9809), True, 'import numpy as np\n'), ((11759, 11773), 'numpy.sum', 'np.sum', (['labels'], {}), '(labels)\n', (11765, 11773), True, 'import numpy as np\n'), ((19028, 19066), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (19039, 19066), False, 'import os\n'), ((20031, 20048), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (20042, 20048), False, 'from collections import defaultdict, namedtuple\n'), ((21020, 21051), 'pandas.concat', 'pd.concat', (['full_metrics_results'], {}), '(full_metrics_results)\n', (21029, 21051), True, 'import pandas as pd\n'), ((3523, 3564), 'sklearn.metrics.roc_curve', 'roc_curve', (['feature_targets', 'feature_preds'], {}), '(feature_targets, feature_preds)\n', (3532, 3564), False, 'from sklearn.metrics import roc_curve\n'), ((3577, 3633), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr', '"""r-"""'], {'color': '"""black"""', 'alpha': '(0.3)', 'lw': '(1)'}), "(fpr, tpr, 'r-', color='black', alpha=0.3, lw=1)\n", (3585, 3633), True, 'import matplotlib.pyplot as plt\n'), ((6239, 6293), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['feature_targets', 'feature_preds'], {}), '(feature_targets, feature_preds)\n', (6261, 6293), False, 'from sklearn.metrics import precision_recall_curve\n'), ((6323, 6402), 'matplotlib.pyplot.step', 'plt.step', (['recall', 'precision', '"""r-"""'], {'color': '"""black"""', 'alpha': '(0.3)', 'lw': '(1)', 'where': '"""post"""'}), "(recall, precision, 'r-', color='black', alpha=0.3, lw=1, where='post')\n", (6331, 6402), True, 'import matplotlib.pyplot as plt\n'), ((8262, 8317), 'numpy.ravel', 'np.ravel', (['prediction[:, cell_type_index, feature_index]'], {}), '(prediction[:, cell_type_index, feature_index])\n', (8270, 8317), True, 'import numpy as np\n'), ((8348, 8399), 'numpy.ravel', 'np.ravel', (['target[:, cell_type_index, feature_index]'], {}), '(target[:, cell_type_index, feature_index])\n', (8356, 8399), True, 'import numpy as np\n'), ((10992, 11007), 'numpy.isnan', 'np.isnan', (['score'], {}), '(score)\n', (11000, 11007), True, 'import numpy as np\n'), ((11833, 11854), 'scipy.stats.rankdata', 'rankdata', (['predictions'], {}), '(predictions)\n', (11841, 11854), False, 'from scipy.stats import rankdata\n'), ((3440, 3463), 'numpy.sum', 'np.sum', (['feature_targets'], {}), '(feature_targets)\n', (3446, 3463), True, 'import numpy as np\n'), ((6147, 6170), 'numpy.sum', 'np.sum', (['feature_targets'], {}), '(feature_targets)\n', (6153, 6170), True, 'import numpy as np\n'), ((8703, 8740), 'numpy.all', 'np.all', 
(['(track_masks_arr == track_mask)'], {}), '(track_masks_arr == track_mask)\n', (8709, 8740), True, 'import numpy as np\n'), ((3386, 3412), 'numpy.unique', 'np.unique', (['feature_targets'], {}), '(feature_targets)\n', (3395, 3412), True, 'import numpy as np\n'), ((6093, 6119), 'numpy.unique', 'np.unique', (['feature_targets'], {}), '(feature_targets)\n', (6102, 6119), True, 'import numpy as np\n'), ((8651, 8676), 'numpy.ravel', 'np.ravel', (['track_masks_arr'], {}), '(track_masks_arr)\n', (8659, 8676), True, 'import numpy as np\n'), ((9140, 9173), 'numpy.count_nonzero', 'np.count_nonzero', (['feature_targets'], {}), '(feature_targets)\n', (9156, 9173), True, 'import numpy as np\n'), ((20557, 20632), 'pandas.DataFrame', 'pd.DataFrame', (['metric.data[-1]'], {'index': 'cell_type_names', 'columns': 'feature_names'}), '(metric.data[-1], index=cell_type_names, columns=feature_names)\n', (20569, 20632), True, 'import pandas as pd\n'), ((20751, 20786), 'pandas.melt', 'pd.melt', (['metric_df'], {'id_vars': '"""index"""'}), "(metric_df, id_vars='index')\n", (20758, 20786), True, 'import pandas as pd\n'), ((9086, 9112), 'numpy.unique', 'np.unique', (['feature_targets'], {}), '(feature_targets)\n', (9095, 9112), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os, sys, random, time, math
import tensorflow as tf
import cv2
sys.path.append(os.path.realpath('./datasets'))
import mask_rcnn_coco as coco
sys.path.append(os.path.realpath('./Mask_RCNN/mrcnn'))
import utils
import model as modellib
sys.path.append(os.path.realpath('./utils'))
from image_sequence import ImageSequence
tf.app.flags.DEFINE_string('checkpoint_path', None, 'Path to checkpoint file.')
tf.app.flags.DEFINE_string('image_in_path', None, 'Path to input image directory.')
tf.app.flags.DEFINE_string('detections_out_path', None, 'Path to detections output file.')
tf.app.flags.DEFINE_integer('max_frames', 100000, 'Maximum number of frames to log.')
tf.app.flags.DEFINE_integer('stride', 5, 'Interval at which detections are computed.')
FLAGS = tf.app.flags.FLAGS
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
def draw_boxes(frame, boxes):
for bidx, bbox in enumerate(boxes):
iy = int(bbox[0] * frame.shape[0])
ix = int(bbox[1] * frame.shape[1])
h = int((bbox[2] - bbox[0]) * frame.shape[0])
w = int((bbox[3] - bbox[1]) * frame.shape[1])
cv2.rectangle(frame, (ix, iy),
(ix + w, iy + h),
(0, 255, 0), 8)
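# Sketch of the box convention draw_boxes expects, inferred from the scaling
# above: boxes are [y1, x1, y2, x2], normalized to [0, 1]. Values below are
# hypothetical.
#
#     # frame = np.zeros((480, 640, 3), np.uint8)
#     # draw_boxes(frame, [[0.25, 0.25, 0.75, 0.75]])  # centered rectangle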
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(3):
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c],
image[:, :, c])
return image
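# Illustrative sketch of apply_mask semantics (toy values): masked pixels are
# blended toward `color` with weight `alpha`; unmasked pixels are unchanged.
#
#     # img = np.zeros((4, 4, 3), np.float32)
#     # m = np.zeros((4, 4)); m[1:3, 1:3] = 1
#     # apply_mask(img, m, color=(255, 0, 0))  # masked pixels -> 127.5 in channel 0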
def process_detections(detections, mrcnn_mask, image_shape, window):
# How many detections do we have?
# Detections array is padded with zeros. Find the first class_id == 0.
zero_ix = np.where(detections[:, 4] == 0)[0]
N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]
# Extract boxes, class_ids, scores, and class-specific masks
boxes = detections[:N, :4]
class_ids = detections[:N, 4].astype(np.int32)
scores = detections[:N, 5]
masks = mrcnn_mask[np.arange(N), :, :, class_ids]
# Compute scale and shift to translate coordinates to image domain.
h_scale = float(image_shape[0]) / (window[2] - window[0])
w_scale = float(image_shape[1]) / (window[3] - window[1])
scale = min(h_scale, w_scale)
shift = window[:2] # y, x
scales = np.array([scale, scale, scale, scale])
shifts = np.array([shift[0], shift[1], shift[0], shift[1]])
# Translate bounding boxes to image domain
boxes = np.multiply(boxes - shifts, scales).astype(np.int32)
# Filter out detections with zero area. Often only happens in early
# stages of training when the network weights are still a bit random.
exclude_ix = np.where(
(boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]
if exclude_ix.shape[0] > 0:
boxes = np.delete(boxes, exclude_ix, axis=0)
class_ids = np.delete(class_ids, exclude_ix, axis=0)
scores = np.delete(scores, exclude_ix, axis=0)
masks = np.delete(masks, exclude_ix, axis=0)
N = class_ids.shape[0]
return boxes, class_ids, scores, masks
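# Shape sketch for process_detections (sizes hypothetical, layout inferred from
# the indexing above):
#
#     # detections: (max_det, 6) rows of (y1, x1, y2, x2, class_id, score), zero-padded
#     # mrcnn_mask: (max_det, mask_h, mask_w, num_classes)
#     # -> boxes (N, 4) in pixel coordinates, class_ids (N,), scores (N,),
#     #    masks (N, mask_h, mask_w) holding the class-specific mask per detection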
def display_instances(image, boxes, masks, class_ids, class_names,
scores=None):
"""
    boxes: [num_instances, (y1, x1, y2, x2)] in image coordinates.
    masks: [height, width, num_instances]
class_ids: [num_instances]
class_names: list of class names of the dataset
"""
# Number of instances
N = boxes.shape[0]
if N == 0:
return image.copy()
assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
# Show area outside image boundaries.
height, width = image.shape[:2]
masked_image = image.copy()
for i in range(N):
color = (0, 0, 255)
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
cv2.rectangle(masked_image, (x1, y1), (x2, y2), color, 4)
# Label
class_id = class_ids[i]
score = scores[i] if scores is not None else None
label = class_names[class_id]
# Mask
mask = masks[:, :, i]
masked_image = apply_mask(masked_image, mask, color)
return masked_image
class InferenceConfig(coco.CocoConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0.5
def run_model(checkpoint_path, image_in_path, detections_out_path):
config = InferenceConfig()
config.display()
model = modellib.MaskRCNN(mode="inference", model_dir='./logs', config=config)
model.load_weights(checkpoint_path, by_name=True)
s = ImageSequence(image_in_path, '*.png')
count = 0
frame_detections = {}
for frame, name in s:
start = time.time()
molded_images, image_metas, windows = model.mold_inputs([frame])
# Run object detection
detections, mrcnn_class, mrcnn_bbox, mrcnn_mask, \
rois, rpn_class, rpn_bbox = \
model.keras_model.predict([molded_images, image_metas], verbose=0)
zero_ix = np.where(detections[0][:, 4] == 0)[0]
N = zero_ix[0] if zero_ix.shape[0] > 0 else detections[0].shape[0]
# Extract boxes, class_ids, scores, and class-specific masks
boxes, class_ids, scores, masks = process_detections(detections[0],
mrcnn_mask[0],
frame.shape,
windows[0])
print(boxes.shape, class_ids.shape, scores.shape, masks.shape)
boxes = boxes.astype(np.float32)
boxes[:, 0] = boxes[:, 0]/frame.shape[0]
boxes[:, 2] = boxes[:, 2]/frame.shape[0]
boxes[:, 1] = boxes[:, 1]/frame.shape[1]
boxes[:, 3] = boxes[:, 3]/frame.shape[1]
frame_detections[name] = [boxes, class_ids, scores, masks]
print(class_ids)
end = time.time()
print('time', count, end - start)
count = count + 1
if detections_out_path:
np.save(detections_out_path, frame_detections)
run_model(FLAGS.checkpoint_path,
FLAGS.image_in_path,
FLAGS.detections_out_path)
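# Illustrative invocation (flag names from the definitions above; the script
# name and paths are hypothetical):
#
#     # python detect.py --checkpoint_path mask_rcnn_coco.h5 \
#     #     --image_in_path ./frames --detections_out_path detections.npy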
| [
"cv2.rectangle",
"numpy.multiply",
"image_sequence.ImageSequence",
"tensorflow.app.flags.DEFINE_integer",
"numpy.arange",
"numpy.where",
"numpy.delete",
"tensorflow.app.flags.DEFINE_string",
"numpy.any",
"os.path.realpath",
"numpy.array",
"model.MaskRCNN",
"time.time",
"numpy.save"
] | [((460, 539), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""checkpoint_path"""', 'None', '"""Path to checkpoint file."""'], {}), "('checkpoint_path', None, 'Path to checkpoint file.')\n", (486, 539), True, 'import tensorflow as tf\n'), ((540, 627), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""image_in_path"""', 'None', '"""Path to input image directory."""'], {}), "('image_in_path', None,\n 'Path to input image directory.')\n", (566, 627), True, 'import tensorflow as tf\n'), ((624, 718), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""detections_out_path"""', 'None', '"""Path to detections output file."""'], {}), "('detections_out_path', None,\n 'Path to detections output file.')\n", (650, 718), True, 'import tensorflow as tf\n'), ((715, 804), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""max_frames"""', '(100000)', '"""Maximum number of frames to log."""'], {}), "('max_frames', 100000,\n 'Maximum number of frames to log.')\n", (742, 804), True, 'import tensorflow as tf\n'), ((801, 891), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""stride"""', '(5)', '"""Interval at which detections are computed."""'], {}), "('stride', 5,\n 'Interval at which detections are computed.')\n", (828, 891), True, 'import tensorflow as tf\n'), ((216, 246), 'os.path.realpath', 'os.path.realpath', (['"""./datasets"""'], {}), "('./datasets')\n", (232, 246), False, 'import os, sys, random, time, math\n'), ((295, 332), 'os.path.realpath', 'os.path.realpath', (['"""./Mask_RCNN/mrcnn"""'], {}), "('./Mask_RCNN/mrcnn')\n", (311, 332), False, 'import os, sys, random, time, math\n'), ((389, 416), 'os.path.realpath', 'os.path.realpath', (['"""./utils"""'], {}), "('./utils')\n", (405, 416), False, 'import os, sys, random, time, math\n'), ((3724, 3762), 'numpy.array', 'np.array', (['[scale, scale, scale, scale]'], {}), '([scale, scale, scale, scale])\n', (3732, 3762), True, 'import numpy as np\n'), ((3776, 3826), 'numpy.array', 'np.array', (['[shift[0], shift[1], shift[0], shift[1]]'], {}), '([shift[0], shift[1], shift[0], shift[1]])\n', (3784, 3826), True, 'import numpy as np\n'), ((6078, 6148), 'model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'model_dir': '"""./logs"""', 'config': 'config'}), "(mode='inference', model_dir='./logs', config=config)\n", (6095, 6148), True, 'import model as modellib\n'), ((6212, 6249), 'image_sequence.ImageSequence', 'ImageSequence', (['image_in_path', '"""*.png"""'], {}), "(image_in_path, '*.png')\n", (6225, 6249), False, 'from image_sequence import ImageSequence\n'), ((2433, 2497), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(ix, iy)', '(ix + w, iy + h)', '(0, 255, 0)', '(8)'], {}), '(frame, (ix, iy), (ix + w, iy + h), (0, 255, 0), 8)\n', (2446, 2497), False, 'import cv2\n'), ((2712, 2801), 'numpy.where', 'np.where', (['(mask == 1)', '(image[:, :, c] * (1 - alpha) + alpha * color[c])', 'image[:, :, c]'], {}), '(mask == 1, image[:, :, c] * (1 - alpha) + alpha * color[c], image[\n :, :, c])\n', (2720, 2801), True, 'import numpy as np\n'), ((3113, 3144), 'numpy.where', 'np.where', (['(detections[:, 4] == 0)'], {}), '(detections[:, 4] == 0)\n', (3121, 3144), True, 'import numpy as np\n'), ((4104, 4176), 'numpy.where', 'np.where', (['((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)'], {}), '((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)\n', (4112, 4176), True, 'import numpy as np\n'), ((4237, 
4273), 'numpy.delete', 'np.delete', (['boxes', 'exclude_ix'], {'axis': '(0)'}), '(boxes, exclude_ix, axis=0)\n', (4246, 4273), True, 'import numpy as np\n'), ((4294, 4334), 'numpy.delete', 'np.delete', (['class_ids', 'exclude_ix'], {'axis': '(0)'}), '(class_ids, exclude_ix, axis=0)\n', (4303, 4334), True, 'import numpy as np\n'), ((4352, 4389), 'numpy.delete', 'np.delete', (['scores', 'exclude_ix'], {'axis': '(0)'}), '(scores, exclude_ix, axis=0)\n', (4361, 4389), True, 'import numpy as np\n'), ((4406, 4442), 'numpy.delete', 'np.delete', (['masks', 'exclude_ix'], {'axis': '(0)'}), '(masks, exclude_ix, axis=0)\n', (4415, 4442), True, 'import numpy as np\n'), ((5362, 5419), 'cv2.rectangle', 'cv2.rectangle', (['masked_image', '(x1, y1)', '(x2, y2)', 'color', '(4)'], {}), '(masked_image, (x1, y1), (x2, y2), color, 4)\n', (5375, 5419), False, 'import cv2\n'), ((6335, 6346), 'time.time', 'time.time', ([], {}), '()\n', (6344, 6346), False, 'import os, sys, random, time, math\n'), ((7552, 7563), 'time.time', 'time.time', ([], {}), '()\n', (7561, 7563), False, 'import os, sys, random, time, math\n'), ((7669, 7715), 'numpy.save', 'np.save', (['detections_out_path', 'frame_detections'], {}), '(detections_out_path, frame_detections)\n', (7676, 7715), True, 'import numpy as np\n'), ((3418, 3430), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (3427, 3430), True, 'import numpy as np\n'), ((3887, 3922), 'numpy.multiply', 'np.multiply', (['(boxes - shifts)', 'scales'], {}), '(boxes - shifts, scales)\n', (3898, 3922), True, 'import numpy as np\n'), ((5202, 5218), 'numpy.any', 'np.any', (['boxes[i]'], {}), '(boxes[i])\n', (5208, 5218), True, 'import numpy as np\n'), ((6652, 6686), 'numpy.where', 'np.where', (['(detections[0][:, 4] == 0)'], {}), '(detections[0][:, 4] == 0)\n', (6660, 6686), True, 'import numpy as np\n')] |
import torch
from torchvision import datasets, transforms
from utils.sampling import mnist_iid, mnist_iid_normal, mnist_noniid, cifar_iid, mnist_noniid_normal, minmax_dataset, fmnist_iid_normal, fmnist_noniid_normal
import numpy as np
import random
def load_data(args):
# load dataset and split users
if args.dataset == 'mnist':
trans_mnist = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
dataset_train = datasets.MNIST(
'../data/mnist/', train=True, download=True, transform=trans_mnist)
dataset_test = datasets.MNIST(
'../data/mnist/', train=False, download=True, transform=trans_mnist)
# sample users
if args.iid:
dict_users, dataset_train_real = mnist_iid_normal(dataset_train, args.num_users)
else:
dict_users, dataset_train_real = mnist_noniid_normal(dataset_train, args.num_users)
elif args.dataset == 'cifar':
trans_cifar = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
dataset_train = datasets.CIFAR10(
'../data/cifar', train=True, download=True, transform=trans_cifar)
dataset_test = datasets.CIFAR10(
'../data/cifar', train=False, download=True, transform=trans_cifar)
if args.iid:
dict_users, dataset_train_real = cifar_iid(dataset_train, args.num_users)
else:
exit('Error: only consider IID setting in CIFAR10')
elif args.dataset == 'minmax_synthetic':
dataset_train, dataset_test, dict_users, img_size, dataset_train_real = minmax_dataset(args)
elif args.dataset == 'fmnist':
trans_mnist = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.3476,), (0.3568,))])
        dataset_train = datasets.FashionMNIST("../data/fmnist/", train=True, transform=trans_mnist)
        dataset_test = datasets.FashionMNIST("../data/fmnist", train=False, transform=trans_mnist)
labels_train = dataset_train.targets.numpy()
labels_test = dataset_test.targets.numpy()
train_index = np.any([labels_train == 4, labels_train == 6, labels_train == 0], axis=0)
test_index = np.any([labels_test == 4, labels_test == 6, labels_test == 0], axis=0)
dataset_train.data, dataset_train.targets = dataset_train.data[train_index].float()/255., dataset_train.targets[train_index]
dataset_test.data, dataset_test.targets = dataset_test.data[test_index].float()/255., dataset_test.targets[test_index]
        # Shuffle the already-normalized training set; dividing by 255 again
        # here would double-scale the data, so only reindex.
        train_index = list(range(dataset_train.data.shape[0]))
        random.shuffle(train_index)
        dataset_train.data, dataset_train.targets = dataset_train.data[train_index], dataset_train.targets[train_index]
#print(torch.mean(dataset_train.data.float().view(-1)), torch.std(dataset_train.data.float().view(-1)))
#dataset_train.data = dataset_train.data.view(18000,1,28,28)
#dataset_test.data = dataset_test.data.view(3000,1,28,28)
labels_train = dataset_train.targets.numpy()
labels_test = dataset_test.targets.numpy()
        print(labels_train)
        # Remap class labels in place (4 -> 1, 6 -> 2); the arrays returned by
        # .numpy() share memory with the dataset targets, so this updates them.
        for i in range(labels_train.shape[0]):
if labels_train[i]==4:
labels_train[i]=1
elif labels_train[i]==6:
labels_train[i]=2
for i in range(labels_test.shape[0]):
if labels_test[i]==4:
labels_test[i]=1
elif labels_test[i]==6:
labels_test[i]=2
if args.iid:
dict_users, dataset_train_real = fmnist_iid_normal(dataset_train, args.num_users)
else:
dict_users, dataset_train_real = fmnist_noniid_normal(dataset_train, args.num_users)
else:
exit('Error: unrecognized dataset')
img_size = dataset_train[0][0].shape
return dataset_train, dataset_test, dict_users, img_size, dataset_train_real
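# Illustrative usage sketch (the `args` object is hypothetical; attribute names
# are taken from the branches above):
#
#     # args needs .dataset in {'mnist', 'cifar', 'minmax_synthetic', 'fmnist'},
#     # plus .iid (bool) and .num_users (int)
#     # train, test, dict_users, img_size, train_real = load_data(args)
#     # dict_users maps each client id to the indices of its local samples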
| [
"utils.sampling.minmax_dataset",
"random.shuffle",
"utils.sampling.cifar_iid",
"torchvision.datasets.FashionMNIST",
"numpy.any",
"utils.sampling.fmnist_noniid_normal",
"torchvision.datasets.MNIST",
"torchvision.datasets.CIFAR10",
"torchvision.transforms.Normalize",
"utils.sampling.mnist_iid_normal... | [((484, 571), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['"""../data/mnist/"""'], {'train': '(True)', 'download': '(True)', 'transform': 'trans_mnist'}), "('../data/mnist/', train=True, download=True, transform=\n trans_mnist)\n", (498, 571), False, 'from torchvision import datasets, transforms\n'), ((603, 691), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['"""../data/mnist/"""'], {'train': '(False)', 'download': '(True)', 'transform': 'trans_mnist'}), "('../data/mnist/', train=False, download=True, transform=\n trans_mnist)\n", (617, 691), False, 'from torchvision import datasets, transforms\n'), ((789, 836), 'utils.sampling.mnist_iid_normal', 'mnist_iid_normal', (['dataset_train', 'args.num_users'], {}), '(dataset_train, args.num_users)\n', (805, 836), False, 'from utils.sampling import mnist_iid, mnist_iid_normal, mnist_noniid, cifar_iid, mnist_noniid_normal, minmax_dataset, fmnist_iid_normal, fmnist_noniid_normal\n'), ((896, 946), 'utils.sampling.mnist_noniid_normal', 'mnist_noniid_normal', (['dataset_train', 'args.num_users'], {}), '(dataset_train, args.num_users)\n', (915, 946), False, 'from utils.sampling import mnist_iid, mnist_iid_normal, mnist_noniid, cifar_iid, mnist_noniid_normal, minmax_dataset, fmnist_iid_normal, fmnist_noniid_normal\n'), ((1140, 1228), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['"""../data/cifar"""'], {'train': '(True)', 'download': '(True)', 'transform': 'trans_cifar'}), "('../data/cifar', train=True, download=True, transform=\n trans_cifar)\n", (1156, 1228), False, 'from torchvision import datasets, transforms\n'), ((1260, 1349), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['"""../data/cifar"""'], {'train': '(False)', 'download': '(True)', 'transform': 'trans_cifar'}), "('../data/cifar', train=False, download=True, transform=\n trans_cifar)\n", (1276, 1349), False, 'from torchvision import datasets, transforms\n'), ((392, 413), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (411, 413), False, 'from torchvision import datasets, transforms\n'), ((415, 457), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (435, 457), False, 'from torchvision import datasets, transforms\n'), ((1424, 1464), 'utils.sampling.cifar_iid', 'cifar_iid', (['dataset_train', 'args.num_users'], {}), '(dataset_train, args.num_users)\n', (1433, 1464), False, 'from utils.sampling import mnist_iid, mnist_iid_normal, mnist_noniid, cifar_iid, mnist_noniid_normal, minmax_dataset, fmnist_iid_normal, fmnist_noniid_normal\n'), ((1668, 1688), 'utils.sampling.minmax_dataset', 'minmax_dataset', (['args'], {}), '(args)\n', (1682, 1688), False, 'from utils.sampling import mnist_iid, mnist_iid_normal, mnist_noniid, cifar_iid, mnist_noniid_normal, minmax_dataset, fmnist_iid_normal, fmnist_noniid_normal\n'), ((1036, 1057), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1055, 1057), False, 'from torchvision import datasets, transforms\n'), ((1059, 1113), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (1079, 1113), False, 'from torchvision import datasets, transforms\n'), ((1871, 1946), 'torchvision.datasets.FashionMNIST', 'datasets.FashionMNIST', (['"""../data/fmnist/"""'], {'train': '(True)', 'transform': 'trans_mnist'}), "('../data/fmnist/', train=True, transform=trans_mnist)\n", (1892, 1946), False, 'from 
torchvision import datasets, transforms\n'), ((1971, 2046), 'torchvision.datasets.FashionMNIST', 'datasets.FashionMNIST', (['"""../data/fmnist"""'], {'train': '(False)', 'transform': 'trans_mnist'}), "('../data/fmnist', train=False, transform=trans_mnist)\n", (1992, 2046), False, 'from torchvision import datasets, transforms\n'), ((2175, 2248), 'numpy.any', 'np.any', (['[labels_train == 4, labels_train == 6, labels_train == 0]'], {'axis': '(0)'}), '([labels_train == 4, labels_train == 6, labels_train == 0], axis=0)\n', (2181, 2248), True, 'import numpy as np\n'), ((2270, 2340), 'numpy.any', 'np.any', (['[labels_test == 4, labels_test == 6, labels_test == 0]'], {'axis': '(0)'}), '([labels_test == 4, labels_test == 6, labels_test == 0], axis=0)\n', (2276, 2340), True, 'import numpy as np\n'), ((2679, 2706), 'random.shuffle', 'random.shuffle', (['train_index'], {}), '(train_index)\n', (2693, 2706), False, 'import random\n'), ((3655, 3703), 'utils.sampling.fmnist_iid_normal', 'fmnist_iid_normal', (['dataset_train', 'args.num_users'], {}), '(dataset_train, args.num_users)\n', (3672, 3703), False, 'from utils.sampling import mnist_iid, mnist_iid_normal, mnist_noniid, cifar_iid, mnist_noniid_normal, minmax_dataset, fmnist_iid_normal, fmnist_noniid_normal\n'), ((3763, 3814), 'utils.sampling.fmnist_noniid_normal', 'fmnist_noniid_normal', (['dataset_train', 'args.num_users'], {}), '(dataset_train, args.num_users)\n', (3783, 3814), False, 'from utils.sampling import mnist_iid, mnist_iid_normal, mnist_noniid, cifar_iid, mnist_noniid_normal, minmax_dataset, fmnist_iid_normal, fmnist_noniid_normal\n'), ((1779, 1800), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1798, 1800), False, 'from torchvision import datasets, transforms\n'), ((1802, 1844), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.3476,)', '(0.3568,)'], {}), '((0.3476,), (0.3568,))\n', (1822, 1844), False, 'from torchvision import datasets, transforms\n')] |
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for The Walrus quantum functions"""
# pylint: disable=no-self-use,redefined-outer-name
import numpy as np
import pytest
from scipy.linalg import block_diag
from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant
from thewalrus.symplectic import expand, interferometer, two_mode_squeezing, loss, squeezing
from thewalrus.random import random_interferometer
@pytest.mark.parametrize("hbar", [0.1, 0.5, 1, 2, 1.0/137])
def test_cubic_phase(hbar):
"""Test that all the possible ways of obtaining a cubic phase state using the different methods agree"""
mu = np.sqrt(hbar/2.0) * np.array([-0.50047867, 0.37373598, 0.01421683, 0.26999427, 0.04450994, 0.01903583])
cov = (hbar/2.0) * np.array(
[
[1.57884241, 0.81035494, 1.03468307, 1.14908791, 0.09179507, -0.11893174],
[0.81035494, 1.06942863, 0.89359234, 0.20145142, 0.16202296, 0.4578259],
[1.03468307, 0.89359234, 1.87560498, 0.16915661, 1.0836528, -0.09405278],
[1.14908791, 0.20145142, 0.16915661, 2.37765137, -0.93543385, -0.6544286],
[0.09179507, 0.16202296, 1.0836528, -0.93543385, 2.78903152, -0.76519088],
[-0.11893174, 0.4578259, -0.09405278, -0.6544286, -0.76519088, 1.51724222],
]
)
cutoff = 7
# the Fock state measurement of mode 0 to be post-selected
m1 = 1
# the Fock state measurement of mode 1 to be post-selected
m2 = 2
psi = state_vector(mu, cov, post_select={0: m1, 1: m2}, cutoff=cutoff, hbar=hbar)
psi_c = state_vector(mu, cov, cutoff=cutoff, hbar=hbar)[m1, m2, :]
rho = density_matrix(mu, cov, post_select={0: m1, 1: m2}, cutoff=cutoff, hbar=hbar)
rho_c = density_matrix(mu, cov, cutoff=cutoff, hbar=hbar)[m1, m1, m2, m2, :, :]
assert np.allclose(np.outer(psi, psi.conj()), rho)
assert np.allclose(np.outer(psi_c, psi_c.conj()), rho)
assert np.allclose(rho_c, rho)
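# Added sanity-check sketch (not part of the original suite): for the vacuum
# state, the zero-photon probability is 1 and the mean photon number vanishes.
@pytest.mark.parametrize("hbar", [1.0, 2.0])
def test_vacuum_sanity_sketch(hbar):
    """Vacuum state: P(0) = 1 and the first photon-number cumulant is 0."""
    mu = np.zeros(2)
    cov = (hbar / 2) * np.identity(2)
    probs = probabilities(mu, cov, 4, hbar=hbar)
    assert np.allclose(probs[0], 1.0)
    assert np.allclose(photon_number_cumulant(mu, cov, [0], hbar=hbar), 0.0)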
@pytest.mark.parametrize("hbar", [2.0, 1.0/137])
def test_four_modes(hbar):
""" Test that probabilities are correctly updates for a four modes system under loss"""
# All this block is to generate the correct covariance matrix.
# It correnponds to num_modes=4 modes that undergo two mode squeezing between modes i and i + (num_modes / 2).
# Then they undergo displacement.
# The signal and idlers see and interferometer with unitary matrix u2x2.
# And then they see loss by amount etas[i].
num_modes = 4
theta = 0.45
phi = 0.7
u2x2 = np.array([[np.cos(theta / 2), np.exp(1j * phi) * np.sin(theta / 2)],
[-np.exp(-1j * phi) * np.sin(theta / 2), np.cos(theta / 2)]])
u4x4 = block_diag(u2x2, u2x2)
cov = np.identity(2 * num_modes) * hbar / 2
means = 0.5 * np.random.rand(2 * num_modes) * np.sqrt(hbar / 2)
rs = [0.1, 0.9]
n_half = num_modes // 2
for i, r_val in enumerate(rs):
Sexpanded = expand(two_mode_squeezing(r_val, 0.0), [i, n_half + i], num_modes)
cov = Sexpanded @ cov @ (Sexpanded.T)
Su = expand(interferometer(u4x4), range(num_modes), num_modes)
cov = Su @ cov @ (Su.T)
cov_lossless = np.copy(cov)
means_lossless = np.copy(means)
etas = [0.9, 0.7, 0.9, 0.1]
for i, eta in enumerate(etas):
means, cov = loss(means, cov, eta, i, hbar=hbar)
cutoff = 3
probs_lossless = probabilities(means_lossless, cov_lossless, 4 * cutoff, hbar=hbar)
probs = probabilities(means, cov, cutoff, hbar=hbar)
probs_updated = update_probabilities_with_loss(etas, probs_lossless)
assert np.allclose(probs, probs_updated[:cutoff, :cutoff, :cutoff, :cutoff], atol=1e-6)
@pytest.mark.parametrize("hbar", [0.5, 1.0, 1.7, 2.0])
def test_cumulants_three_mode_random_state(hbar): # pylint: disable=too-many-statements
"""Tests third order cumulants for a random state"""
M = 3
O = interferometer(random_interferometer(3))
mu = np.random.rand(2 * M) - 0.5
hbar = 2
cov = 0.5 * hbar * O @ squeezing(np.random.rand(M)) @ O.T
cutoff = 50
probs = probabilities(mu, cov, cutoff, hbar=hbar)
n = np.arange(cutoff)
probs0 = np.sum(probs, axis=(1, 2))
probs1 = np.sum(probs, axis=(0, 2))
probs2 = np.sum(probs, axis=(0, 1))
# Check one body cumulants
n0_1 = n @ probs0
n1_1 = n @ probs1
n2_1 = n @ probs2
assert np.allclose(photon_number_cumulant(mu, cov, [0], hbar=hbar), n0_1)
assert np.allclose(photon_number_cumulant(mu, cov, [1], hbar=hbar), n1_1)
assert np.allclose(photon_number_cumulant(mu, cov, [2], hbar=hbar), n2_1)
n0_2 = n ** 2 @ probs0
n1_2 = n ** 2 @ probs1
n2_2 = n ** 2 @ probs2
var0 = n0_2 - n0_1 ** 2
var1 = n1_2 - n1_1 ** 2
var2 = n2_2 - n2_1 ** 2
assert np.allclose(photon_number_cumulant(mu, cov, [0, 0], hbar=hbar), var0)
assert np.allclose(photon_number_cumulant(mu, cov, [1, 1], hbar=hbar), var1)
assert np.allclose(photon_number_cumulant(mu, cov, [2, 2], hbar=hbar), var2)
n0_3 = n ** 3 @ probs0 - 3 * n0_2 * n0_1 + 2 * n0_1 ** 3
n1_3 = n ** 3 @ probs1 - 3 * n1_2 * n1_1 + 2 * n1_1 ** 3
n2_3 = n ** 3 @ probs2 - 3 * n2_2 * n2_1 + 2 * n2_1 ** 3
assert np.allclose(photon_number_cumulant(mu, cov, [0, 0, 0], hbar=hbar), n0_3)
assert np.allclose(photon_number_cumulant(mu, cov, [1, 1, 1], hbar=hbar), n1_3)
assert np.allclose(photon_number_cumulant(mu, cov, [2, 2, 2], hbar=hbar), n2_3)
# Check two body cumulants
    probs01 = np.sum(probs, axis=2)
    probs02 = np.sum(probs, axis=1)
    probs12 = np.sum(probs, axis=0)
n0n1 = n @ probs01 @ n
n0n2 = n @ probs02 @ n
n1n2 = n @ probs12 @ n
covar01 = n0n1 - n0_1 * n1_1
covar02 = n0n2 - n0_1 * n2_1
covar12 = n1n2 - n1_1 * n2_1
assert np.allclose(photon_number_cumulant(mu, cov, [0, 1], hbar=hbar), covar01)
assert np.allclose(photon_number_cumulant(mu, cov, [0, 2], hbar=hbar), covar02)
assert np.allclose(photon_number_cumulant(mu, cov, [1, 2], hbar=hbar), covar12)
kappa001 = n ** 2 @ probs01 @ n - 2 * n0n1 * n0_1 - n0_2 * n1_1 + 2 * n0_1 ** 2 * n1_1
kappa011 = n @ probs01 @ n ** 2 - 2 * n0n1 * n1_1 - n1_2 * n0_1 + 2 * n1_1 ** 2 * n0_1
kappa002 = n ** 2 @ probs02 @ n - 2 * n0n2 * n0_1 - n0_2 * n2_1 + 2 * n0_1 ** 2 * n2_1
kappa022 = n @ probs02 @ n ** 2 - 2 * n0n2 * n2_1 - n2_2 * n0_1 + 2 * n2_1 ** 2 * n0_1
kappa112 = n ** 2 @ probs12 @ n - 2 * n1n2 * n1_1 - n1_2 * n2_1 + 2 * n1_1 ** 2 * n2_1
kappa122 = n @ probs12 @ n ** 2 - 2 * n1n2 * n2_1 - n2_2 * n1_1 + 2 * n2_1 ** 2 * n1_1
assert np.allclose(photon_number_cumulant(mu, cov, [0, 0, 1], hbar=hbar), kappa001)
assert np.allclose(photon_number_cumulant(mu, cov, [0, 1, 1], hbar=hbar), kappa011)
assert np.allclose(photon_number_cumulant(mu, cov, [0, 0, 2], hbar=hbar), kappa002)
assert np.allclose(photon_number_cumulant(mu, cov, [0, 2, 2], hbar=hbar), kappa022)
assert np.allclose(photon_number_cumulant(mu, cov, [1, 1, 2], hbar=hbar), kappa112)
assert np.allclose(photon_number_cumulant(mu, cov, [1, 2, 2], hbar=hbar), kappa122)
# Finally, the three body cumulant
n0n1n2 = np.einsum("ijk, i, j, k", probs, n, n, n)
kappa012 = n0n1n2 - n0n1 * n2_1 - n0n2 * n1_1 - n1n2 * n0_1 + 2 * n0_1 * n1_1 * n2_1
assert np.allclose(photon_number_cumulant(mu, cov, [0, 1, 2], hbar=hbar), kappa012)
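# Added illustrative check (a sketch, not from the original suite): for a
# two-mode squeezed vacuum, each mode is thermal with nbar = sinh(r)**2 and the
# photon numbers are perfectly correlated, so cov(n0, n1) = nbar * (nbar + 1).
@pytest.mark.parametrize("hbar", [1.0, 2.0])
def test_two_mode_squeezed_cumulants_sketch(hbar):
    """Mean and cross-cumulant of photon numbers in a two-mode squeezed vacuum."""
    r = 0.4
    S = two_mode_squeezing(r, 0.0)
    mu = np.zeros(4)
    cov = (hbar / 2) * S @ S.T
    nbar = np.sinh(r) ** 2
    assert np.allclose(photon_number_cumulant(mu, cov, [0], hbar=hbar), nbar)
    assert np.allclose(photon_number_cumulant(mu, cov, [0, 1], hbar=hbar), nbar * (nbar + 1))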
| [
"numpy.sqrt",
"numpy.random.rand",
"thewalrus.symplectic.two_mode_squeezing",
"numpy.array",
"numpy.einsum",
"numpy.sin",
"thewalrus.symplectic.loss",
"numpy.arange",
"thewalrus.symplectic.interferometer",
"thewalrus.quantum.density_matrix",
"thewalrus.quantum.state_vector",
"numpy.exp",
"nu... | [((1035, 1095), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""hbar"""', '[0.1, 0.5, 1, 2, 1.0 / 137]'], {}), "('hbar', [0.1, 0.5, 1, 2, 1.0 / 137])\n", (1058, 1095), False, 'import pytest\n'), ((2570, 2619), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""hbar"""', '[2.0, 1.0 / 137]'], {}), "('hbar', [2.0, 1.0 / 137])\n", (2593, 2619), False, 'import pytest\n'), ((4280, 4333), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""hbar"""', '[0.5, 1.0, 1.7, 2.0]'], {}), "('hbar', [0.5, 1.0, 1.7, 2.0])\n", (4303, 4333), False, 'import pytest\n'), ((2099, 2178), 'thewalrus.quantum.state_vector', 'state_vector', (['mu', 'cov'], {'post_select': '{(0): m1, (1): m2}', 'cutoff': 'cutoff', 'hbar': 'hbar'}), '(mu, cov, post_select={(0): m1, (1): m2}, cutoff=cutoff, hbar=hbar)\n', (2111, 2178), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((2256, 2342), 'thewalrus.quantum.density_matrix', 'density_matrix', (['mu', 'cov'], {'post_select': '{(0): m1, (1): m2}', 'cutoff': 'cutoff', 'hbar': 'hbar'}), '(mu, cov, post_select={(0): m1, (1): m2}, cutoff=cutoff, hbar\n =hbar)\n', (2270, 2342), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((2543, 2566), 'numpy.allclose', 'np.allclose', (['rho_c', 'rho'], {}), '(rho_c, rho)\n', (2554, 2566), True, 'import numpy as np\n'), ((3305, 3327), 'scipy.linalg.block_diag', 'block_diag', (['u2x2', 'u2x2'], {}), '(u2x2, u2x2)\n', (3315, 3327), False, 'from scipy.linalg import block_diag\n'), ((3777, 3789), 'numpy.copy', 'np.copy', (['cov'], {}), '(cov)\n', (3784, 3789), True, 'import numpy as np\n'), ((3811, 3825), 'numpy.copy', 'np.copy', (['means'], {}), '(means)\n', (3818, 3825), True, 'import numpy as np\n'), ((3988, 4054), 'thewalrus.quantum.probabilities', 'probabilities', (['means_lossless', 'cov_lossless', '(4 * cutoff)'], {'hbar': 'hbar'}), '(means_lossless, cov_lossless, 4 * cutoff, hbar=hbar)\n', (4001, 4054), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((4067, 4111), 'thewalrus.quantum.probabilities', 'probabilities', (['means', 'cov', 'cutoff'], {'hbar': 'hbar'}), '(means, cov, cutoff, hbar=hbar)\n', (4080, 4111), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((4132, 4184), 'thewalrus.quantum.update_probabilities_with_loss', 'update_probabilities_with_loss', (['etas', 'probs_lossless'], {}), '(etas, probs_lossless)\n', (4162, 4184), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((4196, 4282), 'numpy.allclose', 'np.allclose', (['probs', 'probs_updated[:cutoff, :cutoff, :cutoff, :cutoff]'], {'atol': '(1e-06)'}), '(probs, probs_updated[:cutoff, :cutoff, :cutoff, :cutoff], atol=\n 1e-06)\n', (4207, 4282), True, 'import numpy as np\n'), ((4678, 4719), 'thewalrus.quantum.probabilities', 'probabilities', (['mu', 'cov', 'cutoff'], {'hbar': 'hbar'}), '(mu, cov, cutoff, hbar=hbar)\n', (4691, 4719), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((4728, 4745), 'numpy.arange', 'np.arange', (['cutoff'], {}), '(cutoff)\n', (4737, 4745), True, 'import numpy as 
np\n'), ((4759, 4785), 'numpy.sum', 'np.sum', (['probs'], {'axis': '(1, 2)'}), '(probs, axis=(1, 2))\n', (4765, 4785), True, 'import numpy as np\n'), ((4799, 4825), 'numpy.sum', 'np.sum', (['probs'], {'axis': '(0, 2)'}), '(probs, axis=(0, 2))\n', (4805, 4825), True, 'import numpy as np\n'), ((4839, 4865), 'numpy.sum', 'np.sum', (['probs'], {'axis': '(0, 1)'}), '(probs, axis=(0, 1))\n', (4845, 4865), True, 'import numpy as np\n'), ((6089, 6110), 'numpy.sum', 'np.sum', (['probs'], {'axis': '(2)'}), '(probs, axis=2)\n', (6095, 6110), True, 'import numpy as np\n'), ((6127, 6148), 'numpy.sum', 'np.sum', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (6133, 6148), True, 'import numpy as np\n'), ((6165, 6186), 'numpy.sum', 'np.sum', (['probs'], {'axis': '(0)'}), '(probs, axis=0)\n', (6171, 6186), True, 'import numpy as np\n'), ((7752, 7793), 'numpy.einsum', 'np.einsum', (['"""ijk, i, j, k"""', 'probs', 'n', 'n', 'n'], {}), "('ijk, i, j, k', probs, n, n, n)\n", (7761, 7793), True, 'import numpy as np\n'), ((1240, 1259), 'numpy.sqrt', 'np.sqrt', (['(hbar / 2.0)'], {}), '(hbar / 2.0)\n', (1247, 1259), True, 'import numpy as np\n'), ((1260, 1348), 'numpy.array', 'np.array', (['[-0.50047867, 0.37373598, 0.01421683, 0.26999427, 0.04450994, 0.01903583]'], {}), '([-0.50047867, 0.37373598, 0.01421683, 0.26999427, 0.04450994, \n 0.01903583])\n', (1268, 1348), True, 'import numpy as np\n'), ((1368, 1855), 'numpy.array', 'np.array', (['[[1.57884241, 0.81035494, 1.03468307, 1.14908791, 0.09179507, -0.11893174],\n [0.81035494, 1.06942863, 0.89359234, 0.20145142, 0.16202296, 0.4578259],\n [1.03468307, 0.89359234, 1.87560498, 0.16915661, 1.0836528, -0.09405278\n ], [1.14908791, 0.20145142, 0.16915661, 2.37765137, -0.93543385, -\n 0.6544286], [0.09179507, 0.16202296, 1.0836528, -0.93543385, 2.78903152,\n -0.76519088], [-0.11893174, 0.4578259, -0.09405278, -0.6544286, -\n 0.76519088, 1.51724222]]'], {}), '([[1.57884241, 0.81035494, 1.03468307, 1.14908791, 0.09179507, -\n 0.11893174], [0.81035494, 1.06942863, 0.89359234, 0.20145142, \n 0.16202296, 0.4578259], [1.03468307, 0.89359234, 1.87560498, 0.16915661,\n 1.0836528, -0.09405278], [1.14908791, 0.20145142, 0.16915661, \n 2.37765137, -0.93543385, -0.6544286], [0.09179507, 0.16202296, \n 1.0836528, -0.93543385, 2.78903152, -0.76519088], [-0.11893174, \n 0.4578259, -0.09405278, -0.6544286, -0.76519088, 1.51724222]])\n', (1376, 1855), True, 'import numpy as np\n'), ((2187, 2234), 'thewalrus.quantum.state_vector', 'state_vector', (['mu', 'cov'], {'cutoff': 'cutoff', 'hbar': 'hbar'}), '(mu, cov, cutoff=cutoff, hbar=hbar)\n', (2199, 2234), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((2346, 2395), 'thewalrus.quantum.density_matrix', 'density_matrix', (['mu', 'cov'], {'cutoff': 'cutoff', 'hbar': 'hbar'}), '(mu, cov, cutoff=cutoff, hbar=hbar)\n', (2360, 2395), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((3427, 3444), 'numpy.sqrt', 'np.sqrt', (['(hbar / 2)'], {}), '(hbar / 2)\n', (3434, 3444), True, 'import numpy as np\n'), ((3679, 3699), 'thewalrus.symplectic.interferometer', 'interferometer', (['u4x4'], {}), '(u4x4)\n', (3693, 3699), False, 'from thewalrus.symplectic import expand, interferometer, two_mode_squeezing, loss, squeezing\n'), ((3915, 3950), 'thewalrus.symplectic.loss', 'loss', (['means', 'cov', 'eta', 'i'], {'hbar': 'hbar'}), '(means, cov, eta, i, 
hbar=hbar)\n', (3919, 3950), False, 'from thewalrus.symplectic import expand, interferometer, two_mode_squeezing, loss, squeezing\n'), ((4512, 4536), 'thewalrus.random.random_interferometer', 'random_interferometer', (['(3)'], {}), '(3)\n', (4533, 4536), False, 'from thewalrus.random import random_interferometer\n'), ((4547, 4568), 'numpy.random.rand', 'np.random.rand', (['(2 * M)'], {}), '(2 * M)\n', (4561, 4568), True, 'import numpy as np\n'), ((4987, 5034), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[0]'], {'hbar': 'hbar'}), '(mu, cov, [0], hbar=hbar)\n', (5009, 5034), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((5065, 5112), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[1]'], {'hbar': 'hbar'}), '(mu, cov, [1], hbar=hbar)\n', (5087, 5112), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((5143, 5190), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[2]'], {'hbar': 'hbar'}), '(mu, cov, [2], hbar=hbar)\n', (5165, 5190), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((5387, 5437), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[0, 0]'], {'hbar': 'hbar'}), '(mu, cov, [0, 0], hbar=hbar)\n', (5409, 5437), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((5468, 5518), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[1, 1]'], {'hbar': 'hbar'}), '(mu, cov, [1, 1], hbar=hbar)\n', (5490, 5518), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((5549, 5599), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[2, 2]'], {'hbar': 'hbar'}), '(mu, cov, [2, 2], hbar=hbar)\n', (5571, 5599), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((5814, 5867), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[0, 0, 0]'], {'hbar': 'hbar'}), '(mu, cov, [0, 0, 0], hbar=hbar)\n', (5836, 5867), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((5898, 5951), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[1, 1, 1]'], {'hbar': 'hbar'}), '(mu, cov, [1, 1, 1], hbar=hbar)\n', (5920, 5951), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((5982, 6035), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[2, 2, 2]'], {'hbar': 'hbar'}), '(mu, cov, [2, 2, 2], hbar=hbar)\n', (6004, 6035), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((6394, 6444), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[0, 1]'], {'hbar': 'hbar'}), '(mu, cov, [0, 1], hbar=hbar)\n', (6416, 6444), False, 'from thewalrus.quantum import 
density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((6478, 6528), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[0, 2]'], {'hbar': 'hbar'}), '(mu, cov, [0, 2], hbar=hbar)\n', (6500, 6528), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((6562, 6612), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[1, 2]'], {'hbar': 'hbar'}), '(mu, cov, [1, 2], hbar=hbar)\n', (6584, 6612), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((7194, 7247), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[0, 0, 1]'], {'hbar': 'hbar'}), '(mu, cov, [0, 0, 1], hbar=hbar)\n', (7216, 7247), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((7282, 7335), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[0, 1, 1]'], {'hbar': 'hbar'}), '(mu, cov, [0, 1, 1], hbar=hbar)\n', (7304, 7335), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((7370, 7423), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[0, 0, 2]'], {'hbar': 'hbar'}), '(mu, cov, [0, 0, 2], hbar=hbar)\n', (7392, 7423), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((7458, 7511), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[0, 2, 2]'], {'hbar': 'hbar'}), '(mu, cov, [0, 2, 2], hbar=hbar)\n', (7480, 7511), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((7546, 7599), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[1, 1, 2]'], {'hbar': 'hbar'}), '(mu, cov, [1, 1, 2], hbar=hbar)\n', (7568, 7599), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((7634, 7687), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[1, 2, 2]'], {'hbar': 'hbar'}), '(mu, cov, [1, 2, 2], hbar=hbar)\n', (7656, 7687), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((7906, 7959), 'thewalrus.quantum.photon_number_cumulant', 'photon_number_cumulant', (['mu', 'cov', '[0, 1, 2]'], {'hbar': 'hbar'}), '(mu, cov, [0, 1, 2], hbar=hbar)\n', (7928, 7959), False, 'from thewalrus.quantum import density_matrix, state_vector, probabilities, update_probabilities_with_loss, photon_number_cumulant\n'), ((3339, 3365), 'numpy.identity', 'np.identity', (['(2 * num_modes)'], {}), '(2 * num_modes)\n', (3350, 3365), True, 'import numpy as np\n'), ((3395, 3424), 'numpy.random.rand', 'np.random.rand', (['(2 * num_modes)'], {}), '(2 * num_modes)\n', (3409, 3424), True, 'import numpy as np\n'), ((3556, 3586), 'thewalrus.symplectic.two_mode_squeezing', 'two_mode_squeezing', (['r_val', '(0.0)'], {}), '(r_val, 0.0)\n', (3574, 3586), False, 'from thewalrus.symplectic import expand, interferometer, two_mode_squeezing, loss, 
squeezing\n'), ((3153, 3170), 'numpy.cos', 'np.cos', (['(theta / 2)'], {}), '(theta / 2)\n', (3159, 3170), True, 'import numpy as np\n'), ((3272, 3289), 'numpy.cos', 'np.cos', (['(theta / 2)'], {}), '(theta / 2)\n', (3278, 3289), True, 'import numpy as np\n'), ((4625, 4642), 'numpy.random.rand', 'np.random.rand', (['M'], {}), '(M)\n', (4639, 4642), True, 'import numpy as np\n'), ((3172, 3190), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (3178, 3190), True, 'import numpy as np\n'), ((3191, 3208), 'numpy.sin', 'np.sin', (['(theta / 2)'], {}), '(theta / 2)\n', (3197, 3208), True, 'import numpy as np\n'), ((3253, 3270), 'numpy.sin', 'np.sin', (['(theta / 2)'], {}), '(theta / 2)\n', (3259, 3270), True, 'import numpy as np\n'), ((3233, 3252), 'numpy.exp', 'np.exp', (['(-1.0j * phi)'], {}), '(-1.0j * phi)\n', (3239, 3252), True, 'import numpy as np\n')] |
import datetime
import numpy as np
from PIL import Image
from display import LED_Display
from led_clock import LedClock
im1 = Image.open("./imgs/left_smiley.bmp")
im2 = Image.open("./imgs/straight_smiley.bmp")
im3 = Image.open("./imgs/right_smiley.bmp")
im4 = Image.open("./imgs/sun.bmp")
im5 = Image.open("./imgs/line.bmp")
p1 = 1. - np.clip(np.array(im1), 0, 1)
p2 = 1. - np.clip(np.array(im2), 0, 1)
p3 = 1. - np.clip(np.array(im3), 0, 1)
p4 = 1. - np.clip(np.array(im4), 0, 1)
txt_matrix = 1. - np.clip(np.array(im5), 0, 1)
im_array = [p1, p2, p3, p4]
hour_pins = [11, 9, 10, 27, 22, 4]
minute_pins = [26, 19, 13, 15, 14, 17]
second_pins = [21, 20, 16, 12, 7, 25]
bin_clock = LedClock(hour_pins, minute_pins, second_pins)
led_display = LED_Display()
# Next row will scroll text on the display if uncommented
#led_display.scroll_text("<NAME> ", 1, 100)
while True:
bin_clock.display_time()
currtime = datetime.datetime.now()
if currtime.second == 0:
print("in here")
im_idx = np.random.randint(0, len(im_array))
#led_display.scroll_matrix(matrix=im_array[im_idx], sweeps=10, speed=100)
led_display.display_matrix(mat=im_array[im_idx],steps=20)
| [
"led_clock.LedClock",
"PIL.Image.open",
"numpy.array",
"datetime.datetime.now",
"display.LED_Display"
] | [((137, 173), 'PIL.Image.open', 'Image.open', (['"""./imgs/left_smiley.bmp"""'], {}), "('./imgs/left_smiley.bmp')\n", (147, 173), False, 'from PIL import Image\n'), ((181, 221), 'PIL.Image.open', 'Image.open', (['"""./imgs/straight_smiley.bmp"""'], {}), "('./imgs/straight_smiley.bmp')\n", (191, 221), False, 'from PIL import Image\n'), ((229, 266), 'PIL.Image.open', 'Image.open', (['"""./imgs/right_smiley.bmp"""'], {}), "('./imgs/right_smiley.bmp')\n", (239, 266), False, 'from PIL import Image\n'), ((274, 302), 'PIL.Image.open', 'Image.open', (['"""./imgs/sun.bmp"""'], {}), "('./imgs/sun.bmp')\n", (284, 302), False, 'from PIL import Image\n'), ((310, 339), 'PIL.Image.open', 'Image.open', (['"""./imgs/line.bmp"""'], {}), "('./imgs/line.bmp')\n", (320, 339), False, 'from PIL import Image\n'), ((709, 754), 'led_clock.LedClock', 'LedClock', (['hour_pins', 'minute_pins', 'second_pins'], {}), '(hour_pins, minute_pins, second_pins)\n', (717, 754), False, 'from led_clock import LedClock\n'), ((770, 783), 'display.LED_Display', 'LED_Display', ([], {}), '()\n', (781, 783), False, 'from display import LED_Display\n'), ((949, 972), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (970, 972), False, 'import datetime\n'), ((361, 374), 'numpy.array', 'np.array', (['im1'], {}), '(im1)\n', (369, 374), True, 'import numpy as np\n'), ((401, 414), 'numpy.array', 'np.array', (['im2'], {}), '(im2)\n', (409, 414), True, 'import numpy as np\n'), ((441, 454), 'numpy.array', 'np.array', (['im3'], {}), '(im3)\n', (449, 454), True, 'import numpy as np\n'), ((481, 494), 'numpy.array', 'np.array', (['im4'], {}), '(im4)\n', (489, 494), True, 'import numpy as np\n'), ((529, 542), 'numpy.array', 'np.array', (['im5'], {}), '(im5)\n', (537, 542), True, 'import numpy as np\n')] |
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pylab as plt
import sys, os
sys.path.append(os.path.join(os.path.dirname("__file__"), '..', '..'))  # note: "__file__" is quoted (notebook export), so dirname returns '' and the path resolves relative to the working directory
from AI_scientist.util import plot_matrices, make_dir, get_args, Early_Stopping, record_data
from AI_scientist.settings.filepath import variational_model_PATH
from AI_scientist.pytorch.net import Net
from AI_scientist.variational.variational_meta_learning import get_tasks, plot_individual_tasks_bounce
seed = 1
np.random.seed(seed)
# In[ ]:
num_train_tasks = 100
num_test_tasks = 100
input_size = 1
task_id_list = [
# "latent-linear",
# "polynomial-3",
# "Legendre-3",
# "M-sawtooth",
# "M-sin",
# "M-Gaussian",
# "M-tanh",
# "M-softplus",
"bounce-states",
# "bounce-images",
]
task_settings = {
"zdim": 1,
"z_settings": ["Gaussian", (0, 1)],
"num_layers": 1,
"xlim": (-4, 4),
"activation": "softplus",
"input_size": input_size,
"test_size": 0.2,
"num_examples": 2000,
}
tasks_train, tasks_test = get_tasks(task_id_list, num_train_tasks, num_test_tasks, task_settings = task_settings, render = False)
# In[ ]:
plot_individual_tasks_bounce(tasks_train, num_examples_show = 40, num_tasks_show = 9)
# In[3]:
tasks_train, tasks_test = get_tasks(task_id_list, num_train_tasks, num_test_tasks, task_settings = task_settings)
epochs = 1000
for i in range(epochs):
################
#Train with training tasks:
################
for task_key, task in tasks_train.items():
((X_train, y_train), (X_test, y_test)), info = task
################
# Evaluation with testing tasks
################
# In[ ]:
((X_train, y_train), (X_test, y_test)), info = tasks_train['master_tanh_10']
# In[5]:
plt.plot(X_train.data.numpy(), y_train.data.numpy(), ".")
| [
"os.path.dirname",
"AI_scientist.variational.variational_meta_learning.get_tasks",
"AI_scientist.variational.variational_meta_learning.plot_individual_tasks_bounce",
"numpy.random.seed"
] | [((479, 499), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (493, 499), True, 'import numpy as np\n'), ((1001, 1105), 'AI_scientist.variational.variational_meta_learning.get_tasks', 'get_tasks', (['task_id_list', 'num_train_tasks', 'num_test_tasks'], {'task_settings': 'task_settings', 'render': '(False)'}), '(task_id_list, num_train_tasks, num_test_tasks, task_settings=\n task_settings, render=False)\n', (1010, 1105), False, 'from AI_scientist.variational.variational_meta_learning import get_tasks, plot_individual_tasks_bounce\n'), ((1118, 1203), 'AI_scientist.variational.variational_meta_learning.plot_individual_tasks_bounce', 'plot_individual_tasks_bounce', (['tasks_train'], {'num_examples_show': '(40)', 'num_tasks_show': '(9)'}), '(tasks_train, num_examples_show=40,\n num_tasks_show=9)\n', (1146, 1203), False, 'from AI_scientist.variational.variational_meta_learning import get_tasks, plot_individual_tasks_bounce\n'), ((1243, 1333), 'AI_scientist.variational.variational_meta_learning.get_tasks', 'get_tasks', (['task_id_list', 'num_train_tasks', 'num_test_tasks'], {'task_settings': 'task_settings'}), '(task_id_list, num_train_tasks, num_test_tasks, task_settings=\n task_settings)\n', (1252, 1333), False, 'from AI_scientist.variational.variational_meta_learning import get_tasks, plot_individual_tasks_bounce\n'), ((124, 151), 'os.path.dirname', 'os.path.dirname', (['"""__file__"""'], {}), "('__file__')\n", (139, 151), False, 'import sys, os\n')] |
import numpy as np
import tensorflow as tf
from tensorflow import keras
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
def create_model():
model = keras.Sequential(
[
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax),
]
)
return model
def compile_model(model):
model.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
def split_model(model, split_idx):
model1 = keras.Sequential(model.layers[:split_idx])
model2 = keras.Sequential(model.layers[split_idx:])
model1.build(model1.layers[0].input_shape)
model2.build(model2.layers[0].input_shape)
return model1, model2
def main():
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (
test_images,
test_labels,
) = fashion_mnist.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0
# Load and save entire model
try:
model = keras.models.load_model("mnist-fashion-full.h5")
except OSError:
model = create_model()
compile_model(model)
model.fit(train_images, train_labels, epochs=5)
# Strip unnecessary metadata and save
model = keras.Sequential(model.layers)
model.build(model.layers[0].input_shape)
model.save("mnist-fashion-full.h5")
# Make predictions
predictions = model.predict(test_images)
predictions = np.argmax(predictions, axis=1)
model.summary()
print(predictions)
# Load and save split model
try:
model_client = keras.models.load_model("mnist-fashion-client.h5")
model_server = keras.models.load_model("mnist-fashion-server.h5")
except OSError:
model_client, model_server = split_model(model, 2)
model_client.save("mnist-fashion-client.h5")
model_server.save("mnist-fashion-server.h5")
# Make predictions
prev_predictions = predictions
predictions = model_client.predict(test_images)
predictions = model_server.predict(predictions)
predictions = np.argmax(predictions, axis=1)
model_client.summary()
model_server.summary()
print(predictions)
print(
"Same predictions with split model? "
f"{np.all(predictions == prev_predictions)}"
)
# Save client model as tflite model
converter = tf.lite.TFLiteConverter.from_keras_model_file(
"mnist-fashion-client.h5"
)
tflite_model = converter.convert()
open("mnist-fashion-client.tflite", "wb").write(tflite_model)
main()
| [
"tensorflow.lite.TFLiteConverter.from_keras_model_file",
"tensorflow.keras.Sequential",
"tensorflow.compat.v1.logging.set_verbosity",
"numpy.argmax",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.load_model",
"tensorflow.keras.layers.Flatten",
"numpy.all"
] | [((73, 135), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (107, 135), True, 'import tensorflow as tf\n'), ((616, 658), 'tensorflow.keras.Sequential', 'keras.Sequential', (['model.layers[:split_idx]'], {}), '(model.layers[:split_idx])\n', (632, 658), False, 'from tensorflow import keras\n'), ((672, 714), 'tensorflow.keras.Sequential', 'keras.Sequential', (['model.layers[split_idx:]'], {}), '(model.layers[split_idx:])\n', (688, 714), False, 'from tensorflow import keras\n'), ((1607, 1637), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (1616, 1637), True, 'import numpy as np\n'), ((2237, 2267), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(1)'}), '(predictions, axis=1)\n', (2246, 2267), True, 'import numpy as np\n'), ((2518, 2590), 'tensorflow.lite.TFLiteConverter.from_keras_model_file', 'tf.lite.TFLiteConverter.from_keras_model_file', (['"""mnist-fashion-client.h5"""'], {}), "('mnist-fashion-client.h5')\n", (2563, 2590), True, 'import tensorflow as tf\n'), ((1148, 1196), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['"""mnist-fashion-full.h5"""'], {}), "('mnist-fashion-full.h5')\n", (1171, 1196), False, 'from tensorflow import keras\n'), ((1746, 1796), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['"""mnist-fashion-client.h5"""'], {}), "('mnist-fashion-client.h5')\n", (1769, 1796), False, 'from tensorflow import keras\n'), ((1820, 1870), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['"""mnist-fashion-server.h5"""'], {}), "('mnist-fashion-server.h5')\n", (1843, 1870), False, 'from tensorflow import keras\n'), ((210, 252), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {'input_shape': '(28, 28)'}), '(input_shape=(28, 28))\n', (230, 252), False, 'from tensorflow import keras\n'), ((266, 312), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(128)'], {'activation': 'tf.nn.relu'}), '(128, activation=tf.nn.relu)\n', (284, 312), False, 'from tensorflow import keras\n'), ((326, 374), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {'activation': 'tf.nn.softmax'}), '(10, activation=tf.nn.softmax)\n', (344, 374), False, 'from tensorflow import keras\n'), ((1396, 1426), 'tensorflow.keras.Sequential', 'keras.Sequential', (['model.layers'], {}), '(model.layers)\n', (1412, 1426), False, 'from tensorflow import keras\n'), ((2413, 2452), 'numpy.all', 'np.all', (['(predictions == prev_predictions)'], {}), '(predictions == prev_predictions)\n', (2419, 2452), True, 'import numpy as np\n')] |
from model import create_model
from load_data import load_data
from parameters import *
from matplotlib.colors import ListedColormap
from sklearn.metrics import accuracy_score, mean_absolute_error, mean_squared_error
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
import tensorflow as tf  # tf and os are used below; they may also be re-exported by `from parameters import *`
def inverse_transform_sequence_scaling(y_test, y_pred):
for i in range (0, len(y_test)):
scaler = data["scalers_test"][i]
        y_pred[i] = scaler.inverse_transform(y_pred[i].reshape(-1, 1))[0][0]
y_test[i] = scaler.inverse_transform(y_test[i].reshape(-1, 1))[0][0]
return y_test, y_pred
# def plot_graph(model, data):
# y_test = data["y_test"]
# X_test = data["X_test"]
# y_pred = model.predict(X_test)
# if 'last_sequence_scaler' in data:
# y_test, y_pred = inverse_transform_sequence_scaling(y_test, y_pred)
# y_test = np.squeeze(data["column_scaler"][TARGET].inverse_transform(np.expand_dims(y_test, axis=0)))
# y_pred = np.squeeze(data["column_scaler"]
# [TARGET].inverse_transform(y_pred))
# plt.plot(y_test, c='b')
# plt.plot(y_pred, c='r')
# plt.xlabel("Days")
# plt.ylabel("Price")
# plt.legend(["Actual Price", "Predicted Price"])
# plt.show()
def mae_pred_and_test(model, data):
y_test = data["y_test"]
X_test = data["X_test"]
y_pred = model.predict(X_test)
return mean_absolute_error(y_test, y_pred)
def get_accuracy_buy_hold_sell(model, data, plot=False):
y_test = data["y_test"]
X_test = data["X_test"]
y_pred = model.predict(X_test)
m = tf.keras.metrics.CategoricalAccuracy()
m.update_state(y_test, y_pred)
if plot:
# fig, ax = plt.subplots()
fig, ax = plt.subplots(4, sharex='all',
gridspec_kw=dict(height_ratios=[4, 1,1,1]))
category_test = np.array([tf.math.argmax(x).numpy() for x in y_test])
category_pred = np.array([tf.math.argmax(x).numpy() for x in y_pred])
matches = (category_test == category_pred).astype(int)
# .shift(LOOKUP_STEP)
price = data['df'][TARGET].values[-(len(category_test) +
LOOKUP_STEP):-LOOKUP_STEP]
ax[0].plot(price, 'k')
ax[1].imshow(category_test[None, :], cmap=ListedColormap(["green", "yellow", "red"]), aspect='auto',
vmin=0.0, vmax=2.0)
ax[2].imshow(category_pred[None, :], cmap=ListedColormap(["green", "yellow", "red"]), aspect='auto',
vmin=0.0, vmax=2.0)
ax[3].imshow(matches[None, :], cmap=ListedColormap(["red", "green"]), aspect='auto',
vmin=0.0, vmax=2.0)
plt.xlabel("Days")
plt.legend(["Actual Price", "Predicted Price"])
# fig.show()
plt.subplots_adjust(hspace=0)
plt.show()
return m.result().numpy()
def get_accuracy_and_plot(model, data, plot=False):
y_test = data["y_test"]
X_test = data["X_test"]
y_pred = model.predict(X_test)
if 'last_sequence_scaler' in data:
y_test, y_pred = inverse_transform_sequence_scaling(y_test, y_pred)
y_test = np.squeeze(data["column_scaler"][TARGET].inverse_transform(np.expand_dims(y_test, axis=0)))
y_pred = np.squeeze(data["column_scaler"][TARGET].inverse_transform(y_pred))
if plot:
plt.plot(y_test, c='b')
plt.plot(y_pred, c='r')
plt.xlabel("Days")
plt.ylabel("Price")
plt.legend(["Actual Price", "Predicted Price"])
plt.show()
y_pred = list(map(lambda current, future: int(float(future) > float(current)), y_test[:-LOOKUP_STEP], y_pred[LOOKUP_STEP:]))
y_test = list(map(lambda current, future: int(float(future) > float(current)), y_test[:-LOOKUP_STEP], y_test[LOOKUP_STEP:]))
    # convert prices to up/down direction labels and score directional accuracy
    return accuracy_score(y_test, y_pred)
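# Toy illustration of the up/down relabelling above (a hedged sketch, not
# called anywhere; the numbers are made up): with step=1 the prices
# [10, 11, 9] become direction labels [1, 0] -- rose 10->11, fell 11->9.
def _direction_labels_example(prices=(10.0, 11.0, 9.0), step=1):
    return list(map(lambda cur, fut: int(float(fut) > float(cur)), prices[:-step], prices[step:]))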
def predict(model, data):
# retrieve the last sequence from data
last_sequence = data["last_sequence"]
# last_sequence = data["last_sequence"][:N_STEPS]
column_scaler = data["column_scaler"]
last_sequence = np.expand_dims(last_sequence, axis=0)
# get the prediction (scaled from 0 to 1)
prediction = model.predict(last_sequence)
# get the price (by inverting the scaling)
if 'last_sequence_scaler' in data:
prediction = data["last_sequence_scaler"].inverse_transform(
prediction[0][0].reshape(-1, 1))
predicted_price = column_scaler[TARGET].inverse_transform(prediction)[0][0]
return predicted_price
def predict_category(model, data):
last_sequence = data["last_sequence"]
last_sequence = np.expand_dims(last_sequence, axis=0)
prediction = model.predict(last_sequence)
labels = ['buy', 'hold', 'sell']
return labels[tf.math.argmax(prediction[0]).numpy()]
# load the data
data = load_data(test_files, n_steps=N_STEPS, lookup_step=LOOKUP_STEP, test_size_in_days=TEST_SIZE_IN_DAYS,
feature_columns=FEATURE_COLUMNS, stat_columns=STAT_COLUMNS, target=TARGET, shuffle=False)
# construct the model
model = create_model(N_STEPS, N_FEATURES, loss=LOSS, optimizer=OPTIMIZER)
model_path = os.path.join("results", model_name) + ".h5"
model.load_weights(model_path)
print(f"Counts: ", data["y_test"].sum(axis=0))
# evaluate the model
loss, acc = model.evaluate(data["X_test"], data["y_test"])
print(f"Loss: {loss:.4f}, Accuracy: {acc:.4f}")
# calculate the mean absolute error (inverse scaling)
# mae_inverted = data["column_scaler"][TARGET].inverse_transform(mae.reshape(1, -1))[0][0]
# mae_pred_test = mae_pred_and_test(model, data)
# # predict the future price
# future_price = predict(model, data)
# print(f"Fteuture price after {LOOKUP_STEP} days is {future_price:.2f}$")
# accuracy = get_accuracy_and_plot(model, data, len(sys.argv)>1)*100.0
accuracy = get_accuracy_buy_hold_sell(model, data, len(sys.argv) > 1)*100.0
print(f"Accuracy Score: {accuracy:.2f}%",)
prediction = predict_category(model, data)
print(f"Prediction in the next", LOOKUP_STEP, "days:", prediction)
# show_plot = sys.argv[1]
# if show_plot:
# plot_graph(model, data)
| [
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.colors.ListedColormap",
"load_data.load_data",
"numpy.expand_dims",
"model.create_model",
"sklearn.metrics.mean_absolute_error",
"sklearn.metrics.accuracy_score",
"m... | [((4904, 5102), 'load_data.load_data', 'load_data', (['test_files'], {'n_steps': 'N_STEPS', 'lookup_step': 'LOOKUP_STEP', 'test_size_in_days': 'TEST_SIZE_IN_DAYS', 'feature_columns': 'FEATURE_COLUMNS', 'stat_columns': 'STAT_COLUMNS', 'target': 'TARGET', 'shuffle': '(False)'}), '(test_files, n_steps=N_STEPS, lookup_step=LOOKUP_STEP,\n test_size_in_days=TEST_SIZE_IN_DAYS, feature_columns=FEATURE_COLUMNS,\n stat_columns=STAT_COLUMNS, target=TARGET, shuffle=False)\n', (4913, 5102), False, 'from load_data import load_data\n'), ((5143, 5208), 'model.create_model', 'create_model', (['N_STEPS', 'N_FEATURES'], {'loss': 'LOSS', 'optimizer': 'OPTIMIZER'}), '(N_STEPS, N_FEATURES, loss=LOSS, optimizer=OPTIMIZER)\n', (5155, 5208), False, 'from model import create_model\n'), ((1472, 1507), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (1491, 1507), False, 'from sklearn.metrics import accuracy_score, mean_absolute_error, mean_squared_error\n'), ((3896, 3926), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (3910, 3926), False, 'from sklearn.metrics import accuracy_score, mean_absolute_error, mean_squared_error\n'), ((4161, 4198), 'numpy.expand_dims', 'np.expand_dims', (['last_sequence'], {'axis': '(0)'}), '(last_sequence, axis=0)\n', (4175, 4198), True, 'import numpy as np\n'), ((4700, 4737), 'numpy.expand_dims', 'np.expand_dims', (['last_sequence'], {'axis': '(0)'}), '(last_sequence, axis=0)\n', (4714, 4737), True, 'import numpy as np\n'), ((2781, 2799), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Days"""'], {}), "('Days')\n", (2791, 2799), True, 'import matplotlib.pyplot as plt\n'), ((2808, 2855), 'matplotlib.pyplot.legend', 'plt.legend', (["['Actual Price', 'Predicted Price']"], {}), "(['Actual Price', 'Predicted Price'])\n", (2818, 2855), True, 'import matplotlib.pyplot as plt\n'), ((2886, 2915), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0)'}), '(hspace=0)\n', (2905, 2915), True, 'import matplotlib.pyplot as plt\n'), ((2924, 2934), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2932, 2934), True, 'import matplotlib.pyplot as plt\n'), ((3438, 3461), 'matplotlib.pyplot.plot', 'plt.plot', (['y_test'], {'c': '"""b"""'}), "(y_test, c='b')\n", (3446, 3461), True, 'import matplotlib.pyplot as plt\n'), ((3470, 3493), 'matplotlib.pyplot.plot', 'plt.plot', (['y_pred'], {'c': '"""r"""'}), "(y_pred, c='r')\n", (3478, 3493), True, 'import matplotlib.pyplot as plt\n'), ((3502, 3520), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Days"""'], {}), "('Days')\n", (3512, 3520), True, 'import matplotlib.pyplot as plt\n'), ((3529, 3548), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (3539, 3548), True, 'import matplotlib.pyplot as plt\n'), ((3557, 3604), 'matplotlib.pyplot.legend', 'plt.legend', (["['Actual Price', 'Predicted Price']"], {}), "(['Actual Price', 'Predicted Price'])\n", (3567, 3604), True, 'import matplotlib.pyplot as plt\n'), ((3613, 3623), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3621, 3623), True, 'import matplotlib.pyplot as plt\n'), ((3302, 3332), 'numpy.expand_dims', 'np.expand_dims', (['y_test'], {'axis': '(0)'}), '(y_test, axis=0)\n', (3316, 3332), True, 'import numpy as np\n'), ((2387, 2429), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['green', 'yellow', 'red']"], {}), "(['green', 'yellow', 'red'])\n", (2401, 2429), False, 'from matplotlib.colors import 
ListedColormap\n'), ((2538, 2580), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['green', 'yellow', 'red']"], {}), "(['green', 'yellow', 'red'])\n", (2552, 2580), False, 'from matplotlib.colors import ListedColormap\n'), ((2682, 2714), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['red', 'green']"], {}), "(['red', 'green'])\n", (2696, 2714), False, 'from matplotlib.colors import ListedColormap\n')] |
import rasterio as rio
import matplotlib.pyplot as plt
from sklearn import preprocessing
import numpy as np
raw_image = rio.open("notebooks/data/OSBS_IFAS.contrib.105_hyperspectral.tif").read()
for x in raw_image.reshape(raw_image.shape[0], np.prod(raw_image.shape[1:])).T:
plt.plot(x)
plt.plot(raw_image.mean(axis=(1,2)))
np.testing.assert_almost_equal(raw_image.reshape(369,12).T[0], raw_image[:,0,0])
data = raw_image.reshape(raw_image.shape[0], np.prod(raw_image.shape[1:])).T
norm_data = preprocessing.minmax_scale(data,axis=1).T
raw_image_norm = norm_data.reshape(raw_image.shape)
plt.close('all')
plt.figure()  # plt.Figure() only builds a detached Figure object; plt.figure() opens a new pyplot-managed figure
for x in raw_image_norm.reshape(raw_image.shape[0], np.prod(raw_image.shape[1:])).T:
plt.plot(x)
plt.show()
| [
"numpy.prod",
"rasterio.open",
"matplotlib.pyplot.Figure",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"sklearn.preprocessing.minmax_scale",
"matplotlib.pyplot.show"
] | [((593, 609), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (602, 609), True, 'import matplotlib.pyplot as plt\n'), ((611, 623), 'matplotlib.pyplot.Figure', 'plt.Figure', ([], {}), '()\n', (621, 623), True, 'import matplotlib.pyplot as plt\n'), ((725, 735), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (733, 735), True, 'import matplotlib.pyplot as plt\n'), ((279, 290), 'matplotlib.pyplot.plot', 'plt.plot', (['x'], {}), '(x)\n', (287, 290), True, 'import matplotlib.pyplot as plt\n'), ((499, 539), 'sklearn.preprocessing.minmax_scale', 'preprocessing.minmax_scale', (['data'], {'axis': '(1)'}), '(data, axis=1)\n', (525, 539), False, 'from sklearn import preprocessing\n'), ((713, 724), 'matplotlib.pyplot.plot', 'plt.plot', (['x'], {}), '(x)\n', (721, 724), True, 'import matplotlib.pyplot as plt\n'), ((121, 187), 'rasterio.open', 'rio.open', (['"""notebooks/data/OSBS_IFAS.contrib.105_hyperspectral.tif"""'], {}), "('notebooks/data/OSBS_IFAS.contrib.105_hyperspectral.tif')\n", (129, 187), True, 'import rasterio as rio\n'), ((242, 270), 'numpy.prod', 'np.prod', (['raw_image.shape[1:]'], {}), '(raw_image.shape[1:])\n', (249, 270), True, 'import numpy as np\n'), ((455, 483), 'numpy.prod', 'np.prod', (['raw_image.shape[1:]'], {}), '(raw_image.shape[1:])\n', (462, 483), True, 'import numpy as np\n'), ((676, 704), 'numpy.prod', 'np.prod', (['raw_image.shape[1:]'], {}), '(raw_image.shape[1:])\n', (683, 704), True, 'import numpy as np\n')] |
from PIL import Image
import numpy
image_path = "0001/0001_c1s1_001051_00.jpg"
original_im = Image.open("/home/zzd/Market/pytorch/query/" + image_path)
original_im = original_im.resize((128,256))
attack_im = Image.open("../attack_query/pytorch/query/" + image_path)
diff = numpy.array(original_im, dtype=float) - numpy.array(attack_im, dtype=float)
# move to 128 for show
diff += 128
diff = Image.fromarray( numpy.uint8(diff))
im_save = Image.new('RGB',(128*3, 256))
im_save.paste( original_im, (0,0))
im_save.paste( diff, (128,0))
im_save.paste( attack_im, (256,0))
im_save.save('vis_noise.jpg')
| [
"numpy.uint8",
"PIL.Image.new",
"numpy.array",
"PIL.Image.open"
] | [((96, 154), 'PIL.Image.open', 'Image.open', (["('/home/zzd/Market/pytorch/query/' + image_path)"], {}), "('/home/zzd/Market/pytorch/query/' + image_path)\n", (106, 154), False, 'from PIL import Image\n'), ((212, 269), 'PIL.Image.open', 'Image.open', (["('../attack_query/pytorch/query/' + image_path)"], {}), "('../attack_query/pytorch/query/' + image_path)\n", (222, 269), False, 'from PIL import Image\n'), ((444, 476), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(128 * 3, 256)'], {}), "('RGB', (128 * 3, 256))\n", (453, 476), False, 'from PIL import Image\n'), ((278, 315), 'numpy.array', 'numpy.array', (['original_im'], {'dtype': 'float'}), '(original_im, dtype=float)\n', (289, 315), False, 'import numpy\n'), ((318, 353), 'numpy.array', 'numpy.array', (['attack_im'], {'dtype': 'float'}), '(attack_im, dtype=float)\n', (329, 353), False, 'import numpy\n'), ((414, 431), 'numpy.uint8', 'numpy.uint8', (['diff'], {}), '(diff)\n', (425, 431), False, 'import numpy\n')] |
import numpy as np
from UTILS import config, performance_metrics, utils
def cross_validation_score(method, dataset, num_folds=10):
""" Cross-validation """
scores = np.zeros((num_folds, ))
for i_fold in range(num_folds):
# shuffle and re-split the data between training and test
dataset.shuffle_and_split()
# check model type: feature_based or non_feature_based
if method.type == 'feature_based':
if method.paradigm == 'stl':
perf = list()
for k in dataset.datasets:
method.fit(dataset.data['train']['x'][k],
y=dataset.data['train']['y'][k],
cat_point=dataset.cat_point)
y_pred = method.predict(dataset.data['test']['x'][k])
y_true = dataset.testRatings[k]
if method.output_shape == 'array':
y_pred = utils.predMatrix(y_true, y_pred) # move back to rating matrix
# dict to save performance metrics for the t-th task
perf.append(performance_metrics.rmse(y_pred, y_true))
elif method.paradigm == 'mtl':
method.fit(dataset.data['train']['x'],
dataset.data['train']['y'],
cat_point=dataset.cat_point)
y_pred = method.predict(dataset.data['train']['x'])
perf = list()
for k in dataset.datasets:
y_true_k = dataset.testRatings[k]
y_pred_k = utils.predMatrix(y_true_k, y_pred[k]) # move back to ratings matrix
perf.append(performance_metrics.rmse(y_pred_k, y_true_k))
else:
raise ValueError('Unknown paradigm: {}'.format(method.paradigm))
elif method.type == 'non_feature_based':
if method.paradigm == 'stl':
perf = list()
for k in dataset.datasets:
method.fit(dataset.trainRatings[k])
y_pred_k = method.predict(dataset.testRatings[k])
y_true_k = dataset.testRatings[k]
if method.output_shape == 'array':
y_pred_k = utils.predMatrix(y_true_k, y_pred_k) # move back to rating matrix
# store results to dict of all performances
perf.append(performance_metrics.rmse(y_pred_k, y_true_k))
elif method.paradigm == 'mtl':
method.fit(dataset.trainRatings)
y_pred = method.predict(dataset.testRatings)
perf = list()
for k in dataset.datasets:
y_true_k = dataset.testRatings[k]
if method.output_shape == 'array':
y_pred_k = utils.predMatrix(y_true_k, y_pred[k]) # move back to rating matrix
perf.append(performance_metrics.rmse(y_pred_k, y_true_k))
else:
raise ValueError('Unknown paradigm: {}'.format(method.paradigm))
else:
raise ValueError('Unknown type %s' % (method.type))
perf = np.array(perf)
scores[i_fold] = np.mean(perf)
return scores.mean()
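# The `method` argument is duck-typed; reconstructed from the branches above,
# a compatible object exposes roughly this interface (an illustrative sketch,
# not an actual class shipped with this project):
class _MethodInterface:
    type = 'feature_based'    # or 'non_feature_based'
    paradigm = 'stl'          # or 'mtl'
    output_shape = 'matrix'   # 'array' routes predictions through utils.predMatrix
    def fit(self, x, y=None, cat_point=None):
        raise NotImplementedError
    def predict(self, x):
        raise NotImplementedError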
| [
"UTILS.performance_metrics.rmse",
"numpy.mean",
"numpy.array",
"numpy.zeros",
"UTILS.utils.predMatrix"
] | [((183, 205), 'numpy.zeros', 'np.zeros', (['(num_folds,)'], {}), '((num_folds,))\n', (191, 205), True, 'import numpy as np\n'), ((3268, 3282), 'numpy.array', 'np.array', (['perf'], {}), '(perf)\n', (3276, 3282), True, 'import numpy as np\n'), ((3309, 3322), 'numpy.mean', 'np.mean', (['perf'], {}), '(perf)\n', (3316, 3322), True, 'import numpy as np\n'), ((986, 1018), 'UTILS.utils.predMatrix', 'utils.predMatrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1002, 1018), False, 'from UTILS import config, performance_metrics, utils\n'), ((1156, 1196), 'UTILS.performance_metrics.rmse', 'performance_metrics.rmse', (['y_pred', 'y_true'], {}), '(y_pred, y_true)\n', (1180, 1196), False, 'from UTILS import config, performance_metrics, utils\n'), ((1642, 1679), 'UTILS.utils.predMatrix', 'utils.predMatrix', (['y_true_k', 'y_pred[k]'], {}), '(y_true_k, y_pred[k])\n', (1658, 1679), False, 'from UTILS import config, performance_metrics, utils\n'), ((1744, 1788), 'UTILS.performance_metrics.rmse', 'performance_metrics.rmse', (['y_pred_k', 'y_true_k'], {}), '(y_pred_k, y_true_k)\n', (1768, 1788), False, 'from UTILS import config, performance_metrics, utils\n'), ((2333, 2369), 'UTILS.utils.predMatrix', 'utils.predMatrix', (['y_true_k', 'y_pred_k'], {}), '(y_true_k, y_pred_k)\n', (2349, 2369), False, 'from UTILS import config, performance_metrics, utils\n'), ((2498, 2542), 'UTILS.performance_metrics.rmse', 'performance_metrics.rmse', (['y_pred_k', 'y_true_k'], {}), '(y_pred_k, y_true_k)\n', (2522, 2542), False, 'from UTILS import config, performance_metrics, utils\n'), ((2922, 2959), 'UTILS.utils.predMatrix', 'utils.predMatrix', (['y_true_k', 'y_pred[k]'], {}), '(y_true_k, y_pred[k])\n', (2938, 2959), False, 'from UTILS import config, performance_metrics, utils\n'), ((3023, 3067), 'UTILS.performance_metrics.rmse', 'performance_metrics.rmse', (['y_pred_k', 'y_true_k'], {}), '(y_pred_k, y_true_k)\n', (3047, 3067), False, 'from UTILS import config, performance_metrics, utils\n')] |
# - * - encoding : utf - 8 - * -
# pylint: disable=fixme, line-too-long
"""
Autoregressive Integrated Moving Average (ARIMA) model.
:copyright: 2017-2019 H2O.ai, Inc.
:license: Apache License Version 2.0 (see LICENSE for details)
"""
import numpy as np
class ARIMA(object):
""" Autoregressive integrated moving average
Parameters
----------
p : int
AR size
d : int
differencing order
q : int
MA size
double_precision : bool, optional
use double precision, by default False
Attributes
----------
    phi_ : array-like, shape (p,)
        AR coefficients
    theta_ : array-like, shape (q,)
        MA coefficients
"""
def __init__(self, p, d, q, double_precision=False):
self.p = p
self.d = d
self.q = q
self.dtype = np.float64 if double_precision else np.float32
self._lib = self._load_lib()
self.phi_ = np.empty(p, dtype=self.dtype)
self.theta_ = np.empty(q, dtype=self.dtype)
def _load_lib(self):
from ..libs.lib_utils import GPUlib
gpu_lib = GPUlib().get(1)
return gpu_lib
def fit(self, y, maxiter=20):
"""Fit ARIMA model
Parameters
----------
y : array-like
data to fit into ARIMA model
maxiter : int, optional
number of iterations, by default 20
"""
assert isinstance(y, np.ndarray)
assert len(y.shape) == 1
if self.dtype == np.float32:
self._lib.arima_fit_float(self.p, self.d, self.q,
np.flipud(y.astype(self.dtype)),
y.shape[0], self.theta_,
self.phi_, maxiter)
else:
self._lib.arima_fit_double(self.p, self.d, self.q,
np.flipud(y.astype(self.dtype)),
y.shape[0], self.theta_,
self.phi_, maxiter)
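# Illustrative usage (a sketch): constructing ARIMA already loads the native
# h2o4gpu GPU library through GPUlib, so this block only runs where that
# shared library is installed.
if __name__ == "__main__":
    ts = np.random.rand(200)
    model = ARIMA(p=2, d=0, q=1)
    model.fit(ts, maxiter=50)
    print(model.phi_, model.theta_)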
| [
"numpy.empty"
] | [((934, 963), 'numpy.empty', 'np.empty', (['p'], {'dtype': 'self.dtype'}), '(p, dtype=self.dtype)\n', (942, 963), True, 'import numpy as np\n'), ((986, 1015), 'numpy.empty', 'np.empty', (['q'], {'dtype': 'self.dtype'}), '(q, dtype=self.dtype)\n', (994, 1015), True, 'import numpy as np\n')] |
'''
author: <NAME> || @slothfulwave612
Python module for i/o operations on the dataset.
'''
## import necessary packages/modules
import os
import numpy as np
import pandas as pd
from pandas.io.json import json_normalize  ## deprecated in newer pandas; use "from pandas import json_normalize" there
import json
import math
import multiprocessing
from tqdm.auto import tqdm, trange
import statsmodels.api as sm
def get_competition(path):
'''
Function for getting data about all the competitions.
Argument:
path -- str, path to competition.json file.
Returns:
comp_df -- pandas dataframe, all competition data.
'''
## load the json file
comp_data = json.load(open(path))
## make pandas dataframe
comp_df = pd.DataFrame(comp_data)
return comp_df
def flatten_json(sub_str):
'''
Function to take out values from nested dictionary present in
the json file, so to make a representable dataframe.
---> This piece of code was found on stackoverflow <--
Argument:
sub_str -- substructure defined in the json file.
Returns:
flattened out information.
'''
out = {}
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
i = 0
for a in x:
flatten(a, name + str(i) + '_')
i += 1
else:
out[name[:-1]] = x
flatten(sub_str)
return out
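## quick illustrative self-check of the flattening scheme above: nested dict
## keys are joined with '_' and list items get their index appended.
assert flatten_json({'a': {'b': 1}, 'c': [2, 3]}) == {'a_b': 1, 'c_0': 2, 'c_1': 3}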
def get_matches(comp_id, season_id, path):
'''
Function for getting match-data for a given competition
Arguments:
comp_id -- int, the competition id.
season_id -- int, the season id.
path -- str, path to .json file containing match data.
Returns:
match_df -- pandas dataframe, containing all the matches
'''
## loading up the data from json file
match_data = json.load(open(path, encoding='utf8'))
## flattening the json file
match_flatten = [flatten_json(x) for x in match_data]
## creating a dataframe
match_df = pd.DataFrame(match_flatten)
match_df_cols = list(match_df.columns)
## renaming the dataframe
for i in range(len(match_df_cols)):
if match_df_cols[i].count('away_team') == 2:
## for away_team columns
match_df_cols[i] = match_df_cols[i][len('away_team_'):]
elif match_df_cols[i].count('_0') == 1:
## for _0 columns
match_df_cols[i] = match_df_cols[i].replace('_0', '')
elif match_df_cols[i].count('competition') == 2:
## for competition columns
match_df_cols[i] = match_df_cols[i][len('competition_'):]
elif match_df_cols[i].count('home_team') == 2:
## for away_team columns
match_df_cols[i] = match_df_cols[i][len('home_team_'):]
elif match_df_cols[i].count('season') == 2:
## for away_team columns
match_df_cols[i] = match_df_cols[i][len('season_'):]
match_df.columns = match_df_cols
return match_df
def make_event_df(match_id, path):
'''
Function for making event dataframe.
Argument:
match_id -- int, the required match id for which event data will be constructed.
path -- str, path to .json file containing event data.
Returns:
df -- pandas dataframe, the event dataframe for the particular match.
'''
## read in the json file
event_json = json.load(open(path, encoding='utf-8'))
## normalize the json data
df = json_normalize(event_json, sep='_')
return df
def full_season_events(match_df, match_ids, path, comp_name=None, leave=True, shot="basic"):
'''
Function to make event dataframe for a full season.
Arguments:
match_df -- pandas dataframe, containing match-data.
        match_ids -- list, list of match ids.
path -- str, path to directory where .json file is listed.
e.g. '../input/Statsbomb/data/events'
comp_name -- str, competition name + season name, default: None.
leave -- keeps all traces of the progressbar upon termination of iteration.
Returns:
event_df -- pandas dataframe, containing event data for the whole season.
'''
## init an empty dataframe
event_df = pd.DataFrame()
if comp_name == None:
t = match_ids
else:
t = tqdm(match_ids, desc=f'Grabbing data for {comp_name}', position=0, leave=leave)
for match_id in t:
## .json file
temp_path = path + f'/{match_id}.json'
temp_df = make_event_df(match_id, temp_path)
event_df = pd.concat([event_df, temp_df], sort=False)
if shot == "basic":
return event_df.loc[event_df['type_name'] == 'Shot']
elif shot == "intermediate":
return intermediate_dataset(event_df)
elif shot == "advance":
return intermediate_dataset(event_df, adv=True)
def multiple_season_event_df(comp_name, comp_id, season_ids, path_match, path_season, shot):
'''
Function for making event dataframe having multile seasons
for the same competition.
Arguments:
comp_name -- str, competition name + season
comp_id -- int, competition id.
season_ids -- list, list containing season ids.
path_match -- str, path to .json file containing match data.
path_season -- str, path to directory where .json file is listed.
e.g. '../input/Statsbomb/data/events'
Returns:
event_df -- pandas dataframe, containing event of multiple seasons.
'''
## init an empty dataframe
event_df = pd.DataFrame()
## making the event-dataframe
for season_id in tqdm(season_ids, desc=f'Grabbing data for {comp_name}', leave=True):
## add season id to path-match
team_path_match = path_match + f'/{comp_id}/{season_id}.json'
## make a match dataframe for a particular season
match_df = get_matches(comp_id, season_id, team_path_match)
## list all the match ids
match_ids = list(match_df['match_id'].unique())
comp_name_ = match_df['competition_name'].unique()[0] + '-' + match_df['season_name'].unique()[0]
## create the event dataframe for the whole season
temp_df = full_season_events(match_df, match_ids, path_season, comp_name=comp_name_, leave=False, shot=shot)
## add competition
temp_df["comp_name"] = comp_name_
## concat the dataframes
event_df = pd.concat([event_df, temp_df], sort=False)
## make final dataframe
event_df = event_df.reset_index(drop=True)
return event_df
def goal(value):
'''
Function to output 1: if goal or 0: otherwise.
Arguments:
value -- str, shot-outcome-name.
Returns:
0 or 1 -- 0 means no goal 1 means goal.
'''
if value == 'Goal':
return 1
else:
return 0
def body_part(value):
'''
Function to output: Head -- if it is a header,
Foot -- if it is right/left foot,
Other -- if any other body part
'''
if value == "Left Foot" or value == "Right Foot":
return "Foot"
else:
return value
def change_dims(old_value, old_min, old_max, new_min, new_max):
'''
Function for changing the coordinates to our pitch dimensions.
Arguments:
old_value, old_min, old_max, new_min, new_max -- float values.
Returns:
new_value -- float value(the coordinate value either x or y).
'''
## calculate the value
new_value = ( (old_value - old_min) / (old_max - old_min) ) * (new_max - new_min) + new_min
return new_value
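## illustrative check of the rescaling above: the midpoint of the StatsBomb
## x-axis (60 on 0-120) maps to the midpoint of the pitch used here (52 on 0-104).
assert change_dims(60, 0, 120, 0, 104) == 52.0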
def coordinates_x(value):
'''
    Return the x coordinate rescaled to the 0-104 pitch.
'''
value_x = change_dims(value[0], 0, 120, 0, 104)
return value_x
def coordinates_y(value):
'''
    Return the y coordinate, vertically flipped and rescaled to the 0-68 pitch.
    '''
    value_y = change_dims(80 - value[1], 0, 80, 0, 68)
return value_y
def distance_bw_coordinates(x1, y1, x2=104.0, y2=34.0):
'''
Function for calculating the distance between shot location
and the goal post.
Arguments:
x1, y1 -- float, the x and y coordinate for shot location.
x2, y2 -- float, the x and y coordinate for the goal post location.(default for Statsbomb defined goal-post)
'''
diff_sqr_x = (x2 - x1)**2
diff_sqr_y = (y2 - y1)**2
    distance = math.sqrt(diff_sqr_x + diff_sqr_y)   ## Euclidean distance
return distance
def post_angle(x, y, g1_x=104, g1_y=30.34, g2_x=104, g2_y=37.66):
'''
Function to calculate the post angle.
Arguments:
x -- float, x coordinate from where the shot was taken.
y -- float, y coordinate from where the shot was taken.
g1 and g2 are the coordinates of the two woodwork, default values
specifying the woodwork coordinate for Statsbomb data.
Returns:
angle -- float, the angle in degrees.
'''
if x == 104 and (30.34 <= y <= 37.66):
return 180
if x == 104 and (y > 37.66 or y < 30.34):
return 0
## calculating the three sides of the triangle.
A_dis = distance_bw_coordinates(x, y, g1_x, g1_y)
B_dis = distance_bw_coordinates(x, y, g2_x, g2_y)
C_dis = distance_bw_coordinates(g1_x, g1_y, g2_x, g2_y)
## using cosine law
    value = ((A_dis**2) + (B_dis**2) - (C_dis**2)) / (2 * A_dis * B_dis)
    value = np.clip(value, -1.0, 1.0)  ## guard against tiny floating-point overshoot outside arccos's domain
    angle = np.degrees(np.arccos(value))
return angle
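## illustrative checks (values computed from the two functions above): a shot
## from the penalty spot (93, 34) sees the goal mouth at roughly 36.8 degrees,
## and a shot on the line between the posts sees the full 180.
assert round(post_angle(93.0, 34.0), 1) == 36.8
assert post_angle(104.0, 34.0) == 180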
def create_result_df(df, length, col):
'''
Function to create a result dataframe(statsbomb_xg vs predicted_xg).
Arguments:
df -- pandas dataframe.
        length -- int, number of top players (by goal count) to include.
col -- str, column name for predicted xG value.
Returns:
result -- pandas dataframe containing statsbomb_xg and predicted_xg as columns.
'''
## fetch all the player names
players = df.loc[df['target'] == 1, 'player_name'].value_counts()[:length].index
## init a dictionary
result_dict = {
'player_name': [],
'shots': [],
'goals': [],
'statsbomb_xg': [],
'predicted_xg': []
}
## calculate required values
for player in players:
## total number of shots taken by a player
shots = len(df.loc[(df['player_name'] == player)])
## total number of goals scored by a player
goals = len(df.loc[
(df['player_name'] == player) &
(df['target'] == 1)
])
## aggregated statsbomb-xG-value for a player
stats_xg = df.loc[
(df['player_name'] == player),
'shot_statsbomb_xg'
].sum()
## aggregated predicted-xG-value for a player
pred_xg = df.loc[
(df['player_name'] == player),
col
].sum()
## append result to result_dict
result_dict['player_name'].append(player)
result_dict['shots'].append(shots)
result_dict['goals'].append(goals)
result_dict['statsbomb_xg'].append(stats_xg)
result_dict['predicted_xg'].append(pred_xg)
## create pandas dataframe
result = pd.DataFrame(result_dict).sort_values(by='goals', ascending=False).reset_index(drop=True)
return result
def get_indices(width, height, xpartition, ypartition, xinput, yinput):
"""
Function to get the indices for grid.
Args:
width (float): width of the pitch.
height (float): height of the pitch.
xpartition (int): number of rows in a grid
ypartition (int): number of colimns in a grid.
xinput (float): x-coordinate location.
yinput (float): y-coordinate location.
Returns:
tuple: containing indices for the grid.
"""
## calculate number of partitions in x and y
x_step = width / xpartition
y_step = height / ypartition
## calculate x and y values
x = math.ceil((xinput if xinput > 0 else 0.5) / x_step) # handle border cases as well
y = math.ceil((yinput if yinput > 0 else 0.5) / y_step) # handle border cases as well
return (
ypartition - y, x - 1
)
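## illustrative check: on a 104x68 pitch split into 16 columns x 12 rows,
## the centre spot (52, 34) lands in grid cell (row 6, column 7).
assert get_indices(104, 68, 16, 12, 52, 34) == (6, 7)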
def get_stats(x_val, y_val):
"""
    Function to train a logistic-regression model using the statsmodels API.
    Args:
        x_val (pandas.DataFrame): containing features.
        y_val (numpy.ndarray): containing targets.
    Returns:
        statsmodels fitted Logit results (call .summary() for a report).
"""
## train logistic model
log_reg = sm.Logit(y_val, x_val).fit(maxiter=1000)
return log_reg
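## illustrative fit on synthetic data (a hedged sketch; the variables below
## are placeholders, and a real design matrix would come from the shot features):
if __name__ == '__main__':
    _rng = np.random.RandomState(0)
    _x = sm.add_constant(_rng.rand(200, 2))
    _y = (_rng.rand(200) > 0.5).astype(int)
    print(get_stats(_x, _y).summary())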
def make_df(df, cols, rows=25):
"""
Function to make the required dataframe.
Args:
        df (pandas.DataFrame): the input dataframe.
cols (list): the required columns.
rows (int, optional): First rows. Defaults to 25.
"""
## fetch columns
df = df[cols]
## a new dataframe
new_df = df.groupby(by="player_name").sum().reset_index().sort_values("target", ascending=False).reset_index(drop=True)
## rename target column
new_df = new_df.rename({"target": "goals_scored"}, axis=1)
## fetch first few rows
first_few = new_df.head(rows)
return first_few
def area(x1, y1, x2, y2, x3, y3):
"""
    Function to calculate the area of a triangle (shoelace formula).
Args:
float: coordinates for triangle vertices.
Returns:
float: area of the triangle.
"""
return abs((x1 * (y2 - y3) + x2 * (y3 - y1)
+ x3 * (y1 - y2)) / 2.0)
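## illustrative check of the shoelace formula: a right triangle with legs 4 and 3 has area 6.
assert area(0, 0, 4, 0, 0, 3) == 6.0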
def is_inside(player_coord_x, player_coord_y, shot_location_x, shot_location_y, pole_1_x=104.0, pole_1_y=30.34, pole_2_x=104.0, pole_2_y=37.66):
"""
Function to return whether player is between the player taking shot and goal.
Args:
player_coord_x (float): player-coordinate-x.
player_coord_y (float): player-coordinate-y.
shot_location_x (float): shot-coordinate-x.
shot_location_y (float): shot-coordinate-y.
pole_1_x (float, optional): goal-post(1) coordinate x. Defaults to 104.0.
pole_1_y (float, optional): goal-post(1) coordinate y. Defaults to 30.34.
pole_2_x (float, optional): goal-post(2) coordinate x. Defaults to 104.0.
        pole_2_y (float, optional): goal-post(2) coordinate y. Defaults to 37.66.
Returns:
bool: True if present else False.
"""
# calculate area of triangle ABC
A = area(shot_location_x, shot_location_y, pole_1_x, pole_1_y, pole_2_x, pole_2_y)
# calculate area of triangle PBC
A1 = area(player_coord_x, player_coord_y, pole_1_x, pole_1_y, pole_2_x, pole_2_y)
# calculate area of triangle PAC
A2 = area(player_coord_x, player_coord_y, shot_location_x, shot_location_y, pole_2_x, pole_2_y)
# calculate area of triangle PAB
A3 = area(player_coord_x, player_coord_y, shot_location_x, shot_location_y, pole_1_x, pole_1_y)
# check if sum of A1, A2 and A3
# is same as A
if round(A,2) == round(A1 + A2 + A3, 2):
return True
else:
return False
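## illustrative check: a defender at (100, 34) stands directly between a shot
## taken from (92, 34) and the goal mouth, so the containment test returns True.
assert is_inside(100.0, 34.0, 92.0, 34.0)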
def freeze_frame_vars(freeze_frame, shot_location_x, shot_location_y):
"""
Function for making freeze frame variables.
Args:
freeze_frame (list): containing tracking information.
shot_location_x (float): shot coordinate location x.
shot_location_y (float): shot coordinate location y.
Returns:
float values: 1. number of teammates between goal and shot-location.
2. number of opponents(excluding goalkeeper) between goal and shot-location.
3. goalkeeper covering angle.
4. distance between goalkeeper and the goal.
5. distance between goalkeeper and the shot-location.
"""
## init two variable to 0
count_teammate, count_opponent, goal_keeper_angle, dis_goal_keeper, dis_shot_keeper = 0, 0, 0, 0, 0
## traverse the freeze frame
for frame in freeze_frame:
## fetch coodinate location of the players
x_coord = coordinates_x(frame["location"])
y_coord = coordinates_y(frame["location"])
## fetch player's position
position = frame["position"]["name"]
if position != "Goalkeeper":
if frame["teammate"] == True and is_inside(x_coord, y_coord, shot_location_x, shot_location_y):
count_teammate += 1
elif frame["teammate"] == False and is_inside(x_coord, y_coord, shot_location_x, shot_location_y):
count_opponent += 1
else:
## compute goalkeeper covering angle
goal_keeper_angle = post_angle(x_coord, y_coord)
## compute distance between goalkeeper and goal
dis_goal_keeper = distance_bw_coordinates(x_coord, y_coord)
## compute distance between goalkeeper and shot-location
dis_shot_keeper = distance_bw_coordinates(x_coord, y_coord, shot_location_x, shot_location_y)
return count_teammate, count_opponent, goal_keeper_angle, dis_goal_keeper, dis_shot_keeper
def simple_dataset(comp_name, comp_id, season_ids, path_season, path_match, path_save, filename):
'''
Function to make a dataset for our simple-xG-model.
The dataset will have:
1. x and y location,
2. Statsbomb-xG,
3. Player Name,
4. Shot Type Name,
5. Body Part
6. Goal or No-Goal.
    Arguments:
        comp_name -- str, name of the competition.
        comp_id -- id of the competition in the data provider's scheme.
        season_ids -- list of season ids to export.
        path_season -- str, path to the directory where event files are saved.
        path_match -- str, path to the directory where the match data file is stored for each competition.
        path_save -- str, path to the directory where the shot dataframe will be saved.
        filename -- str, name of the output pickle file.
    '''
## get event-dataframe
event_df = multiple_season_event_df(comp_name, comp_id, season_ids, path_match, path_season, shot="basic")
## col-list
col_list = ['location', 'shot_statsbomb_xg', 'player_name', "comp_name", 'shot_outcome_name', 'shot_body_part_name', 'shot_type_name']
## shot-dataframe from event-dataframe
shot_df = event_df.loc[:, col_list]
## create body part column
shot_df['body_part'] = shot_df['shot_body_part_name'].apply(body_part)
## create target column - 2 classes - goal and no goal
shot_df['target'] = shot_df['shot_outcome_name'].apply(goal)
## drop shot_outcome_name and shot_body_part_name column
shot_df.drop(['shot_outcome_name', 'shot_body_part_name'], axis=1, inplace=True)
## filter out shots from penalties, corners and Kick Off
shot_df = shot_df.loc[
(shot_df["shot_type_name"] != "Penalty") &
(shot_df["shot_type_name"] != "Corner") &
(shot_df["shot_type_name"] != "Kick Off")
]
## add x and y coordinate columns
shot_df['x'] = shot_df['location'].apply(coordinates_x)
shot_df['y'] = shot_df['location'].apply(coordinates_y)
## drop location column
shot_df.drop('location', inplace=True, axis=1)
## save the dataset
shot_df.to_pickle(f'{path_save}/{filename}')
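# Hedged call sketch (commented out; the competition/season ids and paths
# below are hypothetical placeholders, not values from this project):
# simple_dataset(
#     comp_name="La_Liga", comp_id=11, season_ids=[1, 2],
#     path_season="data/events", path_match="data/matches",
#     path_save="data/shots", filename="La_Liga_shots.pkl"
# )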
def intermediate_dataset(df, adv=False):
"""
Function for making dataframe for intermediate model(containing shots info).
Args:
df (pandas.DataFrame): required dataframe.
        adv (bool, optional): whether to prepare the advanced dataset. Defaults to False.
Returns:
pandas.DataFrame: dataframe for intermediate model
"""
## init an empty dictionary
if adv == True:
main_dict = {
'x' : [], 'y': [],
"shot_type_name": [], "shot_body_part_name": [],
"player_name": [], "shot_statsbomb_xg": [],
"pass_type": [], "open_goal": [],
"under_pressure": [], "deflected": [], "player_in_between": [],
"goal_keeper_angle": [], "target": []
}
else:
main_dict = {
'x' : [], 'y': [],
"shot_type_name": [], "shot_body_part_name": [],
"player_name": [], "shot_statsbomb_xg": [],
"pass_type": [], "open_goal": [],
"under_pressure": [], "deflected": [], "target": []
}
## fetch shots from the dataframe
shot_df = df.loc[
df["type_name"] == "Shot"
].copy()
## fetch key-pass and assists from the dataframe
try:
pass_df = df.loc[
(df["pass_shot_assist"] == True) |
(df["pass_goal_assist"] == True)
].copy().set_index("id")
except KeyError:
pass_df = df.loc[
(df["pass_shot_assist"] == True)
].copy().set_index("id")
for _, data in shot_df.iterrows():
## ignore shots from penalties, corners and Kick Off
if (data["shot_type_name"] == "Penalty") or\
(data["shot_type_name"] == "Corner") or\
(data["shot_type_name"] == "Kick Off"):
continue
## fetch shot location
location = data["location"]
## get x and y coordinates
x = coordinates_x(location)
y = coordinates_y(location)
if adv == True:
## fetch freeze frame
freeze_frame = data["shot_freeze_frame"]
## calculate freeze-frame-variables
count_teammate, count_opponent, goal_keeper_angle, dis_goal_keeper, dis_shot_keeper = freeze_frame_vars(
freeze_frame, x, y
)
## append info to main-dict for advanced features
main_dict["player_in_between"].append(count_teammate + count_opponent)
main_dict["goal_keeper_angle"].append(goal_keeper_angle)
## fetch shot_type_name
shot_type_name = data["shot_type_name"]
## fetch shot_outcome_name
if data["shot_outcome_name"] == "Goal":
target = 1
else:
target = 0
## fetch shot_body_part_name
if data["shot_body_part_name"] == "Right Foot":
body_part = "Foot"
elif data["shot_body_part_name"] == "Left Foot":
body_part = "Foot"
else:
body_part = data["shot_body_part_name"]
## fetch player name
player_name = data["player_name"]
## fetch statsbomb xG
stats_xg = data["shot_statsbomb_xg"]
try:
## fetch open_goal
if pd.isna(data["shot_open_goal"]):
open_goal = 0
else:
open_goal = 1
except Exception:
open_goal = 0
## fetch under-pressure
if pd.isna(data["under_pressure"]):
pressure = 0
elif data["under_pressure"] == True:
pressure = 1
## fetch deflected
        try:
            if pd.isna(data["shot_deflected"]):
                deflected = 0
            elif data["shot_deflected"] == True:
                deflected = 1
            else:
                deflected = 0
        except Exception:
            deflected = 0
## is-assisted by a pass or not
if pd.isna(data["shot_key_pass_id"]):
pass_type = "Not Assisted"
else:
## fetch key pass id
key_pass_id = data["shot_key_pass_id"]
## fetch data-row of the key pass
temp_data = pass_df.loc[key_pass_id]
## init pass_type
pass_type = ""
## fetch through balls
try:
if temp_data["pass_technique_name"] == "Through Ball":
pass_type = "Through Ball"
except Exception:
pass
## fetch cutbacks
try:
if temp_data["pass_cut_back"] == True:
pass_type = "Cut Back"
except Exception:
pass
## fetch cross
try:
if temp_data["pass_cross"] == True:
pass_type = "Cross"
except Exception:
pass
if pass_type == "":
# fetch pass_type_name
if temp_data["pass_type_name"] == "Corner":
pass_type = "From Corner"
elif temp_data["pass_type_name"] == "Free Kick":
pass_type = "From Free Kick"
else:
pass_type = "Other"
## append to dict
main_dict['x'].append(x)
main_dict['y'].append(y)
main_dict["shot_type_name"].append(shot_type_name)
main_dict["shot_body_part_name"].append(body_part)
main_dict["player_name"].append(player_name)
main_dict["shot_statsbomb_xg"].append(stats_xg)
main_dict["pass_type"].append(pass_type)
main_dict["open_goal"].append(open_goal)
main_dict["under_pressure"].append(pressure)
main_dict["deflected"].append(deflected)
main_dict["target"].append(target)
return pd.DataFrame(main_dict)
def make_train_test(path, path_save):
'''
Function for making and saving train and test data.
    Arguments:
path -- str, path where the shot data is stored.
path_save -- str, path where the data will be stored.
'''
## load in all the datasets
ucl_data = pd.read_pickle(path+'/Champions_League_shots.pkl')
fawsl_data = pd.read_pickle(path+'/FA_Women\'s_Super_League_shots.pkl')
menwc_data = pd.read_pickle(path+'/FIFA_World_Cup_shots.pkl')
ll_data = pd.read_pickle(path+'/La_Liga_shots.pkl')
nwsl_data = pd.read_pickle(path+'/NWSL_shots.pkl')
pl_data = pd.read_pickle(path+'/Premier_League_shots.pkl')
wwc_data = pd.read_pickle(path+'/Women\'s_World_Cup_shots.pkl')
## make train dataframe
train_df = pd.concat(
[
ll_data,
ucl_data,
menwc_data,
pl_data,
nwsl_data
]
)
## make test dataframe
test_df = pd.concat(
[
fawsl_data,
wwc_data
]
)
## randomly shuffle both the datasets
train_df = train_df.sample(frac=1).reset_index(drop=True)
test_df = test_df.sample(frac=1).reset_index(drop=True)
## check for directory
    if not os.path.isdir(path_save):
## make directory
os.mkdir(path_save)
## save train dataframe
train_df.to_pickle(path_save+'/train_df.pkl')
## save test dataframe
test_df.to_pickle(path_save+'/test_df.pkl') | [
"pandas.read_pickle",
"math.ceil",
"numpy.arccos",
"math.sqrt",
"os.path.isdir",
"os.mkdir",
"tqdm.auto.tqdm",
"pandas.DataFrame",
"statsmodels.api.Logit",
"pandas.isna",
"pandas.concat",
"pandas.io.json.json_normalize"
] | [((683, 706), 'pandas.DataFrame', 'pd.DataFrame', (['comp_data'], {}), '(comp_data)\n', (695, 706), True, 'import pandas as pd\n'), ((2077, 2104), 'pandas.DataFrame', 'pd.DataFrame', (['match_flatten'], {}), '(match_flatten)\n', (2089, 2104), True, 'import pandas as pd\n'), ((3605, 3640), 'pandas.io.json.json_normalize', 'json_normalize', (['event_json'], {'sep': '"""_"""'}), "(event_json, sep='_')\n", (3619, 3640), False, 'from pandas.io.json import json_normalize\n'), ((4375, 4389), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4387, 4389), True, 'import pandas as pd\n'), ((5725, 5739), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5737, 5739), True, 'import pandas as pd\n'), ((5800, 5867), 'tqdm.auto.tqdm', 'tqdm', (['season_ids'], {'desc': 'f"""Grabbing data for {comp_name}"""', 'leave': '(True)'}), "(season_ids, desc=f'Grabbing data for {comp_name}', leave=True)\n", (5804, 5867), False, 'from tqdm.auto import tqdm, trange\n'), ((8540, 8574), 'math.sqrt', 'math.sqrt', (['(diff_sqr_x + diff_sqr_y)'], {}), '(diff_sqr_x + diff_sqr_y)\n', (8549, 8574), False, 'import math\n'), ((12074, 12125), 'math.ceil', 'math.ceil', (['((xinput if xinput > 0 else 0.5) / x_step)'], {}), '((xinput if xinput > 0 else 0.5) / x_step)\n', (12083, 12125), False, 'import math\n'), ((12164, 12215), 'math.ceil', 'math.ceil', (['((yinput if yinput > 0 else 0.5) / y_step)'], {}), '((yinput if yinput > 0 else 0.5) / y_step)\n', (12173, 12215), False, 'import math\n'), ((25057, 25080), 'pandas.DataFrame', 'pd.DataFrame', (['main_dict'], {}), '(main_dict)\n', (25069, 25080), True, 'import pandas as pd\n'), ((25373, 25425), 'pandas.read_pickle', 'pd.read_pickle', (["(path + '/Champions_League_shots.pkl')"], {}), "(path + '/Champions_League_shots.pkl')\n", (25387, 25425), True, 'import pandas as pd\n'), ((25441, 25500), 'pandas.read_pickle', 'pd.read_pickle', (['(path + "/FA_Women\'s_Super_League_shots.pkl")'], {}), '(path + "/FA_Women\'s_Super_League_shots.pkl")\n', (25455, 25500), True, 'import pandas as pd\n'), ((25517, 25567), 'pandas.read_pickle', 'pd.read_pickle', (["(path + '/FIFA_World_Cup_shots.pkl')"], {}), "(path + '/FIFA_World_Cup_shots.pkl')\n", (25531, 25567), True, 'import pandas as pd\n'), ((25580, 25623), 'pandas.read_pickle', 'pd.read_pickle', (["(path + '/La_Liga_shots.pkl')"], {}), "(path + '/La_Liga_shots.pkl')\n", (25594, 25623), True, 'import pandas as pd\n'), ((25638, 25678), 'pandas.read_pickle', 'pd.read_pickle', (["(path + '/NWSL_shots.pkl')"], {}), "(path + '/NWSL_shots.pkl')\n", (25652, 25678), True, 'import pandas as pd\n'), ((25691, 25741), 'pandas.read_pickle', 'pd.read_pickle', (["(path + '/Premier_League_shots.pkl')"], {}), "(path + '/Premier_League_shots.pkl')\n", (25705, 25741), True, 'import pandas as pd\n'), ((25755, 25808), 'pandas.read_pickle', 'pd.read_pickle', (['(path + "/Women\'s_World_Cup_shots.pkl")'], {}), '(path + "/Women\'s_World_Cup_shots.pkl")\n', (25769, 25808), True, 'import pandas as pd\n'), ((25852, 25914), 'pandas.concat', 'pd.concat', (['[ll_data, ucl_data, menwc_data, pl_data, nwsl_data]'], {}), '([ll_data, ucl_data, menwc_data, pl_data, nwsl_data])\n', (25861, 25914), True, 'import pandas as pd\n'), ((26045, 26078), 'pandas.concat', 'pd.concat', (['[fawsl_data, wwc_data]'], {}), '([fawsl_data, wwc_data])\n', (26054, 26078), True, 'import pandas as pd\n'), ((4461, 4540), 'tqdm.auto.tqdm', 'tqdm', (['match_ids'], {'desc': 'f"""Grabbing data for {comp_name}"""', 'position': '(0)', 'leave': 'leave'}), "(match_ids, desc=f'Grabbing data for 
{comp_name}', position=0, leave=leave)\n", (4465, 4540), False, 'from tqdm.auto import tqdm, trange\n'), ((4707, 4749), 'pandas.concat', 'pd.concat', (['[event_df, temp_df]'], {'sort': '(False)'}), '([event_df, temp_df], sort=False)\n', (4716, 4749), True, 'import pandas as pd\n'), ((6621, 6663), 'pandas.concat', 'pd.concat', (['[event_df, temp_df]'], {'sort': '(False)'}), '([event_df, temp_df], sort=False)\n', (6630, 6663), True, 'import pandas as pd\n'), ((9555, 9571), 'numpy.arccos', 'np.arccos', (['value'], {}), '(value)\n', (9564, 9571), True, 'import numpy as np\n'), ((22680, 22711), 'pandas.isna', 'pd.isna', (["data['under_pressure']"], {}), "(data['under_pressure'])\n", (22687, 22711), True, 'import pandas as pd\n'), ((23130, 23163), 'pandas.isna', 'pd.isna', (["data['shot_key_pass_id']"], {}), "(data['shot_key_pass_id'])\n", (23137, 23163), True, 'import pandas as pd\n'), ((26327, 26351), 'os.path.isdir', 'os.path.isdir', (['path_save'], {}), '(path_save)\n', (26340, 26351), False, 'import os\n'), ((26396, 26415), 'os.mkdir', 'os.mkdir', (['path_save'], {}), '(path_save)\n', (26404, 26415), False, 'import os\n'), ((12637, 12659), 'statsmodels.api.Logit', 'sm.Logit', (['y_val', 'x_val'], {}), '(y_val, x_val)\n', (12645, 12659), True, 'import statsmodels.api as sm\n'), ((22465, 22496), 'pandas.isna', 'pd.isna', (["data['shot_open_goal']"], {}), "(data['shot_open_goal'])\n", (22472, 22496), True, 'import pandas as pd\n'), ((22876, 22907), 'pandas.isna', 'pd.isna', (["data['shot_deflected']"], {}), "(data['shot_deflected'])\n", (22883, 22907), True, 'import pandas as pd\n'), ((11314, 11339), 'pandas.DataFrame', 'pd.DataFrame', (['result_dict'], {}), '(result_dict)\n', (11326, 11339), True, 'import pandas as pd\n')] |
#! /usr/bin/env python
"""Calculations of the tropopause."""
import numpy as np
import xarray as xr
from .constants import GRAV_EARTH, R_D
from .names import LAT_STR, LEV_STR
from .nb_utils import apply_maybe_groupby
from .interp import drop_nans_and_interp
def z_from_hypso(temp, p_sfc=1000., p_top=0.1, p_str=LEV_STR,
r_d=R_D, grav=GRAV_EARTH):
"""Height computed from hypsometric equation."""
# Ensure all pressures have same horizontal dimensions as temperature.
non_vert_coords = xr.ones_like(
temp.isel(**{p_str: 0})).drop(p_str)
if np.isscalar(p_sfc):
p_sfc_val = p_sfc
p_sfc = p_sfc_val*non_vert_coords
p_sfc[p_str] = p_sfc_val
p_top_val = p_top
p_top = xr.zeros_like(p_sfc)
p_top[p_str] = p_top_val
pressure = (non_vert_coords*temp[p_str]).transpose(*temp.dims)
# Compute half-level pressure values as averages of full levels.
p_half_inner = 0.5*(
pressure.isel(**{p_str: slice(1, None)}).values +
pressure.isel(**{p_str: slice(None, -1)}).values
)
p_axis_num = temp.get_axis_num(p_str)
p_half = np.concatenate(
[np.expand_dims(p_top, p_axis_num), p_half_inner,
np.expand_dims(p_sfc, p_axis_num)], axis=p_axis_num
)
# Convert from hPa to Pa if necessary.
if p_half.max() < 2000:
log_p_half = np.log(p_half*1e2)
else:
log_p_half = np.log(p_half)
dlog_p_half_values = np.diff(log_p_half, axis=p_axis_num)
dlog_p_half = xr.ones_like(pressure)*dlog_p_half_values
temp_dlog_p = temp*dlog_p_half
# Integrate vertically.
height = r_d / grav*temp_dlog_p.isel(
**{p_str: slice(-1, None, -1)}).cumsum(p_str)
height = height.isel(**{p_str: slice(-1, None, -1)})
# Replace 'inf' values at TOA with NaNs and mask where
# input temperature array is masked.
return xr.where(np.isfinite(height) & temp.notnull(),
height, np.nan)
# TODO: define a decorator that handles this maybe-groupby logic,
# so that it's unnecessary to define the two separate functions
# for each case. It seems like pretty simple boilerplate.
def _tropo_wmo(temp, z, p_str=LEV_STR, lat_str=LAT_STR,
threshold=-2e-3, max_pressure=600,
interpolate=True):
"""WMO definition of tropopause: lapse rate < 2 K / km."""
temp_arr, z_arr = drop_nans_and_interp(
[temp, z], do_interp=interpolate, p_str=p_str)
dtemp_dz = temp_arr.diff(p_str) / z_arr.diff(p_str)
dtemp_dz = dtemp_dz.where(z_arr[p_str] < max_pressure, drop=True)
above_thresh = dtemp_dz[p_str].where(dtemp_dz > threshold)
p_tropo_ind = above_thresh.dropna(p_str, how='all').argmax(p_str)
arr = temp_arr[{p_str: p_tropo_ind}].interp(**{lat_str: temp[lat_str]})
arr.name = 'tropopause_wmo'
return arr
def tropopause_wmo(temp, z, p_str=LEV_STR, lat_str=LAT_STR,
threshold=-2e-3, max_pressure=600,
interpolate=True):
"""WMO definition of tropopause: lapse rate < 2 K / km."""
kwargs = dict(p_str=p_str, lat_str=lat_str, threshold=threshold,
max_pressure=max_pressure, interpolate=interpolate)
return apply_maybe_groupby(_tropo_wmo, [p_str, lat_str], [temp, z],
kwargs=kwargs)
def _tropo_cold_point(temp, interpolate=True,
p_str=LEV_STR, lat_str=LAT_STR):
"""Tropopause defined as the coldest point in the column."""
temp_arr, = drop_nans_and_interp(
[temp], do_interp=interpolate, p_str=p_str)
cold_point = temp_arr.min(p_str)
cold_point_lev = temp_arr[p_str][temp_arr.argmin(p_str)]
cold_point[p_str] = cold_point_lev
return cold_point.interp(**{lat_str: temp[lat_str]})
def tropopause_cold_point(temp, interpolate=True,
p_str=LEV_STR, lat_str=LAT_STR):
"""Tropopause defined as the coldest point in the column."""
kwargs = dict(interpolate=interpolate, p_str=p_str,
lat_str=lat_str)
return apply_maybe_groupby(_tropo_cold_point, [p_str, lat_str], [temp],
kwargs=kwargs)
def _tropo_max_vert_curv(temp, z, interpolate=True, max_pressure=500,
p_str=LEV_STR, lat_str=LAT_STR):
"""Tropopause defined as where d^2T/dz^2 maximizes."""
temp_arr, z_arr = drop_nans_and_interp([temp, z], do_interp=interpolate,
p_str=p_str)
temp_arr["z"] = z_arr
d2temp_dz2 = temp_arr.differentiate("z").differentiate("z")
d2temp_dz2 = d2temp_dz2.where(z_arr[p_str] < max_pressure, drop=True)
d2temp_dz2_max_ind = d2temp_dz2.argmax(p_str)
return temp_arr[{p_str: d2temp_dz2_max_ind}].interp(**{lat_str:
temp[lat_str]})
def tropopause_max_vert_curv(temp, z, interpolate=True, max_pressure=500,
p_str=LEV_STR, lat_str=LAT_STR):
"""Tropopause defined as where d^2T/dz^2 maximizes."""
kwargs = dict(interpolate=interpolate, p_str=p_str,
lat_str=lat_str, max_pressure=max_pressure)
return apply_maybe_groupby(_tropo_max_vert_curv, [p_str, lat_str],
[temp, z], kwargs=kwargs)
# NOTE: this one still needs work. Sometimes oscillates between near-surface
# and upper troposphere if specified tropopause temperature is sufficiently
# warm. Conversely, if specified temperature is too cold, some columns never
# come close to it, yet this will take the level nearest to it regardless. So
# need to introduce some threshold, and apply it from the mid-troposphere up,
# akin to how the WMO version is done.
def _tropo_fixed_temp(temp, temp_tropo=200., interpolate=True,
p_str=LEV_STR, lat_str=LAT_STR):
"""Tropopause defined as a fixed temperature."""
temp_arr, = drop_nans_and_interp(
[temp], do_interp=interpolate, p_str=p_str)
temp_closest_ind = np.abs(temp_arr - temp_tropo).argmin(p_str)
return temp_arr[{p_str: temp_closest_ind}].interp(**{lat_str:
temp[lat_str]})
def tropopause_fixed_temp(temp, temp_tropo=200., interpolate=True,
p_str=LEV_STR, lat_str=LAT_STR):
"""Tropopause defined as a fixed temperature."""
kwargs = dict(interpolate=interpolate, p_str=p_str,
lat_str=lat_str, temp_tropo=temp_tropo)
return apply_maybe_groupby(_tropo_fixed_temp, [p_str, lat_str],
[temp], kwargs=kwargs)
def _tropo_fixed_height(temp, z, height_tropo=1e4, interpolate=True,
p_str=LEV_STR, lat_str=LAT_STR):
"""Tropopause defined as a fixed height."""
temp_arr, z_arr = drop_nans_and_interp(
[temp, z], do_interp=interpolate, p_str=p_str)
z_closest_ind = np.abs(z_arr - height_tropo).argmin(p_str)
return temp_arr[{p_str: z_closest_ind}].interp(**{lat_str: temp[lat_str]})
def tropopause_fixed_height(temp, z, height_tropo=1e4, interpolate=True,
p_str=LEV_STR, lat_str=LAT_STR):
"""Tropopause defined as a fixed height."""
kwargs = dict(interpolate=interpolate, p_str=p_str,
lat_str=lat_str, height_tropo=height_tropo)
return apply_maybe_groupby(_tropo_fixed_height, [p_str, lat_str],
[temp, z], kwargs=kwargs)
if __name__ == '__main__':
pass
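    # Hedged, illustrative smoke test (not part of the library): build a tiny
    # synthetic (lat, lev) temperature field and compute heights with
    # z_from_hypso. The values are made up; LAT_STR/LEV_STR come from .names.
    lats = xr.DataArray([0.0, 30.0], dims=[LAT_STR])
    plev = xr.DataArray([1000.0, 500.0, 100.0], dims=[LEV_STR])
    temp = (250.0 * xr.ones_like(lats) * xr.ones_like(plev)).assign_coords(
        {LAT_STR: lats, LEV_STR: plev})
    print(z_from_hypso(temp, p_str=LEV_STR))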
| [
"numpy.abs",
"numpy.isscalar",
"numpy.log",
"numpy.diff",
"xarray.ones_like",
"xarray.zeros_like",
"numpy.isfinite",
"numpy.expand_dims"
] | [((584, 602), 'numpy.isscalar', 'np.isscalar', (['p_sfc'], {}), '(p_sfc)\n', (595, 602), True, 'import numpy as np\n'), ((739, 759), 'xarray.zeros_like', 'xr.zeros_like', (['p_sfc'], {}), '(p_sfc)\n', (752, 759), True, 'import xarray as xr\n'), ((1451, 1487), 'numpy.diff', 'np.diff', (['log_p_half'], {'axis': 'p_axis_num'}), '(log_p_half, axis=p_axis_num)\n', (1458, 1487), True, 'import numpy as np\n'), ((1361, 1383), 'numpy.log', 'np.log', (['(p_half * 100.0)'], {}), '(p_half * 100.0)\n', (1367, 1383), True, 'import numpy as np\n'), ((1411, 1425), 'numpy.log', 'np.log', (['p_half'], {}), '(p_half)\n', (1417, 1425), True, 'import numpy as np\n'), ((1506, 1528), 'xarray.ones_like', 'xr.ones_like', (['pressure'], {}), '(pressure)\n', (1518, 1528), True, 'import xarray as xr\n'), ((1152, 1185), 'numpy.expand_dims', 'np.expand_dims', (['p_top', 'p_axis_num'], {}), '(p_top, p_axis_num)\n', (1166, 1185), True, 'import numpy as np\n'), ((1210, 1243), 'numpy.expand_dims', 'np.expand_dims', (['p_sfc', 'p_axis_num'], {}), '(p_sfc, p_axis_num)\n', (1224, 1243), True, 'import numpy as np\n'), ((1886, 1905), 'numpy.isfinite', 'np.isfinite', (['height'], {}), '(height)\n', (1897, 1905), True, 'import numpy as np\n'), ((5990, 6019), 'numpy.abs', 'np.abs', (['(temp_arr - temp_tropo)'], {}), '(temp_arr - temp_tropo)\n', (5996, 6019), True, 'import numpy as np\n'), ((6886, 6914), 'numpy.abs', 'np.abs', (['(z_arr - height_tropo)'], {}), '(z_arr - height_tropo)\n', (6892, 6914), True, 'import numpy as np\n')] |
from models.core.train_eval.utils import loadConfig
import matplotlib.pyplot as plt
from importlib import reload
import numpy as np
from planner import policy
reload(policy)
from planner.policy import TestdataObj, MergePolicy, ModelEvaluation
import dill
exp_to_evaluate = 'series083exp002'
config = loadConfig(exp_to_evaluate)
traffic_density = ''
traffic_density = 'high_density_'
# traffic_density = 'medium_density_'
# traffic_density = 'low_density_'
test_data = TestdataObj(traffic_density, config)
model = MergePolicy(test_data, config)
eval_obj = ModelEvaluation(model, test_data, config)
eval_obj.compute_rwse(traffic_density)
# %%
"""To visualise rwse against prediction horizon
"""
exps = [
# 'series077exp001',
# 'series078exp001',
# 'series079exp002',
# 'series081exp001',
'series081exp002',
'series082exp001',
'series082exp002',
]
densities = ['high_density_']
# densities = ['medium_density_']
# densities = ['low_density_']
rwses = {}
for exp_i in range(len(exps)):
for density_i in range(len(densities)):
dirName = './models/experiments/'+exps[exp_i]+'/'+densities[density_i]+'rwse'
with open(dirName, 'rb') as f:
rwses[exps[exp_i]+densities[density_i]] = dill.load(f, ignore=True)
exp_names = []
for exp in exps:
for density in densities:
exp_names.append(exp+density)
for key in ['vel_m','lat_vel','vel_y','vel_f','vel_fadj']:
legends = []
plt.figure()
for exp_name in exp_names:
plt.plot(rwses[exp_name][key])
legends.append(key+'_'+exp_name)
plt.legend(legends)
plt.grid()
# %%
fig_num = 0
pred_h = 4
# for episode in [2895, 1289]:
# for episode in [2895, 1289, 1037]:
for episode in [2895, 1289, 1037, 2870, 2400, 1344, 2872, 2266, 2765, 2215]:
st_seq, cond_seq, _, targ_arr = eval_obj.episodeSetup(episode)
st_i, cond_i, bc_der_i, _, _, targ_i = eval_obj.sceneSetup(st_seq,
cond_seq,
_,
targ_arr,
current_step=19,
pred_h=pred_h)
actions, _, _ = eval_obj.policy.get_actions([st_i.copy(), cond_i.copy()], bc_der_i,
traj_n=10, pred_h=pred_h)
for act_n in range(5):
plt.figure()
plt.plot(np.arange(0, pred_h+0.1, 0.1), targ_i[:, act_n], color='red')
for trj in range(10):
plt.plot(np.arange(0, pred_h+0.1, 0.1), actions[trj,:,act_n], color='grey')
# plt.grid()
plt.title(str(fig_num)+'-'+exp_to_evaluate)
plt.xlabel('Prediction horizon [s]')
if act_n == 1:
plt.ylabel('Lateral speed [m/s]')
else:
plt.ylabel('Acceleration [$m/s^2$]')
fig_num += 1
| [
"matplotlib.pyplot.grid",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"planner.policy.ModelEvaluation",
"matplotlib.pyplot.plot",
"planner.policy.TestdataObj",
"planner.policy.MergePolicy",
"matplotlib.pyplot.figure",
"models.core.train_eval.utils.loadConfig",
"import... | [((164, 178), 'importlib.reload', 'reload', (['policy'], {}), '(policy)\n', (170, 178), False, 'from importlib import reload\n'), ((311, 338), 'models.core.train_eval.utils.loadConfig', 'loadConfig', (['exp_to_evaluate'], {}), '(exp_to_evaluate)\n', (321, 338), False, 'from models.core.train_eval.utils import loadConfig\n'), ((484, 520), 'planner.policy.TestdataObj', 'TestdataObj', (['traffic_density', 'config'], {}), '(traffic_density, config)\n', (495, 520), False, 'from planner.policy import TestdataObj, MergePolicy, ModelEvaluation\n'), ((532, 562), 'planner.policy.MergePolicy', 'MergePolicy', (['test_data', 'config'], {}), '(test_data, config)\n', (543, 562), False, 'from planner.policy import TestdataObj, MergePolicy, ModelEvaluation\n'), ((575, 616), 'planner.policy.ModelEvaluation', 'ModelEvaluation', (['model', 'test_data', 'config'], {}), '(model, test_data, config)\n', (590, 616), False, 'from planner.policy import TestdataObj, MergePolicy, ModelEvaluation\n'), ((1533, 1545), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1543, 1545), True, 'import matplotlib.pyplot as plt\n'), ((1665, 1684), 'matplotlib.pyplot.legend', 'plt.legend', (['legends'], {}), '(legends)\n', (1675, 1684), True, 'import matplotlib.pyplot as plt\n'), ((1690, 1700), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1698, 1700), True, 'import matplotlib.pyplot as plt\n'), ((1587, 1617), 'matplotlib.pyplot.plot', 'plt.plot', (['rwses[exp_name][key]'], {}), '(rwses[exp_name][key])\n', (1595, 1617), True, 'import matplotlib.pyplot as plt\n'), ((2553, 2565), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2563, 2565), True, 'import matplotlib.pyplot as plt\n'), ((2854, 2890), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Prediction horizon [s]"""'], {}), "('Prediction horizon [s]')\n", (2864, 2890), True, 'import matplotlib.pyplot as plt\n'), ((1316, 1341), 'dill.load', 'dill.load', (['f'], {'ignore': '(True)'}), '(f, ignore=True)\n', (1325, 1341), False, 'import dill\n'), ((2584, 2615), 'numpy.arange', 'np.arange', (['(0)', '(pred_h + 0.1)', '(0.1)'], {}), '(0, pred_h + 0.1, 0.1)\n', (2593, 2615), True, 'import numpy as np\n'), ((2928, 2961), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Lateral speed [m/s]"""'], {}), "('Lateral speed [m/s]')\n", (2938, 2961), True, 'import matplotlib.pyplot as plt\n'), ((2990, 3026), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Acceleration [$m/s^2$]"""'], {}), "('Acceleration [$m/s^2$]')\n", (3000, 3026), True, 'import matplotlib.pyplot as plt\n'), ((2701, 2732), 'numpy.arange', 'np.arange', (['(0)', '(pred_h + 0.1)', '(0.1)'], {}), '(0, pred_h + 0.1, 0.1)\n', (2710, 2732), True, 'import numpy as np\n')] |
import numpy as np
class ELO:
def __init__(self, rampup = 0):
self.rampup = rampup
def get_win_chance(self, elo1, elo2, length):
exp = -1*(elo1-elo2) * np.sqrt(length)/2000
return 1/(1+10**exp)
def compute_change(self, elo_winner, elo_loser, length, winner_xp, loser_xp):
elo_winner = int(elo_winner)
elo_loser = int(elo_loser)
points = 4 * np.sqrt(length)
winner_change = (1 - self.get_win_chance(elo_winner, elo_loser, length)) * points
loser_change = points * self.get_win_chance(elo_loser, elo_winner, length)
        if loser_xp != loser_xp:  # NaN check: NaN is the only value not equal to itself
            raise Exception("Loser XP is NaN")
winner_change = winner_change * self.get_rampup_magnitude(int(winner_xp))
loser_change = loser_change * self.get_rampup_magnitude(int(loser_xp))
return (winner_change, loser_change * -1)
def get_rampup_magnitude(self, xp):
if (self.rampup > xp):
return (((self.rampup + 100) - xp) / 100)
return 1
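# Hedged usage sketch (ratings, game length and xp values are illustrative):
if __name__ == "__main__":
    elo = ELO(rampup=0)
    print(elo.get_win_chance(1600, 1400, 10))            # favourite's win chance
    print(elo.compute_change(1600, 1400, 10, 500, 500))  # (winner +, loser -)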
| [
"numpy.sqrt"
] | [((404, 419), 'numpy.sqrt', 'np.sqrt', (['length'], {}), '(length)\n', (411, 419), True, 'import numpy as np\n'), ((178, 193), 'numpy.sqrt', 'np.sqrt', (['length'], {}), '(length)\n', (185, 193), True, 'import numpy as np\n')] |
import itertools
from typing import Any, List, Sequence, Tuple
import numpy as np
import pytest
from pydantic import BaseModel
from useq import (
Channel,
MDAEvent,
MDASequence,
NoT,
NoZ,
Position,
TDurationLoops,
TIntervalDuration,
TIntervalLoops,
ZAboveBelow,
ZAbsolutePositions,
ZRangeAround,
ZRelativePositions,
)
_T = List[Tuple[Any, Sequence[float]]]
z_as_class: _T = [
(ZAboveBelow(above=8, below=4, step=2), [-4, -2, 0, 2, 4, 6, 8]),
(ZAbsolutePositions(absolute=[0, 0.5, 5]), [0, 0.5, 5]),
(ZRelativePositions(relative=[0, 0.5, 5]), [0, 0.5, 5]),
(ZRangeAround(range=8, step=1), [-4, -3, -2, -1, 0, 1, 2, 3, 4]),
(NoZ(), []),
]
z_as_dict: _T = [
(None, []),
({"above": 8, "below": 4, "step": 2}, [-4, -2, 0, 2, 4, 6, 8]),
({"absolute": [0, 0.5, 5]}, [0, 0.5, 5]),
({"relative": [0, 0.5, 5]}, [0, 0.5, 5]),
({"range": 8, "step": 1}, [-4, -3, -2, -1, 0, 1, 2, 3, 4]),
]
z_inputs = z_as_class + z_as_dict
t_as_class: _T = [
# frame every second for 4 seconds
(TIntervalDuration(interval=1, duration=4), [0, 1, 2, 3, 4]),
# 5 frames spanning 8 seconds
(TDurationLoops(loops=5, duration=8), [0, 2, 4, 6, 8]),
# 5 frames, taken every 250 ms
(TIntervalLoops(loops=5, interval=0.25), [0, 0.25, 0.5, 0.75, 1]),
(
[
TIntervalLoops(loops=5, interval=0.25),
TIntervalDuration(interval=1, duration=4),
],
[0, 0.25, 0.5, 0.75, 1, 2, 3, 4, 5],
),
]
t_as_dict: _T = [
(None, []),
({"interval": 0.5, "duration": 2}, [0, 0.5, 1, 1.5, 2]),
({"loops": 5, "duration": 8}, [0, 2, 4, 6, 8]),
({"loops": 5, "interval": 0.25}, [0, 0.25, 0.5, 0.75, 1]),
(
[{"loops": 5, "interval": 0.25}, {"interval": 1, "duration": 4}],
[0, 0.25, 0.50, 0.75, 1, 2, 3, 4, 5],
),
({"loops": 5, "duration": {"milliseconds": 8}}, [0, 0.002, 0.004, 0.006, 0.008]),
({"loops": 5, "duration": {"seconds": 8}}, [0, 2, 4, 6, 8]),
(NoT(), []),
]
t_inputs = t_as_class + t_as_dict
all_orders = ["".join(i) for i in itertools.permutations("tpcz")]
c_inputs = [
("DAPI", ("Channel", "DAPI")),
({"config": "DAPI"}, ("Channel", "DAPI")),
({"config": "DAPI", "group": "Group", "acquire_every": 3}, ("Group", "DAPI")),
(Channel(config="DAPI"), ("Channel", "DAPI")),
(Channel(config="DAPI", group="Group"), ("Group", "DAPI")),
]
p_inputs = [
({"x": 0, "y": 1, "z": 2}, (0, 1, 2)),
({"y": 200}, (None, 200, None)),
((100, 200, 300), (100, 200, 300)),
({"z": 100, "z_plan": {"above": 8, "below": 4, "step": 2}}, (None, None, 100)),
(np.ones(3), (1, 1, 1)),
((None, 200, None), (None, 200, None)),
(np.ones(2), (1, 1, None)),
(Position(x=100, y=200, z=300), (100, 200, 300)),
]
@pytest.mark.parametrize("zplan, zexpectation", z_inputs)
def test_z_plan(zplan: Any, zexpectation: Sequence[float]) -> None:
assert list(MDASequence(z_plan=zplan).z_plan) == zexpectation
@pytest.mark.parametrize("tplan, texpectation", t_inputs)
def test_t_plan(tplan: Any, texpectation: Sequence[float]) -> None:
assert list(MDASequence(time_plan=tplan).time_plan) == texpectation
@pytest.mark.parametrize("channel, cexpectation", c_inputs)
def test_channel(channel: Any, cexpectation: Sequence[float]) -> None:
channel = MDASequence(channels=[channel]).channels[0]
assert (channel.group, channel.config) == cexpectation
@pytest.mark.parametrize("position, pexpectation", p_inputs)
def test_position(position: Any, pexpectation: Sequence[float]) -> None:
position = MDASequence(stage_positions=[position]).stage_positions[0]
assert (position.x, position.y, position.z) == pexpectation
@pytest.mark.parametrize("tplan, texpectation", t_as_dict[:5])
@pytest.mark.parametrize("zplan, zexpectation", z_as_dict)
@pytest.mark.parametrize("channel, cexpectation", c_inputs[:3])
@pytest.mark.parametrize("position, pexpectation", p_inputs[:4])
@pytest.mark.parametrize("order", ["tpcz", "tpzc", "ptzc", "ptcz", "ptc", "zc"])
def test_combinations(
tplan: Any,
texpectation: Sequence[float],
zplan: Any,
zexpectation: Sequence[float],
channel: Any,
cexpectation: Sequence[float],
order: str,
position: Any,
pexpectation: Sequence[float],
) -> None:
mda = MDASequence(
z_plan=zplan,
time_plan=tplan,
channels=[channel],
stage_positions=[position],
axis_order=order,
)
assert list(mda.z_plan) == zexpectation
assert list(mda.time_plan) == texpectation
assert (mda.channels[0].group, mda.channels[0].config) == cexpectation
position = mda.stage_positions[0]
assert (position.x, position.y, position.z) == pexpectation
assert list(mda)
assert mda.to_pycromanager()
@pytest.mark.parametrize("cls", [MDASequence, MDAEvent])
def test_schema(cls: BaseModel) -> None:
assert cls.schema()
assert cls.schema_json()
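# Hedged illustration beyond the parametrized matrix above: the number of
# generated events is the product of the axis lengths (values are arbitrary).
def test_event_count_example() -> None:
    mda = MDASequence(
        time_plan={"loops": 2, "interval": 0.1},
        z_plan={"range": 2, "step": 1},
        channels=["DAPI"],
    )
    assert len(list(mda)) == 2 * 3 * 1  # 2 timepoints x 3 z steps x 1 channel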
| [
"useq.ZRelativePositions",
"useq.TIntervalDuration",
"numpy.ones",
"itertools.permutations",
"useq.Channel",
"useq.ZRangeAround",
"useq.Position",
"useq.MDASequence",
"useq.NoZ",
"pytest.mark.parametrize",
"useq.TIntervalLoops",
"useq.ZAbsolutePositions",
"useq.NoT",
"useq.TDurationLoops",... | [((2814, 2870), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""zplan, zexpectation"""', 'z_inputs'], {}), "('zplan, zexpectation', z_inputs)\n", (2837, 2870), False, 'import pytest\n'), ((3008, 3064), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tplan, texpectation"""', 't_inputs'], {}), "('tplan, texpectation', t_inputs)\n", (3031, 3064), False, 'import pytest\n'), ((3208, 3266), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channel, cexpectation"""', 'c_inputs'], {}), "('channel, cexpectation', c_inputs)\n", (3231, 3266), False, 'import pytest\n'), ((3458, 3517), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""position, pexpectation"""', 'p_inputs'], {}), "('position, pexpectation', p_inputs)\n", (3481, 3517), False, 'import pytest\n'), ((3732, 3793), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tplan, texpectation"""', 't_as_dict[:5]'], {}), "('tplan, texpectation', t_as_dict[:5])\n", (3755, 3793), False, 'import pytest\n'), ((3795, 3852), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""zplan, zexpectation"""', 'z_as_dict'], {}), "('zplan, zexpectation', z_as_dict)\n", (3818, 3852), False, 'import pytest\n'), ((3854, 3916), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""channel, cexpectation"""', 'c_inputs[:3]'], {}), "('channel, cexpectation', c_inputs[:3])\n", (3877, 3916), False, 'import pytest\n'), ((3918, 3981), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""position, pexpectation"""', 'p_inputs[:4]'], {}), "('position, pexpectation', p_inputs[:4])\n", (3941, 3981), False, 'import pytest\n'), ((3983, 4062), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""order"""', "['tpcz', 'tpzc', 'ptzc', 'ptcz', 'ptc', 'zc']"], {}), "('order', ['tpcz', 'tpzc', 'ptzc', 'ptcz', 'ptc', 'zc'])\n", (4006, 4062), False, 'import pytest\n'), ((4814, 4869), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls"""', '[MDASequence, MDAEvent]'], {}), "('cls', [MDASequence, MDAEvent])\n", (4837, 4869), False, 'import pytest\n'), ((4332, 4444), 'useq.MDASequence', 'MDASequence', ([], {'z_plan': 'zplan', 'time_plan': 'tplan', 'channels': '[channel]', 'stage_positions': '[position]', 'axis_order': 'order'}), '(z_plan=zplan, time_plan=tplan, channels=[channel],\n stage_positions=[position], axis_order=order)\n', (4343, 4444), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, TIntervalDuration, TIntervalLoops, ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n'), ((437, 474), 'useq.ZAboveBelow', 'ZAboveBelow', ([], {'above': '(8)', 'below': '(4)', 'step': '(2)'}), '(above=8, below=4, step=2)\n', (448, 474), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, TIntervalDuration, TIntervalLoops, ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n'), ((507, 547), 'useq.ZAbsolutePositions', 'ZAbsolutePositions', ([], {'absolute': '[0, 0.5, 5]'}), '(absolute=[0, 0.5, 5])\n', (525, 547), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, TIntervalDuration, TIntervalLoops, ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n'), ((568, 608), 'useq.ZRelativePositions', 'ZRelativePositions', ([], {'relative': '[0, 0.5, 5]'}), '(relative=[0, 0.5, 5])\n', (586, 608), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, TIntervalDuration, TIntervalLoops, 
ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n'), ((629, 658), 'useq.ZRangeAround', 'ZRangeAround', ([], {'range': '(8)', 'step': '(1)'}), '(range=8, step=1)\n', (641, 658), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, TIntervalDuration, TIntervalLoops, ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n'), ((699, 704), 'useq.NoZ', 'NoZ', ([], {}), '()\n', (702, 704), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, TIntervalDuration, TIntervalLoops, ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n'), ((1071, 1112), 'useq.TIntervalDuration', 'TIntervalDuration', ([], {'interval': '(1)', 'duration': '(4)'}), '(interval=1, duration=4)\n', (1088, 1112), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, TIntervalDuration, TIntervalLoops, ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n'), ((1171, 1206), 'useq.TDurationLoops', 'TDurationLoops', ([], {'loops': '(5)', 'duration': '(8)'}), '(loops=5, duration=8)\n', (1185, 1206), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, TIntervalDuration, TIntervalLoops, ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n'), ((1266, 1304), 'useq.TIntervalLoops', 'TIntervalLoops', ([], {'loops': '(5)', 'interval': '(0.25)'}), '(loops=5, interval=0.25)\n', (1280, 1304), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, TIntervalDuration, TIntervalLoops, ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n'), ((2020, 2025), 'useq.NoT', 'NoT', ([], {}), '()\n', (2023, 2025), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, TIntervalDuration, TIntervalLoops, ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n'), ((2104, 2134), 'itertools.permutations', 'itertools.permutations', (['"""tpcz"""'], {}), "('tpcz')\n", (2126, 2134), False, 'import itertools\n'), ((2320, 2342), 'useq.Channel', 'Channel', ([], {'config': '"""DAPI"""'}), "(config='DAPI')\n", (2327, 2342), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, TIntervalDuration, TIntervalLoops, ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n'), ((2371, 2408), 'useq.Channel', 'Channel', ([], {'config': '"""DAPI"""', 'group': '"""Group"""'}), "(config='DAPI', group='Group')\n", (2378, 2408), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, TIntervalDuration, TIntervalLoops, ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n'), ((2655, 2665), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (2662, 2665), True, 'import numpy as np\n'), ((2728, 2738), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (2735, 2738), True, 'import numpy as np\n'), ((2760, 2789), 'useq.Position', 'Position', ([], {'x': '(100)', 'y': '(200)', 'z': '(300)'}), '(x=100, y=200, z=300)\n', (2768, 2789), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, TIntervalDuration, TIntervalLoops, ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n'), ((1360, 1398), 'useq.TIntervalLoops', 'TIntervalLoops', ([], {'loops': '(5)', 'interval': '(0.25)'}), '(loops=5, interval=0.25)\n', (1374, 1398), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, 
TIntervalDuration, TIntervalLoops, ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n'), ((1412, 1453), 'useq.TIntervalDuration', 'TIntervalDuration', ([], {'interval': '(1)', 'duration': '(4)'}), '(interval=1, duration=4)\n', (1429, 1453), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, TIntervalDuration, TIntervalLoops, ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n'), ((3352, 3383), 'useq.MDASequence', 'MDASequence', ([], {'channels': '[channel]'}), '(channels=[channel])\n', (3363, 3383), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, TIntervalDuration, TIntervalLoops, ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n'), ((3606, 3645), 'useq.MDASequence', 'MDASequence', ([], {'stage_positions': '[position]'}), '(stage_positions=[position])\n', (3617, 3645), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, TIntervalDuration, TIntervalLoops, ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n'), ((2955, 2980), 'useq.MDASequence', 'MDASequence', ([], {'z_plan': 'zplan'}), '(z_plan=zplan)\n', (2966, 2980), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, TIntervalDuration, TIntervalLoops, ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n'), ((3149, 3177), 'useq.MDASequence', 'MDASequence', ([], {'time_plan': 'tplan'}), '(time_plan=tplan)\n', (3160, 3177), False, 'from useq import Channel, MDAEvent, MDASequence, NoT, NoZ, Position, TDurationLoops, TIntervalDuration, TIntervalLoops, ZAboveBelow, ZAbsolutePositions, ZRangeAround, ZRelativePositions\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# <NAME> <<EMAIL>>
# 2019 09 13
#
# Copyright 2019 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy
import pmt
from gnuradio import gr
class preamble_insert(gr.basic_block):
"""
    Message block that prepends a preamble to incoming PDUs and/or strips
    it (and an optional tail) from them.
"""
def __init__(self, preamble=None, add_head=True, rm_head=False, rm_tail=False):
"""
Inputs:
:param preamble: list or 1d array of preamble bits
:param add_head: insert preamble if true
:param rm_head: remove preamble if true
:param rm_tail: remove trailing len(preamble) bits if true
"""
gr.basic_block.__init__(self,
name="preamble_insert",
in_sig=None,
out_sig=None)
if preamble is None:
raise Exception("Preamble must be provided")
        if not isinstance(preamble, numpy.ndarray):
preamble = numpy.array(preamble).astype(numpy.int32)
assert len(preamble.shape) == 1, "Preamble must be a vector, not a matrix with a dimension of size 1"
assert add_head or rm_head or rm_tail, "At least one operation must be True"
self.preamble = preamble
self.add_head = add_head
self.rm_head = rm_head
self.rm_tail = rm_tail
self.port_in_id = pmt.intern("in")
self.port_out_id = pmt.intern("out")
self.message_port_register_in(self.port_in_id)
self.message_port_register_out(self.port_out_id)
self.set_msg_handler(self.port_in_id, self.handle_pdu)
self.npackets = 0
def handle_pdu(self, pdu):
"""Insert or remove the preamble from a pdu."""
self.npackets += 1
tags = pmt.car(pdu)
data = pmt.to_python(pmt.cdr(pdu)).astype(numpy.int32)
if self.add_head:
data = numpy.concatenate([self.preamble, data])
elif self.rm_head:
data = data[self.preamble.size:]
if self.rm_tail:
data = data[:-self.preamble.size]
# print("packet {}: {} in, {} out".format(self.npackets, data.size, data_out.size))
if data.size > 0:
self.message_port_pub(self.port_out_id,
pmt.cons(tags, pmt.to_pmt(data)))
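# Hedged, flowgraph-free sketch of the array logic above (the GNU Radio
# message plumbing is omitted; bit values are illustrative):
if __name__ == '__main__':
    demo_preamble = numpy.array([1, 0, 1, 1], dtype=numpy.int32)
    payload = numpy.array([0, 1, 1, 0, 0, 1], dtype=numpy.int32)
    framed = numpy.concatenate([demo_preamble, payload])  # add_head path
    recovered = framed[demo_preamble.size:]             # rm_head path
    assert numpy.array_equal(recovered, payload)
    print(framed, recovered)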
| [
"pmt.intern",
"gnuradio.gr.basic_block.__init__",
"pmt.to_pmt",
"numpy.array",
"numpy.concatenate",
"pmt.car",
"pmt.cdr"
] | [((1325, 1410), 'gnuradio.gr.basic_block.__init__', 'gr.basic_block.__init__', (['self'], {'name': '"""preamble_insert"""', 'in_sig': 'None', 'out_sig': 'None'}), "(self, name='preamble_insert', in_sig=None, out_sig=None\n )\n", (1348, 1410), False, 'from gnuradio import gr\n'), ((2045, 2061), 'pmt.intern', 'pmt.intern', (['"""in"""'], {}), "('in')\n", (2055, 2061), False, 'import pmt\n'), ((2089, 2106), 'pmt.intern', 'pmt.intern', (['"""out"""'], {}), "('out')\n", (2099, 2106), False, 'import pmt\n'), ((2439, 2451), 'pmt.car', 'pmt.car', (['pdu'], {}), '(pdu)\n', (2446, 2451), False, 'import pmt\n'), ((2560, 2600), 'numpy.concatenate', 'numpy.concatenate', (['[self.preamble, data]'], {}), '([self.preamble, data])\n', (2577, 2600), False, 'import numpy\n'), ((1653, 1674), 'numpy.array', 'numpy.array', (['preamble'], {}), '(preamble)\n', (1664, 1674), False, 'import numpy\n'), ((2481, 2493), 'pmt.cdr', 'pmt.cdr', (['pdu'], {}), '(pdu)\n', (2488, 2493), False, 'import pmt\n'), ((2963, 2979), 'pmt.to_pmt', 'pmt.to_pmt', (['data'], {}), '(data)\n', (2973, 2979), False, 'import pmt\n')] |
# Contains functions and classes relating to the model
import torch
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from collections import OrderedDict
import matplotlib.pyplot as plt
import numpy as np
import json
from PIL import Image
from torch.autograd import Variable
from torch import nn, optim
import utilityX
arch = {"vgg16": 25088,
"densenet121": 1024}
def setup_network(structure='vgg16', dropout=0.1, hidden_units=4096, lr=0.001, device='gpu'):
if structure == 'vgg16':
model = models.vgg16(pretrained=True)
elif structure == 'densenet121':
model = models.densenet121(pretrained=True)
# Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout
for param in model.parameters():
param.requires_grad = False
model.classifier = nn.Sequential(
nn.Linear(arch[structure], hidden_units),
nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(hidden_units, 102),
nn.LogSoftmax(dim=1)
)
print(model)
model = model.to('cuda')
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr)
if torch.cuda.is_available() and device == 'gpu':
model.cuda()
return model, criterion
def save_checkpoint(train_data, model=0, path='checkpoint.pth', structure='vgg16', hidden_units=4096, dropout=0.3, lr=0.001, epochs=1):
model.class_to_idx = train_data.class_to_idx
torch.save({'structure': structure,
'hidden_units': hidden_units,
'dropout': dropout,
'learning_rate': lr,
'no_of_epochs': epochs,
'state_dict': model.state_dict(),
'class_to_idx': model.class_to_idx},
path)
def load_checkpoint(path='checkpoint.pth'):
checkpoint = torch.load(path)
lr = checkpoint['learning_rate']
hidden_units = checkpoint['hidden_units']
dropout = checkpoint['dropout']
epochs = checkpoint['no_of_epochs']
structure = checkpoint['structure']
model, _ = setup_network(structure, dropout, hidden_units, lr)
model.class_to_idx = checkpoint['class_to_idx']
model.load_state_dict(checkpoint['state_dict'])
return model
def predict(image_path, model, topk=5, device='gpu'):
model.to('cuda')
model.eval()
img = process_image(image_path)
img = img.numpy()
img = torch.from_numpy(np.array([img])).float()
with torch.no_grad():
output = model.forward(img.cuda())
probability = torch.exp(output).data
return probability.topk(topk)
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
# TODO: Process a PIL image for use in a PyTorch model
img_pil = Image.open(image)
img_transforms = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[
0.229, 0.224, 0.225])
])
image = img_transforms(img_pil)
return image
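# Hedged end-to-end sketch; both paths below are hypothetical placeholders
# and a CUDA device is assumed (as in the functions above):
if __name__ == '__main__':
    demo_model = load_checkpoint('checkpoint.pth')          # hypothetical path
    probs, classes = predict('flowers/test/1/image.jpg',     # hypothetical path
                             demo_model, topk=5)
    print(probs, classes)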
| [
"torchvision.transforms.CenterCrop",
"torch.nn.ReLU",
"PIL.Image.open",
"torch.nn.Dropout",
"torch.load",
"torch.exp",
"numpy.array",
"torch.cuda.is_available",
"torch.nn.NLLLoss",
"torchvision.models.densenet121",
"torch.nn.Linear",
"torch.nn.LogSoftmax",
"torchvision.transforms.Resize",
... | [((1323, 1335), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), '()\n', (1333, 1335), False, 'from torch import nn, optim\n'), ((2076, 2092), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (2086, 2092), False, 'import torch\n'), ((3046, 3063), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (3056, 3063), False, 'from PIL import Image\n'), ((714, 743), 'torchvision.models.vgg16', 'models.vgg16', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (726, 743), True, 'import torchvision.models as models\n'), ((1098, 1138), 'torch.nn.Linear', 'nn.Linear', (['arch[structure]', 'hidden_units'], {}), '(arch[structure], hidden_units)\n', (1107, 1138), False, 'from torch import nn, optim\n'), ((1148, 1157), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1155, 1157), False, 'from torch import nn, optim\n'), ((1167, 1186), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (1177, 1186), False, 'from torch import nn, optim\n'), ((1196, 1224), 'torch.nn.Linear', 'nn.Linear', (['hidden_units', '(102)'], {}), '(hidden_units, 102)\n', (1205, 1224), False, 'from torch import nn, optim\n'), ((1234, 1254), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (1247, 1254), False, 'from torch import nn, optim\n'), ((1406, 1431), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1429, 1431), False, 'import torch\n'), ((2697, 2712), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2710, 2712), False, 'import torch\n'), ((2776, 2793), 'torch.exp', 'torch.exp', (['output'], {}), '(output)\n', (2785, 2793), False, 'import torch\n'), ((631, 656), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (654, 656), False, 'import torch\n'), ((797, 832), 'torchvision.models.densenet121', 'models.densenet121', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (815, 832), True, 'import torchvision.models as models\n'), ((3114, 3136), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (3131, 3136), True, 'import torchvision.transforms as transforms\n'), ((3146, 3172), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (3167, 3172), True, 'import torchvision.transforms as transforms\n'), ((3182, 3203), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3201, 3203), True, 'import torchvision.transforms as transforms\n'), ((3213, 3288), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (3233, 3288), True, 'import torchvision.transforms as transforms\n'), ((2662, 2677), 'numpy.array', 'np.array', (['[img]'], {}), '([img])\n', (2670, 2677), True, 'import numpy as np\n')] |
from datetime import datetime
from dateutil.relativedelta import relativedelta
from flask import request, jsonify, abort
from flask.views import MethodView
from numpy import around
# Activate Agg, must be done before imports below
from odinapi.utils import use_agg
from odinapi.utils.time_util import datetime2mjd, mjd2stw
from .geoloc_tools import get_geoloc_info
from .level1b_scandata_exporter_v2 import get_scan_data_v2, scan2dictlist_v4
from .level1b_scanlogdata_exporter import get_scan_logdata
from .read_apriori import get_apriori
from .read_mls import read_mls_file
from .read_mipas import read_mipas_file, read_esa_mipas_file
from .read_smiles import read_smiles_file
from .read_sageIII import read_sageIII_file
from .read_osiris import read_osiris_file
from .read_odinsmr2_old import read_qsmr_file
from .read_ace import read_ace_file
from .newdonalettyERANC import run_donaletty
from .database import DatabaseConnector
from odinapi.utils.defs import SPECIES
from .get_odinapi_info import get_config_data_files
from odinapi.views.baseview import register_versions, BaseView
from odinapi.views.urlgen import get_freqmode_raw_url
from odinapi.utils.defs import FREQMODE_TO_BACKEND
from odinapi.utils import time_util
from odinapi.utils.collocations import get_collocations
from odinapi.utils.swagger import SWAGGER
from odinapi.views.views_cached import get_scan_log_data
import odinapi.utils.get_args as get_args
# Make linter happy
use_agg
SWAGGER.add_parameter('date', 'path', str, string_format='date')
SWAGGER.add_type('freqmode_info', {
'Backend': str,
'FreqMode': int,
'NumScan': int,
'URL': str
})
class DateInfo(BaseView):
"""Get scan counts for a day"""
@register_versions('swagger', ['v5'])
def _swagger_def(self, version):
return SWAGGER.get_path_definition(
['level1'],
['date'],
{"200": SWAGGER.get_type_response(
'freqmode_info', is_list=True, Date=str)},
summary="Get scan counts for a day"
)
@register_versions('fetch')
def _get(self, version, date):
try:
date1 = datetime.strptime(date, '%Y-%m-%d')
except ValueError:
abort(404)
date2 = date1 + relativedelta(days=+1)
mjd1 = int(datetime2mjd(date1))
mjd2 = int(datetime2mjd(date2))
stw1 = mjd2stw(mjd1)
stw2 = mjd2stw(mjd2)
query_str = self.gen_query(stw1, stw2, mjd1, mjd2)
return self.gen_data(date, version, query_str)
@register_versions('return', ['v4'])
def _return(self, version, data, date):
return dict(Date=date, Info=data)
@register_versions('return', ['v5'])
def _return_v5(self, version, data, date):
return dict(
Date=date,
Data=data,
Type='freqmode_info',
Count=len(data))
def gen_data(self, date, version, query_string):
con = DatabaseConnector()
query = con.query(query_string)
result = query.dictresult()
con.close()
info_list = []
for row in result:
info_dict = {}
info_dict['Backend'] = row['backend']
info_dict['FreqMode'] = row['freqmode']
info_dict['NumScan'] = row['count']
info_dict['URL'] = get_freqmode_raw_url(
request.url_root, version, date, row['backend'],
row['freqmode'])
info_list.append(info_dict)
return info_list
def gen_query(self, stw1, stw2, mjd1, mjd2):
query_str = (
"select freqmode, backend, count(distinct(stw)) "
"from ac_cal_level1b "
"where stw between {0} and {1} "
"group by backend,freqmode "
"order by backend,freqmode "
).format(stw1, stw2)
return query_str
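    # Illustrative output of gen_query (the stw values are placeholders):
    #   select freqmode, backend, count(distinct(stw)) from ac_cal_level1b
    #   where stw between 7000000000 and 7001000000
    #   group by backend,freqmode order by backend,freqmode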
class DateBackendInfo(DateInfo):
"""Get scan counts for a day and backend"""
SUPPORTED_VERSIONS = ['v4']
@register_versions('fetch')
def _get(self, version, date, backend):
try:
date1 = datetime.strptime(date, '%Y-%m-%d')
except ValueError:
abort(404)
date2 = date1 + relativedelta(days=+1)
mjd1 = int(datetime2mjd(date1))
mjd2 = int(datetime2mjd(date2))
stw1 = mjd2stw(mjd1)
stw2 = mjd2stw(mjd2)
query_str = self.gen_query(stw1, stw2, mjd1, mjd2, backend)
return self.gen_data(date, version, query_str)
@register_versions('return')
def _return(self, version, data, date, backend):
return dict(Date=date, Info=data)
def gen_query(self, stw1, stw2, mjd1, mjd2, backend):
query_str = (
"select freqmode, backend, count(distinct(stw)) "
"from ac_cal_level1b "
"where stw between {0} and {1} "
"and backend='{2}' "
"group by backend,freqmode "
"order by backend,freqmode "
).format(stw1, stw2, backend)
return query_str
class FreqmodeInfo(BaseView):
"""loginfo for all scans from a given date and freqmode"""
SUPPORTED_VERSIONS = ['v4']
KEYS_V4 = [
'Quality',
'DateTime',
'FreqMode',
'LatStart',
'LatEnd',
'LonStart',
'LonEnd',
'SunZD',
'AltStart',
'AltEnd',
'NumSpec',
'MJDStart',
'MJDEnd',
'ScanID']
@register_versions('fetch', ['v4'])
def _fetch_data_v4(self, version, date, backend, freqmode, scanno=None):
con = DatabaseConnector()
loginfo = {}
keylist = self.KEYS_V4
loginfo, _, _ = get_scan_logdata(
con, backend, date+'T00:00:00', freqmode=int(freqmode), dmjd=1,
)
con.close()
try:
for index in range(len(loginfo['ScanID'])):
loginfo['DateTime'][index] = (
loginfo['DateTime'][index]).isoformat('T')
except KeyError:
loginfo['Info'] = []
return jsonify({'Info': loginfo['Info']})
for key in loginfo:
try:
loginfo[key] = loginfo[key].tolist()
except AttributeError:
pass
loginfo['Info'] = []
for ind in range(len(loginfo['ScanID'])):
freq_mode = loginfo['FreqMode'][ind]
scanid = loginfo['ScanID'][ind]
datadict = dict()
for key in keylist:
datadict[key] = loginfo[key][ind]
datadict['URLS'] = dict()
datadict['URLS']['URL-log'] = (
'{0}rest_api/{1}/freqmode_raw/{2}/{3}/{4}/{5}/').format(
request.url_root,
version,
date,
backend,
freq_mode,
scanid)
datadict['URLS']['URL-spectra'] = (
'{0}rest_api/{1}/scan/{2}/{3}/{4}').format(
request.url_root,
version,
backend,
freq_mode,
scanid)
datadict['URLS']['URL-ptz'] = (
'{0}rest_api/{1}/ptz/{2}/{3}/{4}/{5}').format(
request.url_root,
version,
date,
backend,
freq_mode,
scanid
)
for species in SPECIES:
datadict['URLS']['''URL-apriori-{0}'''.format(species)] = (
'{0}rest_api/{1}/apriori/{2}/{3}/{4}/{5}/{6}').format(
request.url_root,
version,
species,
date,
backend,
freq_mode,
scanid
)
loginfo['Info'].append(datadict)
return loginfo
@register_versions('return', ['v4'])
def _return_data_v2(self, version, loginfo, date, backend, freqmode,
scanno=None):
if scanno is None:
try:
return {'Info': loginfo['Info']}
except TypeError:
return {'Info': []}
else:
            for s in loginfo['Info']:
                if s['ScanID'] == scanno:
                    return {"Info": s}
            abort(404)
SWAGGER.add_type('Log', {
"AltEnd": float,
"AltStart": float,
"DateTime": str,
"FreqMode": int,
"LatEnd": float,
"LatStart": float,
"LonEnd": float,
"LonStart": float,
"MJDEnd": float,
"MJDStart": float,
"NumSpec": int,
"Quality": int,
"ScanID": int,
"SunZD": float,
"URLS": {url_key: str for url_key in [
'URL-apriori-%s' % species for species in SPECIES] +
['URL-log', 'URL-ptz', 'URL-spectra']}
})
class FreqmodeInfoNoBackend(BaseView):
SUPPORTED_VERSIONS = ['v5']
@register_versions('swagger')
def _swagger_def(self, version):
return SWAGGER.get_path_definition(
['level1'],
['date', 'freqmode'],
{"200": SWAGGER.get_type_response('Log', is_list=True)},
summary="Get log info for scans in a day and freqmode"
)
@register_versions('fetch')
def _fetch_data(self, version, date, freqmode):
try:
backend = FREQMODE_TO_BACKEND[freqmode]
except KeyError:
abort(404)
con = DatabaseConnector()
loginfo = {}
keylist = FreqmodeInfo.KEYS_V4
loginfo, _, _ = get_scan_logdata(
con, backend, date+'T00:00:00', freqmode=int(freqmode), dmjd=1)
con.close()
try:
for index in range(len(loginfo['ScanID'])):
loginfo['DateTime'][index] = (
loginfo['DateTime'][index]).isoformat('T')
except KeyError:
loginfo['Info'] = []
return jsonify({'Info': loginfo['Info']})
for key in loginfo:
try:
loginfo[key] = loginfo[key].tolist()
except AttributeError:
pass
loginfo['Info'] = []
for ind in range(len(loginfo['ScanID'])):
freq_mode = loginfo['FreqMode'][ind]
scanid = loginfo['ScanID'][ind]
datadict = dict()
for key in keylist:
datadict[key] = loginfo[key][ind]
datadict['URLS'] = dict()
datadict['URLS']['URL-log'] = (
'{0}rest_api/{1}/level1/{2}/{3}/Log/').format(
request.url_root,
version,
freq_mode,
scanid)
datadict['URLS']['URL-spectra'] = (
'{0}rest_api/{1}/level1/{2}/{3}/L1b/').format(
request.url_root,
version,
freq_mode,
scanid)
datadict['URLS']['URL-ptz'] = (
'{0}rest_api/{1}/level1/{2}/{3}/ptz/').format(
request.url_root,
version,
freq_mode,
scanid
)
for species in SPECIES:
datadict['URLS']['''URL-apriori-{0}'''.format(species)] = (
'{0}rest_api/{1}/level1/{2}/{3}/apriori/{4}/').format(
request.url_root,
version,
freq_mode,
scanid,
species
)
loginfo['Info'].append(datadict)
return loginfo['Info']
@register_versions('return')
def _return_data_v5(self, version, data, date, freqmode):
if not data:
data = []
return {'Data': data, 'Type': 'Log', 'Count': len(data)}
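# Note: all v5 endpoints wrap their payload in the same response envelope,
# {'Data': <payload>, 'Type': <type string>, 'Count': <int or None>}.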
class ScanInfoNoBackend(FreqmodeInfoNoBackend):
@register_versions('swagger')
def _swagger_def(self, version):
return SWAGGER.get_path_definition(
['level1'],
['date', 'freqmode', 'scanno'],
{"200": SWAGGER.get_type_response('Log')},
summary="Get log info for a scan"
)
@register_versions('fetch')
def _fetch_data(self, version, date, freqmode, scanno):
return super(ScanInfoNoBackend, self)._fetch_data(
version, date, freqmode)
@register_versions('return')
def _return_data_v5(self, version, data, date, freqmode, scanno):
for s in data:
if s['ScanID'] == scanno:
return {'Data': s, 'Type': 'Log', 'Count': None}
abort(404)
class ScanSpec(BaseView):
"""Get L1b data"""
SUPPORTED_VERSIONS = ['v4']
@register_versions('fetch', ['v4'])
def _get_v4(self, version, backend, freqmode, scanno, debug=False):
con = DatabaseConnector()
spectra = get_scan_data_v2(con, backend, freqmode, scanno, debug)
con.close()
if spectra == {}:
abort(404)
# spectra is a dictionary containing the relevant data
return scan2dictlist_v4(spectra)
@register_versions('return')
def _to_return_format(self, version, datadict, *args, **kwargs):
return datadict
SWAGGER.add_type('L1b', {
"Altitude": [float],
"Apodization": [int],
"AttitudeVersion": [int],
"Backend": [int],
"Channels": [int],
"Dec2000": [float],
"Efftime": [float],
"FreqCal": [[float]],
"FreqMode": [int],
"FreqRes": [float],
"Frequency": {
"AppliedDopplerCorr": [float],
"ChannelsID": [int],
"IFreqGrid": [float],
"LOFreq": [float],
"SubBandIndex": [[int]],
},
"Frontend": [int],
"GPSpos": [[float]],
"GPSvel": [[float]],
"IntTime": [float],
"Latitude": [float],
"Longitude": [float],
"MJD": [float],
"MoonPos": [[float]],
"Orbit": [float],
"Quality": [float],
"RA2000": [float],
"SBpath": [float],
"STW": [int],
"ScanID": [int],
"Spectrum": [[float]],
"SunPos": [[float]],
"SunZD": [float],
"TSpill": [float],
"Tcal": [float],
"Trec": [float],
"TrecSpectrum": [float],
"Version": [int],
"Vgeo": [float],
"ZeroLagVar": [[float]],
})
SWAGGER.add_parameter('debug', 'query', bool)
class ScanSpecNoBackend(ScanSpec):
"""Get L1b data"""
SUPPORTED_VERSIONS = ['v5']
@register_versions('swagger')
def _swagger_def(self, version):
return SWAGGER.get_path_definition(
['level1'],
['freqmode', 'scanno', 'debug'],
{"200": SWAGGER.get_type_response('L1b')},
summary="Get level1 data for a scan"
)
@register_versions('fetch')
def _get_v5(self, version, freqmode, scanno):
try:
backend = FREQMODE_TO_BACKEND[freqmode]
except KeyError:
abort(404)
try:
debug = get_args.get_bool('debug')
except ValueError:
abort(400)
return self._get_v4(version, backend, freqmode, scanno, debug)
@register_versions('return')
def _to_return_format(self, version, data, *args, **kwargs):
return {'Data': data, 'Type': 'L1b', 'Count': None}
class ScanPTZ(BaseView):
"""Get PTZ data"""
SUPPORTED_VERSIONS = ['v4']
@register_versions('fetch', ['v4'])
def _get_ptz_v4(self, version, date, backend, freqmode, scanno):
con = DatabaseConnector()
loginfo = get_scan_log_data(con, freqmode, scanno)
con.close()
if loginfo == {}:
abort(404)
mjd, _, midlat, midlon = get_geoloc_info(loginfo)
datadict = run_donaletty(mjd, midlat, midlon, scanno)
self._convert_items(datadict)
datadictv4 = dict()
datadictv4['Pressure'] = around(datadict['P'], decimals=8).tolist()
datadictv4['Temperature'] = around(
datadict['T'], decimals=3).tolist()
datadictv4['Altitude'] = datadict['Z']
datadictv4['Latitude'] = datadict['latitude']
datadictv4['Longitude'] = datadict['longitude']
datadictv4['MJD'] = datadict['datetime']
return datadictv4
def _convert_items(self, datadict):
for key in ['P', 'T', 'Z']:
if key == 'P':
# convert from hPa to Pa
datadict[key] *= 100
if key == 'Z':
# convert from km to m
datadict[key] *= 1000
datadict[key] = datadict[key].tolist()
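        # Worked example of the conversions above: 1013.25 hPa -> 101325.0 Pa
        # and 85.0 km -> 85000.0 m; 'T' is only converted to a plain list.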
@register_versions('return')
def _to_return_format(self, version, datadict, *args, **kwargs):
return datadict
SWAGGER.add_type('ptz', {
"Altitude": [float],
"Latitude": float,
"Longitude": float,
"MJD": float,
"Pressure": [float],
"Temperature": [float],
})
class ScanPTZNoBackend(ScanPTZ):
"""Get PTZ data"""
SUPPORTED_VERSIONS = ['v5']
@register_versions('swagger')
def _swagger_def(self, version):
return SWAGGER.get_path_definition(
['level1'],
['freqmode', 'scanno'],
{"200": SWAGGER.get_type_response('ptz')},
summary="Get ptz data for a scan"
)
@register_versions('fetch')
def _get_ptz_v5(self, version, freqmode, scanno):
try:
backend = FREQMODE_TO_BACKEND[freqmode]
except KeyError:
abort(404)
# TODO: Not always correct date?
date = time_util.stw2datetime(scanno).strftime('%Y-%m-%d')
return self._get_ptz_v4(version, date, backend, freqmode, scanno)
@register_versions('return')
def _to_return_format(self, version, datadict, *args, **kwargs):
return {'Data': datadict, 'Type': 'ptz', 'Count': None}
SWAGGER.add_parameter(
'aprsource', 'query', str,
description="Alternative apriori data source to use"
)
class ScanAPR(BaseView):
"""Get apriori data for a certain species"""
SUPPORTED_VERSIONS = ['v4']
@register_versions('fetch', ['v4'])
def _get_v4(self, version, species, date, backend, freqmode, scanno):
con = DatabaseConnector()
loginfo = get_scan_log_data(con, freqmode, scanno)
con.close()
if loginfo == {}:
abort(404)
_, day_of_year, midlat, _ = get_geoloc_info(loginfo)
datadict = get_apriori(
species, day_of_year, midlat,
source=get_args.get_string('aprsource'),
)
        # vmr can be very small, so it is problematic to decrease the number of digits
return {
'Pressure': around(datadict['pressure'], decimals=8).tolist(),
'VMR': datadict['vmr'].tolist(),
'Species': datadict['species'],
'Altitude': datadict['altitude'].tolist(),
}
@register_versions('return')
def _return_format(self, version, data, *args, **kwargs):
return data
SWAGGER.add_parameter('species', 'path', str)
SWAGGER.add_type('apriori', {
"Pressure": [float],
"Altitude": [float],
"Species": str,
"VMR": [float],
})
class ScanAPRNoBackend(ScanAPR):
"""Get apriori data for a certain species"""
SUPPORTED_VERSIONS = ['v5']
@register_versions('swagger')
def _swagger_def(self, version):
return SWAGGER.get_path_definition(
['level1'],
['freqmode', 'scanno', 'species', 'aprsource'],
{"200": SWAGGER.get_type_response('apriori')},
summary="Get apriori data for a scan and species"
)
@register_versions('fetch')
def _get_v5(self, version, freqmode, scanno, species):
try:
backend = FREQMODE_TO_BACKEND[freqmode]
except KeyError:
abort(404)
# TODO: Not always correct date?
date = time_util.stw2datetime(scanno).strftime('%Y-%m-%d')
return self._get_v4(version, species, date, backend, freqmode, scanno)
@register_versions('return')
def _return_format(self, version, datadict, *args, **kwargs):
return {'Data': datadict, 'Type': 'apriori', 'Count': None}
SWAGGER.add_type('collocation', {
"Instrument": str,
"Species": str,
"URL": str
})
class CollocationsView(BaseView):
SUPPORTED_VERSIONS = ['v5']
@register_versions('swagger')
def _swagger_def(self, version):
return SWAGGER.get_path_definition(
['level1'],
['freqmode', 'scanno'],
{"200": SWAGGER.get_type_response('collocation', is_list=True)},
summary="Get collocations for a scan"
)
@register_versions('fetch')
def _get(self, version, freqmode, scanno):
try:
return get_L2_collocations(
request.url_root, version, freqmode, scanno)
except KeyError:
abort(404)
@register_versions('return')
def _return(self, version, collocations, freqmode, scanno):
return {'Data': collocations, 'Type': 'collocation',
'Count': len(collocations)}
def get_L2_collocations(root_url, version, freqmode, scanno):
collocations_fields = ['date', 'instrument', 'species', 'file',
'file_index']
collocations = []
for coll in get_collocations(
freqmode, scanno, fields=collocations_fields):
url = (
'{root}rest_api/{version}/vds_external/{instrument}/'
'{species}/{date}/{file}/{file_index}').format(
root=root_url, version=version,
instrument=coll['instrument'], species=coll['species'],
date=coll['date'], file=coll['file'],
file_index=coll['file_index'])
collocations.append({
'URL': url,
'Instrument': coll['instrument'],
'Species': coll['species']})
return collocations
class VdsInfo(MethodView):
"""verification data set scan info"""
def get(self, version):
"""GET-method"""
if version not in ['v4']:
abort(404)
query_string = '''select backend, freqmode, count(distinct(scanid))
from collocations group by backend,freqmode'''
datadict = self.gen_data(query_string, version)
return jsonify(datadict)
def gen_data(self, query_string, version):
con = DatabaseConnector()
query = con.query(query_string)
result = query.dictresult()
con.close()
datadict = {'VDS': []}
for row in result:
data = dict()
data['Backend'] = row['backend']
data['FreqMode'] = row['freqmode']
data['NumScan'] = row['count']
data['URL-collocation'] = '{0}rest_api/{1}/vds/{2}/{3}'.format(
request.url_root,
version,
row['backend'],
row['freqmode'])
data['URL-allscans'] = ('{0}rest_api/{1}/vds/{2}/{3}/allscans'
).format(request.url_root, version,
row['backend'], row['freqmode'])
datadict['VDS'].append(data)
return datadict
class VdsFreqmodeInfo(MethodView):
"""verification data set scan info"""
def get(self, version, backend, freqmode):
"""GET-method"""
if version not in ['v4']:
abort(404)
query_string = (
"select backend,freqmode,species,instrument,count(*) "
"from collocations "
"where backend='{0}' and freqmode={1} "
"group by backend, freqmode, species, instrument"
"").format(backend, freqmode)
datadict = self.gen_data(query_string, version)
return jsonify(datadict)
def gen_data(self, query_string, version):
con = DatabaseConnector()
query = con.query(query_string)
result = query.dictresult()
con.close()
datadict = {'VDS': []}
for row in result:
data = dict()
data['Backend'] = row['backend']
data['FreqMode'] = row['freqmode']
data['Species'] = row['species']
data['Instrument'] = row['instrument']
data['NumScan'] = row['count']
data['URL'] = '{0}rest_api/{1}/vds/{2}/{3}/{4}/{5}'.format(
request.url_root,
version,
row['backend'],
row['freqmode'],
row['species'],
row['instrument'],)
datadict['VDS'].append(data)
return datadict
class VdsInstrumentInfo(MethodView):
"""verification data set scan info"""
def get(self, version, backend, freqmode, instrument, species):
"""GET-method"""
if version not in ['v4']:
abort(404)
query_string = '''select date, backend, freqmode,
species, instrument, count(*) from collocations
where backend='{0}' and
freqmode={1} and
species='{2}' and
instrument='{3}'
group by date, backend, freqmode, species, instrument
order by date'''.format(backend, freqmode, species,
instrument)
datadict = self.gen_data(query_string, version)
return jsonify(datadict)
def gen_data(self, query_string, version):
con = DatabaseConnector()
query = con.query(query_string)
result = query.dictresult()
con.close()
datadict = {'VDS': []}
for row in result:
data = dict()
data['Date'] = row['date'].isoformat()
data['Backend'] = row['backend']
data['FreqMode'] = row['freqmode']
data['Species'] = row['species']
data['Instrument'] = row['instrument']
data['NumScan'] = row['count']
data['URL'] = '{0}rest_api/{1}/vds/{2}/{3}/{4}/{5}/{6}'.format(
request.url_root,
version,
row['backend'],
row['freqmode'],
row['species'],
row['instrument'],
row['date'],)
datadict['VDS'].append(data)
return datadict
class VdsDateInfo(MethodView):
"""verification data set scan info"""
def get(self, version, backend, freqmode, species, instrument, date):
"""GET-method"""
if version not in ['v4']:
abort(404)
query_string = '''select * from collocations
where backend='{0}' and
freqmode={1} and
species='{2}' and
instrument='{3}'
and date='{4}' '''.format(backend, freqmode,
species, instrument,
date)
datadict = self.gen_data(query_string, version, backend, freqmode,
species, instrument, date)
return jsonify(datadict)
def gen_data(self, query_string, version, backend, freqmode, species,
instrument, date):
con = DatabaseConnector()
query = con.query(query_string)
result = query.dictresult()
con.close()
datadict = {'VDS': []}
odin_keys = [
'Date', 'FreqMode', 'Backend', 'ScanID', 'AltEnd',
'AltStart', 'LatEnd', 'LatStart', 'LonEnd', 'LonStart',
'MJDEnd', 'MJDStart', 'NumSpec', 'SunZD', 'Datetime',
]
collocation_keys = [
'Latitude', 'Longitude', 'MJD', 'Instrument', 'Species',
'File', 'File_Index', 'DMJD', 'DTheta',
]
for row in result:
data = dict()
odin = dict()
for key in odin_keys:
odin[key] = row[key.lower()]
collocation = dict()
for key in collocation_keys:
collocation[key] = row[key.lower()]
data['OdinInfo'] = odin
data['CollocationInfo'] = collocation
data['URLS'] = dict()
data['URLS']['URL-spectra'] = ('{0}rest_api/{1}/scan/{2}/{3}/{4}'
).format(request.url_root, version,
backend, freqmode,
row['scanid'])
data['URLS']['URL-ptz'] = ('{0}rest_api/{1}/ptz/{2}/{3}/{4}/{5}'
).format(request.url_root, version,
row['date'], backend, freqmode,
row['scanid'])
for species in SPECIES:
data['URLS']['''URL-apriori-{0}'''.format(species)] = (
'{0}rest_api/{1}/apriori/{2}/{3}/{4}/{5}/{6}').format(
request.url_root,
version,
species,
row['date'],
backend,
freqmode,
row['scanid'])
data['URLS']['''URL-{0}-{1}'''.format(row['instrument'],
row['species'])] = (
'{0}rest_api/{1}/vds_external/{2}/{3}/{4}/{5}/{6}').format(
request.url_root,
version,
row['instrument'],
row['species'],
row['date'],
row['file'],
row['file_index'])
datadict['VDS'].append(data)
return datadict
class VdsScanInfo(MethodView):
"""verification data set scan info"""
def get(self, version, backend, freqmode):
"""GET-method"""
if version not in ['v4']:
abort(404)
query_string = '''select distinct(scanid), date, freqmode, backend,
altend, altstart, latend, latstart, lonend, lonstart,
mjdend, mjdstart, numspec, sunzd
from collocations
where backend='{0}' and freqmode={1}
'''.format(backend, freqmode)
datadict = self.gen_data(query_string, version, backend, freqmode)
return jsonify(datadict)
def gen_data(self, query_string, version, backend, freqmode):
con = DatabaseConnector()
query = con.query(query_string)
result = query.dictresult()
con.close()
datadict = {'VDS': []}
odin_keys = [
'Date', 'FreqMode', 'Backend', 'ScanID', 'AltEnd',
'AltStart', 'LatEnd', 'LatStart', 'LonEnd', 'LonStart',
'MJDEnd', 'MJDStart', 'NumSpec', 'SunZD',
]
for row in result:
data, odin = dict(), dict()
for key in odin_keys:
odin[key] = row[key.lower()]
data['Info'] = odin
data['URLS'] = dict()
data['URLS']['URL-spectra'] = ('{0}rest_api/{1}/scan/{2}/{3}/{4}'
).format(request.url_root, version,
backend, freqmode,
row['scanid'])
data['URLS']['URL-ptz'] = ('{0}rest_api/{1}/ptz/{2}/{3}/{4}/{5}'
).format(request.url_root, version,
row['date'], backend, freqmode,
row['scanid'])
for species in SPECIES:
data['URLS']['''URL-apriori-{0}'''.format(species)] = (
'{0}rest_api/{1}/apriori/{2}/{3}/{4}/{5}/{6}').format(
request.url_root,
version,
species,
row['date'],
backend,
freqmode,
row['scanid'])
datadict['VDS'].append(data)
return datadict
class VdsExtData(MethodView):
"""display verification data set data from external instruments"""
def get(self, version, instrument, species, date, file, file_index):
"""GET-method"""
if version not in ['v4']:
abort(404)
datadict = self.gen_data(instrument, species, date, file,
file_index)
return jsonify(datadict)
def gen_data(self, instrument, species, date, file, file_index):
if instrument == 'mls':
data = read_mls_file(file, date, species, file_index)
elif instrument == 'mipas':
data = read_mipas_file(file, date, species, file_index)
elif instrument == 'mipas_esa':
data = read_esa_mipas_file(file, date, species)
elif instrument == 'smiles':
data = read_smiles_file(file, date, species, file_index)
elif instrument == 'sageIII':
data = read_sageIII_file(file, date, species, 'solar')
elif instrument == 'sageIII_lunar':
data = read_sageIII_file(file, date, species, 'lunar')
elif instrument == 'osiris':
data = read_osiris_file(file, date, species, file_index)
elif instrument == 'smr':
data = read_qsmr_file(file, species, file_index)
elif instrument == 'ace':
data = read_ace_file(file, date, file_index)
else:
abort(404)
return data
class ConfigDataFiles(BaseView):
"""display example files available to the system"""
@register_versions('fetch')
def gen_data(self, version):
"""get the data"""
return get_config_data_files()
@register_versions('return')
def return_data(self, version, data):
return data
| [
"odinapi.views.baseview.register_versions",
"odinapi.utils.time_util.mjd2stw",
"dateutil.relativedelta.relativedelta",
"datetime.datetime.strptime",
"odinapi.utils.get_args.get_string",
"odinapi.views.views_cached.get_scan_log_data",
"odinapi.utils.time_util.stw2datetime",
"odinapi.utils.collocations.... | [((1457, 1521), 'odinapi.utils.swagger.SWAGGER.add_parameter', 'SWAGGER.add_parameter', (['"""date"""', '"""path"""', 'str'], {'string_format': '"""date"""'}), "('date', 'path', str, string_format='date')\n", (1478, 1521), False, 'from odinapi.utils.swagger import SWAGGER\n'), ((1522, 1622), 'odinapi.utils.swagger.SWAGGER.add_type', 'SWAGGER.add_type', (['"""freqmode_info"""', "{'Backend': str, 'FreqMode': int, 'NumScan': int, 'URL': str}"], {}), "('freqmode_info', {'Backend': str, 'FreqMode': int,\n 'NumScan': int, 'URL': str})\n", (1538, 1622), False, 'from odinapi.utils.swagger import SWAGGER\n'), ((8371, 8791), 'odinapi.utils.swagger.SWAGGER.add_type', 'SWAGGER.add_type', (['"""Log"""', "{'AltEnd': float, 'AltStart': float, 'DateTime': str, 'FreqMode': int,\n 'LatEnd': float, 'LatStart': float, 'LonEnd': float, 'LonStart': float,\n 'MJDEnd': float, 'MJDStart': float, 'NumSpec': int, 'Quality': int,\n 'ScanID': int, 'SunZD': float, 'URLS': {url_key: str for url_key in [(\n 'URL-apriori-%s' % species) for species in SPECIES] + ['URL-log',\n 'URL-ptz', 'URL-spectra']}}"], {}), "('Log', {'AltEnd': float, 'AltStart': float, 'DateTime':\n str, 'FreqMode': int, 'LatEnd': float, 'LatStart': float, 'LonEnd':\n float, 'LonStart': float, 'MJDEnd': float, 'MJDStart': float, 'NumSpec':\n int, 'Quality': int, 'ScanID': int, 'SunZD': float, 'URLS': {url_key:\n str for url_key in [('URL-apriori-%s' % species) for species in SPECIES\n ] + ['URL-log', 'URL-ptz', 'URL-spectra']}})\n", (8387, 8791), False, 'from odinapi.utils.swagger import SWAGGER\n'), ((13224, 14110), 'odinapi.utils.swagger.SWAGGER.add_type', 'SWAGGER.add_type', (['"""L1b"""', "{'Altitude': [float], 'Apodization': [int], 'AttitudeVersion': [int],\n 'Backend': [int], 'Channels': [int], 'Dec2000': [float], 'Efftime': [\n float], 'FreqCal': [[float]], 'FreqMode': [int], 'FreqRes': [float],\n 'Frequency': {'AppliedDopplerCorr': [float], 'ChannelsID': [int],\n 'IFreqGrid': [float], 'LOFreq': [float], 'SubBandIndex': [[int]]},\n 'Frontend': [int], 'GPSpos': [[float]], 'GPSvel': [[float]], 'IntTime':\n [float], 'Latitude': [float], 'Longitude': [float], 'MJD': [float],\n 'MoonPos': [[float]], 'Orbit': [float], 'Quality': [float], 'RA2000': [\n float], 'SBpath': [float], 'STW': [int], 'ScanID': [int], 'Spectrum': [\n [float]], 'SunPos': [[float]], 'SunZD': [float], 'TSpill': [float],\n 'Tcal': [float], 'Trec': [float], 'TrecSpectrum': [float], 'Version': [\n int], 'Vgeo': [float], 'ZeroLagVar': [[float]]}"], {}), "('L1b', {'Altitude': [float], 'Apodization': [int],\n 'AttitudeVersion': [int], 'Backend': [int], 'Channels': [int],\n 'Dec2000': [float], 'Efftime': [float], 'FreqCal': [[float]],\n 'FreqMode': [int], 'FreqRes': [float], 'Frequency': {\n 'AppliedDopplerCorr': [float], 'ChannelsID': [int], 'IFreqGrid': [float\n ], 'LOFreq': [float], 'SubBandIndex': [[int]]}, 'Frontend': [int],\n 'GPSpos': [[float]], 'GPSvel': [[float]], 'IntTime': [float],\n 'Latitude': [float], 'Longitude': [float], 'MJD': [float], 'MoonPos': [\n [float]], 'Orbit': [float], 'Quality': [float], 'RA2000': [float],\n 'SBpath': [float], 'STW': [int], 'ScanID': [int], 'Spectrum': [[float]],\n 'SunPos': [[float]], 'SunZD': [float], 'TSpill': [float], 'Tcal': [\n float], 'Trec': [float], 'TrecSpectrum': [float], 'Version': [int],\n 'Vgeo': [float], 'ZeroLagVar': [[float]]})\n", (13240, 14110), False, 'from odinapi.utils.swagger import SWAGGER\n'), ((14249, 14294), 'odinapi.utils.swagger.SWAGGER.add_parameter', 'SWAGGER.add_parameter', 
(['"""debug"""', '"""query"""', 'bool'], {}), "('debug', 'query', bool)\n", (14270, 14294), False, 'from odinapi.utils.swagger import SWAGGER\n'), ((16633, 16786), 'odinapi.utils.swagger.SWAGGER.add_type', 'SWAGGER.add_type', (['"""ptz"""', "{'Altitude': [float], 'Latitude': float, 'Longitude': float, 'MJD': float,\n 'Pressure': [float], 'Temperature': [float]}"], {}), "('ptz', {'Altitude': [float], 'Latitude': float,\n 'Longitude': float, 'MJD': float, 'Pressure': [float], 'Temperature': [\n float]})\n", (16649, 16786), False, 'from odinapi.utils.swagger import SWAGGER\n'), ((17735, 17842), 'odinapi.utils.swagger.SWAGGER.add_parameter', 'SWAGGER.add_parameter', (['"""aprsource"""', '"""query"""', 'str'], {'description': '"""Alternative apriori data source to use"""'}), "('aprsource', 'query', str, description=\n 'Alternative apriori data source to use')\n", (17756, 17842), False, 'from odinapi.utils.swagger import SWAGGER\n'), ((18871, 18916), 'odinapi.utils.swagger.SWAGGER.add_parameter', 'SWAGGER.add_parameter', (['"""species"""', '"""path"""', 'str'], {}), "('species', 'path', str)\n", (18892, 18916), False, 'from odinapi.utils.swagger import SWAGGER\n'), ((18917, 19024), 'odinapi.utils.swagger.SWAGGER.add_type', 'SWAGGER.add_type', (['"""apriori"""', "{'Pressure': [float], 'Altitude': [float], 'Species': str, 'VMR': [float]}"], {}), "('apriori', {'Pressure': [float], 'Altitude': [float],\n 'Species': str, 'VMR': [float]})\n", (18933, 19024), False, 'from odinapi.utils.swagger import SWAGGER\n'), ((20051, 20136), 'odinapi.utils.swagger.SWAGGER.add_type', 'SWAGGER.add_type', (['"""collocation"""', "{'Instrument': str, 'Species': str, 'URL': str}"], {}), "('collocation', {'Instrument': str, 'Species': str, 'URL': str}\n )\n", (20067, 20136), False, 'from odinapi.utils.swagger import SWAGGER\n'), ((1707, 1743), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""swagger"""', "['v5']"], {}), "('swagger', ['v5'])\n", (1724, 1743), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((2041, 2067), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""fetch"""'], {}), "('fetch')\n", (2058, 2067), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((2527, 2562), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""return"""', "['v4']"], {}), "('return', ['v4'])\n", (2544, 2562), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((2655, 2690), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""return"""', "['v5']"], {}), "('return', ['v5'])\n", (2672, 2690), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((3971, 3997), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""fetch"""'], {}), "('fetch')\n", (3988, 3997), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((4475, 4502), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""return"""'], {}), "('return')\n", (4492, 4502), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((5419, 5453), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""fetch"""', "['v4']"], {}), "('fetch', ['v4'])\n", (5436, 5453), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((7930, 7965), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""return"""', "['v4']"], {}), "('return', ['v4'])\n", (7947, 7965), False, 'from odinapi.views.baseview 
import register_versions, BaseView\n'), ((8932, 8960), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""swagger"""'], {}), "('swagger')\n", (8949, 8960), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((9252, 9278), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""fetch"""'], {}), "('fetch')\n", (9269, 9278), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((11637, 11664), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""return"""'], {}), "('return')\n", (11654, 11664), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((11891, 11919), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""swagger"""'], {}), "('swagger')\n", (11908, 11919), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((12186, 12212), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""fetch"""'], {}), "('fetch')\n", (12203, 12212), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((12375, 12402), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""return"""'], {}), "('return')\n", (12392, 12402), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((12707, 12741), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""fetch"""', "['v4']"], {}), "('fetch', ['v4'])\n", (12724, 12741), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((13101, 13128), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""return"""'], {}), "('return')\n", (13118, 13128), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((14393, 14421), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""swagger"""'], {}), "('swagger')\n", (14410, 14421), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((14692, 14718), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""fetch"""'], {}), "('fetch')\n", (14709, 14718), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((15071, 15098), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""return"""'], {}), "('return')\n", (15088, 15098), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((15313, 15347), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""fetch"""', "['v4']"], {}), "('fetch', ['v4'])\n", (15330, 15347), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((16510, 16537), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""return"""'], {}), "('return')\n", (16527, 16537), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((16902, 16930), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""swagger"""'], {}), "('swagger')\n", (16919, 16930), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((17189, 17215), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""fetch"""'], {}), "('fetch')\n", (17206, 17215), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((17572, 17599), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""return"""'], {}), "('return')\n", (17589, 17599), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((17963, 17997), 'odinapi.views.baseview.register_versions', 
'register_versions', (['"""fetch"""', "['v4']"], {}), "('fetch', ['v4'])\n", (17980, 17997), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((18759, 18786), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""return"""'], {}), "('return')\n", (18776, 18786), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((19163, 19191), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""swagger"""'], {}), "('swagger')\n", (19180, 19191), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((19494, 19520), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""fetch"""'], {}), "('fetch')\n", (19511, 19520), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((19887, 19914), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""return"""'], {}), "('return')\n", (19904, 19914), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((20220, 20248), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""swagger"""'], {}), "('swagger')\n", (20237, 20248), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((20533, 20559), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""fetch"""'], {}), "('fetch')\n", (20550, 20559), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((20775, 20802), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""return"""'], {}), "('return')\n", (20792, 20802), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((21183, 21245), 'odinapi.utils.collocations.get_collocations', 'get_collocations', (['freqmode', 'scanno'], {'fields': 'collocations_fields'}), '(freqmode, scanno, fields=collocations_fields)\n', (21199, 21245), False, 'from odinapi.utils.collocations import get_collocations\n'), ((33716, 33742), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""fetch"""'], {}), "('fetch')\n", (33733, 33742), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((33848, 33875), 'odinapi.views.baseview.register_versions', 'register_versions', (['"""return"""'], {}), "('return')\n", (33865, 33875), False, 'from odinapi.views.baseview import register_versions, BaseView\n'), ((2364, 2377), 'odinapi.utils.time_util.mjd2stw', 'mjd2stw', (['mjd1'], {}), '(mjd1)\n', (2371, 2377), False, 'from odinapi.utils.time_util import datetime2mjd, mjd2stw\n'), ((2393, 2406), 'odinapi.utils.time_util.mjd2stw', 'mjd2stw', (['mjd2'], {}), '(mjd2)\n', (2400, 2406), False, 'from odinapi.utils.time_util import datetime2mjd, mjd2stw\n'), ((4303, 4316), 'odinapi.utils.time_util.mjd2stw', 'mjd2stw', (['mjd1'], {}), '(mjd1)\n', (4310, 4316), False, 'from odinapi.utils.time_util import datetime2mjd, mjd2stw\n'), ((4332, 4345), 'odinapi.utils.time_util.mjd2stw', 'mjd2stw', (['mjd2'], {}), '(mjd2)\n', (4339, 4345), False, 'from odinapi.utils.time_util import datetime2mjd, mjd2stw\n'), ((12607, 12617), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (12612, 12617), False, 'from flask import request, jsonify, abort\n'), ((15470, 15510), 'odinapi.views.views_cached.get_scan_log_data', 'get_scan_log_data', (['con', 'freqmode', 'scanno'], {}), '(con, freqmode, scanno)\n', (15487, 15510), False, 'from odinapi.views.views_cached import get_scan_log_data\n'), ((18124, 18164), 'odinapi.views.views_cached.get_scan_log_data', 'get_scan_log_data', (['con', 
'freqmode', 'scanno'], {}), '(con, freqmode, scanno)\n', (18141, 18164), False, 'from odinapi.views.views_cached import get_scan_log_data\n'), ((22189, 22206), 'flask.jsonify', 'jsonify', (['datadict'], {}), '(datadict)\n', (22196, 22206), False, 'from flask import request, jsonify, abort\n'), ((23655, 23672), 'flask.jsonify', 'jsonify', (['datadict'], {}), '(datadict)\n', (23662, 23672), False, 'from flask import request, jsonify, abort\n'), ((25348, 25365), 'flask.jsonify', 'jsonify', (['datadict'], {}), '(datadict)\n', (25355, 25365), False, 'from flask import request, jsonify, abort\n'), ((27125, 27142), 'flask.jsonify', 'jsonify', (['datadict'], {}), '(datadict)\n', (27132, 27142), False, 'from flask import request, jsonify, abort\n'), ((30406, 30423), 'flask.jsonify', 'jsonify', (['datadict'], {}), '(datadict)\n', (30413, 30423), False, 'from flask import request, jsonify, abort\n'), ((32557, 32574), 'flask.jsonify', 'jsonify', (['datadict'], {}), '(datadict)\n', (32564, 32574), False, 'from flask import request, jsonify, abort\n'), ((2136, 2171), 'datetime.datetime.strptime', 'datetime.strptime', (['date', '"""%Y-%m-%d"""'], {}), "(date, '%Y-%m-%d')\n", (2153, 2171), False, 'from datetime import datetime\n'), ((2246, 2268), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(+1)'}), '(days=+1)\n', (2259, 2268), False, 'from dateutil.relativedelta import relativedelta\n'), ((2288, 2307), 'odinapi.utils.time_util.datetime2mjd', 'datetime2mjd', (['date1'], {}), '(date1)\n', (2300, 2307), False, 'from odinapi.utils.time_util import datetime2mjd, mjd2stw\n'), ((2328, 2347), 'odinapi.utils.time_util.datetime2mjd', 'datetime2mjd', (['date2'], {}), '(date2)\n', (2340, 2347), False, 'from odinapi.utils.time_util import datetime2mjd, mjd2stw\n'), ((3310, 3401), 'odinapi.views.urlgen.get_freqmode_raw_url', 'get_freqmode_raw_url', (['request.url_root', 'version', 'date', "row['backend']", "row['freqmode']"], {}), "(request.url_root, version, date, row['backend'], row[\n 'freqmode'])\n", (3330, 3401), False, 'from odinapi.views.urlgen import get_freqmode_raw_url\n'), ((4075, 4110), 'datetime.datetime.strptime', 'datetime.strptime', (['date', '"""%Y-%m-%d"""'], {}), "(date, '%Y-%m-%d')\n", (4092, 4110), False, 'from datetime import datetime\n'), ((4185, 4207), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'days': '(+1)'}), '(days=+1)\n', (4198, 4207), False, 'from dateutil.relativedelta import relativedelta\n'), ((4227, 4246), 'odinapi.utils.time_util.datetime2mjd', 'datetime2mjd', (['date1'], {}), '(date1)\n', (4239, 4246), False, 'from odinapi.utils.time_util import datetime2mjd, mjd2stw\n'), ((4267, 4286), 'odinapi.utils.time_util.datetime2mjd', 'datetime2mjd', (['date2'], {}), '(date2)\n', (4279, 4286), False, 'from odinapi.utils.time_util import datetime2mjd, mjd2stw\n'), ((12980, 12990), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (12985, 12990), False, 'from flask import request, jsonify, abort\n'), ((14916, 14942), 'odinapi.utils.get_args.get_bool', 'get_args.get_bool', (['"""debug"""'], {}), "('debug')\n", (14933, 14942), True, 'import odinapi.utils.get_args as get_args\n'), ((15569, 15579), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (15574, 15579), False, 'from flask import request, jsonify, abort\n'), ((18223, 18233), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (18228, 18233), False, 'from flask import request, jsonify, abort\n'), ((21958, 21968), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (21963, 21968), False, 
'from flask import request, jsonify, abort\n'), ((23292, 23302), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (23297, 23302), False, 'from flask import request, jsonify, abort\n'), ((24716, 24726), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (24721, 24726), False, 'from flask import request, jsonify, abort\n'), ((26493, 26503), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (26498, 26503), False, 'from flask import request, jsonify, abort\n'), ((29927, 29937), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (29932, 29937), False, 'from flask import request, jsonify, abort\n'), ((32420, 32430), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (32425, 32430), False, 'from flask import request, jsonify, abort\n'), ((1891, 1957), 'odinapi.utils.swagger.SWAGGER.get_type_response', 'SWAGGER.get_type_response', (['"""freqmode_info"""'], {'is_list': '(True)', 'Date': 'str'}), "('freqmode_info', is_list=True, Date=str)\n", (1916, 1957), False, 'from odinapi.utils.swagger import SWAGGER\n'), ((2211, 2221), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (2216, 2221), False, 'from flask import request, jsonify, abort\n'), ((4150, 4160), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (4155, 4160), False, 'from flask import request, jsonify, abort\n'), ((6028, 6062), 'flask.jsonify', 'jsonify', (["{'Info': loginfo['Info']}"], {}), "({'Info': loginfo['Info']})\n", (6035, 6062), False, 'from flask import request, jsonify, abort\n'), ((9120, 9166), 'odinapi.utils.swagger.SWAGGER.get_type_response', 'SWAGGER.get_type_response', (['"""Log"""'], {'is_list': '(True)'}), "('Log', is_list=True)\n", (9145, 9166), False, 'from odinapi.utils.swagger import SWAGGER\n'), ((9434, 9444), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (9439, 9444), False, 'from flask import request, jsonify, abort\n'), ((9936, 9970), 'flask.jsonify', 'jsonify', (["{'Info': loginfo['Info']}"], {}), "({'Info': loginfo['Info']})\n", (9943, 9970), False, 'from flask import request, jsonify, abort\n'), ((12089, 12121), 'odinapi.utils.swagger.SWAGGER.get_type_response', 'SWAGGER.get_type_response', (['"""Log"""'], {}), "('Log')\n", (12114, 12121), False, 'from odinapi.utils.swagger import SWAGGER\n'), ((14592, 14624), 'odinapi.utils.swagger.SWAGGER.get_type_response', 'SWAGGER.get_type_response', (['"""L1b"""'], {}), "('L1b')\n", (14617, 14624), False, 'from odinapi.utils.swagger import SWAGGER\n'), ((14871, 14881), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (14876, 14881), False, 'from flask import request, jsonify, abort\n'), ((14982, 14992), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (14987, 14992), False, 'from flask import request, jsonify, abort\n'), ((15800, 15833), 'numpy.around', 'around', (["datadict['P']"], {'decimals': '(8)'}), "(datadict['P'], decimals=8)\n", (15806, 15833), False, 'from numpy import around\n'), ((15879, 15912), 'numpy.around', 'around', (["datadict['T']"], {'decimals': '(3)'}), "(datadict['T'], decimals=3)\n", (15885, 15912), False, 'from numpy import around\n'), ((17092, 17124), 'odinapi.utils.swagger.SWAGGER.get_type_response', 'SWAGGER.get_type_response', (['"""ptz"""'], {}), "('ptz')\n", (17117, 17124), False, 'from odinapi.utils.swagger import SWAGGER\n'), ((17372, 17382), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (17377, 17382), False, 'from flask import request, jsonify, abort\n'), ((17440, 17470), 'odinapi.utils.time_util.stw2datetime', 'time_util.stw2datetime', (['scanno'], {}), '(scanno)\n', (17462, 17470), False, 'from odinapi.utils 
import time_util\n'), ((18388, 18420), 'odinapi.utils.get_args.get_string', 'get_args.get_string', (['"""aprsource"""'], {}), "('aprsource')\n", (18407, 18420), True, 'import odinapi.utils.get_args as get_args\n'), ((19377, 19413), 'odinapi.utils.swagger.SWAGGER.get_type_response', 'SWAGGER.get_type_response', (['"""apriori"""'], {}), "('apriori')\n", (19402, 19413), False, 'from odinapi.utils.swagger import SWAGGER\n'), ((19682, 19692), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (19687, 19692), False, 'from flask import request, jsonify, abort\n'), ((19750, 19780), 'odinapi.utils.time_util.stw2datetime', 'time_util.stw2datetime', (['scanno'], {}), '(scanno)\n', (19772, 19780), False, 'from odinapi.utils import time_util\n'), ((20410, 20464), 'odinapi.utils.swagger.SWAGGER.get_type_response', 'SWAGGER.get_type_response', (['"""collocation"""'], {'is_list': '(True)'}), "('collocation', is_list=True)\n", (20435, 20464), False, 'from odinapi.utils.swagger import SWAGGER\n'), ((20758, 20768), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (20763, 20768), False, 'from flask import request, jsonify, abort\n'), ((18548, 18588), 'numpy.around', 'around', (["datadict['pressure']"], {'decimals': '(8)'}), "(datadict['pressure'], decimals=8)\n", (18554, 18588), False, 'from numpy import around\n'), ((33588, 33598), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (33593, 33598), False, 'from flask import request, jsonify, abort\n')] |
import os
import tensorflow as tf
import numpy as np
import cv2
import gym
from scipy.signal import lfilter
from tensorflow.contrib import summary
def boltzmann(probs, epsilon=0.):
random = tf.random_uniform(shape=(), minval=0, maxval=1)
action = tf.cond(random > epsilon,
lambda: tf.multinomial(tf.log(probs), 1),
lambda: tf.multinomial(
tf.log(tf.ones_like(probs)), 1)
)
return tf.squeeze(action)
def greedy(probs, epsilon=0.):
random = tf.random_uniform(shape=(), minval=0, maxval=1)
action = tf.cond(random > epsilon,
lambda: tf.argmax(probs, axis=1),
lambda: tf.multinomial(
tf.log(tf.ones_like(probs)), 1)
)
return tf.squeeze(action)
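# Note: both samplers above take a uniformly random action with probability
# `epsilon`; otherwise boltzmann() samples an action in proportion to `probs`
# while greedy() deterministically picks the highest-probability action.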
def discount(x, gamma):
return lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
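# Note on the lfilter trick in discount(): filtering the reversed sequence
# with denominator [1, -gamma] realizes y_i = x_i + gamma * y_{i-1}, so after
# reversing back each entry holds the discounted sum of its suffix, e.g.
#   discount(np.array([1., 1., 1.]), 0.5) -> [1.75, 1.5, 1.]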
def gae(rews, vals, bval, gamma=0.99, tau=0.97):
vboot = np.hstack((vals, bval))
return discount(rews + gamma * vboot[1:] - vals, tau * gamma)
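# Quick check of gae() against the usual definition: with
#   delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)
# the call discount(delta, tau * gamma) yields
#   A_t = sum_{l >= 0} (gamma * tau)^l * delta_{t+l},
# i.e. the GAE(gamma, tau) advantage; `bval` is the bootstrap value of the
# state that follows the last step of the rollout.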
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) # pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
            if i == self._skip - 2:
                self._obs_buffer[0] = obs
            if i == self._skip - 1:
                self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class PreprocessObsWrapper(gym.Wrapper):
def __init__(self, env):
gym.Wrapper.__init__(self, env)
def step(self, action):
obs, reward, done, info = self.env.step(action)
obs = self._preprocess(obs)
return obs, reward, done, info
def reset(self, **kwargs):
obs = self.env.reset(**kwargs)
return self._preprocess(obs)
    def _preprocess(self, obs):
        # Crop the 210x160 Atari frame to the 160x160 play area, convert to
        # grayscale, downscale to 80x80 and rescale pixel values to [0, 1].
        x = obs[34:34 + 160, :160]
        x = x.mean(2)
        x = cv2.resize(x, (80, 80))
        x = x.astype(np.float32)
        x *= (1.0 / 255.0)
        x = np.reshape(x, [80, 80, 1])
        return x
def make_atari(env_id):
env = gym.make(env_id)
env = NoopResetEnv(env, noop_max=15)
env = MaxAndSkipEnv(env, skip=4)
env = PreprocessObsWrapper(env)
return env
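# Usage sketch (the env id is an assumption; the 34:194 crop in
# PreprocessObsWrapper is tuned for 210x160 Atari frames such as Pong):
#   env = make_atari('PongNoFrameskip-v4')
#   obs = env.reset()  # float32 array of shape (80, 80, 1), values in [0, 1]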
class Logger:
def __init__(self, name, logdir='logs'):
dir = os.path.join(logdir, name)
writer = summary.create_file_writer(dir, flush_millis=10000)
def log_performance(rewards, actions, tloss, ploss, vloss, entropy):
with writer.as_default(), summary.record_summaries_every_n_global_steps(10):
summary.scalar('Perf/Total Reward', tf.reduce_sum(rewards))
summary.histogram('Actions', actions)
summary.scalar('Perf/Episode Duration', tf.size(rewards))
summary.scalar('Perf/Total Loss', tloss)
summary.scalar('Perf/Policy Loss', tf.reduce_mean(ploss))
summary.scalar('Perf/Value Loss', tf.reduce_mean(vloss))
summary.scalar('Perf/Policy Entropy', tf.reduce_mean(entropy))
def log_gradients(gnorms):
with writer.as_default(), summary.record_summaries_every_n_global_steps(10):
summary.histogram('Gradient Norms', gnorms)
def log_weights(var_list):
for var in var_list:
with writer.as_default(), summary.record_summaries_every_n_global_steps(10):
summary.histogram(var.name, var)
self.log_performance = log_performance
self.log_gradients = log_gradients
self.log_weights = log_weights
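# Usage sketch (argument values are hypothetical); note that summaries are
# only recorded every 10 global steps and flushed roughly every 10 seconds:
#   logger = Logger('run-0')
#   logger.log_performance(rewards, actions, tloss, ploss, vloss, entropy)
#   logger.log_gradients(gnorms)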
| [
"numpy.hstack",
"tensorflow.reduce_sum",
"tensorflow.ones_like",
"tensorflow.contrib.summary.histogram",
"tensorflow.reduce_mean",
"tensorflow.contrib.summary.scalar",
"tensorflow.log",
"gym.make",
"numpy.reshape",
"tensorflow.contrib.summary.create_file_writer",
"tensorflow.size",
"cv2.resize... | [((196, 243), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': '()', 'minval': '(0)', 'maxval': '(1)'}), '(shape=(), minval=0, maxval=1)\n', (213, 243), True, 'import tensorflow as tf\n'), ((482, 500), 'tensorflow.squeeze', 'tf.squeeze', (['action'], {}), '(action)\n', (492, 500), True, 'import tensorflow as tf\n'), ((547, 594), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': '()', 'minval': '(0)', 'maxval': '(1)'}), '(shape=(), minval=0, maxval=1)\n', (564, 594), True, 'import tensorflow as tf\n'), ((825, 843), 'tensorflow.squeeze', 'tf.squeeze', (['action'], {}), '(action)\n', (835, 843), True, 'import tensorflow as tf\n'), ((993, 1016), 'numpy.hstack', 'np.hstack', (['(vals, bval)'], {}), '((vals, bval))\n', (1002, 1016), True, 'import numpy as np\n'), ((4477, 4493), 'gym.make', 'gym.make', (['env_id'], {}), '(env_id)\n', (4485, 4493), False, 'import gym\n'), ((881, 923), 'scipy.signal.lfilter', 'lfilter', (['[1]', '[1, -gamma]', 'x[::-1]'], {'axis': '(0)'}), '([1], [1, -gamma], x[::-1], axis=0)\n', (888, 923), False, 'from scipy.signal import lfilter\n'), ((1299, 1330), 'gym.Wrapper.__init__', 'gym.Wrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (1319, 1330), False, 'import gym\n'), ((2273, 2304), 'gym.Wrapper.__init__', 'gym.Wrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (2293, 2304), False, 'import gym\n'), ((2895, 2926), 'gym.Wrapper.__init__', 'gym.Wrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (2915, 2926), False, 'import gym\n'), ((3029, 3089), 'numpy.zeros', 'np.zeros', (['((2,) + env.observation_space.shape)'], {'dtype': 'np.uint8'}), '((2,) + env.observation_space.shape, dtype=np.uint8)\n', (3037, 3089), True, 'import numpy as np\n'), ((3899, 3930), 'gym.Wrapper.__init__', 'gym.Wrapper.__init__', (['self', 'env'], {}), '(self, env)\n', (3919, 3930), False, 'import gym\n'), ((4301, 4324), 'cv2.resize', 'cv2.resize', (['x', '(80, 80)'], {}), '(x, (80, 80))\n', (4311, 4324), False, 'import cv2\n'), ((4397, 4423), 'numpy.reshape', 'np.reshape', (['x', '[80, 80, 1]'], {}), '(x, [80, 80, 1])\n', (4407, 4423), True, 'import numpy as np\n'), ((4699, 4725), 'os.path.join', 'os.path.join', (['logdir', 'name'], {}), '(logdir, name)\n', (4711, 4725), False, 'import os\n'), ((4743, 4794), 'tensorflow.contrib.summary.create_file_writer', 'summary.create_file_writer', (['dir'], {'flush_millis': '(10000)'}), '(dir, flush_millis=10000)\n', (4769, 4794), False, 'from tensorflow.contrib import summary\n'), ((663, 687), 'tensorflow.argmax', 'tf.argmax', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (672, 687), True, 'import tensorflow as tf\n'), ((327, 340), 'tensorflow.log', 'tf.log', (['probs'], {}), '(probs)\n', (333, 340), True, 'import tensorflow as tf\n'), ((4911, 4960), 'tensorflow.contrib.summary.record_summaries_every_n_global_steps', 'summary.record_summaries_every_n_global_steps', (['(10)'], {}), '(10)\n', (4956, 4960), False, 'from tensorflow.contrib import summary\n'), ((5054, 5091), 'tensorflow.contrib.summary.histogram', 'summary.histogram', (['"""Actions"""', 'actions'], {}), "('Actions', actions)\n", (5071, 5091), False, 'from tensorflow.contrib import summary\n'), ((5182, 5222), 'tensorflow.contrib.summary.scalar', 'summary.scalar', (['"""Perf/Total Loss"""', 'tloss'], {}), "('Perf/Total Loss', tloss)\n", (5196, 5222), False, 'from tensorflow.contrib import summary\n'), ((5524, 5573), 'tensorflow.contrib.summary.record_summaries_every_n_global_steps', 
'summary.record_summaries_every_n_global_steps', (['(10)'], {}), '(10)\n', (5569, 5573), False, 'from tensorflow.contrib import summary\n'), ((5591, 5634), 'tensorflow.contrib.summary.histogram', 'summary.histogram', (['"""Gradient Norms"""', 'gnorms'], {}), "('Gradient Norms', gnorms)\n", (5608, 5634), False, 'from tensorflow.contrib import summary\n'), ((423, 442), 'tensorflow.ones_like', 'tf.ones_like', (['probs'], {}), '(probs)\n', (435, 442), True, 'import tensorflow as tf\n'), ((766, 785), 'tensorflow.ones_like', 'tf.ones_like', (['probs'], {}), '(probs)\n', (778, 785), True, 'import tensorflow as tf\n'), ((5014, 5036), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['rewards'], {}), '(rewards)\n', (5027, 5036), True, 'import tensorflow as tf\n'), ((5148, 5164), 'tensorflow.size', 'tf.size', (['rewards'], {}), '(rewards)\n', (5155, 5164), True, 'import tensorflow as tf\n'), ((5274, 5295), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['ploss'], {}), '(ploss)\n', (5288, 5295), True, 'import tensorflow as tf\n'), ((5347, 5368), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['vloss'], {}), '(vloss)\n', (5361, 5368), True, 'import tensorflow as tf\n'), ((5424, 5447), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['entropy'], {}), '(entropy)\n', (5438, 5447), True, 'import tensorflow as tf\n'), ((5746, 5795), 'tensorflow.contrib.summary.record_summaries_every_n_global_steps', 'summary.record_summaries_every_n_global_steps', (['(10)'], {}), '(10)\n', (5791, 5795), False, 'from tensorflow.contrib import summary\n'), ((5817, 5849), 'tensorflow.contrib.summary.histogram', 'summary.histogram', (['var.name', 'var'], {}), '(var.name, var)\n', (5834, 5849), False, 'from tensorflow.contrib import summary\n')] |
import torch
import torchvision
import torch.nn as nn
import numpy as np
from utils.tensor_folder import TensorFolder
class TennisPlayerDetector(nn.Module):
def __init__(self):
super(TennisPlayerDetector, self).__init__()
# Loads the model
self.model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True).cuda()
self.model.eval()
self.threshold = 0.8
self.COCO_INSTANCE_CATEGORY_NAMES = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
def check_box_boundaries(self, box):
# Exclude detections in the upper left box
if box[2] <= 60 and box[1] <= 26:
return False
# Exclude detections in the upper right box
if box[0] >= 200 and box[1] <= 26:
return False
# Exclude spectator heads in the bottom
if box[1] > 80:
return False
return True
def compute_center(self, box):
return [(box[0] + box[2]) / 2, (box[1] + box[3]) / 2]
def forward(self, observations):
        '''
        Detects the tennis player in each observation and returns its image position
        :param observations: (bs, observations_count, channels, height, width) tensor with observations
        :return: (bs, observations_count, 2) array with x and y coordinates of the detection, [-1, -1] if no player is detected
        '''
batch_size = observations.size(0)
observations_count = observations.size(1)
# Computes positions one sequence at a time
all_predicted_centers = []
for observations_idx in range(observations_count):
current_observations = observations[:, observations_idx]
with torch.no_grad():
predictions = self.model(current_observations)
for current_prediction in predictions:
pred_class = [self.COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(current_prediction['labels'].cpu().numpy())]
pred_boxes = [(i[0], i[1], i[2], i[3]) for i in list(current_prediction['boxes'].detach().cpu().numpy())]
pred_score = list(current_prediction['scores'].detach().cpu().numpy())
filtered_preds = [pred_score.index(x) for x in pred_score if x > self.threshold]
if len(filtered_preds) > 0:
pred_t = filtered_preds[-1]
pred_boxes = pred_boxes[:pred_t + 1]
pred_class = pred_class[:pred_t + 1]
else:
pred_boxes = []
pred_class = []
            matches = []  # (box height, box) pairs for plausible player detections
            for idx in range(len(pred_boxes)):
                if pred_class[idx] == 'person':
                    if self.check_box_boundaries(pred_boxes[idx]):
                        # Keep the box height so the tallest detection can be selected below
                        matches.append((pred_boxes[idx][3] - pred_boxes[idx][1], pred_boxes[idx]))
if len(matches) == 0:
all_predicted_centers.append([-1, -1])
else:
if len(matches) > 1:
print("Warning found more than one person, returning the tallest detection")
# Sort based on the height of the box
matches.sort(key=lambda x: x[0])
# Uses the highest box between the detected ones
all_predicted_centers.append(self.compute_center(matches[-1][-1]))
predicted_centers = np.asarray(all_predicted_centers).reshape((observations_count, batch_size, 2))
return np.moveaxis(predicted_centers, 0, 1)
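# Usage sketch (shapes are hypothetical); the detector expects CUDA image
# tensors with values in [0, 1]:
#   detector = TennisPlayerDetector()
#   observations = torch.rand(2, 4, 3, 224, 224).cuda()
#   centers = detector(observations)  # (2, 4, 2); [-1, -1] means no player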
| [
"numpy.moveaxis",
"torch.no_grad",
"torchvision.models.detection.fasterrcnn_resnet50_fpn",
"numpy.asarray"
] | [((4964, 5000), 'numpy.moveaxis', 'np.moveaxis', (['predicted_centers', '(0)', '(1)'], {}), '(predicted_centers, 0, 1)\n', (4975, 5000), True, 'import numpy as np\n'), ((286, 355), 'torchvision.models.detection.fasterrcnn_resnet50_fpn', 'torchvision.models.detection.fasterrcnn_resnet50_fpn', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (338, 355), False, 'import torchvision\n'), ((2757, 2772), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2770, 2772), False, 'import torch\n'), ((4870, 4903), 'numpy.asarray', 'np.asarray', (['all_predicted_centers'], {}), '(all_predicted_centers)\n', (4880, 4903), True, 'import numpy as np\n')] |
from __future__ import print_function, division
import hashlib
from copy import deepcopy
import h5py
import numpy as np
from ..util.meshgrid import meshgrid_nd
from ..util.functions import FreezableClass, is_numpy_array, monotonically_increasing, link_or_copy
from astropy import log as logger
from .grid_helpers import single_grid_dims
class SphericalPolarGrid(FreezableClass):
'''
A spherical polar grid.
The grid can be initialized by passing the r, theta, and phi coordinates of cell walls::
>>> grid = SphericalPolarGrid(r_wall, t_wall, p_wall)
where ``r_wall``, ``t_wall``, and ``p_wall`` are 1-d sequences of wall
positions. The number of cells in the resulting grid will be one less
    in each dimension than the length of these arrays.
:class:`~hyperion.grid.SphericalPolarGrid` objects may contain multiple
quantities (e.g. density, specific energy). To access these, you can
specify the name of the quantity as an item::
>>> grid['density']
which is no longer a :class:`~hyperion.grid.SphericalPolarGrid` object, but
a :class:`~hyperion.grid.SphericalPolarGridView` object. When setting
this for the first time, this can be set either to another
:class:`~hyperion.grid.SphericalPolarGridView` object, an external h5py
link, or an empty list. For example, the following should work:
>>> grid['density_new'] = grid['density']
:class:`~hyperion.grid.SphericalPolarGridView` objects allow the
specific dust population to be selected as an index:
>>> grid['density'][0]
Which is also a :class:`~hyperion.grid.SphericalPolarGridView` object. The
data can then be accessed with the ``array`` attribute::
>>> grid['density'][0].array
which is a 3-d array of the requested quantity.
'''
def __init__(self, *args):
self.shape = None
self.r_wall = None
self.t_wall = None
self.p_wall = None
self.r = None
self.t = None
self.p = None
self.gr = None
self.gt = None
self.gp = None
self.gw = None
self.gz = None
self.volumes = None
self.areas = None
self.widths = None
self.quantities = {}
self._freeze()
if len(args) > 0:
self.set_walls(*args)
def set_walls(self, r_wall, t_wall, p_wall):
if type(r_wall) in [list, tuple]:
r_wall = np.array(r_wall)
if type(t_wall) in [list, tuple]:
t_wall = np.array(t_wall)
if type(p_wall) in [list, tuple]:
p_wall = np.array(p_wall)
if not is_numpy_array(r_wall) or r_wall.ndim != 1:
raise ValueError("r_wall should be a 1-D sequence")
if not is_numpy_array(t_wall) or t_wall.ndim != 1:
raise ValueError("t_wall should be a 1-D sequence")
if not is_numpy_array(p_wall) or p_wall.ndim != 1:
raise ValueError("p_wall should be a 1-D sequence")
if not monotonically_increasing(r_wall):
raise ValueError("r_wall should be monotonically increasing")
if not monotonically_increasing(t_wall):
raise ValueError("t_wall should be monotonically increasing")
if not monotonically_increasing(p_wall):
raise ValueError("p_wall should be monotonically increasing")
        if np.any(t_wall < 0.) or np.any(t_wall > np.pi):
            raise ValueError("t_wall values should be in the range [0:pi]")
        if np.any(p_wall < 0.) or np.any(p_wall > 2. * np.pi):
            raise ValueError("p_wall values should be in the range [0:2*pi]")
# Find number of grid cells
self.shape = (len(p_wall) - 1, len(t_wall) - 1, len(r_wall) - 1)
# Store wall positions
self.r_wall = r_wall
self.t_wall = t_wall
self.p_wall = p_wall
# Compute cell centers
if r_wall[0] == 0.:
self.r = np.zeros(len(r_wall) - 1)
self.r[0] = r_wall[1] / 2.
self.r[1:] = 10. ** ((np.log10(r_wall[1:-1]) + np.log10(r_wall[2:])) / 2.)
else:
self.r = 10. ** ((np.log10(r_wall[:-1]) + np.log10(r_wall[1:])) / 2.)
self.t = (t_wall[:-1] + t_wall[1:]) / 2.
self.p = (p_wall[:-1] + p_wall[1:]) / 2.
        # Generate 3D versions of r, t, p
        # (each array is 3D and defined in every cell)
self.gr, self.gt, self.gp = meshgrid_nd(self.r, self.t, self.p)
# Compute cell centers in cylindrical coordinates
self.gz = self.gr * np.cos(self.gt)
self.gw = self.gr * np.sin(self.gt)
# Generate 3D versions of the inner and outer wall positions respectively
gr_wall_min, gt_wall_min, gp_wall_min = \
meshgrid_nd(r_wall[:-1], t_wall[:-1], p_wall[:-1])
gr_wall_max, gt_wall_max, gp_wall_max = \
meshgrid_nd(r_wall[1:], t_wall[1:], p_wall[1:])
# USEFUL QUANTITIES
dr = gr_wall_max - gr_wall_min
dr2 = gr_wall_max ** 2 - gr_wall_min ** 2
dr3 = gr_wall_max ** 3 - gr_wall_min ** 3
dt = gt_wall_max - gt_wall_min
dcost = np.cos(gt_wall_min) - np.cos(gt_wall_max)
dp = gp_wall_max - gp_wall_min
# CELL VOLUMES
# dV = dr * (r*dtheta) * (r*sin(theta)*dphi)
# V = [r_2^3 - r_1^3] / 3. * [cos(theta_1) - cos(theta_2)] * [phi_2 - phi_1]
self.volumes = dr3 * dcost * dp / 3.
# WALL AREAS
self.areas = np.zeros((6,) + self.shape)
# R walls:
# dA = r^2 * sin(theta) * dtheta * dphi
# A = r^2 * [cos(theta_1) - cos(theta_2)] * [phi_2 - phi_1]
self.areas[0, :, :, :] = gr_wall_min ** 2 * dcost * dp
self.areas[1, :, :, :] = gr_wall_max ** 2 * dcost * dp
# Theta walls:
# dA = r * sin(theta) * dr * dphi
# A = 0.5 * [r_2^2 - r_1^2] * sin(theta) * [phi_2 - phi_1]
self.areas[2, :, :, :] = 0.5 * dr2 * np.sin(gt_wall_min) * dp
self.areas[3, :, :, :] = 0.5 * dr2 * np.sin(gt_wall_max) * dp
# Phi walls:
# dA = r * dr * dtheta
# A = 0.5 * [r_2^2 - r_1^2] * [theta_2 - theta_1]
self.areas[4, :, :, :] = 0.5 * dr2 * dt
self.areas[5, :, :, :] = 0.5 * dr2 * dt
# CELL WIDTHS
self.widths = np.zeros((3,) + self.shape)
# R direction:
# dS = dr
# S = r_2 - r_1
self.widths[0, :, :, :] = dr
# Theta direction:
# dS = r * dtheta
# S = r * [theta_2 - theta_1]
self.widths[1, :, :, :] = self.gr * dt
# Phi direction:
# dS = r * sin(theta) * dphi
# S = r * sin(theta) * [phi_2 - phi_1]
self.widths[2, :, :, :] = self.gr * np.sin(self.gt) * dp
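    # A quick self-check sketch (assumes nothing beyond the class above): for a
    # full sphere of radius 1 the cell volumes must sum to 4/3 * pi:
    #
    #   g = SphericalPolarGrid(np.linspace(0., 1., 5),
    #                          np.linspace(0., np.pi, 5),
    #                          np.linspace(0., 2. * np.pi, 5))
    #   assert np.allclose(g.volumes.sum(), 4. / 3. * np.pi)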
def __getattr__(self, attribute):
if attribute == 'n_dust':
n_dust = None
for quantity in self.quantities:
n_dust_q, shape_q = single_grid_dims(self.quantities[quantity])
if n_dust is None:
n_dust = n_dust_q
elif n_dust_q is not None:
if n_dust != n_dust_q:
raise ValueError("Not all dust lists in the grid have the same size")
return n_dust
else:
return FreezableClass.__getattribute__(self, attribute)
def _check_array_dimensions(self, array=None):
'''
Check that a grid's array dimensions agree with this grid's metadata
Parameters
----------
array : np.ndarray or list of np.ndarray, optional
The array for which to test the dimensions. If this is not
specified, this method performs a self-consistency check of array
dimensions and meta-data.
'''
n_pop_ref = None
for quantity in self.quantities:
if array is None:
n_pop, shape = single_grid_dims(self.quantities[quantity])
else:
n_pop, shape = single_grid_dims(array)
if shape != self.shape:
raise ValueError("Quantity arrays do not have the right "
"dimensions: %s instead of %s"
% (shape, self.shape))
if n_pop is not None:
if n_pop_ref is None:
n_pop_ref = n_pop
elif n_pop != n_pop_ref:
raise ValueError("Not all dust lists in the grid have the same size")
def read(self, group, quantities='all'):
'''
Read the geometry and physical quantities from a spherical polar grid
Parameters
----------
group : h5py.Group
The HDF5 group to read the grid from. This group should contain
groups named 'Geometry' and 'Quantities'.
quantities : 'all' or list
Which physical quantities to read in. Use 'all' to read in all
quantities or a list of strings to read only specific quantities.
'''
# Read in geometry
self.read_geometry(group['Geometry'])
# Read in physical quantities
self.read_quantities(group['Quantities'], quantities=quantities)
# Self-consistently check geometry and physical quantities
self._check_array_dimensions()
def read_geometry(self, group):
'''
Read in geometry information from a spherical polar grid
Parameters
----------
group : h5py.Group
The HDF5 group to read the grid geometry from.
'''
if group.attrs['grid_type'].decode('utf-8') != 'sph_pol':
raise ValueError("Grid is not spherical polar")
self.set_walls(group['walls_1']['r'],
group['walls_2']['t'],
group['walls_3']['p'])
# Check that advertised hash matches real hash
if group.attrs['geometry'].decode('utf-8') != self.get_geometry_id():
raise Exception("Calculated geometry hash does not match hash in file")
def read_quantities(self, group, quantities='all'):
'''
Read in physical quantities from a spherical polar grid
Parameters
----------
group : h5py.Group
The HDF5 group to read the grid quantities from
quantities : 'all' or list
Which physical quantities to read in. Use 'all' to read in all
quantities or a list of strings to read only specific quantities.
'''
# Read in physical quantities
if quantities is not None:
for quantity in group:
if quantities == 'all' or quantity in quantities:
array = np.array(group[quantity])
if array.ndim == 4: # if array is 4D, it is a list of 3D arrays
self.quantities[quantity] = [array[i] for i in range(array.shape[0])]
else:
self.quantities[quantity] = array
# Self-consistently check geometry and physical quantities
self._check_array_dimensions()
def write(self, group, quantities='all', copy=True, absolute_paths=False, compression=True, wall_dtype=float, physics_dtype=float):
'''
Write out the spherical polar grid
Parameters
----------
group : h5py.Group
The HDF5 group to write the grid to
quantities : 'all' or list
Which physical quantities to write out. Use 'all' to write out all
quantities or a list of strings to write only specific quantities.
copy : bool
Whether to copy external links, or leave them as links.
absolute_paths : bool
If copy is False, then this indicates whether to use absolute or
relative paths for links.
compression : bool
Whether to compress the arrays in the HDF5 file
wall_dtype : type
The datatype to use to write the wall positions
physics_dtype : type
The datatype to use to write the physical quantities
'''
# Create HDF5 groups if needed
if 'Geometry' not in group:
g_geometry = group.create_group('Geometry')
else:
g_geometry = group['Geometry']
if 'Quantities' not in group:
g_quantities = group.create_group('Quantities')
else:
g_quantities = group['Quantities']
# Write out geometry
g_geometry.attrs['grid_type'] = np.string_('sph_pol'.encode('utf-8'))
g_geometry.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8'))
dset = g_geometry.create_dataset("walls_1", data=np.array(list(zip(self.r_wall)), dtype=[('r', wall_dtype)]), compression=compression)
dset.attrs['Unit'] = np.string_('cm'.encode('utf-8'))
dset = g_geometry.create_dataset("walls_2", data=np.array(list(zip(self.t_wall)), dtype=[('t', wall_dtype)]), compression=compression)
dset.attrs['Unit'] = np.string_('rad'.encode('utf-8'))
dset = g_geometry.create_dataset("walls_3", data=np.array(list(zip(self.p_wall)), dtype=[('p', wall_dtype)]), compression=compression)
dset.attrs['Unit'] = np.string_('rad'.encode('utf-8'))
# Self-consistently check geometry and physical quantities
self._check_array_dimensions()
# Write out physical quantities
for quantity in self.quantities:
if quantities == 'all' or quantity in quantities:
if isinstance(self.quantities[quantity], h5py.ExternalLink):
link_or_copy(g_quantities, quantity, self.quantities[quantity], copy, absolute_paths=absolute_paths)
else:
dset = g_quantities.create_dataset(quantity, data=self.quantities[quantity],
compression=compression,
dtype=physics_dtype)
dset.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8'))
def write_single_array(self, group, name, array, copy=True, absolute_paths=False, compression=True, physics_dtype=float):
'''
Write out a single quantity, checking for consistency with geometry
Parameters
----------
group : h5py.Group
The HDF5 group to write the grid to
name : str
The name of the array in the group
array : np.ndarray
The array to write out
copy : bool
Whether to copy external links, or leave them as links.
absolute_paths : bool
If copy is False, then this indicates whether to use absolute or
relative paths for links.
compression : bool
Whether to compress the arrays in the HDF5 file
physics_dtype : type
The datatype to use to write the physical quantities
'''
# Check consistency of array dimensions with grid
self._check_array_dimensions(array)
if isinstance(array, h5py.ExternalLink):
link_or_copy(group, name, array, copy, absolute_paths=absolute_paths)
else:
dset = group.create_dataset(name, data=array,
compression=compression,
dtype=physics_dtype)
dset.attrs['geometry'] = np.string_(self.get_geometry_id().encode('utf-8'))
def get_geometry_id(self):
geo_hash = hashlib.md5()
        geo_hash.update(self.r_wall.tobytes())  # tostring() is a deprecated alias of tobytes()
        geo_hash.update(self.t_wall.tobytes())
        geo_hash.update(self.p_wall.tobytes())
return geo_hash.hexdigest()
def __getitem__(self, item):
return SphericalPolarGridView(self, item)
def __setitem__(self, item, value):
if isinstance(value, SphericalPolarGridView):
if self.r_wall is None and self.t_wall is None and self.p_wall is None:
logger.warn("No geometry in target grid - copying from original grid")
self.set_walls(value.r_wall, value.t_wall, value.p_wall)
self.quantities[item] = deepcopy(value.quantities[value.viewed_quantity])
elif isinstance(value, h5py.ExternalLink):
self.quantities[item] = value
elif value == []:
self.quantities[item] = []
else:
            raise ValueError('value should be an empty list, an ExternalLink, or a SphericalPolarGridView instance')
def __contains__(self, item):
return self.quantities.__contains__(item)
def reset_quantities(self):
self.quantities = {}
def add_derived_quantity(self, name, function):
if name in self.quantities:
raise KeyError(name + ' already exists')
function(self.quantities)
class SphericalPolarGridView(SphericalPolarGrid):
def __init__(self, grid, quantity):
self.viewed_quantity = quantity
SphericalPolarGrid.__init__(self)
self.set_walls(grid.r_wall, grid.t_wall, grid.p_wall)
self.quantities = {quantity: grid.quantities[quantity]}
def append(self, grid):
'''
Used to append quantities from another grid
Parameters
----------
grid : 3D Numpy array or SphericalPolarGridView instance
The grid to copy the quantity from
'''
if isinstance(grid, SphericalPolarGridView):
if self.quantities[self.viewed_quantity] is grid.quantities[grid.viewed_quantity]:
raise Exception("Calling append recursively")
if type(grid.quantities[grid.viewed_quantity]) is list:
raise Exception("Can only append a single grid")
self._check_array_dimensions(grid.quantities[grid.viewed_quantity])
self.quantities[self.viewed_quantity].append(deepcopy(grid.quantities[grid.viewed_quantity]))
elif type(grid) is np.ndarray:
self._check_array_dimensions(grid)
self.quantities[self.viewed_quantity].append(deepcopy(grid))
else:
raise ValueError("grid should be a Numpy array or a SphericalPolarGridView instance")
def add(self, grid):
'''
Used to add quantities from another grid
Parameters
----------
grid : 3D Numpy array or SphericalPolarGridView instance
The grid to copy the quantity from
'''
if type(self.quantities[self.viewed_quantity]) is list:
raise Exception("need to first specify the item to add to")
if isinstance(grid, SphericalPolarGridView):
if type(grid.quantities[grid.viewed_quantity]) is list:
raise Exception("need to first specify the item to add")
self._check_array_dimensions(grid.quantities[grid.viewed_quantity])
self.quantities[self.viewed_quantity] += grid.quantities[grid.viewed_quantity]
elif type(grid) is np.ndarray:
self._check_array_dimensions(grid)
self.quantities[self.viewed_quantity] += grid
else:
raise ValueError("grid should be a Numpy array or a SphericalPolarGridView instance")
def __getitem__(self, item):
if type(item) is int:
grid = SphericalPolarGridView(self, self.viewed_quantity)
grid.quantities = {grid.viewed_quantity: grid.quantities[grid.viewed_quantity][item]}
return grid
else:
return SphericalPolarGrid.__getitem__(self, item)
def __getattr__(self, attribute):
if attribute == 'array':
return self.quantities[self.viewed_quantity]
else:
return SphericalPolarGrid.__getattr__(self, attribute)
| [
"numpy.log10",
"hashlib.md5",
"numpy.any",
"numpy.array",
"numpy.zeros",
"numpy.cos",
"copy.deepcopy",
"numpy.sin",
"astropy.log.warn"
] | [((5497, 5524), 'numpy.zeros', 'np.zeros', (['((6,) + self.shape)'], {}), '((6,) + self.shape)\n', (5505, 5524), True, 'import numpy as np\n'), ((6331, 6358), 'numpy.zeros', 'np.zeros', (['((3,) + self.shape)'], {}), '((3,) + self.shape)\n', (6339, 6358), True, 'import numpy as np\n'), ((15691, 15704), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (15702, 15704), False, 'import hashlib\n'), ((2460, 2476), 'numpy.array', 'np.array', (['r_wall'], {}), '(r_wall)\n', (2468, 2476), True, 'import numpy as np\n'), ((2540, 2556), 'numpy.array', 'np.array', (['t_wall'], {}), '(t_wall)\n', (2548, 2556), True, 'import numpy as np\n'), ((2620, 2636), 'numpy.array', 'np.array', (['p_wall'], {}), '(p_wall)\n', (2628, 2636), True, 'import numpy as np\n'), ((3389, 3409), 'numpy.any', 'np.any', (['(t_wall < 0.0)'], {}), '(t_wall < 0.0)\n', (3395, 3409), True, 'import numpy as np\n'), ((3412, 3434), 'numpy.any', 'np.any', (['(t_wall > np.pi)'], {}), '(t_wall > np.pi)\n', (3418, 3434), True, 'import numpy as np\n'), ((3516, 3536), 'numpy.any', 'np.any', (['(p_wall < 0.0)'], {}), '(p_wall < 0.0)\n', (3522, 3536), True, 'import numpy as np\n'), ((3539, 3567), 'numpy.any', 'np.any', (['(p_wall > 2.0 * np.pi)'], {}), '(p_wall > 2.0 * np.pi)\n', (3545, 3567), True, 'import numpy as np\n'), ((4552, 4567), 'numpy.cos', 'np.cos', (['self.gt'], {}), '(self.gt)\n', (4558, 4567), True, 'import numpy as np\n'), ((4596, 4611), 'numpy.sin', 'np.sin', (['self.gt'], {}), '(self.gt)\n', (4602, 4611), True, 'import numpy as np\n'), ((5158, 5177), 'numpy.cos', 'np.cos', (['gt_wall_min'], {}), '(gt_wall_min)\n', (5164, 5177), True, 'import numpy as np\n'), ((5180, 5199), 'numpy.cos', 'np.cos', (['gt_wall_max'], {}), '(gt_wall_max)\n', (5186, 5199), True, 'import numpy as np\n'), ((16344, 16393), 'copy.deepcopy', 'deepcopy', (['value.quantities[value.viewed_quantity]'], {}), '(value.quantities[value.viewed_quantity])\n', (16352, 16393), False, 'from copy import deepcopy\n'), ((5977, 5996), 'numpy.sin', 'np.sin', (['gt_wall_min'], {}), '(gt_wall_min)\n', (5983, 5996), True, 'import numpy as np\n'), ((6047, 6066), 'numpy.sin', 'np.sin', (['gt_wall_max'], {}), '(gt_wall_max)\n', (6053, 6066), True, 'import numpy as np\n'), ((6773, 6788), 'numpy.sin', 'np.sin', (['self.gt'], {}), '(self.gt)\n', (6779, 6788), True, 'import numpy as np\n'), ((16164, 16234), 'astropy.log.warn', 'logger.warn', (['"""No geometry in target grid - copying from original grid"""'], {}), "('No geometry in target grid - copying from original grid')\n", (16175, 16234), True, 'from astropy import log as logger\n'), ((18044, 18091), 'copy.deepcopy', 'deepcopy', (['grid.quantities[grid.viewed_quantity]'], {}), '(grid.quantities[grid.viewed_quantity])\n', (18052, 18091), False, 'from copy import deepcopy\n'), ((10759, 10784), 'numpy.array', 'np.array', (['group[quantity]'], {}), '(group[quantity])\n', (10767, 10784), True, 'import numpy as np\n'), ((18236, 18250), 'copy.deepcopy', 'deepcopy', (['grid'], {}), '(grid)\n', (18244, 18250), False, 'from copy import deepcopy\n'), ((4048, 4070), 'numpy.log10', 'np.log10', (['r_wall[1:-1]'], {}), '(r_wall[1:-1])\n', (4056, 4070), True, 'import numpy as np\n'), ((4073, 4093), 'numpy.log10', 'np.log10', (['r_wall[2:]'], {}), '(r_wall[2:])\n', (4081, 4093), True, 'import numpy as np\n'), ((4145, 4166), 'numpy.log10', 'np.log10', (['r_wall[:-1]'], {}), '(r_wall[:-1])\n', (4153, 4166), True, 'import numpy as np\n'), ((4169, 4189), 'numpy.log10', 'np.log10', (['r_wall[1:]'], {}), '(r_wall[1:])\n', (4177, 4189), True, 
'import numpy as np\n')] |
'''
Functions that help visualize results
'''
import os
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
from . import config
__all__ = ['plotter',
'segment_plotter',
'plot_poincare',
'plot_breathing']
def plotter(working_data, measures, show=True, figsize=None,
title='Heart Rate Signal Peak Detection', moving_average=False): # pragma: no cover
'''plots the analysis results.
Function that uses calculated measures and data stored in the working_data{} and measures{}
dict objects to visualise the fitted peak detection solution.
Parameters
----------
working_data : dict
dictionary object that contains all heartpy's working data (temp) objects.
will be created if not passed to function
measures : dict
dictionary object used by heartpy to store computed measures. Will be created
if not passed to function
show : bool
when False, function will return a plot object rather than display the results.
default : True
figsize: tuple
Set dimensions of image in inches like in matplotlib. figsize=(x, y)
default: None => (6.4, 4.8)
title : string
title for the plot.
default : "Heart Rate Signal Peak Detection"
moving_average : bool
whether to display the moving average on the plot.
The moving average is used for peak fitting.
default: False
Returns
-------
out : matplotlib plot object
only returned if show == False.
Examples
--------
First let's load and analyse some data to visualise
>>> import heartpy as hp
>>> data, _ = hp.load_exampledata(0)
>>> wd, m = hp.process(data, 100.0)
Then we can visualise
>>> plot_object = plotter(wd, m, show=False, title='some awesome title')
    This returns a plot object which can be displayed, further processed, or saved
    to a file. See the matplotlib API for more information on how to do this.
'''
#get color palette
colorpalette = config.get_colorpalette_plotter()
# create plot x-var
fs = working_data['sample_rate']
plotx = np.arange(0, len(working_data['hr'])/fs, 1/fs)
#check if there's a rounding error causing differing lengths of plotx and signal
diff = len(plotx) - len(working_data['hr'])
    if diff < 0:
        # extend the time axis by one sample step
        plotx = np.append(plotx, plotx[-1] + (plotx[-1] - plotx[-2]))
    elif diff > 0:
        # trim the time axis to match the signal length
        plotx = plotx[0:-diff]
peaklist = working_data['peaklist']
ybeat = working_data['ybeat']
rejectedpeaks = working_data['removed_beats']
rejectedpeaks_y = working_data['removed_beats_y']
fig, ax = plt.subplots(figsize=figsize)
ax.set_title(title)
ax.plot(plotx, working_data['hr'], color=colorpalette[0], label='heart rate signal', zorder=-10)
ax.set_xlabel('Time (s)')
if moving_average:
ax.plot(plotx, working_data['rolling_mean'], color='gray', alpha=0.5)
ax.scatter(np.asarray(peaklist)/fs, ybeat, color=colorpalette[1], label='BPM:%.2f' %(measures['bpm']))
ax.scatter(rejectedpeaks/fs, rejectedpeaks_y, color=colorpalette[2], label='rejected peaks')
#check if rejected segment detection is on and has rejected segments
try:
if len(working_data['rejected_segments']) >= 1:
for segment in working_data['rejected_segments']:
ax.axvspan(segment[0], segment[1], facecolor='red', alpha=0.5)
    except KeyError:
        # no segment rejection was performed
        pass
ax.legend(loc=4, framealpha=0.6)
if show:
fig.show()
else:
return fig
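# Example (a sketch; `wd` and `m` come from a prior hp.process call as in the
# docstring above):
#
#   fig = plotter(wd, m, show=False, title='peak detection')
#   fig.savefig('peak_detection.png', dpi=150)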
def segment_plotter(working_data, measures, title='Heart Rate Signal Peak Detection',
figsize=(6, 6), path='', start=0, end=None, step=1): # pragma: no cover
'''plots analysis results
Function that plots the results of segmentwise processing of heart rate signal
and writes all results to separate files at the path provided.
Parameters
----------
working_data : dict
dictionary object that contains all heartpy's working data (temp) objects.
will be created if not passed to function
measures : dict
dictionary object used by heartpy to store computed measures. Will be created
if not passed to function
title : str
the title used in the plot
figsize : tuple
figsize tuple to be passed to matplotlib
path : str
the path where the files will be stored, folder must exist.
start : int
what segment to start plotting with
default : 0
end : int
last segment to plot. Must be smaller than total number of segments
default : None, will plot until end
step : int
        stepsize used when iterating over plots; every step'th segment will be plotted
default : 1
Returns
-------
None
Examples
--------
This function has no examples. See documentation of heartpy for more info.
'''
#sanity check
assert 0 < step < len(working_data['hr']), 'step must be larger than zero and smaller than total number of segments'
#set endpoint if not explicitly defined
    if end is None:
end = len(working_data['hr'])
else:
#make sure it is defined within boundary conditions
assert end <= len(working_data['hr']), 'defined "end" endpoint is larger than number of segments'
#add trailing path slash if user omitted it
if not (path.endswith('/') or path.endswith('\\')) and len(path) > 0:
path += '/'
#create path if it doesn't exist
if not os.path.isdir(path):
os.makedirs(path)
#make plots
filenum = 0
for i in range(start, end, step):
wd_segment = {}
m_segment = {}
#assign values to sub-object for plotting purposes
wd_segment['peaklist'] = working_data['peaklist'][i]
wd_segment['ybeat'] = working_data['ybeat'][i]
wd_segment['removed_beats'] = working_data['removed_beats'][i]
wd_segment['removed_beats_y'] = working_data['removed_beats_y'][i]
wd_segment['hr'] = working_data['hr'][i]
wd_segment['rolling_mean'] = working_data['rolling_mean'][i]
wd_segment['sample_rate'] = working_data['sample_rate'][i]
m_segment['bpm'] = measures['bpm'][i]
try:
wd_segment['rejected_segments'] = working_data['rejected_segments'][i]
        except KeyError:
            # no segment rejection was performed
            pass
#plot it using built-in plotter
plt.figure(figsize = figsize)
p = plotter(wd_segment, m_segment, show=False)
p.savefig('%s%i.png' %(path, filenum))
plt.close('all')
filenum += 1
def plot_poincare(working_data, measures, show = True, figsize=None,
title='Poincare plot'): # pragma: no cover
'''visualize poincare plot
function that visualises poincare plot.
Parameters
----------
working_data : dict
dictionary object that contains all heartpy's working data (temp) objects.
will be created if not passed to function
measures : dict
dictionary object used by heartpy to store computed measures. Will be created
if not passed to function
show : bool
whether to show the plot right away, or return a matplotlib object for
further manipulation
figsize: tuple
Set dimensions of image in inches like in matplotlib. figsize=(x, y)
default: None => (6.4, 4.8)
title : str
the title used in the plot
Returns
-------
out : matplotlib plot object
only returned if show == False.
Examples
--------
This function has no examples. See documentation of heartpy for more info.
'''
#get color palette
colorpalette = config.get_colorpalette_poincare()
#get values from dict
x_plus = working_data['poincare']['x_plus']
x_minus = working_data['poincare']['x_minus']
sd1 = measures['sd1']
sd2 = measures['sd2']
#define figure
fig, ax = plt.subplots(subplot_kw={'aspect': 'equal'}, figsize=figsize)
#plot scatter
ax.scatter(x_plus, x_minus, color = colorpalette[0],
alpha = 0.75, label = 'peak-peak intervals')
#plot identity line
mins = np.min([x_plus, x_minus])
maxs = np.max([x_plus, x_minus])
identity_line = np.linspace(np.min(mins), np.max(maxs))
ax.plot(identity_line, identity_line, color='black', alpha=0.5,
label = 'identity line')
#rotate SD1, SD2 vectors 45 degrees counterclockwise
sd1_xrot, sd1_yrot = rotate_vec(0, sd1, 45)
sd2_xrot, sd2_yrot = rotate_vec(0, sd2, 45)
#plot rotated SD1, SD2 lines
ax.plot([np.mean(x_plus), np.mean(x_plus) + sd1_xrot],
[np.mean(x_minus), np.mean(x_minus) + sd1_yrot],
color = colorpalette[1], label = 'SD1')
ax.plot([np.mean(x_plus), np.mean(x_plus) - sd2_xrot],
[np.mean(x_minus), np.mean(x_minus) + sd2_yrot],
color = colorpalette[2], label = 'SD2')
#plot ellipse
xmn = np.mean(x_plus)
ymn = np.mean(x_minus)
el = Ellipse((xmn, ymn), width = sd2 * 2, height = sd1 * 2, angle = 45.0)
ax.add_artist(el)
el.set_edgecolor((0,0,0))
el.fill = False
ax.set_xlabel(r'RRi$_n$ (ms)')
ax.set_ylabel(r'RRi$_{n+1}$ (ms)')
ax.legend(loc=4, framealpha=0.6)
ax.set_title(title)
if show:
fig.show()
else:
return fig
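# Geometry note for the ellipse above: SD1 (the short axis) measures dispersion
# perpendicular to the identity line (short-term variability), while SD2 (the
# long axis) measures dispersion along it (long-term variability), hence the
# 45-degree rotation applied via rotate_vec (defined below).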
def rotate_vec(x, y, angle):
'''rotates vector around origin point
Function that takes vector and angle, and rotates around origin point
with given amount of degrees.
Helper function for poincare plotting
Parameters
----------
x : int or float
vector x coordinate
y : int or float
vector y coordinate
angle: int or float
        the angle of rotation applied to the vector
Returns
-------
x_rot : float
new x coordinate with rotation applied
y_rot : float
        new y coordinate with rotation applied
Examples
--------
Given a vector (0,1), if we apply a rotation of 90 degrees clockwise
we expect to get (1,0). Let's test
>>> x_new, y_new = rotate_vec(0, 1, -90)
>>> print('%.3f, %.3f' %(x_new, y_new))
1.000, 0.000
'''
theta = np.radians(angle)
cs = np.cos(theta)
sn = np.sin(theta)
x_rot = (x * cs) - (y * sn)
y_rot = (x * sn) + (y * cs)
return x_rot, y_rot
def plot_breathing(working_data, measures, show=True, figsize=None): # pragma: no cover
'''plots extracted breathing signal and spectrogram
Function that plots the breathing signal extracted from RR-intervals alongside
its computed spectrogram representation.
Parameters
----------
working_data : dict
dictionary object that contains all heartpy's working data (temp) objects.
will be created if not passed to function
measures : dict
dictionary object used by heartpy to store computed measures. Will be created
if not passed to function
show : bool
whether to show the plot right away, or return a matplotlib object for
further manipulation
figsize: tuple
Set dimensions of image in inches like in matplotlib. figsize=(x, y)
default: None => (6.4, 4.8)
Returns
-------
out : matplotlib plot object
only returned if show == False.
Examples
--------
This function has no examples. See documentation of heartpy for more info.
'''
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=figsize)
ax1.plot(working_data['breathing_signal'], label='breathing signal')
ax1.set_xlabel('ms')
ax1.set_title('breathing signal extracted from RR-intervals')
ax2.plot(working_data['breathing_frq'], working_data['breathing_psd'], label='spectrogram')
ax2.set_xlim(0, 1)
ax2.set_xlabel('Hz')
ax2.set_title('spectrogram extracted from breathing rate signal')
ax2.legend()
plt.tight_layout()
if show:
fig.show()
else:
return fig
| [
"numpy.radians",
"numpy.mean",
"os.makedirs",
"numpy.asarray",
"numpy.max",
"numpy.append",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"os.path.isdir",
"numpy.cos",
"matplotlib.pyplot.tight_layout",
"numpy.min",
"numpy.sin",
"matplotlib.patches.Ellipse",
"matplotlib.pyplot.su... | [((2855, 2884), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (2867, 2884), True, 'import matplotlib.pyplot as plt\n'), ((8180, 8241), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'aspect': 'equal'}", 'figsize': 'figsize'}), "(subplot_kw={'aspect': 'equal'}, figsize=figsize)\n", (8192, 8241), True, 'import matplotlib.pyplot as plt\n'), ((8415, 8440), 'numpy.min', 'np.min', (['[x_plus, x_minus]'], {}), '([x_plus, x_minus])\n', (8421, 8440), True, 'import numpy as np\n'), ((8452, 8477), 'numpy.max', 'np.max', (['[x_plus, x_minus]'], {}), '([x_plus, x_minus])\n', (8458, 8477), True, 'import numpy as np\n'), ((9209, 9224), 'numpy.mean', 'np.mean', (['x_plus'], {}), '(x_plus)\n', (9216, 9224), True, 'import numpy as np\n'), ((9235, 9251), 'numpy.mean', 'np.mean', (['x_minus'], {}), '(x_minus)\n', (9242, 9251), True, 'import numpy as np\n'), ((9261, 9323), 'matplotlib.patches.Ellipse', 'Ellipse', (['(xmn, ymn)'], {'width': '(sd2 * 2)', 'height': '(sd1 * 2)', 'angle': '(45.0)'}), '((xmn, ymn), width=sd2 * 2, height=sd1 * 2, angle=45.0)\n', (9268, 9323), False, 'from matplotlib.patches import Ellipse\n'), ((10455, 10472), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (10465, 10472), True, 'import numpy as np\n'), ((10483, 10496), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (10489, 10496), True, 'import numpy as np\n'), ((10506, 10519), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (10512, 10519), True, 'import numpy as np\n'), ((11707, 11742), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': 'figsize'}), '(2, 1, figsize=figsize)\n', (11719, 11742), True, 'import matplotlib.pyplot as plt\n'), ((12145, 12163), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12161, 12163), True, 'import matplotlib.pyplot as plt\n'), ((2526, 2579), 'numpy.append', 'np.append', (['plotx', '(plotx[-1] + (plotx[-2] - plotx[-1]))'], {}), '(plotx, plotx[-1] + (plotx[-2] - plotx[-1]))\n', (2535, 2579), True, 'import numpy as np\n'), ((6654, 6681), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (6664, 6681), True, 'import matplotlib.pyplot as plt\n'), ((6794, 6810), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (6803, 6810), True, 'import matplotlib.pyplot as plt\n'), ((8510, 8522), 'numpy.min', 'np.min', (['mins'], {}), '(mins)\n', (8516, 8522), True, 'import numpy as np\n'), ((8524, 8536), 'numpy.max', 'np.max', (['maxs'], {}), '(maxs)\n', (8530, 8536), True, 'import numpy as np\n'), ((3159, 3179), 'numpy.asarray', 'np.asarray', (['peaklist'], {}), '(peaklist)\n', (3169, 3179), True, 'import numpy as np\n'), ((5755, 5774), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (5768, 5774), False, 'import os\n'), ((5788, 5805), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (5799, 5805), False, 'import os\n'), ((8845, 8860), 'numpy.mean', 'np.mean', (['x_plus'], {}), '(x_plus)\n', (8852, 8860), True, 'import numpy as np\n'), ((8905, 8921), 'numpy.mean', 'np.mean', (['x_minus'], {}), '(x_minus)\n', (8912, 8921), True, 'import numpy as np\n'), ((9019, 9034), 'numpy.mean', 'np.mean', (['x_plus'], {}), '(x_plus)\n', (9026, 9034), True, 'import numpy as np\n'), ((9079, 9095), 'numpy.mean', 'np.mean', (['x_minus'], {}), '(x_minus)\n', (9086, 9095), True, 'import numpy as np\n'), ((8862, 8877), 'numpy.mean', 'np.mean', (['x_plus'], {}), '(x_plus)\n', 
(8869, 8877), True, 'import numpy as np\n'), ((8923, 8939), 'numpy.mean', 'np.mean', (['x_minus'], {}), '(x_minus)\n', (8930, 8939), True, 'import numpy as np\n'), ((9036, 9051), 'numpy.mean', 'np.mean', (['x_plus'], {}), '(x_plus)\n', (9043, 9051), True, 'import numpy as np\n'), ((9097, 9113), 'numpy.mean', 'np.mean', (['x_minus'], {}), '(x_minus)\n', (9104, 9113), True, 'import numpy as np\n')] |
# Copyright 2019-2021 Cambridge Quantum Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytket import Circuit, Qubit, Bit
from qermit.spam.full_spam_correction import ( # type: ignore
gen_full_tomography_spam_circuits_task,
gen_full_tomography_spam_characterisation_task,
gen_full_tomography_spam_correction_task,
gen_get_bit_maps_task,
)
from qermit.spam import ( # type: ignore
CorrectionMethod,
)
from pytket.extensions.qiskit import AerBackend # type: ignore
import numpy as np
def test_gen_full_tomography_spam_circuits_task():
b = AerBackend()
b._characterisation = dict()
task = gen_full_tomography_spam_circuits_task(
b, 5, [[Qubit(0), Qubit(1)], [Qubit(2), Qubit(3)]]
)
assert task.n_in_wires == 1
assert task.n_out_wires == 3
c0 = Circuit(3).CX(0, 1).X(2).measure_all()
c1 = Circuit(2).X(0).X(1).measure_all()
wire = [(c0, 10), (c1, 20)]
res = task([wire])
assert len(res) == 3
assert res[0] == wire
assert len(res[1]) == len(res[2])
assert len(res[1][0][0].get_commands()) == 6
assert len(res[1][1][0].get_commands()) == 8
assert len(res[1][2][0].get_commands()) == 8
assert len(res[1][3][0].get_commands()) == 10
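    # The four generated circuits prepare the computational basis states 00, 01,
    # 10 and 11 on both two-qubit subsets in parallel (an inference from the
    # asserts above): the command counts grow with the number of state-preparation
    # X gates added on top of the base measurement circuit.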
def test_full_tomography_spam_characterisation_task_gen():
b = AerBackend()
b._characterisation = dict()
qb_subsets = [[Qubit(0), Qubit(1)], [Qubit(2), Qubit(3)]]
c0 = Circuit(3).CX(0, 1).X(2).measure_all()
c1 = Circuit(2).X(0).X(1).measure_all()
wire = [(c0, 10), (c1, 20)]
spam_info = gen_full_tomography_spam_circuits_task(b, 5, qb_subsets)([wire])
spam_circs = [c[0] for c in spam_info[1]]
handles = b.process_circuits(spam_circs, 5)
results = b.get_results(handles)
task = gen_full_tomography_spam_characterisation_task(b, qb_subsets)
assert task.n_in_wires == 2
assert task.n_out_wires == 1
task_res = task([results, spam_info[2]])
assert task_res == (True,)
char = b.backend_info.misc["FullCorrelatedSpamCorrection"]
assert char[0] == qb_subsets
assert char[1] == {
Qubit(0): (0, 0),
Qubit(1): (0, 1),
Qubit(2): (1, 0),
Qubit(3): (1, 1),
}
    # On a noiseless backend the characterisation matrices should be exactly the identity
    assert np.allclose(char[2][0], np.identity(char[2][0].shape[0]))
    assert np.allclose(char[2][1], np.identity(char[2][1].shape[0]))
def test_gen_get_bit_maps_task():
task = gen_get_bit_maps_task()
assert task.n_in_wires == 1
assert task.n_out_wires == 2
c0 = Circuit(3).CX(0, 1).X(2).measure_all()
c1 = Circuit(2, 2).X(0).Measure(0, 0).X(1).SWAP(0, 1).Measure(0, 1)
wire = [(c0, 10), (c1, 50)]
res = task([wire])
assert len(res) == 2
assert res[0] == wire
comp0 = (c0.qubit_to_bit_map, {})
comp1 = (c1.qubit_to_bit_map, {Bit(0): Qubit(0)})
assert res[1][0] == comp0
assert res[1][1] == comp1
def test_gen_full_tomography_spam_correction_task():
# characterise noiseless matrix
# use prior experiment
b = AerBackend()
b._characterisation = dict()
qb_subsets = [[Qubit(0), Qubit(1)], [Qubit(2), Qubit(3)]]
c0 = Circuit(3).CX(0, 1).X(2).measure_all()
c1 = Circuit(2, 2).X(0).Measure(0, 0).X(1).SWAP(0, 1).Measure(0, 1)
wire = [(c0, 10), (c1, 20)]
spam_info = gen_full_tomography_spam_circuits_task(b, 5, qb_subsets)([wire])
spam_circs = [c[0] for c in spam_info[1]]
handles = b.process_circuits(spam_circs, 5)
results = b.get_results(handles)
# just returns bool
gen_full_tomography_spam_characterisation_task(b, qb_subsets)(
[results, spam_info[2]]
)
handles1 = b.process_circuits([c0, c1], 20)
results1 = b.get_results(handles1)
q_b_maps = [(c0.qubit_to_bit_map, {}), (c1.qubit_to_bit_map, {Bit(0): Qubit(0)})]
task = gen_full_tomography_spam_correction_task(b, CorrectionMethod.Invert)
assert task.n_in_wires == 3
assert task.n_out_wires == 1
wire = [results1, q_b_maps, True]
corrected_results = task(wire)[0]
assert len(corrected_results) == 2
assert corrected_results[0].get_counts()[(0, 0, 1)] == 20
assert corrected_results[1].get_counts()[(1, 1)] == 20
if __name__ == "__main__":
test_gen_full_tomography_spam_circuits_task()
test_full_tomography_spam_characterisation_task_gen()
test_gen_full_tomography_spam_correction_task()
test_gen_get_bit_maps_task()
| [
"numpy.identity",
"qermit.spam.full_spam_correction.gen_full_tomography_spam_characterisation_task",
"pytket.Qubit",
"qermit.spam.full_spam_correction.gen_full_tomography_spam_circuits_task",
"pytket.extensions.qiskit.AerBackend",
"pytket.Bit",
"qermit.spam.full_spam_correction.gen_get_bit_maps_task",
... | [((1081, 1093), 'pytket.extensions.qiskit.AerBackend', 'AerBackend', ([], {}), '()\n', (1091, 1093), False, 'from pytket.extensions.qiskit import AerBackend\n'), ((1811, 1823), 'pytket.extensions.qiskit.AerBackend', 'AerBackend', ([], {}), '()\n', (1821, 1823), False, 'from pytket.extensions.qiskit import AerBackend\n'), ((2270, 2331), 'qermit.spam.full_spam_correction.gen_full_tomography_spam_characterisation_task', 'gen_full_tomography_spam_characterisation_task', (['b', 'qb_subsets'], {}), '(b, qb_subsets)\n', (2316, 2331), False, 'from qermit.spam.full_spam_correction import gen_full_tomography_spam_circuits_task, gen_full_tomography_spam_characterisation_task, gen_full_tomography_spam_correction_task, gen_get_bit_maps_task\n'), ((2855, 2878), 'qermit.spam.full_spam_correction.gen_get_bit_maps_task', 'gen_get_bit_maps_task', ([], {}), '()\n', (2876, 2878), False, 'from qermit.spam.full_spam_correction import gen_full_tomography_spam_circuits_task, gen_full_tomography_spam_characterisation_task, gen_full_tomography_spam_correction_task, gen_get_bit_maps_task\n'), ((3449, 3461), 'pytket.extensions.qiskit.AerBackend', 'AerBackend', ([], {}), '()\n', (3459, 3461), False, 'from pytket.extensions.qiskit import AerBackend\n'), ((4235, 4303), 'qermit.spam.full_spam_correction.gen_full_tomography_spam_correction_task', 'gen_full_tomography_spam_correction_task', (['b', 'CorrectionMethod.Invert'], {}), '(b, CorrectionMethod.Invert)\n', (4275, 4303), False, 'from qermit.spam.full_spam_correction import gen_full_tomography_spam_circuits_task, gen_full_tomography_spam_characterisation_task, gen_full_tomography_spam_correction_task, gen_get_bit_maps_task\n'), ((2060, 2116), 'qermit.spam.full_spam_correction.gen_full_tomography_spam_circuits_task', 'gen_full_tomography_spam_circuits_task', (['b', '(5)', 'qb_subsets'], {}), '(b, 5, qb_subsets)\n', (2098, 2116), False, 'from qermit.spam.full_spam_correction import gen_full_tomography_spam_circuits_task, gen_full_tomography_spam_characterisation_task, gen_full_tomography_spam_correction_task, gen_get_bit_maps_task\n'), ((3725, 3781), 'qermit.spam.full_spam_correction.gen_full_tomography_spam_circuits_task', 'gen_full_tomography_spam_circuits_task', (['b', '(5)', 'qb_subsets'], {}), '(b, 5, qb_subsets)\n', (3763, 3781), False, 'from qermit.spam.full_spam_correction import gen_full_tomography_spam_circuits_task, gen_full_tomography_spam_characterisation_task, gen_full_tomography_spam_correction_task, gen_get_bit_maps_task\n'), ((3949, 4010), 'qermit.spam.full_spam_correction.gen_full_tomography_spam_characterisation_task', 'gen_full_tomography_spam_characterisation_task', (['b', 'qb_subsets'], {}), '(b, qb_subsets)\n', (3995, 4010), False, 'from qermit.spam.full_spam_correction import gen_full_tomography_spam_circuits_task, gen_full_tomography_spam_characterisation_task, gen_full_tomography_spam_correction_task, gen_get_bit_maps_task\n'), ((1876, 1884), 'pytket.Qubit', 'Qubit', (['(0)'], {}), '(0)\n', (1881, 1884), False, 'from pytket import Circuit, Qubit, Bit\n'), ((1886, 1894), 'pytket.Qubit', 'Qubit', (['(1)'], {}), '(1)\n', (1891, 1894), False, 'from pytket import Circuit, Qubit, Bit\n'), ((1898, 1906), 'pytket.Qubit', 'Qubit', (['(2)'], {}), '(2)\n', (1903, 1906), False, 'from pytket import Circuit, Qubit, Bit\n'), ((1908, 1916), 'pytket.Qubit', 'Qubit', (['(3)'], {}), '(3)\n', (1913, 1916), False, 'from pytket import Circuit, Qubit, Bit\n'), ((2602, 2610), 'pytket.Qubit', 'Qubit', (['(0)'], {}), '(0)\n', (2607, 2610), False, 'from pytket 
import Circuit, Qubit, Bit\n'), ((2628, 2636), 'pytket.Qubit', 'Qubit', (['(1)'], {}), '(1)\n', (2633, 2636), False, 'from pytket import Circuit, Qubit, Bit\n'), ((2654, 2662), 'pytket.Qubit', 'Qubit', (['(2)'], {}), '(2)\n', (2659, 2662), False, 'from pytket import Circuit, Qubit, Bit\n'), ((2680, 2688), 'pytket.Qubit', 'Qubit', (['(3)'], {}), '(3)\n', (2685, 2688), False, 'from pytket import Circuit, Qubit, Bit\n'), ((3244, 3250), 'pytket.Bit', 'Bit', (['(0)'], {}), '(0)\n', (3247, 3250), False, 'from pytket import Circuit, Qubit, Bit\n'), ((3252, 3260), 'pytket.Qubit', 'Qubit', (['(0)'], {}), '(0)\n', (3257, 3260), False, 'from pytket import Circuit, Qubit, Bit\n'), ((3514, 3522), 'pytket.Qubit', 'Qubit', (['(0)'], {}), '(0)\n', (3519, 3522), False, 'from pytket import Circuit, Qubit, Bit\n'), ((3524, 3532), 'pytket.Qubit', 'Qubit', (['(1)'], {}), '(1)\n', (3529, 3532), False, 'from pytket import Circuit, Qubit, Bit\n'), ((3536, 3544), 'pytket.Qubit', 'Qubit', (['(2)'], {}), '(2)\n', (3541, 3544), False, 'from pytket import Circuit, Qubit, Bit\n'), ((3546, 3554), 'pytket.Qubit', 'Qubit', (['(3)'], {}), '(3)\n', (3551, 3554), False, 'from pytket import Circuit, Qubit, Bit\n'), ((1194, 1202), 'pytket.Qubit', 'Qubit', (['(0)'], {}), '(0)\n', (1199, 1202), False, 'from pytket import Circuit, Qubit, Bit\n'), ((1204, 1212), 'pytket.Qubit', 'Qubit', (['(1)'], {}), '(1)\n', (1209, 1212), False, 'from pytket import Circuit, Qubit, Bit\n'), ((1216, 1224), 'pytket.Qubit', 'Qubit', (['(2)'], {}), '(2)\n', (1221, 1224), False, 'from pytket import Circuit, Qubit, Bit\n'), ((1226, 1234), 'pytket.Qubit', 'Qubit', (['(3)'], {}), '(3)\n', (1231, 1234), False, 'from pytket import Circuit, Qubit, Bit\n'), ((2735, 2749), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (2746, 2749), True, 'import numpy as np\n'), ((2787, 2801), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (2798, 2801), True, 'import numpy as np\n'), ((4204, 4210), 'pytket.Bit', 'Bit', (['(0)'], {}), '(0)\n', (4207, 4210), False, 'from pytket import Circuit, Qubit, Bit\n'), ((4212, 4220), 'pytket.Qubit', 'Qubit', (['(0)'], {}), '(0)\n', (4217, 4220), False, 'from pytket import Circuit, Qubit, Bit\n'), ((1318, 1328), 'pytket.Circuit', 'Circuit', (['(3)'], {}), '(3)\n', (1325, 1328), False, 'from pytket import Circuit, Qubit, Bit\n'), ((1366, 1376), 'pytket.Circuit', 'Circuit', (['(2)'], {}), '(2)\n', (1373, 1376), False, 'from pytket import Circuit, Qubit, Bit\n'), ((1929, 1939), 'pytket.Circuit', 'Circuit', (['(3)'], {}), '(3)\n', (1936, 1939), False, 'from pytket import Circuit, Qubit, Bit\n'), ((1977, 1987), 'pytket.Circuit', 'Circuit', (['(2)'], {}), '(2)\n', (1984, 1987), False, 'from pytket import Circuit, Qubit, Bit\n'), ((2954, 2964), 'pytket.Circuit', 'Circuit', (['(3)'], {}), '(3)\n', (2961, 2964), False, 'from pytket import Circuit, Qubit, Bit\n'), ((3566, 3576), 'pytket.Circuit', 'Circuit', (['(3)'], {}), '(3)\n', (3573, 3576), False, 'from pytket import Circuit, Qubit, Bit\n'), ((3002, 3015), 'pytket.Circuit', 'Circuit', (['(2)', '(2)'], {}), '(2, 2)\n', (3009, 3015), False, 'from pytket import Circuit, Qubit, Bit\n'), ((3614, 3627), 'pytket.Circuit', 'Circuit', (['(2)', '(2)'], {}), '(2, 2)\n', (3621, 3627), False, 'from pytket import Circuit, Qubit, Bit\n')] |
import numpy as np
from numba import njit as jit
from poliastro.core.util import circular_velocity
@jit
def beta(ecc_0, ecc_f, inc_0, inc_f, argp):
# Note: "The argument of perigee will vary during the orbit transfer
# due to the natural drift and because e may approach zero.
# However, [the equation] still gives a good estimate of the desired
# thrust angle."
return np.arctan(
abs(
3
* np.pi
* (inc_f - inc_0)
/ (
4
* np.cos(argp)
* (
ecc_0
- ecc_f
+ np.log((1 + ecc_f) * (-1 + ecc_0) / ((1 + ecc_0) * (-1 + ecc_f)))
)
)
)
)
@jit
def delta_V(V_0, ecc_0, ecc_f, beta_):
"""Compute required increment of velocity."""
return 2 * V_0 * np.abs(np.arcsin(ecc_0) - np.arcsin(ecc_f)) / (3 * np.cos(beta_))
@jit
def extra_quantities(k, a, ecc_0, ecc_f, inc_0, inc_f, argp, f):
"""Extra quantities given by the model."""
beta_ = beta(ecc_0, ecc_f, inc_0, inc_f, argp)
V_0 = circular_velocity(k, a)
delta_V_ = delta_V(V_0, ecc_0, ecc_f, beta_)
t_f_ = delta_V_ / f
return delta_V_, beta_, t_f_
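# Example (a sketch with illustrative values; k in km^3 / s^2, a in km and f,
# the thrust acceleration, in km / s^2; arguments follow the signature order
# k, a, ecc_0, ecc_f, inc_0, inc_f, argp, f):
#
#   dV, beta_, t_f = extra_quantities(398600.4418, 7000.0, 0.1, 0.0,
#                                     np.radians(28.5), 0.0, 0.0, 3.5e-7)
#   # t_f is the transfer time in seconds for the combined ecc / inc change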
| [
"numpy.log",
"numpy.arcsin",
"numpy.cos",
"poliastro.core.util.circular_velocity"
] | [((1119, 1142), 'poliastro.core.util.circular_velocity', 'circular_velocity', (['k', 'a'], {}), '(k, a)\n', (1136, 1142), False, 'from poliastro.core.util import circular_velocity\n'), ((924, 937), 'numpy.cos', 'np.cos', (['beta_'], {}), '(beta_)\n', (930, 937), True, 'import numpy as np\n'), ((880, 896), 'numpy.arcsin', 'np.arcsin', (['ecc_0'], {}), '(ecc_0)\n', (889, 896), True, 'import numpy as np\n'), ((899, 915), 'numpy.arcsin', 'np.arcsin', (['ecc_f'], {}), '(ecc_f)\n', (908, 915), True, 'import numpy as np\n'), ((533, 545), 'numpy.cos', 'np.cos', (['argp'], {}), '(argp)\n', (539, 545), True, 'import numpy as np\n'), ((642, 707), 'numpy.log', 'np.log', (['((1 + ecc_f) * (-1 + ecc_0) / ((1 + ecc_0) * (-1 + ecc_f)))'], {}), '((1 + ecc_f) * (-1 + ecc_0) / ((1 + ecc_0) * (-1 + ecc_f)))\n', (648, 707), True, 'import numpy as np\n')] |
import copy
import pickle
import numpy as np
from pcdet.utils import box_utils, common_utils
from pcdet.datasets.dataset import DatasetTemplate
from pcdet.datasets.robosense import robosense_utils
class RobosenseDataset(DatasetTemplate):
def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None):
super().__init__(
dataset_cfg=dataset_cfg, class_names=class_names, training=training, root_path=root_path, logger=logger
)
self.infos = []
self.include_robosense_data(self.mode)
# balanced_infos_resampling as nuscenes
if self.training and self.dataset_cfg.get('BALANCED_RESAMPLING', False):
self.infos = self.balanced_infos_resampling(self.infos)
def include_robosense_data(self, mode):
self.logger.info('Loading RoboSense dataset')
robosense_infos = []
for info_path in self.dataset_cfg.INFO_PATH[mode]:
info_path = self.root_path / info_path
if not info_path.exists():
self.logger.info('Info_Path not exist in mode %s' % mode)
continue
else:
with open(info_path, 'rb') as f:
infos = pickle.load(f)
robosense_infos.extend(infos)
for i in range(len(robosense_infos)):
robosense_infos[i]['annos']['gt_names'] = np.array([robosense_utils.map_name_from_general_to_detection[name]
for name in robosense_infos[i]['annos']['gt_names']])
self.infos.extend(robosense_infos)
self.logger.info('Total samples for robosense dataset: %d, for mode: %s' % (len(robosense_infos), mode))
def balanced_infos_resampling(self, infos):
"""
Class-balanced sampling of nuScenes dataset from https://arxiv.org/abs/1908.09492
"""
if self.class_names is None:
return infos
cls_infos = {name: [] for name in self.class_names}
for info in infos:
            # Frames are the sampling unit, so the same frame can be collected under several classes
for name in set(info['annos']['gt_names']):
if name in self.class_names:
cls_infos[name].append(info)
duplicated_samples = sum([len(v) for _, v in cls_infos.items()])
cls_dist = {k: len(v)/duplicated_samples for k, v in cls_infos.items()}
sampled_infos = []
frac = 1.0 / len(self.class_names)
ratios = [frac / v for v in cls_dist.values()]
for cur_cls_infos, cur_cls_ratio in zip(list(cls_infos.values()), ratios):
sampled_infos += np.random.choice(
cur_cls_infos, int(len(cur_cls_infos) * cur_cls_ratio)
).tolist()
self.logger.info('Total samples after balanced resampling: %s' % (len(sampled_infos)))
return sampled_infos
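    # Worked sketch of the resampling above: with two classes present in 75% / 25%
    # of the (class-duplicated) samples, frac = 0.5 and the per-class ratios become
    # 0.5 / 0.75 ~= 0.67 and 0.5 / 0.25 = 2.0, i.e. frames containing the rare class
    # are drawn roughly twice while frames with the common class are subsampled,
    # pushing the class distribution towards uniform.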
def __len__(self):
if self._merge_all_iters_to_one_epoch:
return len(self.infos) * self.total_epochs
return len(self.infos)
def __getitem__(self, index):
if self._merge_all_iters_to_one_epoch:
index = index % len(self.infos)
info = copy.deepcopy(self.infos[index])
points = robosense_utils.load_pcd_to_ndarray(info['lidar_path'])
        # Normalize reflectivity / intensity to [0, 1]
points[:, 3] = points[:, 3] / 255.0
annos = info['annos']
input_dict = {
'points': points,
'frame_id': annos['frame_id']
}
if 'gt_boxes_lidar' in annos:
            # Drop don't-care / unknown boxes before building the training sample
annos = robosense_utils.drop_info_with_name(annos, name='unknown') #name='DontCare')
            # Optionally drop boxes containing very few points (benefit unclear):
            # annos = robosense_utils.drop_info_with_box_points(annos, min_pts=5)
            # Whether the minimum-points filter below helps is also untested
if self.dataset_cfg.get('FILTER_MIN_POINTS_IN_GT', False):
mask = (annos['num_lidar_pts'] > self.dataset_cfg.FILTER_MIN_POINTS_IN_GT - 1)
else:
mask = None
gt_names = annos['gt_names'] if mask is None else annos['gt_names'][mask]
gt_boxes_lidar = annos['gt_boxes_lidar'] if mask is None else annos['gt_boxes_lidar'][mask]
input_dict.update({
'gt_names': gt_names,
'gt_boxes': gt_boxes_lidar
})
data_dict = self.prepare_data(data_dict=input_dict)
return data_dict
@staticmethod
def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
"""
Args:
batch_dict:
frame_id:
pred_dicts: list of pred_dicts
pred_boxes: (N, 7), Tensor
pred_scores: (N), Tensor
pred_labels: (N), Tensor
class_names:
output_path:
Returns:
"""
def get_template_prediction(num_samples):
ret_dict = {
'name': np.zeros(num_samples), 'score': np.zeros(num_samples),
'boxes_lidar': np.zeros([num_samples, 7]), 'pred_labels': np.zeros(num_samples)
}
return ret_dict
def generate_single_sample_dict(box_dict):
pred_scores = box_dict['pred_scores'].cpu().numpy()
pred_boxes = box_dict['pred_boxes'].cpu().numpy()
pred_labels = box_dict['pred_labels'].cpu().numpy()
pred_dict = get_template_prediction(pred_scores.shape[0])
if pred_scores.shape[0] == 0:
return pred_dict
            # Fancy-indexing the class-name array with (label - 1) maps integer labels to names
            pred_dict['name'] = np.array(class_names)[pred_labels.astype(np.int64) - 1]
pred_dict['score'] = pred_scores
pred_dict['boxes_lidar'] = pred_boxes
pred_dict['pred_labels'] = pred_labels
return pred_dict
annos = []
for index, box_dict in enumerate(pred_dicts):
single_pred_dict = generate_single_sample_dict(box_dict)
single_pred_dict['frame_id'] = batch_dict['frame_id'][index]
annos.append(single_pred_dict)
return annos
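    # Each dict in `annos` carries 'name', 'score', 'boxes_lidar', 'pred_labels'
    # and 'frame_id', which is the format consumed by `evaluation` below.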
def evaluation(self, det_annos, class_names, **kwargs):
eval_det_annos = copy.deepcopy(det_annos) # 'frame_id' 'name' 'score' 'boxes_lidar' 'pred_labels'
eval_gt_annos = [copy.deepcopy(info['annos']) for info in self.infos] # 'gt_names' 'gt_boxes_lidar' 'num_lidar_pts' 'frame_id'
for i in range(len(eval_gt_annos)):
eval_gt_annos[i]['name'] = eval_gt_annos[i]['gt_names']
# for test
eval_gt_annos = eval_gt_annos[0:len(eval_det_annos)]
assert len(eval_gt_annos) == len(eval_det_annos)
for i in range(len(eval_det_annos)):
assert eval_det_annos[i]['frame_id'] == eval_gt_annos[i]['frame_id']
# Filter boxes (distance, points per box, etc.).
filtered_gt_box_num_By_range = 0
for i in range(len(eval_gt_annos)):
mask = box_utils.mask_boxes_outside_range_numpy(eval_gt_annos[i]['gt_boxes_lidar'], self.point_cloud_range)
filtered_gt_box_num_By_range += (eval_gt_annos[i]['gt_boxes_lidar'].shape[0] - mask.sum())
for key in eval_gt_annos[i].keys():
if isinstance(eval_gt_annos[i][key], np.ndarray):
eval_gt_annos[i][key] = eval_gt_annos[i][key][mask]
self.logger.info('Eval preprocess--filter gt box by PointCloud range. filtered %d GT boxes' % (int(filtered_gt_box_num_By_range)))
# # filtered_dt_box_num_By_score
# min_score = 0.2
# for i in range(len(eval_det_annos)):
# keep_indices = [i for i, x in enumerate(eval_det_annos[i]['score']) if x >= min_score]
# for key in eval_det_annos[i].keys():
# if isinstance(eval_det_annos[i][key], np.ndarray):
# eval_det_annos[i][key] = eval_det_annos[i][key][keep_indices]
# filtered_gt_box_num_By_pointsnum = 0
# min_pts = 5
# for i in range(len(eval_gt_annos)):
# mask = eval_gt_annos[i]['boxes_points_pts'] >= min_pts
# filtered_gt_box_num_By_pointsnum += (eval_gt_annos[i]['gt_boxes_lidar'].shape[0] - mask.sum())
# for key in eval_gt_annos[i].keys():
# eval_gt_annos[i][key] = eval_gt_annos[i][key][mask]
# self.logger.info('Eval preprocess--filter gt box by box points num. filtered %d GT boxes' % (
# int(filtered_gt_box_num_By_pointsnum)))
#
print(class_names)
        total_num_class_det = np.zeros(len(class_names), dtype=np.int64)
        total_num_class_gt = np.zeros(len(class_names), dtype=np.int64)
for i in range(len(eval_det_annos)):
name_ints = np.array([class_names.index(n) for n in eval_det_annos[i]['name']], dtype=np.int32)
for name_int in name_ints:
total_num_class_det[name_int] += 1
name_ints = np.array([class_names.index(n) for n in eval_gt_annos[i]['name'] if n in class_names], dtype=np.int32)
for name_int in name_ints:
total_num_class_gt[name_int] += 1
print('Det total num class: ', total_num_class_det.tolist())
print('GT total num class: ', total_num_class_gt.tolist())
# TODO
        # Either drop the unknown / don't-care class here, or map every class that
        # is not being detected to don't care (see common_utils.drop_info_with_name)
def robosense_eval():
from . import robosense_eval
# some parameter needed here
using_IOU = True
overlap_0_7 = np.array([0.7, 0.7, 0.7, 0.7, 0.5, 0.5, 0.5, 0.5])
min_overlaps = overlap_0_7
# TODO
# now just part for robosense
# class_to_name = {
# 0: 'vehicle',
# 1: 'pedestrian',
# 2: 'bicycle',
#
# }
class_to_name = {
0: 'vehicle',
1: 'tricycle',
2: 'big_vehicle',
3: 'huge_vehicle',
4: 'motorcycle',
5: 'bicycle',
6: 'pedestrian',
7: 'cone'
}
my_eval = robosense_eval.RoboSense_Eval(
gt_annos=eval_gt_annos, dt_annos=eval_det_annos,
current_classes=class_names, output_dir=self.root_path,
class_to_name=class_to_name, min_overlaps=min_overlaps
)
ap_result_str, ap_dict = my_eval.evaluate()
return ap_result_str, ap_dict
        # Convert annotations to KITTI format and reuse the standard KITTI evaluation
def kitti_eval():
from pcdet.datasets.kitti.kitti_object_eval_python import eval as kitti_eval
from pcdet.datasets.kitti import kitti_utils
map_name_to_kitti = {
'vehicle': 'Car',
'tricycle': 'Tricycle',
'big_vehicle': 'Big_vehicle',
'huge_vehicle': 'Huge_vehicle',
'motorcycle': 'Motorcycle',
'bicycle': 'Cyclist',
'pedestrian': 'Pedestrian',
'cone': 'Cone',
'unknown': 'DontCare'
}
kitti_utils.transform_annotations_to_kitti_format(eval_det_annos, map_name_to_kitti=map_name_to_kitti)
kitti_utils.transform_annotations_to_kitti_format(eval_gt_annos, map_name_to_kitti=map_name_to_kitti)
kitti_class_names = [map_name_to_kitti[x] for x in class_names]
PR_detail_dict = {}
ap_result_str, ap_dict = kitti_eval.get_official_eval_result(
gt_annos=eval_gt_annos, dt_annos=eval_det_annos, current_classes=kitti_class_names,
PR_detail_dict=PR_detail_dict
)
return ap_result_str, ap_dict
ap_result_str, ap_dict = kitti_eval() #robosense_eval()
return ap_result_str, ap_dict
def create_robosense_infos(dataset_cfg, data_path, save_path):
import os
num_workers = 4
train_scenes = ['ruby119_nanshandadao_1200421163451', 'ruby_ruby002_baoshenlu_1200303103447', 'ruby_ruby136_shizilukou_1200526171538',
'ruby_ruby144_shizilukou_1200529160951']
val_scenes = ['ruby112_lishanlu_1200430192539', 'ruby119_longzhudadao_1200423181920', 'ruby_ruby002_wushitoulu_1200303111734',
'ruby_ruby136_shizilukou_1200521120824', 'ruby_ruby136_shizilukou_1200526161859']
    # All pcd and label file paths
train_data_path = []
train_label_path = []
val_data_path = []
val_label_path = []
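    # collect the matching (.npy point cloud, .json label) pair for every frame in each scene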
for scene in train_scenes:
scene_label_path = os.listdir(data_path / scene / 'label')
for path in scene_label_path:
name = path.split('.')[0]
train_data_path.append(data_path / scene / 'npy' / (name + '.npy'))
train_label_path.append(data_path / scene / 'label' / (name + '.json'))
for scene in val_scenes:
scene_label_path = os.listdir(data_path / scene / 'label')
for path in scene_label_path:
name = path.split('.')[0]
val_data_path.append(data_path / scene / 'npy' / (name + '.npy'))
val_label_path.append(data_path / scene / 'label' / (name + '.json'))
from pcdet.datasets.robosense import robosense_utils
import concurrent.futures as futures
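    # parse frames in parallel; each worker receives one (data_path, label_path) tuple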
with futures.ThreadPoolExecutor(num_workers) as executor:
train_infos = executor.map(robosense_utils.process_single_data, zip(train_data_path, train_label_path))
with futures.ThreadPoolExecutor(num_workers) as executor:
val_infos = executor.map(robosense_utils.process_single_data, zip(val_data_path, val_label_path))
train_file_name = save_path / 'robosense_infos_train.pkl'
val_file_name = save_path / 'robosense_infos_val.pkl'
with open(train_file_name, 'wb') as f:
pickle.dump(list(train_infos), f)
with open(val_file_name, 'wb') as f:
pickle.dump(list(val_infos), f)
robosense_utils.create_groundtruth_database(info_path=train_file_name, save_path=save_path)
print('---------------Data preparation Done---------------')
# test for debug by ck
if __name__ == '__main__':
import sys
import os
import yaml
import json
import datetime
from pathlib import Path
from easydict import EasyDict
print(os.path.abspath('../../..')+'/tools/cfgs/dataset_configs/robosense_dataset.yaml')
    dataset_cfg = EasyDict(yaml.safe_load(open(os.path.abspath('../../..')+'/tools/cfgs/dataset_configs/robosense_dataset.yaml')))
ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
ROOT_DIR_ck = Path(dataset_cfg.DATA_PATH)
create_robosense_infos(
dataset_cfg=dataset_cfg,
data_path=ROOT_DIR_ck,
save_path=ROOT_DIR_ck
)
# log_file = ROOT_DIR_ck / 'data_log' / ('log_data_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
# logger = common_utils.create_logger(log_file)
# robosense_dataset = RobosenseDataset(
# dataset_cfg=dataset_cfg, class_names=['vehicle', 'pedestrian', 'bicycle'],
# root_path=ROOT_DIR_ck,
# logger=logger, training=True
# )
#
# one_sample = robosense_dataset.__getitem__(8)
# print('RoboSense Read data end!')
| [
"pcdet.datasets.robosense.robosense_utils.drop_info_with_name",
"os.listdir",
"pcdet.datasets.kitti.kitti_object_eval_python.eval",
"pcdet.datasets.kitti.kitti_object_eval_python.eval.get_official_eval_result",
"pcdet.datasets.robosense.robosense_utils.load_pcd_to_ndarray",
"pathlib.Path",
"concurrent.f... | [((13973, 14068), 'pcdet.datasets.robosense.robosense_utils.create_groundtruth_database', 'robosense_utils.create_groundtruth_database', ([], {'info_path': 'train_file_name', 'save_path': 'save_path'}), '(info_path=train_file_name,\n save_path=save_path)\n', (14016, 14068), False, 'from pcdet.datasets.robosense import robosense_utils\n'), ((14635, 14662), 'pathlib.Path', 'Path', (['dataset_cfg.DATA_PATH'], {}), '(dataset_cfg.DATA_PATH)\n', (14639, 14662), False, 'from pathlib import Path\n'), ((3159, 3191), 'copy.deepcopy', 'copy.deepcopy', (['self.infos[index]'], {}), '(self.infos[index])\n', (3172, 3191), False, 'import copy\n'), ((3209, 3264), 'pcdet.datasets.robosense.robosense_utils.load_pcd_to_ndarray', 'robosense_utils.load_pcd_to_ndarray', (["info['lidar_path']"], {}), "(info['lidar_path'])\n", (3244, 3264), False, 'from pcdet.datasets.robosense import robosense_utils\n'), ((6258, 6282), 'copy.deepcopy', 'copy.deepcopy', (['det_annos'], {}), '(det_annos)\n', (6271, 6282), False, 'import copy\n'), ((11848, 11860), 'pcdet.datasets.kitti.kitti_object_eval_python.eval', 'kitti_eval', ([], {}), '()\n', (11858, 11860), True, 'from pcdet.datasets.kitti.kitti_object_eval_python import eval as kitti_eval\n'), ((12627, 12666), 'os.listdir', 'os.listdir', (["(data_path / scene / 'label')"], {}), "(data_path / scene / 'label')\n", (12637, 12666), False, 'import os\n'), ((12964, 13003), 'os.listdir', 'os.listdir', (["(data_path / scene / 'label')"], {}), "(data_path / scene / 'label')\n", (12974, 13003), False, 'import os\n'), ((13348, 13387), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', (['num_workers'], {}), '(num_workers)\n', (13374, 13387), True, 'import concurrent.futures as futures\n'), ((13522, 13561), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', (['num_workers'], {}), '(num_workers)\n', (13548, 13561), True, 'import concurrent.futures as futures\n'), ((1394, 1518), 'numpy.array', 'np.array', (["[robosense_utils.map_name_from_general_to_detection[name] for name in\n robosense_infos[i]['annos']['gt_names']]"], {}), "([robosense_utils.map_name_from_general_to_detection[name] for name in\n robosense_infos[i]['annos']['gt_names']])\n", (1402, 1518), True, 'import numpy as np\n'), ((3559, 3617), 'pcdet.datasets.robosense.robosense_utils.drop_info_with_name', 'robosense_utils.drop_info_with_name', (['annos'], {'name': '"""unknown"""'}), "(annos, name='unknown')\n", (3594, 3617), False, 'from pcdet.datasets.robosense import robosense_utils\n'), ((6366, 6394), 'copy.deepcopy', 'copy.deepcopy', (["info['annos']"], {}), "(info['annos'])\n", (6379, 6394), False, 'import copy\n'), ((7016, 7120), 'pcdet.utils.box_utils.mask_boxes_outside_range_numpy', 'box_utils.mask_boxes_outside_range_numpy', (["eval_gt_annos[i]['gt_boxes_lidar']", 'self.point_cloud_range'], {}), "(eval_gt_annos[i]['gt_boxes_lidar'],\n self.point_cloud_range)\n", (7056, 7120), False, 'from pcdet.utils import box_utils, common_utils\n'), ((9598, 9648), 'numpy.array', 'np.array', (['[0.7, 0.7, 0.7, 0.7, 0.5, 0.5, 0.5, 0.5]'], {}), '([0.7, 0.7, 0.7, 0.7, 0.5, 0.5, 0.5, 0.5])\n', (9606, 9648), True, 'import numpy as np\n'), ((11212, 11318), 'pcdet.datasets.kitti.kitti_utils.transform_annotations_to_kitti_format', 'kitti_utils.transform_annotations_to_kitti_format', (['eval_det_annos'], {'map_name_to_kitti': 'map_name_to_kitti'}), '(eval_det_annos,\n map_name_to_kitti=map_name_to_kitti)\n', (11261, 11318), False, 'from pcdet.datasets.kitti import kitti_utils\n'), 
((11327, 11432), 'pcdet.datasets.kitti.kitti_utils.transform_annotations_to_kitti_format', 'kitti_utils.transform_annotations_to_kitti_format', (['eval_gt_annos'], {'map_name_to_kitti': 'map_name_to_kitti'}), '(eval_gt_annos,\n map_name_to_kitti=map_name_to_kitti)\n', (11376, 11432), False, 'from pcdet.datasets.kitti import kitti_utils\n'), ((11575, 11735), 'pcdet.datasets.kitti.kitti_object_eval_python.eval.get_official_eval_result', 'kitti_eval.get_official_eval_result', ([], {'gt_annos': 'eval_gt_annos', 'dt_annos': 'eval_det_annos', 'current_classes': 'kitti_class_names', 'PR_detail_dict': 'PR_detail_dict'}), '(gt_annos=eval_gt_annos, dt_annos=\n eval_det_annos, current_classes=kitti_class_names, PR_detail_dict=\n PR_detail_dict)\n', (11610, 11735), True, 'from pcdet.datasets.kitti.kitti_object_eval_python import eval as kitti_eval\n'), ((14336, 14363), 'os.path.abspath', 'os.path.abspath', (['"""../../.."""'], {}), "('../../..')\n", (14351, 14363), False, 'import os\n'), ((5020, 5041), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (5028, 5041), True, 'import numpy as np\n'), ((5052, 5073), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (5060, 5073), True, 'import numpy as np\n'), ((5106, 5132), 'numpy.zeros', 'np.zeros', (['[num_samples, 7]'], {}), '([num_samples, 7])\n', (5114, 5132), True, 'import numpy as np\n'), ((5149, 5170), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (5157, 5170), True, 'import numpy as np\n'), ((5661, 5682), 'numpy.array', 'np.array', (['class_names'], {}), '(class_names)\n', (5669, 5682), True, 'import numpy as np\n'), ((1228, 1242), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1239, 1242), False, 'import pickle\n'), ((14460, 14487), 'os.path.abspath', 'os.path.abspath', (['"""../../.."""'], {}), "('../../..')\n", (14475, 14487), False, 'import os\n'), ((14560, 14574), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (14564, 14574), False, 'from pathlib import Path\n')] |
import cvxpy as cp
import numpy as np
if __name__ == "__main__":
# Problem data.
m = 30
n = 20
np.random.seed(1)
A = np.random.randn(m, n)
b = np.random.randn(m)
# Construct the problem.
x = cp.Variable(n)
    objective = cp.Minimize(cp.sum_squares(A @ x - b))
constraints = [0 <= x, x <= 1]
prob = cp.Problem(objective, constraints)
# The optimal objective value is returned by `prob.solve()`.
result = prob.solve()
# The optimal value for x is stored in `x.value`.
print(x.value)
# The optimal Lagrange multiplier for a constraint is stored in
# `constraint.dual_value`.
print(constraints[0].dual_value) | [
"cvxpy.Variable",
"cvxpy.Problem",
"cvxpy.sum_squares",
"numpy.random.seed",
"numpy.random.randn"
] | [((112, 129), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (126, 129), True, 'import numpy as np\n'), ((138, 159), 'numpy.random.randn', 'np.random.randn', (['m', 'n'], {}), '(m, n)\n', (153, 159), True, 'import numpy as np\n'), ((168, 186), 'numpy.random.randn', 'np.random.randn', (['m'], {}), '(m)\n', (183, 186), True, 'import numpy as np\n'), ((225, 239), 'cvxpy.Variable', 'cp.Variable', (['n'], {}), '(n)\n', (236, 239), True, 'import cvxpy as cp\n'), ((339, 373), 'cvxpy.Problem', 'cp.Problem', (['objective', 'constraints'], {}), '(objective, constraints)\n', (349, 373), True, 'import cvxpy as cp\n'), ((268, 293), 'cvxpy.sum_squares', 'cp.sum_squares', (['(A * x - b)'], {}), '(A * x - b)\n', (282, 293), True, 'import cvxpy as cp\n')] |
import os
import numpy as np
import sys
import inspect
import functools
from collections import defaultdict
#pylint:disable=no-member,too-many-function-args
class Device: CPU, GPU = 0, 1
DEFAULT_DEVICE = Device.CPU if os.environ.get('GPU', 0) != "1" else Device.GPU
try:
    import pyopencl as cl
    # TODO: move this import to require_init_gpu?
    from .ops import gpu
    # GPU op registration happens at the bottom of this file, once
    # _register_ops has been defined
    GPU = True
except ImportError:
    # no GPU support
    cl = None
    GPU = False
class Tensor:
training = True
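    # per-device registry of op implementations, populated by register() below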
ops = defaultdict(dict)
def __init__(self, data, device=DEFAULT_DEVICE, requires_grad=True):
        if not isinstance(data, (list, tuple, np.ndarray)):
            raise ValueError('`data` must be a list, tuple or ndarray')
self.data = self._move_data(data, device)
        self.device = device
        self.grad = None
        # context for autograd graph construction; set when an op is applied
        self._ctx = None
def __repr__(self):
return f"<hazel.Tensor {self.data!r}>"
def assign(self, x):
self.data = x.data
@property
def shape(self):
return self.data.shape
@property
def dtype(self):
return self.data.dtype
# ***** creation functions *****
@classmethod
def zeros(cls, *shape, **kwargs):
return cls(np.zeros(shape, dtype=np.float32), **kwargs)
@classmethod
def ones(cls, *shape, **kwargs):
return cls(np.ones(shape, dtype=np.float32), **kwargs)
@classmethod
def randn(cls, *shape, **kwargs):
return cls(np.random.randn(*shape).astype(np.float32), **kwargs)
@classmethod
def uniform(cls, *shape, **kwargs):
return cls((np.random.uniform(-1., 1., size=shape)/np.sqrt(np.prod(shape))).astype(np.float32), **kwargs)
@classmethod
def eye(cls, dim, **kwargs):
return cls(np.eye(dim).astype(np.float32), **kwargs)
# ***** toposort and backward pass *****
def deepwalk(self, visited: set, nodes: list):
visited.add(self)
if self._ctx:
[i.deepwalk(visited, nodes) for i in self._ctx.parents if i not in visited]
            nodes.append(self)
return nodes
def backward(self):
assert self.shape == (1,)
# fill in the first grad with one
# this is "implicit gradient creation"
self.grad = Tensor(np.ones(self.shape, dtype=self.dtype), device=self.device, requires_grad=False)
for t0 in reversed(self.deepwalk(set(), [])):
            assert (t0.grad is not None)
            # gradients w.r.t. each parent, computed by the op's stored context
            grads = t0._ctx.backward(t0._ctx, t0.grad.data)
            if len(t0._ctx.parents) == 1:
                grads = [grads]
for t, g in zip(t0._ctx.parents, grads):
if g is not None:
assert g.shape == t.shape, \
f"grad shape must match tensor shape in {self._ctx!r}, {g.shape!r} != {t.shape!r}"
gt = Tensor(g, device=self.device, requires_grad=False)
t.grad = gt if t.grad is None else (t.grad + gt)
# ***** hazel supports only CPU *****
@staticmethod
def _move_data(data, device):
return data
def to_(self, device):
self.data = self._move_data(self.data, device)
self.device = device
if self.grad:
self.grad.to_(device)
def to(self, device):
ret = Tensor(self.data, device)
if self.grad:
ret.grad = self.grad.to(device)
return ret
def detach(self):
return Tensor(self.data, device=self.device)
# ***** non first class ops *****
def __getitem__(self, val):
arg = []
for i,s in enumerate(val if type(val) in [list, tuple] else ([] if val is None else [val])):
arg.append((s.start if s.start is not None else 0,
(s.stop if s.stop >=0 else self.shape[i]+s.stop) if s.stop is not None else self.shape[i]))
assert s.step is None or s.step == 1
return self.slice(arg = arg+[(0,self.shape[i]) for i in range(len(arg), len(self.shape))])
def pad2d(self, padding):
return self[:, :, -padding[2]:self.shape[2]+padding[3], -padding[0]:self.shape[3]+padding[1]]
def dot(self, w):
return self.matmul(w)
def mean(self, axis=None):
out = self.sum(axis=axis)
return out * (np.prod(out.shape)/np.prod(self.shape))
def sqrt(self):
return self.pow(0.5)
def div(self, y):
return self * (y ** -1.0)
__truediv__ = div
def sigmoid(self):
e = self.exp()
return e.div(1 + e)
def swish(self):
return self * self.sigmoid()
def relu6(self):
return self.relu() - (self-6).relu()
def hardswish(self):
return self * (self+3).relu6() * (1/6)
def tanh(self):
return 2.0 * ((2.0 * self).sigmoid()) - 1.0
def leakyrelu(self, neg_slope=0.01):
return self.relu() - (-neg_slope*self).relu()
def softmax(self):
ns = list(self.shape)[:-1]+[1]
m = self.max(axis=len(self.shape)-1).reshape(shape=ns)
e = (self - m).exp()
ss = e.sum(axis=len(self.shape)-1).reshape(shape=ns)
return e.div(ss)
def logsoftmax(self):
ns = list(self.shape)[:-1]+[1]
m = self.max(axis=len(self.shape)-1).reshape(shape=ns)
ss = m + (self-m).exp().sum(axis=len(self.shape)-1).reshape(shape=ns).log()
return self - ss
def dropout(self, p=0.5):
# TODO: this needs a test
if Tensor.training:
_mask = np.asarray(np.random.binomial(1, 1.0-p, size=self.shape), dtype=self.dtype)
return self * Tensor(_mask, requires_grad=False, device=self.device) * (1/(1.0 - p))
else:
return self
def softplus(self, limit=20, beta=1):
# safe softplus - 1/beta*log(1 + exp(beta*x)) (PyTorch)
eb = (self*beta).exp()
ret = (1 + eb).log()
return (1/beta)*ret
def mish(self):
return self * (self.softplus().tanh()) # x*tanh(softplus(x))
def abs(self):
return self.relu() + (-1.0*self).relu()
def sign(self):
return self / (self.abs() + 1e-10)
def _pool2d(self, py, px):
xup = self[:, :, :self.shape[2]-self.shape[2]%py, :self.shape[3]-self.shape[3]%px]
return xup.reshape(shape=(xup.shape[0], xup.shape[1], xup.shape[2]//py, py, xup.shape[3]//px, px))
def avg_pool2d(self, kernel_size=(2,2)):
return self._pool2d(*kernel_size).mean(axis=(3,5))
def max_pool2d(self, kernel_size=(2,2)):
return self._pool2d(*kernel_size).max(axis=(3,5))
cl_ctx, cl_queue = None, None
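# register() exposes each op as Tensor.<name>; for arithmetic ops it also wires
# up the matching dunder methods (__add__, __iadd__, __radd__, ...) via dispatch()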
def register(name, fxn, device=Device.CPU):
Tensor.ops[device][name] = fxn
def dispatch(*x, **kwargs):
tt = [arg for arg in x if isinstance(arg, Tensor)][0]
x = [Tensor(np.array([arg], dtype=tt.dtype), device=tt.device, requires_grad=False) if not isinstance(arg, Tensor) else arg for arg in x]
f = Tensor.ops[tt.device][name]
f.cl_ctx, f.cl_queue, f.device = cl_ctx, cl_queue, tt.device
return f.apply(f, *x, **kwargs)
setattr(Tensor, name, dispatch)
if name in ['add', 'sub', 'mul', 'pow', 'matmul']:
setattr(Tensor, f"__{name}__", dispatch)
setattr(Tensor, f"__i{name}__", lambda self,x: self.assign(dispatch(self,x)))
setattr(Tensor, f"__r{name}__", lambda self,x: dispatch(x,self))
for device in [device for device in Device.__dict__.keys() if device[0] != "_"]:
setattr(Tensor, f"{device.lower()}", functools.partialmethod(Tensor.to, Device.__dict__[device]))
setattr(Tensor, f"{device.lower()}_", functools.partialmethod(Tensor.to_, Device.__dict__[device]))
# This registers all the operations
def _register_ops(namespace, device=Device.CPU):
for name, cls in inspect.getmembers(namespace, inspect.isclass):
if name[0] != "_": register(name.lower(), cls, device=device)
from hazel import cpu_ops
_register_ops(cpu_ops)
if GPU:
    _register_ops(gpu, device=Device.GPU)
| [
"numpy.prod",
"numpy.eye",
"inspect.getmembers",
"numpy.ones",
"os.environ.get",
"numpy.array",
"numpy.zeros",
"collections.defaultdict",
"numpy.random.uniform",
"numpy.random.randn",
"numpy.random.binomial",
"functools.partialmethod"
] | [((550, 567), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (561, 567), False, 'from collections import defaultdict\n'), ((7685, 7731), 'inspect.getmembers', 'inspect.getmembers', (['namespace', 'inspect.isclass'], {}), '(namespace, inspect.isclass)\n', (7703, 7731), False, 'import inspect\n'), ((221, 245), 'os.environ.get', 'os.environ.get', (['"""GPU"""', '(0)'], {}), "('GPU', 0)\n", (235, 245), False, 'import os\n'), ((7413, 7472), 'functools.partialmethod', 'functools.partialmethod', (['Tensor.to', 'Device.__dict__[device]'], {}), '(Tensor.to, Device.__dict__[device])\n', (7436, 7472), False, 'import functools\n'), ((7516, 7576), 'functools.partialmethod', 'functools.partialmethod', (['Tensor.to_', 'Device.__dict__[device]'], {}), '(Tensor.to_, Device.__dict__[device])\n', (7539, 7576), False, 'import functools\n'), ((1249, 1282), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (1257, 1282), True, 'import numpy as np\n'), ((1368, 1400), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (1375, 1400), True, 'import numpy as np\n'), ((2288, 2325), 'numpy.ones', 'np.ones', (['self.shape'], {'dtype': 'self.dtype'}), '(self.shape, dtype=self.dtype)\n', (2295, 2325), True, 'import numpy as np\n'), ((4201, 4219), 'numpy.prod', 'np.prod', (['out.shape'], {}), '(out.shape)\n', (4208, 4219), True, 'import numpy as np\n'), ((4220, 4239), 'numpy.prod', 'np.prod', (['self.shape'], {}), '(self.shape)\n', (4227, 4239), True, 'import numpy as np\n'), ((5427, 5474), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(1.0 - p)'], {'size': 'self.shape'}), '(1, 1.0 - p, size=self.shape)\n', (5445, 5474), True, 'import numpy as np\n'), ((1487, 1510), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1502, 1510), True, 'import numpy as np\n'), ((1783, 1794), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (1789, 1794), True, 'import numpy as np\n'), ((6715, 6746), 'numpy.array', 'np.array', (['[arg]'], {'dtype': 'tt.dtype'}), '([arg], dtype=tt.dtype)\n', (6723, 6746), True, 'import numpy as np\n'), ((1619, 1659), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)'], {'size': 'shape'}), '(-1.0, 1.0, size=shape)\n', (1636, 1659), True, 'import numpy as np\n'), ((1666, 1680), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (1673, 1680), True, 'import numpy as np\n')] |
# Copyright (c) 2020.
# MIT License
#
# Copyright (c) 2019 YumeNetwork
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import numpy as np
import pandas
import psycopg2
from sqlalchemy import and_, select, func
from modules.sql.dbConnect import Db
from model.guild import Guild
from model.user import User
from psycopg2 import extras
class RankingsDB:
@staticmethod
def rows_to_dict(rows) -> dict:
rankings = {"guild_id": rows['guild_id'], "level": rows['level'], "xp": rows['xp'], "total": rows['total'],
"reach": rows['reach'], "user_id": rows['user_id']}
return rankings
@staticmethod
def get_user(user_id: int, guild_id: int) -> dict:
con, cur = Db.connect()
try:
cur.execute(
"SELECT * FROM public.rankings WHERE user_id = {}::text and guild_id = {}::text;".format(str(user_id),
str(guild_id)))
except Exception as err:
print(err)
con.rollback()
rows = cur.fetchone()
if rows:
rankings = RankingsDB.rows_to_dict(rows)
return rankings
return {}
@staticmethod
def create_ranking(user_id: int, guild_id: int):
con, cur = Db.connect()
try:
cur.execute(
"INSERT INTO public.rankings ( guild_id, level, reach, total, user_id, xp) "
" VALUES ( {}::text, 0, 20, 0, {}::text, 0 );".format(
str(guild_id), str(user_id)))
except Exception as err:
print(err)
con.rollback()
con.commit()
@staticmethod
def reset_user(user_id: int, guild_id: int):
con, cur = Db.connect()
try:
cur.execute(
"UPDATE public.rankings SET level = 0, reach = 20, total = 0, xp = 0 "
"WHERE guild_id = {}::text AND user_id = {}::text;".format(
str(guild_id), str(user_id)))
except Exception as err:
print(err)
con.rollback()
con.commit()
@staticmethod
def update_user(user_id: int, guild_id: int, ranking: dict):
con, cur = Db.connect()
try:
cur.execute(
"UPDATE public.rankings SET level = {}, reach = {}, total = {}, xp = {} "
"WHERE guild_id = {}::text AND user_id = {}::text;".format(
ranking['level'], ranking['reach'], ranking['total'], ranking['xp'],
str(guild_id), str(user_id)))
except Exception as err:
print(err)
con.rollback()
con.commit()
@staticmethod
def update_user_id(user_id: id, guild_id: id, level: int, reach: int, xp: int):
con, cur = Db.connect()
try:
cur.execute(
"UPDATE public.rankings SET level = {}, reach = {}, xp = {} "
"WHERE guild_id = {}::text AND user_id = {}::text;".format(
level, reach, xp, str(guild_id), str(user_id)))
except Exception as err:
print(err)
con.rollback()
con.commit()
@staticmethod
def get_rank(user_id: int, guild_id: int) -> int:
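        # 1-based rank of the user within the guild, ordered by total XP descending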
con, cur = Db.connect()
try:
cur.execute(
"SELECT user_id FROM public.rankings "
"WHERE guild_id = {}::text GROUP BY user_id, total ORDER BY total DESC "
.format(str(guild_id)))
except Exception as err:
print(err)
con.rollback()
rows = cur.fetchall()
if rows:
df = pandas.DataFrame(np.array(rows), columns=["ID"])
return df.ID[df.ID == str(user_id)].index.tolist()[0] + 1
return 0
@staticmethod
def get_scoreboard(guild: Guild) -> list:
con, cur = Db.connect()
try:
cur.execute(
"SELECT user_id FROM public.rankings "
"WHERE guild_id = {}::text GROUP BY user_id, total ORDER BY total DESC LIMIT 10".format(
str(guild.guild_id)))
except Exception as err:
print(err)
con.rollback()
rows = cur.fetchall()
if rows:
df = pandas.DataFrame(np.array(rows), columns=["ID"])
return df.ID.values.tolist()
return []
@staticmethod
def get_all():
con, cur = Db.connect()
try:
cur.execute(
"SELECT * FROM public.rankings;")
except Exception as err:
print(err)
con.rollback()
rows = cur.fetchall()
if rows:
rankings = []
for row in rows:
rankings.append(RankingsDB.rows_to_dict(row))
return rankings
@staticmethod
def set_ignored_chan(guild_id: int, chan_id: int):
con, cur = Db.connect()
try:
cur.execute(
"INSERT INTO public.rankings_chan ( guild_id, chan_id) VALUES ({}::text , {}::text );".format(
str(guild_id), str(chan_id)))
except Exception as err:
print(err)
con.rollback()
con.commit()
@staticmethod
def is_ignored_chan(chan_id: int):
con, cur = Db.connect()
try:
cur.execute(
"SELECT * FROM public.rankings_chan WHERE chan_id = {}::text;".format(str(chan_id))
)
except Exception as err:
print(err)
con.rollback()
rows = cur.fetchone()
if rows:
return True
return False
@staticmethod
def get_ignored_chan(guild_id: int):
channels = []
con, cur = Db.connect()
try:
cur.execute(
"SELECT FROM public.rankings_chan WHERE guild_id = {}::text".format(str(guild_id))
)
rows = cur.fetchall()
if rows:
for row in rows:
channels.append({
"chan_id": row['chan_id'],
"guild_id": row["guild_id"]
})
return channels
except Exception as err:
print(err)
@staticmethod
def delete_ignored_chan(guild_id: int, chan_id: int):
con, cur = Db.connect()
try:
cur.execute(
"DELETE FROM public.rankings_chan WHERE guild_id = {}::text AND chan_id = {}::text;".format(
str(guild_id), str(chan_id)))
except Exception as err:
print(err)
con.rollback()
con.commit()
| [
"numpy.array",
"modules.sql.dbConnect.Db.connect"
] | [((1754, 1766), 'modules.sql.dbConnect.Db.connect', 'Db.connect', ([], {}), '()\n', (1764, 1766), False, 'from modules.sql.dbConnect import Db\n'), ((2365, 2377), 'modules.sql.dbConnect.Db.connect', 'Db.connect', ([], {}), '()\n', (2375, 2377), False, 'from modules.sql.dbConnect import Db\n'), ((2821, 2833), 'modules.sql.dbConnect.Db.connect', 'Db.connect', ([], {}), '()\n', (2831, 2833), False, 'from modules.sql.dbConnect import Db\n'), ((3292, 3304), 'modules.sql.dbConnect.Db.connect', 'Db.connect', ([], {}), '()\n', (3302, 3304), False, 'from modules.sql.dbConnect import Db\n'), ((3874, 3886), 'modules.sql.dbConnect.Db.connect', 'Db.connect', ([], {}), '()\n', (3884, 3886), False, 'from modules.sql.dbConnect import Db\n'), ((4343, 4355), 'modules.sql.dbConnect.Db.connect', 'Db.connect', ([], {}), '()\n', (4353, 4355), False, 'from modules.sql.dbConnect import Db\n'), ((4949, 4961), 'modules.sql.dbConnect.Db.connect', 'Db.connect', ([], {}), '()\n', (4959, 4961), False, 'from modules.sql.dbConnect import Db\n'), ((5514, 5526), 'modules.sql.dbConnect.Db.connect', 'Db.connect', ([], {}), '()\n', (5524, 5526), False, 'from modules.sql.dbConnect import Db\n'), ((5983, 5995), 'modules.sql.dbConnect.Db.connect', 'Db.connect', ([], {}), '()\n', (5993, 5995), False, 'from modules.sql.dbConnect import Db\n'), ((6376, 6388), 'modules.sql.dbConnect.Db.connect', 'Db.connect', ([], {}), '()\n', (6386, 6388), False, 'from modules.sql.dbConnect import Db\n'), ((6817, 6829), 'modules.sql.dbConnect.Db.connect', 'Db.connect', ([], {}), '()\n', (6827, 6829), False, 'from modules.sql.dbConnect import Db\n'), ((7414, 7426), 'modules.sql.dbConnect.Db.connect', 'Db.connect', ([], {}), '()\n', (7424, 7426), False, 'from modules.sql.dbConnect import Db\n'), ((4746, 4760), 'numpy.array', 'np.array', (['rows'], {}), '(rows)\n', (4754, 4760), True, 'import numpy as np\n'), ((5366, 5380), 'numpy.array', 'np.array', (['rows'], {}), '(rows)\n', (5374, 5380), True, 'import numpy as np\n')] |
import logging
from collections import namedtuple
import numpy as np
from tqdm import tqdm
from es.draw_utils import find_best_shape, get_reward_config
from es.environment import Environment
from es.optimizer import GradientDescent, Adam, Momentum, Nesterov, Adadelta, Adagrad, RMSProp
from es.strategy import RandomStrategy, EvolutionStrategy, SimpleEvolutionStrategy
from shapes.canvas import Canvas
from shapes.shape import from_shape_mode
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
class Drawer(object):
def __init__(self, alpha=0.5, random=100, sample=10, step=100, learning_rate=4.64,
sigma_factor=0.03, algorithm='natural', optimizer='adam', shape_mode=0, seed=None,
rewards='mse', rewards_thresholds='1', rewards_coeffs='1e-6,1',
scale_decay=0.00005, background=None, save_all=False, save_actions=False, channels=1,
representation=False):
self.alpha = alpha
self.random = random
self.sample = sample
self.step = step
self.learning_rate = learning_rate
self.sigma_factor = sigma_factor
self.algorithm = algorithm
self.optimizer = optimizer
self.shape_mode = shape_mode
self.seed = seed
self.rewards = rewards
self.rewards_thresholds = rewards_thresholds
self.rewards_coeffs = rewards_coeffs
self.scale_decay = scale_decay
self.background = background
self.save_all = save_all
self.save_actions = save_actions
self.channels = channels
self.rng = np.random.RandomState(
seed=self.seed if self.seed is not None else np.random.randint(0, 2 ** 32))
assert shape_mode or not representation, "Cannot use representation with shape mode = 0"
self.representation = representation
def draw(self, images, n):
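        # for each image: seed every shape with the best of `random` random candidates,
        # then refine it for `step` iterations with the chosen strategy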
result = self.initialize_result(images, n)
for idx in tqdm(range(len(images))):
env = self.init(input=images[idx], n=n)
random_strategy = RandomStrategy(
self.random,
*env.observation_shape(),
alpha=self.alpha,
shape_mode=self.shape_mode,
rng=self.rng,
decay=self.scale_decay
)
for i in range(1, n + 1):
best_score, best_shape = find_best_shape(env=env, strategy=random_strategy,
action=i)
strategy = self.pick_strategy(best_shape=best_shape, env=env)
for j in range(1, self.step + 1):
score, shape = find_best_shape(env=env, strategy=strategy, action=i)
if score < best_score:
best_score = score
best_shape = shape
env.step(shape=best_shape, n=n)
if self.save_all:
if self.representation:
result[i - 1][idx] = env.representation
else:
result[i - 1][idx] = env.canvas.img
if not self.save_all:
if self.representation:
result[idx] = env.representation
else:
result[idx] = env.canvas.img
return result
def initialize_result(self, images, n):
if self.representation:
shape_cls = from_shape_mode(self.shape_mode)
params_len = shape_cls.params_len()
shape = (images.shape[0], n, params_len)
else:
shape = images.shape
if self.save_all:
return [np.empty(shape) for _ in range(n)]
else:
return np.empty(shape)
def init(self, input, n):
canvas = Canvas(
target=input,
size=max(input.shape[0], input.shape[1]),
background=self.background,
channels=self.channels
)
Config = namedtuple('Config', ['n', 'rewards', 'rewards_thresholds', 'rewards_coeffs'])
config = Config(
n=n,
rewards=self.rewards,
rewards_thresholds=self.rewards_thresholds,
rewards_coeffs=self.rewards_coeffs
)
reward_config = get_reward_config(canvas, config)
env = Environment(
canvas=canvas,
reward_config=reward_config,
num_shapes=n,
save_actions=self.save_actions
)
return env
def pick_strategy(self, best_shape, env):
if self.algorithm == 'natural':
optimizer = {
'sgd': GradientDescent,
'momentum': Momentum,
'nesterov': Nesterov,
'adagrad': Adagrad,
'rmsprop': RMSProp,
'adadelta': Adadelta,
'adam': Adam
}[self.optimizer]
strategy = EvolutionStrategy(
best_shape,
*env.observation_shape(),
alpha=self.alpha,
n=self.sample,
sigma_factor=self.sigma_factor,
optimizer=optimizer(
initial_params=best_shape.params(),
learning_rate=self.learning_rate
),
shape_mode=self.shape_mode,
rng=self.rng
)
elif self.algorithm == 'simple':
strategy = SimpleEvolutionStrategy(
best_shape,
*env.observation_shape(),
alpha=self.alpha,
n=self.sample,
sigma_factor=self.sigma_factor,
shape_mode=self.shape_mode,
rng=self.rng
)
else:
strategy = RandomStrategy(
self.sample,
*env.observation_shape(),
alpha=self.alpha,
rng=self.rng
)
return strategy
class RepresentationDrawer(object):
def __init__(self, shape_cls, size, channels, background):
self.shape_cls = shape_cls
self.size = size
self.channels = channels
self.background = background
def draw(self, representation):
result = np.empty((representation.shape[0], self.size, self.size, self.channels))
for i, params in enumerate(representation):
c = Canvas.without_target(
size=self.size,
background=self.background,
channels=self.channels
)
for p in params:
shape = self.shape_cls.from_normalized_params(self.size, self.size, *p)
c.add(shape=shape)
result[i] = c.img
return result
| [
"logging.basicConfig",
"logging.getLogger",
"collections.namedtuple",
"es.draw_utils.get_reward_config",
"es.environment.Environment",
"numpy.random.randint",
"numpy.empty",
"shapes.shape.from_shape_mode",
"es.draw_utils.find_best_shape",
"shapes.canvas.Canvas.without_target"
] | [((446, 485), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (465, 485), False, 'import logging\n'), ((492, 519), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (509, 519), False, 'import logging\n'), ((3988, 4066), 'collections.namedtuple', 'namedtuple', (['"""Config"""', "['n', 'rewards', 'rewards_thresholds', 'rewards_coeffs']"], {}), "('Config', ['n', 'rewards', 'rewards_thresholds', 'rewards_coeffs'])\n", (3998, 4066), False, 'from collections import namedtuple\n'), ((4281, 4314), 'es.draw_utils.get_reward_config', 'get_reward_config', (['canvas', 'config'], {}), '(canvas, config)\n', (4298, 4314), False, 'from es.draw_utils import find_best_shape, get_reward_config\n'), ((4330, 4435), 'es.environment.Environment', 'Environment', ([], {'canvas': 'canvas', 'reward_config': 'reward_config', 'num_shapes': 'n', 'save_actions': 'self.save_actions'}), '(canvas=canvas, reward_config=reward_config, num_shapes=n,\n save_actions=self.save_actions)\n', (4341, 4435), False, 'from es.environment import Environment\n'), ((6255, 6327), 'numpy.empty', 'np.empty', (['(representation.shape[0], self.size, self.size, self.channels)'], {}), '((representation.shape[0], self.size, self.size, self.channels))\n', (6263, 6327), True, 'import numpy as np\n'), ((3437, 3469), 'shapes.shape.from_shape_mode', 'from_shape_mode', (['self.shape_mode'], {}), '(self.shape_mode)\n', (3452, 3469), False, 'from shapes.shape import from_shape_mode\n'), ((3733, 3748), 'numpy.empty', 'np.empty', (['shape'], {}), '(shape)\n', (3741, 3748), True, 'import numpy as np\n'), ((6397, 6491), 'shapes.canvas.Canvas.without_target', 'Canvas.without_target', ([], {'size': 'self.size', 'background': 'self.background', 'channels': 'self.channels'}), '(size=self.size, background=self.background, channels=\n self.channels)\n', (6418, 6491), False, 'from shapes.canvas import Canvas\n'), ((2370, 2430), 'es.draw_utils.find_best_shape', 'find_best_shape', ([], {'env': 'env', 'strategy': 'random_strategy', 'action': 'i'}), '(env=env, strategy=random_strategy, action=i)\n', (2385, 2430), False, 'from es.draw_utils import find_best_shape, get_reward_config\n'), ((3665, 3680), 'numpy.empty', 'np.empty', (['shape'], {}), '(shape)\n', (3673, 3680), True, 'import numpy as np\n'), ((1656, 1685), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2 ** 32)'], {}), '(0, 2 ** 32)\n', (1673, 1685), True, 'import numpy as np\n'), ((2653, 2706), 'es.draw_utils.find_best_shape', 'find_best_shape', ([], {'env': 'env', 'strategy': 'strategy', 'action': 'i'}), '(env=env, strategy=strategy, action=i)\n', (2668, 2706), False, 'from es.draw_utils import find_best_shape, get_reward_config\n')] |
from __future__ import absolute_import, print_function, division
from time import time
import pandas as pd
from six.moves import range
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.utils.validation import *
import matplotlib.pyplot as plt
class MICE():
def __init__(
self,
model=LinearRegression(),
init_fill_method="mean",
min_value=None,
max_value=None,
verbose=True):
        self.min_value = min_value
        self.max_value = max_value
        self.fill_method = init_fill_method
        self.model = model
self.verbose = verbose
def perform_imputation_round(
self,
X_filled,
missing_mask,
observed_mask):
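        # one chained-equations sweep: for every column with missing entries,
        # regress its observed values on all other columns and predict the gaps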
n_rows, n_cols = X_filled.shape
n_missing_for_each_column = missing_mask.sum(axis=0)
ordered_column_indices = np.arange(n_cols)
        for col_idx in range(n_cols):
missing_row_mask_for_this_col = missing_mask[:, col_idx]
n_missing_for_this_col = n_missing_for_each_column[col_idx]
if n_missing_for_this_col > 0:
observed_row_mask_for_this_col = observed_mask[:, col_idx]
column_values = X_filled[:, col_idx]
#print(column_values)
column_values_observed = column_values[observed_row_mask_for_this_col]
other_column_indices = np.concatenate([
ordered_column_indices[:col_idx],
ordered_column_indices[col_idx + 1:]
])
X_other_cols = X_filled[:, other_column_indices]
#print(X_other_cols)
#print(X_other_cols)
X_other_cols_observed = X_other_cols[observed_row_mask_for_this_col]
#print(X_other_cols_observed)
X_other_cols_missing = X_other_cols[missing_row_mask_for_this_col]
#print(X_other_cols_missing)
lr = self.model
lr.fit(X_other_cols_observed,column_values_observed)
X_other_cols_missing = X_other_cols[missing_row_mask_for_this_col]
y1 = lr.predict(X_other_cols_missing)
#print(y2)
X_filled[missing_row_mask_for_this_col, col_idx] = y1
return X_filled
def initialize(self, X, missing_mask, observed_mask):
X_filled = X.copy()
for col_idx in range(len(X.T)):
missing_mask_col = missing_mask[:, col_idx]
n_missing = missing_mask_col.sum()
if n_missing > 0:
observed_row_mask_for_col = observed_mask[:, col_idx]
column = X_filled[:, col_idx]
observed_column = column[observed_row_mask_for_col]
if self.fill_method == "mean":
fill_values = np.mean(observed_column)
else:
raise ValueError("Invalid fill method %s" % self.fill_method)
X_filled[missing_mask_col, col_idx] = fill_values
return X_filled
def multiple_imputations(self, X):
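        # run `total_rounds` imputation sweeps, collecting the imputed values of
        # each round; complete() averages them afterwards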
start_t = time()
X = np.asarray(X)
missing_mask = np.isnan(X)
missing_mask = np.asarray(missing_mask)
observed_mask = ~missing_mask
X_filled = self.initialize(
X,
missing_mask=missing_mask,
observed_mask=observed_mask)
#print(X_filled)
results_list = []
total_rounds = 10
for m in range(total_rounds):
print("[MICE] Starting imputation round %d/%d, elapsed time %0.3f" % (
m + 1,
total_rounds,
time() - start_t))
X_filled = self.perform_imputation_round(
X_filled=X_filled,
missing_mask=missing_mask,
observed_mask=observed_mask)
results_list.append(X_filled[missing_mask])
return np.array(results_list), missing_mask
def complete(self, X):
print("[MICE] Completing matrix with shape %s" % (X.shape,))
X_completed = np.array(X.copy())
imputed_arrays, missing_mask = self.multiple_imputations(X)
#print(imputed_arrays)
average_imputated_values = imputed_arrays.mean(axis=0)
X_completed[missing_mask] = average_imputated_values
return X_completed
imputer = MICE()
X = pd.read_csv(r'C:\Users\admin_\Desktop\dataset.csv')
X = X.replace(0, np.nan)
#print(X)
Y = imputer.complete(X)
a = np.float32(Y)
#plt.plot(a)
#plt.show()
print(a)
#np.savetxt(r"C:\Users\admin_\Desktop\accc3.csv",a) | [
"numpy.mean",
"six.moves.range",
"numpy.float32",
"pandas.read_csv",
"numpy.asarray",
"numpy.array",
"numpy.isnan",
"numpy.concatenate",
"time.time",
"sklearn.linear_model.LinearRegression",
"numpy.arange"
] | [((4590, 4644), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\admin_\\\\Desktop\\\\dataset.csv"""'], {}), "('C:\\\\Users\\\\admin_\\\\Desktop\\\\dataset.csv')\n", (4601, 4644), True, 'import pandas as pd\n'), ((4702, 4715), 'numpy.float32', 'np.float32', (['Y'], {}), '(Y)\n', (4712, 4715), True, 'import numpy as np\n'), ((358, 376), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (374, 376), False, 'from sklearn.linear_model import LinearRegression\n'), ((947, 964), 'numpy.arange', 'np.arange', (['n_cols'], {}), '(n_cols)\n', (956, 964), True, 'import numpy as np\n'), ((3268, 3274), 'time.time', 'time', ([], {}), '()\n', (3272, 3274), False, 'from time import time\n'), ((3288, 3301), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (3298, 3301), True, 'import numpy as np\n'), ((3326, 3337), 'numpy.isnan', 'np.isnan', (['X'], {}), '(X)\n', (3334, 3337), True, 'import numpy as np\n'), ((3362, 3386), 'numpy.asarray', 'np.asarray', (['missing_mask'], {}), '(missing_mask)\n', (3372, 3386), True, 'import numpy as np\n'), ((3659, 3678), 'six.moves.range', 'range', (['total_rounds'], {}), '(total_rounds)\n', (3664, 3678), False, 'from six.moves import range\n'), ((4133, 4155), 'numpy.array', 'np.array', (['results_list'], {}), '(results_list)\n', (4141, 4155), True, 'import numpy as np\n'), ((1491, 1584), 'numpy.concatenate', 'np.concatenate', (['[ordered_column_indices[:col_idx], ordered_column_indices[col_idx + 1:]]'], {}), '([ordered_column_indices[:col_idx], ordered_column_indices[\n col_idx + 1:]])\n', (1505, 1584), True, 'import numpy as np\n'), ((2984, 3008), 'numpy.mean', 'np.mean', (['observed_column'], {}), '(observed_column)\n', (2991, 3008), True, 'import numpy as np\n'), ((3860, 3866), 'time.time', 'time', ([], {}), '()\n', (3864, 3866), False, 'from time import time\n')] |
import networkx as nx
import numpy as np
import os
from collections import namedtuple
from itertools import chain
from matplotlib import colors
from matplotlib import pyplot as plt
from numpy.random import rand, randint, shuffle
from scipy.spatial import Delaunay, Voronoi
from perlin_noise import PerlinNoise
VERSION = "v0.1.0"
flip_funcs = dict(central=lambda m: np.flip(m),
diagonal=lambda m: np.transpose(m),
horizontal=lambda m: np.flip(m, axis=1))
Neighborhood = namedtuple("Neighborhood", "north west south east H V")
def neighborhood(i, j):
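    # 8-connected (Moore) neighbourhood of cell (i, j)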
return np.array([[i, j+1],
[i-1, j+1],
[i-1, j],
[i-1, j-1],
[i, j-1],
[i+1, j-1],
[i+1, j],
[i+1, j+1]])
def to_str(array, sep=","):
return sep.join([str(a) for a in array])
class MapGenerationFailure(Exception):
pass
class Map:
starting_mills = None
def __init__(self, size=45, padded_size=79,
symmetry="central",
min_starting_dist=15,
seed=None):
if seed is None:
seed = randint(2**31)
self.seed = seed
np.random.seed(seed)
self.to_quadrant_funcs = dict(central=self.to_upper_triangle,
diagonal=self.to_upper_triangle,
horizontal=self.to_upper_half)
self.reflect_point_funcs = dict(central=self.reflect_central)
self.to_quadrant = self.to_quadrant_funcs[symmetry]
self.reflect_point = self.reflect_point_funcs[symmetry]
self.symmetry = symmetry
self.symmetric_array = flip_funcs[symmetry]
self.size = size
self.padded_size = padded_size
pad_before = (padded_size - size)//2
pad_after = padded_size - size - pad_before
instart = pad_before
instop = pad_before + size
self.instart = instart
self.instop = instop
self.inslice = slice(instart, instop)
self.padded_tiles = np.zeros((self.padded_size, self.padded_size), dtype=int)
self.min_starting_dist = min_starting_dist
self.ownerships = -2*np.ones((self.padded_size, self.padded_size), dtype=int)
pnoise = PerlinNoise(scale=20, octaves=2) # Take info from constructor
elevations = pnoise.noise_grid(range(size), range(size)) + 0.5
elevations += self.symmetric_array(elevations)
elevations = np.array(np.round(3/2*elevations), dtype=int)
self.padded_elevations = np.zeros((self.padded_size, self.padded_size), dtype=int)
self.padded_elevations[self.inslice, self.inslice] = elevations
for k in range(pad_before):
self.padded_elevations[k, self.inslice] = elevations[0, :]
self.padded_elevations[self.inslice, k] = elevations[:, 0]
for k in range(pad_after):
self.padded_elevations[-(k+1), self.inslice] = elevations[-1, :]
self.padded_elevations[self.inslice, -(k+1)] = elevations[:, -1]
self.padded_elevations[:instart, :instart] = elevations[0, 0]*np.ones((pad_before, pad_before))
self.padded_elevations[:instart, instop:] = elevations[0, -1]*np.ones((pad_before, pad_after))
self.padded_elevations[instop:, :instart] = elevations[-1, 0]*np.ones((pad_after, pad_before))
self.padded_elevations[instop:, instop:] = elevations[-1, -1]*np.ones((pad_after, pad_after))
water = self.padded_elevations == 0
self.terraform()
self.waypoint_mask = np.ones_like(self.padded_tiles, dtype=bool)
self.waypoint_mask[self.inslice, self.inslice] = False
self.add_blockers()
self.add_waypoints()
self.mills = []
self.add_mills()
self.remove_head_to_head()
water = np.logical_and(self.padded_elevations < 3, water)
water = np.logical_and(np.logical_not(self.waypoint_mask), water)
self.padded_tiles[water] = tiles["water"].index
self.padded_elevations[water] = 2
self.padded_elevations -= 2
cliffs = self.padded_elevations % 2 == 1
self.padded_tiles[cliffs] = tiles["cliff"].index
self.add_roads()
self.close_map()
@property
def elevations(self):
return self.padded_elevations[self.inslice, self.inslice]
@property
def local_waypoints(self):
return np.array(self.waypoints) - self.instart
@property
def tiles(self):
return self.padded_tiles[self.inslice, self.inslice]
def add_blockers(self):
# TODO make noise scale and f constructor parameters/random
pnoise = PerlinNoise(scale=30, octaves=1)
f = 0.8 # Blockers amount
density = pnoise.noise_grid(range(self.padded_size), range(self.padded_size)) + 0.5
density = f*(density + self.symmetric_array(density))/2
rs = rand(self.padded_size, self.padded_size)
rs = (rs + self.symmetric_array(rs))/2
# plt.matshow(density)
# plt.colorbar()
self.padded_tiles[np.logical_and(rs < density, self.padded_tiles == 0)] = tiles["blocker"].index
def add_mill(self, x, y, layout, start=False):
self.mills.append((x, y))
self.mills.append(self.reflect_point(x, y))
flips = randint(2, size=3)
if flips[0] == 1:
layout = np.flip(layout, axis=0)
if flips[1] == 1:
layout = np.flip(layout, axis=1)
if flips[2] == 1:
layout = np.transpose(layout)
Lx, Ly = layout.shape
el = np.max(self.padded_elevations[x-3:x-3+2*Lx, y-3:y-3+2*Ly])
if el % 2 == 1:
el += 1
for k1 in range(Lx):
i = x + 2*k1 - 3
for k2 in range(Ly):
j = y + 2*k2 - 3
tile = layout[k1, k2]
for di in range(2):
ii = i + di
for dj in range(2):
jj = j + dj
self.padded_tiles[ii, jj] = tile.index
self.terraform_tile(ii, jj, el)
rii, rjj = self.reflect_point(ii, jj)
self.padded_tiles[rii, rjj] = tile.index
self.terraform_tile(rii, rjj, el)
if start:
self.ownerships[ii, jj] = 0
self.ownerships[rii, rjj] = 1
def add_mills(self):
starting_mill_placed = False
for i, j in self.waypoints:
if (self.distance_to_map_edge(i, j) >= 5
and self.distance_to_reflection(i, j) >= self.min_starting_dist):
starting_mill_placed = True
self.starting_mills = [(i, j), self.reflect_point(i, j)]
self.add_mill(i, j, starting_mill_layout, start=True)
break
if not starting_mill_placed:
print("Unable to find starting mill")
raise MapGenerationFailure("No position was good for starting mill.")
wps = np.array(self.waypoints)
shuffle(wps)
n_mills = 0
for i, j in wps:
if (self.distance_to_map_edge(i, j) >= 5
and self.distance_to_reflection(i, j) >= 10
and self.distance_to_closest_mill(i, j) >= 10):
n_mills += 2
self.add_mill(i, j, mill_layout)
if n_mills < 4:
raise MapGenerationFailure(f"Only able to place {n_mills} mills.")
def add_roads(self):
w, rw = self.starting_mills
road_closed = False
connected = [w]
rconnected = [rw]
road = []
while not road_closed:
neigs = list(self.waypoint_network.neighbors(w))
wend = neigs[randint(len(neigs))]
rwend = self.reflect_point(*wend)
road += self.raytrace(*w, *wend, width=4)
road += self.raytrace(*rw, *rwend, width=4)
if wend in rconnected:
road_closed = True
connected.append(wend)
rconnected.append(rwend)
w, rw = wend, rwend
for mill in self.mills:
if mill not in connected and mill not in rconnected:
rmill = self.reflect_point(*mill)
wend = connected[randint(len(connected))]
rwend = self.reflect_point(*wend)
road += self.raytrace(*mill, *wend, width=3)
road += self.raytrace(*rmill, *rwend, width=3)
connected.append(mill)
rconnected.append(rmill)
for i, j in road:
current = self.padded_tiles[i, j]
if current == tiles["blocker"].index or current == tiles["grass"].index:
self.padded_tiles[i, j] = tiles["road"].index
elif current == tiles["cliff"].index:
self.padded_tiles[i, j] = tiles["ramp"].index
def add_waypoints(self):
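        # scatter random points, relax each towards its Voronoi-cell centroid
        # (rate f), then build a Delaunay graph over the surviving waypoints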
n = randint(32, 48)
positions = randint(0, self.padded_size, size=(n, 2))
waypoints = list(positions)
waypoints.extend([self.reflect_point(*pos) for pos in positions])
        waypoints = np.array(waypoints, dtype=float)  # float so the relaxation below keeps sub-tile moves
vor = Voronoi(waypoints)
f = 0.2 # Rate of regularization # TODO pass it from constructor
for k in range(len(waypoints)):
region = vor.regions[vor.point_region[k]]
point = waypoints[k]
if len(region) > 0 and np.min(region) >= 0:
vertices = np.array([vor.vertices[j] for j in region])
waypoints[k] = f*np.mean(vertices, axis=0) + (1 - f)*point
waypoints = np.round(waypoints)
# Keep only the original waypoints and re-reflect them ensure symmetry
waypoints = list(waypoints[:n])
self.waypoints = []
for i, j in waypoints:
if self.instart < i < self.instop and self.instart < j < self.instop:
self.waypoints.append([i, j])
self.waypoints.append(self.reflect_point(i, j))
self.waypoints = np.array(sorted(self.waypoints, key=lambda p:-self.distance_to_reflection(*p)), dtype=int)
delaunay = Delaunay(self.waypoints)
self.waypoint_network = nx.Graph()
for simplex in delaunay.simplices:
v1, v2, v3 = [tuple(v) for v in np.array(delaunay.points[simplex], dtype=int)]
self.waypoint_network.add_edge(v1, v2)
self.waypoint_network.add_edge(v2, v3)
self.waypoint_network.add_edge(v3, v1)
def close_map(self):
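        # seal the map border with blockers; use cliff tiles where the adjacent elevation is odd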
el = self.padded_elevations[self.instart-1,:]
ramps = el % 2 == 1
self.padded_tiles[self.instart-2,:] = tiles["blocker"].index
self.padded_tiles[self.instart-2,:][ramps] = tiles["cliff"].index
el = self.padded_elevations[self.instop+1,:]
ramps = el % 2 == 1
self.padded_tiles[self.instop+1,:] = tiles["blocker"].index
self.padded_tiles[self.instop+1,:][ramps] = tiles["cliff"].index
el = self.padded_elevations[:,self.instart-1]
ramps = el % 2 == 1
self.padded_tiles[:,self.instart-2] = tiles["blocker"].index
self.padded_tiles[:,self.instart-2][ramps] = tiles["cliff"].index
el = self.padded_elevations[:,self.instop+1]
ramps = el % 2 == 1
self.padded_tiles[:,self.instop+1] = tiles["blocker"].index
self.padded_tiles[:,self.instop+1][ramps] = tiles["cliff"].index
def distance(self, i1, j1, i2, j2):
return np.max(np.abs([i1 - i2, j1 - j2]))
def distance_to_closest_mill(self, i, j):
if len(self.mills) == 0:
return 1000
ds = [self.distance(i, j, mi, mj) for mi, mj in self.mills]
return np.min(ds)
def distance_to_map_edge(self, i, j):
if not (self.instart < i < self.instop and self.instart < j < self.instop):
return -1 # Out of the map
return np.min([i - self.instart,
self.instop - i,
j - self.instart,
self.instop - j])
def distance_to_reflection(self, i, j):
ri, rj = self.reflect_point(i, j)
return self.distance(i, j, ri, rj)
def is_in_quadrant(self, i, j):
qi, qj = self.to_quadrant(i, j)
return qi == i and qj == j
def raytrace(self, x1, y1, x2, y2, width=0):
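        # trace a thick line by sweeping width+1 parallel thin rays along the perpendicular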
dx = x2 - x1
dy = y2 - y1
perp = np.array([-dy, dx])/np.sqrt(dx**2 + dy**2)
v0 = np.array([x1, y1]) - width*perp/2
v1 = np.array([x2, y2]) - width*perp/2
ts = np.linspace(0, width, int(np.ceil(width)) + 1)
tiles = set()
for t in ts:
tiles.update(self.raytrace_thin(v0 + t*perp, v1 + t*perp))
return list(tiles)
def raytrace_thin(self, v0, v1):
# The equation of the ray is v = v0 + t*d
d = v1 - v0
inc = np.sign(d) # Gives the direction in which the ray progress
tile = np.array(np.round(v0), dtype=int)
endtile = np.array(np.round(v1), dtype=int)
if d[0] == 0:
return [(tile[0], tile[1] + k*inc[1]) for k in range(np.abs(tile[0] - endtile[0]))]
if d[1] == 0:
return [(tile[0] + k*inc[0], tile[1]) for k in range(np.abs(tile[1] - endtile[1]))]
v = v0
tiles = [tuple(tile)]
# Continue as long as we are not in the last tile
while np.max(np.abs(v1 - v)) > 0.5:
# L = (Lx, Ly) where Lx is the x coordinate of the next vertical
# line and Ly the y coordinate of the next horizontal line
L = tile + 0.5*inc
# Solve the equation v + d*t == L for t, simultaneously for the next
# horizontal line and vertical line
t = (L - v)/d
if t[0] < t[1]: # The vertical line is intersected first
tile[0] += inc[0]
v += t[0]*d
else: # The horizontal line is intersected first
tile[1] += inc[1]
v += t[1]*d
tiles.append(tuple(tile))
return tiles
def reflect_central(self, i, j):
return self.instop + self.instart - i - 1, self.instop + self.instart - j - 1
def remove_head_to_head(self):
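        # smooth single-tile ramp collisions horizontally, then vertically by
        # transposing the elevation grid and repeating the horizontal pass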
change_detected = True
while change_detected:
change_detected = self.remove_horizontal_head_to_head()
self.padded_elevations = np.transpose(self.padded_elevations)
change_detected = True
while change_detected:
change_detected = self.remove_horizontal_head_to_head()
self.padded_elevations = np.transpose(self.padded_elevations)
def remove_horizontal_head_to_head(self):
change_detected = False
for i in range(1, self.padded_size-2):
for j in range(1, self.padded_size-2):
current = self.padded_elevations[i, j]
if current % 2 == 1:
west = self.padded_elevations[i, j-1]
if west - current == 1:
east = self.padded_elevations[i, j+1]
east2 = self.padded_elevations[i, j+2]
if east - current == 1:
change_detected = True
self.padded_elevations[i, j] += 1
elif east - current == 0 and east2 - current == 1:
change_detected = True
self.padded_elevations[i, j] += 1
self.padded_elevations[i, j+1] += 1
return change_detected
def save_map(self):
processed = np.zeros_like(self.padded_tiles, dtype=bool)
elevations = []
terrains = []
entities = []
entity_id = 1
start_tiles = []
for j in range(self.padded_size):
for i in range(self.padded_size):
t = ts[self.padded_tiles[i, j]]
el = self.padded_elevations[i, j]
elevations.append(el)
if t == tiles["ramp"]:
terrains.append(terrainkeys["grass_ramp"])
continue
if t == tiles["cliff"]:
terrains.append(terrainkeys["grass_cliff"])
continue
if t == tiles["water"]:
terrains.append(terrainkeys["water"])
continue
if not processed[i, j]:
processed[i, j] = True
if t == tiles["blocker"]:
blocker = templates["autumn_rocks"].format(
x=i,
y=j,
h=el,
id=entity_id
)
entity_id += 1
entities.append(blocker)
if t == tiles["farm"] or t == tiles["pig"]:
farm = templates["farmland"].format(
id=entity_id,
i=i,
j=j,
x=i + 0.5,
y=j + 0.5,
h=el
)
entity_id += 1
entities.append(farm)
processed[i:i+2, j:j+2] = True
if t == tiles["pig"]:
pig = templates["structure_farm"].format(
id=entity_id,
i=i,
j=j,
x=i + 0.5,
y=j + 0.5,
h=el,
owner_id=self.ownerships[i,j]
)
entity_id += 1
entities.append(pig)
if t == tiles["mill"] or t == tiles["start"]:
mill = templates["windmill"].format(
id=entity_id,
i=i,
j=j,
x=i + 0.5,
y=j + 0.5,
h=el
)
entity_id += 1
entities.append(mill)
processed[i:i+2, j:j+2] = True
if t == tiles["start"]:
gristmill = templates["structure_gristmill"].format(
id=entity_id,
i=i,
j=j,
x=i + 0.5,
y=j + 0.5,
h=el,
owner_id=self.ownerships[i, j]
)
entities.append(gristmill)
entity_id += 1
start_tiles.append((i, j))
terrains.append(terrainkeys["grass"])
start0, start1 = start_tiles
mapname = f"kestrl_{VERSION}_{self.seed}"
mapxml = templates["map"].format(
map_name=mapname,
dimx=self.padded_size,
dimy=self.padded_size,
instartx=self.instart - 1,
instopx=self.instop + 1,
instarty=self.instart - 1,
instopy=self.instop + 1,
padded_insizex=self.instop - self.instart + 2,
padded_insizey=self.instop - self.instart + 2,
starting_mill_0=to_str(start0),
starting_mill_1=to_str(start1),
last_entity_id=entity_id - 1,
terrains=to_str(terrains),
elevations=to_str(elevations),
entities=to_str(entities, sep="\n"))
with open(f"maps/{mapname}.xml", "w", encoding="utf-8") as file:
file.write(mapxml)
def set_axis(self, ax):
ax.set_xticks(np.arange(self.padded_size) + 0.5, minor='true')
ax.set_yticks(np.arange(self.padded_size) + 0.5, minor='true')
ax.grid(color="black", which='minor', alpha=0.2)
def terraform(self):
el = self.padded_elevations
self.padded_elevations = np.zeros_like(self.padded_elevations, dtype=int)
for i in range(self.padded_size):
for j in range(self.padded_size):
target_el = 2*el[i, j]
if target_el > self.padded_tiles[i, j]:
self.terraform_tile(i, j, target_el, allow_down=False)
def terraform_tile(self, i, j, el, allow_down=False):
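        # recursively pull neighbours to within one elevation level of this tile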
if i == 0 or j == 0 or i == self.padded_size - 1 or j == self.padded_size - 1:
return
diff = el - self.padded_elevations[i, j]
if diff < 0 and not allow_down:
return
self.padded_elevations[i, j] = el
for m, n in neighborhood(i, j):
other = self.padded_elevations[m, n]
if el - other >= 2:
self.terraform_tile(m, n, el - 1, allow_down=allow_down)
elif other - el >= 2:
self.terraform_tile(m, n, el + 1, allow_down=allow_down)
def to_upper_half(self, x, y):
if x > self.size/2:
x = self.size - x
return np.array([x, y])
def to_upper_triangle(self, x, y):
return np.asarray(sorted([x, y]))
class Tile:
index = 0
def __init__(self, name, size=1, color="black"):
self.index = Tile.index
Tile.index += 1
self.name = name
self.size = size
self.color = color
ts = [Tile("grass", size=1, color="burlywood"),
Tile("free", size=2, color="burlywood"),
Tile("blocker", size=1, color="darkgreen"),
Tile("cliff", size=1, color="darkslategray"),
Tile("ramp", size=1, color="peru"),
Tile("water", size=1, color="dodgerblue"),
Tile("road", size=1, color="burlywood"),
Tile("pig", size=2, color="gold"),
Tile("farm", size=2, color="wheat"),
Tile("mill", size=2, color="white"),
Tile("start", size=2, color="darkred")]
tiles = {t.name:t for t in ts}
# 3 warren space are always guaranteed
mill_layout = [["farm", "farm", "farm", "free"],
["farm", "mill", "farm", "free"],
["farm", "farm", "farm", "free"]]
starting_mill_layout = [["free", "free", "free", "free"],
["farm", "farm", "pig", "free"],
["farm", "start", "pig", "free"],
["farm", "pig", "pig", "free"]]
mill_layout = np.asarray([[tiles[t] for t in row] for row in mill_layout])
starting_mill_layout = np.asarray([[tiles[t] for t in row] for row in starting_mill_layout])
templates = {}
for filename in os.listdir("templates"):
name = filename[:-4]
with open(f"templates/{filename}", "r") as file:
templates[name] = file.read()
terrainkeys = {"grass":"1",
"water":"2",
"grass_cliff":"3",
"grass_ramp":"4"}
N = 0
while N < 20:
try:
game_map = Map(size=45)
game_map.save_map()
N += 1
except MapGenerationFailure:
pass
# print(list(chain.from_iterable(game_map.padded_elevations)))
fig, axes = plt.subplots(1, 2, sharex=True, sharey=True)
xx, yy = np.transpose(game_map.waypoints)
tri = Delaunay(game_map.waypoints)
xlim = [game_map.instart - 0.5, game_map.instop - 0.5]
ylim = xlim
extent = xlim + ylim
for ax in axes:
game_map.set_axis(ax)
ax = axes[0]
color_bounds = np.arange(len(tiles) + 1) - 0.5
cmap = colors.ListedColormap([tile.color for tile in tiles.values()])
norm = colors.BoundaryNorm(color_bounds, cmap.N, clip=True)
ax.imshow(np.transpose(game_map.padded_tiles), cmap=cmap, norm=norm, extent=extent, origin="lower")
ax.set_xlim(xlim)
ax.set_ylim(ylim)
# ax.plot(xx, yy, "ks")
# ax.triplot(xx, yy, tri.simplices, "k-", alpha=0.3)
ax = axes[1]
ax.matshow(np.transpose(game_map.padded_elevations), extent=extent, origin="lower")
plt.show() | [
"numpy.sqrt",
"numpy.random.rand",
"numpy.logical_not",
"numpy.array",
"matplotlib.colors.BoundaryNorm",
"numpy.arange",
"numpy.flip",
"numpy.mean",
"os.listdir",
"numpy.asarray",
"numpy.max",
"numpy.random.seed",
"numpy.min",
"numpy.round",
"numpy.abs",
"numpy.ceil",
"collections.na... | [((514, 569), 'collections.namedtuple', 'namedtuple', (['"""Neighborhood"""', '"""north west south east H V"""'], {}), "('Neighborhood', 'north west south east H V')\n", (524, 569), False, 'from collections import namedtuple\n'), ((23105, 23165), 'numpy.asarray', 'np.asarray', (['[[tiles[t] for t in row] for row in mill_layout]'], {}), '([[tiles[t] for t in row] for row in mill_layout])\n', (23115, 23165), True, 'import numpy as np\n'), ((23189, 23258), 'numpy.asarray', 'np.asarray', (['[[tiles[t] for t in row] for row in starting_mill_layout]'], {}), '([[tiles[t] for t in row] for row in starting_mill_layout])\n', (23199, 23258), True, 'import numpy as np\n'), ((23292, 23315), 'os.listdir', 'os.listdir', (['"""templates"""'], {}), "('templates')\n", (23302, 23315), False, 'import os\n'), ((23787, 23831), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharex': '(True)', 'sharey': '(True)'}), '(1, 2, sharex=True, sharey=True)\n', (23799, 23831), True, 'from matplotlib import pyplot as plt\n'), ((23842, 23874), 'numpy.transpose', 'np.transpose', (['game_map.waypoints'], {}), '(game_map.waypoints)\n', (23854, 23874), True, 'import numpy as np\n'), ((23881, 23909), 'scipy.spatial.Delaunay', 'Delaunay', (['game_map.waypoints'], {}), '(game_map.waypoints)\n', (23889, 23909), False, 'from scipy.spatial import Delaunay, Voronoi\n'), ((24180, 24232), 'matplotlib.colors.BoundaryNorm', 'colors.BoundaryNorm', (['color_bounds', 'cmap.N'], {'clip': '(True)'}), '(color_bounds, cmap.N, clip=True)\n', (24199, 24232), False, 'from matplotlib import colors\n'), ((24545, 24555), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24553, 24555), True, 'from matplotlib import pyplot as plt\n'), ((606, 733), 'numpy.array', 'np.array', (['[[i, j + 1], [i - 1, j + 1], [i - 1, j], [i - 1, j - 1], [i, j - 1], [i + 1,\n j - 1], [i + 1, j], [i + 1, j + 1]]'], {}), '([[i, j + 1], [i - 1, j + 1], [i - 1, j], [i - 1, j - 1], [i, j - 1\n ], [i + 1, j - 1], [i + 1, j], [i + 1, j + 1]])\n', (614, 733), True, 'import numpy as np\n'), ((24243, 24278), 'numpy.transpose', 'np.transpose', (['game_map.padded_tiles'], {}), '(game_map.padded_tiles)\n', (24255, 24278), True, 'import numpy as np\n'), ((24471, 24511), 'numpy.transpose', 'np.transpose', (['game_map.padded_elevations'], {}), '(game_map.padded_elevations)\n', (24483, 24511), True, 'import numpy as np\n'), ((1273, 1293), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1287, 1293), True, 'import numpy as np\n'), ((2191, 2248), 'numpy.zeros', 'np.zeros', (['(self.padded_size, self.padded_size)'], {'dtype': 'int'}), '((self.padded_size, self.padded_size), dtype=int)\n', (2199, 2248), True, 'import numpy as np\n'), ((2406, 2438), 'perlin_noise.PerlinNoise', 'PerlinNoise', ([], {'scale': '(20)', 'octaves': '(2)'}), '(scale=20, octaves=2)\n', (2417, 2438), False, 'from perlin_noise import PerlinNoise\n'), ((2706, 2763), 'numpy.zeros', 'np.zeros', (['(self.padded_size, self.padded_size)'], {'dtype': 'int'}), '((self.padded_size, self.padded_size), dtype=int)\n', (2714, 2763), True, 'import numpy as np\n'), ((3727, 3770), 'numpy.ones_like', 'np.ones_like', (['self.padded_tiles'], {'dtype': 'bool'}), '(self.padded_tiles, dtype=bool)\n', (3739, 3770), True, 'import numpy as np\n'), ((3994, 4043), 'numpy.logical_and', 'np.logical_and', (['(self.padded_elevations < 3)', 'water'], {}), '(self.padded_elevations < 3, water)\n', (4008, 4043), True, 'import numpy as np\n'), ((4834, 4866), 'perlin_noise.PerlinNoise', 
'PerlinNoise', ([], {'scale': '(30)', 'octaves': '(1)'}), '(scale=30, octaves=1)\n', (4845, 4866), False, 'from perlin_noise import PerlinNoise\n'), ((5072, 5112), 'numpy.random.rand', 'rand', (['self.padded_size', 'self.padded_size'], {}), '(self.padded_size, self.padded_size)\n', (5076, 5112), False, 'from numpy.random import rand, randint, shuffle\n'), ((5482, 5500), 'numpy.random.randint', 'randint', (['(2)'], {'size': '(3)'}), '(2, size=3)\n', (5489, 5500), False, 'from numpy.random import rand, randint, shuffle\n'), ((5766, 5840), 'numpy.max', 'np.max', (['self.padded_elevations[x - 3:x - 3 + 2 * Lx, y - 3:y - 3 + 2 * Ly]'], {}), '(self.padded_elevations[x - 3:x - 3 + 2 * Lx, y - 3:y - 3 + 2 * Ly])\n', (5772, 5840), True, 'import numpy as np\n'), ((7268, 7292), 'numpy.array', 'np.array', (['self.waypoints'], {}), '(self.waypoints)\n', (7276, 7292), True, 'import numpy as np\n'), ((7301, 7313), 'numpy.random.shuffle', 'shuffle', (['wps'], {}), '(wps)\n', (7308, 7313), False, 'from numpy.random import rand, randint, shuffle\n'), ((9215, 9230), 'numpy.random.randint', 'randint', (['(32)', '(48)'], {}), '(32, 48)\n', (9222, 9230), False, 'from numpy.random import rand, randint, shuffle\n'), ((9251, 9292), 'numpy.random.randint', 'randint', (['(0)', 'self.padded_size'], {'size': '(n, 2)'}), '(0, self.padded_size, size=(n, 2))\n', (9258, 9292), False, 'from numpy.random import rand, randint, shuffle\n'), ((9441, 9460), 'numpy.array', 'np.array', (['waypoints'], {}), '(waypoints)\n', (9449, 9460), True, 'import numpy as np\n'), ((9476, 9494), 'scipy.spatial.Voronoi', 'Voronoi', (['waypoints'], {}), '(waypoints)\n', (9483, 9494), False, 'from scipy.spatial import Delaunay, Voronoi\n'), ((9931, 9950), 'numpy.round', 'np.round', (['waypoints'], {}), '(waypoints)\n', (9939, 9950), True, 'import numpy as np\n'), ((10460, 10484), 'scipy.spatial.Delaunay', 'Delaunay', (['self.waypoints'], {}), '(self.waypoints)\n', (10468, 10484), False, 'from scipy.spatial import Delaunay, Voronoi\n'), ((10517, 10527), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (10525, 10527), True, 'import networkx as nx\n'), ((12055, 12065), 'numpy.min', 'np.min', (['ds'], {}), '(ds)\n', (12061, 12065), True, 'import numpy as np\n'), ((12257, 12335), 'numpy.min', 'np.min', (['[i - self.instart, self.instop - i, j - self.instart, self.instop - j]'], {}), '([i - self.instart, self.instop - i, j - self.instart, self.instop - j])\n', (12263, 12335), True, 'import numpy as np\n'), ((13226, 13236), 'numpy.sign', 'np.sign', (['d'], {}), '(d)\n', (13233, 13236), True, 'import numpy as np\n'), ((14786, 14822), 'numpy.transpose', 'np.transpose', (['self.padded_elevations'], {}), '(self.padded_elevations)\n', (14798, 14822), True, 'import numpy as np\n'), ((14988, 15024), 'numpy.transpose', 'np.transpose', (['self.padded_elevations'], {}), '(self.padded_elevations)\n', (15000, 15024), True, 'import numpy as np\n'), ((16018, 16062), 'numpy.zeros_like', 'np.zeros_like', (['self.padded_tiles'], {'dtype': 'bool'}), '(self.padded_tiles, dtype=bool)\n', (16031, 16062), True, 'import numpy as np\n'), ((20748, 20796), 'numpy.zeros_like', 'np.zeros_like', (['self.padded_elevations'], {'dtype': 'int'}), '(self.padded_elevations, dtype=int)\n', (20761, 20796), True, 'import numpy as np\n'), ((21803, 21819), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (21811, 21819), True, 'import numpy as np\n'), ((373, 383), 'numpy.flip', 'np.flip', (['m'], {}), '(m)\n', (380, 383), True, 'import numpy as np\n'), ((422, 437), 'numpy.transpose', 
'np.transpose', (['m'], {}), '(m)\n', (434, 437), True, 'import numpy as np\n'), ((478, 496), 'numpy.flip', 'np.flip', (['m'], {'axis': '(1)'}), '(m, axis=1)\n', (485, 496), True, 'import numpy as np\n'), ((1216, 1232), 'numpy.random.randint', 'randint', (['(2 ** 31)'], {}), '(2 ** 31)\n', (1223, 1232), False, 'from numpy.random import rand, randint, shuffle\n'), ((2331, 2387), 'numpy.ones', 'np.ones', (['(self.padded_size, self.padded_size)'], {'dtype': 'int'}), '((self.padded_size, self.padded_size), dtype=int)\n', (2338, 2387), True, 'import numpy as np\n'), ((2627, 2655), 'numpy.round', 'np.round', (['(3 / 2 * elevations)'], {}), '(3 / 2 * elevations)\n', (2635, 2655), True, 'import numpy as np\n'), ((3284, 3317), 'numpy.ones', 'np.ones', (['(pad_before, pad_before)'], {}), '((pad_before, pad_before))\n', (3291, 3317), True, 'import numpy as np\n'), ((3388, 3420), 'numpy.ones', 'np.ones', (['(pad_before, pad_after)'], {}), '((pad_before, pad_after))\n', (3395, 3420), True, 'import numpy as np\n'), ((3491, 3523), 'numpy.ones', 'np.ones', (['(pad_after, pad_before)'], {}), '((pad_after, pad_before))\n', (3498, 3523), True, 'import numpy as np\n'), ((3594, 3625), 'numpy.ones', 'np.ones', (['(pad_after, pad_after)'], {}), '((pad_after, pad_after))\n', (3601, 3625), True, 'import numpy as np\n'), ((4075, 4109), 'numpy.logical_not', 'np.logical_not', (['self.waypoint_mask'], {}), '(self.waypoint_mask)\n', (4089, 4109), True, 'import numpy as np\n'), ((4579, 4603), 'numpy.array', 'np.array', (['self.waypoints'], {}), '(self.waypoints)\n', (4587, 4603), True, 'import numpy as np\n'), ((5244, 5296), 'numpy.logical_and', 'np.logical_and', (['(rs < density)', '(self.padded_tiles == 0)'], {}), '(rs < density, self.padded_tiles == 0)\n', (5258, 5296), True, 'import numpy as np\n'), ((5549, 5572), 'numpy.flip', 'np.flip', (['layout'], {'axis': '(0)'}), '(layout, axis=0)\n', (5556, 5572), True, 'import numpy as np\n'), ((5629, 5652), 'numpy.flip', 'np.flip', (['layout'], {'axis': '(1)'}), '(layout, axis=1)\n', (5636, 5652), True, 'import numpy as np\n'), ((5701, 5721), 'numpy.transpose', 'np.transpose', (['layout'], {}), '(layout)\n', (5713, 5721), True, 'import numpy as np\n'), ((11826, 11852), 'numpy.abs', 'np.abs', (['[i1 - i2, j1 - j2]'], {}), '([i1 - i2, j1 - j2])\n', (11832, 11852), True, 'import numpy as np\n'), ((12755, 12774), 'numpy.array', 'np.array', (['[-dy, dx]'], {}), '([-dy, dx])\n', (12763, 12774), True, 'import numpy as np\n'), ((12775, 12801), 'numpy.sqrt', 'np.sqrt', (['(dx ** 2 + dy ** 2)'], {}), '(dx ** 2 + dy ** 2)\n', (12782, 12801), True, 'import numpy as np\n'), ((12811, 12829), 'numpy.array', 'np.array', (['[x1, y1]'], {}), '([x1, y1])\n', (12819, 12829), True, 'import numpy as np\n'), ((12858, 12876), 'numpy.array', 'np.array', (['[x2, y2]'], {}), '([x2, y2])\n', (12866, 12876), True, 'import numpy as np\n'), ((13311, 13323), 'numpy.round', 'np.round', (['v0'], {}), '(v0)\n', (13319, 13323), True, 'import numpy as np\n'), ((13363, 13375), 'numpy.round', 'np.round', (['v1'], {}), '(v1)\n', (13371, 13375), True, 'import numpy as np\n'), ((9783, 9826), 'numpy.array', 'np.array', (['[vor.vertices[j] for j in region]'], {}), '([vor.vertices[j] for j in region])\n', (9791, 9826), True, 'import numpy as np\n'), ((13761, 13775), 'numpy.abs', 'np.abs', (['(v1 - v)'], {}), '(v1 - v)\n', (13767, 13775), True, 'import numpy as np\n'), ((20475, 20502), 'numpy.arange', 'np.arange', (['self.padded_size'], {}), '(self.padded_size)\n', (20484, 20502), True, 'import numpy as np\n'), 
((20546, 20573), 'numpy.arange', 'np.arange', (['self.padded_size'], {}), '(self.padded_size)\n', (20555, 20573), True, 'import numpy as np\n'), ((9735, 9749), 'numpy.min', 'np.min', (['region'], {}), '(region)\n', (9741, 9749), True, 'import numpy as np\n'), ((10616, 10661), 'numpy.array', 'np.array', (['delaunay.points[simplex]'], {'dtype': 'int'}), '(delaunay.points[simplex], dtype=int)\n', (10624, 10661), True, 'import numpy as np\n'), ((12932, 12946), 'numpy.ceil', 'np.ceil', (['width'], {}), '(width)\n', (12939, 12946), True, 'import numpy as np\n'), ((9860, 9885), 'numpy.mean', 'np.mean', (['vertices'], {'axis': '(0)'}), '(vertices, axis=0)\n', (9867, 9885), True, 'import numpy as np\n'), ((13476, 13504), 'numpy.abs', 'np.abs', (['(tile[0] - endtile[0])'], {}), '(tile[0] - endtile[0])\n', (13482, 13504), True, 'import numpy as np\n'), ((13603, 13631), 'numpy.abs', 'np.abs', (['(tile[1] - endtile[1])'], {}), '(tile[1] - endtile[1])\n', (13609, 13631), True, 'import numpy as np\n')] |
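The terraform pass in the map generator above recursively lifts neighbouring
tiles so that adjacent elevations never differ by more than one step. A minimal
standalone sketch of that idea (the limit_slope helper is hypothetical and uses
a 4-neighbourhood instead of the generator's 8-neighbourhood):

import numpy as np

def limit_slope(el, i, j, target):
    # Raise cell (i, j) to `target`, then lift any neighbour that would be
    # left more than one step below it (mirrors terraform_tile, simplified).
    if target <= el[i, j]:
        return
    el[i, j] = target
    for m, n in ((i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)):
        if 0 <= m < el.shape[0] and 0 <= n < el.shape[1] and target - el[m, n] >= 2:
            limit_slope(el, m, n, target - 1)

el = np.zeros((5, 5), dtype=int)
limit_slope(el, 2, 2, 3)
print(el)  # elevation falls off one step per tile away from the centre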
import copy
import itertools
from typing import Any
from typing import Iterator
from typing import List
from typing import Union
import numpy as np
import pycocotools.mask as mask_utils
import torch
from tkdet.layers.roi_align import ROIAlign
from .boxes import Boxes
__all__ = ["BitMasks", "PolygonMasks", "rasterize_polygons_within_box", "polygons_to_bitmask"]
def polygon_area(x, y):
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
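# Worked example of the shoelace formula above: for the unit square
# x = [0, 1, 1, 0], y = [0, 0, 1, 1],
#   np.dot(x, np.roll(y, 1)) = 0 and np.dot(y, np.roll(x, 1)) = 2,
# so polygon_area returns 0.5 * |0 - 2| = 1.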
def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
assert len(polygons) > 0, "COCOAPI does not support empty polygons"
rles = mask_utils.frPyObjects(polygons, height, width)
rle = mask_utils.merge(rles)
return mask_utils.decode(rle).astype(np.bool)
def rasterize_polygons_within_box(
polygons: List[np.ndarray],
box: np.ndarray,
mask_size: int
) -> torch.Tensor:
w, h = box[2] - box[0], box[3] - box[1]
polygons = copy.deepcopy(polygons)
for p in polygons:
p[0::2] = p[0::2] - box[0]
p[1::2] = p[1::2] - box[1]
ratio_h = mask_size / max(h, 0.1)
ratio_w = mask_size / max(w, 0.1)
if ratio_h == ratio_w:
for p in polygons:
p *= ratio_h
else:
for p in polygons:
p[0::2] *= ratio_w
p[1::2] *= ratio_h
mask = polygons_to_bitmask(polygons, mask_size, mask_size)
mask = torch.from_numpy(mask)
return mask
class BitMasks(object):
def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)
assert tensor.dim() == 3, tensor.size()
self.image_size = tensor.shape[1:]
self.tensor = tensor
def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
return BitMasks(self.tensor.to(*args, **kwargs))
@property
def device(self) -> torch.device:
return self.tensor.device
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
if isinstance(item, int):
return BitMasks(self.tensor[item].view(1, -1))
m = self.tensor[item]
assert m.dim() == 3, \
f"Indexing on BitMasks with {item} returns a tensor with shape {m.shape}!"
return BitMasks(m)
def __iter__(self) -> torch.Tensor:
yield from self.tensor
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
return self.tensor.flatten(1).any(dim=1)
@staticmethod
def from_polygon_masks(
polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]],
height: int,
width: int
) -> "BitMasks":
if isinstance(polygon_masks, PolygonMasks):
polygon_masks = polygon_masks.polygons
masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes], dim=1)
bit_masks = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
output = (
ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
.forward(bit_masks[:, None, :, :], rois)
.squeeze(1)
)
output = output >= 0.5
return output
def get_bounding_boxes(self) -> None:
raise NotImplementedError
@staticmethod
def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
assert isinstance(bitmasks_list, (list, tuple))
assert len(bitmasks_list) > 0
assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
return cat_bitmasks
class PolygonMasks(object):
def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
assert isinstance(polygons, list), (
"Cannot create PolygonMasks: Expect a list of list of polygons per image. "
"Got '{}' instead.".format(type(polygons))
)
def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
if isinstance(t, torch.Tensor):
t = t.cpu().numpy()
return np.asarray(t).astype("float64")
def process_polygons(
polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
) -> List[np.ndarray]:
assert isinstance(polygons_per_instance, list), (
"Cannot create polygons: Expect a list of polygons per instance. "
"Got '{}' instead.".format(type(polygons_per_instance))
)
polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
for polygon in polygons_per_instance:
assert len(polygon) % 2 == 0 and len(polygon) >= 6
return polygons_per_instance
self.polygons: List[List[np.ndarray]] = [
process_polygons(polygons_per_instance) for polygons_per_instance in polygons
]
def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks":
return self
@property
def device(self) -> torch.device:
return torch.device("cpu")
def get_bounding_boxes(self) -> Boxes:
boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)
for idx, polygons_per_instance in enumerate(self.polygons):
minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
maxxy = torch.zeros(2, dtype=torch.float32)
for polygon in polygons_per_instance:
coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
minxy = torch.min(minxy, torch.min(coords, dim=0).values)
maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
boxes[idx, :2] = minxy
boxes[idx, 2:] = maxxy
return Boxes(boxes)
def nonempty(self) -> torch.Tensor:
keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]
return torch.from_numpy(np.asarray(keep, dtype=np.bool))
def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks":
if isinstance(item, int):
selected_polygons = [self.polygons[item]]
elif isinstance(item, slice):
selected_polygons = self.polygons[item]
elif isinstance(item, list):
selected_polygons = [self.polygons[i] for i in item]
elif isinstance(item, torch.Tensor):
if item.dtype == torch.bool:
assert item.dim() == 1, item.shape
item = item.nonzero().squeeze(1).cpu().numpy().tolist()
elif item.dtype in [torch.int32, torch.int64]:
item = item.cpu().numpy().tolist()
else:
raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype))
selected_polygons = [self.polygons[i] for i in item]
return PolygonMasks(selected_polygons)
def __iter__(self) -> Iterator[List[np.ndarray]]:
return iter(self.polygons)
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.polygons))
return s
def __len__(self) -> int:
return len(self.polygons)
def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
device = boxes.device
boxes = boxes.to(torch.device("cpu"))
results = [
rasterize_polygons_within_box(poly, box.numpy(), mask_size)
for poly, box in zip(self.polygons, boxes)
]
if len(results) == 0:
return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)
return torch.stack(results, dim=0).to(device=device)
def area(self):
area = []
for polygons_per_instance in self.polygons:
area_per_instance = 0
for p in polygons_per_instance:
area_per_instance += polygon_area(p[0::2], p[1::2])
area.append(area_per_instance)
return torch.tensor(area)
@staticmethod
def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks":
assert isinstance(polymasks_list, (list, tuple))
assert len(polymasks_list) > 0
assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)
cat_polymasks = type(polymasks_list[0])(
list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list))
)
return cat_polymasks
| [
"torch.as_tensor",
"numpy.roll",
"tkdet.layers.roi_align.ROIAlign",
"pycocotools.mask.decode",
"torch.stack",
"pycocotools.mask.frPyObjects",
"numpy.asarray",
"torch.from_numpy",
"torch.min",
"torch.max",
"torch.tensor",
"itertools.chain.from_iterable",
"pycocotools.mask.merge",
"copy.deep... | [((647, 694), 'pycocotools.mask.frPyObjects', 'mask_utils.frPyObjects', (['polygons', 'height', 'width'], {}), '(polygons, height, width)\n', (669, 694), True, 'import pycocotools.mask as mask_utils\n'), ((705, 727), 'pycocotools.mask.merge', 'mask_utils.merge', (['rles'], {}), '(rles)\n', (721, 727), True, 'import pycocotools.mask as mask_utils\n'), ((966, 989), 'copy.deepcopy', 'copy.deepcopy', (['polygons'], {}), '(polygons)\n', (979, 989), False, 'import copy\n'), ((1414, 1436), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (1430, 1436), False, 'import torch\n'), ((1653, 1709), 'torch.as_tensor', 'torch.as_tensor', (['tensor'], {'dtype': 'torch.bool', 'device': 'device'}), '(tensor, dtype=torch.bool, device=device)\n', (1668, 1709), False, 'import torch\n'), ((3512, 3549), 'torch.cat', 'torch.cat', (['[batch_inds, boxes]'], {'dim': '(1)'}), '([batch_inds, boxes], dim=1)\n', (3521, 3549), False, 'import torch\n'), ((5765, 5784), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5777, 5784), False, 'import torch\n'), ((8786, 8804), 'torch.tensor', 'torch.tensor', (['area'], {}), '(area)\n', (8798, 8804), False, 'import torch\n'), ((739, 761), 'pycocotools.mask.decode', 'mask_utils.decode', (['rle'], {}), '(rle)\n', (756, 761), True, 'import pycocotools.mask as mask_utils\n'), ((1616, 1635), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1628, 1635), False, 'import torch\n'), ((4255, 4308), 'torch.cat', 'torch.cat', (['[bm.tensor for bm in bitmasks_list]'], {'dim': '(0)'}), '([bm.tensor for bm in bitmasks_list], dim=0)\n', (4264, 4308), False, 'import torch\n'), ((6076, 6111), 'torch.zeros', 'torch.zeros', (['(2)'], {'dtype': 'torch.float32'}), '(2, dtype=torch.float32)\n', (6087, 6111), False, 'import torch\n'), ((6643, 6674), 'numpy.asarray', 'np.asarray', (['keep'], {'dtype': 'np.bool'}), '(keep, dtype=np.bool)\n', (6653, 6674), True, 'import numpy as np\n'), ((8131, 8150), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (8143, 8150), False, 'import torch\n'), ((8359, 8428), 'torch.empty', 'torch.empty', (['(0)', 'mask_size', 'mask_size'], {'dtype': 'torch.bool', 'device': 'device'}), '(0, mask_size, mask_size, dtype=torch.bool, device=device)\n', (8370, 8428), False, 'import torch\n'), ((8444, 8471), 'torch.stack', 'torch.stack', (['results'], {'dim': '(0)'}), '(results, dim=0)\n', (8455, 8471), False, 'import torch\n'), ((9142, 9209), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['(pm.polygons for pm in polymasks_list)'], {}), '(pm.polygons for pm in polymasks_list)\n', (9171, 9209), False, 'import itertools\n'), ((426, 439), 'numpy.roll', 'np.roll', (['y', '(1)'], {}), '(y, 1)\n', (433, 439), True, 'import numpy as np\n'), ((453, 466), 'numpy.roll', 'np.roll', (['x', '(1)'], {}), '(x, 1)\n', (460, 466), True, 'import numpy as np\n'), ((3163, 3182), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (3179, 3182), False, 'import torch\n'), ((4820, 4833), 'numpy.asarray', 'np.asarray', (['t'], {}), '(t)\n', (4830, 4833), True, 'import numpy as np\n'), ((3676, 3730), 'tkdet.layers.roi_align.ROIAlign', 'ROIAlign', (['(mask_size, mask_size)', '(1.0)', '(0)'], {'aligned': '(True)'}), '((mask_size, mask_size), 1.0, 0, aligned=True)\n', (3684, 3730), False, 'from tkdet.layers.roi_align import ROIAlign\n'), ((6290, 6314), 'torch.min', 'torch.min', (['coords'], {'dim': '(0)'}), '(coords, dim=0)\n', (6299, 6314), False, 'import torch\n'), ((6364, 6388), 'torch.max', 
'torch.max', (['coords'], {'dim': '(0)'}), '(coords, dim=0)\n', (6373, 6388), False, 'import torch\n'), ((6187, 6212), 'torch.from_numpy', 'torch.from_numpy', (['polygon'], {}), '(polygon)\n', (6203, 6212), False, 'import torch\n')] |
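polygons_to_bitmask above turns a COCO-style flat coordinate list into a
boolean mask. A minimal usage sketch (the triangle coordinates are invented for
illustration, and it assumes pycocotools is installed and the module above is
importable):

import numpy as np
triangle = [np.asarray([10.0, 10.0, 40.0, 10.0, 25.0, 40.0])]  # flat x0, y0, x1, y1, ...
mask = polygons_to_bitmask(triangle, height=50, width=50)
print(mask.shape, mask.dtype, mask.sum())  # (50, 50), bool, roughly 450 pixels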
from VISA_Driver import VISA_Driver
import numpy as np
class Driver(VISA_Driver):
""" The Yoko driver re-implements the VISA driver with some more options"""
def performOpen(self, options={}):
"""Perform the operation of opening the instrument connection"""
# keep track of sweep state
self.is_sweeping = False
# start by calling the generic VISA open to make sure we have a connection
VISA_Driver.performOpen(self, options=options)
# always get function and range: they are essential for correct resolution and sweeping
self.readValueFromOther('Function')
def initSetConfig(self):
"""This function is run before setting values in Set Config.
Check if new range is smaller than old, if so first go to new value to
avoid zeroing when setting the range with high value"""
# get functions and range, first from internal settings (from SetCfg)
newFunction = self.getValue('Function')
dRangeNew, dMaxNew = self.getMaxValueAndSmallestStep()
# get actual settings on instrument by first calling readFromOther
oldFunction = self.readValueFromOther('Function')
# also read the range settings
if oldFunction == 'Voltage':
self.readValueFromOther('Range (V)')
elif oldFunction == 'Current':
self.readValueFromOther('Range (I)')
dRangeOld, dMaxOld = self.getMaxValueAndSmallestStep()
        # check if instrument is in a different mode or the new range is bigger; if so, return
if (newFunction != oldFunction) or (dMaxNew > dMaxOld):
return
# set new value, either voltage or current
if newFunction == 'Voltage':
quant = self.getQuantity('Voltage')
elif newFunction == 'Current':
quant = self.getQuantity('Current')
# get new value and sweep rate from internal quantity
value = quant.getValue()
rate = quant.getSweepRate()
# set value here, before changing the range
self.sendValueToOther(quant.name, value, sweepRate=rate)
def performSetValue(self, quant, value, sweepRate=0.0, options={}):
"""Perform the Set Value instrument operation. This function should
return the actual value set by the instrument"""
# check if set value and in sweep mode
if quant.name in ('Voltage', 'Current'):
# check limits
(dStep, dMax) = self.getMaxValueAndSmallestStep()
if abs(value) > dMax:
# new value out of range, return error
raise Exception('New value (%.6g) is out of range (max = %.6g)' % (value, dMax))
# calculate actual value based on smallest step size
value = dStep * np.round(value/dStep)
# check if sweep mode or output off, if not call generic driver
if sweepRate == 0.0 or (not self.getValue('Output')):
return VISA_Driver.performSetValue(self, quant, value, 0.0, options=options)
# sweep mode, do it here
# get old value to find sweep time and step size
currValue = self.performGetValue(quant)
if value == currValue:
# already at the final value, return
return value
# if sweep range is less than two minimal steps, don't sweep
if abs(value-currValue) < 2.5*dStep:
return VISA_Driver.performSetValue(self, quant, value, 0.0, options=options)
dSweepTime = abs(value-currValue)/sweepRate
# don't allow sweep times that are shorter than 0.1 s
dSweepTime = max(dSweepTime, 0.1)
sSweepTime = '%.1f' % dSweepTime
sCmd = '*CLS;:PROG:REP 0;' + \
'SLOP %s;' % sSweepTime + \
'INT %s;' % sSweepTime + \
'EDIT:STAR;' + \
':SOUR:LEV %.6E;' % value + \
':PROG:EDIT:END;' + \
':PROG:RUN'
self.is_sweeping = True
VISA_Driver.writeAndLog(self, sCmd)
# return target value
return value
else:
# for all other quantities, call the generic VISA driver
return VISA_Driver.performSetValue(self, quant, value, sweepRate,
options=options)
def checkIfSweeping(self, quant, options={}):
"""Check if instrument is sweeping the given quantity"""
# check for bit 7 EOP (End of program) im the extended event register
status = self.askAndLog(':STAT:EVEN?')
# mark as done if bit is set
if (int(status) & 128) > 0:
self.is_sweeping = False
return self.is_sweeping
def getMaxValueAndSmallestStep(self):
"""Return the resolution, which depends on function (voltage or current)
and range settings"""
# get function type
func = self.getValue('Function')
if func == 'Voltage':
# get range, voltage
dRange = {'30 V': 1E-3, '10 V': 1E-4, '1 V': 1E-5, '100 mV': 1E-6,
'10 mV': 1E-7}
dMax = {'30 V': 32.0, '10 V': 12.0, '1 V': 1.2, '100 mV': 0.12,
'10 mV': 0.012}
sRange = self.getValue('Range (V)')
return (dRange[sRange], dMax[sRange])
elif func == 'Current':
# get range, current
dRange = {'200 mA': 2E-6, '100 mA': 1E-6, '10 mA': 1E-7, '1 mA': 1E-8}
dMax = {'200 mA': .2, '100 mA': .12, '10 mA': 0.012, '1 mA': 0.0012}
sRange = self.getValue('Range (I)')
return (dRange[sRange], dMax[sRange])
| [
"VISA_Driver.VISA_Driver.performSetValue",
"VISA_Driver.VISA_Driver.writeAndLog",
"numpy.round",
"VISA_Driver.VISA_Driver.performOpen"
] | [((437, 483), 'VISA_Driver.VISA_Driver.performOpen', 'VISA_Driver.performOpen', (['self'], {'options': 'options'}), '(self, options=options)\n', (460, 483), False, 'from VISA_Driver import VISA_Driver\n'), ((4070, 4105), 'VISA_Driver.VISA_Driver.writeAndLog', 'VISA_Driver.writeAndLog', (['self', 'sCmd'], {}), '(self, sCmd)\n', (4093, 4105), False, 'from VISA_Driver import VISA_Driver\n'), ((4269, 4344), 'VISA_Driver.VISA_Driver.performSetValue', 'VISA_Driver.performSetValue', (['self', 'quant', 'value', 'sweepRate'], {'options': 'options'}), '(self, quant, value, sweepRate, options=options)\n', (4296, 4344), False, 'from VISA_Driver import VISA_Driver\n'), ((2776, 2799), 'numpy.round', 'np.round', (['(value / dStep)'], {}), '(value / dStep)\n', (2784, 2799), True, 'import numpy as np\n'), ((2963, 3032), 'VISA_Driver.VISA_Driver.performSetValue', 'VISA_Driver.performSetValue', (['self', 'quant', 'value', '(0.0)'], {'options': 'options'}), '(self, quant, value, 0.0, options=options)\n', (2990, 3032), False, 'from VISA_Driver import VISA_Driver\n'), ((3445, 3514), 'VISA_Driver.VISA_Driver.performSetValue', 'VISA_Driver.performSetValue', (['self', 'quant', 'value', '(0.0)'], {'options': 'options'}), '(self, quant, value, 0.0, options=options)\n', (3472, 3514), False, 'from VISA_Driver import VISA_Driver\n')] |
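The driver above snaps every requested output level to the instrument's
resolution before programming it. A standalone numeric sketch of that rounding,
using the '1 V' range figures from getMaxValueAndSmallestStep:

import numpy as np
dStep, dMax = 1e-05, 1.2          # '1 V' range: smallest step, maximum level
value = 0.123456789
assert abs(value) <= dMax         # same limit check performSetValue applies
print(dStep * np.round(value / dStep))  # 0.12346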
#!/usr/bin/env python3
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import mpmath
import numpy
import yaml
def read_file(path):
with open(path) as fin:
return yaml.safe_load(fin)
def write_file(path, data):
with open(path, 'w') as fout:
yaml.dump(data, fout)
RANK_BOOL = 0
RANK_INT = 1
RANK_FLOAT = 2
RANK_STR = 3
rank_to_type = [numpy.bool_, numpy.int32, numpy.double, mpmath.mpf]
def _import(data, rank_to_type_override = {}):
def wrap(scanned_data):
data, rank = scanned_data
_type = rank_to_type_override.get(rank, rank_to_type[rank])
return (
(
numpy.array(data, _type)
if issubclass(_type, numpy.generic) else
mpmath.matrix(data)
if _type == mpmath.mpf else
data
)
if isinstance(data, list) else
data
if isinstance(data, dict) else
(
data # avoid using numpy scalars
if issubclass(_type, numpy.generic) else
_type(data)
)
)
def scan(data):
# scalars and lists do not call wrap() yet
# for scalars we want to avoid conversion to numpy if not in a list
# for lists we want to do conversion after collecting all nested lists
if isinstance(data, bool):
return data, RANK_BOOL
if isinstance(data, int):
return data, RANK_INT
if isinstance(data, float):
return data, RANK_FLOAT
if isinstance(data, str):
return data, RANK_STR
if isinstance(data, list):
scanned_data = [scan(i) for i in data]
return (
[i for i, _ in scanned_data],
max([i for _, i in scanned_data])
)
if isinstance(data, dict):
scanned_data = [(i, scan(j)) for i, j in data.items()]
return (
{i: wrap(j) for i, j in scanned_data},
max([i for _, (_, i) in scanned_data])
)
assert False
return wrap(scan(data))
def export(data):
return (
float(data)
if isinstance(data, numpy.floating) else
int(data)
if isinstance(data, numpy.integer) else
bool(data)
if isinstance(data, numpy.bool_) else
[export(i) for i in data]
if isinstance(data, numpy.ndarray) else
str(data)
if isinstance(data, mpmath.mpf) else
(
[export(data[i]) for i in range(data.rows)]
if data.cols == 1 else
[
[export(data[i, j]) for j in range(data.cols)]
for i in range(data.rows)
]
)
if isinstance(data, mpmath.matrix) else
{i: export(j) for i, j in data.items()}
if isinstance(data, dict) else
data
)
if __name__ == '__main__':
mpmath.mp.prec = 106
write_file(
'a.yml',
export(
{
'a': True,
'b': 27,
'c': 27.5,
'd': '27.5000000000000000000000000000001',
'e': numpy.array([True, False, True], numpy.bool),
'f': numpy.array([[1, 2, 3]], numpy.int32),
'g': numpy.array([[1., 2., 3.], [4., 5., 6.]], numpy.double),
'h': mpmath.matrix([[1., 2., 3.], [4., 5., 6.]]),
'i': mpmath.matrix([1., 2., '3.0000000000000000000000000001'])
}
)
)
print(_import(read_file('a.yml')))
| [
"yaml.safe_load",
"numpy.array",
"mpmath.matrix",
"yaml.dump"
] | [((1208, 1227), 'yaml.safe_load', 'yaml.safe_load', (['fin'], {}), '(fin)\n', (1222, 1227), False, 'import yaml\n'), ((1293, 1314), 'yaml.dump', 'yaml.dump', (['data', 'fout'], {}), '(data, fout)\n', (1302, 1314), False, 'import yaml\n'), ((1635, 1659), 'numpy.array', 'numpy.array', (['data', '_type'], {}), '(data, _type)\n', (1646, 1659), False, 'import numpy\n'), ((3751, 3795), 'numpy.array', 'numpy.array', (['[True, False, True]', 'numpy.bool'], {}), '([True, False, True], numpy.bool)\n', (3762, 3795), False, 'import numpy\n'), ((3810, 3847), 'numpy.array', 'numpy.array', (['[[1, 2, 3]]', 'numpy.int32'], {}), '([[1, 2, 3]], numpy.int32)\n', (3821, 3847), False, 'import numpy\n'), ((3862, 3923), 'numpy.array', 'numpy.array', (['[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]', 'numpy.double'], {}), '([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], numpy.double)\n', (3873, 3923), False, 'import numpy\n'), ((3932, 3981), 'mpmath.matrix', 'mpmath.matrix', (['[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]'], {}), '([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])\n', (3945, 3981), False, 'import mpmath\n'), ((3990, 4049), 'mpmath.matrix', 'mpmath.matrix', (["[1.0, 2.0, '3.0000000000000000000000000001']"], {}), "([1.0, 2.0, '3.0000000000000000000000000001'])\n", (4003, 4049), False, 'import mpmath\n'), ((1715, 1734), 'mpmath.matrix', 'mpmath.matrix', (['data'], {}), '(data)\n', (1728, 1734), False, 'import mpmath\n')] |
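_import above promotes each nested list to the widest scalar rank it contains
(bool < int < float < str). A quick check of that promotion, assuming the
functions above are in scope:

data = _import({'xs': [1, 2, 3.5], 'flag': True})
print(type(data['flag']))   # plain bool: top-level scalars are not wrapped
print(data['xs'].dtype)     # float64: the int entries were promoted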
import sys
sys.path.append('/home/holiestcow/Documents/2017_fall/ne697_hayward')
from radsynth.core.playground import Playground
from radsynth.core.items import Detector, Source
import numpy as np
def main():
# Define items
# cs137_1 = Source(name='cs137_1', position=np.array([20, 20]), isotope='cs137',
# activity_mCi=1)
cs137_2 = Source(name='cs137_2', position=np.array([-25, -35]), isotope='cs137',
activity_mCi=0.25)
# NOTE: Have these det positions created with a path generator.
det_pos0 = Detector(name='waypoint_pos0', position=np.array([45, -32]), material='NaI',
detector_number=4, orientation=315, time=0)
det_pos1 = Detector(name='waypoint_pos1', position=np.array([15, -20]), material='NaI',
detector_number=4, orientation=315, time=35)
det_pos2 = Detector(name='waypoint_pos2', position=np.array([-20, -20]), material='NaI',
detector_number=4, orientation=25, time=35 + 50)
det_pos3 = Detector(name='waypoint_pos3', position=np.array([0, 0]), material='NaI',
detector_number=4, orientation=25, time=100)
det_pos4 = Detector(name='waypoint_pos4', position=np.array([-40, 40]), material='NaI',
detector_number=4, orientation=315, time=22 + 35 + 35 + 30)
# define environment
environment = Playground()
# add items
# Sources
# environment.add_tracked_item(cs137_1)
environment.add_tracked_item(cs137_2)
# Detectors
environment.add_tracked_item(det_pos0)
environment.add_tracked_item(det_pos1)
environment.add_tracked_item(det_pos2)
environment.add_tracked_item(det_pos3)
environment.add_tracked_item(det_pos4)
environment.add_measurement_plan(waypoints=[det_pos0.name, det_pos1.name, det_pos2.name,
det_pos3.name, det_pos4.name],
plan_name='detector_movement',
time_step=5,
sub_time_step=0.2)
# print(environment.plans['detector_movement'].observed_object_list)
# print(environment.plans['detector_movement'].physical_object_list)
# print(len(environment.plans['detector_movement'].observed_object_list))
# print(len(environment.plans['detector_movement'].physical_object_list))
fig, ax, art = environment.plotme(plot_width=8, plot_height=6, legend_position=(1.2, 1),
legend_column_number=1)
fig.savefig('test_playground.png', additional_artists=art, bbox_inches='tight')
environment.write_geant_macros(
output_prefix='/home/cbritt2/ne692_hayward/ddli-code/geant4',
output_suffix='test', local_output='/home/holiestcow/Documents/2017_fall/ne697_hayward/ddli-code/geant4/scripts/test_batch',
batch_name='testeroony',
nparticles=100000) # 1e5
return
main()
| [
"radsynth.core.playground.Playground",
"numpy.array",
"sys.path.append"
] | [((13, 82), 'sys.path.append', 'sys.path.append', (['"""/home/holiestcow/Documents/2017_fall/ne697_hayward"""'], {}), "('/home/holiestcow/Documents/2017_fall/ne697_hayward')\n", (28, 82), False, 'import sys\n'), ((1416, 1428), 'radsynth.core.playground.Playground', 'Playground', ([], {}), '()\n', (1426, 1428), False, 'from radsynth.core.playground import Playground\n'), ((404, 424), 'numpy.array', 'np.array', (['[-25, -35]'], {}), '([-25, -35])\n', (412, 424), True, 'import numpy as np\n'), ((606, 625), 'numpy.array', 'np.array', (['[45, -32]'], {}), '([45, -32])\n', (614, 625), True, 'import numpy as np\n'), ((766, 785), 'numpy.array', 'np.array', (['[15, -20]'], {}), '([15, -20])\n', (774, 785), True, 'import numpy as np\n'), ((927, 947), 'numpy.array', 'np.array', (['[-20, -20]'], {}), '([-20, -20])\n', (935, 947), True, 'import numpy as np\n'), ((1093, 1109), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1101, 1109), True, 'import numpy as np\n'), ((1251, 1270), 'numpy.array', 'np.array', (['[-40, 40]'], {}), '([-40, 40])\n', (1259, 1270), True, 'import numpy as np\n')] |
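A unit sanity check for the source strength used above (1 mCi = 3.7e7 Bq):

activity_Bq = 0.25 * 3.7e7
print(activity_Bq)  # 9.25e6 decays per second for the 0.25 mCi Cs-137 source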
import numpy as np
import scipy.signal as sp
def find_template_1D(t, s):
    # Locate template t in signal s and return its start index.
    # 'valid' mode keeps only full-overlap positions, so the argmax of the
    # cross-correlation is directly the template's start index in s.
    corr = sp.correlate2d(s, t, mode='valid')
    return np.argmax(corr)
s = np.array([[-1, 0, 0, 5, 1, 1, 0, 0, -1, -7, 2, 1, 0, 0, -1]])
t = np.array([[-1, -7, 2]])
print("Signal: \n {} \n {}".format(np.array(range(s.shape[1])), s[0]))
print("Template: \n {} \n {}".format(np.array(range(t.shape[1])), t[0]))
index = find_template_1D(t, s)
print("Index: {}".format(index))
| [
"numpy.array",
"scipy.signal.correlate2d",
"numpy.argmax"
] | [((229, 290), 'numpy.array', 'np.array', (['[[-1, 0, 0, 5, 1, 1, 0, 0, -1, -7, 2, 1, 0, 0, -1]]'], {}), '([[-1, 0, 0, 5, 1, 1, 0, 0, -1, -7, 2, 1, 0, 0, -1]])\n', (237, 290), True, 'import numpy as np\n'), ((295, 318), 'numpy.array', 'np.array', (['[[-1, -7, 2]]'], {}), '([[-1, -7, 2]])\n', (303, 318), True, 'import numpy as np\n'), ((175, 195), 'scipy.signal.correlate2d', 'sp.correlate2d', (['s', 't'], {}), '(s, t)\n', (189, 195), True, 'import scipy.signal as sp\n'), ((207, 222), 'numpy.argmax', 'np.argmax', (['corr'], {}), '(corr)\n', (216, 222), True, 'import numpy as np\n')] |
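A plain 1-D cross-check of the expected answer: the template [-1, -7, 2] starts
at index 8 of the signal, and a full-overlap cross-correlation peaks exactly
there.

import numpy as np
s1 = np.array([-1, 0, 0, 5, 1, 1, 0, 0, -1, -7, 2, 1, 0, 0, -1])
t1 = np.array([-1, -7, 2])
corr = np.correlate(s1, t1, mode='valid')  # corr[k] = sum(t1 * s1[k:k+3])
print(np.argmax(corr))  # 8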
from unittest import TestCase
import os
import numpy as np
from bladex import NacaProfile, Shaft, Propeller, Blade
from smithers.io.obj import ObjHandler
from smithers.io.stlhandler import STLHandler
def create_sample_blade_NACApptc():
sections = np.asarray([NacaProfile('5407') for i in range(13)])
radii=np.array([0.034375, 0.0375, 0.04375, 0.05, 0.0625, 0.075, 0.0875,
0.1, 0.10625, 0.1125, 0.11875, 0.121875, 0.125])
chord_lengths = np.array([0.039, 0.045, 0.05625, 0.06542, 0.08125,
0.09417, 0.10417, 0.10708, 0.10654, 0.10417,
0.09417, 0.07867, 0.025])
pitch = np.array([0.35, 0.35, 0.36375, 0.37625, 0.3945, 0.405, 0.40875,
0.4035, 0.3955, 0.38275, 0.3645, 0.35275, 0.33875])
rake=np.array([0.0 ,0.0, 0.0005, 0.00125, 0.00335, 0.005875, 0.0075,
0.007375, 0.006625, 0.00545, 0.004033, 0.0033, 0.0025])
skew_angles=np.array([6.6262795, 3.6262795, -1.188323, -4.4654502,
-7.440779, -7.3840979, -5.0367916, -1.3257914,
1.0856404, 4.1448947, 7.697235, 9.5368917,
11.397609])
return Blade(
sections=sections,
radii=radii,
chord_lengths=chord_lengths,
pitch=pitch,
rake=rake,
skew_angles=skew_angles)
class TestPropeller(TestCase):
"""
Test case for the Propeller class.
"""
def test_sections_inheritance_NACApptc(self):
prop= create_sample_blade_NACApptc()
self.assertIsInstance(prop.sections[0], NacaProfile)
def test_radii_NACApptc(self):
prop = create_sample_blade_NACApptc()
np.testing.assert_equal(prop.radii, np.array([0.034375, 0.0375, 0.04375,
0.05, 0.0625, 0.075,
0.0875, 0.1, 0.10625,
0.1125, 0.11875, 0.121875,
0.125]))
def test_chord_NACApptc(self):
prop = create_sample_blade_NACApptc()
np.testing.assert_equal(prop.chord_lengths,np.array([0.039, 0.045,
0.05625, 0.06542,
0.08125, 0.09417,
0.10417, 0.10708,
0.10654, 0.10417,
0.09417, 0.07867,
0.025]))
def test_pitch_NACApptc(self):
prop = create_sample_blade_NACApptc()
np.testing.assert_equal(prop.pitch, np.array([0.35, 0.35, 0.36375,
0.37625, 0.3945, 0.405,
0.40875, 0.4035, 0.3955,
0.38275, 0.3645, 0.35275,
0.33875]))
def test_rake_NACApptc(self):
prop = create_sample_blade_NACApptc()
np.testing.assert_equal(prop.rake, np.array([0.0 ,0.0, 0.0005, 0.00125,
0.00335, 0.005875, 0.0075,
0.007375, 0.006625, 0.00545,
0.004033, 0.0033, 0.0025]))
def test_skew_NACApptc(self):
prop = create_sample_blade_NACApptc()
np.testing.assert_equal(prop.skew_angles, np.array([6.6262795,
3.6262795,
-1.188323,
-4.4654502,
-7.440779,
-7.3840979,
-5.0367916,
-1.3257914,
1.0856404,
4.1448947,
7.697235,
9.5368917,
11.397609]))
def test_sections_array_different_length(self):
prop = create_sample_blade_NACApptc()
prop.sections = np.arange(9)
with self.assertRaises(ValueError):
prop._check_params()
def test_radii_array_different_length(self):
prop = create_sample_blade_NACApptc()
prop.radii = np.arange(9)
with self.assertRaises(ValueError):
prop._check_params()
def test_chord_array_different_length(self):
prop = create_sample_blade_NACApptc()
prop.chord_lengths = np.arange(9)
with self.assertRaises(ValueError):
prop._check_params()
def test_pitch_array_different_length(self):
prop = create_sample_blade_NACApptc()
prop.pitch = np.arange(9)
with self.assertRaises(ValueError):
prop._check_params()
def test_rake_array_different_length(self):
prop = create_sample_blade_NACApptc()
prop.rake = np.arange(9)
with self.assertRaises(ValueError):
prop._check_params()
def test_skew_array_different_length(self):
prop = create_sample_blade_NACApptc()
prop.skew_angles = np.arange(9)
with self.assertRaises(ValueError):
prop._check_params()
def test_generate_iges_not_string(self):
sh = Shaft("tests/test_datasets/shaft.iges")
prop = create_sample_blade_NACApptc()
prop = Propeller(sh, prop, 1)
propeller_and_shaft = 1
with self.assertRaises(Exception):
prop.generate_iges(propeller_and_shaft)
def test_generate_stl_not_string(self):
sh = Shaft("tests/test_datasets/shaft.iges")
prop = create_sample_blade_NACApptc()
prop = Propeller(sh, prop, 1)
propeller_and_shaft = 1
with self.assertRaises(Exception):
prop.generate_stl(propeller_and_shaft)
def test_generate_iges(self):
sh = Shaft("tests/test_datasets/shaft.iges")
prop = create_sample_blade_NACApptc()
prop = Propeller(sh, prop, 4)
prop.generate_iges("tests/test_datasets/propeller_and_shaft.iges")
self.assertTrue(os.path.isfile('tests/test_datasets/propeller_and_shaft.iges'))
self.addCleanup(os.remove, 'tests/test_datasets/propeller_and_shaft.iges')
def test_generate_stl(self):
sh = Shaft("tests/test_datasets/shaft.iges")
prop = create_sample_blade_NACApptc()
prop = Propeller(sh, prop, 4)
prop.generate_stl("tests/test_datasets/propeller_and_shaft.stl")
self.assertTrue(os.path.isfile('tests/test_datasets/propeller_and_shaft.stl'))
self.addCleanup(os.remove, 'tests/test_datasets/propeller_and_shaft.stl')
def test_generate_obj_by_coords(self):
sh = Shaft("tests/test_datasets/shaft.iges")
prop = create_sample_blade_NACApptc()
prop = Propeller(sh, prop, 4)
prop.generate_obj("tests/test_datasets/propeller_and_shaft.obj", region_selector='by_coords')
data = ObjHandler.read('tests/test_datasets/propeller_and_shaft.obj')
assert data.regions == ['propellerTip','propellerStem']
# we want 0 to be the first index
data.polygons = np.asarray(data.polygons) - 1
tip_poly = data.polygons[:data.regions_change_indexes[1][0]]
stem_poly = data.polygons[data.regions_change_indexes[1][0]:]
blades_stl = STLHandler.read('/tmp/temp_blades.stl')
shaft_stl = STLHandler.read('/tmp/temp_shaft.stl')
# same vertices
all_vertices = np.concatenate(
[shaft_stl["points"], blades_stl["points"]], axis=0
)
unique_vertices = np.unique(all_vertices, axis=0)
np.testing.assert_almost_equal(data.vertices, unique_vertices, decimal=3)
blades_min_x = np.min(blades_stl['points'][:,0])
assert np.all(data.vertices[np.asarray(tip_poly).flatten()][:,0] >= blades_min_x)
assert not any(np.all(data.vertices[np.asarray(stem_poly).flatten()][:,0].reshape(-1,data.polygons.shape[1]) >= blades_min_x, axis=1))
def test_generate_obj_blades_and_stem(self):
sh = Shaft("tests/test_datasets/shaft.iges")
prop = create_sample_blade_NACApptc()
prop = Propeller(sh, prop, 4)
prop.generate_obj("tests/test_datasets/propeller_and_shaft.obj", region_selector='blades_and_stem')
data = ObjHandler.read('tests/test_datasets/propeller_and_shaft.obj')
assert data.regions == ['propellerTip','propellerStem']
tip_polygons = np.asarray(data.polygons[:data.regions_change_indexes[1][0]]) - 1
stem_polygons = np.asarray(data.polygons[data.regions_change_indexes[1][0]:]) - 1
blades_stl = STLHandler.read('/tmp/temp_blades.stl')
shaft_stl = STLHandler.read('/tmp/temp_shaft.stl')
# same vertices
all_vertices = np.concatenate(
[shaft_stl["points"], blades_stl["points"]], axis=0
)
unique_vertices, indexing = np.unique(
all_vertices, return_index=True, axis=0
)
np.testing.assert_almost_equal(data.vertices, unique_vertices, decimal=3)
assert np.all(indexing[stem_polygons.flatten()] < shaft_stl['points'].shape[0])
assert np.all(indexing[tip_polygons.flatten()] >= shaft_stl['points'].shape[0])
def test_isdisplay(self):
assert hasattr(Propeller, "display") == True
| [
"numpy.unique",
"numpy.asarray",
"numpy.min",
"os.path.isfile",
"numpy.array",
"smithers.io.stlhandler.STLHandler.read",
"bladex.Shaft",
"numpy.testing.assert_almost_equal",
"bladex.NacaProfile",
"bladex.Blade",
"numpy.concatenate",
"smithers.io.obj.ObjHandler.read",
"bladex.Propeller",
"n... | [((316, 435), 'numpy.array', 'np.array', (['[0.034375, 0.0375, 0.04375, 0.05, 0.0625, 0.075, 0.0875, 0.1, 0.10625, \n 0.1125, 0.11875, 0.121875, 0.125]'], {}), '([0.034375, 0.0375, 0.04375, 0.05, 0.0625, 0.075, 0.0875, 0.1, \n 0.10625, 0.1125, 0.11875, 0.121875, 0.125])\n', (324, 435), True, 'import numpy as np\n'), ((471, 597), 'numpy.array', 'np.array', (['[0.039, 0.045, 0.05625, 0.06542, 0.08125, 0.09417, 0.10417, 0.10708, \n 0.10654, 0.10417, 0.09417, 0.07867, 0.025]'], {}), '([0.039, 0.045, 0.05625, 0.06542, 0.08125, 0.09417, 0.10417, \n 0.10708, 0.10654, 0.10417, 0.09417, 0.07867, 0.025])\n', (479, 597), True, 'import numpy as np\n'), ((665, 785), 'numpy.array', 'np.array', (['[0.35, 0.35, 0.36375, 0.37625, 0.3945, 0.405, 0.40875, 0.4035, 0.3955, \n 0.38275, 0.3645, 0.35275, 0.33875]'], {}), '([0.35, 0.35, 0.36375, 0.37625, 0.3945, 0.405, 0.40875, 0.4035, \n 0.3955, 0.38275, 0.3645, 0.35275, 0.33875])\n', (673, 785), True, 'import numpy as np\n'), ((812, 936), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0005, 0.00125, 0.00335, 0.005875, 0.0075, 0.007375, 0.006625, \n 0.00545, 0.004033, 0.0033, 0.0025]'], {}), '([0.0, 0.0, 0.0005, 0.00125, 0.00335, 0.005875, 0.0075, 0.007375, \n 0.006625, 0.00545, 0.004033, 0.0033, 0.0025])\n', (820, 936), True, 'import numpy as np\n'), ((967, 1133), 'numpy.array', 'np.array', (['[6.6262795, 3.6262795, -1.188323, -4.4654502, -7.440779, -7.3840979, -\n 5.0367916, -1.3257914, 1.0856404, 4.1448947, 7.697235, 9.5368917, 11.397609\n ]'], {}), '([6.6262795, 3.6262795, -1.188323, -4.4654502, -7.440779, -\n 7.3840979, -5.0367916, -1.3257914, 1.0856404, 4.1448947, 7.697235, \n 9.5368917, 11.397609])\n', (975, 1133), True, 'import numpy as np\n'), ((1213, 1333), 'bladex.Blade', 'Blade', ([], {'sections': 'sections', 'radii': 'radii', 'chord_lengths': 'chord_lengths', 'pitch': 'pitch', 'rake': 'rake', 'skew_angles': 'skew_angles'}), '(sections=sections, radii=radii, chord_lengths=chord_lengths, pitch=\n pitch, rake=rake, skew_angles=skew_angles)\n', (1218, 1333), False, 'from bladex import NacaProfile, Shaft, Propeller, Blade\n'), ((4698, 4710), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (4707, 4710), True, 'import numpy as np\n'), ((4905, 4917), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (4914, 4917), True, 'import numpy as np\n'), ((5120, 5132), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (5129, 5132), True, 'import numpy as np\n'), ((5327, 5339), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (5336, 5339), True, 'import numpy as np\n'), ((5532, 5544), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (5541, 5544), True, 'import numpy as np\n'), ((5744, 5756), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (5753, 5756), True, 'import numpy as np\n'), ((5893, 5932), 'bladex.Shaft', 'Shaft', (['"""tests/test_datasets/shaft.iges"""'], {}), "('tests/test_datasets/shaft.iges')\n", (5898, 5932), False, 'from bladex import NacaProfile, Shaft, Propeller, Blade\n'), ((5994, 6016), 'bladex.Propeller', 'Propeller', (['sh', 'prop', '(1)'], {}), '(sh, prop, 1)\n', (6003, 6016), False, 'from bladex import NacaProfile, Shaft, Propeller, Blade\n'), ((6202, 6241), 'bladex.Shaft', 'Shaft', (['"""tests/test_datasets/shaft.iges"""'], {}), "('tests/test_datasets/shaft.iges')\n", (6207, 6241), False, 'from bladex import NacaProfile, Shaft, Propeller, Blade\n'), ((6303, 6325), 'bladex.Propeller', 'Propeller', (['sh', 'prop', '(1)'], {}), '(sh, prop, 1)\n', (6312, 6325), False, 'from bladex import NacaProfile, Shaft, Propeller, 
Blade\n'), ((6500, 6539), 'bladex.Shaft', 'Shaft', (['"""tests/test_datasets/shaft.iges"""'], {}), "('tests/test_datasets/shaft.iges')\n", (6505, 6539), False, 'from bladex import NacaProfile, Shaft, Propeller, Blade\n'), ((6601, 6623), 'bladex.Propeller', 'Propeller', (['sh', 'prop', '(4)'], {}), '(sh, prop, 4)\n', (6610, 6623), False, 'from bladex import NacaProfile, Shaft, Propeller, Blade\n'), ((6917, 6956), 'bladex.Shaft', 'Shaft', (['"""tests/test_datasets/shaft.iges"""'], {}), "('tests/test_datasets/shaft.iges')\n", (6922, 6956), False, 'from bladex import NacaProfile, Shaft, Propeller, Blade\n'), ((7018, 7040), 'bladex.Propeller', 'Propeller', (['sh', 'prop', '(4)'], {}), '(sh, prop, 4)\n', (7027, 7040), False, 'from bladex import NacaProfile, Shaft, Propeller, Blade\n'), ((7340, 7379), 'bladex.Shaft', 'Shaft', (['"""tests/test_datasets/shaft.iges"""'], {}), "('tests/test_datasets/shaft.iges')\n", (7345, 7379), False, 'from bladex import NacaProfile, Shaft, Propeller, Blade\n'), ((7441, 7463), 'bladex.Propeller', 'Propeller', (['sh', 'prop', '(4)'], {}), '(sh, prop, 4)\n', (7450, 7463), False, 'from bladex import NacaProfile, Shaft, Propeller, Blade\n'), ((7582, 7644), 'smithers.io.obj.ObjHandler.read', 'ObjHandler.read', (['"""tests/test_datasets/propeller_and_shaft.obj"""'], {}), "('tests/test_datasets/propeller_and_shaft.obj')\n", (7597, 7644), False, 'from smithers.io.obj import ObjHandler\n'), ((7968, 8007), 'smithers.io.stlhandler.STLHandler.read', 'STLHandler.read', (['"""/tmp/temp_blades.stl"""'], {}), "('/tmp/temp_blades.stl')\n", (7983, 8007), False, 'from smithers.io.stlhandler import STLHandler\n'), ((8028, 8066), 'smithers.io.stlhandler.STLHandler.read', 'STLHandler.read', (['"""/tmp/temp_shaft.stl"""'], {}), "('/tmp/temp_shaft.stl')\n", (8043, 8066), False, 'from smithers.io.stlhandler import STLHandler\n'), ((8115, 8182), 'numpy.concatenate', 'np.concatenate', (["[shaft_stl['points'], blades_stl['points']]"], {'axis': '(0)'}), "([shaft_stl['points'], blades_stl['points']], axis=0)\n", (8129, 8182), True, 'import numpy as np\n'), ((8231, 8262), 'numpy.unique', 'np.unique', (['all_vertices'], {'axis': '(0)'}), '(all_vertices, axis=0)\n', (8240, 8262), True, 'import numpy as np\n'), ((8271, 8344), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['data.vertices', 'unique_vertices'], {'decimal': '(3)'}), '(data.vertices, unique_vertices, decimal=3)\n', (8301, 8344), True, 'import numpy as np\n'), ((8369, 8403), 'numpy.min', 'np.min', (["blades_stl['points'][:, 0]"], {}), "(blades_stl['points'][:, 0])\n", (8375, 8403), True, 'import numpy as np\n'), ((8700, 8739), 'bladex.Shaft', 'Shaft', (['"""tests/test_datasets/shaft.iges"""'], {}), "('tests/test_datasets/shaft.iges')\n", (8705, 8739), False, 'from bladex import NacaProfile, Shaft, Propeller, Blade\n'), ((8801, 8823), 'bladex.Propeller', 'Propeller', (['sh', 'prop', '(4)'], {}), '(sh, prop, 4)\n', (8810, 8823), False, 'from bladex import NacaProfile, Shaft, Propeller, Blade\n'), ((8948, 9010), 'smithers.io.obj.ObjHandler.read', 'ObjHandler.read', (['"""tests/test_datasets/propeller_and_shaft.obj"""'], {}), "('tests/test_datasets/propeller_and_shaft.obj')\n", (8963, 9010), False, 'from smithers.io.obj import ObjHandler\n'), ((9277, 9316), 'smithers.io.stlhandler.STLHandler.read', 'STLHandler.read', (['"""/tmp/temp_blades.stl"""'], {}), "('/tmp/temp_blades.stl')\n", (9292, 9316), False, 'from smithers.io.stlhandler import STLHandler\n'), ((9337, 9375), 'smithers.io.stlhandler.STLHandler.read', 
'STLHandler.read', (['"""/tmp/temp_shaft.stl"""'], {}), "('/tmp/temp_shaft.stl')\n", (9352, 9375), False, 'from smithers.io.stlhandler import STLHandler\n'), ((9424, 9491), 'numpy.concatenate', 'np.concatenate', (["[shaft_stl['points'], blades_stl['points']]"], {'axis': '(0)'}), "([shaft_stl['points'], blades_stl['points']], axis=0)\n", (9438, 9491), True, 'import numpy as np\n'), ((9551, 9601), 'numpy.unique', 'np.unique', (['all_vertices'], {'return_index': '(True)', 'axis': '(0)'}), '(all_vertices, return_index=True, axis=0)\n', (9560, 9601), True, 'import numpy as np\n'), ((9632, 9705), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['data.vertices', 'unique_vertices'], {'decimal': '(3)'}), '(data.vertices, unique_vertices, decimal=3)\n', (9662, 9705), True, 'import numpy as np\n'), ((265, 284), 'bladex.NacaProfile', 'NacaProfile', (['"""5407"""'], {}), "('5407')\n", (276, 284), False, 'from bladex import NacaProfile, Shaft, Propeller, Blade\n'), ((1749, 1868), 'numpy.array', 'np.array', (['[0.034375, 0.0375, 0.04375, 0.05, 0.0625, 0.075, 0.0875, 0.1, 0.10625, \n 0.1125, 0.11875, 0.121875, 0.125]'], {}), '([0.034375, 0.0375, 0.04375, 0.05, 0.0625, 0.075, 0.0875, 0.1, \n 0.10625, 0.1125, 0.11875, 0.121875, 0.125])\n', (1757, 1868), True, 'import numpy as np\n'), ((2214, 2340), 'numpy.array', 'np.array', (['[0.039, 0.045, 0.05625, 0.06542, 0.08125, 0.09417, 0.10417, 0.10708, \n 0.10654, 0.10417, 0.09417, 0.07867, 0.025]'], {}), '([0.039, 0.045, 0.05625, 0.06542, 0.08125, 0.09417, 0.10417, \n 0.10708, 0.10654, 0.10417, 0.09417, 0.07867, 0.025])\n', (2222, 2340), True, 'import numpy as np\n'), ((2829, 2949), 'numpy.array', 'np.array', (['[0.35, 0.35, 0.36375, 0.37625, 0.3945, 0.405, 0.40875, 0.4035, 0.3955, \n 0.38275, 0.3645, 0.35275, 0.33875]'], {}), '([0.35, 0.35, 0.36375, 0.37625, 0.3945, 0.405, 0.40875, 0.4035, \n 0.3955, 0.38275, 0.3645, 0.35275, 0.33875])\n', (2837, 2949), True, 'import numpy as np\n'), ((3286, 3410), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0005, 0.00125, 0.00335, 0.005875, 0.0075, 0.007375, 0.006625, \n 0.00545, 0.004033, 0.0033, 0.0025]'], {}), '([0.0, 0.0, 0.0005, 0.00125, 0.00335, 0.005875, 0.0075, 0.007375, \n 0.006625, 0.00545, 0.004033, 0.0033, 0.0025])\n', (3294, 3410), True, 'import numpy as np\n'), ((3697, 3863), 'numpy.array', 'np.array', (['[6.6262795, 3.6262795, -1.188323, -4.4654502, -7.440779, -7.3840979, -\n 5.0367916, -1.3257914, 1.0856404, 4.1448947, 7.697235, 9.5368917, 11.397609\n ]'], {}), '([6.6262795, 3.6262795, -1.188323, -4.4654502, -7.440779, -\n 7.3840979, -5.0367916, -1.3257914, 1.0856404, 4.1448947, 7.697235, \n 9.5368917, 11.397609])\n', (3705, 3863), True, 'import numpy as np\n'), ((6723, 6785), 'os.path.isfile', 'os.path.isfile', (['"""tests/test_datasets/propeller_and_shaft.iges"""'], {}), "('tests/test_datasets/propeller_and_shaft.iges')\n", (6737, 6785), False, 'import os\n'), ((7138, 7199), 'os.path.isfile', 'os.path.isfile', (['"""tests/test_datasets/propeller_and_shaft.stl"""'], {}), "('tests/test_datasets/propeller_and_shaft.stl')\n", (7152, 7199), False, 'import os\n'), ((7776, 7801), 'numpy.asarray', 'np.asarray', (['data.polygons'], {}), '(data.polygons)\n', (7786, 7801), True, 'import numpy as np\n'), ((9099, 9160), 'numpy.asarray', 'np.asarray', (['data.polygons[:data.regions_change_indexes[1][0]]'], {}), '(data.polygons[:data.regions_change_indexes[1][0]])\n', (9109, 9160), True, 'import numpy as np\n'), ((9189, 9250), 'numpy.asarray', 'np.asarray', 
(['data.polygons[data.regions_change_indexes[1][0]:]'], {}), '(data.polygons[data.regions_change_indexes[1][0]:])\n', (9199, 9250), True, 'import numpy as np\n'), ((8440, 8460), 'numpy.asarray', 'np.asarray', (['tip_poly'], {}), '(tip_poly)\n', (8450, 8460), True, 'import numpy as np\n'), ((8538, 8559), 'numpy.asarray', 'np.asarray', (['stem_poly'], {}), '(stem_poly)\n', (8548, 8559), True, 'import numpy as np\n')] |
from sys import argv
import numpy as np
import scipy as sp
from scipy.linalg import eig,svd,eigh
from scipy.sparse.linalg import eigs
from sklearn.neighbors import kneighbors_graph
from copy import deepcopy
from .utils import *
from pymanopt.manifolds import Grassmann
import nudged
from sklearn.metrics.pairwise import pairwise_distances
def findSingleLP(X,d,k,sigma,embMethod='lpp'):
D,N = X.shape
W = np.zeros((N,N))
B = np.zeros((N,N))
if embMethod == 'pca':
for i in range(N-1):
for j in range(i+1,N):
W[i,j] = 1.0/N
W = 0.5*(W + W.T)
B = np.eye(N)
L = B - W
M1 = X.dot(L).dot(X.T)
Mc = np.eye(M1.shape[0])
elif embMethod == 'lpp':
G = kneighbors_graph(X.T,k,mode='distance',include_self=False).toarray()
W = 0.5*(G + G.T)
W[W!=0] = np.exp(-W[W!=0] / (2*sigma*sigma))
B = np.diag(np.sum(W,axis=0))
L = B - W
M1 = X.dot(L).dot(X.T)
Mc = X.dot(B).dot(X.T)
elif embMethod == 'rand':
Gnk = Grassmann(D,2)
proj = Gnk.rand()
return [proj]
elif embMethod == 'syn':
proj = np.zeros((D,2))
card = 2
#ids = np.arange(D)
ids = np.array([1,0,4,3]) # For ecoli 2
#ids = np.array([2,7,3,0]) # For yeast 2
#ids = np.array([12, 39, 5, 0, 45, 43]) # For seaWater 3
#ids = np.array([0, 46, 5, 14, 11, 40, 49, 43]) # For seaWater 4
np.random.shuffle(ids)
#print ids
proj[ids[:card],0] = 1/np.sqrt(card)
proj[ids[card:2*card],1] = 1/np.sqrt(card)
#proj[ids[card-1:2*card-1],1] = 1/np.sqrt(card) # For cities
return [proj]
    u,s = eig(M1)
    u = np.real(u)  # eig returns complex eigenvalues; keep only the real part before comparing
    if np.min(u) < 0:
        M1 = M1 - np.min(u)*np.eye(M1.shape[0])
    u,s = eig(Mc)
    u = np.real(u)
    if np.min(u) < 0:
        Mc = Mc - np.min(u)*np.eye(Mc.shape[0])
eigvals,eigvecs = eig(M1,Mc)
eigvecs = np.dot(sp.linalg.sqrtm(Mc),eigvecs)
if embMethod == 'pca':
ind = np.argsort(-eigvals)
proj = eigvecs[:,ind[0:d]]
elif embMethod == 'lpp':
ind = np.argsort(eigvals)
proj = eigvecs[:,ind[0:d]]
return [proj]
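# Hedged usage sketch (added for illustration, not part of the original
# module): exercises findSingleLP with the LPP objective on synthetic data.
# The feature/sample layout, k, and sigma below are assumptions.
def _demo_find_single_lp():
    rng = np.random.RandomState(0)
    X = rng.randn(10, 50)                       # D=10 features, N=50 samples
    proj = findSingleLP(X, d=2, k=5, sigma=1.0, embMethod='lpp')[0]
    emb = proj.T.dot(X)                         # 2 x N low-dimensional embedding
    print('projection shape:', proj.shape, 'embedding shape:', emb.shape)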
def findMultipleLP(X,d,k,sigma,maxIter,embMethod='lpp',labs = None):
N = X.shape[1]
W = np.zeros((N,N))
B = np.zeros((N,N))
if embMethod == 'pca':
for i in range(N-1):
for j in range(i+1,N):
W[i,j] = 1.0/N
W = np.maximum(W, W.T)
B = np.eye(N)
L = B - W
M1 = X.dot(L).dot(X.T)
Mc = np.eye(M1.shape[0])
elif embMethod == 'lpp':
G = kneighbors_graph(X.T,k,mode='distance',include_self=False).toarray()
W = 0.5*(G + G.T)
W[W!=0] = np.exp(-W[W!=0] / (2*sigma*sigma))
B = np.diag(np.sum(W,axis=0))
L = B - W
M1 = X.dot(L).dot(X.T)
Mc = X.dot(B).dot(X.T)
elif embMethod == 'lde':
Gw = np.zeros((N,N))
Gb = np.zeros((N,N))
dists = pairwise_distances(X.T)
for ii in range(N):
inds = np.where(labs == labs[ii])[0]
sinds = np.argsort(dists[ii,inds])
Gw[ii,inds[sinds[:k]]] = 1
inds = np.where(labs != labs[ii])[0]
sinds = np.argsort(dists[ii,inds])
Gb[ii,inds[sinds[:k]]] = 1
Gw = np.maximum(Gw, Gw.T)
Bw = np.diag(np.sum(Gw,axis=0))
Lw = Bw - Gw
M1 = X.dot(Lw).dot(X.T)
Gb = np.maximum(Gb, Gb.T)
Bb = np.diag(np.sum(Gb,axis=0))
Lb = Bb - Gb
Mc = X.dot(Lb).dot(X.T)
u,s = eig(M1)
u = np.real(u)
if np.min(u) < 0:
M1 = M1 - np.min(u)*np.eye(M1.shape[0])
u,s = eig(Mc)
u = np.real(u)
if np.min(u) < 0:
Mc = Mc - np.min(u)*np.eye(Mc.shape[0])
projList = []
projListFinal = []
if embMethod == 'lde':
# gamma = 1e3 # bio 1e3
gamma = 5e2
thresh = 0.5*2
else:
gamma = 1e4
thresh = 0.6*2
for iters in range(1,maxIter+1):
if iters > 1:
#print np.linalg.norm(X.dot(L).dot(X.T)), np.linalg.norm(C)
if embMethod == 'pca':
M1 = X.dot(L).dot(X.T) - gamma*C
elif embMethod == 'lpp':
M1 = X.dot(L).dot(X.T) + gamma*C
elif embMethod == 'lde':
M1 = X.dot(Lw).dot(X.T) + gamma*C
M1 = 0.5*(M1 + M1.T)
u,s = np.linalg.eig(M1)
if np.min(u) < 0:
M1 = M1 - np.min(u)*np.eye(M1.shape[0])
eigvals,eigvecs = eig(M1,Mc)
eigvals = np.real(eigvals)
eigvecs = np.real(eigvecs)
eigvecs = np.dot(np.real(sp.linalg.sqrtm(Mc)),eigvecs)
if embMethod == 'pca':
ind = np.argsort(-eigvals)
temp = eigvecs[:,ind[0:d]]
elif embMethod == 'lpp' or embMethod == 'lde':
ind = np.argsort(eigvals)
temp = eigvecs[:,ind[0:d]]
for dim in range(2):
temp[:,dim] /= np.linalg.norm(temp[:,dim])
if len(projList) == 0:
projList.append(temp)
C = matprod(temp,temp.T)
projListFinal.append(temp)
else:
projList.append(temp)
C = grassSum(projList)
#print np.linalg.norm(temp[:,0]), np.linalg.norm(temp[:,1])
mval = 1e10
for kk in projListFinal:
mval = np.minimum(mval, 2 - np.linalg.norm(matprod(temp.T,kk)))
if mval > thresh:
err = []
emb1 = (temp.T.dot(X)).T
emb1 = emb1.tolist()
for ps in projListFinal:
emb2 = (ps.T.dot(X)).T
emb2 = emb2.tolist()
trans = nudged.estimate(emb1, emb2)
tt = np.linalg.norm(np.array(emb2) - np.array(trans.transform(emb1)))/np.linalg.norm(emb2)
err.append(tt)
# print np.linalg.norm(emb1), err
#print mval, np.min(np.array(err))
if np.min(np.array(err)) > 0.8:
projListFinal.append(temp)
#print len(projList), len(projListFinal)
return projListFinal
def grassSum(projList):
T = len(projList)
n = projList[0].shape[0]
Bs = np.zeros((n,n))
#print projList[0]
for t in range(0,T):
Bs += matprod(projList[t],projList[t].T)
return Bs
def grassMean(projList):
T = len(projList)
n = projList[0].shape[0]
Bs = np.zeros((n,n))
idn = np.eye(n)
for t in range(0,T):
temp = matprod(projList[t],projList[t].T)
Bs += idn - temp
Bs = 0.5*(Bs + Bs.T)
u,s = np.linalg.eig(Bs)
u = np.real(u)
if np.min(u) < 0:
Bs = Bs - np.min(u)*np.eye(Bs.shape[0])
eigvalue,eigvector = eig(Bs)
eigvalue = np.real(eigvalue)
eigvector = np.real(eigvector)
ind = np.argsort(eigvalue)
eigvector = (eigvector[:,ind[0:2]])
return eigvector
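# Hedged sketch (illustrative only): averages two 2-dimensional subspaces of
# R^5 as a Grassmann point. grassMean relies on the package-local matprod
# helper pulled in by the wildcard .utils import above, so this only runs
# inside the package; the random bases are assumptions.
def _demo_grass_mean():
    rng = np.random.RandomState(0)
    P1, _ = np.linalg.qr(rng.randn(5, 2))   # orthonormal basis of subspace 1
    P2, _ = np.linalg.qr(rng.randn(5, 2))   # orthonormal basis of subspace 2
    mean_basis = grassMean([P1, P2])
    print('mean subspace basis shape:', mean_basis.shape)   # (5, 2)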
def DNM_TR(A,B,d,dectype):
def partial_evd(A,B,ll,d):
D, W = eigs(A - ll*B, k = d, which = 'LR')
D = np.real(D)
W = np.real(W)
return D, W
def full_evd(A,B,ll,d):
D, W = eig(A-ll*B)
D = np.real(D)
W = np.real(W)
sind = np.argsort(-D)
D = D[sind[range(d)]]
W = W[:,sind[range(d)]]
return D, W
maxiter = 100
tol = 1e-5
ll = 0
llold = np.Inf
llall = []
if dectype.lower() == 'partial':
f = partial_evd
elif dectype.lower() == 'full':
f = full_evd
    else:
        # fail fast: f would be undefined below, so raise instead of just printing
        raise ValueError('Invalid option for dectype')
for i in range(maxiter):
D, W = f(A,B,ll,d)
betap = -np.diag(matprod(W.T,B,W))
llold = ll
ll = np.sum(llold*betap-D)/np.sum(betap)
llall.append(ll)
if (i > 1):
if (np.abs(ll-llold)/np.abs(llold) < tol):
break
    if False:  # optional debug plot; matplotlib is not imported at module level
        import matplotlib.pyplot as plt
        plt.figure()
        plt.plot(llall)
return D, W
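# Hedged sketch (added for illustration): DNM_TR appears to solve the
# trace-ratio problem max_W tr(W'AW)/tr(W'BW) by iterating on lambda.
# Like grassMean, it needs matprod from the package's .utils, and the
# synthetic matrices below are assumptions.
def _demo_dnm_tr():
    rng = np.random.RandomState(0)
    Q = rng.randn(8, 8)
    A = Q + Q.T                              # symmetric numerator matrix
    B = np.eye(8) + 0.1 * Q.dot(Q.T)       # SPD denominator matrix
    D, W = DNM_TR(A, B, 2, 'full')
    print('eigenvalues of A - lambda*B at convergence:', D)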
| [
"numpy.sqrt",
"sklearn.metrics.pairwise.pairwise_distances",
"numpy.argsort",
"sklearn.neighbors.kneighbors_graph",
"numpy.array",
"pymanopt.manifolds.Grassmann",
"numpy.linalg.norm",
"numpy.where",
"numpy.exp",
"numpy.real",
"numpy.min",
"numpy.maximum",
"numpy.abs",
"numpy.eye",
"scipy... | [((413, 429), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (421, 429), True, 'import numpy as np\n'), ((437, 453), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (445, 453), True, 'import numpy as np\n'), ((1711, 1718), 'scipy.linalg.eig', 'eig', (['M1'], {}), '(M1)\n', (1714, 1718), False, 'from scipy.linalg import eig, svd, eigh\n'), ((1800, 1807), 'scipy.linalg.eig', 'eig', (['Mc'], {}), '(Mc)\n', (1803, 1807), False, 'from scipy.linalg import eig, svd, eigh\n'), ((1901, 1912), 'scipy.linalg.eig', 'eig', (['M1', 'Mc'], {}), '(M1, Mc)\n', (1904, 1912), False, 'from scipy.linalg import eig, svd, eigh\n'), ((2275, 2291), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (2283, 2291), True, 'import numpy as np\n'), ((2299, 2315), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (2307, 2315), True, 'import numpy as np\n'), ((3577, 3584), 'scipy.linalg.eig', 'eig', (['M1'], {}), '(M1)\n', (3580, 3584), False, 'from scipy.linalg import eig, svd, eigh\n'), ((3593, 3603), 'numpy.real', 'np.real', (['u'], {}), '(u)\n', (3600, 3603), True, 'import numpy as np\n'), ((3685, 3692), 'scipy.linalg.eig', 'eig', (['Mc'], {}), '(Mc)\n', (3688, 3692), False, 'from scipy.linalg import eig, svd, eigh\n'), ((3701, 3711), 'numpy.real', 'np.real', (['u'], {}), '(u)\n', (3708, 3711), True, 'import numpy as np\n'), ((6297, 6313), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (6305, 6313), True, 'import numpy as np\n'), ((6510, 6526), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (6518, 6526), True, 'import numpy as np\n'), ((6536, 6545), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (6542, 6545), True, 'import numpy as np\n'), ((6682, 6699), 'numpy.linalg.eig', 'np.linalg.eig', (['Bs'], {}), '(Bs)\n', (6695, 6699), True, 'import numpy as np\n'), ((6708, 6718), 'numpy.real', 'np.real', (['u'], {}), '(u)\n', (6715, 6718), True, 'import numpy as np\n'), ((6815, 6822), 'scipy.linalg.eig', 'eig', (['Bs'], {}), '(Bs)\n', (6818, 6822), False, 'from scipy.linalg import eig, svd, eigh\n'), ((6838, 6855), 'numpy.real', 'np.real', (['eigvalue'], {}), '(eigvalue)\n', (6845, 6855), True, 'import numpy as np\n'), ((6872, 6890), 'numpy.real', 'np.real', (['eigvector'], {}), '(eigvector)\n', (6879, 6890), True, 'import numpy as np\n'), ((6901, 6921), 'numpy.argsort', 'np.argsort', (['eigvalue'], {}), '(eigvalue)\n', (6911, 6921), True, 'import numpy as np\n'), ((614, 623), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (620, 623), True, 'import numpy as np\n'), ((686, 705), 'numpy.eye', 'np.eye', (['M1.shape[0]'], {}), '(M1.shape[0])\n', (692, 705), True, 'import numpy as np\n'), ((1726, 1735), 'numpy.min', 'np.min', (['u'], {}), '(u)\n', (1732, 1735), True, 'import numpy as np\n'), ((1815, 1824), 'numpy.min', 'np.min', (['u'], {}), '(u)\n', (1821, 1824), True, 'import numpy as np\n'), ((1934, 1953), 'scipy.linalg.sqrtm', 'sp.linalg.sqrtm', (['Mc'], {}), '(Mc)\n', (1949, 1953), True, 'import scipy as sp\n'), ((2005, 2025), 'numpy.argsort', 'np.argsort', (['(-eigvals)'], {}), '(-eigvals)\n', (2015, 2025), True, 'import numpy as np\n'), ((2450, 2468), 'numpy.maximum', 'np.maximum', (['W', 'W.T'], {}), '(W, W.T)\n', (2460, 2468), True, 'import numpy as np\n'), ((2481, 2490), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (2487, 2490), True, 'import numpy as np\n'), ((2553, 2572), 'numpy.eye', 'np.eye', (['M1.shape[0]'], {}), '(M1.shape[0])\n', (2559, 2572), True, 'import numpy as np\n'), ((3611, 3620), 'numpy.min', 'np.min', (['u'], {}), '(u)\n', (3617, 
3620), True, 'import numpy as np\n'), ((3719, 3728), 'numpy.min', 'np.min', (['u'], {}), '(u)\n', (3725, 3728), True, 'import numpy as np\n'), ((4548, 4559), 'scipy.linalg.eig', 'eig', (['M1', 'Mc'], {}), '(M1, Mc)\n', (4551, 4559), False, 'from scipy.linalg import eig, svd, eigh\n'), ((4578, 4594), 'numpy.real', 'np.real', (['eigvals'], {}), '(eigvals)\n', (4585, 4594), True, 'import numpy as np\n'), ((4613, 4629), 'numpy.real', 'np.real', (['eigvecs'], {}), '(eigvecs)\n', (4620, 4629), True, 'import numpy as np\n'), ((6726, 6735), 'numpy.min', 'np.min', (['u'], {}), '(u)\n', (6732, 6735), True, 'import numpy as np\n'), ((7059, 7092), 'scipy.sparse.linalg.eigs', 'eigs', (['(A - ll * B)'], {'k': 'd', 'which': '"""LR"""'}), "(A - ll * B, k=d, which='LR')\n", (7063, 7092), False, 'from scipy.sparse.linalg import eigs\n'), ((7107, 7117), 'numpy.real', 'np.real', (['D'], {}), '(D)\n', (7114, 7117), True, 'import numpy as np\n'), ((7130, 7140), 'numpy.real', 'np.real', (['W'], {}), '(W)\n', (7137, 7140), True, 'import numpy as np\n'), ((7205, 7220), 'scipy.linalg.eig', 'eig', (['(A - ll * B)'], {}), '(A - ll * B)\n', (7208, 7220), False, 'from scipy.linalg import eig, svd, eigh\n'), ((7229, 7239), 'numpy.real', 'np.real', (['D'], {}), '(D)\n', (7236, 7239), True, 'import numpy as np\n'), ((7252, 7262), 'numpy.real', 'np.real', (['W'], {}), '(W)\n', (7259, 7262), True, 'import numpy as np\n'), ((7278, 7292), 'numpy.argsort', 'np.argsort', (['(-D)'], {}), '(-D)\n', (7288, 7292), True, 'import numpy as np\n'), ((860, 900), 'numpy.exp', 'np.exp', (['(-W[W != 0] / (2 * sigma * sigma))'], {}), '(-W[W != 0] / (2 * sigma * sigma))\n', (866, 900), True, 'import numpy as np\n'), ((2104, 2123), 'numpy.argsort', 'np.argsort', (['eigvals'], {}), '(eigvals)\n', (2114, 2123), True, 'import numpy as np\n'), ((2728, 2768), 'numpy.exp', 'np.exp', (['(-W[W != 0] / (2 * sigma * sigma))'], {}), '(-W[W != 0] / (2 * sigma * sigma))\n', (2734, 2768), True, 'import numpy as np\n'), ((4418, 4435), 'numpy.linalg.eig', 'np.linalg.eig', (['M1'], {}), '(M1)\n', (4431, 4435), True, 'import numpy as np\n'), ((4743, 4763), 'numpy.argsort', 'np.argsort', (['(-eigvals)'], {}), '(-eigvals)\n', (4753, 4763), True, 'import numpy as np\n'), ((4992, 5020), 'numpy.linalg.norm', 'np.linalg.norm', (['temp[:, dim]'], {}), '(temp[:, dim])\n', (5006, 5020), True, 'import numpy as np\n'), ((7761, 7786), 'numpy.sum', 'np.sum', (['(llold * betap - D)'], {}), '(llold * betap - D)\n', (7767, 7786), True, 'import numpy as np\n'), ((7783, 7796), 'numpy.sum', 'np.sum', (['betap'], {}), '(betap)\n', (7789, 7796), True, 'import numpy as np\n'), ((915, 932), 'numpy.sum', 'np.sum', (['W'], {'axis': '(0)'}), '(W, axis=0)\n', (921, 932), True, 'import numpy as np\n'), ((1057, 1072), 'pymanopt.manifolds.Grassmann', 'Grassmann', (['D', '(2)'], {}), '(D, 2)\n', (1066, 1072), False, 'from pymanopt.manifolds import Grassmann\n'), ((1759, 1768), 'numpy.min', 'np.min', (['u'], {}), '(u)\n', (1765, 1768), True, 'import numpy as np\n'), ((1769, 1788), 'numpy.eye', 'np.eye', (['M1.shape[0]'], {}), '(M1.shape[0])\n', (1775, 1788), True, 'import numpy as np\n'), ((1848, 1857), 'numpy.min', 'np.min', (['u'], {}), '(u)\n', (1854, 1857), True, 'import numpy as np\n'), ((1858, 1877), 'numpy.eye', 'np.eye', (['Mc.shape[0]'], {}), '(Mc.shape[0])\n', (1864, 1877), True, 'import numpy as np\n'), ((2783, 2800), 'numpy.sum', 'np.sum', (['W'], {'axis': '(0)'}), '(W, axis=0)\n', (2789, 2800), True, 'import numpy as np\n'), ((2924, 2940), 'numpy.zeros', 'np.zeros', (['(N, N)'], 
{}), '((N, N))\n', (2932, 2940), True, 'import numpy as np\n'), ((2953, 2969), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (2961, 2969), True, 'import numpy as np\n'), ((2986, 3009), 'sklearn.metrics.pairwise.pairwise_distances', 'pairwise_distances', (['X.T'], {}), '(X.T)\n', (3004, 3009), False, 'from sklearn.metrics.pairwise import pairwise_distances\n'), ((3324, 3344), 'numpy.maximum', 'np.maximum', (['Gw', 'Gw.T'], {}), '(Gw, Gw.T)\n', (3334, 3344), True, 'import numpy as np\n'), ((3452, 3472), 'numpy.maximum', 'np.maximum', (['Gb', 'Gb.T'], {}), '(Gb, Gb.T)\n', (3462, 3472), True, 'import numpy as np\n'), ((3644, 3653), 'numpy.min', 'np.min', (['u'], {}), '(u)\n', (3650, 3653), True, 'import numpy as np\n'), ((3654, 3673), 'numpy.eye', 'np.eye', (['M1.shape[0]'], {}), '(M1.shape[0])\n', (3660, 3673), True, 'import numpy as np\n'), ((3752, 3761), 'numpy.min', 'np.min', (['u'], {}), '(u)\n', (3758, 3761), True, 'import numpy as np\n'), ((3762, 3781), 'numpy.eye', 'np.eye', (['Mc.shape[0]'], {}), '(Mc.shape[0])\n', (3768, 3781), True, 'import numpy as np\n'), ((4451, 4460), 'numpy.min', 'np.min', (['u'], {}), '(u)\n', (4457, 4460), True, 'import numpy as np\n'), ((4663, 4682), 'scipy.linalg.sqrtm', 'sp.linalg.sqrtm', (['Mc'], {}), '(Mc)\n', (4678, 4682), True, 'import scipy as sp\n'), ((4876, 4895), 'numpy.argsort', 'np.argsort', (['eigvals'], {}), '(eigvals)\n', (4886, 4895), True, 'import numpy as np\n'), ((6759, 6768), 'numpy.min', 'np.min', (['u'], {}), '(u)\n', (6765, 6768), True, 'import numpy as np\n'), ((6769, 6788), 'numpy.eye', 'np.eye', (['Bs.shape[0]'], {}), '(Bs.shape[0])\n', (6775, 6788), True, 'import numpy as np\n'), ((747, 808), 'sklearn.neighbors.kneighbors_graph', 'kneighbors_graph', (['X.T', 'k'], {'mode': '"""distance"""', 'include_self': '(False)'}), "(X.T, k, mode='distance', include_self=False)\n", (763, 808), False, 'from sklearn.neighbors import kneighbors_graph\n'), ((1164, 1180), 'numpy.zeros', 'np.zeros', (['(D, 2)'], {}), '((D, 2))\n', (1172, 1180), True, 'import numpy as np\n'), ((1239, 1261), 'numpy.array', 'np.array', (['[1, 0, 4, 3]'], {}), '([1, 0, 4, 3])\n', (1247, 1261), True, 'import numpy as np\n'), ((1471, 1493), 'numpy.random.shuffle', 'np.random.shuffle', (['ids'], {}), '(ids)\n', (1488, 1493), True, 'import numpy as np\n'), ((2615, 2676), 'sklearn.neighbors.kneighbors_graph', 'kneighbors_graph', (['X.T', 'k'], {'mode': '"""distance"""', 'include_self': '(False)'}), "(X.T, k, mode='distance', include_self=False)\n", (2631, 2676), False, 'from sklearn.neighbors import kneighbors_graph\n'), ((3108, 3135), 'numpy.argsort', 'np.argsort', (['dists[ii, inds]'], {}), '(dists[ii, inds])\n', (3118, 3135), True, 'import numpy as np\n'), ((3244, 3271), 'numpy.argsort', 'np.argsort', (['dists[ii, inds]'], {}), '(dists[ii, inds])\n', (3254, 3271), True, 'import numpy as np\n'), ((3366, 3384), 'numpy.sum', 'np.sum', (['Gw'], {'axis': '(0)'}), '(Gw, axis=0)\n', (3372, 3384), True, 'import numpy as np\n'), ((3494, 3512), 'numpy.sum', 'np.sum', (['Gb'], {'axis': '(0)'}), '(Gb, axis=0)\n', (3500, 3512), True, 'import numpy as np\n'), ((5760, 5787), 'nudged.estimate', 'nudged.estimate', (['emb1', 'emb2'], {}), '(emb1, emb2)\n', (5775, 5787), False, 'import nudged\n'), ((7860, 7878), 'numpy.abs', 'np.abs', (['(ll - llold)'], {}), '(ll - llold)\n', (7866, 7878), True, 'import numpy as np\n'), ((7877, 7890), 'numpy.abs', 'np.abs', (['llold'], {}), '(llold)\n', (7883, 7890), True, 'import numpy as np\n'), ((1544, 1557), 'numpy.sqrt', 'np.sqrt', 
(['card'], {}), '(card)\n', (1551, 1557), True, 'import numpy as np\n'), ((1595, 1608), 'numpy.sqrt', 'np.sqrt', (['card'], {}), '(card)\n', (1602, 1608), True, 'import numpy as np\n'), ((3058, 3084), 'numpy.where', 'np.where', (['(labs == labs[ii])'], {}), '(labs == labs[ii])\n', (3066, 3084), True, 'import numpy as np\n'), ((3194, 3220), 'numpy.where', 'np.where', (['(labs != labs[ii])'], {}), '(labs != labs[ii])\n', (3202, 3220), True, 'import numpy as np\n'), ((4492, 4501), 'numpy.min', 'np.min', (['u'], {}), '(u)\n', (4498, 4501), True, 'import numpy as np\n'), ((4502, 4521), 'numpy.eye', 'np.eye', (['M1.shape[0]'], {}), '(M1.shape[0])\n', (4508, 4521), True, 'import numpy as np\n'), ((5882, 5902), 'numpy.linalg.norm', 'np.linalg.norm', (['emb2'], {}), '(emb2)\n', (5896, 5902), True, 'import numpy as np\n'), ((6073, 6086), 'numpy.array', 'np.array', (['err'], {}), '(err)\n', (6081, 6086), True, 'import numpy as np\n'), ((5832, 5846), 'numpy.array', 'np.array', (['emb2'], {}), '(emb2)\n', (5840, 5846), True, 'import numpy as np\n')] |
import numpy as np
import torch
from torch.autograd import Variable
import copy
def multi_target_epoch(common, decoders, optim, X, targets,
batch_size=16, shuffle=True, train=True):
assert len(decoders) == len(targets)
N = X.size()[0]
for t in targets:
assert t.size()[0] == N
train_X = copy.deepcopy(X)
train_targets = copy.deepcopy(targets)
if shuffle:
p = np.random.permutation(N)
train_X = train_X.numpy()[p]
train_X = torch.from_numpy(train_X)
for i, t in enumerate(train_targets):
train_targets[i] = torch.from_numpy(t.numpy()[p])
nb_batches = N // batch_size
total = nb_batches * batch_size
cum_losses = []
nb_correct = []
for i in range(len(decoders)):
cum_losses.append(0.0)
nb_correct.append(0)
criterion = torch.nn.NLLLoss()
for i in range(nb_batches):
batch_X = Variable(train_X[i*batch_size:(i+1)*batch_size])
batch_ts = []
for t in train_targets:
batch_ts.append(Variable(t[i*batch_size:(i+1)*batch_size]))
repre = common(batch_X)
losses = []
        for j, (dec, t) in enumerate(zip(decoders, batch_ts)):  # j avoids shadowing the batch index i
            y = dec(repre)
            loss = criterion(y, t)
            losses.append(loss)
            _, preds = y.max(dim=1)
            nb_correct[j] += (preds == t).sum().item()  # .item() replaces the deprecated .data[0]
            cum_losses[j] += loss.item()
if train:
complete_loss = sum(losses)
optim.zero_grad()
complete_loss.backward()
optim.step()
return [cl/nb_batches for cl in cum_losses], [c/total for c in nb_correct]
def grouping_reporter(epoch, lr, losses, accs, val_losses, val_accs):
string = "{:>3}, lr {:.3e}".format(epoch, lr)
for l, a in zip(losses, accs):
string += " ({:.3f} {:.3f})".format(l, a)
string += " |"
for l, a in zip(val_losses, val_accs):
string += " ({:.3f} {:.3f})".format(l, a)
return string
def train(common, decoders, params,
train_data, val_data,
nb_epochs, report_interval=25,
reporter=grouping_reporter):
lr = 1e-3
optim = torch.optim.Adam(params, lr=lr)
best_val_loss = float("inf")
patience_init = 5
patience = patience_init
for i in range(nb_epochs):
ce, acc = multi_target_epoch(
common, decoders, optim,
train_data[0], train_data[1]
)
val_ce, val_acc = multi_target_epoch(
common, decoders, optim,
val_data[0], val_data[1], train=False
)
val_loss = sum(val_ce)
if val_loss < best_val_loss:
best_val_loss = val_loss
patience = patience_init
else:
if patience == 0:
                lr *= 0.5  # halve the rate once, not once per parameter group
                for param_group in optim.param_groups:
                    param_group['lr'] = lr
string = reporter(i, lr, ce, acc, val_ce, val_acc)
print(string)
else:
patience -= 1
if lr < 1e-7:
print("stopping training, because of LR being effectively zero")
string = reporter(i, lr, ce, acc, val_ce, val_acc)
print(string)
break
if i % report_interval == report_interval - 1:
string = reporter(i, lr, ce, acc, val_ce, val_acc)
print(string)
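# Hedged usage sketch (illustrative only): wires a shared encoder to two
# classification heads and runs two epochs. The layer sizes, class counts,
# and synthetic tensors are assumptions, not part of the original code.
def _demo_train():
    torch.manual_seed(0)
    common = torch.nn.Sequential(torch.nn.Linear(8, 16), torch.nn.ReLU())
    decoders = [
        torch.nn.Sequential(torch.nn.Linear(16, 3), torch.nn.LogSoftmax(dim=1)),
        torch.nn.Sequential(torch.nn.Linear(16, 2), torch.nn.LogSoftmax(dim=1)),
    ]
    params = list(common.parameters())
    for dec in decoders:
        params += list(dec.parameters())
    X = torch.randn(64, 8)
    targets = [torch.randint(0, 3, (64,)), torch.randint(0, 2, (64,))]
    train(common, decoders, params, (X, targets),
          (X, [t.clone() for t in targets]), nb_epochs=2, report_interval=1)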
| [
"torch.optim.Adam",
"torch.from_numpy",
"torch.nn.NLLLoss",
"copy.deepcopy",
"torch.autograd.Variable",
"numpy.random.permutation"
] | [((338, 354), 'copy.deepcopy', 'copy.deepcopy', (['X'], {}), '(X)\n', (351, 354), False, 'import copy\n'), ((375, 397), 'copy.deepcopy', 'copy.deepcopy', (['targets'], {}), '(targets)\n', (388, 397), False, 'import copy\n'), ((862, 880), 'torch.nn.NLLLoss', 'torch.nn.NLLLoss', ([], {}), '()\n', (878, 880), False, 'import torch\n'), ((2196, 2227), 'torch.optim.Adam', 'torch.optim.Adam', (['params'], {'lr': 'lr'}), '(params, lr=lr)\n', (2212, 2227), False, 'import torch\n'), ((426, 450), 'numpy.random.permutation', 'np.random.permutation', (['N'], {}), '(N)\n', (447, 450), True, 'import numpy as np\n'), ((506, 531), 'torch.from_numpy', 'torch.from_numpy', (['train_X'], {}), '(train_X)\n', (522, 531), False, 'import torch\n'), ((932, 986), 'torch.autograd.Variable', 'Variable', (['train_X[i * batch_size:(i + 1) * batch_size]'], {}), '(train_X[i * batch_size:(i + 1) * batch_size])\n', (940, 986), False, 'from torch.autograd import Variable\n'), ((1063, 1111), 'torch.autograd.Variable', 'Variable', (['t[i * batch_size:(i + 1) * batch_size]'], {}), '(t[i * batch_size:(i + 1) * batch_size])\n', (1071, 1111), False, 'from torch.autograd import Variable\n')] |
"""
atomic reference. Used for predicting extensive properties.
"""
import logging
from typing import List, Optional
import numpy as np
import tensorflow as tf
from ase import Atoms
from pymatgen.core import Element, Molecule, Structure
from m3gnet.config import DataType
from m3gnet.graph import Index
from m3gnet.utils import get_segment_indices_from_n, register
logger = logging.getLogger(__name__)
@register
class BaseAtomRef(tf.keras.layers.Layer):
"""
Base AtomRef that predicts 0 correction
"""
def call(self, graph: List, **kwargs):
"""
Args:
graph (list): list repr of a graph
**kwargs:
Returns: 0
"""
return 0.0
@register
class AtomRef(BaseAtomRef):
"""
Atom reference values. For example, if the average H energy is -20.0, and
the average O energy is -10.0, the AtomRef predicts -20 * 2 + (-10.) =
-50.0 for the atom reference energy for H2O
"""
def __init__(
self,
property_per_element: Optional[np.ndarray] = None,
max_z: int = 94,
**kwargs,
):
"""
Args:
property_per_element (np.ndarray): element reference value
max_z (int): maximum atomic number
**kwargs:
"""
super().__init__(**kwargs)
if property_per_element is None:
self.property_per_element = np.zeros(shape=(max_z + 1,))
else:
self.property_per_element = np.array(property_per_element).ravel()
self.max_z = max_z
def _get_feature_matrix(self, structs_or_graphs):
n = len(structs_or_graphs)
features = np.zeros(shape=(n, self.max_z + 1))
for i, s in enumerate(structs_or_graphs):
if isinstance(s, (Structure, Molecule)):
atomic_numbers = [i.specie.Z for i in s.sites]
elif isinstance(s, (list, np.ndarray)):
atomic_numbers = s
elif isinstance(s, Atoms):
atomic_numbers = s.get_atomic_numbers()
else:
atomic_numbers = s.atoms[:, 0]
features[i] = np.bincount(atomic_numbers, minlength=self.max_z + 1)
return features
def fit(self, structs_or_graphs, properties):
"""
Fit the elemental reference values for the properties
Args:
structs_or_graphs (list): list of graphs or structures
properties (np.ndarray): array of extensive properties
Returns:
"""
features = self._get_feature_matrix(structs_or_graphs)
self.property_per_element = np.linalg.pinv(features.T.dot(features)).dot(features.T.dot(properties))
string_prop = ""
for i, j in enumerate(self.property_per_element):
if abs(j) > 1e-5:
                string_prop += f"{str(Element.from_Z(i))}: {j:.5f} "  # trailing space keeps entries separated
logger.info("Property offset values: " + string_prop)
return True
def transform(self, structs_or_graphs, properties):
"""
Correct the extensive properties by subtracting the atom reference
values
Args:
structs_or_graphs (list): list of graphs or structures
properties (np.ndarray): array of extensive properties
Returns: corrected property values
"""
properties = np.array(properties)
atom_properties = self.predict_properties(structs_or_graphs)
return properties - np.reshape(atom_properties, properties.shape)
def inverse_transform(self, structs_or_graphs, properties):
"""
Take the transformed values and get the original values
Args:
structs_or_graphs (list): list of graphs or structures
properties (np.ndarray): array of extensive properties
Returns: corrected property values
"""
properties = np.array(properties)
property_atoms = self.predict_properties(structs_or_graphs)
final_properties = properties + np.reshape(property_atoms, properties.shape)
return final_properties
def predict_properties(self, structs_or_graphs):
"""
Args:
structs_or_graphs (list): calculate the atom summed property
values
Returns:
"""
if not isinstance(structs_or_graphs, list):
structs_or_graphs = [structs_or_graphs]
features = self._get_feature_matrix(structs_or_graphs)
return features.dot(self.property_per_element)
def call(self, graph: List, **kwargs):
"""
Args:
graph (list): a list repr of a graph
**kwargs:
Returns:
"""
atomic_numbers = graph[Index.ATOMS][:, 0]
atom_energies = tf.gather(tf.cast(self.property_per_element, DataType.tf_float), atomic_numbers)
res = tf.math.segment_sum(atom_energies, get_segment_indices_from_n(graph[Index.N_ATOMS]))
return tf.reshape(res, (-1, 1))
def set_property_per_element(self, property_per_element):
"""
Set the property per atom value
Args:
property_per_element (np.ndarray): array of elemental properties,
the i-th row is the elemental value for atomic number i.
Returns:
"""
self.property_per_element = property_per_element
def get_config(self):
"""
Get dict config for serialization
Returns (dict):
"""
config = super().get_config()
config.update(**{"property_per_element": self.property_per_element, "max_z": self.max_z})
return config
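# Hedged usage sketch (illustrative only, not part of the library): fits the
# elemental reference directly on atomic-number lists, which
# _get_feature_matrix accepts alongside pymatgen/ASE structures. The
# compositions and energies below are made-up numbers.
def _demo_atom_ref():
    ref = AtomRef(max_z=10)
    structures = [[1, 1, 8], [1, 1, 1, 1, 8]]    # H2O and a hypothetical H4O
    energies = [-50.0, -90.0]
    ref.fit(structures, energies)                # recovers H=-20, O=-10
    print(ref.predict_properties([[1, 8]]))      # -> about -30.0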
| [
"logging.getLogger",
"numpy.reshape",
"pymatgen.core.Element.from_Z",
"numpy.array",
"numpy.zeros",
"m3gnet.utils.get_segment_indices_from_n",
"tensorflow.reshape",
"tensorflow.cast",
"numpy.bincount"
] | [((378, 405), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (395, 405), False, 'import logging\n'), ((1662, 1697), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, self.max_z + 1)'}), '(shape=(n, self.max_z + 1))\n', (1670, 1697), True, 'import numpy as np\n'), ((3336, 3356), 'numpy.array', 'np.array', (['properties'], {}), '(properties)\n', (3344, 3356), True, 'import numpy as np\n'), ((3866, 3886), 'numpy.array', 'np.array', (['properties'], {}), '(properties)\n', (3874, 3886), True, 'import numpy as np\n'), ((4939, 4963), 'tensorflow.reshape', 'tf.reshape', (['res', '(-1, 1)'], {}), '(res, (-1, 1))\n', (4949, 4963), True, 'import tensorflow as tf\n'), ((1404, 1432), 'numpy.zeros', 'np.zeros', ([], {'shape': '(max_z + 1,)'}), '(shape=(max_z + 1,))\n', (1412, 1432), True, 'import numpy as np\n'), ((2137, 2190), 'numpy.bincount', 'np.bincount', (['atomic_numbers'], {'minlength': '(self.max_z + 1)'}), '(atomic_numbers, minlength=self.max_z + 1)\n', (2148, 2190), True, 'import numpy as np\n'), ((3454, 3499), 'numpy.reshape', 'np.reshape', (['atom_properties', 'properties.shape'], {}), '(atom_properties, properties.shape)\n', (3464, 3499), True, 'import numpy as np\n'), ((3995, 4039), 'numpy.reshape', 'np.reshape', (['property_atoms', 'properties.shape'], {}), '(property_atoms, properties.shape)\n', (4005, 4039), True, 'import numpy as np\n'), ((4754, 4807), 'tensorflow.cast', 'tf.cast', (['self.property_per_element', 'DataType.tf_float'], {}), '(self.property_per_element, DataType.tf_float)\n', (4761, 4807), True, 'import tensorflow as tf\n'), ((4874, 4922), 'm3gnet.utils.get_segment_indices_from_n', 'get_segment_indices_from_n', (['graph[Index.N_ATOMS]'], {}), '(graph[Index.N_ATOMS])\n', (4900, 4922), False, 'from m3gnet.utils import get_segment_indices_from_n, register\n'), ((1487, 1517), 'numpy.array', 'np.array', (['property_per_element'], {}), '(property_per_element)\n', (1495, 1517), True, 'import numpy as np\n'), ((2840, 2857), 'pymatgen.core.Element.from_Z', 'Element.from_Z', (['i'], {}), '(i)\n', (2854, 2857), False, 'from pymatgen.core import Element, Molecule, Structure\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
NEZHA (NEural contextualiZed representation for CHinese lAnguage understanding) is the Chinese pretrained language
model currently based on BERT developed by Huawei.
1. Prepare data
Following the data preparation as in BERT, run command as below to get dataset for training:
python ./create_pretraining_data.py \
--input_file=./sample_text.txt \
--output_file=./examples.tfrecord \
--vocab_file=./your/path/vocab.txt \
--do_lower_case=True \
--max_seq_length=128 \
--max_predictions_per_seq=20 \
--masked_lm_prob=0.15 \
--random_seed=12345 \
--dupe_factor=5
2. Pretrain
First, prepare the distributed training environment, then adjust configurations in config.py, finally run train.py.
"""
import os
import numpy as np
from config import bert_train_cfg, bert_net_cfg
import mindspore.dataset.engine.datasets as de
import mindspore._c_dataengine as deMap
from mindspore import context
from mindspore.common.tensor import Tensor
from mindspore.train.model import Model
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor
from mindspore.model_zoo.Bert_NEZHA import BertNetworkWithLoss, BertTrainOneStepCell
from mindspore.nn.optim import Lamb
_current_dir = os.path.dirname(os.path.realpath(__file__))
def create_train_dataset(batch_size):
"""create train dataset"""
# apply repeat operations
repeat_count = bert_train_cfg.epoch_size
ds = de.StorageDataset([bert_train_cfg.DATA_DIR], bert_train_cfg.SCHEMA_DIR,
columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels",
"masked_lm_positions", "masked_lm_ids", "masked_lm_weights"])
type_cast_op = deMap.TypeCastOp("int32")
ds = ds.map(input_columns="masked_lm_ids", operations=type_cast_op)
ds = ds.map(input_columns="masked_lm_positions", operations=type_cast_op)
ds = ds.map(input_columns="next_sentence_labels", operations=type_cast_op)
ds = ds.map(input_columns="segment_ids", operations=type_cast_op)
ds = ds.map(input_columns="input_mask", operations=type_cast_op)
ds = ds.map(input_columns="input_ids", operations=type_cast_op)
# apply batch operations
ds = ds.batch(batch_size, drop_remainder=True)
ds = ds.repeat(repeat_count)
return ds
def weight_variable(shape):
"""weight variable"""
np.random.seed(1)
ones = np.random.uniform(-0.1, 0.1, size=shape).astype(np.float32)
return Tensor(ones)
def train_bert():
"""train bert"""
context.set_context(mode=context.GRAPH_MODE)
context.set_context(device_target="Ascend")
context.set_context(enable_task_sink=True)
context.set_context(enable_loop_sink=True)
context.set_context(enable_mem_reuse=True)
ds = create_train_dataset(bert_net_cfg.batch_size)
netwithloss = BertNetworkWithLoss(bert_net_cfg, True)
optimizer = Lamb(netwithloss.trainable_params(), decay_steps=bert_train_cfg.decay_steps,
start_learning_rate=bert_train_cfg.start_learning_rate,
end_learning_rate=bert_train_cfg.end_learning_rate, power=bert_train_cfg.power,
warmup_steps=bert_train_cfg.num_warmup_steps, decay_filter=lambda x: False)
netwithgrads = BertTrainOneStepCell(netwithloss, optimizer=optimizer)
netwithgrads.set_train(True)
model = Model(netwithgrads)
config_ck = CheckpointConfig(save_checkpoint_steps=bert_train_cfg.save_checkpoint_steps,
keep_checkpoint_max=bert_train_cfg.keep_checkpoint_max)
ckpoint_cb = ModelCheckpoint(prefix=bert_train_cfg.checkpoint_prefix, config=config_ck)
model.train(ds.get_repeat_count(), ds, callbacks=[LossMonitor(), ckpoint_cb], dataset_sink_mode=False)
if __name__ == '__main__':
train_bert()
| [
"mindspore.train.callback.CheckpointConfig",
"mindspore._c_dataengine.TypeCastOp",
"mindspore.train.model.Model",
"mindspore.dataset.engine.datasets.StorageDataset",
"mindspore.context.set_context",
"os.path.realpath",
"mindspore.common.tensor.Tensor",
"numpy.random.uniform",
"mindspore.train.callba... | [((1934, 1960), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1950, 1960), False, 'import os\n'), ((2116, 2341), 'mindspore.dataset.engine.datasets.StorageDataset', 'de.StorageDataset', (['[bert_train_cfg.DATA_DIR]', 'bert_train_cfg.SCHEMA_DIR'], {'columns_list': "['input_ids', 'input_mask', 'segment_ids', 'next_sentence_labels',\n 'masked_lm_positions', 'masked_lm_ids', 'masked_lm_weights']"}), "([bert_train_cfg.DATA_DIR], bert_train_cfg.SCHEMA_DIR,\n columns_list=['input_ids', 'input_mask', 'segment_ids',\n 'next_sentence_labels', 'masked_lm_positions', 'masked_lm_ids',\n 'masked_lm_weights'])\n", (2133, 2341), True, 'import mindspore.dataset.engine.datasets as de\n'), ((2417, 2442), 'mindspore._c_dataengine.TypeCastOp', 'deMap.TypeCastOp', (['"""int32"""'], {}), "('int32')\n", (2433, 2442), True, 'import mindspore._c_dataengine as deMap\n'), ((3065, 3082), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (3079, 3082), True, 'import numpy as np\n'), ((3165, 3177), 'mindspore.common.tensor.Tensor', 'Tensor', (['ones'], {}), '(ones)\n', (3171, 3177), False, 'from mindspore.common.tensor import Tensor\n'), ((3222, 3266), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE'}), '(mode=context.GRAPH_MODE)\n', (3241, 3266), False, 'from mindspore import context\n'), ((3271, 3314), 'mindspore.context.set_context', 'context.set_context', ([], {'device_target': '"""Ascend"""'}), "(device_target='Ascend')\n", (3290, 3314), False, 'from mindspore import context\n'), ((3319, 3361), 'mindspore.context.set_context', 'context.set_context', ([], {'enable_task_sink': '(True)'}), '(enable_task_sink=True)\n', (3338, 3361), False, 'from mindspore import context\n'), ((3366, 3408), 'mindspore.context.set_context', 'context.set_context', ([], {'enable_loop_sink': '(True)'}), '(enable_loop_sink=True)\n', (3385, 3408), False, 'from mindspore import context\n'), ((3413, 3455), 'mindspore.context.set_context', 'context.set_context', ([], {'enable_mem_reuse': '(True)'}), '(enable_mem_reuse=True)\n', (3432, 3455), False, 'from mindspore import context\n'), ((3529, 3568), 'mindspore.model_zoo.Bert_NEZHA.BertNetworkWithLoss', 'BertNetworkWithLoss', (['bert_net_cfg', '(True)'], {}), '(bert_net_cfg, True)\n', (3548, 3568), False, 'from mindspore.model_zoo.Bert_NEZHA import BertNetworkWithLoss, BertTrainOneStepCell\n'), ((3956, 4010), 'mindspore.model_zoo.Bert_NEZHA.BertTrainOneStepCell', 'BertTrainOneStepCell', (['netwithloss'], {'optimizer': 'optimizer'}), '(netwithloss, optimizer=optimizer)\n', (3976, 4010), False, 'from mindspore.model_zoo.Bert_NEZHA import BertNetworkWithLoss, BertTrainOneStepCell\n'), ((4056, 4075), 'mindspore.train.model.Model', 'Model', (['netwithgrads'], {}), '(netwithgrads)\n', (4061, 4075), False, 'from mindspore.train.model import Model\n'), ((4092, 4228), 'mindspore.train.callback.CheckpointConfig', 'CheckpointConfig', ([], {'save_checkpoint_steps': 'bert_train_cfg.save_checkpoint_steps', 'keep_checkpoint_max': 'bert_train_cfg.keep_checkpoint_max'}), '(save_checkpoint_steps=bert_train_cfg.save_checkpoint_steps,\n keep_checkpoint_max=bert_train_cfg.keep_checkpoint_max)\n', (4108, 4228), False, 'from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor\n'), ((4275, 4349), 'mindspore.train.callback.ModelCheckpoint', 'ModelCheckpoint', ([], {'prefix': 'bert_train_cfg.checkpoint_prefix', 'config': 'config_ck'}), '(prefix=bert_train_cfg.checkpoint_prefix, 
config=config_ck)\n', (4290, 4349), False, 'from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor\n'), ((3094, 3134), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)'], {'size': 'shape'}), '(-0.1, 0.1, size=shape)\n', (3111, 3134), True, 'import numpy as np\n'), ((4404, 4417), 'mindspore.train.callback.LossMonitor', 'LossMonitor', ([], {}), '()\n', (4415, 4417), False, 'from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor\n')] |
"""
Dans_Diffraction Examples
Compare calculated reflection intensities for a CIF across different software packages
"""
import numpy as np
import Dans_Diffraction as dif
print(dif.version_info())
xtl = dif.Crystal('~/Downloads/Rutile.cif')
# The CIF doesn't include isotropic thermal parameters; the Dans_Diffraction default is 0, whereas Vesta defaults to B=1
xtl.Atoms.changeatom([0, 1], uiso=0.0126651) # B=1
xtl.generate_structure()
# Vesta uses scattering factors from "Waasmaier and Kirfel, Acta Cryst. (1995) A51, 416-431"
# Dans_Diffraction uses scattering factors from Internaltion Tables Vol C., set the Waasmaier coefficients:
xtl.Scatter.setup_scatter(energy_kev=8.048, scattering_factors='waaskirf')
# Vesta intensity (http://jp-minerals.org/vesta/en/)
# h k l d (Å) F(real) F(imag) |F| 2θ I M
# 1 1 0 3.243499 37.053131 0.000000 37.0531 27.47676 100.00000 4
# 1 0 1 2.483556 23.439516 0.000000 23.4395 36.13751 44.32719 8
# 2 0 0 2.293500 13.943417 0.000000 13.9434 39.24969 6.53618 4
# 1 1 1 2.183984 -17.270726 0.000000 17.2707 41.30531 17.90116 8
hkl = [[1, 1, 0], [0, 1, 1], [0, 2, 0], [1, 1, 1]]  # (0,1,1) and (0,2,0) are symmetry-equivalent to Vesta's (1,0,1) and (2,0,0) in tetragonal rutile
inten_vesta = np.array([37.0531, 23.4395, 13.9434, 17.2707]) ** 2
inten_disp = np.array([37.5527, 23.9331, 14.5057, 17.4241]) ** 2 # with x-ray dispersion effects
inten_neut = np.array([3.97176, 14.0359, 23.4644, 19.4905]) ** 2 # Neutron scattering lengths
inten_dift = np.array([44.4472885365421, 20.3801391390013, 3.04824911713064, 8.42516396817784])**2 # From CrystalDiffract
intensity = xtl.Scatter.x_ray(hkl) # Dans_Diffraction
imax = intensity[0]
print('\n Dans_Diffraction Vesta CrystalDiffract')
print(' (h,k,l) I I/I110 I I/I110 I I/I110')
for n in range(len(hkl)):
print('%20s %8.2f %8.2f %8.2f %8.2f %8.2f %8.2f' % (
hkl[n], intensity[n], 100 * intensity[n] / imax,
inten_vesta[n], 100 * inten_vesta[n] / inten_vesta[0],
inten_dift[n], 100 * inten_dift[n] / inten_dift[0]
))
| [
"Dans_Diffraction.Crystal",
"numpy.array",
"Dans_Diffraction.version_info"
] | [((198, 235), 'Dans_Diffraction.Crystal', 'dif.Crystal', (['"""~/Downloads/Rutile.cif"""'], {}), "('~/Downloads/Rutile.cif')\n", (209, 235), True, 'import Dans_Diffraction as dif\n'), ((171, 189), 'Dans_Diffraction.version_info', 'dif.version_info', ([], {}), '()\n', (187, 189), True, 'import Dans_Diffraction as dif\n'), ((1295, 1341), 'numpy.array', 'np.array', (['[37.0531, 23.4395, 13.9434, 17.2707]'], {}), '([37.0531, 23.4395, 13.9434, 17.2707])\n', (1303, 1341), True, 'import numpy as np\n'), ((1360, 1406), 'numpy.array', 'np.array', (['[37.5527, 23.9331, 14.5057, 17.4241]'], {}), '([37.5527, 23.9331, 14.5057, 17.4241])\n', (1368, 1406), True, 'import numpy as np\n'), ((1458, 1504), 'numpy.array', 'np.array', (['[3.97176, 14.0359, 23.4644, 19.4905]'], {}), '([3.97176, 14.0359, 23.4644, 19.4905])\n', (1466, 1504), True, 'import numpy as np\n'), ((1553, 1640), 'numpy.array', 'np.array', (['[44.4472885365421, 20.3801391390013, 3.04824911713064, 8.42516396817784]'], {}), '([44.4472885365421, 20.3801391390013, 3.04824911713064, \n 8.42516396817784])\n', (1561, 1640), True, 'import numpy as np\n')] |
"""Encoding
Provides sklearn-compatible transformer classes for categorical encoding:
* :class:`.NullEncoder`
* :class:`.LabelEncoder`
* :class:`.OneHotEncoder`
* :class:`.TargetEncoder`
* :class:`.TargetEncoderCV`
* :class:`.TargetEncoderLOO`
* :class:`.TextMultiLabelBinarizer`
* :class:`.NhotEncoder`
* :class:`.JsonEncoder`
* :class:`.JoinTransformer`
Also provides functions to simply return an encoded DataFrame:
* :func:`.null_encode`
* :func:`.label_encode`
* :func:`.one_hot_encode`
* :func:`.target_encode`
* :func:`.target_encode_cv`
* :func:`.target_encode_loo`
"""
import ast
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import KFold
from sklearn.exceptions import NotFittedError
class NullEncoder(BaseEstimator, TransformerMixin):
"""Null encoder.
For each column with null values, adds a column containing indicators
as to whether each sample in original column is null.
"""
def __init__(self, cols=None, suffix='_isnull', dtype='uint8',
delete_old=False, nocol=None):
"""Null encoder.
Parameters
----------
cols : list of str
Columns to null encode. Default is to null encode all columns in
the DataFrame which contain null values.
suffix : str
Suffix to append to original column names to create null indicator
column names
dtype : str
Datatype to use for encoded columns.
Default = 'uint8'
delete_old : bool
Whether to delete the old column which was encoded
Default = False
nocol : None or str
Action to take if a col in ``cols`` is not in the dataframe to
transform. Valid values:
* None - ignore cols in ``cols`` which are not in dataframe
* 'warn' - issue a warning when a column is not in dataframe
* 'err' - raise an error when a column is not in dataframe
"""
# Check types
if not isinstance(suffix, str):
raise TypeError('suffix must be a string')
if cols is not None and not isinstance(cols, (list, str)):
raise TypeError('cols must be None, or a list or a string')
if isinstance(cols, list):
if not all(isinstance(c, str) for c in cols):
raise TypeError('each element of cols must be a string')
if not isinstance(dtype, str):
            raise TypeError('dtype must be a string (e.g. \'uint8\')')
if nocol is not None and nocol not in ('warn', 'err'):
raise ValueError('nocol must be None, \'warn\', or \'err\'')
if not isinstance(delete_old, bool):
raise TypeError('delete_old must be True or False')
# Store parameters
if isinstance(cols, str):
self.cols = [cols]
else:
self.cols = cols
self.suffix = suffix
self.dtype = dtype
self.delete_old = delete_old
self.nocol = nocol
def fit(self, X, y):
"""Fit null encoder to X and y.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
NullEncoder
Returns self, the fit object.
"""
# Encode all columns with any null values by default
if self.cols is None:
self.cols = [c for c in X if X[c].isnull().sum() > 0]
# Check columns are in X
if self.nocol == 'err':
for col in self.cols:
if col not in X:
raise ValueError('Column \''+col+'\' not in X')
elif self.nocol == 'warn':
for col in self.cols:
if col not in X:
print('Column \''+col+'\' not in X')
# Return fit object
return self
def transform(self, X, y=None):
"""Perform the null encoding transformation.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
# Add null indicator column for each original column
Xo = X.copy()
for col in self.cols:
Xo[col+self.suffix] = X[col].isnull().astype(self.dtype)
if self.delete_old:
del Xo[col]
# Return encoded dataframe
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data with null encoding.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
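# Hedged usage sketch (the toy frame below is an assumption, added for
# illustration): marks which rows of 'a' were missing before any imputation.
def _demo_null_encoder():
    X = pd.DataFrame({'a': [1.0, None, 3.0], 'b': ['x', 'y', 'z']})
    y = pd.Series([0, 1, 0])
    print(NullEncoder().fit_transform(X, y))   # adds an 'a_isnull' column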
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Label encoder.
Replaces categorical column(s) with integer labels for each unique
category in original column.
"""
def __init__(self, cols=None, nocol=None):
"""Label encoder.
Parameters
----------
cols : list of str
Columns to label encode. Default is to label encode all
categorical columns in the DataFrame.
nocol : None or str
Action to take if a col in ``cols`` is not in the dataframe to
transform. Valid values:
* None - ignore cols in ``cols`` which are not in dataframe
* 'warn' - issue a warning when a column is not in dataframe
* 'err' - raise an error when a column is not in dataframe
"""
# Check types
if cols is not None and not isinstance(cols, (list, str)):
raise TypeError('cols must be None, or a list or a string')
if isinstance(cols, list):
if not all(isinstance(c, str) for c in cols):
raise TypeError('each element of cols must be a string')
if nocol is not None and nocol not in ('warn', 'err'):
raise ValueError('nocol must be None, \'warn\', or \'err\'')
# Store parameters
self.nocol = nocol
if isinstance(cols, str):
self.cols = [cols]
else:
self.cols = cols
def fit(self, X, y):
"""Fit label encoder to X and y.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
LabelEncoder
Returns self, the fit object.
"""
# Encode all categorical cols by default
if self.cols is None:
self.cols = [c for c in X if str(X[c].dtype)=='object']
# Check columns are in X
if self.nocol == 'err':
for col in self.cols:
if col not in X:
raise ValueError('Column \''+col+'\' not in X')
elif self.nocol == 'warn':
for col in self.cols:
if col not in X:
print('Column \''+col+'\' not in X')
# Create the map from objects to integers for each column
self.maps = dict()
for col in self.cols:
Xu = X[col].dropna().unique()
self.maps[col] = dict(zip(Xu, np.arange(len(Xu))))
# Return fit object
return self
def transform(self, X, y=None):
"""Perform the label encoding transformation.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
Xo = X.copy()
for col, tmap in self.maps.items():
# Map the column
Xo[col] = Xo[col].map(tmap)
# Convert to appropriate datatype
max_val = max(tmap.values())
if X[col].isnull().any(): #nulls, so need to use float!
if max_val < 8388608:
dtype = 'float32'
else:
dtype = 'float64'
else:
if max_val < 256:
dtype = 'uint8'
elif max_val < 65536:
dtype = 'uint16'
elif max_val < 4294967296:
dtype = 'uint32'
else:
dtype = 'uint64'
Xo[col] = Xo[col].astype(dtype)
# Return encoded dataframe
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data with label encoding.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
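# Hedged usage sketch (toy data is assumed): maps each category of the
# object column 'b' to a small unsigned integer code.
def _demo_label_encoder():
    X = pd.DataFrame({'b': ['low', 'high', 'low', 'mid']})
    y = pd.Series([0, 1, 0, 1])
    print(LabelEncoder().fit_transform(X, y))   # 'b' becomes uint8 codes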
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""One-hot encoder.
Replaces categorical column(s) with binary columns for each unique value
in original column.
"""
def __init__(self, cols=None, reduce_df=False, dtype='uint8', nocol=None):
"""One-hot encoder.
Parameters
----------
cols : list of str
Columns to one-hot encode. Default is to one-hot encode all
categorical columns in the DataFrame.
reduce_df : bool
Whether to use reduced degrees of freedom for encoding (that is,
add N-1 one-hot columns for a column with N categories). E.g. for
a column with categories A, B, and C: When reduce_df is True,
A=[1, 0], B=[0, 1], and C=[0, 0]. When reduce_df is False,
A=[1, 0, 0], B=[0, 1, 0], and C=[0, 0, 1].
Default = False
dtype : str
Datatype to use for encoded columns. Default = 'uint8'
nocol : None or str
Action to take if a col in ``cols`` is not in the dataframe to
transform. Valid values:
* None - ignore cols in ``cols`` which are not in dataframe
* 'warn' - issue a warning when a column is not in dataframe
* 'err' - raise an error when a column is not in dataframe
"""
# Check types
if cols is not None and not isinstance(cols, (list, str)):
raise TypeError('cols must be None, or a list or a string')
if isinstance(cols, list):
if not all(isinstance(c, str) for c in cols):
raise TypeError('each element of cols must be a string')
if not isinstance(reduce_df, bool):
raise TypeError('reduce_df must be True or False')
if not isinstance(dtype, str):
            raise TypeError('dtype must be a string (e.g. \'uint8\')')
if nocol is not None and nocol not in ('warn', 'err'):
raise ValueError('nocol must be None, \'warn\', or \'err\'')
# Store parameters
if isinstance(cols, str):
self.cols = [cols]
else:
self.cols = cols
self.reduce_df = reduce_df
self.dtype = dtype
self.nocol = nocol
def fit(self, X, y):
"""Fit one-hot encoder to X and y
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
OneHotEncoder
Returns self, the fit object.
"""
# Encode all categorical cols by default
if self.cols is None:
self.cols = [c for c in X
if str(X[c].dtype)=='object']
# Check columns are in X
if self.nocol == 'err':
for col in self.cols:
if col not in X:
raise ValueError('Column \''+col+'\' not in X')
elif self.nocol == 'warn':
for col in self.cols:
if col not in X:
print('Column \''+col+'\' not in X')
# Store each unique value
self.maps = dict()
for col in self.cols:
self.maps[col] = []
uniques = X[col].unique()
for unique in uniques:
self.maps[col].append(unique)
# Remove last degree of freedom
if self.reduce_df:
for col in self.cols:
del self.maps[col][-1]
# Return fit object
return self
def transform(self, X, y=None):
"""Perform the one-hot encoding transformation.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
Xo = X.copy()
for col, vals in self.maps.items():
for val in vals:
new_col = col+'_'+str(val)
Xo[new_col] = (Xo[col]==val).astype(self.dtype)
del Xo[col]
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data with one-hot encoding.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
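# Hedged usage sketch (toy data is assumed): expands 'color' into one
# indicator column per category; reduce_df=True would drop the last one.
def _demo_one_hot_encoder():
    X = pd.DataFrame({'color': ['r', 'g', 'b', 'g']})
    y = pd.Series([1, 0, 1, 0])
    print(OneHotEncoder().fit_transform(X, y))   # color_r, color_g, color_b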
class TargetEncoder(BaseEstimator, TransformerMixin):
"""Target encoder.
Replaces category values in categorical column(s) with the mean target
(dependent variable) value for each category.
"""
def __init__(self, cols=None, dtype='float64', nocol=None):
"""Target encoder.
Parameters
----------
cols : str or list of str
Column(s) to target encode. Default is to target encode all
categorical columns in the DataFrame.
dtype : str
Datatype to use for encoded columns. Default = 'float64'
nocol : None or str
Action to take if a col in ``cols`` is not in the dataframe to
transform. Valid values:
* None - ignore cols in ``cols`` which are not in dataframe
* 'warn' - issue a warning when a column is not in dataframe
* 'err' - raise an error when a column is not in dataframe
"""
# Check types
if cols is not None and not isinstance(cols, (list, str)):
raise TypeError('cols must be None, or a list or a string')
if isinstance(cols, list):
if not all(isinstance(c, str) for c in cols):
raise TypeError('each element of cols must be a string')
if not isinstance(dtype, str):
            raise TypeError('dtype must be a string (e.g. \'uint8\')')
if nocol is not None and nocol not in ('warn', 'err'):
raise ValueError('nocol must be None, \'warn\', or \'err\'')
# Store parameters
if isinstance(cols, str):
self.cols = [cols]
else:
self.cols = cols
self.dtype = dtype
self.nocol = nocol
def fit(self, X, y):
"""Fit target encoder to X and y.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
TargetEncoder
Returns self, the fit object.
"""
# Encode all categorical cols by default
if self.cols is None:
self.cols = [col for col in X if str(X[col].dtype)=='object']
# Check columns are in X
if self.nocol == 'err':
for col in self.cols:
if col not in X:
raise ValueError('Column \''+col+'\' not in X')
elif self.nocol == 'warn':
for col in self.cols:
if col not in X:
print('Column \''+col+'\' not in X')
# Encode each element of each column
self.maps = dict()
for col in self.cols:
if col in X:
tmap = dict()
uniques = X[col].unique()
for unique in uniques:
tmap[unique] = y[X[col]==unique].mean()
self.maps[col] = tmap
# Return fit object
return self
def transform(self, X, y=None):
"""Perform the target encoding transformation.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
Xo = X.copy()
for col, tmap in self.maps.items():
vals = np.full(X.shape[0], np.nan, dtype=self.dtype)
for val, mean_target in tmap.items():
vals[X[col]==val] = mean_target
Xo[col] = vals
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data with target encoding.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
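# Hedged usage sketch (toy data is assumed): each city is replaced by the
# mean of y over the rows sharing that city.
def _demo_target_encoder():
    X = pd.DataFrame({'city': ['a', 'a', 'b', 'b']})
    y = pd.Series([1.0, 3.0, 10.0, 20.0])
    print(TargetEncoder().fit_transform(X, y))   # a -> 2.0, b -> 15.0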
class TargetEncoderCV(BaseEstimator, TransformerMixin):
"""Cross-fold target encoder.
Replaces category values in categorical column(s) with the mean target
(dependent variable) value for each category, using a cross-fold strategy
such that no sample's target value is used in computing the target mean
which is used to replace that sample's category value.
"""
def __init__(self, cols=None, n_splits=3, shuffle=True, dtype='float64',
nocol=None):
"""Cross-fold target encoder.
Parameters
----------
cols : str or list of str
Column(s) to target encode. Default is to target encode all
categorical columns in the DataFrame.
n_splits : int
Number of cross-fold splits. Default = 3.
shuffle : bool
Whether to shuffle the data when splitting into folds.
dtype : str
Datatype to use for encoded columns. Default = 'float64'
nocol : None or str
Action to take if a col in ``cols`` is not in the dataframe to
transform. Valid values:
* None - ignore cols in ``cols`` which are not in dataframe
* 'warn' - issue a warning when a column is not in dataframe
* 'err' - raise an error when a column is not in dataframe
"""
# Check types
if cols is not None and not isinstance(cols, (list, str)):
raise TypeError('cols must be None, or a list or a string')
if isinstance(cols, list):
if not all(isinstance(c, str) for c in cols):
raise TypeError('each element of cols must be a string')
if not isinstance(n_splits, int):
raise TypeError('n_splits must be an integer')
if n_splits < 1:
raise ValueError('n_splits must be positive')
if not isinstance(shuffle, bool):
raise TypeError('shuffle must be True or False')
if not isinstance(dtype, str):
            raise TypeError('dtype must be a string (e.g. \'float64\')')
if nocol is not None and nocol not in ('warn', 'err'):
raise ValueError('nocol must be None, \'warn\', or \'err\'')
# Store parameters
if isinstance(cols, str):
self.cols = [cols]
else:
self.cols = cols
self.n_splits = n_splits
self.shuffle = shuffle
self.dtype = dtype
self.nocol = nocol
def fit(self, X, y):
"""Fit cross-fold target encoder to X and y.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
TargetEncoderCV
Returns self, the fit object.
"""
self._target_encoder = TargetEncoder(cols=self.cols, nocol=self.nocol)
self._target_encoder.fit(X, y)
return self
def transform(self, X, y=None):
"""Perform the target encoding transformation.
Uses cross-fold target encoding when given training data, and uses
normal target encoding when given test data.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
# Use target encoding from fit() if this is test data
if y is None:
return self._target_encoder.transform(X)
# Compute means for each fold
self._train_ix = []
self._test_ix = []
self._fit_tes = []
kf = KFold(n_splits=self.n_splits, shuffle=self.shuffle)
for train_ix, test_ix in kf.split(X):
self._train_ix.append(train_ix)
self._test_ix.append(test_ix)
te = TargetEncoder(cols=self.cols)
self._fit_tes.append(te.fit(X.iloc[train_ix,:], y.iloc[train_ix]))
# Apply means across folds
Xo = X.copy()
for ix in range(len(self._test_ix)):
test_ix = self._test_ix[ix]
Xo.iloc[test_ix,:] = self._fit_tes[ix].transform(X.iloc[test_ix,:])
# Return transformed DataFrame
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data with cross-fold target encoding.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
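# Note on the cross-fold strategy above: calling fit_transform(X, y) (with y)
# encodes each KFold test fold using target means computed only from the other
# folds, so a sample's own target value never leaks into its encoding. Calling
# transform(X) without y (i.e. on test data) falls back to the plain
# TargetEncoder fitted on the full training set in fit().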
class TargetEncoderLOO(BaseEstimator, TransformerMixin):
"""Leave-one-out target encoder.
Replaces category values in categorical column(s) with the mean target
(dependent variable) value for each category, using a leave-one-out
strategy such that no sample's target value is used in computing the
target mean which is used to replace that sample's category value.
Can also optionally use a Bayesian estimation of the sample's mean target
value, which sets a prior to the average of all encoding values, with the
strength of that prior proportional to the ``bayesian_c`` parameter.
"""
def __init__(self, cols=None, dtype='float64', nocol=None,
bayesian_c=None):
"""Leave-one-out target encoder.
Parameters
----------
cols : str or list of str
Column(s) to target encode. Default is to target encode all
categorical columns in the DataFrame.
dtype : str
Datatype to use for encoded columns. Default = 'float64'
bayesian_c : float
Prior strength (C) for the Bayesian average
https://en.wikipedia.org/wiki/Bayesian_average
nocol : None or str
Action to take if a col in ``cols`` is not in the dataframe to
transform. Valid values:
* None - ignore cols in ``cols`` which are not in dataframe
* 'warn' - issue a warning when a column is not in dataframe
* 'err' - raise an error when a column is not in dataframe
"""
# Check types
if cols is not None and not isinstance(cols, (list, str)):
raise TypeError('cols must be None, or a list or a string')
if isinstance(cols, list):
if not all(isinstance(c, str) for c in cols):
raise TypeError('each element of cols must be a string')
if not isinstance(dtype, str):
            raise TypeError('dtype must be a string (e.g. \'float64\')')
if nocol is not None and nocol not in ('warn', 'err'):
raise ValueError('nocol must be None, \'warn\', or \'err\'')
if bayesian_c is not None and not isinstance(bayesian_c, (float, int)):
raise TypeError('bayesian_c must be None or float or int')
# Store parameters
if isinstance(cols, str):
self.cols = [cols]
else:
self.cols = cols
self.dtype = dtype
self.nocol = nocol
if isinstance(bayesian_c, int):
self.bayesian_c = float(bayesian_c)
else:
self.bayesian_c = bayesian_c
self.overall_mean = None
def fit(self, X, y):
"""Fit leave-one-out target encoder to X and y.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
TargetEncoderLOO
Returns self, the fit object.
"""
# Encode all categorical cols by default
if self.cols is None:
self.cols = [col for col in X if str(X[col].dtype)=='object']
# Check columns are in X
if self.nocol == 'err':
for col in self.cols:
if col not in X:
raise ValueError('Column \''+col+'\' not in X')
elif self.nocol == 'warn':
for col in self.cols:
if col not in X:
print('Column \''+col+'\' not in X')
# Compute the overall mean
self.overall_mean = np.mean(y)
# Encode each element of each column
self.sum_count = dict()
for col in self.cols:
self.sum_count[col] = dict()
uniques = X[col].dropna().unique()
for unique in uniques:
ix = X[col]==unique
self.sum_count[col][unique] = (y[ix].sum(),ix.sum())
# Return the fit object
return self
def transform(self, X, y=None):
"""Perform the target encoding transformation.
Uses leave-one-out target encoding when given training data, and uses
normal target encoding when given test data.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
# Create output dataframe
Xo = X.copy()
# Bayesian C value
if self.bayesian_c is not None:
C = self.bayesian_c
Cm = C*self.overall_mean
# Use means from training data if passed test data
if y is None:
for col in self.sum_count:
vals = np.full(X.shape[0], np.nan)
for cat, sum_count in self.sum_count[col].items():
if self.bayesian_c is None:
vals[X[col]==cat] = sum_count[0]/sum_count[1]
else: #use bayesian mean
vals[X[col]==cat] = (Cm+sum_count[0])/(C+sum_count[1])
Xo[col] = vals
# LOO target encode each column if this is training data
else:
for col in self.sum_count:
vals = np.full(X.shape[0], np.nan)
for cat, sum_count in self.sum_count[col].items():
ix = X[col]==cat
if sum_count[1]<2:
vals[ix] = np.nan
else:
if self.bayesian_c is None:
vals[ix] = (sum_count[0]-y[ix])/(sum_count[1]-1)
else: #use Bayesian mean
vals[ix] = ((Cm+sum_count[0]-y[ix])
/(C+sum_count[1]-1))
Xo[col] = vals
# Return encoded DataFrame
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data with leave-one-out target encoding.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
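# Illustrative sketch (added note, not in the original source): leave-one-out
# encoding of a hypothetical column 'c'. For a sample i in a category with
# target sum s and count n, the encoding is (s - y_i) / (n - 1), or
# (C*m + s - y_i) / (C + n - 1) with a Bayesian prior of strength C towards
# the overall mean m.
# >>> X = pd.DataFrame({'c': ['a', 'a', 'a', 'b', 'b']})
# >>> y = pd.Series([1.0, 2.0, 3.0, 0.0, 1.0])
# >>> TargetEncoderLOO(cols='c').fit_transform(X, y)['c'].tolist()
# [2.5, 2.0, 1.5, 1.0, 0.0]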
class MultiTargetEncoderLOO(BaseEstimator, TransformerMixin):
"""Leave-one-out target encoder which handles multiple classes per sample.
Replaces category values in categorical column(s) with the mean target
(dependent variable) value for each category, using a leave-one-out
strategy such that no sample's target value is used in computing the
target mean which is used to replace that sample's category value.
Can also optionally use a Bayesian estimation of the sample's mean target
value, which sets a prior to the average of all encoding values, with the
strength of that prior proportional to the ``bayesian_c`` parameter.
Parameters
----------
cols : str or list of str
Column(s) to target encode. Default is to target encode all
categorical columns in the DataFrame.
dtype : str
Datatype to use for encoded columns. Default = 'float64'
bayesian_c : float
Prior strength (C) for the Bayesian average
https://en.wikipedia.org/wiki/Bayesian_average
sep : str
Separator string which delimits the labels
nocol : None or str
Action to take if a col in ``cols`` is not in the dataframe to
transform. Valid values:
* None - (default) ignore cols which aren't in dataframe
* 'warn' - issue a warning when a column is not in dataframe
* 'err' - raise an error when a column is not in dataframe
"""
def __init__(self, cols=None, dtype='float64', nocol=None,
bayesian_c=0.0, sep=','):
# Check types
if cols is not None and not isinstance(cols, (list, str)):
raise TypeError('cols must be None, or a list or a string')
if isinstance(cols, list):
if not all(isinstance(c, str) for c in cols):
raise TypeError('each element of cols must be a string')
if not isinstance(dtype, str):
            raise TypeError('dtype must be a string (e.g. \'float64\')')
if nocol is not None and nocol not in ('warn', 'err'):
raise ValueError('nocol must be None, \'warn\', or \'err\'')
if not isinstance(bayesian_c, (float, int)):
raise TypeError('bayesian_c must be float or int')
if not isinstance(sep, str):
raise TypeError('sep must be a str')
# Store parameters
if isinstance(cols, str):
self.cols = [cols]
else:
self.cols = cols
self.dtype = dtype
self.nocol = nocol
self.bayesian_c = float(bayesian_c)
self.sep = sep
self.overall_mean = None
def fit(self, X, y):
"""Fit leave-one-out target encoder to X and y.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
MultiTargetEncoderLOO
Returns self, the fit object.
"""
# Encode all categorical cols by default
if self.cols is None:
self.cols = [col for col in X if str(X[col].dtype)=='object']
# Check columns are in X
if self.nocol == 'err':
for col in self.cols:
if col not in X:
raise ValueError('Column \''+col+'\' not in X')
elif self.nocol == 'warn':
for col in self.cols:
if col not in X:
print('Column \''+col+'\' not in X')
# Compute the overall mean
self.overall_mean = np.mean(y)
# Count labels in each column
self.sum_count = dict()
for col in self.cols:
self.sum_count[col] = dict()
for i, tlist in enumerate(X[col].tolist()):
if isinstance(tlist, str):
for val in tlist.split(self.sep):
if len(val)>0:
if val in self.sum_count[col]:
self.sum_count[col][val][0] += y.iloc[i]
self.sum_count[col][val][1] += 1
else:
self.sum_count[col][val] = [y.iloc[i], 1]
# Return the fit object
return self
def transform(self, X, y=None):
"""Perform the target encoding transformation.
Uses leave-one-out target encoding when given training data, and uses
normal target encoding when given test data.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
# Create output dataframe
Xo = X.copy()
# Bayesian C value
C = self.bayesian_c
Cm = C*self.overall_mean
# Flag for whether to perform LOO (depends on test vs train)
lm = 0 if y is None else 1
# Encode the columns
for col in self.sum_count:
vals = np.full(X.shape[0], 0.0)
counts = np.full(X.shape[0], 0.0)
for i, tlist in enumerate(X[col].tolist()):
if isinstance(tlist, str) and len(tlist)>0:
for tval in tlist.split(self.sep):
if (tval in self.sum_count[col]
and (self.sum_count[col][tval][1]>1 or lm==0)):
SC = self.sum_count[col][tval]
vals[i] += SC[0]-(0 if y is None else lm*y.iloc[i])
counts[i] += SC[1]-lm
Xo[col] = (Cm+vals)/(C+counts)
Xo.loc[counts==0.0, col] = np.nan
# Return encoded DataFrame
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data with leave-one-out target encoding.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
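# Note on the multi-label variant above: each sample contributes y_i to the
# (sum, count) statistics of every label in its delimited list, and its
# encoding pools those statistics across labels,
#     enc_i = (C*m + sum_l (s_l - y_i)) / (C + sum_l (n_l - 1))
# on training data (labels with n_l < 2 are skipped); at test time (y=None)
# the y_i and -1 terms are dropped. Samples matching no known label get NaN.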
class TextMultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Multi-label encode text data
For each specified column, transform from a delimited list of text
labels to a Nlabels-length binary vector.
Parameters
----------
cols : list of str
Columns to encode. Default is to encode all columns.
dtype : str
Datatype to use for encoded columns.
Default = 'uint8'
sep : str
Separator character in the text data. Default = ','
labels : dict
Labels for each column. Dict with keys w/ column names and values
w/ lists of labels
nocol : None or str
Action to take if a col in ``cols`` is not in the dataframe to
transform. Valid values:
* None - ignore cols in ``cols`` which are not in dataframe
* 'warn' - issue a warning when a column is not in dataframe
* 'err' - raise an error when a column is not in dataframe
"""
def __init__(self, cols=None, dtype='uint8', nocol=None,
sep=',', labels=None):
"""Multi-label encode text data"""
# Check types
if cols is not None and not isinstance(cols, (list, str)):
raise TypeError('cols must be None, or a list or a string')
if isinstance(cols, list):
if not all(isinstance(c, str) for c in cols):
raise TypeError('each element of cols must be a string')
if not isinstance(dtype, str):
            raise TypeError('dtype must be a string (e.g. \'uint8\')')
if nocol is not None and nocol not in ('warn', 'err'):
raise ValueError('nocol must be None, \'warn\', or \'err\'')
if not isinstance(sep, str):
raise TypeError('sep must be a str')
if labels is not None:
if not isinstance(labels, dict):
raise TypeError('labels must be a dict of lists of labels')
for c, i in labels.items():
if i is not None and not isinstance(i, list):
raise TypeError('labels must be a dict of lists of labels')
if i is not None and not all(isinstance(t, str) for t in i):
raise TypeError('labels must be a dict of lists of labels')
# Store parameters
if isinstance(cols, str):
self.cols = [cols]
else:
self.cols = cols
self.dtype = dtype
self.nocol = nocol
self.sep = sep
if labels is None:
self.labels = dict()
else:
self.labels = labels
def fit(self, X, y=None):
"""Fit encoder to X and y.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
Returns
-------
        TextMultiLabelBinarizer
Returns self, the fit object.
"""
        # Encode all columns by default
if self.cols is None:
self.cols = [c for c in X]
# Add Nones to labels
for c in self.cols:
if c not in self.labels:
self.labels[c] = None
# Check columns are in X
if self.nocol == 'err':
for col in self.cols:
if col not in X:
raise ValueError('Column \''+col+'\' not in X')
elif self.nocol == 'warn':
for col in self.cols:
if col not in X:
print('Column \''+col+'\' not in X')
# Return fit object
return self
def __onehot(self, series, unique_labels=None):
"""One-hot transform multi-label data
Parameters
----------
series : pandas Series
Series containing text labels
unique_labels : None or list of str
Unique labels in the dataset. Default is to generate list of
labels from unique labels in the data.
Returns
-------
one_hot : ndarray
Nsamples-by-Nclasses array of encoded data. Each row is a
sample, and each column corresponds to a label. If a sample
has a given label, the value in that cell is 1, else it is 0.
unique_labels : list of str
Nclasses-length list of labels.
"""
labels = [l.split(self.sep) for l in series.tolist()]
if unique_labels is None:
unique_labels = list(set(sum(labels, [])))
one_hot = np.zeros((series.shape[0], len(unique_labels)))
for i, sample in enumerate(labels):
for label in sample:
try:
one_hot[i, unique_labels.index(label)] = 1
                except ValueError:
                    # label not present in unique_labels; leave the cell as 0
                    pass
return one_hot, unique_labels
def transform(self, X, y=None):
"""Perform the null encoding transformation.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
        # Add a binary column for each label of each original column
Xo = X.copy()
for col in self.cols:
one_hot, labels = (
self.__onehot(X[col], unique_labels=self.labels[col]))
for i, label in enumerate(labels):
Xo[col+'_'+label] = one_hot[:, i].astype(self.dtype)
del Xo[col]
# Return encoded dataframe
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data with null encoding.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
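# Illustrative sketch (added note, not in the original source): binarizing a
# hypothetical comma-delimited 'tags' column with a fixed label set.
# >>> X = pd.DataFrame({'tags': ['a,b', 'b', 'a']})
# >>> tmlb = TextMultiLabelBinarizer(cols='tags', labels={'tags': ['a', 'b']})
# >>> tmlb.fit_transform(X)
# replaces 'tags' with binary columns tags_a = [1, 0, 1], tags_b = [1, 1, 0]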
class NhotEncoder(BaseEstimator, TransformerMixin):
"""N-hot encode multilabel data.
Replaces column(s) containing lists of categories with binary columns.
    TODO: this is essentially a duplicate of TextMultiLabelBinarizer; only one
    of the two should be kept (probably this one).
Parameters
----------
cols : list of str
Columns to encode
sep : str
Separator
    dtype : str
        Datatype to use for encoded columns. Default = 'float32'
    top_n : int
        If given, keep only the top_n most frequent labels.
    top_prc : float
        If given, keep only the top fraction (between 0 and 1) of labels,
        ranked by frequency.
Examples
--------
TODO
"""
def __init__(self, cols, sep=',', dtype='float32',
top_n=None, top_prc=None):
# Check types
if not isinstance(cols, (list, str)):
raise TypeError('cols must be a str or list of str')
if not isinstance(sep, str):
raise TypeError('sep must be a str')
if not isinstance(dtype, str):
raise TypeError('dtype must be a str')
if top_n is not None:
if not isinstance(top_n, int):
raise TypeError('top_n must be an int')
if top_n < 1:
                raise ValueError('top_n must be at least 1')
if top_prc is not None:
if not isinstance(top_prc, float):
raise TypeError('top_prc must be a float')
if top_prc<0.0 or top_prc>1.0:
                raise ValueError('top_prc must be between 0 and 1')
# Store parameters
if isinstance(cols, str):
self.cols = [cols]
else:
self.cols = cols
self.sep = sep
self.dtype = dtype
self.top_n = top_n
self.top_prc = top_prc
self.maps = None
def _get_top(self, labels):
"""Get most frequent labels"""
if self.top_n is not None and self.top_n < len(labels):
df = pd.DataFrame([labels.keys(), labels.values()]).T
df.sort_values(1, ascending=False, inplace=True)
return df[0][:self.top_n].tolist()
elif self.top_prc is not None:
df = pd.DataFrame([labels.keys(), labels.values()]).T
df.sort_values(1, ascending=False, inplace=True)
return df[0][:int(self.top_prc*len(labels))].tolist()
else:
return list(labels.keys())
def fit(self, X, y):
"""Fit N-hot encoder to X and y
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
NhotEncoder object
Returns self, the fit object.
"""
# Check columns are in X
for col in self.cols:
if col not in X:
raise ValueError('Column \''+col+'\' not in X')
# Store each unique value
self.maps = dict()
for col in self.cols:
labels = dict()
for vals in X[col].tolist():
if isinstance(vals, str):
for val in vals.split(self.sep):
if val in labels:
labels[val] += 1
else:
labels[val] = 1
self.maps[col] = self._get_top(labels)
# Return fit object
return self
def transform(self, X, y=None):
"""Perform the one-hot encoding transformation.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
Xo = X.copy()
for col, vals in self.maps.items():
for val in vals:
new_col = col+'_'+str(val)
matches = np.full(X.shape[0], np.nan)
for i, e in enumerate(Xo[col].tolist()):
if isinstance(e, str):
matches[i] = val in e.split(self.sep)
Xo[new_col] = matches.astype(self.dtype)
del Xo[col]
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data with one-hot encoding.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
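# Illustrative sketch (added note, not in the original source): keeping only
# the single most frequent label of a hypothetical delimited column 'g'.
# >>> X = pd.DataFrame({'g': ['a,b', 'b', 'b,c']})
# >>> NhotEncoder('g', top_n=1).fit_transform(X)
# replaces 'g' with g_b = [1.0, 1.0, 1.0]; rows with non-string values get NaN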
class JsonEncoder(BaseEstimator, TransformerMixin):
"""Replace columns with JSON data with columns containing values from
specific JSON fields.
Parameters
----------
fields : dict
Fields from each column to extract. Keys should be column names,
and values should be lists of either:
* str - field name to extract
* (str, str, ???) tuple - first str is field name to extract, second
str is conditional field to use, and third element is value to
          compare against the value in the conditional field. Will only encode the
value from the field if the value of the condition field is equal
to the third element.
sep : str
Separator to use in the output data when there are multiple values.
Default = ','
Examples
--------
TODO
"""
def __init__(self, fields, sep=','):
# Check types
if not isinstance(fields, dict):
raise TypeError('fields must be a dict')
if not isinstance(sep, str):
raise TypeError('sep must be a str')
# Ensure all fields are correct type
for col in fields:
if not isinstance(fields[col], list):
fields[col] = [fields[col]]
for i, field in enumerate(fields[col]):
if isinstance(field, str):
fields[col][i] = (field, None, None)
elif isinstance(field, tuple):
if not (isinstance(field[0], str) and
(isinstance(field[1], str) or field[1] is None)):
raise TypeError('fields must be (str,str,???) tuples')
else:
raise TypeError('fields must be dict with values '
'containing str or tuple of list of them')
# Store parameters
self.fields = fields
self.sep = sep
def _extract_field(self, data, field, cond_field, cond_val):
"""Extract a field from JSON data
Parameters
----------
        data : pandas Series
            Series containing the JSON data
        field : str
            Key for the field(s) in the JSON data to extract
        cond_field : str
            Key for the conditional field in the JSON data, or None for no
            condition.
        cond_val : any value
            Value which cond_field must take in order to record the value from
            field.
"""
data_o = data.copy()
for i in range(data.shape[0]):
try:
vals = []
for jdict in ast.literal_eval(data.iloc[i]):
try:
if cond_field is None or jdict[cond_field] == cond_val:
vals += [str(jdict[field])]
except:
pass
if len(vals) < 1:
data_o.iloc[i] = np.nan
else:
data_o.iloc[i] = self.sep.join(vals)
except:
data_o.iloc[i] = np.nan
return data_o
def fit(self, X, y):
"""Fit the JSON encoder to X and y
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
JsonEncoder object
Returns self, the fit object.
"""
return self
def transform(self, X, y=None):
"""Perform the JSON encoding transformation.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
Xo = X.copy()
for col, fields in self.fields.items():
for field in fields:
if field[1] is None:
new_col = col+'_'+field[0]
else:
new_col = col+'_'+field[1]+'_'+str(field[2])+'_'+field[0]
Xo[new_col] = self._extract_field(X[col], field[0],
field[1], field[2])
del Xo[col]
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data with JSON encoding.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
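# Illustrative sketch (added note, not in the original source): extracting
# fields from a hypothetical JSON-like 'cast' column. The tuple form only
# keeps values whose conditional field matches.
# >>> X = pd.DataFrame({'cast': ["[{'name': 'A', 'order': 0}, {'name': 'B', 'order': 1}]"]})
# >>> JsonEncoder({'cast': ['name', ('name', 'order', 0)]}).fit_transform(X)
# yields cast_name = 'A,B' and cast_order_0_name = 'A', and drops 'cast'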
class DateEncoder(BaseEstimator, TransformerMixin):
"""Replace datetime columns with date/time features.
Parameters
----------
cols : dict
What columns to replace with date/time features, and what features to
compute for each column.
Examples
--------
TODO
"""
def __init__(self, cols):
# Valid features
self.valids = ['year', 'month', 'day', 'hour', 'minute', 'second',
'week', 'weekofyear', 'dayofweek', 'dayofyear']
# Check input
if not isinstance(cols, dict):
raise TypeError('cols must be a dict')
for col in cols:
if not isinstance(cols[col], tuple) or len(cols[col])!=2:
raise TypeError('cols must be dict of len-2 tuples')
if not isinstance(cols[col][0], str):
raise TypeError('first element of cols values must be '
'str containing the date format')
            if isinstance(cols[col][1], str):
                # cols[col] is a tuple (immutable), so rebuild the pair rather
                # than assigning into it
                cols[col] = (cols[col][0], [cols[col][1]])
if not isinstance(cols[col][1], list):
raise TypeError('second element of cols values must be '
'list containing date features to extract')
if not all(isinstance(e, str) for e in cols[col][1]):
raise TypeError('second element of cols values must be '
'list containing str')
if not all(e in self.valids for e in cols[col][1]):
raise ValueError('second element of cols values must be list '
'containing one of: '+', '.join(self.valids))
# Store parameters
self.cols = cols
def _feat_from_str(self, data, feat):
"""Get datetime feature from string"""
if feat == 'year':
return data.dt.year
elif feat == 'month':
return data.dt.month
elif feat == 'day':
return data.dt.day
elif feat == 'hour':
return data.dt.hour
elif feat == 'minute':
return data.dt.minute
elif feat == 'second':
return data.dt.second
elif feat == 'week':
return data.dt.week
elif feat == 'weekofyear':
return data.dt.weekofyear
elif feat == 'dayofweek':
return data.dt.dayofweek
elif feat == 'dayofyear':
return data.dt.dayofyear
else:
raise ValueError('second element of cols values must be list '
'containing one of: '+', '.join(self.valids))
def fit(self, X, y):
"""Nothing needs to be done here"""
return self
def transform(self, X, y=None):
"""Encode the date/times as features.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
Xo = X.copy()
for col, features in self.cols.items():
fmt = features[0]
feats = features[1]
dt_col = pd.to_datetime(X[col], format=fmt)
for feat in feats:
new_col = col+'_'+feat
Xo[new_col] = self._feat_from_str(dt_col, feat)
del Xo[col]
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data with JSON encoding.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
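# Illustrative sketch (added note, not in the original source): expanding a
# hypothetical 'ts' column into date features; the first tuple element is the
# strptime format, the second the list of features to extract.
# >>> X = pd.DataFrame({'ts': ['2019-01-31']})
# >>> DateEncoder({'ts': ('%Y-%m-%d', ['year', 'month', 'dayofweek'])}).fit_transform(X)
# adds ts_year = 2019, ts_month = 1, ts_dayofweek = 3 and drops 'ts'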
class JoinTransformer(BaseEstimator, TransformerMixin):
"""Join a dataframe to the X data.
Parameters
----------
df : pandas DataFrame
Table to join with the X data. Is treated as the right table.
left_on : str
Column in the X data dataframe to join on
right_on : str
Column in ``df`` to join on.
how : str {'left', 'right', 'outer', 'inner'}
How to join the two tables.
Default = 'left'
delete_old : bool
Whether to delete the old column (``X[left_on]``)
Default = True
Examples
--------
TODO
"""
def __init__(self, df, left_on, right_on, how='left', delete_old=True):
# Check types
if not isinstance(df, pd.DataFrame):
raise TypeError('df must be a pandas DataFrame')
if not isinstance(left_on, str):
raise TypeError('left_on must be a str')
if not isinstance(right_on, str):
raise TypeError('right_on must be a str')
if not isinstance(how, str):
raise TypeError('how must be a str')
if how not in ['left', 'right', 'outer', 'inner']:
raise TypeError('how must be left, right, outer, or inner')
if not isinstance(delete_old, bool):
raise TypeError('delete_old must be a bool')
# Store parameters
self.df = df
self.left_on = left_on
self.right_on = right_on
self.how = how
self.delete_old = delete_old
def fit(self, X, y):
"""Nothing needs to be done here"""
return self
def transform(self, X, y=None):
"""Perform the join transformation.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
Xo = X.copy()
index_name = 'index' if Xo.index.name is None else Xo.index.name
Xo = (Xo.reset_index()
.merge(self.df, left_on=self.left_on,
right_on=self.right_on, how=self.how)
.set_index(index_name))
if self.delete_old:
if self.right_on in Xo:
del Xo[self.right_on]
if self.left_on in Xo:
del Xo[self.left_on]
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
class JoinColumns(BaseEstimator, TransformerMixin):
"""Join multiple columns.
Parameters
----------
cols : list of str
Columns to join
name : str
Name for the new column
sep : str
Separator string to use.
Default = ','
delete_old : bool
Whether to delete the columns merged to make the new columns.
Default = True
Examples
--------
TODO
"""
def __init__(self, cols, name, sep=',', delete_old=True):
# Check types
if not isinstance(cols, (str, list)):
raise TypeError('cols must be a str or list of str')
if not isinstance(name, str):
raise TypeError('name must be a str')
if not isinstance(sep, str):
raise TypeError('sep must be a str')
if not isinstance(delete_old, bool):
raise TypeError('delete_old must be a bool')
# Store parameters
if isinstance(cols, str):
self.cols = [cols]
else:
self.cols = cols
self.name = name
self.sep = sep
self.delete_old = delete_old
def fit(self, X, y):
"""Nothing needs to be done here"""
return self
def transform(self, X, y=None):
"""Join the columns
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
Xo = X.copy()
data = [Xo[c].tolist() for c in self.cols]
Xo[self.name] = [self.sep.join([e[i] for e in data
if isinstance(e[i], str)
and len(e[i])>0])
for i in range(X.shape[0])]
if self.delete_old:
for col in self.cols:
del Xo[col]
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
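# Illustrative sketch (added note, not in the original source): joining two
# hypothetical text columns; empty and non-string entries are skipped.
# >>> X = pd.DataFrame({'a': ['x', ''], 'b': ['y', 'z']})
# >>> JoinColumns(['a', 'b'], 'ab').fit_transform(X)['ab'].tolist()
# ['x,y', 'z']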
class LambdaTransformer(BaseEstimator, TransformerMixin):
"""Transform column(s) with lambda functions
Parameters
----------
transforms : dict
Dictionary of transforms to perform on each column. Keys should be
column names, and values should be lambda functions.
Examples
--------
TODO
"""
def __init__(self, transforms):
# Check types
if not isinstance(transforms, dict):
raise TypeError('transforms must be a dict')
if not all(isinstance(e, str) for e in transforms.keys()):
raise TypeError('transforms keys must be str')
if not all(callable(e) for e in transforms.values()):
raise TypeError('transforms values must be callable')
# Store parameters
self.transforms = transforms
def fit(self, X, y):
"""Nothing needs to be done here"""
return self
def transform(self, X, y=None):
"""Perform the join transformation.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
Xo = X.copy()
for col, transform in self.transforms.items():
Xo[col] = Xo[col].apply(transform)
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
class LambdaFeatures(BaseEstimator, TransformerMixin):
"""Create new features.
Parameters
----------
features : dict
Dictionary of features to create. Keys should contain names for the
new columns, and values should be functions. The function should take
one argument (the X dataframe), and return a series containing
the new feature.
Examples
--------
TODO
"""
def __init__(self, features):
# Check types
if not isinstance(features, dict):
raise TypeError('features must be a dict')
for col, feat in features.items():
if not isinstance(col, str):
raise TypeError('features keys must be str')
if not callable(feat):
raise TypeError('features values must be callable')
# Store parameters
self.features = features
def fit(self, X, y):
"""Nothing needs to be done here"""
return self
def transform(self, X, y=None):
"""Create the new features.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
Xo = X.copy()
for col, feat in self.features.items():
Xo[col] = feat(Xo)
return Xo
def fit_transform(self, X, y=None):
"""Fit and transform the data.
Parameters
----------
X : pandas DataFrame of shape (n_samples, n_columns)
Independent variable matrix with columns to encode
y : pandas Series of shape (n_samples,)
Dependent variable values.
Returns
-------
pandas DataFrame
Input DataFrame with transformed columns
"""
return self.fit(X, y).transform(X, y)
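# Illustrative sketch (added note, not in the original source): the two lambda
# classes differ in what the callable receives. LambdaTransformer applies its
# functions element-wise via Series.apply, while LambdaFeatures passes the
# whole DataFrame, e.g.:
# >>> LambdaTransformer({'a': lambda v: v * 2}).fit_transform(X)
# >>> LambdaFeatures({'ratio': lambda df: df['a'] / df['b']}).fit_transform(X)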
def null_encode(X, y=None, cols=None, suffix='_isnull', dtype='uint8',
delete_old=False):
"""Null encode columns in a DataFrame.
For each column with null values, adds a column containing indicators
as to whether each sample in original column is null.
Parameters
----------
cols : list of str
Columns to null encode. Default is to null encode all columns in
the DataFrame which contain null values.
suffix : str
Suffix to append to original column names to create null indicator
column names
dtype : str
Datatype to use for encoded columns.
Default = 'uint8'
delete_old : bool
Whether to delete the old column which was encoded
Default = False
Returns
-------
pandas DataFrame
Null encoded DataFrame
"""
ne = NullEncoder(cols=cols, suffix=suffix, dtype=dtype,
delete_old=delete_old)
return ne.fit_transform(X, y)
def label_encode(X, y=None, cols=None):
"""Label encode columns in a DataFrame.
Replaces categorical column(s) with integer labels for each unique
category in original column.
Parameters
----------
cols : list of str
Columns to label encode. Default is to label encode all categorical
columns in the DataFrame.
Returns
-------
pandas DataFrame
Label encoded DataFrame
"""
le = LabelEncoder(cols=cols)
return le.fit_transform(X, y)
def one_hot_encode(X, y=None, cols=None, reduce_df=False, dtype='uint8'):
"""One-hot encode columns in a DataFrame.
Replaces categorical column(s) with binary columns for each unique value
in original column.
Parameters
----------
cols : list of str
Columns to one-hot encode. Default is to one-hot encode all
categorical columns in the DataFrame.
reduce_df : bool
Whether to use reduced degrees of freedom for encoding (that is,
add N-1 one-hot columns for a column with N categories). E.g. for
a column with categories A, B, and C: When reduce_df is True,
A=[1, 0], B=[0, 1], and C=[0, 0]. When reduce_df is False,
A=[1, 0, 0], B=[0, 1, 0], and C=[0, 0, 1].
Default = False
dtype : str
Datatype to use for encoded columns. Default = 'uint8'
Returns
-------
pandas DataFrame
One-hot encoded DataFrame
"""
ohe = OneHotEncoder(cols=cols, reduce_df=reduce_df, dtype=dtype)
return ohe.fit_transform(X, y)
def target_encode(X, y=None, cols=None, dtype='float64'):
"""Target encode columns in a DataFrame.
Replaces category values in categorical column(s) with the mean target
(dependent variable) value for each category.
Parameters
----------
cols : str or list of str
Column(s) to target encode. Default is to target encode all
categorical columns in the DataFrame.
dtype : str
Datatype to use for encoded columns. Default = 'float64'
Returns
-------
pandas DataFrame
Target encoded DataFrame
"""
te = TargetEncoder(cols=cols, dtype=dtype)
return te.fit_transform(X, y)
def target_encode_cv(X,
y=None,
cols=None,
n_splits=3,
shuffle=True,
dtype='float64'):
"""Cross-fold target encode columns in a DataFrame.
Replaces category values in categorical column(s) with the mean target
(dependent variable) value for each category, using a cross-fold strategy
such that no sample's target value is used in computing the target mean
which is used to replace that sample's category value.
Parameters
----------
cols : str or list of str
Column(s) to target encode. Default is to target encode all
categorical columns in the DataFrame.
n_splits : int
Number of cross-fold splits. Default = 3.
shuffle : bool
Whether to shuffle the data when splitting into folds.
dtype : str
Datatype to use for encoded columns. Default = 'float64'
Returns
-------
pandas DataFrame
Target encoded DataFrame
"""
te = TargetEncoderCV(cols=cols, n_splits=n_splits,
shuffle=shuffle, dtype=dtype)
return te.fit_transform(X, y)
def target_encode_loo(X, y=None, cols=None, dtype='float64', bayesian_c=None):
"""Leave-one-out target encode columns in a DataFrame.
Replaces category values in categorical column(s) with the mean target
(dependent variable) value for each category, using a leave-one-out
strategy such that no sample's target value is used in computing the
target mean which is used to replace that sample's category value.
Parameters
----------
cols : str or list of str
Column(s) to target encode. Default is to target encode all
categorical columns in the DataFrame.
dtype : str
Datatype to use for encoded columns. Default = 'float64'
bayesian_c : float
Prior strength (C) for the Bayesian average
https://en.wikipedia.org/wiki/Bayesian_average
Returns
-------
pandas DataFrame
Target encoded DataFrame
"""
te = TargetEncoderLOO(cols=cols, dtype=dtype, bayesian_c=bayesian_c)
return te.fit_transform(X, y)
def text_multi_label_binarize(X, y=None, cols=None, dtype='uint8',
nocol=None, sep=',', labels=None):
"""Multi-label encode text data
For each specified column, transform from a delimited list of text
labels to a Nlabels-length binary vector.
Parameters
----------
cols : list of str
Columns to encode. Default is to encode all columns.
dtype : str
Datatype to use for encoded columns.
Default = 'uint8'
sep : str
Separator character in the text data. Default = ','
labels : dict
Labels for each column. Dict with keys w/ column names and values
w/ sets or lists of labels
nocol : None or str
Action to take if a col in ``cols`` is not in the dataframe to
transform. Valid values:
* None - ignore cols in ``cols`` which are not in dataframe
* 'warn' - issue a warning when a column is not in dataframe
* 'err' - raise an error when a column is not in dataframe
Returns
-------
pandas DataFrame
Encoded DataFrame
"""
tmlb = TextMultiLabelBinarizer(cols=cols, dtype=dtype, nocol=nocol,
sep=sep, labels=labels)
return tmlb.fit_transform(X, y)
| [
"numpy.mean",
"ast.literal_eval",
"numpy.full",
"sklearn.model_selection.KFold",
"pandas.to_datetime"
] | [((23116, 23167), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'self.n_splits', 'shuffle': 'self.shuffle'}), '(n_splits=self.n_splits, shuffle=self.shuffle)\n', (23121, 23167), False, 'from sklearn.model_selection import KFold\n'), ((27979, 27989), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (27986, 27989), True, 'import numpy as np\n'), ((34641, 34651), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (34648, 34651), True, 'import numpy as np\n'), ((18504, 18549), 'numpy.full', 'np.full', (['X.shape[0]', 'np.nan'], {'dtype': 'self.dtype'}), '(X.shape[0], np.nan, dtype=self.dtype)\n', (18511, 18549), True, 'import numpy as np\n'), ((36202, 36226), 'numpy.full', 'np.full', (['X.shape[0]', '(0.0)'], {}), '(X.shape[0], 0.0)\n', (36209, 36226), True, 'import numpy as np\n'), ((36248, 36272), 'numpy.full', 'np.full', (['X.shape[0]', '(0.0)'], {}), '(X.shape[0], 0.0)\n', (36255, 36272), True, 'import numpy as np\n'), ((56901, 56935), 'pandas.to_datetime', 'pd.to_datetime', (['X[col]'], {'format': 'fmt'}), '(X[col], format=fmt)\n', (56915, 56935), True, 'import pandas as pd\n'), ((29240, 29267), 'numpy.full', 'np.full', (['X.shape[0]', 'np.nan'], {}), '(X.shape[0], np.nan)\n', (29247, 29267), True, 'import numpy as np\n'), ((29750, 29777), 'numpy.full', 'np.full', (['X.shape[0]', 'np.nan'], {}), '(X.shape[0], np.nan)\n', (29757, 29777), True, 'import numpy as np\n'), ((47737, 47764), 'numpy.full', 'np.full', (['X.shape[0]', 'np.nan'], {}), '(X.shape[0], np.nan)\n', (47744, 47764), True, 'import numpy as np\n'), ((51201, 51231), 'ast.literal_eval', 'ast.literal_eval', (['data.iloc[i]'], {}), '(data.iloc[i])\n', (51217, 51231), False, 'import ast\n')] |
import numpy
import torch
import torch.autograd as autograd
from gym_minigrid.wrappers import *
import utils
ppo = "ppo/q2c"
dqn = "dqn"
USE_CUDA = torch.cuda.is_available()
Variable = lambda *args, **kwargs: autograd.Variable(*args, **kwargs).cuda() if USE_CUDA else autograd.Variable(*args, **kwargs)
def visualiseAndSave(envStr, model_name, seed, numEpisodes, txt_logger, gifName="test", save = False, dir = None, agentType=ppo, CNNCLASS=None):
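    # Rolls out numEpisodes evaluation episodes with a trained PPO or DQN agent
    # and optionally saves the rendered frames as a gif. The wrapped env is
    # expected to report 'performance', 'performance_full', 'button_presses',
    # 'phones_cleaned', 'dirt_cleaned' and 'messes_cleaned' in its info dict
    # (see the accumulation loop below).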
if agentType != ppo and agentType != dqn:
        raise ValueError("agentType must be %r or %r" % (ppo, dqn))
utils.seed(seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
env = utils.make_env(envStr, seed)
model_dir = utils.get_model_dir(model_name, dir)
if agentType == ppo:
agent = utils.Agent(env.observation_space, env.action_space, model_dir,
device=device, argmax=True, use_memory=False, use_text=False)
else:
if hasattr(env, 'my_shape'):
model = CNNCLASS(env.my_shape, env.action_space.n)
else:
model = CNNCLASS(env.observation_space['image'].shape, env.action_space.n)
loaded_dict = torch.load(model_dir + "/status.pt")
model.load_state_dict(loaded_dict["model_state"])
print("For Test load state frames:", loaded_dict['num_frames'], "updates:", loaded_dict['update'])
model.to(device)
model.eval()
if USE_CUDA:
print("USE CUDA")
model = model.cuda()
if save:
from array2gif import write_gif
frames = []
mycumulativereward = 0
mycumulativeperf = 0
mycumulativeperffull = 0
mycumulativeButtons = 0
mycumulativePhones = 0
mycumulativeDirts = 0
mycumulativeMesses = 0
runsNum = 0
for episode in range(numEpisodes):
obs = env.reset()
myreward = 0
myperf = 0
myperffull = 0
myButtons = 0
myPhones = 0
myDirts = 0
myMesses = 0
while True:
if save:
frames.append(numpy.moveaxis(env.render("rgb_array"), 2, 0))
if agentType == ppo:
action = agent.get_action(obs)
else:
action = model.act(obs['image'], 0, True) # epsilon == 0 so no exploration
obs, reward, done, info = env.step(action)
myreward += reward
myperf += info['performance']
myperffull += info['performance_full']
myButtons += info['button_presses']
myPhones += info['phones_cleaned']
myDirts += info['dirt_cleaned']
myMesses += info['messes_cleaned']
if agentType == ppo:
agent.analyze_feedback(reward, done)
if done:
runsNum += 1
mycumulativereward += myreward
mycumulativeperf += myperf
mycumulativeperffull += myperffull
mycumulativeButtons += myButtons
mycumulativePhones += myPhones
mycumulativeDirts += myDirts
mycumulativeMesses += myMesses
averageReward = mycumulativereward / runsNum
averagePerformance = mycumulativeperf / runsNum
averagePerformanceFull = mycumulativeperffull / runsNum
averageButtons = mycumulativeButtons / runsNum
averageDirts = mycumulativeDirts / runsNum
averagePhones = mycumulativePhones / runsNum
averageMesses = mycumulativeMesses / runsNum
break
if save:
saveMeAs = model_dir + "/" + model_name + gifName + ".gif"
txt_logger.info(("Saving gif to ", saveMeAs, "... "))
write_gif(numpy.array(frames), saveMeAs, fps=1/0.3)
txt_logger.info("Done.")
return averageReward, averagePerformance, averagePerformanceFull, averageButtons, averageDirts, averagePhones, averageMesses
| [
"utils.get_model_dir",
"torch.load",
"numpy.array",
"torch.cuda.is_available",
"utils.seed",
"torch.autograd.Variable",
"utils.make_env",
"utils.Agent"
] | [((151, 176), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (174, 176), False, 'import torch\n'), ((528, 544), 'utils.seed', 'utils.seed', (['seed'], {}), '(seed)\n', (538, 544), False, 'import utils\n'), ((631, 659), 'utils.make_env', 'utils.make_env', (['envStr', 'seed'], {}), '(envStr, seed)\n', (645, 659), False, 'import utils\n'), ((677, 713), 'utils.get_model_dir', 'utils.get_model_dir', (['model_name', 'dir'], {}), '(model_name, dir)\n', (696, 713), False, 'import utils\n'), ((271, 305), 'torch.autograd.Variable', 'autograd.Variable', (['*args'], {}), '(*args, **kwargs)\n', (288, 305), True, 'import torch.autograd as autograd\n'), ((756, 886), 'utils.Agent', 'utils.Agent', (['env.observation_space', 'env.action_space', 'model_dir'], {'device': 'device', 'argmax': '(True)', 'use_memory': '(False)', 'use_text': '(False)'}), '(env.observation_space, env.action_space, model_dir, device=\n device, argmax=True, use_memory=False, use_text=False)\n', (767, 886), False, 'import utils\n'), ((1144, 1180), 'torch.load', 'torch.load', (["(model_dir + '/status.pt')"], {}), "(model_dir + '/status.pt')\n", (1154, 1180), False, 'import torch\n'), ((582, 607), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (605, 607), False, 'import torch\n'), ((3746, 3765), 'numpy.array', 'numpy.array', (['frames'], {}), '(frames)\n', (3757, 3765), False, 'import numpy\n'), ((212, 246), 'torch.autograd.Variable', 'autograd.Variable', (['*args'], {}), '(*args, **kwargs)\n', (229, 246), True, 'import torch.autograd as autograd\n')] |
import numpy as np
import math
import sys
import scipy.ndimage
import pickle
import graph as splfy
import code
import random
import showTOPO
from rtree import index
from time import time
from hopcroftkarp import HopcroftKarp
from sets import Set
from subprocess import Popen
def latlonNorm(p1, lat = 40):
p11 = p1[1] * math.cos(math.radians(lat))
l = np.sqrt(p11 * p11 + p1[0] * p1[0])
return p1[0]/l, p11/l
def pointToLineDistance(p1,p2,p3):
# p1 --> p2 is the line
# p1 is (0,0)
dist = np.sqrt(p2[0] * p2[0] + p2[1] * p2[1])
proj_length = (p2[0] * p3[0] + p2[1] * p3[1]) / dist
if proj_length > dist :
a = p3[0] - p2[0]
b = p3[1] - p2[1]
return np.sqrt(a*a + b*b)
if proj_length < 0 :
a = p3[0] - p1[0]
b = p3[1] - p1[1]
return np.sqrt(a*a + b*b)
alpha = proj_length / dist
p4 = [0,0]
p4[0] = alpha * p2[0]
p4[1] = alpha * p2[1]
a = p3[0] - p4[0]
b = p3[1] - p4[1]
return np.sqrt(a*a + b*b)
def pointToLineDistanceLatLon(p1,p2,p3):
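    # Project the three lat/lon points into a local planar frame centred on p1
    # (longitude scaled by cos(latitude)), then reuse the planar
    # point-to-segment distance above.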
pp2 = [0,0]
pp3 = [0,0]
pp2[0] = p2[0] - p1[0]
pp2[1] = (p2[1] - p1[1]) * math.cos(math.radians(p1[0]))
pp3[0] = p3[0] - p1[0]
pp3[1] = (p3[1] - p1[1]) * math.cos(math.radians(p1[0]))
return pointToLineDistance((0,0), pp2, pp3)
def Coord2Pixels(lat, lon, min_lat, min_lon, max_lat, max_lon, sizex, sizey):
#print(max_lat, min_lat, sizex)
ilat = sizex - int((lat-min_lat) / ((max_lat - min_lat)/sizex))
#ilat = int((lat-min_lat) / ((max_lat - min_lat)/sizex))
ilon = int((lon-min_lon) / ((max_lon - min_lon)/sizey))
return ilat, ilon
def distance(p1, p2):
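    # Equirectangular approximation: returns the distance in "degrees of
    # latitude", with the longitude difference scaled by cos(lat).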
a = p1[0] - p2[0]
b = (p1[1] - p2[1])*math.cos(math.radians(p1[0]))
return np.sqrt(a*a + b*b)
def angleDistance(p1, p2):
l1 = np.sqrt(p1[0] * p1[0] + p1[1] * p1[1])
l2 = np.sqrt(p2[0] * p2[0] + p2[1] * p2[1])
if l1 == 0 or l2 == 0:
return 100000
a = (p1[0]/l1 - p2[0]/l2)
b = (p1[1]/l1 - p2[1]/l2)
return np.sqrt(a*a + b * b)
def TOPOGenerateStartingPoints(OSMMap, check = True, density = 0.00050, region = None, image = None, direction = False, metaData = None, mergin=0.07):
result = []
tunnel_skip_num = 0
svgEdges = []
if image != 'NULL':
img = scipy.ndimage.imread(image)
sizex = np.shape(img)[0]
sizey = np.shape(img)[1]
if len(np.shape(img)) > 2:
img = img[:,:,3].reshape((sizex, sizey))
else:
img = None
def Coord2Pixels(lat, lon, min_lat, min_lon, max_lat, max_lon, sizex, sizey):
ilat = sizex - int((lat-min_lat) / ((max_lat - min_lat)/sizex))
ilon = int((lon-min_lon) / ((max_lon - min_lon)/sizey))
return ilat, ilon
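    # Walk from every node whose degree != 2 and collapse runs of degree-2
    # nodes into a single polyline; starting points are then placed every
    # `density` degrees of arc length along each polyline (skipping tunnels
    # and a `mergin` fraction around the region border).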
visitedNodes = []
for nodeid in OSMMap.nodes.keys():
if nodeid in visitedNodes:
continue
cur_node = nodeid
next_nodes = {}
for nn in OSMMap.nodeLink[cur_node] + OSMMap.nodeLinkReverse[cur_node]:
next_nodes[nn] = 1
if len(next_nodes.keys()) == 2:
continue
for nextnode in next_nodes.keys():
if nextnode in visitedNodes:
continue
node_list = [nodeid]
cur_node = nextnode
while True:
node_list.append(cur_node)
neighbor = {}
for nn in OSMMap.nodeLink[cur_node] + OSMMap.nodeLinkReverse[cur_node]:
neighbor[nn] = 1
if len(neighbor.keys()) != 2:
break
if node_list[-2] == neighbor.keys()[0] :
cur_node = neighbor.keys()[1]
else:
cur_node = neighbor.keys()[0]
for i in range(1, len(node_list)-1):
visitedNodes.append(node_list[i])
dists = []
dist = 0
for i in range(0, len(node_list)-1):
dists.append(dist)
dist += distance(OSMMap.nodes[node_list[i]],OSMMap.nodes[node_list[i+1]])
dists.append(dist)
if dist < density/2:
continue
n = max(int(dist / density),1)
alphas = [float(x+1)/float(n+1) for x in range(n)]
for alpha in alphas:
for j in range(len(node_list)-1):
# Don't add starting locations in the tunnel
if metaData is not None:
nnn1 = OSMMap.nodeHashReverse[node_list[j]]
nnn2 = OSMMap.nodeHashReverse[node_list[j+1]]
if metaData.edgeProperty[metaData.edge2edgeid[(nnn1,nnn2)]]['layer'] < 0:
tunnel_skip_num += 1
continue
if alpha * dist >= dists[j] and alpha * dist <= dists[j+1]:
a = (alpha * dist - dists[j]) / (dists[j+1] - dists[j])
lat = (1-a)*OSMMap.nodes[node_list[j]][0] + a * OSMMap.nodes[node_list[j+1]][0]
lon = (1-a)*OSMMap.nodes[node_list[j]][1] + a * OSMMap.nodes[node_list[j+1]][1]
                    if img is not None:
x,y = Coord2Pixels(lat, lon, region[0], region[1], region[2], region[3], sizex, sizey)
if x>0 and x<sizex and y>0 and y < sizey:
if img[x,y] > 0:
result.append((lat, lon, node_list[j], node_list[j+1], alpha * dist - dists[j], dists[j+1] - alpha * dist))
else:
lat_mergin = mergin*(region[2]-region[0])
lon_mergin = mergin*(region[3]-region[1])
                        # These were 0.00100 and 0.00150 for lat and lon
if lat-region[0] > lat_mergin and region[2] - lat > lat_mergin and lon-region[1] > lon_mergin and region[3] - lon > lon_mergin:
result.append((lat, lon, node_list[j], node_list[j+1], alpha * dist - dists[j], dists[j+1] - alpha * dist))
for _,edge in OSMMap.edges.iteritems():
svgEdges.append((OSMMap.nodes[edge[0]][0],OSMMap.nodes[edge[0]][1], OSMMap.nodes[edge[1]][0], OSMMap.nodes[edge[1]][1]))
showTOPO.RenderRegion(result, svgEdges, region, "gt.svg")
print(len(result))
print("Skipped tunnels ", tunnel_skip_num)
return result
def TOPOGeneratePairs(GPSMap, OSMMap, OSMList, threshold = 0.00010, region = None, single = False, edgeids = None):
result = {}
matchedLoc = []
idx = index.Index()
if edgeids is not None:
for edgeid in edgeids:
if edgeid not in GPSMap.edges.keys():
continue
n1 = GPSMap.edges[edgeid][0]
n2 = GPSMap.edges[edgeid][1]
lat1 = GPSMap.nodes[n1][0]
lon1 = GPSMap.nodes[n1][1]
lat2 = GPSMap.nodes[n2][0]
lon2 = GPSMap.nodes[n2][1]
idx.insert(edgeid, (min(lat1, lat2), min(lon1, lon2), max(lat1, lat2), max(lon1, lon2)))
else:
for edgeid in GPSMap.edges.keys():
n1 = GPSMap.edges[edgeid][0]
n2 = GPSMap.edges[edgeid][1]
lat1 = GPSMap.nodes[n1][0]
lon1 = GPSMap.nodes[n1][1]
lat2 = GPSMap.nodes[n2][0]
lon2 = GPSMap.nodes[n2][1]
idx.insert(edgeid, (min(lat1, lat2), min(lon1, lon2), max(lat1, lat2), max(lon1, lon2)))
#for item in OSMList:
for i in range(len(OSMList)):
item = OSMList[i]
lat = item[0]
lon = item[1]
possible_edges = list(idx.intersection((lat-threshold*2,lon-threshold*2, lat+threshold*2, lon+threshold*2)))
min_dist = 10000
min_edge = -1
for edgeid in possible_edges:
n1 = GPSMap.edges[edgeid][0]
n2 = GPSMap.edges[edgeid][1]
n3 = item[2]
n4 = item[3]
lat1 = GPSMap.nodes[n1][0]
lon1 = GPSMap.nodes[n1][1]
lat2 = GPSMap.nodes[n2][0]
lon2 = GPSMap.nodes[n2][1]
lat3 = OSMMap.nodes[n3][0]
lon3 = OSMMap.nodes[n3][1]
lat4 = OSMMap.nodes[n4][0]
lon4 = OSMMap.nodes[n4][1]
nlat1, nlon1 = latlonNorm((lat2-lat1,lon2-lon1))
nlat2, nlon2 = latlonNorm((lat4-lat3,lon4-lon3))
dist = pointToLineDistanceLatLon((lat1,lon1), (lat2, lon2), (lat,lon))
if dist < threshold and dist < min_dist:
angle_dist = 1.0 - abs(nlat1 * nlat2 + nlon1 * nlon2)
#angle_dist = angleDistance((nlat1, nlon1), (nlat2, nlon2))
#if angle_dist < 0.1 or angle_dist > 1.9 :
if edgeids is None:
#if angle_dist < 0.25 or angle_dist > 1.75 :
print(angle_dist)
#if angle_dist < 0.13 : # 30 degrees
if angle_dist < 0.04 : # 15 degrees
min_edge = edgeid
min_dist = dist
else:
min_edge = edgeid
min_dist = dist
if min_edge != -1 :
edgeid = min_edge
n1 = GPSMap.edges[edgeid][0]
n2 = GPSMap.edges[edgeid][1]
lat1 = GPSMap.nodes[n1][0]
lon1 = GPSMap.nodes[n1][1]
lat2 = GPSMap.nodes[n2][0]
lon2 = GPSMap.nodes[n2][1]
result[i] = [edgeid, n1, n2, distance((lat1,lon1),(lat, lon)), distance((lat2,lon2),(lat, lon)), lat,lon]
matchedLoc.append((lat, lon))
if single == True :
return result
svgEdges = []
for _,edge in OSMMap.edges.iteritems():
svgEdges.append((OSMMap.nodes[edge[0]][0],OSMMap.nodes[edge[0]][1], OSMMap.nodes[edge[1]][0], OSMMap.nodes[edge[1]][1]))
if region is not None:
showTOPO.RenderRegion2(OSMList, matchedLoc, svgEdges, region, "coverage.svg")
return result
def TOPOGenerateList(GPSMap, OSMMap, check = True, threshold = 0.00010, region = None, image = None, direction = False):
result = {}
img = scipy.ndimage.imread(image)
sizex = np.shape(img)[0]
sizey = np.shape(img)[1]
if len(np.shape(img)) > 2:
img = img[:,:,0].reshape((sizex, sizey))
def Coord2Pixels(lat, lon, min_lat, min_lon, max_lat, max_lon, sizex, sizey):
ilat = sizex - int((lat-min_lat) / ((max_lat - min_lat)/sizex))
ilon = int((lon-min_lon) / ((max_lon - min_lon)/sizey))
return ilat, ilon
idx = index.Index()
for idthis in OSMMap.nodes.keys():
x,y = Coord2Pixels(OSMMap.nodes[idthis][0], OSMMap.nodes[idthis][1], region[0], region[1], region[2], region[3], sizex, sizey)
if x>0 and x<sizex and y>0 and y < sizey:
if img[x,y] > 0:
idx.insert(idthis, (OSMMap.nodes[idthis][0], OSMMap.nodes[idthis][1],OSMMap.nodes[idthis][0]+0.000001, OSMMap.nodes[idthis][1]+0.000001))
candidateNode = {}
for edgeId, edge in GPSMap.edges.iteritems():
n1 = edge[0]
n2 = edge[1]
if check :
if n1 in GPSMap.deletedNodes.keys() or n2 in GPSMap.deletedNodes.keys():
continue
if GPSMap.nodeScore[n1] < 1 or GPSMap.nodeScore[n2] < 1 :
continue
if n1 in GPSMap.nodeTerminate.keys() or n2 in GPSMap.nodeTerminate.keys():
continue
score = GPSMap.edgeScore[GPSMap.edgeHash[n1*10000000 + n2]]
if score <1:
continue
candidateNode[n1] = 1
candidateNode[n2] = 1
for nid in candidateNode.keys():
lat = GPSMap.nodes[nid][0]
lon = GPSMap.nodes[nid][1]
input_dir = []
for nnode in GPSMap.nodeLink[nid]:
nlat = GPSMap.nodes[nnode][0]
nlon = GPSMap.nodes[nnode][1]
input_dir.append((nlat-lat, nlon-lon))
if direction == False:
input_dir.append((-nlat+lat, -nlon+lon))
possible_nodes = list(idx.intersection((lat-threshold,lon-threshold, lat+threshold, lon+threshold)))
min_dist = 100000
min_node = -1
for pnode in possible_nodes:
latp = OSMMap.nodes[pnode][0]
lonp = OSMMap.nodes[pnode][1]
target_dir = []
for nnode in OSMMap.nodeLink[pnode]:
nlat = OSMMap.nodes[nnode][0]
nlon = OSMMap.nodes[nnode][1]
target_dir.append((nlat-latp, nlon-lonp))
if direction == False:
target_dir.append((-nlat+latp, -nlon+lonp))
match_dir = False
for dir1 in input_dir:
for dir2 in target_dir:
if angleDistance(dir1,dir2) < 0.1:
match_dir = True
break
if match_dir == False:
continue
d = distance((lat,lon),(latp, lonp))
if d < min_dist:
min_dist = d
min_node = pnode
#print(nid, lat, lon, len(possible_nodes), min_dist)
if min_node == -1 or min_dist > threshold:
continue
result[min_node] = nid
return result
def TOPO(GPSMap, OSMMap, step = 0.00005, r = 0.00300, num = 1000, threshold = 0.00020, region = None):
idx = index.Index()
for idthis in OSMMap.nodes.keys():
idx.insert(idthis, (OSMMap.nodes[idthis][0], OSMMap.nodes[idthis][1],OSMMap.nodes[idthis][0]+0.000001, OSMMap.nodes[idthis][1]+0.000001))
candidateNode = {}
for edgeId, edge in GPSMap.edges.iteritems():
n1 = edge[0]
n2 = edge[1]
# if n1 in GPSMap.deletedNodes.keys() or n2 in GPSMap.deletedNodes.keys():
# continue
# if GPSMap.nodeScore[n1] < 1 or GPSMap.nodeScore[n2] < 1 :
# continue
# if n1 in GPSMap.nodeTerminate.keys() or n2 in GPSMap.nodeTerminate.keys():
# continue
# score = GPSMap.edgeScore[GPSMap.edgeHash[n1*10000000 + n2]]
# if score <1:
# continue
candidateNode[n1] = 1
candidateNode[n2] = 1
precesion_sum = 0
recall_sum = 0
print(len(candidateNode))
for i in range(num):
while True:
nid = random.choice(candidateNode.keys())
lat = GPSMap.nodes[nid][0]
lon = GPSMap.nodes[nid][1]
possible_nodes = list(idx.intersection((lat-threshold,lon-threshold, lat+threshold, lon+threshold)))
min_dist = 100000
min_node = -1
for pnode in possible_nodes:
latp = OSMMap.nodes[pnode][0]
lonp = OSMMap.nodes[pnode][1]
d = distance((lat,lon),(latp, lonp))
if d < min_dist:
min_dist = d
min_node = pnode
#print(nid, lat, lon, len(possible_nodes), min_dist)
if min_node == -1 or min_dist > threshold:
continue
marbles = GPSMap.TOPOWalk(nid, step = step, r = r)
holes = OSMMap.TOPOWalk(min_node, step = step, r = r+step)
matchedNum = 0
for marble in marbles:
for hole in holes:
if distance(marble, hole) < threshold:
matchedNum += 1
break
precesion = float(matchedNum) / len(marbles)
matchedNum = 0
for hole in holes:
for marble in marbles:
if distance(marble, hole) < threshold:
matchedNum += 1
break
recall = float(matchedNum) / len(holes)
precesion_sum += precesion
recall_sum += recall
print(i, "MapNodeID", nid, "OSMNodeID", pnode, "Precesion",precesion, "Recall",recall, "Avg Precesion", precesion_sum/(i+1),"Avg Recall", recall_sum/(i+1))
break
def BipartiteGraphMatching(graph):
cost = 0
def getKey(item):
return item[2]
graph_ = sorted(graph, key=getKey)
matched_marbles = []
matched_holes = []
for item in graph_:
if item[0] not in matched_marbles and item[1] not in matched_holes:
matched_marbles.append(item[0])
matched_holes.append(item[1])
cost += item[2]
return matched_marbles, matched_holes, cost
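# Example (illustrative): for graph = [(m1, h1, 0.1), (m1, h2, 0.3), (m2, h1, 0.2)]
# the pass over the cost-sorted edges takes (m1, h1) first, then skips
# (m2, h1) because h1 is already matched and (m1, h2) because m1 is already
# matched, returning ([m1], [h1], 0.1). This is a greedy approximation, not an
# optimal assignment.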
def TOPO121(topo_result, roadgraph):
# create index
rtree_index = index.Index()
for ind in xrange(len(topo_result)):
r = 0.000001
lat = topo_result[ind][0]
lon = topo_result[ind][1]
rtree_index.insert(ind, [lat - r, lon - r, lat + r, lon + r])
new_list = []
# create dependency
for ind in xrange(len(topo_result)):
lat = topo_result[ind][0]
lon = topo_result[ind][1]
r_lat = 0.00030
r_lon = 0.00030 / math.cos(math.radians(lat))
candidate = list(rtree_index.intersection([lat-r_lat, lon-r_lon, lat+r_lat, lon+r_lon]))
competitors = []
gpsn1, gpsn2, gpsd1, gpsd2 = topo_result[ind][4], topo_result[ind][5], topo_result[ind][6], topo_result[ind][7]
for can_id in candidate:
t_gpsn1, t_gpsn2, t_gpsd1, t_gpsd2 = topo_result[can_id][4], topo_result[can_id][5], topo_result[can_id][6], topo_result[can_id][7]
d = roadgraph.distanceBetweenTwoLocation((gpsn1, gpsn2, gpsd1, gpsd2),(t_gpsn1, t_gpsn2, t_gpsd1, t_gpsd2), max_distance = 0.00030)
if d < 0.00020:
competitors.append(can_id)
new_list.append((topo_result[ind], ind, competitors))
# find maximum matching
# TODO
def get_key(item):
return item[0][2] # precision
new_list = sorted(new_list, key = get_key)
result = []
mark = {}
for ind in xrange(len(new_list)-1, -1, -1):
if new_list[ind][1] in mark:
print(new_list[ind][0][2])
if new_list[ind][0][2] < 0.9:
continue
result.append(new_list[ind][0])
for cc in new_list[ind][2]:
mark[cc]=1
print(len(topo_result), ' now is ', len(result))
return result
def topoAvg(topo_result):
p = 0
r = 0
for item in topo_result:
p = p + item[2]
r = r + item[3]
if len(topo_result) == 0 :
return 0, 0
return p/len(topo_result), r/len(topo_result)
def TOPOWithPairs(GPSMap, OSMMap, GPSList, OSMList, step = 0.00005, r = 0.00300, threshold = 0.00015, region = None, outputfile = "tmp.txt", one2oneMatching = True, metaData = None):
i = 0
precesion_sum = 0
recall_sum = 0
print(len(OSMList), len(GPSList.keys()))
rrr = float(len(GPSList.keys())) / float(len(OSMList))
print("Overall Coverage", rrr)
returnResult = []
for k,itemGPS in GPSList.iteritems():
itemOSM = OSMList[k]
gpsn1, gpsn2, gpsd1, gpsd2 = itemGPS[1],itemGPS[2],itemGPS[3],itemGPS[4]
osmn1, osmn2, osmd1, osmd2 = itemOSM[2],itemOSM[3],itemOSM[4],itemOSM[5]
osm_start_lat,osm_start_lon = itemOSM[0], itemOSM[1]
gps_start_lat, gps_start_lon = itemGPS[5], itemGPS[6]
# nid = pairs[min_node]
# lat = GPSMap.nodes[nid][0]
# lon = GPSMap.nodes[nid][1]
lat = itemOSM[0]
lon = itemOSM[1]
ts1 = time()
marbles = GPSMap.TOPOWalk(1, step = step, r = r, direction = False, newstyle = True, nid1=gpsn1, nid2=gpsn2, dist1=gpsd1, dist2= gpsd2)
# for recall
holes = OSMMap.TOPOWalk(1, step = step, r = r, direction = False, newstyle = True, nid1=osmn1, nid2=osmn2, dist1=osmd1, dist2= osmd2, metaData = metaData) # remove holes in tunnel
# for precision
holes_bidirection = OSMMap.TOPOWalk(1, step = step, r = r, direction = False, newstyle = True, nid1=osmn1, nid2=osmn2, dist1=osmd1, dist2= osmd2, bidirection = True, metaData = None) # don't remove holes in tunnel
ts2 = time()
idx_marbles = index.Index()
idx_holes = index.Index()
idx_holes_bidirection = index.Index()
for j in range(len(marbles)):
idx_marbles.insert(j, (marbles[j][0]-0.00001, marbles[j][1]-0.00001, marbles[j][0]+0.00001, marbles[j][1]+0.00001))
for j in range(len(holes)):
idx_holes.insert(j, (holes[j][0]-0.00001, holes[j][1]-0.00001, holes[j][0]+0.00001, holes[j][1]+0.00001))
for j in range(len(holes_bidirection)):
idx_holes_bidirection.insert(j, (holes_bidirection[j][0]-0.00001, holes_bidirection[j][1]-0.00001, holes_bidirection[j][0]+0.00001, holes_bidirection[j][1]+0.00001))
# holes_bidirection = holes
# idx_holes_bidirection = idx_holes
matchedNum = 0
bigraph = {}
matched_marbles = []
bipartite_graph = []
cost_map = {}
for marble in marbles:
rr = threshold * 1.8
possible_holes = list(idx_holes_bidirection.intersection((marble[0]-rr, marble[1]-rr, marble[0]+rr, marble[1]+rr)))
for hole_id in possible_holes:
hole = holes_bidirection[hole_id]
ddd = distance(marble, hole)
n1 = latlonNorm((marble[2], marble[3]))
n2 = latlonNorm((hole[2], hole[3]))
#ddd += (1.0 - abs(n1[0] * n2[0] + n1[1] * n2[1])) * threshold * 5
#ddd -= threshold / 2
#ddd = max(ddd, 0)
if marble[2] != marble[3] and hole[2] != hole[3]:
angle_d = 1.0 - abs(n1[0] * n2[0] + n1[1] * n2[1])
else:
angle_d = 0.0
#angle_d = 0.0
if ddd < threshold and angle_d < 0.29: # 0.03 --> 15 degrees 0.13 --> 30 degrees 0.29 --> 45 degrees
#cost_map[(marble, hole_id)] = ddd
if marble in bigraph.keys():
bigraph[marble].add(hole_id)
else:
bigraph[marble] = Set([hole_id])
bipartite_graph.append((marble, hole_id, ddd))
matchedNum += 1
matched_marbles.append(marble)
#break
soft_matchedNum = 0
if one2oneMatching == True:
matches = HopcroftKarp(bigraph).maximum_matching()
matchedNum = len(matches.keys()) / 2
# for k,v in matches.iteritems():
# if (k,v) in cost_map.keys():
# soft_matchedNum += max(min(((threshold - cost_map[(k,v)]) / threshold),1.0),0.0)
#matched_marbles, matched_holes, _ = BipartiteGraphMatching(bipartite_graph)
#matched_holes = [(holes_bidirection[item][0], holes_bidirection[item][1]) for item in matched_holes]
#matched_marbles = [(marbles[item][0], marbles[item][1]) for item in matched_marbles]
# for item in HopcroftKarp(bigraph).maximum_matching().keys():
# if type(item) is not int :
# matched_marbles.append(item)
print(i, len(marbles), len(holes))
if len(marbles)==0 or len(holes)==0:
continue
#precesion = float(soft_matchedNum) / len(marbles)
precesion = float(matchedNum) / len(marbles)
# TOPO Debug
#showTOPO.RenderSVG(marbles, holes, matched_marbles,matched_holes, lat, lon, 0.00300, "svg/nn"+outputfile.split('/')[-1]+"_%.6f_"%precesion+str(i)+"_"+str(lat)+"_"+str(lon)+".svg", OSMMap= OSMMap, starts=(osm_start_lat,osm_start_lon,gps_start_lat,gps_start_lon))
matchedNum = 0
bigraph = {}
cost_map = {}
for hole in holes:
rr = threshold * 1.8
possible_marbles = list(idx_marbles.intersection((hole[0]-rr, hole[1]-rr, hole[0]+rr, hole[1]+rr)))
for marble_id in possible_marbles:
marble = marbles[marble_id]
ddd = distance(marble, hole)
n1 = latlonNorm((marble[2], marble[3]))
n2 = latlonNorm((hole[2], hole[3]))
#ddd += (1.0 - abs(n1[0] * n2[0] + n1[1] * n2[1])) * threshold * 5
#ddd -= threshold / 2
#ddd = max(ddd, 0)
if marble[2] != marble[3] and hole[2] != hole[3]:
angle_d = 1.0 - abs(n1[0] * n2[0] + n1[1] * n2[1])
else:
angle_d = 0.0
#angle_d = 0.0
if ddd < threshold and angle_d < 0.29:
#cost_map[(hole, marble_id)] = ddd
if hole in bigraph.keys():
bigraph[hole].add(marble_id)
else:
bigraph[hole] = Set([marble_id])
matchedNum += 1
#break
soft_matchedNum = 0
if one2oneMatching == True:
#matchedNum = len(HopcroftKarp(bigraph).maximum_matching().keys()) / 2
matches = HopcroftKarp(bigraph).maximum_matching()
matchedNum = len(matches.keys()) / 2
# for k,v in matches.iteritems():
# if (k,v) in cost_map.keys():
# soft_matchedNum += max(min(((threshold - cost_map[(k,v)]) / threshold),1.0),0.0)
#recall = float(soft_matchedNum) / len(holes)
recall = float(matchedNum) / len(holes)
precesion_sum += precesion
recall_sum += recall
ts3 = time()
with open(outputfile, "a") as fout:
fout.write(str(i)+ " " + str(lat)+" "+str(lon)+" "+str(gpsn1)+ " "+str(gpsn2)+ " Precesion " + str(precesion)+ " Recall "+str(recall)+ " Avg Precesion "+ str(precesion_sum/(i+1)) + " Avg Recall " + str(recall_sum/(i+1))+" \n")
print(i, "Precesion",precesion, "Recall",recall, "Avg Precesion", precesion_sum/(i+1),"Avg Recall", recall_sum/(i+1), rrr, ts2-ts1, ts3-ts2)
returnResult.append((lat, lon, precesion, recall, gpsn1, gpsn2, gpsd1, gpsd2))
i = i + 1
#if i > 100:
# break
# try:
# with open(outputfile, "a") as fout:
# fout.write(str(precesion_sum/i)+" "+str(recall_sum/i)+" "+str(rrr)+ " "+ str(rrr * recall_sum/i) +"\n")
# except:
# with open(outputfile, "a") as fout:
# fout.write(str(0)+" "+str(0)+" "+str(0)+ " "+ "0.0" +"\n")
#with open("TOPOResultSummary.txt","a") as fout:
# fout.write(str(precesion_sum/i)+" "+str(recall_sum/i)+" "+str(rrr)+ " "+ str(rrr * recall_sum/i) +"\n")
new_topoResult = TOPO121(returnResult, GPSMap)
# Debug svg
# for rr in returnResult:
# if rr not in new_topoResult:
# print("remove rr")
# Popen("rm svg/*%s*.svg" % (str(rr[0])+"_"+str(rr[1])),shell=True).wait()
#print(topoAvg(returnResult), len(returnResult)/float(len(OSMList)))
print(topoAvg(new_topoResult), len(new_topoResult)/float(len(OSMList)))
p,r = topoAvg(new_topoResult)
# with open(outputfile, "a") as fout:
# fout.write(str(p)+" "+str(r)+" "+str(len(new_topoResult)/float(len(OSMList)))+"\n")
print("precision="+ str(p)+ " overall-recall="+ str(r * len(new_topoResult)/float(len(OSMList))))
try:
with open(outputfile, "a") as fout:
fout.write(str(p)+" "+str(r)+" "+str(len(new_topoResult)/float(len(OSMList)))+ " " + str(r * len(new_topoResult)/float(len(OSMList))) +"\n")
fout.write("precision="+ str(p)+ " overall-recall="+ str(r * len(new_topoResult)/float(len(OSMList))))
except:
with open(outputfile, "a") as fout:
fout.write(str(0)+" "+str(0)+" "+str(0)+ " " + str(0) +"\n")
return new_topoResult
def TOPOWithPairsNew(GPSMap, OSMMap, GPSList, OSMList, step = 0.00005, r = 0.00300, threshold = 0.00015, region = None, outputfile = "tmp.txt", one2oneMatching = True, base_n = None, svgname = "", soft = True, CheckGPS = None):
i = 0
precesion_sum = 0
recall_sum = 0
#print(len(OSMList), len(GPSList.keys()))
rrr = float(len(GPSList.keys())) / float(len(OSMList))
#print("Overall Coverage", rrr)
total_score = 0
total_f = 0
cost = 0
matchedNum = 0
marbles =[]
number_of_holes = []
for k,itemGPS in GPSList.iteritems():
itemOSM = OSMList[k]
gpsn1, gpsn2, gpsd1, gpsd2 = itemGPS[1],itemGPS[2],itemGPS[3],itemGPS[4]
osmn1, osmn2, osmd1, osmd2 = itemOSM[2],itemOSM[3],itemOSM[4],itemOSM[5]
# nid = pairs[min_node]
# lat = GPSMap.nodes[nid][0]
# lon = GPSMap.nodes[nid][1]
lat = itemOSM[0]
lon = itemOSM[1]
ts1 = time()
if gpsn1 in GPSMap.nodes.keys():
marbles = GPSMap.TOPOWalk(1, step = step, r = r, direction = False, newstyle = True, nid1=gpsn1, nid2=gpsn2, dist1=gpsd1, dist2= gpsd2)
else:
marbles = []
holes = OSMMap.TOPOWalk(1, step = step, r = r, direction = False, newstyle = True, nid1=osmn1, nid2=osmn2, dist1=osmd1, dist2= osmd2, CheckGPS = CheckGPS)
number_of_holes.append(len(holes))
#holes_bidirection = OSMMap.TOPOWalk(1, step = step, r = r, direction = False, newstyle = True, nid1=osmn1, nid2=osmn2, dist1=osmd1, dist2= osmd2, bidirection = True)
ts2 = time()
holes_bidirection = holes
idx_marbles = index.Index()
idx_holes = index.Index()
#idx_holes_bidirection = index.Index()
for j in range(len(marbles)):
idx_marbles.insert(j, (marbles[j][0]-0.00001, marbles[j][1]-0.00001, marbles[j][0]+0.00001, marbles[j][1]+0.00001))
for j in range(len(holes)):
idx_holes.insert(j, (holes[j][0]-0.00001, holes[j][1]-0.00001, holes[j][0]+0.00001, holes[j][1]+0.00001))
idx_holes_bidirection = idx_holes
# for j in range(len(holes_bidirection)):
# idx_holes_bidirection.insert(j, (holes_bidirection[j][0]-0.00001, holes_bidirection[j][1]-0.00001, holes_bidirection[j][0]+0.00001, holes_bidirection[j][1]+0.00001))
bigraph = {}
matched_marbles = []
bipartite_graph = []
for marble in marbles:
rr = threshold * 1.8
possible_holes = list(idx_holes_bidirection.intersection((marble[0]-rr, marble[1]-rr, marble[0]+rr, marble[1]+rr)))
for hole_id in possible_holes:
hole = holes_bidirection[hole_id]
ddd = distance(marble, hole)
n1 = latlonNorm((marble[2], marble[3]))
n2 = latlonNorm((hole[2], hole[3]))
angle_d = (1.0 - abs(n1[0] * n2[0] + n1[1] * n2[1]))
if ddd < threshold and angle_d < 0.3:
if marble in bigraph.keys():
bigraph[marble].add(hole_id)
else:
bigraph[marble] = Set([hole_id])
n1 = latlonNorm((marble[2], marble[3]))
n2 = latlonNorm((hole[2], hole[3]))
ddd -= threshold / 3
ddd = max(ddd*1.5, 0)
ddd += angle_d * threshold * 0.5
bipartite_graph.append((marble, hole, ddd))
matchedNum += 1
matched_marbles.append(marble)
#break
#if one2oneMatching == True:
# matchedNum = len(HopcroftKarp(bigraph).maximum_matching().keys()) / 2
matched_marbles, matched_holes, cost = BipartiteGraphMatching(bipartite_graph)
matchedNum = len(matched_marbles)
if soft == False:
cost = 0
showTOPO.RenderSVG(marbles, holes_bidirection, matched_marbles,matched_holes, lat, lon, 0.00500, "svg/"+svgname +str(i)+"_"+str(lat)+"_"+str(lon)+".svg")
score = cost + (len(marbles) - matchedNum) * threshold * 1.15
total_score += score
#print(i, len(marbles), len(holes), score, matchedNum, cost)
i = i + 1
#if base_n == None:
base_n = len(holes)
if len(marbles) == 0:
f = 0
else:
smooth_precision = 1.0 - (cost+ (len(marbles) - matchedNum) * threshold * 1.15) / (len(marbles) * threshold * 1.15)
smooth_recall = 1.0 - (cost+ (base_n - matchedNum) * threshold * 1.15) / (base_n * threshold * 1.15)
print(smooth_precision, smooth_recall, len(marbles), len(holes))
if smooth_recall + smooth_precision == 0:
f = 0
else:
f = 2*smooth_precision*smooth_recall/(smooth_recall + smooth_precision)
total_f += f
total_f /= i
return total_score, total_f, number_of_holes #cost+ (len(marbles) - matchedNum) * threshold * 3
#return total_score, 10
def TOPOWithList(GPSMap, OSMMap, pairs, step = 0.00005, r = 0.00300, threshold = 0.00015, region = None, outputfile = "tmp.txt"):
i = 0
precesion_sum = 0
recall_sum = 0
for min_node in pairs.keys():
nid = pairs[min_node]
lat = GPSMap.nodes[nid][0]
lon = GPSMap.nodes[nid][1]
marbles = GPSMap.TOPOWalk(nid, step = step, r = r, direction = False)
holes = OSMMap.TOPOWalk(min_node, step = step, r = r, direction = False)
showTOPO.RenderSVG(marbles, holes, lat, lon, 0.00500, "svg/"+str(i)+"_"+str(lat)+"_"+str(lon)+".svg")
matchedNum = 0
for marble in marbles:
for hole in holes:
if distance(marble, hole) < threshold:
matchedNum += 1
break
if len(marbles)==0 or len(holes)==0:
continue
precesion = float(matchedNum) / len(marbles)
matchedNum = 0
for hole in holes:
for marble in marbles:
if distance(marble, hole) < threshold:
matchedNum += 1
break
recall = float(matchedNum) / len(holes)
precesion_sum += precesion
recall_sum += recall
with open(outputfile, "a") as fout:
fout.write(str(i)+ " MapNodeID "+ str(nid)+ " OSMNodeID "+str(min_node)+ " Precesion " + str(precesion)+ " Recall "+str(recall)+ " Avg Precesion "+ str(precesion_sum/(i+1)) + " Avg Recall " + str(recall_sum/(i+1))+" \n")
print(i, "MapNodeID", nid, "OSMNodeID", min_node, "Precesion",precesion, "Recall",recall, "Avg Precesion", precesion_sum/(i+1),"Avg Recall", recall_sum/(i+1))
i = i + 1
with open(outputfile, "a") as fout:
fout.write(str(precesion_sum/i)+" "+str(recall_sum/i)+"\n")
with open("TOPOResultSummary.txt","a") as fout:
fout.write(str(precesion_sum/i)+" "+str(recall_sum/i)+"\n")
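# Usage sketch (hypothetical loader; both map objects are assumed to expose
# .nodes, .edges, .edgeScore and .TOPOWalk as used above):
#   gps_map, osm_map = load_maps()            # hypothetical
#   results = TOPOWithPairs(gps_map, osm_map, gps_list, osm_list,
#                           outputfile="topo_eval.txt")
#   # results is a list of (lat, lon, precision, recall, n1, n2, d1, d2)
#   # tuples, already de-duplicated by TOPO121.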
| [
"showTOPO.RenderRegion2",
"numpy.sqrt",
"math.radians",
"sets.Set",
"rtree.index.Index",
"showTOPO.RenderRegion",
"numpy.shape",
"time.time",
"hopcroftkarp.HopcroftKarp"
] | [((364, 398), 'numpy.sqrt', 'np.sqrt', (['(p11 * p11 + p1[0] * p1[0])'], {}), '(p11 * p11 + p1[0] * p1[0])\n', (371, 398), True, 'import numpy as np\n'), ((520, 558), 'numpy.sqrt', 'np.sqrt', (['(p2[0] * p2[0] + p2[1] * p2[1])'], {}), '(p2[0] * p2[0] + p2[1] * p2[1])\n', (527, 558), True, 'import numpy as np\n'), ((1007, 1029), 'numpy.sqrt', 'np.sqrt', (['(a * a + b * b)'], {}), '(a * a + b * b)\n', (1014, 1029), True, 'import numpy as np\n'), ((1766, 1788), 'numpy.sqrt', 'np.sqrt', (['(a * a + b * b)'], {}), '(a * a + b * b)\n', (1773, 1788), True, 'import numpy as np\n'), ((1823, 1861), 'numpy.sqrt', 'np.sqrt', (['(p1[0] * p1[0] + p1[1] * p1[1])'], {}), '(p1[0] * p1[0] + p1[1] * p1[1])\n', (1830, 1861), True, 'import numpy as np\n'), ((1871, 1909), 'numpy.sqrt', 'np.sqrt', (['(p2[0] * p2[0] + p2[1] * p2[1])'], {}), '(p2[0] * p2[0] + p2[1] * p2[1])\n', (1878, 1909), True, 'import numpy as np\n'), ((2033, 2055), 'numpy.sqrt', 'np.sqrt', (['(a * a + b * b)'], {}), '(a * a + b * b)\n', (2040, 2055), True, 'import numpy as np\n'), ((6361, 6418), 'showTOPO.RenderRegion', 'showTOPO.RenderRegion', (['result', 'svgEdges', 'region', '"""gt.svg"""'], {}), "(result, svgEdges, region, 'gt.svg')\n", (6382, 6418), False, 'import showTOPO\n'), ((6680, 6693), 'rtree.index.Index', 'index.Index', ([], {}), '()\n', (6691, 6693), False, 'from rtree import index\n'), ((10748, 10761), 'rtree.index.Index', 'index.Index', ([], {}), '()\n', (10759, 10761), False, 'from rtree import index\n'), ((13589, 13602), 'rtree.index.Index', 'index.Index', ([], {}), '()\n', (13600, 13602), False, 'from rtree import index\n'), ((16747, 16760), 'rtree.index.Index', 'index.Index', ([], {}), '()\n', (16758, 16760), False, 'from rtree import index\n'), ((716, 738), 'numpy.sqrt', 'np.sqrt', (['(a * a + b * b)'], {}), '(a * a + b * b)\n', (723, 738), True, 'import numpy as np\n'), ((829, 851), 'numpy.sqrt', 'np.sqrt', (['(a * a + b * b)'], {}), '(a * a + b * b)\n', (836, 851), True, 'import numpy as np\n'), ((10064, 10141), 'showTOPO.RenderRegion2', 'showTOPO.RenderRegion2', (['OSMList', 'matchedLoc', 'svgEdges', 'region', '"""coverage.svg"""'], {}), "(OSMList, matchedLoc, svgEdges, region, 'coverage.svg')\n", (10086, 10141), False, 'import showTOPO\n'), ((10364, 10377), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (10372, 10377), True, 'import numpy as np\n'), ((10393, 10406), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (10401, 10406), True, 'import numpy as np\n'), ((19646, 19652), 'time.time', 'time', ([], {}), '()\n', (19650, 19652), False, 'from time import time\n'), ((20285, 20291), 'time.time', 'time', ([], {}), '()\n', (20289, 20291), False, 'from time import time\n'), ((20325, 20338), 'rtree.index.Index', 'index.Index', ([], {}), '()\n', (20336, 20338), False, 'from rtree import index\n'), ((20359, 20372), 'rtree.index.Index', 'index.Index', ([], {}), '()\n', (20370, 20372), False, 'from rtree import index\n'), ((20405, 20418), 'rtree.index.Index', 'index.Index', ([], {}), '()\n', (20416, 20418), False, 'from rtree import index\n'), ((25782, 25788), 'time.time', 'time', ([], {}), '()\n', (25786, 25788), False, 'from time import time\n'), ((28960, 28966), 'time.time', 'time', ([], {}), '()\n', (28964, 28966), False, 'from time import time\n'), ((29601, 29607), 'time.time', 'time', ([], {}), '()\n', (29605, 29607), False, 'from time import time\n'), ((29667, 29680), 'rtree.index.Index', 'index.Index', ([], {}), '()\n', (29678, 29680), False, 'from rtree import index\n'), ((29701, 29714), 
'rtree.index.Index', 'index.Index', ([], {}), '()\n', (29712, 29714), False, 'from rtree import index\n'), ((336, 353), 'math.radians', 'math.radians', (['lat'], {}), '(lat)\n', (348, 353), False, 'import math\n'), ((1168, 1187), 'math.radians', 'math.radians', (['p1[0]'], {}), '(p1[0])\n', (1180, 1187), False, 'import math\n'), ((1257, 1276), 'math.radians', 'math.radians', (['p1[0]'], {}), '(p1[0])\n', (1269, 1276), False, 'import math\n'), ((1734, 1753), 'math.radians', 'math.radians', (['p1[0]'], {}), '(p1[0])\n', (1746, 1753), False, 'import math\n'), ((2352, 2365), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (2360, 2365), True, 'import numpy as np\n'), ((2385, 2398), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (2393, 2398), True, 'import numpy as np\n'), ((10422, 10435), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (10430, 10435), True, 'import numpy as np\n'), ((2418, 2431), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (2426, 2431), True, 'import numpy as np\n'), ((17175, 17192), 'math.radians', 'math.radians', (['lat'], {}), '(lat)\n', (17187, 17192), False, 'import math\n'), ((22645, 22666), 'hopcroftkarp.HopcroftKarp', 'HopcroftKarp', (['bigraph'], {}), '(bigraph)\n', (22657, 22666), False, 'from hopcroftkarp import HopcroftKarp\n'), ((25319, 25340), 'hopcroftkarp.HopcroftKarp', 'HopcroftKarp', (['bigraph'], {}), '(bigraph)\n', (25331, 25340), False, 'from hopcroftkarp import HopcroftKarp\n'), ((22351, 22365), 'sets.Set', 'Set', (['[hole_id]'], {}), '([hole_id])\n', (22354, 22365), False, 'from sets import Set\n'), ((25067, 25083), 'sets.Set', 'Set', (['[marble_id]'], {}), '([marble_id])\n', (25070, 25083), False, 'from sets import Set\n'), ((31183, 31197), 'sets.Set', 'Set', (['[hole_id]'], {}), '([hole_id])\n', (31186, 31197), False, 'from sets import Set\n')] |
import numpy as np
from scipy import signal, stats
import matplotlib.pyplot as plt
from scipy.fft import fft, fftfreq
import xgboost as xgb
def randomtimesignal(n, t, plot):
def sinusoidal():
# random sinusoidal
a = np.random.uniform(0, 10, 1)[0]
f = np.random.uniform(1e9, 1e10, 1)[0]
ph = np.random.uniform(-2*np.pi, 2*np.pi, 1)[0]
return a*np.sin(2*np.pi*f*t + ph*f)
i = 0
tot = 0
while i < n:
tot = tot + sinusoidal()
i += 1
if plot:
plt.plot(t, tot)
plt.xlabel("Time t")
plt.ylabel("Signal e(t)")
plt.show()
return tot
def rts(t):
    # Note: a, f and ph are drawn once, so the loop below sums the *same*
    # sinusoid n times, i.e. the result equals n * a * sin(2*pi*f*t + ph*f).
a = np.random.uniform(0, 10, 1)[0]
f = np.random.uniform(1e9, 1e10, 1)[0]
ph = np.random.uniform(-2 * np.pi, 2 * np.pi, 1)[0]
i = 0
n = 1000
tot = 0
while i < n:
tot = tot + a*np.sin(2*np.pi*f*t + ph*f)
i += 1
return tot
def ffts(tau, t, plot, n_samp):
sin = randomtimesignal(10, t, plot)
yf = fft(sin)
xf = fftfreq(n_samp, tau)[:n_samp//10]
if plot:
plt.plot(xf, 2.0/n_samp * np.abs(yf[0:n_samp//10]))
plt.xlabel("Frequency")
plt.ylabel("Amplitude")
plt.show()
return sin, yf, signal.find_peaks(np.abs(yf[0:n_samp//10]))[0]/10
def comparison(e_dec_t, e_t, p, t):
k = int(len(e_t)*p)
plt.plot(t[:k], e_dec_t[:k])
plt.xlabel("Time t")
plt.ylabel("Signal e(t) deconvolution")
plt.show()
plt.plot(t[:k], e_t[:k])
plt.xlabel("Time t")
plt.ylabel("Signal e(t) original")
plt.show()
plt.plot(t[:k], 100*abs(e_dec_t[:k] - e_t[:k])/(e_t[:k]))
plt.xlabel("Time t")
plt.ylabel("Percentage % error")
plt.show()
def stat_analysis(n, func, tau, t, n_samp):
i = 0
ks = []
stats1 = []
while i < n:
e_t, e_f, f1 = ffts(tau, t, False, n_samp)
h_t, h_f, f2 = ffts(tau, t, False, n_samp)
r_t = signal.convolve(e_t, h_t)
e_dec_t, remainder = func(r_t, h_t)
errors = 100*abs(e_dec_t - e_t)/e_t
ks.append(np.argmax(errors > 1))
length_min = np.min([len(f1), len(f2)])
pearson = stats.pearsonr(f1[:length_min-1], f2[:length_min-1])[0]
eucl = np.linalg.norm(f1[:length_min-1]-f2[:length_min-1])
stats1.append([np.var(f1), np.var(f2), np.max(f1), np.max(f2), np.min(f1), np.min(f2), 10-len(f1),
10-len(f2), pearson, eucl])
i += 1
reg = xgb.XGBRegressor()
reg.fit(stats1, ks)
    reg.get_booster().feature_names = ["Variance of e(t)", "Variance of h(t)", "Maximum value of e(t)",
                                       "Maximum value of h(t)", "Minimum value of e(t)", "Minimum value of h(t)",
                                       "Number of indistinct peaks of e(t)", "Number of indistinct peaks of h(t)",
                                       "Pearson Correlation", "Euclidean Distance"]
xgb.plot_importance(reg)
return ks, stats1
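# Usage sketch (assumed sampling grid; scipy.signal.deconvolve provides the
# (quotient, remainder) interface expected of `func`):
#   tau = 1e-11                       # assumed sample spacing
#   n_samp = 1000
#   t = np.arange(n_samp) * tau
#   ks, feats = stat_analysis(50, signal.deconvolve, tau, t, n_samp)
# ks[i] is the first sample index where the deconvolution error exceeds 1%.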
| [
"scipy.stats.pearsonr",
"scipy.fft.fftfreq",
"numpy.abs",
"matplotlib.pyplot.ylabel",
"xgboost.plot_importance",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linalg.norm",
"signal.convolve",
"numpy.argmax",
"numpy.max",
"numpy.min",
"xgboost.XGBRegressor",
"numpy.random.uni... | [((1059, 1067), 'scipy.fft.fft', 'fft', (['sin'], {}), '(sin)\n', (1062, 1067), False, 'from scipy.fft import fft, fftfreq\n'), ((1415, 1443), 'matplotlib.pyplot.plot', 'plt.plot', (['t[:k]', 'e_dec_t[:k]'], {}), '(t[:k], e_dec_t[:k])\n', (1423, 1443), True, 'import matplotlib.pyplot as plt\n'), ((1449, 1469), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time t"""'], {}), "('Time t')\n", (1459, 1469), True, 'import matplotlib.pyplot as plt\n'), ((1475, 1514), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Signal e(t) deconvolution"""'], {}), "('Signal e(t) deconvolution')\n", (1485, 1514), True, 'import matplotlib.pyplot as plt\n'), ((1520, 1530), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1528, 1530), True, 'import matplotlib.pyplot as plt\n'), ((1536, 1560), 'matplotlib.pyplot.plot', 'plt.plot', (['t[:k]', 'e_t[:k]'], {}), '(t[:k], e_t[:k])\n', (1544, 1560), True, 'import matplotlib.pyplot as plt\n'), ((1566, 1586), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time t"""'], {}), "('Time t')\n", (1576, 1586), True, 'import matplotlib.pyplot as plt\n'), ((1592, 1626), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Signal e(t) original"""'], {}), "('Signal e(t) original')\n", (1602, 1626), True, 'import matplotlib.pyplot as plt\n'), ((1632, 1642), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1640, 1642), True, 'import matplotlib.pyplot as plt\n'), ((1711, 1731), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time t"""'], {}), "('Time t')\n", (1721, 1731), True, 'import matplotlib.pyplot as plt\n'), ((1737, 1769), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage % error"""'], {}), "('Percentage % error')\n", (1747, 1769), True, 'import matplotlib.pyplot as plt\n'), ((1775, 1785), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1783, 1785), True, 'import matplotlib.pyplot as plt\n'), ((2550, 2568), 'xgboost.XGBRegressor', 'xgb.XGBRegressor', ([], {}), '()\n', (2566, 2568), True, 'import xgboost as xgb\n'), ((3020, 3044), 'xgboost.plot_importance', 'xgb.plot_importance', (['reg'], {}), '(reg)\n', (3039, 3044), True, 'import xgboost as xgb\n'), ((559, 575), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'tot'], {}), '(t, tot)\n', (567, 575), True, 'import matplotlib.pyplot as plt\n'), ((585, 605), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time t"""'], {}), "('Time t')\n", (595, 605), True, 'import matplotlib.pyplot as plt\n'), ((615, 640), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Signal e(t)"""'], {}), "('Signal e(t)')\n", (625, 640), True, 'import matplotlib.pyplot as plt\n'), ((650, 660), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (658, 660), True, 'import matplotlib.pyplot as plt\n'), ((701, 728), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)', '(1)'], {}), '(0, 10, 1)\n', (718, 728), True, 'import numpy as np\n'), ((741, 790), 'numpy.random.uniform', 'np.random.uniform', (['(1000000000.0)', '(10000000000.0)', '(1)'], {}), '(1000000000.0, 10000000000.0, 1)\n', (758, 790), True, 'import numpy as np\n'), ((786, 829), 'numpy.random.uniform', 'np.random.uniform', (['(-2 * np.pi)', '(2 * np.pi)', '(1)'], {}), '(-2 * np.pi, 2 * np.pi, 1)\n', (803, 829), True, 'import numpy as np\n'), ((1078, 1098), 'scipy.fft.fftfreq', 'fftfreq', (['n_samp', 'tau'], {}), '(n_samp, tau)\n', (1085, 1098), False, 'from scipy.fft import fft, fftfreq\n'), ((1196, 1219), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency"""'], {}), "('Frequency')\n", (1206, 1219), True, 'import matplotlib.pyplot as 
plt\n'), ((1229, 1252), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (1239, 1252), True, 'import matplotlib.pyplot as plt\n'), ((1262, 1272), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1270, 1272), True, 'import matplotlib.pyplot as plt\n'), ((2013, 2038), 'signal.convolve', 'signal.convolve', (['e_t', 'h_t'], {}), '(e_t, h_t)\n', (2028, 2038), False, 'import signal\n'), ((2311, 2368), 'numpy.linalg.norm', 'np.linalg.norm', (['(f1[:length_min - 1] - f2[:length_min - 1])'], {}), '(f1[:length_min - 1] - f2[:length_min - 1])\n', (2325, 2368), True, 'import numpy as np\n'), ((263, 290), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)', '(1)'], {}), '(0, 10, 1)\n', (280, 290), True, 'import numpy as np\n'), ((307, 356), 'numpy.random.uniform', 'np.random.uniform', (['(1000000000.0)', '(10000000000.0)', '(1)'], {}), '(1000000000.0, 10000000000.0, 1)\n', (324, 356), True, 'import numpy as np\n'), ((356, 399), 'numpy.random.uniform', 'np.random.uniform', (['(-2 * np.pi)', '(2 * np.pi)', '(1)'], {}), '(-2 * np.pi, 2 * np.pi, 1)\n', (373, 399), True, 'import numpy as np\n'), ((417, 451), 'numpy.sin', 'np.sin', (['(2 * np.pi * f * t + ph * f)'], {}), '(2 * np.pi * f * t + ph * f)\n', (423, 451), True, 'import numpy as np\n'), ((2148, 2169), 'numpy.argmax', 'np.argmax', (['(errors > 1)'], {}), '(errors > 1)\n', (2157, 2169), True, 'import numpy as np\n'), ((2239, 2295), 'scipy.stats.pearsonr', 'stats.pearsonr', (['f1[:length_min - 1]', 'f2[:length_min - 1]'], {}), '(f1[:length_min - 1], f2[:length_min - 1])\n', (2253, 2295), False, 'from scipy import signal, stats\n'), ((912, 946), 'numpy.sin', 'np.sin', (['(2 * np.pi * f * t + ph * f)'], {}), '(2 * np.pi * f * t + ph * f)\n', (918, 946), True, 'import numpy as np\n'), ((1161, 1187), 'numpy.abs', 'np.abs', (['yf[0:n_samp // 10]'], {}), '(yf[0:n_samp // 10])\n', (1167, 1187), True, 'import numpy as np\n'), ((2387, 2397), 'numpy.var', 'np.var', (['f1'], {}), '(f1)\n', (2393, 2397), True, 'import numpy as np\n'), ((2399, 2409), 'numpy.var', 'np.var', (['f2'], {}), '(f2)\n', (2405, 2409), True, 'import numpy as np\n'), ((2411, 2421), 'numpy.max', 'np.max', (['f1'], {}), '(f1)\n', (2417, 2421), True, 'import numpy as np\n'), ((2423, 2433), 'numpy.max', 'np.max', (['f2'], {}), '(f2)\n', (2429, 2433), True, 'import numpy as np\n'), ((2435, 2445), 'numpy.min', 'np.min', (['f1'], {}), '(f1)\n', (2441, 2445), True, 'import numpy as np\n'), ((2447, 2457), 'numpy.min', 'np.min', (['f2'], {}), '(f2)\n', (2453, 2457), True, 'import numpy as np\n'), ((1312, 1338), 'numpy.abs', 'np.abs', (['yf[0:n_samp // 10]'], {}), '(yf[0:n_samp // 10])\n', (1318, 1338), True, 'import numpy as np\n')] |
import numpy as np
from scipy.ndimage import affine_transform
from .._transform import Transformer
class Resize(Transformer):
def __init__(self) -> None:
super().__init__()
def transform_matric(self, scale):
        assert len(scale) == 2, f'len(scale) = {len(scale)} != 2'
resize_axis_matrix = np.array(
[[1 / scale[0], 0., 0.],
[0., 1 / scale[1], 0.],
[0., 0., 1.]])
return resize_axis_matrix
def __call__(self, inp, mask, scale=None, size=None):
assert scale is not None or size is not None, \
'Scale is None and size is None.'
assert scale is None or size is None, \
'Ambiguous, scale is not None and size is not None.'
width = mask.shape[0]
height = mask.shape[1]
if scale is not None and not isinstance(scale, (tuple, list)):
scale = (scale, scale)
if size is not None and not isinstance(size, (tuple, list)):
size = (size, size)
if scale is None:
scale = (size[0] / width,
size[1] / height)
if size is None:
size = (int(width * scale[0]),
int(height * scale[1]))
affine_matrix = self.transform_matric(scale)
if inp.ndim == 2:
inp = affine_transform(inp, affine_matrix, output_shape=size)
else:
inp_ = []
for i in range(inp.shape[0]):
inp_.append(affine_transform(inp[i], affine_matrix, output_shape=size))
inp = np.stack(inp_, axis=0)
mask = affine_transform(mask, affine_matrix, order=0, output_shape=size)
return inp, mask.round()
class RandomResize(Transformer):
def __init__(self, r_min, r_max) -> None:
super().__init__()
assert r_max > r_min, \
f'r_max <= r_min, r_max={r_max} and r_min={r_min}'
self.r_max = r_max
self.r_min = r_min
self.resizer = Resize()
def __call__(self, inp, mask):
scale = np.random.rand() * (self.r_max - self.r_min) + self.r_min
return self.resizer(inp, mask, scale=scale)
class ResizeTo(Transformer):
def __init__(self, size) -> None:
super().__init__()
assert isinstance(size, (tuple, list)) and len(size) == 2
self.size = size
self.resizer = Resize()
def __call__(self, inp, mask):
return self.resizer(inp, mask, size=self.size)
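# Usage sketch (illustrative shapes):
#   resizer = ResizeTo((256, 256))
#   inp, mask = resizer(np.zeros((3, 512, 512)), np.zeros((512, 512)))
#   # inp.shape == (3, 256, 256), mask.shape == (256, 256)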
| [
"numpy.array",
"numpy.random.rand",
"scipy.ndimage.affine_transform",
"numpy.stack"
] | [((362, 441), 'numpy.array', 'np.array', (['[[1 / scale[0], 0.0, 0.0], [0.0, 1 / scale[1], 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1 / scale[0], 0.0, 0.0], [0.0, 1 / scale[1], 0.0], [0.0, 0.0, 1.0]])\n', (370, 441), True, 'import numpy as np\n'), ((1699, 1764), 'scipy.ndimage.affine_transform', 'affine_transform', (['mask', 'affine_matrix'], {'order': '(0)', 'output_shape': 'size'}), '(mask, affine_matrix, order=0, output_shape=size)\n', (1715, 1764), False, 'from scipy.ndimage import affine_transform\n'), ((1421, 1476), 'scipy.ndimage.affine_transform', 'affine_transform', (['inp', 'affine_matrix'], {'output_shape': 'size'}), '(inp, affine_matrix, output_shape=size)\n', (1437, 1476), False, 'from scipy.ndimage import affine_transform\n'), ((1661, 1683), 'numpy.stack', 'np.stack', (['inp_'], {'axis': '(0)'}), '(inp_, axis=0)\n', (1669, 1683), True, 'import numpy as np\n'), ((2139, 2155), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2153, 2155), True, 'import numpy as np\n'), ((1583, 1641), 'scipy.ndimage.affine_transform', 'affine_transform', (['inp[i]', 'affine_matrix'], {'output_shape': 'size'}), '(inp[i], affine_matrix, output_shape=size)\n', (1599, 1641), False, 'from scipy.ndimage import affine_transform\n')] |
from copy import deepcopy
from typing import Dict, Callable, Optional
import numpy as np
from sklearn.linear_model import SGDRegressor
from .domain import Predictor
from attr import attrs, attrib
from .nodes import Node
import padasip as pa
@attrs(auto_attribs=True, slots=True, frozen=True)
class BinaryIntermediatePredictor(Predictor):
left: Node
right: Node
binary_condition: Callable[[np.ndarray, Dict[bool, Node]], Node]
    _mapping: Dict[bool, Node] = attrib(init=False)

    def __attrs_post_init__(self):
        # _mapping is never populated in the original source; assumed
        # convention: the binary condition indexes True -> left child,
        # False -> right child.
        object.__setattr__(self, "_mapping", {True: self.left, False: self.right})
def predict(self, X: np.ndarray, is_alt: bool) -> np.ndarray:
return self.binary_condition(X, self._mapping).predict(X, is_alt)
def __call__(self, X: np.ndarray, y: np.ndarray, is_alt: bool) -> np.ndarray:
return self.binary_condition(X, self._mapping)(X, y, is_alt)
def offspring(self) -> "BinaryIntermediatePredictor":
return deepcopy(self)
@attrs(auto_attribs=True, slots=True, frozen=True)
class LeastSquarePredictor(Predictor):
n_dim: int
init_weights: Optional[np.ndarray] = None
_adaptive_filter: pa.filters.FilterRLS = attrib(init=False)
def __attrs_post_init__(self):
object.__setattr__(self, "_adaptive_filter", pa.filters.FilterRLS(self.n_dim))
if self.init_weights is not None:
self._adaptive_filter.w = self.init_weights
def predict(self, X: np.ndarray, is_alt: bool) -> np.ndarray:
x = np.hstack((1.0, X))
return self._adaptive_filter.predict(x)
def __call__(self, X: np.ndarray, y: np.ndarray, is_alt: bool) -> np.ndarray:
x = np.hstack((1.0, X))
self._adaptive_filter.adapt(y, x)
return self._adaptive_filter.predict(x)
def offspring(self) -> "LeastSquarePredictor":
return LeastSquarePredictor(n_dim=self.n_dim, init_weights=self._adaptive_filter.w)
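# Usage sketch (illustrative shapes): the RLS filter sees a bias-augmented
# input, so n_dim must be the feature count plus one.
#   pred = LeastSquarePredictor(n_dim=4)      # 3 features + 1 bias term
#   y_hat = pred(np.array([0.1, 0.2, 0.3]), 1.0, is_alt=False)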
@attrs(auto_attribs=True, slots=True, frozen=True)
class GradientDescentPredictor(Predictor):
sgd_factory: Callable[[], SGDRegressor] = SGDRegressor
init_sgd: Optional[SGDRegressor] = None
_sgd: SGDRegressor = attrib(init=False)
def __attrs_post_init__(self):
        object.__setattr__(self, "_sgd", self.init_sgd if self.init_sgd is not None else self.sgd_factory())
def predict(self, X: np.ndarray, is_alt: bool) -> np.ndarray:
return self._sgd.predict(X)
def __call__(self, X: np.ndarray, y: np.ndarray, is_alt: bool) -> np.ndarray:
y_pred = self.predict(X, is_alt=is_alt)
self._sgd.partial_fit(X, y)
return y_pred
    def offspring(self) -> "GradientDescentPredictor":
        return GradientDescentPredictor(self.sgd_factory, deepcopy(self._sgd))
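# Usage sketch (illustrative): SGDRegressor.predict raises before any fit, so
# seed the predictor with a pre-fitted estimator via init_sgd.
#   seed = SGDRegressor().partial_fit(np.array([[0.0, 0.0]]), np.array([0.0]))
#   gd = GradientDescentPredictor(init_sgd=seed)
#   y_pred = gd(np.array([[0.1, 0.2]]), np.array([1.0]), is_alt=False)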
| [
"attr.attrs",
"numpy.hstack",
"attr.attrib",
"copy.deepcopy",
"padasip.filters.FilterRLS"
] | [((247, 296), 'attr.attrs', 'attrs', ([], {'auto_attribs': '(True)', 'slots': '(True)', 'frozen': '(True)'}), '(auto_attribs=True, slots=True, frozen=True)\n', (252, 296), False, 'from attr import attrs, attrib\n'), ((881, 930), 'attr.attrs', 'attrs', ([], {'auto_attribs': '(True)', 'slots': '(True)', 'frozen': '(True)'}), '(auto_attribs=True, slots=True, frozen=True)\n', (886, 930), False, 'from attr import attrs, attrib\n'), ((1816, 1865), 'attr.attrs', 'attrs', ([], {'auto_attribs': '(True)', 'slots': '(True)', 'frozen': '(True)'}), '(auto_attribs=True, slots=True, frozen=True)\n', (1821, 1865), False, 'from attr import attrs, attrib\n'), ((477, 495), 'attr.attrib', 'attrib', ([], {'init': '(False)'}), '(init=False)\n', (483, 495), False, 'from attr import attrs, attrib\n'), ((1077, 1095), 'attr.attrib', 'attrib', ([], {'init': '(False)'}), '(init=False)\n', (1083, 1095), False, 'from attr import attrs, attrib\n'), ((2037, 2055), 'attr.attrib', 'attrib', ([], {'init': '(False)'}), '(init=False)\n', (2043, 2055), False, 'from attr import attrs, attrib\n'), ((863, 877), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (871, 877), False, 'from copy import deepcopy\n'), ((1396, 1415), 'numpy.hstack', 'np.hstack', (['(1.0, X)'], {}), '((1.0, X))\n', (1405, 1415), True, 'import numpy as np\n'), ((1559, 1578), 'numpy.hstack', 'np.hstack', (['(1.0, X)'], {}), '((1.0, X))\n', (1568, 1578), True, 'import numpy as np\n'), ((1185, 1217), 'padasip.filters.FilterRLS', 'pa.filters.FilterRLS', (['self.n_dim'], {}), '(self.n_dim)\n', (1205, 1217), True, 'import padasip as pa\n'), ((2601, 2620), 'copy.deepcopy', 'deepcopy', (['self._sgd'], {}), '(self._sgd)\n', (2609, 2620), False, 'from copy import deepcopy\n')] |
import os
import sys
import argparse
import logging
import yaml
from typing import List
import copy
import random
from dataclasses import dataclass
from tqdm import tqdm
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader
from poutyne.framework import Model
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.preprocessing import Normalizer
from analogy.data import build_analogy_examples_from_file
from analogy.metrics import CorrelationMetric, CorrelationBinnedAccuracyMetric
from analogy.models import MyEmbeddings, AnalogyModel, IdentityMapper, NeuralMapper
LANGUAGES = [
'da',
'de',
'en',
'es',
'fi',
'fr',
'it',
'nl',
'pl',
'pt',
'sv',
]
@dataclass
class AnalogyExample:
e1: List[str]
e2: List[str]
e3: List[str]
e4: List[str]
distance: float
def r2_score_pytorch(y_pred, y_true):
return r2_score(y_true.detach().cpu().numpy(), y_pred.detach().cpu().numpy())
def process_entity(entity):
elements = list()
entity = entity.lower()
es = entity.split()
for e in es:
elements.append(e)
return elements
def preprocess_analogies(analogies):
logging.info("Preprocessing analogies")
processed_analogies = list()
for analogy in analogies:
processed_analogies.append(AnalogyExample(
process_entity(analogy.q_1_source),
process_entity(analogy.q_1_target),
process_entity(analogy.q_2_source),
process_entity(analogy.q_2_target),
float(analogy.distance_pairwise)
))
return processed_analogies
def build_vocab(processed_analogies):
logging.info("Building vocabulary")
vocab = set()
for a in processed_analogies:
for element in a.e1:
vocab.add(element)
for element in a.e2:
vocab.add(element)
for element in a.e3:
vocab.add(element)
for element in a.e4:
vocab.add(element)
return vocab
def get_vectors_for_vocab(path, vocab):
logging.info("Getting word vectors for vocab")
vectors = dict()
with open(path, 'r', encoding='utf-8') as fhandle:
for i, line in enumerate(fhandle):
elements = line.split()
if len(elements) > 2:
try:
word = elements[0].lower()
if word in vocab:
vector = np.asarray([float(i) for i in elements[1:]])
vectors[word] = vector
except:
pass
# print("Could not process line {}".format(i))
return vectors
def build_word_mapping(vocab):
logging.info("Building word to index mapping")
word_to_idx = dict()
for word in vocab:
word_to_idx[word] = len(word_to_idx)
return word_to_idx
def vectorize_dataset(analogies, word_to_idx):
elements = list()
for a in analogies:
v_e1 = word_to_idx[a.e1]
v_e2 = word_to_idx[a.e2]
v_e3 = word_to_idx[a.e3]
v_e4 = word_to_idx[a.e4]
data = {
"original": (a.e1, a.e2, a.e3, a.e4),
"input_ids": (v_e1, v_e2, v_e3, v_e4),
"distance": a.distance
}
elements.append(data)
return elements
def merge_entity(entity, vectors):
# This method merges an entity tokens
# and create a new mean embedding from the token found in the entity
new_entities = list()
num_entities_found_in_vectors = 0
new_vector = np.zeros_like(list(vectors.values())[0])
for e in entity:
if e in vectors:
num_entities_found_in_vectors += 1
new_entities.append(e)
new_vector += vectors[e]
else:
new_entities.append("#UNK({})".format(e))
new_entity = "_".join(new_entities)
if num_entities_found_in_vectors > 0:
if new_entity not in vectors:
vectors[new_entity] = new_vector / num_entities_found_in_vectors # Compute the mean for these
return new_entity
else:
return None
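# Example: for entity ["new", "york"] with vectors for both tokens, the merged
# key is "new_york" and its vector is the mean of the two token vectors; a
# token without a vector is kept in the key as "#UNK(token)" and excluded from
# the mean.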
def merge_entities_and_augment_vectors(analogies, vectors):
logging.info("Merging entities and averaging their embeddings")
analogies_with_merged_entities = list()
for a in tqdm(analogies):
new_e1 = merge_entity(a.e1, vectors)
new_e2 = merge_entity(a.e2, vectors)
new_e3 = merge_entity(a.e3, vectors)
new_e4 = merge_entity(a.e4, vectors)
if new_e1 and new_e2 and new_e3 and new_e4: # if we found anything for each entity
analogies_with_merged_entities.append(AnalogyExample(
new_e1,
new_e2,
new_e3,
new_e4,
a.distance
))
return analogies_with_merged_entities
def collate(examples):
input_ids = list()
distances = list()
for e in examples:
input_ids.append(e['input_ids'])
distances.append(e['distance'])
return (torch.tensor(input_ids), torch.FloatTensor(distances)), torch.LongTensor([1] * len(examples))
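# Example: a batch of two examples is collated into
#   (input_ids, distances), dummy_targets
# where input_ids is a LongTensor of shape (2, 4) holding the four entity ids
# of each analogy, distances is a FloatTensor of shape (2,), and dummy_targets
# is a constant tensor of ones consumed by the custom loss and metrics.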
def split_examples(dataset, ratio=0.8):
np.random.shuffle(dataset)
train = dataset[:int(len(dataset)*ratio)]
test = dataset[int(len(dataset)*ratio):]
return train, test
def filter_vectors_for_entities(analogies, vectors):
entities_set = set()
for a in analogies:
entities_set.add(a.e1)
entities_set.add(a.e2)
entities_set.add(a.e3)
entities_set.add(a.e4)
filtered_vector_set = {k: v for k, v in vectors.items() if k in entities_set}
return filtered_vector_set
def load_set(language, dataset_path, set_name, test=False):
set_path = f"{dataset_path}.{set_name}"
analogies = build_analogy_examples_from_file(set_path)
if test:
analogies = list(analogies)[:10000]
processed_analogies = preprocess_analogies(analogies)
vocab = build_vocab(processed_analogies)
vector_path = './data/embeddings/wiki.{}.align.vec'.format(language)
vectors = get_vectors_for_vocab(vector_path, vocab)
analogies_with_merged_entities = merge_entities_and_augment_vectors(processed_analogies, vectors)
filtered_vector_set = filter_vectors_for_entities(analogies_with_merged_entities, vectors)
word_to_idx = build_word_mapping(vectors.keys())
return analogies_with_merged_entities, filtered_vector_set, word_to_idx
def init_weights(m):
if type(m) == nn.Linear:
torch.nn.init.kaiming_uniform_(m.weight.data)
def evaluate(configs, language):
cuda_device = 0
device = torch.device("cuda:%d" % cuda_device if torch.cuda.is_available() else "cpu")
logging.info("Sending model to device {}".format(device))
logging.info("Working on language {}".format(language))
dataset_path = "./data/analogy_dists_splits/analogy_{dataset}_{language}_dists.csv".format(
dataset=configs['dataset'],
language=language
)
train_set, train_vectors, train_word_to_idx = load_set(language, dataset_path, 'train', configs['test'])
valid_set, valid_vectors, valid_word_to_idx = load_set(language, dataset_path, 'valid', configs['test'])
test_set, test_vectors, test_word_to_idx = load_set(language, dataset_path, 'test', configs['test'])
vectorized_train = vectorize_dataset(train_set, train_word_to_idx)
vectorized_valid = vectorize_dataset(valid_set, valid_word_to_idx)
vectorized_test = vectorize_dataset(test_set, test_word_to_idx)
train_loader = DataLoader(vectorized_train, batch_size=16, collate_fn=collate, shuffle=True, drop_last=True)
valid_loader = DataLoader(vectorized_valid, batch_size=16, collate_fn=collate)
test_loader = DataLoader(vectorized_test, batch_size=16, collate_fn=collate)
train_embeddings = MyEmbeddings(train_word_to_idx, embedding_dim=300)
train_embeddings.load_words_embeddings(train_vectors)
valid_embeddings = MyEmbeddings(valid_word_to_idx, embedding_dim=300)
valid_embeddings.load_words_embeddings(valid_vectors)
test_embeddings = MyEmbeddings(test_word_to_idx, embedding_dim=300)
test_embeddings.load_words_embeddings(test_vectors)
model = AnalogyModel(train_embeddings, valid_embeddings, test_embeddings, configs['reg_term_lambda'], configs['delta'])
mapper = IdentityMapper()
model.set_mapper(mapper)
poutyne_model = Model(
model,
'adam',
loss_function=model.loss_function,
batch_metrics=[model.accuracy],
epoch_metrics=[CorrelationMetric(), CorrelationBinnedAccuracyMetric()]
)
poutyne_model.to(device)
loss, (acc, corr, _) = poutyne_model.evaluate_generator(valid_loader)
logging.info("Statistics on valid set before train (IdentityMapper used);")
logging.info("Accuracy: {}".format(acc))
logging.info("Correlation: {}".format(corr))
# Setting the embedding table
model.test_embeddings = test_embeddings
loss, (acc, corr, _) = poutyne_model.evaluate_generator(test_loader)
logging.info("Statistics on test set before train (IdentityMapper used);")
logging.info("Accuracy: {}".format(acc))
logging.info("Correlation: {}".format(corr))
logging.info("Launching train")
poutyne_model.fit_generator(train_loader, epochs=10)
# Train mapper
original_embeddings = MyEmbeddings(train_word_to_idx, embedding_dim=300)
original_embeddings.load_words_embeddings(train_vectors)
normalizer = Normalizer()
X = original_embeddings.weight.data.cpu().numpy()
Y = train_embeddings.weight.data.cpu().numpy()
X_norm = normalizer.transform(X)
Y_norm = normalizer.transform(Y)
X_train, X_test, Y_train, Y_test = train_test_split(X_norm, Y_norm)
mapper_model = nn.Sequential(
nn.Linear(300, 512),
nn.Tanh(),
nn.Linear(512, 512),
nn.Tanh(),
nn.Linear(512, 300),
)
mapper_model.apply(init_weights)
poutyne_mapper_model = Model(mapper_model, 'adam', 'mse', batch_metrics=[r2_score_pytorch])
poutyne_mapper_model.to(device)
poutyne_mapper_model.fit(
X_train, Y_train,
validation_data=(X_test, Y_test),
epochs=30,
batch_size=16
)
neural_mapper = NeuralMapper(mapper_model, device)
model.set_mapper(neural_mapper)
model.test_embeddings = valid_embeddings
loss, (acc, corr, _) = poutyne_model.evaluate_generator(valid_loader)
    logging.info("Statistics on valid set after train (NeuralMapper used);")
logging.info("Accuracy: {}".format(acc))
logging.info("Correlation: {}".format(corr))
model.test_embeddings = test_embeddings
loss, (acc, corr, _) = poutyne_model.evaluate_generator(test_loader)
logging.info("Statistics on test set after train (NeuralMapper used);")
logging.info("Accuracy: {}".format(acc))
logging.info("Correlation: {}".format(corr))
def main(configs):
for language in LANGUAGES:
evaluate(configs, language)
if __name__ == '__main__':
random.seed(42)
np.random.seed(42)
torch.manual_seed(42)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(message)s')
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
base_configs = yaml.load(open('./configs/base.yaml'), Loader=yaml.FullLoader)
argument_parser = argparse.ArgumentParser()
for config, value in base_configs.items():
if type(value) is bool:
# Hack as per https://stackoverflow.com/a/46951029
argument_parser.add_argument('--{}'.format(config),
type=lambda x: (str(x).lower() in ['true', '1', 'yes']),
default=value)
else:
argument_parser.add_argument('--{}'.format(config), type=type(value), default=value)
options = argument_parser.parse_args()
configs = vars(options)
main(configs)
| [
"logging.getLogger",
"logging.StreamHandler",
"torch.nn.Tanh",
"analogy.metrics.CorrelationMetric",
"torch.cuda.is_available",
"analogy.metrics.CorrelationBinnedAccuracyMetric",
"logging.info",
"argparse.ArgumentParser",
"numpy.random.seed",
"sklearn.preprocessing.Normalizer",
"analogy.models.Id... | [((1297, 1336), 'logging.info', 'logging.info', (['"""Preprocessing analogies"""'], {}), "('Preprocessing analogies')\n", (1309, 1336), False, 'import logging\n'), ((1774, 1809), 'logging.info', 'logging.info', (['"""Building vocabulary"""'], {}), "('Building vocabulary')\n", (1786, 1809), False, 'import logging\n'), ((2165, 2211), 'logging.info', 'logging.info', (['"""Getting word vectors for vocab"""'], {}), "('Getting word vectors for vocab')\n", (2177, 2211), False, 'import logging\n'), ((2804, 2850), 'logging.info', 'logging.info', (['"""Building word to index mapping"""'], {}), "('Building word to index mapping')\n", (2816, 2850), False, 'import logging\n'), ((4264, 4327), 'logging.info', 'logging.info', (['"""Merging entities and averaging their embeddings"""'], {}), "('Merging entities and averaging their embeddings')\n", (4276, 4327), False, 'import logging\n'), ((4385, 4400), 'tqdm.tqdm', 'tqdm', (['analogies'], {}), '(analogies)\n', (4389, 4400), False, 'from tqdm import tqdm\n'), ((5247, 5273), 'numpy.random.shuffle', 'np.random.shuffle', (['dataset'], {}), '(dataset)\n', (5264, 5273), True, 'import numpy as np\n'), ((5851, 5893), 'analogy.data.build_analogy_examples_from_file', 'build_analogy_examples_from_file', (['set_path'], {}), '(set_path)\n', (5883, 5893), False, 'from analogy.data import build_analogy_examples_from_file\n'), ((7603, 7701), 'torch.utils.data.DataLoader', 'DataLoader', (['vectorized_train'], {'batch_size': '(16)', 'collate_fn': 'collate', 'shuffle': '(True)', 'drop_last': '(True)'}), '(vectorized_train, batch_size=16, collate_fn=collate, shuffle=\n True, drop_last=True)\n', (7613, 7701), False, 'from torch.utils.data import DataLoader\n'), ((7716, 7779), 'torch.utils.data.DataLoader', 'DataLoader', (['vectorized_valid'], {'batch_size': '(16)', 'collate_fn': 'collate'}), '(vectorized_valid, batch_size=16, collate_fn=collate)\n', (7726, 7779), False, 'from torch.utils.data import DataLoader\n'), ((7798, 7860), 'torch.utils.data.DataLoader', 'DataLoader', (['vectorized_test'], {'batch_size': '(16)', 'collate_fn': 'collate'}), '(vectorized_test, batch_size=16, collate_fn=collate)\n', (7808, 7860), False, 'from torch.utils.data import DataLoader\n'), ((7885, 7935), 'analogy.models.MyEmbeddings', 'MyEmbeddings', (['train_word_to_idx'], {'embedding_dim': '(300)'}), '(train_word_to_idx, embedding_dim=300)\n', (7897, 7935), False, 'from analogy.models import MyEmbeddings, AnalogyModel, IdentityMapper, NeuralMapper\n'), ((8018, 8068), 'analogy.models.MyEmbeddings', 'MyEmbeddings', (['valid_word_to_idx'], {'embedding_dim': '(300)'}), '(valid_word_to_idx, embedding_dim=300)\n', (8030, 8068), False, 'from analogy.models import MyEmbeddings, AnalogyModel, IdentityMapper, NeuralMapper\n'), ((8150, 8199), 'analogy.models.MyEmbeddings', 'MyEmbeddings', (['test_word_to_idx'], {'embedding_dim': '(300)'}), '(test_word_to_idx, embedding_dim=300)\n', (8162, 8199), False, 'from analogy.models import MyEmbeddings, AnalogyModel, IdentityMapper, NeuralMapper\n'), ((8270, 8386), 'analogy.models.AnalogyModel', 'AnalogyModel', (['train_embeddings', 'valid_embeddings', 'test_embeddings', "configs['reg_term_lambda']", "configs['delta']"], {}), "(train_embeddings, valid_embeddings, test_embeddings, configs[\n 'reg_term_lambda'], configs['delta'])\n", (8282, 8386), False, 'from analogy.models import MyEmbeddings, AnalogyModel, IdentityMapper, NeuralMapper\n'), ((8395, 8411), 'analogy.models.IdentityMapper', 'IdentityMapper', ([], {}), '()\n', (8409, 8411), False, 
'from analogy.models import MyEmbeddings, AnalogyModel, IdentityMapper, NeuralMapper\n'), ((8776, 8851), 'logging.info', 'logging.info', (['"""Statistics on valid set before train (IdentityMapper used);"""'], {}), "('Statistics on valid set before train (IdentityMapper used);')\n", (8788, 8851), False, 'import logging\n'), ((9102, 9176), 'logging.info', 'logging.info', (['"""Statistics on test set before train (IdentityMapper used);"""'], {}), "('Statistics on test set before train (IdentityMapper used);')\n", (9114, 9176), False, 'import logging\n'), ((9276, 9307), 'logging.info', 'logging.info', (['"""Launching train"""'], {}), "('Launching train')\n", (9288, 9307), False, 'import logging\n'), ((9411, 9461), 'analogy.models.MyEmbeddings', 'MyEmbeddings', (['train_word_to_idx'], {'embedding_dim': '(300)'}), '(train_word_to_idx, embedding_dim=300)\n', (9423, 9461), False, 'from analogy.models import MyEmbeddings, AnalogyModel, IdentityMapper, NeuralMapper\n'), ((9540, 9552), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {}), '()\n', (9550, 9552), False, 'from sklearn.preprocessing import Normalizer\n'), ((9771, 9803), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_norm', 'Y_norm'], {}), '(X_norm, Y_norm)\n', (9787, 9803), False, 'from sklearn.model_selection import train_test_split\n'), ((10033, 10101), 'poutyne.framework.Model', 'Model', (['mapper_model', '"""adam"""', '"""mse"""'], {'batch_metrics': '[r2_score_pytorch]'}), "(mapper_model, 'adam', 'mse', batch_metrics=[r2_score_pytorch])\n", (10038, 10101), False, 'from poutyne.framework import Model\n'), ((10304, 10338), 'analogy.models.NeuralMapper', 'NeuralMapper', (['mapper_model', 'device'], {}), '(mapper_model, device)\n', (10316, 10338), False, 'from analogy.models import MyEmbeddings, AnalogyModel, IdentityMapper, NeuralMapper\n'), ((10499, 10573), 'logging.info', 'logging.info', (['"""Statistics on valid set after train (IdentityMapper used);"""'], {}), "('Statistics on valid set after train (IdentityMapper used);')\n", (10511, 10573), False, 'import logging\n'), ((10790, 10861), 'logging.info', 'logging.info', (['"""Statistics on test set after train (NeuralMapper used);"""'], {}), "('Statistics on test set after train (NeuralMapper used);')\n", (10802, 10861), False, 'import logging\n'), ((11078, 11093), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (11089, 11093), False, 'import random\n'), ((11098, 11116), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (11112, 11116), True, 'import numpy as np\n'), ((11121, 11142), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (11138, 11142), False, 'import torch\n'), ((11156, 11175), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (11173, 11175), False, 'import logging\n'), ((11226, 11288), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s : %(levelname)s : %(message)s"""'], {}), "('%(asctime)s : %(levelname)s : %(message)s')\n", (11243, 11288), False, 'import logging\n'), ((11310, 11343), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (11331, 11343), False, 'import logging\n'), ((11529, 11554), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11552, 11554), False, 'import argparse\n'), ((6569, 6614), 'torch.nn.init.kaiming_uniform_', 'torch.nn.init.kaiming_uniform_', (['m.weight.data'], {}), '(m.weight.data)\n', (6599, 6614), False, 'import torch\n'), ((9846, 9865), 'torch.nn.Linear', 'nn.Linear', (['(300)', '(512)'], {}), 
'(300, 512)\n', (9855, 9865), False, 'from torch import nn\n'), ((9875, 9884), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (9882, 9884), False, 'from torch import nn\n'), ((9894, 9913), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(512)'], {}), '(512, 512)\n', (9903, 9913), False, 'from torch import nn\n'), ((9923, 9932), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (9930, 9932), False, 'from torch import nn\n'), ((9942, 9961), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(300)'], {}), '(512, 300)\n', (9951, 9961), False, 'from torch import nn\n'), ((5107, 5130), 'torch.tensor', 'torch.tensor', (['input_ids'], {}), '(input_ids)\n', (5119, 5130), False, 'import torch\n'), ((5132, 5160), 'torch.FloatTensor', 'torch.FloatTensor', (['distances'], {}), '(distances)\n', (5149, 5160), False, 'import torch\n'), ((6723, 6748), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6746, 6748), False, 'import torch\n'), ((8606, 8625), 'analogy.metrics.CorrelationMetric', 'CorrelationMetric', ([], {}), '()\n', (8623, 8625), False, 'from analogy.metrics import CorrelationMetric, CorrelationBinnedAccuracyMetric\n'), ((8627, 8660), 'analogy.metrics.CorrelationBinnedAccuracyMetric', 'CorrelationBinnedAccuracyMetric', ([], {}), '()\n', (8658, 8660), False, 'from analogy.metrics import CorrelationMetric, CorrelationBinnedAccuracyMetric\n')] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Pixel Starships Market API
# ----- Packages ------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import datetime
import csv
import numpy as np
import os
import pandas as pd
import pss_core as core
import pss_prestige as p
import re
import urllib.request
import xml.etree.ElementTree
# Discord limits messages to 2000 characters
MESSAGE_CHARACTER_LIMIT = 2000
HOME = os.getenv('HOME')
base_url = 'http://{}/'.format(core.get_production_server())
# ----- Utilities -----------------------------------------------------
def save_raw_text(raw_text, filename):
with open(filename, 'w') as f:
f.write(raw_text)
def get_base_url(api_version=1, https=False):
if https is True:
prefix = 'https://'
else:
prefix = 'http://'
if api_version==2:
return prefix + 'api2.pixelstarships.com/'
else:
return prefix + 'api.pixelstarships.com/'
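# Illustration (derived from the strings above):
#   get_base_url(api_version=2, https=True) -> 'https://api2.pixelstarships.com/'
#   get_base_url()                           -> 'http://api.pixelstarships.com/'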
# ----- Get Latest Version --------------------------------------------
def get_latest_version():
    url = base_url + 'SettingService/GetLatestVersion?languageKey=en'
data = urllib.request.urlopen(url).read()
return data.decode()
# ----- Item Designs --------------------------------------------------
def get_item_designs():
url = base_url + 'ItemService/ListItemDesigns2?languageKey=en'
data = urllib.request.urlopen(url).read()
return data.decode()
def save_item_design_raw(raw_text):
now = datetime.datetime.now()
filename = 'data/items-{}.txt'.format(now.strftime('%Y%m%d'))
save_raw_text(raw_text, filename)
def load_item_design_raw(refresh=False):
now = datetime.datetime.now()
    filename = 'data/items-{}.txt'.format(now.strftime('%Y%m%d'))
if os.path.isfile(filename) and refresh is False:
with open(filename, 'r') as f:
raw_text = f.read()
else:
raw_text = get_item_designs()
save_item_design_raw(raw_text)
return raw_text
def parse_item_designs(raw_text):
d = {}
# r_lookup = {}
root = xml.etree.ElementTree.fromstring(raw_text)
for c in root:
# print(c.tag) # ListItemDesigns
for cc in c:
# print(cc.tag) # ItemDesigns
for ccc in cc:
# print(ccc.tag) # ItemDesign
if ccc.tag != 'ItemDesign':
continue
item_name = ccc.attrib['ItemDesignName']
d[item_name] = ccc.attrib
# r_lookup[int(ccc.attrib['ItemDesignId'])] = item_name
return d
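# parse_item_designs maps ItemDesignName -> attribute dict; an illustrative
# (hypothetical) entry:
#   d['Assault Armor'] -> {'ItemDesignName': 'Assault Armor',
#                          'MarketPrice': '150', 'EnhancementType': 'Hp', ...}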
def xmltext_to_df(raw_text):
df = pd.DataFrame()
root = xml.etree.ElementTree.fromstring(raw_text)
for c in root:
for cc in c:
for i, ccc in enumerate(cc):
df = df.append(pd.DataFrame(ccc.attrib, index=[i]))
return df
# ----- Lists ---------------------------------------------------------
def get_lists(df_items):
item_rarities = list(df_items.Rarity.unique())
item_enhancements = list(df_items.EnhancementType.unique())
item_types = list(df_items.ItemType.unique())
item_subtypes = list(df_items.ItemSubType.unique())
return item_rarities, item_enhancements, item_types, item_subtypes
# ----- Parsing -------------------------------------------------------
def fix_item(item):
# Convert to lower case & non alpha-numeric
item = re.sub('[^a-z0-9]', '', item.lower())
item = re.sub('anonmask', 'anonymousmask', item)
item = re.sub('armour', 'armor', item)
item = re.sub('bunny', 'rabbit', item)
item = re.sub("(darkmatterrifle|dmr)(mark|mk)?(ii|2)", "dmrmarkii", item)
item = re.sub('golden', 'gold', item)
return item
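# Examples of the normalisation above:
#   fix_item('DMR Mark II') -> 'dmrmarkii'
#   fix_item('Anon Mask')   -> 'anonymousmask'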
def filter_item_designs(search_str, rtbl, filter):
item_original = list(rtbl.keys())
item_lookup = [ fix_item(s) for s in item_original ]
item_fixed = fix_item(search_str)
txt = ''
for i, item_name in enumerate(item_lookup):
m = re.search(item_fixed, item_name)
if m is not None:
item_name = item_original[i]
d = rtbl[item_name]
# Filter out items
if (item_name == 'Gas' or
item_name == 'Mineral' or
d['MissileDesignId'] != '0' or
d['CraftDesignId'] != '0' or
d['CharacterDesignId'] != '0'):
continue
# Process
# item_price = d['FairPrice']
item_price = d['MarketPrice']
item_slot = re.sub('Equipment', '', d['ItemSubType'])
item_stat = d['EnhancementType']
item_stat_value = d['EnhancementValue']
if filter == 'price':
if item_price == '0':
item_price = 'NA'
txt += '{}: {}\n'.format(item_name, item_price)
elif filter == 'stats':
if item_stat == 'None':
continue
txt += '{}: {} +{} ({})\n'.format(item_name,
item_stat, item_stat_value, item_slot)
else:
print('Invalid filter')
quit()
if len(txt) == 0:
return None
else:
return txt.strip('\n')
def get_real_name(search_str, rtbl):
item_original = list(rtbl.keys())
item_lookup = [ fix_item(s) for s in item_original ]
item_fixed = fix_item(search_str)
try:
# Attempt to find an exact match
idx = item_lookup.index(item_fixed)
return item_original[idx]
except:
# Perform search if the exact match failed
m = [ re.search(item_fixed, n) is not None for n in item_lookup ]
item = pd.Series(item_original)[m]
if len(item) > 0:
return item.iloc[0]
else:
return None
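# e.g. get_real_name('dmr mk2', rtbl) resolves to the canonical 'DMR Mark II'
# entry, assuming that design name exists in rtbl.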
# ----- Item Stats ----------------------------------------------------
def get_item_stats(item_name):
raw_text = load_item_design_raw()
item_lookup = parse_item_designs(raw_text)
market_txt = filter_item_designs(item_name, item_lookup, filter='stats')
if market_txt is not None:
market_txt = '**Item Stats**\n' + market_txt
return market_txt
# ----- Best Items ----------------------------------------------------
def rtbl2items(rtbl):
df_rtbl = pd.DataFrame(rtbl).T
m1 = df_rtbl.EnhancementType != 'None'
m2 = df_rtbl.ItemSubType.str.contains('Equipment')
df_items = df_rtbl[m1 & m2].copy()
df_items.ItemSubType = df_items.ItemSubType.str.replace('Equipment', '')
df_items.ItemSubType = df_items.ItemSubType.str.lower()
df_items.EnhancementType = df_items.EnhancementType.str.lower()
df_items.EnhancementValue = df_items.EnhancementValue.astype(float)
return df_items
def filter_item(df_items, slot, enhancement, cols=None):
slot = slot.lower()
enhancement = enhancement.lower()
m1 = df_items.ItemSubType == slot
m2 = df_items.EnhancementType == enhancement
if cols is None:
return df_items[m1 & m2].sort_values(
'EnhancementValue', ascending=False).copy()
else:
return df_items.loc[m1 & m2, cols].sort_values(
'EnhancementValue', ascending=False).copy()
def itemfilter2txt(df_filter):
if len(df_filter) == 0:
return None
txt = ''
for row in df_filter.iterrows():
data = row[1]
mprice = data['MarketPrice']
if mprice == '0':
mprice = 'NA'
txt += '{}: {} ({} bux)\n'.format(data[0], data[1], mprice)
return txt
# ----- Item Ingredients ----------------------------------------------
def get_item_rlookup(df):
item_rlookup = {}
for row in df.iterrows():
data = row[1]
item_rlookup[data['ItemDesignId']] = data['ItemDesignName']
return item_rlookup
def get_recipe(df, item_rlookup, item_name):
ingredients = df.loc[df['ItemDesignName'] == item_name, 'Ingredients']
if len(ingredients) == 1:
ingredients = ingredients.values[0]
if len(ingredients) == 0:
return None
ingredients = ingredients.split('|')
recipe = {}
for ingredient in ingredients:
item_id, item_qty = ingredient.split('x')
recipe[item_rlookup[item_id]] = int(item_qty)
return recipe
else:
return None
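# Ingredients strings have the form '<ItemDesignId>x<qty>|...', so e.g.
# '601x2|602x1' yields {'Gas': 2, 'Mineral': 1} (IDs and names here are made up).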
def print_recipe(recipe, df_items):
txt = ''
total = 0
for ingredient in recipe.keys():
qty = recipe[ingredient]
fprice = df_items.loc[df_items['ItemDesignName'] == ingredient, 'FairPrice'].iloc[0]
mprice = df_items.loc[df_items['ItemDesignName'] == ingredient, 'MarketPrice'].iloc[0]
if mprice == '0':
mprice = np.nan
txt += '{} x {} (price: NA)\n'.format(qty, ingredient)
else:
mprice = int(mprice)
txt += '{} x {} ({} bux): {} bux\n'.format(qty, ingredient, mprice, qty*mprice)
        total += qty*mprice  # NaN mprice propagates so the total prints as NA below
if np.isnan(total):
txt += 'Crafting Cost: NA'
else:
txt += 'Crafting Cost: {} bux'.format(total)
return txt
def collapse_recipe(recipe, df_items, item_rlookup):
collapse = False
sub_recipe = {}
for ingredient in recipe.keys():
qty = recipe[ingredient]
sub_ingredients = get_recipe(df_items, item_rlookup, ingredient)
if sub_ingredients is None:
if ingredient in sub_recipe.keys():
sub_recipe[ingredient] += recipe[ingredient]
else:
sub_recipe[ingredient] = recipe[ingredient]
else:
for sub_ingredient in sub_ingredients:
if sub_ingredient in sub_recipe.keys():
sub_recipe[sub_ingredient] += qty * sub_ingredients[sub_ingredient]
else:
sub_recipe[sub_ingredient] = qty * sub_ingredients[sub_ingredient]
collapse = True
# print('{} x {}: {}'.format(qty, ingredient, sub_ingredients))
if collapse is True:
return sub_recipe
else:
return None
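# collapse_recipe expands one crafting level: each craftable ingredient is
# replaced by its own ingredients with quantities multiplied through; it
# returns None once nothing in the recipe can be broken down further.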
def get_multi_recipe(name, levels=1):
raw_text = load_item_design_raw()
item_lookup = parse_item_designs(raw_text)
real_name = get_real_name(name, item_lookup)
df_items = xmltext_to_df(raw_text)
item_rlookup = get_item_rlookup(df_items)
recipe = get_recipe(df_items, item_rlookup, real_name)
txt = ''
level = 1
while recipe is not None:
txt += print_recipe(recipe, df_items)
recipe = collapse_recipe(recipe, df_items, item_rlookup)
level += 1
if level > levels:
break
if recipe is not None:
txt += '\n\n'
return txt
def get_item_recipe(name, levels=5):
raw_text = load_item_design_raw()
item_lookup = parse_item_designs(raw_text)
# print('name = {}'.format(name))
real_name = get_real_name(name, item_lookup)
# print('real_name = {}'.format(real_name))
    if real_name is not None:
        content = get_multi_recipe(real_name, levels)
        return content, real_name
    # Return a 2-tuple in the not-found case too, so callers can safely
    # unpack `content, real_name = get_item_recipe(...)`.
    return None, None
# ----- Lists ---------------------------------------------------------
def get_item_list():
raw_text = load_item_design_raw()
df_items = xmltext_to_df(raw_text)
items = list(df_items['ItemDesignName'])
# print('List of items: ' + ', '.join(items))
return core.list_to_text(items)
# ----- Main ----------------------------------------------------------
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Pixel Starships Market API')
parser.add_argument('--market', action='store_true',
help='Get Market Data')
parser.add_argument('--subtype', default='None',
help='Subtype for market data')
parser.add_argument('--rarity', default='None',
help='Rarity for market data')
parser.add_argument('--stats', default=None,
help='Get Stats on Item')
parser.add_argument('--recipe', default=None,
help='Get Recipe for Item')
parser.add_argument('--price', default=None,
help='Get Price on Item')
parser.add_argument('--list', action='store_true',
help='Get List of items')
args = parser.parse_args()
if args.list is True:
# python3 pss_market.py --list
txt_list = get_item_list()
for txt in txt_list:
print(txt)
elif args.stats is not None:
# python3 pss_market.py --stats 'assault armor'
pass
elif args.recipe is not None:
name = args.recipe
content, real_name = get_item_recipe(name, levels=5)
if real_name is not None:
content = '**Recipe for {}**\n'.format(real_name) + content
content = content + '\n\nNote: bux prices listed here may not always be accurate due to transfers between alts/friends or other reasons'
print(content)
elif args.price is not None:
# python3 pss_market.py --price 'assault armor'
item_name = args.price
raw_text = load_item_design_raw()
rtbl = parse_item_designs(raw_text)
real_name = get_real_name(item_name, rtbl)
if real_name is not None:
print('Getting the price of {}'.format(real_name))
mkt_text = filter_item_designs(real_name, rtbl, filter='price')
print(mkt_text)
else:
print('{} not found'.format(item_name))
else:
print('Problem parsing argument list')
print('args.stats = {}'.format(args.stats))
print('args.price = {}'.format(args.price))
| [
"pandas.Series",
"pss_core.list_to_text",
"os.getenv",
"argparse.ArgumentParser",
"os.path.isfile",
"datetime.datetime.now",
"pss_core.get_production_server",
"numpy.isnan",
"pandas.DataFrame",
"re.sub",
"re.search"
] | [((588, 605), 'os.getenv', 'os.getenv', (['"""HOME"""'], {}), "('HOME')\n", (597, 605), False, 'import os\n'), ((638, 666), 'pss_core.get_production_server', 'core.get_production_server', ([], {}), '()\n', (664, 666), True, 'import pss_core as core\n'), ((1636, 1659), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1657, 1659), False, 'import datetime\n'), ((1817, 1840), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1838, 1840), False, 'import datetime\n'), ((2753, 2767), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2765, 2767), True, 'import pandas as pd\n'), ((3578, 3619), 're.sub', 're.sub', (['"""anonmask"""', '"""anonymousmask"""', 'item'], {}), "('anonmask', 'anonymousmask', item)\n", (3584, 3619), False, 'import re\n'), ((3631, 3662), 're.sub', 're.sub', (['"""armour"""', '"""armor"""', 'item'], {}), "('armour', 'armor', item)\n", (3637, 3662), False, 'import re\n'), ((3674, 3705), 're.sub', 're.sub', (['"""bunny"""', '"""rabbit"""', 'item'], {}), "('bunny', 'rabbit', item)\n", (3680, 3705), False, 'import re\n'), ((3717, 3783), 're.sub', 're.sub', (['"""(darkmatterrifle|dmr)(mark|mk)?(ii|2)"""', '"""dmrmarkii"""', 'item'], {}), "('(darkmatterrifle|dmr)(mark|mk)?(ii|2)', 'dmrmarkii', item)\n", (3723, 3783), False, 'import re\n'), ((3795, 3825), 're.sub', 're.sub', (['"""golden"""', '"""gold"""', 'item'], {}), "('golden', 'gold', item)\n", (3801, 3825), False, 'import re\n'), ((9076, 9091), 'numpy.isnan', 'np.isnan', (['total'], {}), '(total)\n', (9084, 9091), True, 'import numpy as np\n'), ((11442, 11466), 'pss_core.list_to_text', 'core.list_to_text', (['items'], {}), '(items)\n', (11459, 11466), True, 'import pss_core as core\n'), ((11581, 11646), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Pixel Starships Market API"""'}), "(description='Pixel Starships Market API')\n", (11604, 11646), False, 'import argparse\n'), ((1913, 1937), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (1927, 1937), False, 'import os\n'), ((4104, 4136), 're.search', 're.search', (['item_fixed', 'item_name'], {}), '(item_fixed, item_name)\n', (4113, 4136), False, 'import re\n'), ((6436, 6454), 'pandas.DataFrame', 'pd.DataFrame', (['rtbl'], {}), '(rtbl)\n', (6448, 6454), True, 'import pandas as pd\n'), ((4670, 4711), 're.sub', 're.sub', (['"""Equipment"""', '""""""', "d['ItemSubType']"], {}), "('Equipment', '', d['ItemSubType'])\n", (4676, 4711), False, 'import re\n'), ((5829, 5853), 'pandas.Series', 'pd.Series', (['item_original'], {}), '(item_original)\n', (5838, 5853), True, 'import pandas as pd\n'), ((2934, 2969), 'pandas.DataFrame', 'pd.DataFrame', (['ccc.attrib'], {'index': '[i]'}), '(ccc.attrib, index=[i])\n', (2946, 2969), True, 'import pandas as pd\n'), ((5754, 5778), 're.search', 're.search', (['item_fixed', 'n'], {}), '(item_fixed, n)\n', (5763, 5778), False, 'import re\n')] |
# Note: convolutional CorrNet on the XRMB dataset.
# Had to downgrade numpy to 1.11.0, since 1.13.0 no longer accepts
# float-typed array indices.
# myarray = np.fromfile('BinaryData.dat', dtype=float)
import sys
import math
import random
import warnings
import numpy as np
from sklearn import svm
import keras.backend as K
from keras.models import Model
#from theano import tensor as T
import matplotlib.pyplot as plt
from keras.layers import Input, Merge
from keras.engine.topology import Layer
from sklearn.metrics import accuracy_score
from keras.layers.core import Activation, Dense, Reshape
from keras.layers import Conv1D, MaxPooling1D, UpSampling1D, Flatten
warnings.simplefilter("ignore")
nb_epoch = 40
batch_size = 100
dimx = 273
dimy = 112
lamda = 0.02
loss_type = 2  # 1: l1+l2+l3-L4; 2: l2+l3-L4; 3: l1+l2+l3; 4: l2+l3
def svm_classifier(train_x, train_y, valid_x, valid_y, test_x, test_y):
clf = svm.LinearSVC()
#print train_x.shape,train_y.shape
clf.fit(train_x,train_y)
pred = clf.predict(valid_x)
va = accuracy_score(np.ravel(valid_y),np.ravel(pred))
pred = clf.predict(test_x)
ta = accuracy_score(np.ravel(test_y),np.ravel(pred))
return va, ta
def split(train_l,train_r,label,ratio):
total = train_l.shape[0]
train_samples = int(total*(1-ratio))
test_samples = total-train_samples
tr_l,tst_l,tr_r,tst_r,l_tr,l_tst=[],[],[],[],[],[]
dat=random.sample(range(total),train_samples)
for a in dat:
tr_l.append(train_l[a,:])
tr_r.append(train_r[a,:])
l_tr.append(label[a])
    for i in range(total):  # scan every index; keep those not sampled for training
if i not in dat:
tst_l.append(train_l[i,:])
tst_r.append(train_r[i,:])
l_tst.append(label[i])
tr_l = np.array(tr_l)
tr_r = np.array(tr_r)
tst_l = np.array(tst_l)
tst_r = np.array(tst_r)
l_tr = np.array(l_tr)
l_tst = np.array(l_tst)
return tr_l,tst_l,tr_r,tst_r,l_tr,l_tst
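# e.g. split(X_l, X_r, y, ratio=0.01) keeps ~99% of the rows for training and
# returns (train_l, test_l, train_r, test_r, train_labels, test_labels).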
class ZeroPadding(Layer):
    """Outputs zeros shaped like its input -- used to blank out one view."""
    def __init__(self, **kwargs):
        super(ZeroPadding, self).__init__(**kwargs)
    def call(self, x, mask=None):
        return K.zeros_like(x)
def get_output_shape_for(self, input_shape):
return input_shape
class MultiplyBy2(Layer):
def __init__(self, **kwargs):
super(MultiplyBy2, self).__init__(**kwargs)
def call(self, x, mask=None):
return 2*x
def get_output_shape_for(self, input_shape):
return input_shape
class CorrnetCost(Layer):
def __init__(self,lamda, **kwargs):
super(CorrnetCost, self).__init__(**kwargs)
self.lamda = lamda
def cor(self,y1, y2, lamda):
y1_mean = K.mean(y1, axis=0)
y1_centered = y1 - y1_mean
y2_mean = K.mean(y2, axis=0)
y2_centered = y2 - y2_mean
corr_nr = K.sum(y1_centered * y2_centered, axis=0)
corr_dr1 = K.sqrt(K.sum(y1_centered * y1_centered, axis=0) + 1e-8)
corr_dr2 = K.sqrt(K.sum(y2_centered * y2_centered, axis=0) + 1e-8)
corr_dr = corr_dr1 * corr_dr2
corr = corr_nr / corr_dr
return K.sum(corr) * lamda
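    # The expression above is the Pearson correlation of each hidden unit
    # across the batch, summed over units and scaled by lamda:
    #   corr = lamda * sum_j cov(h1_j, h2_j) / (std(h1_j) * std(h2_j))
    # The 1e-8 terms guard against division by zero.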
def call(self ,x ,mask=None):
h1=x[0]
h2=x[1]
corr = self.cor(h1,h2,self.lamda)
#self.add_loss(corr,x)
#we output junk but be sure to use it for the loss to be added
return corr
def get_output_shape_for(self, input_shape):
#print input_shape[0][0]
return (input_shape[0][0],input_shape[0][1])
def corr_loss(y_true, y_pred):
#print y_true.type,y_pred.type
#return K.zeros_like(y_pred)
return y_pred
def project(model,inp):
#print (inp[0].shape, inp[1].shape)
m = model.predict([inp[0],inp[1]])
return m[2]
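# project() runs both views through the shared branch model and returns the
# common hidden representation h (the third output of branchModel).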
def reconstruct_from_left(model,inp):
img_inp = inp.reshape((28,14))
f, axarr = plt.subplots(1,2,sharey=False)
pred = model.predict([inp,np.zeros_like(inp)])
img = pred[0].reshape((28,14))
axarr[0].imshow(img_inp)
axarr[1].imshow(img)
def reconstruct_from_right(model,inp):
img_inp = inp.reshape((28,14))
f, axarr = plt.subplots(1,2,sharey=False)
pred = model.predict([np.zeros_like(inp),inp])
img = pred[1].reshape((28,14))
axarr[1].imshow(img_inp)
axarr[0].imshow(img)
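# Note: both reconstruct_* helpers assume 28x14 half-images (apparently a
# leftover from an MNIST-halves experiment); they are not called on XRMB data.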
def sum_corr(model):
view1 = np.load("MFCC_Test.npy")
view2 = np.load("XRMB_Test.npy")
x = project(model,[view1,np.zeros_like(view2)])
y = project(model,[np.zeros_like(view1),view2])
print ("test correlation")
corr = 0
for i in range(0,len(x[0])):
x1 = x[:,i] - (np.ones(len(x))*(sum(x[:,i])/len(x)))
x2 = y[:,i] - (np.ones(len(y))*(sum(y[:,i])/len(y)))
nr = sum(x1 * x2)/(math.sqrt(sum(x1*x1))*math.sqrt(sum(x2*x2)))
corr+=nr
print (corr)
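# sum_corr reports the summed per-dimension correlation between the two
# projected test views -- the score used to evaluate the common representation.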
def transfer(model):
view11 = np.load("MFCC_Test.npy")
view22 = np.load("XRMB_Test.npy")
labels = np.load("Labels_Test.npy")
view1 = project(model,[view11,np.zeros_like(view22)])
view2 = project(model,[np.zeros_like(view11),view22])
perp = len(view1) // 5
print ("view1 to view2")
acc = 0
for i in range(5):
print('@ i' + str(i))
        test_x = view2[i*perp:(i+1)*perp]
test_y = labels[i*perp:(i+1)*perp]
if i==0:
train_x = view1[perp:len(view1)]
train_y = labels[perp:len(view1)]
elif i==4:
train_x = view1[0:4*perp]
train_y = labels[0:4*perp]
else:
train_x1 = view1[0:i*perp]
train_y1 = labels[0:i*perp]
train_x2 = view1[(i+1)*perp:len(view1)]
train_y2 = labels[(i+1)*perp:len(view1)]
train_x = np.concatenate((train_x1,train_x2))
train_y = np.concatenate((train_y1,train_y2))
va, ta = svm_classifier(train_x, train_y, test_x, test_y, test_x, test_y)
acc += ta
print (acc/5)
print ("view2 to view1")
acc = 0
for i in range(5):
print('@ i' + str(i))
test_x = view1[i*perp:(i+1)*perp]
test_y = labels[i*perp:(i+1)*perp]
if i==0:
train_x = view2[perp:len(view1)]
train_y = labels[perp:len(view1)]
elif i==4:
train_x = view2[0:4*perp]
train_y = labels[0:4*perp]
else:
train_x1 = view2[0:i*perp]
train_y1 = labels[0:i*perp]
train_x2 = view2[(i+1)*perp:len(view1)]
train_y2 = labels[(i+1)*perp:len(view1)]
train_x = np.concatenate((train_x1,train_x2))
train_y = np.concatenate((train_y1,train_y2))
va, ta = svm_classifier(train_x, train_y, test_x, test_y, test_x, test_y)
acc += ta
print (acc/5)
def prepare_data():
data_l = np.load('MFCC_Train.npy')
data_r = np.load('XRMB_Train.npy')
label = np.load('Labels_Train.npy')
X_train_l, X_test_l, X_train_r, X_test_r,y_train,y_test = split(data_l,data_r,label,ratio=0.0)
return X_train_l, X_train_r
def buildModel(loss_type,lamda):
inpx = Input(shape=(dimx,))
inpy = Input(shape=(dimy,))
hx = Reshape((dimx, 1))(inpx)
hx = Conv1D(256, 5, activation='relu', padding='valid', strides=1)(hx)
hx = MaxPooling1D(pool_size=4, padding='valid')(hx)
hx = Conv1D(65, 4, activation='relu', padding='valid', strides=1)(hx)
hx = MaxPooling1D(pool_size=3, padding='valid')(hx)
hx = Flatten()(hx)
hx = Dense(560,activation='sigmoid')(hx)
hx = Dense(280, activation='sigmoid')(hx)
hx = Dense(112, activation='sigmoid')(hx)
hx = Dense(680, activation='sigmoid')(hx)
hx = Dense(1365, activation='sigmoid')(hx)
hy = Reshape((dimy, 1))(inpy)
hy = Conv1D(256, 3, activation='relu', padding='valid', strides=1)(hy)
hy = MaxPooling1D(pool_size=2, padding='valid')(hy)
hy = Conv1D(50, 3, activation='relu', padding='valid', strides=1)(hy)
hy = MaxPooling1D(pool_size=2, padding='valid')(hy)
hy = Flatten()(hy)
hy = Dense(560,activation='sigmoid')(hy)
hy = Dense(280, activation='sigmoid')(hy)
hy = Dense(112, activation='sigmoid')(hy)
hy = Dense(680, activation='sigmoid')(hy)
hy = Dense(1365, activation='sigmoid')(hy)
#h = Activation("sigmoid")( Merge(mode="sum")([hx,hy]) )
h = Merge(mode="sum")([hx,hy])
#recx = Dense(hdim_deep,activation='sigmoid')(h)
recx = Dense(dimx)(h)
#recy = Dense(hdim_deep,activation='sigmoid')(h)
recy = Dense(dimy)(h)
branchModel = Model( [inpx,inpy],[recx,recy,h])
[recx1,recy1,h1] = branchModel( [inpx, ZeroPadding()(inpy)])
[recx2,recy2,h2] = branchModel( [ZeroPadding()(inpx), inpy ])
#you may probably add a reconstruction from combined
[recx3,recy3,h] = branchModel([inpx, inpy])
corr=CorrnetCost(-lamda)([h1,h2])
model = Model( [inpx,inpy],[recy1,recx2,recx1,recy2,corr]) #2
model.compile( loss=["mse","mse","mse","mse",corr_loss],optimizer="rmsprop")
branchModel.summary()
# if loss_type == 1:
# model = Model( [inpx,inpy],[recy1,recx2,recx3,recx1,recy2,recy3,corr])
# model.compile( loss=["mse","mse","mse","mse","mse","mse",corr_loss],optimizer="rmsprop")
# elif loss_type == 2:
# model = Model( [inpx,inpy],[recy1,recx2,recx1,recy2,corr])
# model.compile( loss=["mse","mse","mse","mse",corr_loss],optimizer="rmsprop")
# elif loss_type == 3:
# model = Model( [inpx,inpy],[recy1,recx2,recx3,recx1,recy2,recy3])
# model.compile( loss=["mse","mse","mse","mse","mse","mse"],optimizer="rmsprop")
# elif loss_type == 4:
# model = Model( [inpx,inpy],[recy1,recx2,recx1,recy2])
# model.compile( loss=["mse","mse","mse","mse"],optimizer="rmsprop")
return model, branchModel
def trainModel(model,data_left,data_right,loss_type,nb_epoch,batch_size):
X_train_l = data_left
X_train_r = data_right
#y_train = np_utils.to_categorical(y_train, nb_classes)
#y_test = np_utils.to_categorical(y_test, nb_classes)
data_l = np.load('MFCC_Train.npy')
data_r = np.load('XRMB_Train.npy')
label = np.load('Labels_Train.npy')
X_train_l, X_test_l, X_train_r, X_test_r,y_train,y_test = split(data_l,data_r,label,ratio=0.01)
print ('data split')
model.fit([X_train_l,X_train_r], [X_train_r,X_train_l,X_train_l,X_train_r,np.zeros((X_train_l.shape[0],112))],
nb_epoch=nb_epoch,
batch_size=batch_size,verbose=1)
# if loss_type == 1:
# print ('L_Type: l1+l2+l3-L4 h_dim:',hdim,' lamda:',lamda)
# model.fit([X_train_l,X_train_r], [X_train_r,X_train_l,X_train_l,X_train_l,X_train_r,X_train_r,np.zeros((X_train_l.shape[0],h_loss))],
# nb_epoch=nb_epoch,
# batch_size=batch_size,verbose=1)
# elif loss_type == 2:
# print ('L_Type: l2+l3-L4 h_dim:',hdim,' hdim_deep',hdim_deep,' lamda:',lamda)
# model.fit([X_train_l,X_train_r], [X_train_r,X_train_l,X_train_l,X_train_r,np.zeros((X_train_l.shape[0],h_loss))],
# nb_epoch=nb_epoch,
# batch_size=batch_size,verbose=1)
# elif loss_type == 3:
# print ('L_Type: l1+l2+l3 h_dim:',hdim,' lamda:',lamda)
# model.fit([X_train_l,X_train_r], [X_train_r,X_train_l,X_train_l,X_train_l,X_train_r,X_train_r],
# nb_epoch=nb_epoch,
# batch_size=batch_size,verbose=1)
# elif loss_type == 4:
# print ('L_Type: l2+l3 h_dim:',hdim,' lamda:',lamda)
# model.fit([X_train_l,X_train_r], [X_train_r,X_train_l,X_train_l,X_train_r],
# nb_epoch=nb_epoch,
# batch_size=batch_size,verbose=1)
# score = m.evaluate([X_test_l,X_test_r], [X_test_l,X_test_l,X_test_r,X_test_r,np.zeros((X_test_l.shape[0],hdim))],
# batch_size=100)
# print score
def testModel(b_model):
transfer(b_model)
sum_corr(b_model)
left_view, right_view = prepare_data()
model,branchModel = buildModel(loss_type=loss_type,lamda=lamda)
trainModel(model=model, data_left=left_view, data_right = right_view,
loss_type=loss_type,nb_epoch=nb_epoch,batch_size=batch_size)
testModel(branchModel) | [
"keras.backend.sum",
"numpy.array",
"keras.layers.MaxPooling1D",
"keras.layers.core.Reshape",
"keras.models.Model",
"numpy.concatenate",
"warnings.simplefilter",
"keras.layers.Flatten",
"sklearn.svm.LinearSVC",
"keras.layers.core.Dense",
"keras.layers.Merge",
"keras.backend.mean",
"keras.lay... | [((663, 694), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (684, 694), False, 'import warnings\n'), ((933, 948), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {}), '()\n', (946, 948), False, 'from sklearn import svm\n'), ((1821, 1835), 'numpy.array', 'np.array', (['tr_l'], {}), '(tr_l)\n', (1829, 1835), True, 'import numpy as np\n'), ((1848, 1862), 'numpy.array', 'np.array', (['tr_r'], {}), '(tr_r)\n', (1856, 1862), True, 'import numpy as np\n'), ((1876, 1891), 'numpy.array', 'np.array', (['tst_l'], {}), '(tst_l)\n', (1884, 1891), True, 'import numpy as np\n'), ((1905, 1920), 'numpy.array', 'np.array', (['tst_r'], {}), '(tst_r)\n', (1913, 1920), True, 'import numpy as np\n'), ((1933, 1947), 'numpy.array', 'np.array', (['l_tr'], {}), '(l_tr)\n', (1941, 1947), True, 'import numpy as np\n'), ((1961, 1976), 'numpy.array', 'np.array', (['l_tst'], {}), '(l_tst)\n', (1969, 1976), True, 'import numpy as np\n'), ((3940, 3972), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharey': '(False)'}), '(1, 2, sharey=False)\n', (3952, 3972), True, 'import matplotlib.pyplot as plt\n'), ((4209, 4241), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharey': '(False)'}), '(1, 2, sharey=False)\n', (4221, 4241), True, 'import matplotlib.pyplot as plt\n'), ((4425, 4449), 'numpy.load', 'np.load', (['"""MFCC_Test.npy"""'], {}), "('MFCC_Test.npy')\n", (4432, 4449), True, 'import numpy as np\n'), ((4463, 4487), 'numpy.load', 'np.load', (['"""XRMB_Test.npy"""'], {}), "('XRMB_Test.npy')\n", (4470, 4487), True, 'import numpy as np\n'), ((4946, 4970), 'numpy.load', 'np.load', (['"""MFCC_Test.npy"""'], {}), "('MFCC_Test.npy')\n", (4953, 4970), True, 'import numpy as np\n'), ((4985, 5009), 'numpy.load', 'np.load', (['"""XRMB_Test.npy"""'], {}), "('XRMB_Test.npy')\n", (4992, 5009), True, 'import numpy as np\n'), ((5024, 5050), 'numpy.load', 'np.load', (['"""Labels_Test.npy"""'], {}), "('Labels_Test.npy')\n", (5031, 5050), True, 'import numpy as np\n'), ((6935, 6960), 'numpy.load', 'np.load', (['"""MFCC_Train.npy"""'], {}), "('MFCC_Train.npy')\n", (6942, 6960), True, 'import numpy as np\n'), ((6975, 7000), 'numpy.load', 'np.load', (['"""XRMB_Train.npy"""'], {}), "('XRMB_Train.npy')\n", (6982, 7000), True, 'import numpy as np\n'), ((7014, 7041), 'numpy.load', 'np.load', (['"""Labels_Train.npy"""'], {}), "('Labels_Train.npy')\n", (7021, 7041), True, 'import numpy as np\n'), ((7225, 7245), 'keras.layers.Input', 'Input', ([], {'shape': '(dimx,)'}), '(shape=(dimx,))\n', (7230, 7245), False, 'from keras.layers import Input, Merge\n'), ((7258, 7278), 'keras.layers.Input', 'Input', ([], {'shape': '(dimy,)'}), '(shape=(dimy,))\n', (7263, 7278), False, 'from keras.layers import Input, Merge\n'), ((8704, 8740), 'keras.models.Model', 'Model', (['[inpx, inpy]', '[recx, recy, h]'], {}), '([inpx, inpy], [recx, recy, h])\n', (8709, 8740), False, 'from keras.models import Model\n'), ((9036, 9091), 'keras.models.Model', 'Model', (['[inpx, inpy]', '[recy1, recx2, recx1, recy2, corr]'], {}), '([inpx, inpy], [recy1, recx2, recx1, recy2, corr])\n', (9041, 9091), False, 'from keras.models import Model\n'), ((10260, 10285), 'numpy.load', 'np.load', (['"""MFCC_Train.npy"""'], {}), "('MFCC_Train.npy')\n", (10267, 10285), True, 'import numpy as np\n'), ((10300, 10325), 'numpy.load', 'np.load', (['"""XRMB_Train.npy"""'], {}), "('XRMB_Train.npy')\n", (10307, 10325), True, 'import numpy as np\n'), ((10339, 10366), 'numpy.load', 'np.load', 
(['"""Labels_Train.npy"""'], {}), "('Labels_Train.npy')\n", (10346, 10366), True, 'import numpy as np\n'), ((1077, 1094), 'numpy.ravel', 'np.ravel', (['valid_y'], {}), '(valid_y)\n', (1085, 1094), True, 'import numpy as np\n'), ((1095, 1109), 'numpy.ravel', 'np.ravel', (['pred'], {}), '(pred)\n', (1103, 1109), True, 'import numpy as np\n'), ((1168, 1184), 'numpy.ravel', 'np.ravel', (['test_y'], {}), '(test_y)\n', (1176, 1184), True, 'import numpy as np\n'), ((1185, 1199), 'numpy.ravel', 'np.ravel', (['pred'], {}), '(pred)\n', (1193, 1199), True, 'import numpy as np\n'), ((2208, 2223), 'keras.backend.zeros_like', 'K.zeros_like', (['x'], {}), '(x)\n', (2220, 2223), True, 'import keras.backend as K\n'), ((2766, 2784), 'keras.backend.mean', 'K.mean', (['y1'], {'axis': '(0)'}), '(y1, axis=0)\n', (2772, 2784), True, 'import keras.backend as K\n'), ((2840, 2858), 'keras.backend.mean', 'K.mean', (['y2'], {'axis': '(0)'}), '(y2, axis=0)\n', (2846, 2858), True, 'import keras.backend as K\n'), ((2914, 2954), 'keras.backend.sum', 'K.sum', (['(y1_centered * y2_centered)'], {'axis': '(0)'}), '(y1_centered * y2_centered, axis=0)\n', (2919, 2954), True, 'import keras.backend as K\n'), ((7295, 7313), 'keras.layers.core.Reshape', 'Reshape', (['(dimx, 1)'], {}), '((dimx, 1))\n', (7302, 7313), False, 'from keras.layers.core import Activation, Dense, Reshape\n'), ((7330, 7391), 'keras.layers.Conv1D', 'Conv1D', (['(256)', '(5)'], {'activation': '"""relu"""', 'padding': '"""valid"""', 'strides': '(1)'}), "(256, 5, activation='relu', padding='valid', strides=1)\n", (7336, 7391), False, 'from keras.layers import Conv1D, MaxPooling1D, UpSampling1D, Flatten\n'), ((7406, 7448), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(4)', 'padding': '"""valid"""'}), "(pool_size=4, padding='valid')\n", (7418, 7448), False, 'from keras.layers import Conv1D, MaxPooling1D, UpSampling1D, Flatten\n'), ((7463, 7523), 'keras.layers.Conv1D', 'Conv1D', (['(65)', '(4)'], {'activation': '"""relu"""', 'padding': '"""valid"""', 'strides': '(1)'}), "(65, 4, activation='relu', padding='valid', strides=1)\n", (7469, 7523), False, 'from keras.layers import Conv1D, MaxPooling1D, UpSampling1D, Flatten\n'), ((7538, 7580), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(3)', 'padding': '"""valid"""'}), "(pool_size=3, padding='valid')\n", (7550, 7580), False, 'from keras.layers import Conv1D, MaxPooling1D, UpSampling1D, Flatten\n'), ((7595, 7604), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (7602, 7604), False, 'from keras.layers import Conv1D, MaxPooling1D, UpSampling1D, Flatten\n'), ((7619, 7651), 'keras.layers.core.Dense', 'Dense', (['(560)'], {'activation': '"""sigmoid"""'}), "(560, activation='sigmoid')\n", (7624, 7651), False, 'from keras.layers.core import Activation, Dense, Reshape\n'), ((7665, 7697), 'keras.layers.core.Dense', 'Dense', (['(280)'], {'activation': '"""sigmoid"""'}), "(280, activation='sigmoid')\n", (7670, 7697), False, 'from keras.layers.core import Activation, Dense, Reshape\n'), ((7712, 7744), 'keras.layers.core.Dense', 'Dense', (['(112)'], {'activation': '"""sigmoid"""'}), "(112, activation='sigmoid')\n", (7717, 7744), False, 'from keras.layers.core import Activation, Dense, Reshape\n'), ((7759, 7791), 'keras.layers.core.Dense', 'Dense', (['(680)'], {'activation': '"""sigmoid"""'}), "(680, activation='sigmoid')\n", (7764, 7791), False, 'from keras.layers.core import Activation, Dense, Reshape\n'), ((7806, 7839), 'keras.layers.core.Dense', 'Dense', (['(1365)'], {'activation': 
'"""sigmoid"""'}), "(1365, activation='sigmoid')\n", (7811, 7839), False, 'from keras.layers.core import Activation, Dense, Reshape\n'), ((7860, 7878), 'keras.layers.core.Reshape', 'Reshape', (['(dimy, 1)'], {}), '((dimy, 1))\n', (7867, 7878), False, 'from keras.layers.core import Activation, Dense, Reshape\n'), ((7895, 7956), 'keras.layers.Conv1D', 'Conv1D', (['(256)', '(3)'], {'activation': '"""relu"""', 'padding': '"""valid"""', 'strides': '(1)'}), "(256, 3, activation='relu', padding='valid', strides=1)\n", (7901, 7956), False, 'from keras.layers import Conv1D, MaxPooling1D, UpSampling1D, Flatten\n'), ((7971, 8013), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(2)', 'padding': '"""valid"""'}), "(pool_size=2, padding='valid')\n", (7983, 8013), False, 'from keras.layers import Conv1D, MaxPooling1D, UpSampling1D, Flatten\n'), ((8028, 8088), 'keras.layers.Conv1D', 'Conv1D', (['(50)', '(3)'], {'activation': '"""relu"""', 'padding': '"""valid"""', 'strides': '(1)'}), "(50, 3, activation='relu', padding='valid', strides=1)\n", (8034, 8088), False, 'from keras.layers import Conv1D, MaxPooling1D, UpSampling1D, Flatten\n'), ((8103, 8145), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(2)', 'padding': '"""valid"""'}), "(pool_size=2, padding='valid')\n", (8115, 8145), False, 'from keras.layers import Conv1D, MaxPooling1D, UpSampling1D, Flatten\n'), ((8160, 8169), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (8167, 8169), False, 'from keras.layers import Conv1D, MaxPooling1D, UpSampling1D, Flatten\n'), ((8184, 8216), 'keras.layers.core.Dense', 'Dense', (['(560)'], {'activation': '"""sigmoid"""'}), "(560, activation='sigmoid')\n", (8189, 8216), False, 'from keras.layers.core import Activation, Dense, Reshape\n'), ((8230, 8262), 'keras.layers.core.Dense', 'Dense', (['(280)'], {'activation': '"""sigmoid"""'}), "(280, activation='sigmoid')\n", (8235, 8262), False, 'from keras.layers.core import Activation, Dense, Reshape\n'), ((8277, 8309), 'keras.layers.core.Dense', 'Dense', (['(112)'], {'activation': '"""sigmoid"""'}), "(112, activation='sigmoid')\n", (8282, 8309), False, 'from keras.layers.core import Activation, Dense, Reshape\n'), ((8324, 8356), 'keras.layers.core.Dense', 'Dense', (['(680)'], {'activation': '"""sigmoid"""'}), "(680, activation='sigmoid')\n", (8329, 8356), False, 'from keras.layers.core import Activation, Dense, Reshape\n'), ((8371, 8404), 'keras.layers.core.Dense', 'Dense', (['(1365)'], {'activation': '"""sigmoid"""'}), "(1365, activation='sigmoid')\n", (8376, 8404), False, 'from keras.layers.core import Activation, Dense, Reshape\n'), ((8483, 8500), 'keras.layers.Merge', 'Merge', ([], {'mode': '"""sum"""'}), "(mode='sum')\n", (8488, 8500), False, 'from keras.layers import Input, Merge\n'), ((8583, 8594), 'keras.layers.core.Dense', 'Dense', (['dimx'], {}), '(dimx)\n', (8588, 8594), False, 'from keras.layers.core import Activation, Dense, Reshape\n'), ((8664, 8675), 'keras.layers.core.Dense', 'Dense', (['dimy'], {}), '(dimy)\n', (8669, 8675), False, 'from keras.layers.core import Activation, Dense, Reshape\n'), ((3196, 3207), 'keras.backend.sum', 'K.sum', (['corr'], {}), '(corr)\n', (3201, 3207), True, 'import keras.backend as K\n'), ((4002, 4020), 'numpy.zeros_like', 'np.zeros_like', (['inp'], {}), '(inp)\n', (4015, 4020), True, 'import numpy as np\n'), ((4267, 4285), 'numpy.zeros_like', 'np.zeros_like', (['inp'], {}), '(inp)\n', (4280, 4285), True, 'import numpy as np\n'), ((4518, 4538), 'numpy.zeros_like', 'np.zeros_like', 
(['view2'], {}), '(view2)\n', (4531, 4538), True, 'import numpy as np\n'), ((4565, 4585), 'numpy.zeros_like', 'np.zeros_like', (['view1'], {}), '(view1)\n', (4578, 4585), True, 'import numpy as np\n'), ((5086, 5107), 'numpy.zeros_like', 'np.zeros_like', (['view22'], {}), '(view22)\n', (5099, 5107), True, 'import numpy as np\n'), ((5138, 5159), 'numpy.zeros_like', 'np.zeros_like', (['view11'], {}), '(view11)\n', (5151, 5159), True, 'import numpy as np\n'), ((10573, 10608), 'numpy.zeros', 'np.zeros', (['(X_train_l.shape[0], 112)'], {}), '((X_train_l.shape[0], 112))\n', (10581, 10608), True, 'import numpy as np\n'), ((2982, 3022), 'keras.backend.sum', 'K.sum', (['(y1_centered * y1_centered)'], {'axis': '(0)'}), '(y1_centered * y1_centered, axis=0)\n', (2987, 3022), True, 'import keras.backend as K\n'), ((3058, 3098), 'keras.backend.sum', 'K.sum', (['(y2_centered * y2_centered)'], {'axis': '(0)'}), '(y2_centered * y2_centered, axis=0)\n', (3063, 3098), True, 'import keras.backend as K\n'), ((5834, 5870), 'numpy.concatenate', 'np.concatenate', (['(train_x1, train_x2)'], {}), '((train_x1, train_x2))\n', (5848, 5870), True, 'import numpy as np\n'), ((5893, 5929), 'numpy.concatenate', 'np.concatenate', (['(train_y1, train_y2)'], {}), '((train_y1, train_y2))\n', (5907, 5929), True, 'import numpy as np\n'), ((6682, 6718), 'numpy.concatenate', 'np.concatenate', (['(train_x1, train_x2)'], {}), '((train_x1, train_x2))\n', (6696, 6718), True, 'import numpy as np\n'), ((6741, 6777), 'numpy.concatenate', 'np.concatenate', (['(train_y1, train_y2)'], {}), '((train_y1, train_y2))\n', (6755, 6777), True, 'import numpy as np\n')] |
import numpy as np
import torch
from mushroom_rl.algorithms.policy_search import *
from mushroom_rl.approximators import Regressor
from mushroom_rl.approximators.parametric import LinearApproximator
from mushroom_rl.core import Core
from mushroom_rl.environments.lqr import LQR
from mushroom_rl.policy.gaussian_policy import StateStdGaussianPolicy
from mushroom_rl.utils.parameters import AdaptiveParameter
def learn(alg, alg_params):
mdp = LQR.generate(dimensions=1)
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
approximator_params = dict(input_dim=mdp.info.observation_space.shape)
approximator = Regressor(LinearApproximator,
input_shape=mdp.info.observation_space.shape,
output_shape=mdp.info.action_space.shape,
params=approximator_params)
sigma = Regressor(LinearApproximator,
input_shape=mdp.info.observation_space.shape,
output_shape=mdp.info.action_space.shape,
params=approximator_params)
sigma_weights = 2 * np.ones(sigma.weights_size)
sigma.set_weights(sigma_weights)
policy = StateStdGaussianPolicy(approximator, sigma)
agent = alg(mdp.info, policy, **alg_params)
core = Core(agent, mdp)
core.learn(n_episodes=10, n_episodes_per_fit=5)
return policy
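# learn() trains the given policy-gradient algorithm for 10 episodes on a
# one-dimensional LQR task and returns the resulting Gaussian policy.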
def test_REINFORCE():
params = dict(learning_rate=AdaptiveParameter(value=.01))
policy = learn(REINFORCE, params)
    w = np.array([-0.0084793, 2.00536528])
assert np.allclose(w, policy.get_weights())
def test_GPOMDP():
params = dict(learning_rate=AdaptiveParameter(value=.01))
policy = learn(GPOMDP, params)
w = np.array([-0.07623939, 2.05232858])
assert np.allclose(w, policy.get_weights())
def test_eNAC():
params = dict(learning_rate=AdaptiveParameter(value=.01))
policy = learn(eNAC, params)
w = np.array([-0.03668018, 2.05112355])
assert np.allclose(w, policy.get_weights())
| [
"mushroom_rl.environments.lqr.LQR.generate",
"torch.manual_seed",
"mushroom_rl.approximators.Regressor",
"mushroom_rl.policy.gaussian_policy.StateStdGaussianPolicy",
"numpy.ones",
"mushroom_rl.utils.parameters.AdaptiveParameter",
"numpy.array",
"numpy.random.seed",
"torch.cuda.manual_seed",
"mushr... | [((448, 474), 'mushroom_rl.environments.lqr.LQR.generate', 'LQR.generate', ([], {'dimensions': '(1)'}), '(dimensions=1)\n', (460, 474), False, 'from mushroom_rl.environments.lqr import LQR\n'), ((479, 496), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (493, 496), True, 'import numpy as np\n'), ((501, 521), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (518, 521), False, 'import torch\n'), ((526, 551), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(1)'], {}), '(1)\n', (548, 551), False, 'import torch\n'), ((647, 796), 'mushroom_rl.approximators.Regressor', 'Regressor', (['LinearApproximator'], {'input_shape': 'mdp.info.observation_space.shape', 'output_shape': 'mdp.info.action_space.shape', 'params': 'approximator_params'}), '(LinearApproximator, input_shape=mdp.info.observation_space.shape,\n output_shape=mdp.info.action_space.shape, params=approximator_params)\n', (656, 796), False, 'from mushroom_rl.approximators import Regressor\n'), ((893, 1042), 'mushroom_rl.approximators.Regressor', 'Regressor', (['LinearApproximator'], {'input_shape': 'mdp.info.observation_space.shape', 'output_shape': 'mdp.info.action_space.shape', 'params': 'approximator_params'}), '(LinearApproximator, input_shape=mdp.info.observation_space.shape,\n output_shape=mdp.info.action_space.shape, params=approximator_params)\n', (902, 1042), False, 'from mushroom_rl.approximators import Regressor\n'), ((1209, 1252), 'mushroom_rl.policy.gaussian_policy.StateStdGaussianPolicy', 'StateStdGaussianPolicy', (['approximator', 'sigma'], {}), '(approximator, sigma)\n', (1231, 1252), False, 'from mushroom_rl.policy.gaussian_policy import StateStdGaussianPolicy\n'), ((1314, 1330), 'mushroom_rl.core.Core', 'Core', (['agent', 'mdp'], {}), '(agent, mdp)\n', (1318, 1330), False, 'from mushroom_rl.core import Core\n'), ((1535, 1569), 'numpy.array', 'np.array', (['[-0.0084793, 2.00536528]'], {}), '([-0.0084793, 2.00536528])\n', (1543, 1569), True, 'import numpy as np\n'), ((1747, 1782), 'numpy.array', 'np.array', (['[-0.07623939, 2.05232858]'], {}), '([-0.07623939, 2.05232858])\n', (1755, 1782), True, 'import numpy as np\n'), ((1955, 1990), 'numpy.array', 'np.array', (['[-0.03668018, 2.05112355]'], {}), '([-0.03668018, 2.05112355])\n', (1963, 1990), True, 'import numpy as np\n'), ((1130, 1157), 'numpy.ones', 'np.ones', (['sigma.weights_size'], {}), '(sigma.weights_size)\n', (1137, 1157), True, 'import numpy as np\n'), ((1459, 1488), 'mushroom_rl.utils.parameters.AdaptiveParameter', 'AdaptiveParameter', ([], {'value': '(0.01)'}), '(value=0.01)\n', (1476, 1488), False, 'from mushroom_rl.utils.parameters import AdaptiveParameter\n'), ((1674, 1703), 'mushroom_rl.utils.parameters.AdaptiveParameter', 'AdaptiveParameter', ([], {'value': '(0.01)'}), '(value=0.01)\n', (1691, 1703), False, 'from mushroom_rl.utils.parameters import AdaptiveParameter\n'), ((1884, 1913), 'mushroom_rl.utils.parameters.AdaptiveParameter', 'AdaptiveParameter', ([], {'value': '(0.01)'}), '(value=0.01)\n', (1901, 1913), False, 'from mushroom_rl.utils.parameters import AdaptiveParameter\n')] |